@ linux-patched/include/xenomai/linux/stdarg.h:1 @
--- linux/include/xenomai/version.h	1970-01-01 01:00:00.000000000 +0100
+#include <stdarg.h>
+++ linux-patched/include/xenomai/version.h	2022-03-21 12:58:32.309860487 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/sched.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_VERSION_H
+#define _XENOMAI_VERSION_H
+
+#ifndef __KERNEL__
+#include <xeno_config.h>
+#include <boilerplate/compiler.h>
+#endif
+
+#define XENO_VERSION(maj, min, rev)  (((maj)<<16)|((min)<<8)|(rev))
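+/* e.g. XENO_VERSION(3, 2, 1) encodes to 0x030201. */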
+
+#define XENO_VERSION_CODE	XENO_VERSION(CONFIG_XENO_VERSION_MAJOR,	\
+					     CONFIG_XENO_VERSION_MINOR,	\
+					     CONFIG_XENO_REVISION_LEVEL)
+
+#define XENO_VERSION_STRING	CONFIG_XENO_VERSION_STRING
+
+#endif /* _XENOMAI_VERSION_H */
+++ linux-patched/include/xenomai/pipeline/sched.h	2022-03-21 12:58:32.033863179 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/sirq.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_SCHED_H
+#define _COBALT_KERNEL_DOVETAIL_SCHED_H
+
+#include <cobalt/kernel/lock.h>
+
+struct xnthread;
+struct xnsched;
+struct task_struct;
+
+void pipeline_init_shadow_tcb(struct xnthread *thread);
+
+void pipeline_init_root_tcb(struct xnthread *thread);
+
+int ___xnsched_run(struct xnsched *sched);
+
+static inline int pipeline_schedule(struct xnsched *sched)
+{
+	return run_oob_call((int (*)(void *))___xnsched_run, sched);
+}
+
+static inline void pipeline_prep_switch_oob(struct xnthread *root)
+{
+	/* N/A */
+}
+
+bool pipeline_switch_to(struct xnthread *prev,
+			struct xnthread *next,
+			bool leaving_inband);
+
+int pipeline_leave_inband(void);
+
+int pipeline_leave_oob_prepare(void);
+
+static inline void pipeline_leave_oob_unlock(void)
+{
+	/*
+	 * We may not re-enable hard irqs due to the specifics of
+	 * stage escalation via run_oob_call(), to prevent breaking
+	 * the (virtual) interrupt state.
+	 */
+	xnlock_put(&nklock);
+}
+
+void pipeline_leave_oob_finish(void);
+
+static inline
+void pipeline_finalize_thread(struct xnthread *thread)
+{
+	/* N/A */
+}
+
+void pipeline_raise_mayday(struct task_struct *tsk);
+
+void pipeline_clear_mayday(void);
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_SCHED_H */
+++ linux-patched/include/xenomai/pipeline/sirq.h	2022-03-21 12:58:32.026863247 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/wrappers.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_SIRQ_H
+#define _COBALT_KERNEL_DOVETAIL_SIRQ_H
+
+#include <linux/irq_pipeline.h>
+#include <cobalt/kernel/assert.h>
+
+/*
+ * Wrappers to create "synthetic IRQs" the Dovetail way. Those
+ * interrupt channels can only be triggered by software, in order to run
+ * a handler on the in-band execution stage.
+ */
+
+static inline
+int pipeline_create_inband_sirq(irqreturn_t (*handler)(int irq, void *dev_id))
+{
+	/*
+	 * Allocate an IRQ from the synthetic interrupt domain then
+	 * trap it to @handler, to be fired from the in-band stage.
+	 */
+	int sirq, ret;
+
+	sirq = irq_create_direct_mapping(synthetic_irq_domain);
+	if (sirq == 0)
+		return -EAGAIN;
+
+	ret = __request_percpu_irq(sirq,
+			handler,
+			IRQF_NO_THREAD,
+			"Inband sirq",
+			&cobalt_machine_cpudata);
+
+	if (ret) {
+		irq_dispose_mapping(sirq);
+		return ret;
+	}
+
+	return sirq;
+}
+
+static inline
+void pipeline_delete_inband_sirq(int sirq)
+{
+	/*
+	 * Free the synthetic IRQ then deallocate it to its
+	 * originating domain.
+	 */
+	free_percpu_irq(sirq,
+		&cobalt_machine_cpudata);
+
+	irq_dispose_mapping(sirq);
+}
+
+static inline void pipeline_post_sirq(int sirq)
+{
+	/* Trigger the synthetic IRQ */
+	irq_post_inband(sirq);
+}
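+
+/*
+ * Minimal usage sketch (illustrative only, not part of this header):
+ * a driver could allocate a synthetic IRQ at init time, post it from
+ * out-of-band context, then release it on cleanup. The names my_sirq
+ * and my_inband_handler are made up.
+ *
+ *	static int my_sirq;
+ *
+ *	static irqreturn_t my_inband_handler(int irq, void *dev_id)
+ *	{
+ *		// Runs on the in-band stage when the sirq fires.
+ *		return IRQ_HANDLED;
+ *	}
+ *
+ *	// init:     my_sirq = pipeline_create_inband_sirq(my_inband_handler);
+ *	// oob path: pipeline_post_sirq(my_sirq);
+ *	// cleanup:  pipeline_delete_inband_sirq(my_sirq);
+ */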
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_SIRQ_H */
+++ linux-patched/include/xenomai/pipeline/wrappers.h	2022-03-21 12:58:32.019863315 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/kevents.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+#ifndef _COBALT_KERNEL_DOVETAIL_WRAPPERS_H
+#define _COBALT_KERNEL_DOVETAIL_WRAPPERS_H
+
+/* No wrapper needed so far. */
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_WRAPPERS_H */
+++ linux-patched/include/xenomai/pipeline/kevents.h	2022-03-21 12:58:32.011863393 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/vdso_fallback.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_KEVENTS_H
+#define _COBALT_KERNEL_DOVETAIL_KEVENTS_H
+
+#define KEVENT_PROPAGATE   0
+#define KEVENT_STOP        1
+
+struct cobalt_process;
+struct cobalt_thread;
+
+static inline
+int pipeline_attach_process(struct cobalt_process *process)
+{
+	return 0;
+}
+
+static inline
+void pipeline_detach_process(struct cobalt_process *process)
+{ }
+
+int pipeline_prepare_current(void);
+
+void pipeline_attach_current(struct xnthread *thread);
+
+int pipeline_trap_kevents(void);
+
+void pipeline_enable_kevents(void);
+
+void pipeline_cleanup_process(void);
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_KEVENTS_H */
+++ linux-patched/include/xenomai/pipeline/vdso_fallback.h	2022-03-21 12:58:32.004863461 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/machine.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ * Copyright (c) Siemens AG, 2021
+ */
+
+#ifndef _COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H
+#define _COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H
+
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/timer.h>
+#include <xenomai/posix/clock.h>
+
+#define is_clock_gettime(__nr)		((__nr) == __NR_clock_gettime)
+
+#ifndef __NR_clock_gettime64
+#define is_clock_gettime64(__nr)	0
+#else
+#define is_clock_gettime64(__nr)	((__nr) == __NR_clock_gettime64)
+#endif
+
+static __always_inline bool 
+pipeline_handle_vdso_fallback(int nr, struct pt_regs *regs)
+{
+	struct __kernel_old_timespec __user *u_old_ts;
+	struct __kernel_timespec uts, __user *u_uts;
+	struct __kernel_old_timespec old_ts;
+	struct timespec64 ts64;
+	int clock_id, ret = 0;
+	unsigned long args[6];
+
+	if (!is_clock_gettime(nr) && !is_clock_gettime64(nr))
+		return false;
+
+	/*
+	 * We need to fetch the args again because not all archs use the same
+	 * calling convention for Linux and Xenomai syscalls.
+	 */
+	syscall_get_arguments(current, regs, args);
+
+	clock_id = (int)args[0];
+	switch (clock_id) {
+	case CLOCK_MONOTONIC:
+		ns2ts(&ts64, xnclock_read_monotonic(&nkclock));
+		break;
+	case CLOCK_REALTIME:
+		ns2ts(&ts64, xnclock_read_realtime(&nkclock));
+		break;
+	default:
+		return false;
+	}
+
+	if (is_clock_gettime(nr)) {
+		old_ts.tv_sec = (__kernel_old_time_t)ts64.tv_sec;
+		old_ts.tv_nsec = ts64.tv_nsec;
+		u_old_ts = (struct __kernel_old_timespec __user *)args[1];
+		if (raw_copy_to_user(u_old_ts, &old_ts, sizeof(old_ts)))
+			ret = -EFAULT;
+	} else if (is_clock_gettime64(nr)) {
+		uts.tv_sec = ts64.tv_sec;
+		uts.tv_nsec = ts64.tv_nsec;
+		u_uts = (struct __kernel_timespec __user *)args[1];
+		if (raw_copy_to_user(u_uts, &uts, sizeof(uts)))
+			ret = -EFAULT;
+	}
+
+	__xn_status_return(regs, ret);
+
+	return true;
+}
+
+#endif /* !_COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H */
+++ linux-patched/include/xenomai/pipeline/machine.h	2022-03-21 12:58:31.997863530 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/irq.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_MACHINE_H
+#define _COBALT_KERNEL_DOVETAIL_MACHINE_H
+
+#include <linux/percpu.h>
+
+#ifdef CONFIG_FTRACE
+#define boot_lat_trace_notice "[LTRACE]"
+#else
+#define boot_lat_trace_notice ""
+#endif
+
+struct vm_area_struct;
+
+struct cobalt_machine {
+	const char *name;
+	int (*init)(void);
+	int (*late_init)(void);
+	void (*cleanup)(void);
+	void (*prefault)(struct vm_area_struct *vma);
+	const char *const *fault_labels;
+};
+
+extern struct cobalt_machine cobalt_machine;
+
+struct cobalt_machine_cpudata {
+	unsigned int faults[32];
+};
+
+DECLARE_PER_CPU(struct cobalt_machine_cpudata, cobalt_machine_cpudata);
+
+struct cobalt_pipeline {
+#ifdef CONFIG_SMP
+	cpumask_t supported_cpus;
+#endif
+};
+
+int pipeline_init(void);
+
+int pipeline_late_init(void);
+
+void pipeline_cleanup(void);
+
+extern struct cobalt_pipeline cobalt_pipeline;
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_MACHINE_H */
+++ linux-patched/include/xenomai/pipeline/irq.h	2022-03-21 12:58:31.989863608 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/tick.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_IRQ_H
+#define _COBALT_KERNEL_DOVETAIL_IRQ_H
+
+static inline void xnintr_init_proc(void)
+{
+	/* N/A */
+}
+
+static inline void xnintr_cleanup_proc(void)
+{
+	/* N/A */
+}
+
+static inline int xnintr_mount(void)
+{
+	/* N/A */
+	return 0;
+}
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_IRQ_H */
+++ linux-patched/include/xenomai/pipeline/tick.h	2022-03-21 12:58:31.982863676 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/thread.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_IPIPE_TICK_H
+#define _COBALT_KERNEL_IPIPE_TICK_H
+
+int pipeline_install_tick_proxy(void);
+
+void pipeline_uninstall_tick_proxy(void);
+
+struct xnsched;
+
+bool pipeline_must_force_program_tick(struct xnsched *sched);
+
+#endif /* !_COBALT_KERNEL_IPIPE_TICK_H */
+++ linux-patched/include/xenomai/pipeline/thread.h	2022-03-21 12:58:31.974863754 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/inband_work.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_THREAD_H
+#define _COBALT_KERNEL_DOVETAIL_THREAD_H
+
+#include <linux/dovetail.h>
+
+struct xnthread;
+
+#define cobalt_threadinfo oob_thread_state
+
+static inline struct cobalt_threadinfo *pipeline_current(void)
+{
+	return dovetail_current_state();
+}
+
+static inline
+struct xnthread *pipeline_thread_from_task(struct task_struct *p)
+{
+	return dovetail_task_state(p)->thread;
+}
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_THREAD_H */
+++ linux-patched/include/xenomai/pipeline/inband_work.h	2022-03-21 12:58:31.967863822 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/lock.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_INBAND_WORK_H
+#define _COBALT_KERNEL_DOVETAIL_INBAND_WORK_H
+
+#include <linux/irq_work.h>
+
+/*
+ * This field must be named inband_work and appear first in the
+ * container work struct.
+ */
+struct pipeline_inband_work {
+	struct irq_work work;
+};
+
+#define PIPELINE_INBAND_WORK_INITIALIZER(__work, __handler)		\
+	{								\
+		.work = IRQ_WORK_INIT((void (*)(struct irq_work *))__handler), \
+	}
+
+#define pipeline_post_inband_work(__work)				\
+			irq_work_queue(&(__work)->inband_work.work)
+
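+/*
+ * Illustrative sketch (not part of this header): the container struct
+ * embeds struct pipeline_inband_work as its first member, named
+ * inband_work, as required above. The handler is assumed to receive a
+ * pointer to that member, which also addresses the container since it
+ * comes first. Names below are made up.
+ *
+ *	struct my_deferred_work {
+ *		struct pipeline_inband_work inband_work; // must come first
+ *		int payload;
+ *	};
+ *
+ *	static void my_handler(struct pipeline_inband_work *inband_work)
+ *	{
+ *		struct my_deferred_work *p = container_of(inband_work,
+ *				struct my_deferred_work, inband_work);
+ *		// Runs from the in-band stage.
+ *	}
+ *
+ *	static struct my_deferred_work my_work = {
+ *		.inband_work = PIPELINE_INBAND_WORK_INITIALIZER(my_work,
+ *							my_handler),
+ *	};
+ *
+ *	// From out-of-band context: pipeline_post_inband_work(&my_work);
+ */
+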
+#endif /* !_COBALT_KERNEL_DOVETAIL_INBAND_WORK_H */
+++ linux-patched/include/xenomai/pipeline/lock.h	2022-03-21 12:58:31.960863890 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/pipeline.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_LOCK_H
+#define _COBALT_KERNEL_DOVETAIL_LOCK_H
+
+#include <linux/spinlock.h>
+
+typedef hard_spinlock_t pipeline_spinlock_t;
+
+#define PIPELINE_SPIN_LOCK_UNLOCKED(__name)  __HARD_SPIN_LOCK_INITIALIZER(__name)
+
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+/* Disable UP-over-SMP kernel optimization in debug mode. */
+#define __locking_active__  1
+
+#else
+
+#ifdef CONFIG_SMP
+#define __locking_active__  1
+#else
+#define __locking_active__  IS_ENABLED(CONFIG_SMP)
+#endif
+
+#endif
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_LOCK_H */
+++ linux-patched/include/xenomai/pipeline/pipeline.h	2022-03-21 12:58:31.952863968 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/trace.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_PIPELINE_H
+#define _COBALT_KERNEL_DOVETAIL_PIPELINE_H
+
+#include <linux/irq_pipeline.h>
+#include <linux/cpumask.h>
+#include <cobalt/kernel/assert.h>
+#include <asm/xenomai/features.h>
+#include <asm/xenomai/syscall.h>
+#include <asm/syscall.h>
+#include <pipeline/machine.h>
+
+typedef unsigned long spl_t;
+
+/*
+ * We only keep the LSB when testing in SMP mode in order to strip off
+ * the recursion marker (0x2) the nklock may store there.
+ */
+#define splhigh(x)  ((x) = oob_irq_save() & 1)
+#ifdef CONFIG_SMP
+#define splexit(x)  oob_irq_restore(x & 1)
+#else /* !CONFIG_SMP */
+#define splexit(x)  oob_irq_restore(x)
+#endif /* !CONFIG_SMP */
+#define splmax()    oob_irq_disable()
+#define splnone()   oob_irq_enable()
+#define spltest()   oob_irqs_disabled()
+
+#define is_secondary_domain()	running_inband()
+#define is_primary_domain()	running_oob()
+
+#ifdef CONFIG_SMP
+
+irqreturn_t pipeline_reschedule_ipi_handler(int irq, void *dev_id);
+
+static inline int pipeline_request_resched_ipi(void (*handler)(void))
+{
+	if (num_possible_cpus() == 1)
+		return 0;
+
+	/* Trap the out-of-band rescheduling interrupt. */
+	return __request_percpu_irq(RESCHEDULE_OOB_IPI,
+			pipeline_reschedule_ipi_handler,
+			IRQF_OOB,
+			"Xenomai reschedule",
+			&cobalt_machine_cpudata);
+}
+
+static inline void pipeline_free_resched_ipi(void)
+{
+	if (num_possible_cpus() > 1)
+		/* Release the out-of-band rescheduling interrupt. */
+		free_percpu_irq(RESCHEDULE_OOB_IPI, &cobalt_machine_cpudata);
+}
+
+static inline void pipeline_send_resched_ipi(const struct cpumask *dest)
+{
+	/*
+	 * Trigger the out-of-band rescheduling interrupt on remote
+	 * CPU(s).
+	 */
+	irq_send_oob_ipi(RESCHEDULE_OOB_IPI, dest);
+}
+
+static inline void pipeline_send_timer_ipi(const struct cpumask *dest)
+{
+	/*
+	 * Trigger the out-of-band timer interrupt on remote CPU(s).
+	 */
+	irq_send_oob_ipi(TIMER_OOB_IPI, dest);
+}
+
+#else  /* !CONFIG_SMP */
+
+static inline int pipeline_request_resched_ipi(void (*handler)(void))
+{
+	return 0;
+}
+
+
+{
+}
+
+#endif	/* CONFIG_SMP */
+
+static inline void pipeline_prepare_panic(void)
+{
+	/* N/A */
+}
+
+static inline void pipeline_collect_features(struct cobalt_featinfo *f)
+{
+	f->clock_freq = 0;	/* N/A */
+}
+
+#ifndef pipeline_get_syscall_args
+static inline void pipeline_get_syscall_args(struct task_struct *task,
+					     struct pt_regs *regs,
+					     unsigned long *args)
+{
+	syscall_get_arguments(task, regs, args);
+}
+#endif	/* !pipeline_get_syscall_args */
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_PIPELINE_H */
+++ linux-patched/include/xenomai/pipeline/trace.h	2022-03-21 12:58:31.945864037 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/clock.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_DOVETAIL_TRACE_H
+#define _COBALT_KERNEL_DOVETAIL_TRACE_H
+
+#include <linux/types.h>
+#include <linux/kconfig.h>
+#include <cobalt/uapi/kernel/trace.h>
+#include <trace/events/cobalt-core.h>
+#include <cobalt/kernel/assert.h>
+
+static inline int xntrace_max_begin(unsigned long v)
+{
+	TODO();
+	return 0;
+}
+
+static inline int xntrace_max_end(unsigned long v)
+{
+	TODO();
+	return 0;
+}
+
+static inline int xntrace_max_reset(void)
+{
+	TODO();
+	return 0;
+}
+
+static inline int xntrace_user_start(void)
+{
+	TODO();
+	return 0;
+}
+
+static inline int xntrace_user_stop(unsigned long v)
+{
+	TODO();
+	return 0;
+}
+
+static inline int xntrace_user_freeze(unsigned long v, int once)
+{
+	trace_cobalt_trace_longval(0, v);
+	trace_cobalt_trigger("user-freeze");
+	return 0;
+}
+
+static inline void xntrace_latpeak_freeze(int delay)
+{
+	trace_cobalt_latpeak(delay);
+	trace_cobalt_trigger("latency-freeze");
+}
+
+static inline int xntrace_special(unsigned char id, unsigned long v)
+{
+	trace_cobalt_trace_longval(id, v);
+	return 0;
+}
+
+static inline int xntrace_special_u64(unsigned char id,
+				unsigned long long v)
+{
+	trace_cobalt_trace_longval(id, v);
+	return 0;
+}
+
+static inline int xntrace_pid(pid_t pid, short prio)
+{
+	trace_cobalt_trace_pid(pid, prio);
+	return 0;
+}
+
+static inline int xntrace_tick(unsigned long delay_ticks) /* ns */
+{
+	trace_cobalt_tick_shot(delay_ticks);
+	return 0;
+}
+
+static inline int xntrace_panic_freeze(void)
+{
+	TODO();
+	return 0;
+}
+
+static inline int xntrace_panic_dump(void)
+{
+	TODO();
+	return 0;
+}
+
+static inline bool xntrace_enabled(void)
+{
+	return IS_ENABLED(CONFIG_DOVETAIL_TRACE);
+}
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_TRACE_H */
+++ linux-patched/include/xenomai/pipeline/clock.h	2022-03-21 12:58:31.938864105 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/ipc.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_CLOCK_H
+#define _COBALT_KERNEL_DOVETAIL_CLOCK_H
+
+#include <cobalt/uapi/kernel/types.h>
+#include <cobalt/kernel/assert.h>
+#include <linux/ktime.h>
+#include <linux/errno.h>
+
+struct timespec64;
+
+static inline u64 pipeline_read_cycle_counter(void)
+{
+	/*
+	 * With Dovetail, our idea of time is directly based on a
+	 * refined count of nanoseconds since the epoch, the hardware
+	 * time counter is transparent to us. For this reason,
+	 * xnclock_ticks_to_ns() and xnclock_ns_to_ticks() are
+	 * identity operations when building for Dovetail.
+	 */
+	return ktime_get_mono_fast_ns();
+}
+
+static inline xnticks_t pipeline_read_wallclock(void)
+{
+	return ktime_get_real_fast_ns();
+}
+
+static inline int pipeline_set_wallclock(xnticks_t epoch_ns)
+{
+	return -EOPNOTSUPP;
+}
+
+void pipeline_set_timer_shot(unsigned long cycles);
+
+const char *pipeline_timer_name(void);
+
+static inline const char *pipeline_clock_name(void)
+{
+	/* Return the name of the current clock source. */
+	TODO();
+
+	return "?";
+}
+
+static inline int pipeline_get_host_time(struct timespec64 *tp)
+{
+	/* Convert ktime_get_real_fast_ns() to timespec. */
+	*tp = ktime_to_timespec64(ktime_get_real_fast_ns());
+
+	return 0;
+}
+
+static inline void pipeline_init_clock(void)
+{
+	/* N/A */
+}
+
+static inline xnsticks_t xnclock_core_ticks_to_ns(xnsticks_t ticks)
+{
+	return ticks;
+}
+
+static inline xnsticks_t xnclock_core_ticks_to_ns_rounded(xnsticks_t ticks)
+{
+	return ticks;
+}
+
+static inline xnsticks_t xnclock_core_ns_to_ticks(xnsticks_t ns)
+{
+	return ns;
+}
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_CLOCK_H */
+++ linux-patched/include/xenomai/rtdm/uapi/ipc.h	2022-03-21 12:58:32.298860595 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/udd.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * This file is part of the Xenomai project.
+ *
+ * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _RTDM_UAPI_IPC_H
+#define _RTDM_UAPI_IPC_H
+
+/**
+ * @ingroup rtdm_profiles
+ * @defgroup rtdm_ipc Real-time IPC
+ *
+ * @b Profile @b Revision: 1
+ * @n
+ * @n
+ * @par Device Characteristics
+ * @n
+ * @ref rtdm_driver_flags "Device Flags": @c RTDM_PROTOCOL_DEVICE @n
+ * @n
+ * @ref rtdm_driver.protocol_family "Protocol Family": @c PF_RTIPC @n
+ * @n
+ * @ref rtdm_driver.socket_type "Socket Type": @c SOCK_DGRAM @n
+ * @n
+ * @ref rtdm_driver_profile "Device Class": @c RTDM_CLASS_RTIPC @n
+ * @n
+ * @{
+ *
+ * @anchor rtipc_operations @name Supported operations
+ * Standard socket operations supported by the RTIPC protocols.
+ * @{
+ */
+
+/** Create an endpoint for communication in the AF_RTIPC domain.
+ *
+ * @param[in] domain The communication domain. Must be AF_RTIPC.
+ *
+ * @param[in] type The socket type. Must be SOCK_DGRAM.
+ *
+ * @param [in] protocol Any of @ref IPCPROTO_XDDP, @ref IPCPROTO_IDDP,
+ * or @ref IPCPROTO_BUFP. @ref IPCPROTO_IPC is also valid, and refers
+ * to the default RTIPC protocol, namely @ref IPCPROTO_IDDP.
+ *
+ * @return In addition to the standard error codes for @c socket(2),
+ * the following specific error code may be returned:
+ * - -ENOPROTOOPT (Protocol is known, but not compiled in the RTIPC driver).
+ *   See @ref RTIPC_PROTO "RTIPC protocols"
+ *   for available protocols.
+ *
+ * @par Calling context:
+ * non-RT
+ */
+#ifdef DOXYGEN_CPP
+int socket__AF_RTIPC(int domain =AF_RTIPC, int type =SOCK_DGRAM, int protocol);
+#endif
+
+/**
+ * Close a RTIPC socket descriptor.
+ *
+ * Blocking calls to any of the @ref sendmsg__AF_RTIPC "sendmsg" or @ref
+ * recvmsg__AF_RTIPC "recvmsg" functions will be unblocked when the socket
+ * is closed and return with an error.
+ *
+ * @param[in] sockfd The socket descriptor to close.
+ *
+ * @return In addition to the standard error codes for @c close(2),
+ * the following specific error code may be returned:
+ * none
+ *
+ * @par Calling context:
+ * non-RT
+ */
+#ifdef DOXYGEN_CPP
+int close__AF_RTIPC(int sockfd);
+#endif
+
+/**
+ * Bind a RTIPC socket to a port.
+ *
+ * Bind the socket to a destination port.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param [in] addr The address to bind the socket to (see struct
+ * sockaddr_ipc). The meaning of this address depends on the RTIPC
+ * protocol in use for the socket:
+ *
+ * - IPCPROTO_XDDP
+ *
+ *   This action creates an endpoint for channelling traffic between
+ *   the Xenomai and Linux domains.
+ *
+ *   @em sipc_family must be AF_RTIPC, @em sipc_port is either -1,
+ *   or a valid free port number between 0 and
+ *   CONFIG_XENO_OPT_PIPE_NRDEV-1.
+ *
+ *   If @em sipc_port is -1, a free port will be assigned automatically.
+ *
+ *   Upon success, the pseudo-device /dev/rtp@em N will be reserved
+ *   for this communication channel, where @em N is the assigned port
+ *   number. The non real-time side shall open this device to exchange
+ *   data over the bound socket.
+ *
+ * @anchor xddp_label_binding
+ *   If a label was assigned (see @ref XDDP_LABEL) prior to
+ *   binding the socket to a port, a registry link referring to the
+ *   created pseudo-device will be automatically set up as
+ *   @c /proc/xenomai/registry/rtipc/xddp/@em label, where @em label is the
+ *   label string passed to setsockopt() for the @ref XDDP_LABEL option.
+ *
+ * - IPCPROTO_IDDP
+ *
+ *   This action creates an endpoint for exchanging datagrams within
+ *   the Xenomai domain.
+ *
+ *   @em sipc_family must be AF_RTIPC, @em sipc_port is either -1,
+ *   or a valid free port number between 0 and
+ *   CONFIG_XENO_OPT_IDDP_NRPORT-1.
+ *
+ *   If @em sipc_port is -1, a free port will be assigned
+ *   automatically. The real-time peer shall connect to the same port
+ *   for exchanging data over the bound socket.
+ *
+ * @anchor iddp_label_binding
+ *   If a label was assigned (see @ref IDDP_LABEL) prior to binding
+ *   the socket to a port, a registry link referring to the assigned
+ *   port number will be automatically set up as @c
+ *   /proc/xenomai/registry/rtipc/iddp/@em label, where @em label is
+ *   the label string passed to setsockopt() for the @ref IDDP_LABEL
+ *   option.
+ *
+ * - IPCPROTO_BUFP
+ *
+ *   This action creates an endpoint for a one-way byte
+ *   stream within the Xenomai domain.
+ *
+ *   @em sipc_family must be AF_RTIPC, @em sipc_port is either -1,
+ *   or a valid free port number between 0 and CONFIG_XENO_OPT_BUFP_NRPORT-1.
+ *
+ *   If @em sipc_port is -1, an available port will be assigned
+ *   automatically. The real-time peer shall connect to the same port
+ *   for exchanging data over the bound socket.
+ *
+ * @anchor bufp_label_binding
+ *   If a label was assigned (see @ref BUFP_LABEL) prior to binding
+ *   the socket to a port, a registry link referring to the assigned
+ *   port number will be automatically set up as @c
+ *   /proc/xenomai/registry/rtipc/bufp/@em label, where @em label is
+ *   the label string passed to setsockopt() for the @a BUFP_LABEL
+ *   option.
+ *
+ * @param[in] addrlen The size in bytes of the structure pointed to by
+ * @a addr.
+ *
+ * @return In addition to the standard error codes for @c
+ * bind(2), the following specific error code may be returned:
+ *   - -EFAULT (Invalid data address given)
+ *   - -ENOMEM (Not enough memory)
+ *   - -EINVAL (Invalid parameter)
+ *   - -EADDRINUSE (Socket already bound to a port, or no port available)
+ *   - -EAGAIN (no registry slot available, check/raise
+ *     CONFIG_XENO_OPT_REGISTRY_NRSLOTS).
+ *
+ * @par Calling context:
+ * non-RT
+ */
+#ifdef DOXYGEN_CPP
+int bind__AF_RTIPC(int sockfd, const struct sockaddr_ipc *addr,
+		   socklen_t addrlen);
+#endif
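+
+/*
+ * Illustrative sketch only (not part of the profile definition):
+ * binding an XDDP socket to an automatically assigned port under a
+ * label, so the non-RT side can locate the channel via the registry
+ * link. The label string "my-endpoint" is made up.
+ *
+ *	struct rtipc_port_label plabel = { .label = "my-endpoint" };
+ *	struct sockaddr_ipc saddr = {
+ *		.sipc_family = AF_RTIPC,
+ *		.sipc_port = -1,	// let the driver pick a free port
+ *	};
+ *	int s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP);
+ *
+ *	setsockopt(s, SOL_XDDP, XDDP_LABEL, &plabel, sizeof(plabel));
+ *	bind(s, (struct sockaddr *)&saddr, sizeof(saddr));
+ */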
+
+/**
+ * Initiate a connection on a RTIPC socket.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param [in] addr The address to connect the socket to (see struct
+ * sockaddr_ipc).
+ *
+ * - If sipc_port is a valid port for the protocol, it is used
+ * verbatim and the connection succeeds immediately, regardless of
+ * whether the destination is bound at the time of the call.
+ *
+ * - If sipc_port is -1 and a label was assigned to the socket,
+ * connect() blocks for the requested amount of time (see @ref
+ * SO_RCVTIMEO) until a socket is bound to the same label via @c
+ * bind(2) (see @ref XDDP_LABEL, @ref IDDP_LABEL, @ref BUFP_LABEL), in
+ * which case a connection is established between both endpoints.
+ *
+ * - If sipc_port is -1 and no label was assigned to the socket, the
+ * default destination address is cleared, meaning that any subsequent
+ * write to the socket will return -EDESTADDRREQ, until a valid
+ * destination address is set via @c connect(2) or @c bind(2).
+ *
+ * @param[in] addrlen The size in bytes of the structure pointed to by
+ * @a addr.
+ *
+ * @return In addition to the standard error codes for @c connect(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#ifdef DOXYGEN_CPP
+int connect__AF_RTIPC(int sockfd, const struct sockaddr_ipc *addr,
+		      socklen_t addrlen);
+#endif
+
+/**
+ * Set options on RTIPC sockets.
+ *
+ * These functions allow setting various socket options.
+ * Supported Levels and Options:
+ *
+ * - Level @ref sockopts_socket "SOL_SOCKET"
+ * - Level @ref sockopts_xddp "SOL_XDDP"
+ * - Level @ref sockopts_iddp "SOL_IDDP"
+ * - Level @ref sockopts_bufp "SOL_BUFP"
+ * .
+ *
+ * @return In addition to the standard error codes for @c
+ * setsockopt(2), the following specific error code may
+ * be returned:
+ * follow the option links above.
+ *
+ * @par Calling context:
+ * non-RT
+ */
+#ifdef DOXYGEN_CPP
+int setsockopt__AF_RTIPC(int sockfd, int level, int optname,
+			 const void *optval, socklen_t optlen);
+#endif
+/**
+ * Get options on RTIPC sockets.
+ *
+ * These functions allow getting various socket options.
+ * Supported Levels and Options:
+ *
+ * - Level @ref sockopts_socket "SOL_SOCKET"
+ * - Level @ref sockopts_xddp "SOL_XDDP"
+ * - Level @ref sockopts_iddp "SOL_IDDP"
+ * - Level @ref sockopts_bufp "SOL_BUFP"
+ * .
+ *
+ * @return In addition to the standard error codes for @c
+ * getsockopt(2), the following specific error code may
+ * be returned:
+ * follow the option links above.
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#ifdef DOXYGEN_CPP
+int getsockopt__AF_RTIPC(int sockfd, int level, int optname,
+			 void *optval, socklen_t *optlen);
+#endif
+
+/**
+ * Send a message on a RTIPC socket.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param[in] msg The address of the message header conveying the
+ * datagram.
+ *
+ * @param [in] flags Operation flags:
+ *
+ * - MSG_OOB Send out-of-band message.  For all RTIPC protocols except
+ *   @ref IPCPROTO_BUFP, sending out-of-band data actually means
+ *   pushing them to the head of the receiving queue, so that the
+ *   reader will always receive them before normal messages. @ref
+ *   IPCPROTO_BUFP does not support out-of-band sending.
+ *
+ * - MSG_DONTWAIT Non-blocking I/O operation. The caller will not be
+ *   blocked whenever the message cannot be sent immediately at the
+ *   time of the call (e.g. memory shortage), but will rather return
+ *   with -EWOULDBLOCK. Unlike other RTIPC protocols, @ref
+ *   IPCPROTO_XDDP accepts but never considers MSG_DONTWAIT since
+ *   writing to a real-time XDDP endpoint is inherently a non-blocking
+ *   operation.
+ *
+ * - MSG_MORE Accumulate data before sending. This flag is accepted by
+ *   the @ref IPCPROTO_XDDP protocol only, and tells the send service
+ *   to accumulate the outgoing data into an internal streaming
+ *   buffer, instead of issuing a datagram immediately for it. See
+ *   @ref XDDP_BUFSZ for more.
+ *
+ * @note No RTIPC protocol allows for short writes, and only complete
+ * messages are sent to the peer.
+ *
+ * @return In addition to the standard error codes for @c sendmsg(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT
+ */
+#ifdef DOXYGEN_CPP
+ssize_t sendmsg__AF_RTIPC(int sockfd, const struct msghdr *msg, int flags);
+#endif
+
+/**
+ * Receive a message from a RTIPC socket.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param[out] msg The address the message header will be copied at.
+ *
+ * @param [in] flags Operation flags:
+ *
+ * - MSG_DONTWAIT Non-blocking I/O operation. The caller will not be
+ *   blocked whenever no message is immediately available for receipt
+ *   at the time of the call, but will rather return with
+ *   -EWOULDBLOCK.
+ *
+ * @note @ref IPCPROTO_BUFP does not allow for short reads and always
+ * returns the requested number of bytes, except in one situation:
+ * when a writer is blocked sending data because the buffer is full,
+ * while the caller would otherwise have to wait to receive a complete
+ * message.  This is usually the sign of a pathological use of the
+ * BUFP socket, such as defining an incorrect buffer size via @ref
+ * BUFP_BUFSZ. In that case, a short read is allowed to prevent a
+ * deadlock.
+ *
+ * @return In addition to the standard error codes for @c recvmsg(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT
+ */
+#ifdef DOXYGEN_CPP
+ssize_t recvmsg__AF_RTIPC(int sockfd, struct msghdr *msg, int flags);
+#endif
+
+/**
+ * Get socket name.
+ *
+ * The name of the local endpoint for the socket is copied back (see
+ * struct sockaddr_ipc).
+ *
+ * @return In addition to the standard error codes for @c getsockname(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#ifdef DOXYGEN_CPP
+int getsockname__AF_RTIPC(int sockfd, struct sockaddr_ipc *addr, socklen_t *addrlen);
+#endif
+
+/**
+ * Get socket peer.
+ *
+ * The name of the remote endpoint for the socket is copied back (see
+ * struct sockaddr_ipc). This is the default destination address for
+ * messages sent on the socket. It can be set either explicitly via @c
+ * connect(2), or implicitly via @c bind(2) if no @c connect(2) was
+ * called prior to binding the socket to a port, in which case both
+ * the local and remote names are equal.
+ *
+ * @return In addition to the standard error codes for @c getpeername(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#ifdef DOXYGEN_CPP
+int getpeername__AF_RTIPC(int sockfd, struct sockaddr_ipc *addr, socklen_t *addrlen);
+#endif
+
+/** @} */
+
+#include <cobalt/uapi/kernel/types.h>
+#include <cobalt/uapi/kernel/pipe.h>
+#include <rtdm/rtdm.h>
+
+/* Address family */
+#define AF_RTIPC		111
+
+/* Protocol family */
+#define PF_RTIPC		AF_RTIPC
+
+/**
+ * @anchor RTIPC_PROTO @name RTIPC protocol list
+ * protocols for the PF_RTIPC protocol family
+ *
+ * @{ */
+enum {
+/** Default protocol (IDDP) */
+	IPCPROTO_IPC  = 0,
+/**
+ * Cross-domain datagram protocol (RT <-> non-RT).
+ *
+ * Real-time Xenomai threads and regular Linux threads may want to
+ * exchange data in a way that does not require the former to leave
+ * the real-time domain (i.e. primary mode). The RTDM-based XDDP
+ * protocol is available for this purpose.
+ *
+ * On the Linux domain side, pseudo-device files named /dev/rtp@em \<minor\>
+ * give regular POSIX threads access to non real-time communication
+ * endpoints, via the standard character-based I/O interface. On the
+ * Xenomai domain side, sockets may be bound to XDDP ports, which act
+ * as proxies to send and receive data to/from the associated
+ * pseudo-device files. Ports and pseudo-device minor numbers are
+ * paired, meaning that e.g. socket port 7 will proxy the traffic to/from
+ * /dev/rtp7.
+ *
+ * All data sent through a bound/connected XDDP socket via @c
+ * sendto(2) or @c write(2) will be passed to the peer endpoint in the
+ * Linux domain, and made available for reading via the standard @c
+ * read(2) system call. Conversely, all data sent using @c write(2)
+ * through the non real-time endpoint will be conveyed to the
+ * real-time socket endpoint, and made available to the @c recvfrom(2)
+ * or @c read(2) system calls.
+ */
+	IPCPROTO_XDDP = 1,
+/**
+ * Intra-domain datagram protocol (RT <-> RT).
+ *
+ * The RTDM-based IDDP protocol enables real-time threads to exchange
+ * datagrams within the Xenomai domain, via socket endpoints.
+ */
+	IPCPROTO_IDDP = 2,
+/**
+ * Buffer protocol (RT <-> RT, byte-oriented).
+ *
+ * The RTDM-based BUFP protocol implements a lightweight,
+ * byte-oriented, one-way Producer-Consumer data path. All messages
+ * written are buffered into a single memory area in strict FIFO
+ * order, until read by the consumer.
+ *
+ * This protocol always prevents short writes, and only allows short
+ * reads when a potential deadlock situation arises (i.e. readers and
+ * writers waiting for each other indefinitely).
+ */
+	IPCPROTO_BUFP = 3,
+	IPCPROTO_MAX
+};
+/** @} */
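+
+/*
+ * Illustrative sketch only: the non real-time side of an XDDP channel
+ * bound to port 7 uses the paired pseudo-device:
+ *
+ *	int nrt_fd = open("/dev/rtp7", O_RDWR);
+ *	// read() returns datagrams sent from the RT endpoint,
+ *	// write() conveys data back to the RT socket endpoint.
+ */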
+
+/**
+ * Port number type for the RTIPC address family.
+ */
+typedef int16_t rtipc_port_t;
+
+/**
+ * Port label information structure.
+ */
+struct rtipc_port_label {
+	/** Port label string, null-terminated. */
+	char label[XNOBJECT_NAME_LEN];
+};
+
+/**
+ * Socket address structure for the RTIPC address family.
+ */
+struct sockaddr_ipc {
+	/** RTIPC address family, must be @c AF_RTIPC */
+	sa_family_t sipc_family;
+	/** Port number. */
+	rtipc_port_t sipc_port;
+};
+
+#define SOL_XDDP		311
+/**
+ * @anchor sockopts_xddp @name XDDP socket options
+ * Setting and getting XDDP socket options.
+ * @{ */
+/**
+ * XDDP label assignment
+ *
+ * ASCII label strings can be attached to XDDP ports, so that opening
+ * the non-RT endpoint can be done by specifying this symbolic device
+ * name rather than referring to a raw pseudo-device entry
+ * (i.e. /dev/rtp@em N).
+ *
+ * When available, this label will be registered when binding, in
+ * addition to the port number (see @ref xddp_label_binding
+ * "XDDP port binding").
+ *
+ * It is not allowed to assign a label after the socket was
+ * bound. However, multiple assignment calls are allowed prior to the
+ * binding; the last label set will be used.
+ *
+ * @param [in] level @ref sockopts_xddp "SOL_XDDP"
+ * @param [in] optname @b XDDP_LABEL
+ * @param [in] optval Pointer to struct rtipc_port_label
+ * @param [in] optlen sizeof(struct rtipc_port_label)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define XDDP_LABEL		1
+/**
+ * XDDP local pool size configuration
+ *
+ * By default, the memory needed to convey the data is pulled from
+ * Xenomai's system pool. Setting a local pool size overrides this
+ * default for the socket.
+ *
+ * If a non-zero size was configured, a local pool is allocated at
+ * binding time. This pool will provide storage for pending datagrams.
+ *
+ * It is not allowed to configure a local pool size after the socket
+ * was bound. However, multiple configuration calls are allowed prior
+ * to the binding; the last value set will be used.
+ *
+ * @note: the pool memory is obtained from the host allocator by the
+ * @ref bind__AF_RTIPC "bind call".
+ *
+ * @param [in] level @ref sockopts_xddp "SOL_XDDP"
+ * @param [in] optname @b XDDP_POOLSZ
+ * @param [in] optval Pointer to a variable of type size_t, containing
+ * the required size of the local pool to reserve at binding time
+ * @param [in] optlen sizeof(size_t)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen invalid or *@a optval is zero)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define XDDP_POOLSZ		2
+/**
+ * XDDP streaming buffer size configuration
+ *
+ * In addition to sending datagrams, real-time threads may stream data
+ * in a byte-oriented mode through the port as well. This increases
+ * the bandwidth and reduces the overhead when the data to send to the
+ * Linux domain is collected in small pieces and message boundaries
+ * need not be preserved.
+ *
+ * This feature is enabled when a non-zero buffer size is set for the
+ * socket. In that case, the real-time data accumulates into the
+ * streaming buffer when MSG_MORE is passed to any of the @ref
+ * sendmsg__AF_RTIPC "send functions", until:
+ *
+ * - the receiver from the Linux domain wakes up and consumes it,
+ * - a different source port attempts to send data to the same
+ *   destination port,
+ * - MSG_MORE is absent from the send flags,
+ * - the buffer is full,
+ * .
+ * whichever comes first.
+ *
+ * Setting *@a optval to zero disables the streaming buffer, in which
+ * case every write is conveyed as a separate datagram, regardless of
+ * MSG_MORE.
+ *
+ * @note only a single streaming buffer exists per socket. When this
+ * buffer is full, the real-time data stops accumulating and sending
+ * operations resume in mere datagram mode. Accumulation may happen
+ * again after some or all data in the streaming buffer is consumed
+ * from the Linux domain endpoint.
+ *
+ * The streaming buffer size may be adjusted multiple times during the
+ * socket lifetime; the latest configuration change will take effect
+ * when the accumulation resumes after the previous buffer was
+ * flushed.
+ *
+ * @param [in] level @ref sockopts_xddp "SOL_XDDP"
+ * @param [in] optname @b XDDP_BUFSZ
+ * @param [in] optval Pointer to a variable of type size_t, containing
+ * the required size of the streaming buffer
+ * @param [in] optlen sizeof(size_t)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -ENOMEM (Not enough memory)
+ * - -EINVAL (@a optlen is invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define XDDP_BUFSZ		3
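+/*
+ * Illustrative sketch only: streaming small chunks through an XDDP
+ * socket with MSG_MORE, assuming s was created and bound as sketched
+ * earlier so that a default destination is set. The buffer size and
+ * variable names are made up.
+ *
+ *	size_t bufsz = 4096;
+ *
+ *	setsockopt(s, SOL_XDDP, XDDP_BUFSZ, &bufsz, sizeof(bufsz));
+ *	sendto(s, chunk, chunklen, MSG_MORE, NULL, 0);	// accumulate
+ *	sendto(s, tail, taillen, 0, NULL, 0);	// flush as one datagram
+ */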
+/**
+ * XDDP monitoring callback
+ *
+ * Other RTDM drivers may install a user-defined callback via the @ref
+ * rtdm_setsockopt call from the inter-driver API, in order to collect
+ * particular events occurring on the channel.
+ *
+ * This notification mechanism is particularly useful to monitor a
+ * channel asynchronously while performing other tasks.
+ *
+ * The user-provided routine will be passed the RTDM file descriptor
+ * of the socket receiving the event, the event code, and an optional
+ * argument.  Four events are currently defined, see @ref XDDP_EVENTS.
+ *
+ * The XDDP_EVTIN and XDDP_EVTOUT events are fired on behalf of a
+ * fully atomic context; therefore, care must be taken to keep their
+ * overhead low. In those cases, the Xenomai services that may be
+ * called from the callback are restricted to the set allowed to a
+ * real-time interrupt handler.
+ *
+ * @param [in] level @ref sockopts_xddp "SOL_XDDP"
+ * @param [in] optname @b XDDP_MONITOR
+ * @param [in] optval Pointer to a pointer to function of type int
+ *             (*)(int fd, int event, long arg), containing the address of the
+ *             user-defined callback. Passing a NULL callback pointer
+ *             in @a optval disables monitoring.
+ * @param [in] optlen sizeof(int (*)(int fd, int event, long arg))
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EPERM (Operation not allowed from user-space)
+ * - -EINVAL (@a optlen is invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT, kernel space only
+ */
+#define XDDP_MONITOR		4
+/** @} */
+
+/**
+ * @anchor XDDP_EVENTS @name XDDP events
+ * Specific events occurring on XDDP channels, which can be monitored
+ * via the @ref XDDP_MONITOR socket option.
+ *
+ * @{ */
+/**
+ * @ref XDDP_MONITOR "Monitor" writes to the non real-time endpoint.
+ *
+ * XDDP_EVTIN is sent when data is written to the non real-time
+ * endpoint the socket is bound to (i.e. via /dev/rtp@em N), which
+ * means that some input is pending for the real-time endpoint. The
+ * argument is the size of the incoming message.
+ */
+#define XDDP_EVTIN		1
+/**
+ * @ref XDDP_MONITOR "Monitor" reads from the non real-time endpoint.
+ *
+ * XDDP_EVTOUT is sent when the non real-time endpoint successfully
+ * reads a complete message (i.e. via /dev/rtp@em N). The argument is
+ * the size of the outgoing message.
+ */
+#define XDDP_EVTOUT		2
+/**
+ * @ref XDDP_MONITOR "Monitor" close from the non real-time endpoint.
+ *
+ * XDDP_EVTDOWN is sent when the non real-time endpoint is closed. The
+ * argument is always 0.
+ */
+#define XDDP_EVTDOWN		3
+/**
+ * @ref XDDP_MONITOR "Monitor" memory shortage for non real-time
+ * datagrams.
+ *
+ * XDDP_EVTNOBUF is sent when no memory is available from the pool to
+ * hold the message currently sent from the non real-time
+ * endpoint. The argument is the size of the failed allocation. Upon
+ * return from the callback, the caller will block and retry until
+ * enough space is available from the pool; during that process, the
+ * callback might be invoked multiple times, each time a new attempt
+ * to get the required memory fails.
+ */
+#define XDDP_EVTNOBUF		4
+/** @} */
+
+#define SOL_IDDP		312
+/**
+ * @anchor sockopts_iddp @name IDDP socket options
+ * Setting and getting IDDP socket options.
+ * @{ */
+/**
+ * IDDP label assignment
+ *
+ * ASCII label strings can be attached to IDDP ports, in order to
+ * connect sockets to them in a more descriptive way than using plain
+ * numeric port values.
+ *
+ * When available, this label will be registered when binding, in
+ * addition to the port number (see @ref iddp_label_binding
+ * "IDDP port binding").
+ *
+ * It is not allowed to assign a label after the socket was
+ * bound. However, multiple assignment calls are allowed prior to the
+ * binding; the last label set will be used.
+ *
+ * @param [in] level @ref sockopts_iddp "SOL_IDDP"
+ * @param [in] optname @b IDDP_LABEL
+ * @param [in] optval Pointer to struct rtipc_port_label
+ * @param [in] optlen sizeof(struct rtipc_port_label)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen is invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define IDDP_LABEL		1
+/**
+ * IDDP local pool size configuration
+ *
+ * By default, the memory needed to convey the data is pulled from
+ * Xenomai's system pool. Setting a local pool size overrides this
+ * default for the socket.
+ *
+ * If a non-zero size was configured, a local pool is allocated at
+ * binding time. This pool will provide storage for pending datagrams.
+ *
+ * It is not allowed to configure a local pool size after the socket
+ * was bound. However, multiple configuration calls are allowed prior
+ * to the binding; the last value set will be used.
+ *
+ * @note: the pool memory is obtained from the host allocator by the
+ * @ref bind__AF_RTIPC "bind call".
+ *
+ * @param [in] level @ref sockopts_iddp "SOL_IDDP"
+ * @param [in] optname @b IDDP_POOLSZ
+ * @param [in] optval Pointer to a variable of type size_t, containing
+ * the required size of the local pool to reserve at binding time
+ * @param [in] optlen sizeof(size_t)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen is invalid or *@a optval is zero)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define IDDP_POOLSZ		2
+/** @} */
+
+#define SOL_BUFP		313
+/**
+ * @anchor sockopts_bufp @name BUFP socket options
+ * Setting and getting BUFP socket options.
+ * @{ */
+/**
+ * BUFP label assignment
+ *
+ * ASCII label strings can be attached to BUFP ports, in order to
+ * connect sockets to them in a more descriptive way than using plain
+ * numeric port values.
+ *
+ * When available, this label will be registered when binding, in
+ * addition to the port number (see @ref bufp_label_binding
+ * "BUFP port binding").
+ *
+ * It is not allowed to assign a label after the socket was
+ * bound. However, multiple assignment calls are allowed prior to the
+ * binding; the last label set will be used.
+ *
+ * @param [in] level @ref sockopts_bufp "SOL_BUFP"
+ * @param [in] optname @b BUFP_LABEL
+ * @param [in] optval Pointer to struct rtipc_port_label
+ * @param [in] optlen sizeof(struct rtipc_port_label)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen is invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define BUFP_LABEL		1
+/**
+ * BUFP buffer size configuration
+ *
+ * All messages written to a BUFP socket are buffered in a single
+ * per-socket memory area. Configuring the size of such buffer prior
+ * to binding the socket to a destination port is mandatory.
+ *
+ * It is not allowed to configure a buffer size after the socket was
+ * bound. However, multiple configuration calls are allowed prior to
+ * the binding; the last value set will be used.
+ *
+ * @note: the buffer memory is obtained from the host allocator by the
+ * @ref bind__AF_RTIPC "bind call".
+ *
+ * @param [in] level @ref sockopts_bufp "SOL_BUFP"
+ * @param [in] optname @b BUFP_BUFSZ
+ * @param [in] optval Pointer to a variable of type size_t, containing
+ * the required size of the buffer to reserve at binding time
+ * @param [in] optlen sizeof(size_t)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen is invalid or *@a optval is zero)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define BUFP_BUFSZ		2
+/** @} */
+
+/**
+ * @anchor sockopts_socket @name Socket level options
+ * Setting and getting supported standard socket level options.
+ * @{ */
+/**
+ *
+ * @ref IPCPROTO_IDDP and @ref IPCPROTO_BUFP protocols support the
+ * standard SO_SNDTIMEO socket option, from the @c SOL_SOCKET level.
+ *
+ * @see @c setsockopt(), @c getsockopt() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399/
+ */
+#ifdef DOXYGEN_CPP
+#define SO_SNDTIMEO defined_by_kernel_header_file
+#endif
+/**
+ *
+ * All RTIPC protocols support the standard SO_RCVTIMEO socket option,
+ * from the @c SOL_SOCKET level.
+ *
+ * @see @c setsockopt(), @c getsockopt() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399/
+ */
+#ifdef DOXYGEN_CPP
+#define SO_RCVTIMEO defined_by_kernel_header_file
+#endif
+/** @} */
+
+/**
+ * @anchor rtdm_ipc_examples @name RTIPC examples
+ * @{ */
+/** @example bufp-readwrite.c */
+/** @example bufp-label.c */
+/** @example iddp-label.c */
+/** @example iddp-sendrecv.c */
+/** @example xddp-echo.c */
+/** @example xddp-label.c */
+/** @example xddp-stream.c */
+/** @} */
+
+/** @} */
+
+#endif /* !_RTDM_UAPI_IPC_H */
+++ linux-patched/include/xenomai/rtdm/uapi/udd.h	2022-03-21 12:58:32.291860663 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/testing.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * This file is part of the Xenomai project.
+ *
+ * @author Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_UAPI_UDD_H
+#define _RTDM_UAPI_UDD_H
+
+/**
+ * @addtogroup rtdm_udd
+ *
+ * @{
+ */
+
+/**
+ * @anchor udd_signotify
+ * @brief UDD event notification descriptor
+ *
+ * This structure shall be used to pass the information required to
+ * enable/disable the notification by signal upon interrupt receipt.
+ *
+ * If PID is zero or negative, the notification is disabled.
+ * Otherwise, the Cobalt thread whose PID is given will receive the
+ * specified Cobalt signal, with the count of interrupts at the time
+ * of delivery stored in siginfo.si_int. A Cobalt thread must
+ * explicitly wait for notifications using the sigwaitinfo() or
+ * sigtimedwait() services (no asynchronous mode is available).
+ */
+struct udd_signotify {
+	/**
+	 * PID of the Cobalt thread to notify upon interrupt
+	 * receipt. If @a pid is zero or negative, the notification is
+	 * disabled.
+	 */
+	pid_t pid;
+	/**
+	 * Signal number to send to PID for notifying, which must be
+	 * in the range [SIGRTMIN .. SIGRTMAX] inclusive. This value
+	 * is not considered if @a pid is zero or negative.
+	 */
+	int sig;
+};
+
+/**
+ * @anchor udd_ioctl_codes @name UDD_IOCTL
+ * IOCTL requests
+ *
+ * @{
+ */
+
+/**
+ * Enable the interrupt line. The UDD-class mini-driver should handle
+ * this request when received through its ->ioctl() handler if
+ * provided. Otherwise, the UDD core enables the interrupt line in the
+ * interrupt controller before returning to the caller.
+ */
+#define UDD_RTIOC_IRQEN		_IO(RTDM_CLASS_UDD, 0)
+/**
+ * Disable the interrupt line. The UDD-class mini-driver should handle
+ * this request when received through its ->ioctl() handler if
+ * provided. Otherwise, the UDD core disables the interrupt line in
+ * the interrupt controller before returning to the caller.
+ *
+ * @note The mini-driver must handle the UDD_RTIOC_IRQEN request for a
+ * custom IRQ from its ->ioctl() handler, otherwise such request
+ * receives -EIO from the UDD core.
+ */
+#define UDD_RTIOC_IRQDIS	_IO(RTDM_CLASS_UDD, 1)
+/**
+ * Enable/Disable signal notification upon interrupt event. A valid
+ * @ref udd_signotify "notification descriptor" must be passed along
+ * with this request, which is handled by the UDD core directly.
+ *
+ * @note The mini-driver must handle the UDD_RTIOC_IRQDIS request for
+ * a custom IRQ from its ->ioctl() handler, otherwise such request
+ * receives -EIO from the UDD core.
+ */
+#define UDD_RTIOC_IRQSIG	_IOW(RTDM_CLASS_UDD, 2, struct udd_signotify)
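+
+/*
+ * Illustrative sketch only: enabling signal notification upon
+ * interrupt receipt, assuming fd refers to an open UDD device and
+ * target_tid is the PID of the Cobalt thread to notify.
+ *
+ *	struct udd_signotify sig = {
+ *		.pid = target_tid,
+ *		.sig = SIGRTMIN + 1,	// any signal in [SIGRTMIN..SIGRTMAX]
+ *	};
+ *
+ *	ioctl(fd, UDD_RTIOC_IRQSIG, &sig);
+ *	// The notified thread then waits with sigwaitinfo() or
+ *	// sigtimedwait() and reads the interrupt count from si_int.
+ */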
+
+/** @} */
+/** @} */
+
+#endif /* !_RTDM_UAPI_UDD_H */
+++ linux-patched/include/xenomai/rtdm/uapi/testing.h	2022-03-21 12:58:32.284860731 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/analogy.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, testing device profile header
+ *
+ * @note Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * @ingroup rttesting
+ */
+#ifndef _RTDM_UAPI_TESTING_H
+#define _RTDM_UAPI_TESTING_H
+
+#include <linux/types.h>
+
+#define RTTST_PROFILE_VER		2
+
+typedef struct rttst_bench_res {
+	__s32 avg;
+	__s32 min;
+	__s32 max;
+	__s32 overruns;
+	__s32 test_loops;
+} rttst_bench_res_t;
+
+typedef struct rttst_interm_bench_res {
+	struct rttst_bench_res last;
+	struct rttst_bench_res overall;
+} rttst_interm_bench_res_t;
+
+typedef struct rttst_overall_bench_res {
+	struct rttst_bench_res result;
+	__s32 *histogram_avg;
+	__s32 *histogram_min;
+	__s32 *histogram_max;
+} rttst_overall_bench_res_t;
+
+#define RTTST_TMBENCH_INVALID		-1 /* internal use only */
+#define RTTST_TMBENCH_TASK		0
+#define RTTST_TMBENCH_HANDLER		1
+
+typedef struct rttst_tmbench_config {
+	int mode;
+	int priority;
+	__u64 period;
+	int warmup_loops;
+	int histogram_size;
+	int histogram_bucketsize;
+	int freeze_max;
+} rttst_tmbench_config_t;
+
+struct rttst_swtest_task {
+	unsigned int index;
+	unsigned int flags;
+};
+
+/* Possible values for struct rttst_swtest_task::flags. */
+#define RTTST_SWTEST_FPU		0x1
+#define RTTST_SWTEST_USE_FPU		0x2 /* Only for kernel-space tasks. */
+#define RTTST_SWTEST_FREEZE		0x4 /* Only for kernel-space tasks. */
+
+struct rttst_swtest_dir {
+	unsigned int from;
+	unsigned int to;
+};
+
+struct rttst_swtest_error {
+	struct rttst_swtest_dir last_switch;
+	unsigned int fp_val;
+};
+
+#define RTTST_RTDM_NORMAL_CLOSE		0
+#define RTTST_RTDM_DEFER_CLOSE_CONTEXT	1
+
+#define RTTST_RTDM_MAGIC_PRIMARY	0xfefbfefb
+#define RTTST_RTDM_MAGIC_SECONDARY	0xa5b9a5b9
+
+#define RTTST_HEAPCHECK_ZEROOVRD   1
+#define RTTST_HEAPCHECK_SHUFFLE    2
+#define RTTST_HEAPCHECK_PATTERN    4
+#define RTTST_HEAPCHECK_HOT        8
+
+struct rttst_heap_parms {
+	__u64 heap_size;
+	__u64 block_size;
+	int flags;
+	int nrstats;
+};
+
+struct rttst_heap_stats {
+	__u64 heap_size;
+	__u64 user_size;
+	__u64 block_size;
+	__s64 alloc_avg_ns;
+	__s64 alloc_max_ns;
+	__s64 free_avg_ns;
+	__s64 free_max_ns;
+	__u64 maximum_free;
+	__u64 largest_free;
+	int nrblocks;
+	int flags;
+};
+
+struct rttst_heap_stathdr {
+	int nrstats;
+	struct rttst_heap_stats *buf;
+};
+
+#define RTIOC_TYPE_TESTING		RTDM_CLASS_TESTING
+
+/*!
+ * @name Sub-Classes of RTDM_CLASS_TESTING
+ * @{ */
+/** subclass name: "timerbench" */
+#define RTDM_SUBCLASS_TIMERBENCH	0
+/** subclass name: "irqbench" */
+#define RTDM_SUBCLASS_IRQBENCH		1
+/** subclass name: "switchtest" */
+#define RTDM_SUBCLASS_SWITCHTEST	2
+/** subclass name: "rtdm" */
+#define RTDM_SUBCLASS_RTDMTEST		3
+/** subclass name: "heapcheck" */
+#define RTDM_SUBCLASS_HEAPCHECK		4
+/** @} */
+
+/*!
+ * @anchor TSTIOCTLs @name IOCTLs
+ * Testing device IOCTLs
+ * @{ */
+#define RTTST_RTIOC_INTERM_BENCH_RES \
+	_IOWR(RTIOC_TYPE_TESTING, 0x00, struct rttst_interm_bench_res)
+
+#define RTTST_RTIOC_TMBENCH_START \
+	_IOW(RTIOC_TYPE_TESTING, 0x10, struct rttst_tmbench_config)
+
+#define RTTST_RTIOC_TMBENCH_STOP \
+	_IOWR(RTIOC_TYPE_TESTING, 0x11, struct rttst_overall_bench_res)
+
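+/*
+ * Illustrative usage sketch (not normative): driving the timerbench
+ * device the way a latency measurement tool might. The device path is
+ * an assumption; the structure layouts follow the definitions above.
+ *
+ *	struct rttst_tmbench_config cfg = {
+ *		.mode = RTTST_TMBENCH_TASK,
+ *		.priority = 99,
+ *		.period = 100000,		// sampling period in ns
+ *		.warmup_loops = 1,
+ *		.histogram_size = 0,		// no histogram collected
+ *		.histogram_bucketsize = 1000,
+ *		.freeze_max = 0,
+ *	};
+ *	struct rttst_interm_bench_res res;
+ *	struct rttst_overall_bench_res overall = { .histogram_avg = NULL };
+ *	int fd = open("/dev/rtdm/timerbench", O_RDWR);	// assumed node
+ *
+ *	ioctl(fd, RTTST_RTIOC_TMBENCH_START, &cfg);
+ *	ioctl(fd, RTTST_RTIOC_INTERM_BENCH_RES, &res);	// intermediate results
+ *	ioctl(fd, RTTST_RTIOC_TMBENCH_STOP, &overall);
+ */
+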
+#define RTTST_RTIOC_SWTEST_SET_TASKS_COUNT \
+	_IOW(RTIOC_TYPE_TESTING, 0x30, __u32)
+
+#define RTTST_RTIOC_SWTEST_SET_CPU \
+	_IOW(RTIOC_TYPE_TESTING, 0x31, __u32)
+
+#define RTTST_RTIOC_SWTEST_REGISTER_UTASK \
+	_IOW(RTIOC_TYPE_TESTING, 0x32, struct rttst_swtest_task)
+
+#define RTTST_RTIOC_SWTEST_CREATE_KTASK \
+	_IOWR(RTIOC_TYPE_TESTING, 0x33, struct rttst_swtest_task)
+
+#define RTTST_RTIOC_SWTEST_PEND \
+	_IOR(RTIOC_TYPE_TESTING, 0x34, struct rttst_swtest_task)
+
+#define RTTST_RTIOC_SWTEST_SWITCH_TO \
+	_IOR(RTIOC_TYPE_TESTING, 0x35, struct rttst_swtest_dir)
+
+#define RTTST_RTIOC_SWTEST_GET_SWITCHES_COUNT \
+	_IOR(RTIOC_TYPE_TESTING, 0x36, __u32)
+
+#define RTTST_RTIOC_SWTEST_GET_LAST_ERROR \
+	_IOR(RTIOC_TYPE_TESTING, 0x37, struct rttst_swtest_error)
+
+#define RTTST_RTIOC_SWTEST_SET_PAUSE \
+	_IOW(RTIOC_TYPE_TESTING, 0x38, __u32)
+
+#define RTTST_RTIOC_RTDM_DEFER_CLOSE \
+	_IOW(RTIOC_TYPE_TESTING, 0x40, __u32)
+
+#define RTTST_RTIOC_RTDM_ACTOR_GET_CPU \
+	_IOR(RTIOC_TYPE_TESTING, 0x41, __u32)
+
+#define RTTST_RTIOC_RTDM_PING_PRIMARY \
+	_IOR(RTIOC_TYPE_TESTING, 0x42, __u32)
+
+#define RTTST_RTIOC_RTDM_PING_SECONDARY \
+	_IOR(RTIOC_TYPE_TESTING, 0x43, __u32)
+
+#define RTTST_RTIOC_HEAP_CHECK \
+	_IOR(RTIOC_TYPE_TESTING, 0x44, struct rttst_heap_parms)
+
+#define RTTST_RTIOC_HEAP_STAT_COLLECT \
+	_IOR(RTIOC_TYPE_TESTING, 0x45, int)
+
+/** @} */
+
+#endif /* !_RTDM_UAPI_TESTING_H */
+++ linux-patched/include/xenomai/rtdm/uapi/analogy.h	2022-03-21 12:58:32.276860809 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/gpio.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Analogy for Linux, UAPI bits
+ * @note Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * @note Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_UAPI_ANALOGY_H
+#define _RTDM_UAPI_ANALOGY_H
+
+/* --- Misc precompilation constant --- */
+#define A4L_NAMELEN 20
+
+#define A4L_INFINITE 0
+#define A4L_NONBLOCK (-1)
+
+/* --- Common Analogy types --- */
+
+typedef unsigned short sampl_t;
+typedef unsigned long lsampl_t;
+
+/* MMAP ioctl argument structure */
+struct a4l_mmap_arg {
+	unsigned int idx_subd;
+	unsigned long size;
+	void *ptr;
+};
+typedef struct a4l_mmap_arg a4l_mmap_t;
+
+/* Constants related to the buffer size
+   (might be used with the BUFCFG ioctl) */
+#define A4L_BUF_MAXSIZE 0x1000000
+#define A4L_BUF_DEFSIZE 0x10000
+#define A4L_BUF_DEFMAGIC 0xffaaff55
+
+/* BUFCFG ioctl argument structure */
+struct a4l_buffer_config {
+	/* NOTE: with the last buffer implementation, the field
+	   idx_subd became useless; buffers are now per-context, so
+	   the buffer size configuration is specific to an opened
+	   device. There is one small exception: a default buffer
+	   size can be defined per device. So far, a hack is used to
+	   implement the configuration of the default buffer size. */
+	unsigned int idx_subd;
+	unsigned long buf_size;
+};
+typedef struct a4l_buffer_config a4l_bufcfg_t;
+
+/* BUFINFO ioctl argument structure */
+struct a4l_buffer_info {
+	unsigned int idx_subd;
+	unsigned long buf_size;
+	unsigned long rw_count;
+};
+typedef struct a4l_buffer_info a4l_bufinfo_t;
+
+/* BUFCFG2 / BUFINFO2 ioctl argument structure */
+struct a4l_buffer_config2 {
+	unsigned long wake_count;
+	unsigned long reserved[3];
+};
+typedef struct a4l_buffer_config2 a4l_bufcfg2_t;
+
+/* POLL ioctl argument structure */
+struct a4l_poll {
+	unsigned int idx_subd;
+	unsigned long arg;
+};
+typedef struct a4l_poll a4l_poll_t;
+
+/* DEVCFG ioctl argument structure */
+struct a4l_link_desc {
+	unsigned char bname_size;
+	char *bname;
+	unsigned int opts_size;
+	void *opts;
+};
+typedef struct a4l_link_desc a4l_lnkdesc_t;
+
+/* DEVINFO ioctl argument structure */
+struct a4l_dev_info {
+	char board_name[A4L_NAMELEN];
+	char driver_name[A4L_NAMELEN];
+	int nb_subd;
+	int idx_read_subd;
+	int idx_write_subd;
+};
+typedef struct a4l_dev_info a4l_dvinfo_t;
+
+#define CIO 'd'
+#define A4L_DEVCFG _IOW(CIO,0,a4l_lnkdesc_t)
+#define A4L_DEVINFO _IOR(CIO,1,a4l_dvinfo_t)
+#define A4L_SUBDINFO _IOR(CIO,2,a4l_sbinfo_t)
+#define A4L_CHANINFO _IOR(CIO,3,a4l_chinfo_arg_t)
+#define A4L_RNGINFO _IOR(CIO,4,a4l_rnginfo_arg_t)
+#define A4L_CMD _IOWR(CIO,5,a4l_cmd_t)
+#define A4L_CANCEL _IOR(CIO,6,unsigned int)
+#define A4L_INSNLIST _IOR(CIO,7,unsigned int)
+#define A4L_INSN _IOR(CIO,8,unsigned int)
+#define A4L_BUFCFG _IOR(CIO,9,a4l_bufcfg_t)
+#define A4L_BUFINFO _IOWR(CIO,10,a4l_bufinfo_t)
+#define A4L_POLL _IOR(CIO,11,unsigned int)
+#define A4L_MMAP _IOWR(CIO,12,unsigned int)
+#define A4L_NBCHANINFO _IOR(CIO,13,a4l_chinfo_arg_t)
+#define A4L_NBRNGINFO _IOR(CIO,14,a4l_rnginfo_arg_t)
+
+/* These IOCTLs are bound to be merged with A4L_BUFCFG and A4L_BUFINFO
+   at the next major release */
+#define A4L_BUFCFG2 _IOR(CIO,15,a4l_bufcfg_t)
+#define A4L_BUFINFO2 _IOWR(CIO,16,a4l_bufcfg_t)
+
+/*!
+ * @addtogroup analogy_lib_async1
+ * @{
+ */
+
+/*!
+ * @anchor ANALOGY_CMD_xxx @name ANALOGY_CMD_xxx
+ * @brief Common command flags definitions
+ * @{
+ */
+
+/**
+ * Do not execute the command, just check it
+ */
+#define A4L_CMD_SIMUL 0x1
+/**
+ * Perform data recovery / transmission in bulk mode
+ */
+#define A4L_CMD_BULK 0x2
+/**
+ * Perform a command which will write data to the device
+ */
+#define A4L_CMD_WRITE 0x4
+
+	  /*! @} ANALOGY_CMD_xxx */
+
+/*!
+ * @anchor TRIG_xxx @name TRIG_xxx
+ * @brief Command triggers flags definitions
+ * @{
+ */
+
+/**
+ * Never trigger
+ */
+#define TRIG_NONE	0x00000001
+/**
+ * Trigger now + N ns
+ */
+#define TRIG_NOW	0x00000002
+/**
+ * Trigger on next lower level trig
+ */
+#define TRIG_FOLLOW	0x00000004
+/**
+ * Trigger at time N ns
+ */
+#define TRIG_TIME	0x00000008
+/**
+ * Trigger at rate N ns
+ */
+#define TRIG_TIMER	0x00000010
+/**
+ * Trigger when count reaches N
+ */
+#define TRIG_COUNT	0x00000020
+/**
+ * Trigger on external signal N
+ */
+#define TRIG_EXT	0x00000040
+/**
+ * Trigger on analogy-internal signal N
+ */
+#define TRIG_INT	0x00000080
+/**
+ * Driver defined trigger
+ */
+#define TRIG_OTHER	0x00000100
+/**
+ * Wake up on end-of-scan
+ */
+#define TRIG_WAKE_EOS	0x0020
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_MASK 0x00030000
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_NEAREST 0x00000000
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_DOWN 0x00010000
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_UP 0x00020000
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_UP_NEXT 0x00030000
+
+	  /*! @} TRIG_xxx */
+
+/*!
+ * @anchor CHAN_RNG_AREF @name Channel macros
+ * @brief Specific precompilation macros and constants useful for the
+ * channel descriptor array located in the command structure
+ * @{
+ */
+
+/**
+ * Channel indication macro
+ */
+#define CHAN(a) ((a) & 0xffff)
+/**
+ * Range definition macro
+ */
+#define RNG(a) (((a) & 0xff) << 16)
+/**
+ * Reference definition macro
+ */
+#define AREF(a) (((a) & 0x03) << 24)
+/**
+ * Flags definition macro
+ */
+#define FLAGS(a) ((a) & CR_FLAGS_MASK)
+/**
+ * Channel + range + reference definition macro
+ */
+#define PACK(a, b, c) (a | RNG(b) | AREF(c))
+/**
+ * Channel + range + reference + flags definition macro
+ */
+#define PACK_FLAGS(a, b, c, d) (PACK(a, b, c) | FLAGS(d))
+
+/**
+ * Analog reference is analog ground
+ */
+#define AREF_GROUND 0x00
+/**
+ * Analog reference is analog common
+ */
+#define AREF_COMMON 0x01
+/**
+ * Analog reference is differential
+ */
+#define AREF_DIFF 0x02
+/**
+ * Analog reference is undefined
+ */
+#define AREF_OTHER 0x03
+
+	  /*! @} CHAN_RNG_AREF */
+
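+/*
+ * Illustrative sketch: packing channel descriptors as expected in the
+ * chan_descs array of struct a4l_cmd_desc (defined below); channel 0
+ * on range index 1 with a differential reference, then channel 1 on
+ * range index 0 against analog ground.
+ *
+ *	unsigned int chans[2];
+ *
+ *	chans[0] = PACK(0, 1, AREF_DIFF);
+ *	chans[1] = PACK(1, 0, AREF_GROUND);
+ */
+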
+#if !defined(DOXYGEN_CPP)
+
+#define CR_FLAGS_MASK 0xfc000000
+#define CR_ALT_FILTER (1<<26)
+#define CR_DITHER CR_ALT_FILTER
+#define CR_DEGLITCH CR_ALT_FILTER
+#define CR_ALT_SOURCE (1<<27)
+#define CR_EDGE	(1<<30)
+#define CR_INVERT (1<<31)
+
+#endif /* !DOXYGEN_CPP */
+
+/*!
+ * @brief Structure describing the asynchronous instruction
+ * @see a4l_snd_command()
+ */
+
+struct a4l_cmd_desc {
+	unsigned char idx_subd;
+			       /**< Subdevice to which the command will be applied. */
+
+	unsigned long flags;
+			       /**< Command flags */
+
+	/* Command trigger characteristics */
+	unsigned int start_src;
+			       /**< Start trigger type */
+	unsigned int start_arg;
+			       /**< Start trigger argument */
+	unsigned int scan_begin_src;
+			       /**< Scan begin trigger type */
+	unsigned int scan_begin_arg;
+			       /**< Scan begin trigger argument */
+	unsigned int convert_src;
+			       /**< Convert trigger type */
+	unsigned int convert_arg;
+			       /**< Convert trigger argument */
+	unsigned int scan_end_src;
+			       /**< Scan end trigger type */
+	unsigned int scan_end_arg;
+			       /**< Scan end trigger argument */
+	unsigned int stop_src;
+			       /**< Stop trigger type */
+	unsigned int stop_arg;
+			   /**< Stop trigger argument */
+
+	unsigned char nb_chan;
+			   /**< Count of channels related to the command */
+	unsigned int *chan_descs;
+			    /**< Array containing the channel descriptors */
+
+	/* Driver specific fields */
+	unsigned int valid_simul_stages;
+			   /**< Command simulation valid stages (driver dependent) */
+
+	unsigned int data_len;
+			   /**< Driver specific buffer size */
+	sampl_t *data;
+	                   /**< Driver specific buffer pointer */
+};
+typedef struct a4l_cmd_desc a4l_cmd_t;
+
+/*! @} analogy_lib_async1 */
+
+/* --- Range section --- */
+
+/** Constant for internal use only (must not be used by driver
+    developer).  */
+#define A4L_RNG_FACTOR 1000000
+
+/**
+ * Volt unit range flag
+ */
+#define A4L_RNG_VOLT_UNIT 0x0
+/**
+ * MilliAmpere unit range flag
+ */
+#define A4L_RNG_MAMP_UNIT 0x1
+/**
+ * No unit range flag
+ */
+#define A4L_RNG_NO_UNIT 0x2
+/**
+ * External unit range flag
+ */
+#define A4L_RNG_EXT_UNIT 0x4
+
+/**
+ * Macro to retrieve the range unit from the range flags
+ */
+#define A4L_RNG_UNIT(x) (x & (A4L_RNG_VOLT_UNIT |	\
+			      A4L_RNG_MAMP_UNIT |	\
+			      A4L_RNG_NO_UNIT |		\
+			      A4L_RNG_EXT_UNIT))
+
+/* --- Subdevice flags desc stuff --- */
+
+/* TODO: replace ANALOGY_SUBD_AI with ANALOGY_SUBD_ANALOG
+   and ANALOGY_SUBD_INPUT */
+
+/* Subdevice types masks */
+#define A4L_SUBD_MASK_READ 0x80000000
+#define A4L_SUBD_MASK_WRITE 0x40000000
+#define A4L_SUBD_MASK_SPECIAL 0x20000000
+
+/*!
+ * @addtogroup analogy_subdevice
+ * @{
+ */
+
+/*!
+ * @anchor ANALOGY_SUBD_xxx @name Subdevices types
+ * @brief Flags to define the subdevice type
+ * @{
+ */
+
+/**
+ * Unused subdevice
+ */
+#define A4L_SUBD_UNUSED (A4L_SUBD_MASK_SPECIAL|0x1)
+/**
+ * Analog input subdevice
+ */
+#define A4L_SUBD_AI (A4L_SUBD_MASK_READ|0x2)
+/**
+ * Analog output subdevice
+ */
+#define A4L_SUBD_AO (A4L_SUBD_MASK_WRITE|0x4)
+/**
+ * Digital input subdevice
+ */
+#define A4L_SUBD_DI (A4L_SUBD_MASK_READ|0x8)
+/**
+ * Digital output subdevice
+ */
+#define A4L_SUBD_DO (A4L_SUBD_MASK_WRITE|0x10)
+/**
+ * Digital input/output subdevice
+ */
+#define A4L_SUBD_DIO (A4L_SUBD_MASK_SPECIAL|0x20)
+/**
+ * Counter subdevice
+ */
+#define A4L_SUBD_COUNTER (A4L_SUBD_MASK_SPECIAL|0x40)
+/**
+ * Timer subdevice
+ */
+#define A4L_SUBD_TIMER (A4L_SUBD_MASK_SPECIAL|0x80)
+/**
+ * Memory, EEPROM, DPRAM
+ */
+#define A4L_SUBD_MEMORY (A4L_SUBD_MASK_SPECIAL|0x100)
+/**
+ * Calibration subdevice  DACs
+ */
+#define A4L_SUBD_CALIB (A4L_SUBD_MASK_SPECIAL|0x200)
+/**
+ * Processor, DSP
+ */
+#define A4L_SUBD_PROC (A4L_SUBD_MASK_SPECIAL|0x400)
+/**
+ * Serial IO subdevice
+ */
+#define A4L_SUBD_SERIAL (A4L_SUBD_MASK_SPECIAL|0x800)
+/**
+ * Mask which gathers all the types
+ */
+#define A4L_SUBD_TYPES (A4L_SUBD_UNUSED |	 \
+			   A4L_SUBD_AI |	 \
+			   A4L_SUBD_AO |	 \
+			   A4L_SUBD_DI |	 \
+			   A4L_SUBD_DO |	 \
+			   A4L_SUBD_DIO |	 \
+			   A4L_SUBD_COUNTER | \
+			   A4L_SUBD_TIMER |	 \
+			   A4L_SUBD_MEMORY |	 \
+			   A4L_SUBD_CALIB |	 \
+			   A4L_SUBD_PROC |	 \
+			   A4L_SUBD_SERIAL)
+
+/*! @} ANALOGY_SUBD_xxx */
+
+/*!
+ * @anchor ANALOGY_SUBD_FT_xxx @name Subdevice features
+ * @brief Flags to define the subdevice's capabilities
+ * @{
+ */
+
+/* Subdevice capabilities */
+/**
+ * The subdevice can handle commands (i.e. it can perform asynchronous
+ * acquisition)
+ */
+#define A4L_SUBD_CMD 0x1000
+/**
+ * The subdevice supports mmap operations (technically, any driver can
+ * do it; however, the developer might want his driver to be accessed
+ * only through read / write)
+ */
+#define A4L_SUBD_MMAP 0x8000
+
+/*! @} ANALOGY_SUBD_FT_xxx */
+
+/*!
+ * @anchor ANALOGY_SUBD_ST_xxx @name Subdevice status
+ * @brief Flags to define the subdevice's status
+ * @{
+ */
+
+/* Subdevice status flag(s) */
+/**
+ * The subdevice is busy, a synchronous or an asynchronous acquisition
+ * is occurring
+ */
+#define A4L_SUBD_BUSY_NR 0
+#define A4L_SUBD_BUSY (1 << A4L_SUBD_BUSY_NR)
+
+/**
+ * The subdevice is about to be cleaned in the middle of the detach
+ * procedure
+ */
+#define A4L_SUBD_CLEAN_NR 1
+#define A4L_SUBD_CLEAN (1 << A4L_SUBD_CLEAN_NR)
+
+
+/*! @} ANALOGY_SUBD_ST_xxx */
+
+/* --- Subdevice related IOCTL arguments structures --- */
+
+/* SUBDINFO IOCTL argument */
+struct a4l_subd_info {
+	unsigned long flags;
+	unsigned long status;
+	unsigned char nb_chan;
+};
+typedef struct a4l_subd_info a4l_sbinfo_t;
+
+/* CHANINFO / NBCHANINFO IOCTL arguments */
+struct a4l_chan_info {
+	unsigned long chan_flags;
+	unsigned char nb_rng;
+	unsigned char nb_bits;
+};
+typedef struct a4l_chan_info a4l_chinfo_t;
+
+struct a4l_chinfo_arg {
+	unsigned int idx_subd;
+	void *info;
+};
+typedef struct a4l_chinfo_arg a4l_chinfo_arg_t;
+
+/* RNGINFO / NBRNGINFO IOCTL arguments */
+struct a4l_rng_info {
+	long min;
+	long max;
+	unsigned long flags;
+};
+typedef struct a4l_rng_info a4l_rnginfo_t;
+
+struct a4l_rng_info_arg {
+	unsigned int idx_subd;
+	unsigned int idx_chan;
+	void *info;
+};
+typedef struct a4l_rng_info_arg a4l_rnginfo_arg_t;
+
+/*! @} */
+
+#define A4L_INSN_MASK_READ 0x8000000
+#define A4L_INSN_MASK_WRITE 0x4000000
+#define A4L_INSN_MASK_SPECIAL 0x2000000
+
+/*!
+ * @addtogroup analogy_lib_sync1
+ * @{
+ */
+
+/*!
+ * @anchor ANALOGY_INSN_xxx @name Instruction type
+ * @brief Flags to define the type of instruction
+ * @{
+ */
+
+/**
+ * Read instruction
+ */
+#define A4L_INSN_READ (0 | A4L_INSN_MASK_READ)
+/**
+ * Write instruction
+ */
+#define A4L_INSN_WRITE (1 | A4L_INSN_MASK_WRITE)
+/**
+ * "Bits" instruction
+ */
+#define A4L_INSN_BITS (2 | A4L_INSN_MASK_READ | \
+		       A4L_INSN_MASK_WRITE)
+/**
+ * Configuration instruction
+ */
+#define A4L_INSN_CONFIG (3 | A4L_INSN_MASK_READ | \
+			 A4L_INSN_MASK_WRITE)
+/**
+ * Get time instruction
+ */
+#define A4L_INSN_GTOD (4 | A4L_INSN_MASK_READ | \
+		       A4L_INSN_MASK_SPECIAL)
+/**
+ * Wait instruction
+ */
+#define A4L_INSN_WAIT (5 | A4L_INSN_MASK_WRITE | \
+		       A4L_INSN_MASK_SPECIAL)
+/**
+ * Trigger instruction (to start asynchronous acquisition)
+ */
+#define A4L_INSN_INTTRIG (6 | A4L_INSN_MASK_WRITE | \
+			  A4L_INSN_MASK_SPECIAL)
+
+	  /*! @} ANALOGY_INSN_xxx */
+
+/**
+ * Maximal wait duration
+ */
+#define A4L_INSN_WAIT_MAX 100000
+
+/*!
+ * @anchor INSN_CONFIG_xxx @name Configuration instruction type
+ * @brief Values to define the type of configuration instruction
+ * @{
+ */
+
+#define A4L_INSN_CONFIG_DIO_INPUT		0
+#define A4L_INSN_CONFIG_DIO_OUTPUT		1
+#define A4L_INSN_CONFIG_DIO_OPENDRAIN		2
+#define A4L_INSN_CONFIG_ANALOG_TRIG		16
+#define A4L_INSN_CONFIG_ALT_SOURCE		20
+#define A4L_INSN_CONFIG_DIGITAL_TRIG		21
+#define A4L_INSN_CONFIG_BLOCK_SIZE		22
+#define A4L_INSN_CONFIG_TIMER_1			23
+#define A4L_INSN_CONFIG_FILTER			24
+#define A4L_INSN_CONFIG_CHANGE_NOTIFY		25
+#define A4L_INSN_CONFIG_SERIAL_CLOCK		26
+#define A4L_INSN_CONFIG_BIDIRECTIONAL_DATA	27
+#define A4L_INSN_CONFIG_DIO_QUERY		28
+#define A4L_INSN_CONFIG_PWM_OUTPUT		29
+#define A4L_INSN_CONFIG_GET_PWM_OUTPUT		30
+#define A4L_INSN_CONFIG_ARM			31
+#define A4L_INSN_CONFIG_DISARM			32
+#define A4L_INSN_CONFIG_GET_COUNTER_STATUS	33
+#define A4L_INSN_CONFIG_RESET			34
+#define A4L_INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR	1001	/* Use CTR as single pulse generator */
+#define A4L_INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR	1002	/* Use CTR as pulse train generator */
+#define A4L_INSN_CONFIG_GPCT_QUADRATURE_ENCODER	1003	/* Use the counter as encoder */
+#define A4L_INSN_CONFIG_SET_GATE_SRC		2001	/* Set gate source */
+#define A4L_INSN_CONFIG_GET_GATE_SRC		2002	/* Get gate source */
+#define A4L_INSN_CONFIG_SET_CLOCK_SRC		2003	/* Set master clock source */
+#define A4L_INSN_CONFIG_GET_CLOCK_SRC		2004	/* Get master clock source */
+#define A4L_INSN_CONFIG_SET_OTHER_SRC		2005	/* Set other source */
+#define A4L_INSN_CONFIG_SET_COUNTER_MODE	4097
+#define A4L_INSN_CONFIG_SET_ROUTING		4099
+#define A4L_INSN_CONFIG_GET_ROUTING		4109
+
+/*! @} INSN_CONFIG_xxx */
+
+/*!
+ * @anchor ANALOGY_COUNTER_xxx @name Counter status bits
+ * @brief Status bits for INSN_CONFIG_GET_COUNTER_STATUS
+ * @{
+ */
+
+#define A4L_COUNTER_ARMED		0x1
+#define A4L_COUNTER_COUNTING		0x2
+#define A4L_COUNTER_TERMINAL_COUNT	0x4
+
+	  /*! @} ANALOGY_COUNTER_xxx */
+
+/*!
+ * @anchor ANALOGY_IO_DIRECTION @name IO direction
+ * @brief Values to define the IO polarity
+ * @{
+ */
+
+#define A4L_INPUT	0
+#define A4L_OUTPUT	1
+#define A4L_OPENDRAIN	2
+
+	  /*! @} ANALOGY_IO_DIRECTION */
+
+
+/*!
+ * @anchor ANALOGY_EV_xxx @name Event types
+ * @brief Values defining the Analogy events. They might be used to send
+ * specific events through the instruction interface.
+ * @{
+ */
+
+#define A4L_EV_START		0x00040000
+#define A4L_EV_SCAN_BEGIN	0x00080000
+#define A4L_EV_CONVERT		0x00100000
+#define A4L_EV_SCAN_END		0x00200000
+#define A4L_EV_STOP		0x00400000
+
+/*! @} ANALOGY_EV_xxx */
+
+/*!
+ * @brief Structure describing the synchronous instruction
+ * @see a4l_snd_insn()
+ */
+
+struct a4l_instruction {
+	unsigned int type;
+		       /**< Instruction type */
+	unsigned int idx_subd;
+			   /**< Subdevice to which the instruction will be applied. */
+	unsigned int chan_desc;
+			    /**< Channel descriptor */
+	unsigned int data_size;
+			    /**< Size of the instruction data */
+	void *data;
+		    /**< Instruction data */
+};
+typedef struct a4l_instruction a4l_insn_t;
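+
+/*
+ * Illustrative sketch (assumptions flagged inline): a synchronous read
+ * instruction fetching 16 samples from channel 0 of subdevice 0, as it
+ * could be submitted through a4l_snd_insn(). Whether data_size counts
+ * bytes is an assumption to check against the library in use.
+ *
+ *	sampl_t buf[16];
+ *	a4l_insn_t insn = {
+ *		.type = A4L_INSN_READ,
+ *		.idx_subd = 0,
+ *		.chan_desc = PACK(0, 0, AREF_GROUND),
+ *		.data_size = sizeof(buf),	// assumed to be in bytes
+ *		.data = buf,
+ *	};
+ */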
+
+/*!
+ * @brief Structure describing the list of synchronous instructions
+ * @see a4l_snd_insnlist()
+ */
+
+struct a4l_instruction_list {
+	unsigned int count;
+			/**< Instructions count */
+	a4l_insn_t *insns;
+			  /**< Array containing the instruction pointers */
+};
+typedef struct a4l_instruction_list a4l_insnlst_t;
+
+/*! @} analogy_lib_sync1 */
+
+struct a4l_calibration_subdev {
+	a4l_sbinfo_t *info;
+	char *name;
+	int slen;
+	int idx;
+};
+
+struct a4l_calibration_subdev_data {
+	int index;
+	int channel;
+	int range;
+	int expansion;
+	int nb_coeff;
+	double *coeff;
+
+};
+
+struct a4l_calibration_data {
+	char *driver_name;
+	char *board_name;
+	int nb_ai;
+	struct a4l_calibration_subdev_data *ai;
+	int nb_ao;
+	struct a4l_calibration_subdev_data *ao;
+};
+
+struct a4l_polynomial {
+	int expansion;
+	int order;
+	int nb_coeff;
+	double *coeff;
+};
+
+
+#endif /* _RTDM_UAPI_ANALOGY_H */
+++ linux-patched/include/xenomai/rtdm/uapi/gpio.h	2022-03-21 12:58:32.269860877 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/serial.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_UAPI_GPIO_H
+#define _RTDM_UAPI_GPIO_H
+
+struct rtdm_gpio_readout {
+	nanosecs_abs_t timestamp;
+	__s32 value;
+};
+
+#define GPIO_RTIOC_DIR_OUT	_IOW(RTDM_CLASS_GPIO, 0, int)
+#define GPIO_RTIOC_DIR_IN	_IO(RTDM_CLASS_GPIO, 1)
+#define GPIO_RTIOC_IRQEN	_IOW(RTDM_CLASS_GPIO, 2, int) /* GPIO trigger */
+#define GPIO_RTIOC_IRQDIS	_IO(RTDM_CLASS_GPIO, 3)
+#define GPIO_RTIOC_REQS		_IO(RTDM_CLASS_GPIO, 4)
+#define GPIO_RTIOC_RELS		_IO(RTDM_CLASS_GPIO, 5)
+#define GPIO_RTIOC_TS_MONO	_IOR(RTDM_CLASS_GPIO, 7, int)
+#define GPIO_RTIOC_TS_REAL	_IOR(RTDM_CLASS_GPIO, 8, int)
+#define GPIO_RTIOC_TS		GPIO_RTIOC_TS_REAL
+
+#define GPIO_TRIGGER_NONE		0x0 /* unspecified */
+#define GPIO_TRIGGER_EDGE_RISING	0x1
+#define GPIO_TRIGGER_EDGE_FALLING	0x2
+#define GPIO_TRIGGER_LEVEL_HIGH		0x4
+#define GPIO_TRIGGER_LEVEL_LOW		0x8
+#define GPIO_TRIGGER_MASK		0xf
+
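+/*
+ * Illustrative sketch (device path and pin are assumptions): arming a
+ * rising-edge interrupt on a GPIO and reading back a timestamped value.
+ * Whether read() yields a plain value or a struct rtdm_gpio_readout
+ * depends on the timestamping mode requested beforehand.
+ *
+ *	struct rtdm_gpio_readout rdo;
+ *	int trigger = GPIO_TRIGGER_EDGE_RISING, on = 1;
+ *	int fd = open("/dev/rtdm/gpiochip0/gpio23", O_RDWR);	// assumed node
+ *
+ *	ioctl(fd, GPIO_RTIOC_TS_MONO, &on);	// request monotonic timestamps
+ *	ioctl(fd, GPIO_RTIOC_IRQEN, &trigger);
+ *	read(fd, &rdo, sizeof(rdo));		// blocks until the next edge
+ */
+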
+#endif /* !_RTDM_UAPI_GPIO_H */
+++ linux-patched/include/xenomai/rtdm/uapi/serial.h	2022-03-21 12:58:32.262860946 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/rtdm.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, serial device profile header
+ *
+ * @note Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * @ingroup rtserial
+ */
+#ifndef _RTDM_UAPI_SERIAL_H
+#define _RTDM_UAPI_SERIAL_H
+
+#define RTSER_PROFILE_VER		3
+
+/*!
+ * @anchor RTSER_DEF_BAUD   @name RTSER_DEF_BAUD
+ * Default baud rate
+ * @{ */
+#define RTSER_DEF_BAUD			9600
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_PARITY   @name RTSER_xxx_PARITY
+ * Number of parity bits
+ * @{ */
+#define RTSER_NO_PARITY			0x00
+#define RTSER_ODD_PARITY		0x01
+#define RTSER_EVEN_PARITY		0x03
+#define RTSER_DEF_PARITY		RTSER_NO_PARITY
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_BITS   @name RTSER_xxx_BITS
+ * Number of data bits
+ * @{ */
+#define RTSER_5_BITS			0x00
+#define RTSER_6_BITS			0x01
+#define RTSER_7_BITS			0x02
+#define RTSER_8_BITS			0x03
+#define RTSER_DEF_BITS			RTSER_8_BITS
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_STOPB   @name RTSER_xxx_STOPB
+ * Number of stop bits
+ * @{ */
+#define RTSER_1_STOPB			0x00
+/** valid only in combination with 5 data bits */
+#define RTSER_1_5_STOPB			0x01
+#define RTSER_2_STOPB			0x01
+#define RTSER_DEF_STOPB			RTSER_1_STOPB
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_HAND   @name RTSER_xxx_HAND
+ * Handshake mechanisms
+ * @{ */
+#define RTSER_NO_HAND			0x00
+#define RTSER_RTSCTS_HAND		0x01
+#define RTSER_DEF_HAND			RTSER_NO_HAND
+/** @} */
+
+/*!
+ * @anchor RTSER_RS485_xxx   @name RTSER_RS485_xxx
+ * RS485 mode with automatic RTS handling
+ * @{ */
+#define RTSER_RS485_DISABLE		0x00
+#define RTSER_RS485_ENABLE		0x01
+#define RTSER_DEF_RS485			RTSER_RS485_DISABLE
+/** @} */
+
+/*!
+ * @anchor RTSER_FIFO_xxx   @name RTSER_FIFO_xxx
+ * Reception FIFO interrupt threshold
+ * @{ */
+#define RTSER_FIFO_DEPTH_1		0x00
+#define RTSER_FIFO_DEPTH_4		0x40
+#define RTSER_FIFO_DEPTH_8		0x80
+#define RTSER_FIFO_DEPTH_14		0xC0
+#define RTSER_DEF_FIFO_DEPTH		RTSER_FIFO_DEPTH_1
+/** @} */
+
+/*!
+ * @anchor RTSER_TIMEOUT_xxx   @name RTSER_TIMEOUT_xxx
+ * Special timeout values, see also @ref RTDM_TIMEOUT_xxx
+ * @{ */
+#define RTSER_TIMEOUT_INFINITE		RTDM_TIMEOUT_INFINITE
+#define RTSER_TIMEOUT_NONE		RTDM_TIMEOUT_NONE
+#define RTSER_DEF_TIMEOUT		RTDM_TIMEOUT_INFINITE
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_TIMESTAMP_HISTORY   @name RTSER_xxx_TIMESTAMP_HISTORY
+ * Timestamp history control
+ * @{ */
+#define RTSER_RX_TIMESTAMP_HISTORY	0x01
+#define RTSER_DEF_TIMESTAMP_HISTORY	0x00
+/** @} */
+
+/*!
+ * @anchor RTSER_EVENT_xxx   @name RTSER_EVENT_xxx
+ * Events bits
+ * @{ */
+#define RTSER_EVENT_RXPEND		0x01
+#define RTSER_EVENT_ERRPEND		0x02
+#define RTSER_EVENT_MODEMHI		0x04
+#define RTSER_EVENT_MODEMLO		0x08
+#define RTSER_EVENT_TXEMPTY		0x10
+#define RTSER_DEF_EVENT_MASK		0x00
+/** @} */
+
+
+/*!
+ * @anchor RTSER_SET_xxx   @name RTSER_SET_xxx
+ * Configuration mask bits
+ * @{ */
+#define RTSER_SET_BAUD			0x0001
+#define RTSER_SET_PARITY		0x0002
+#define RTSER_SET_DATA_BITS		0x0004
+#define RTSER_SET_STOP_BITS		0x0008
+#define RTSER_SET_HANDSHAKE		0x0010
+#define RTSER_SET_FIFO_DEPTH		0x0020
+#define RTSER_SET_TIMEOUT_RX		0x0100
+#define RTSER_SET_TIMEOUT_TX		0x0200
+#define RTSER_SET_TIMEOUT_EVENT		0x0400
+#define RTSER_SET_TIMESTAMP_HISTORY	0x0800
+#define RTSER_SET_EVENT_MASK		0x1000
+#define RTSER_SET_RS485			0x2000
+/** @} */
+
+
+/*!
+ * @anchor RTSER_LSR_xxx   @name RTSER_LSR_xxx
+ * Line status bits
+ * @{ */
+#define RTSER_LSR_DATA			0x01
+#define RTSER_LSR_OVERRUN_ERR		0x02
+#define RTSER_LSR_PARITY_ERR		0x04
+#define RTSER_LSR_FRAMING_ERR		0x08
+#define RTSER_LSR_BREAK_IND		0x10
+#define RTSER_LSR_THR_EMTPY		0x20
+#define RTSER_LSR_TRANSM_EMPTY		0x40
+#define RTSER_LSR_FIFO_ERR		0x80
+#define RTSER_SOFT_OVERRUN_ERR		0x0100
+/** @} */
+
+
+/*!
+ * @anchor RTSER_MSR_xxx   @name RTSER_MSR_xxx
+ * Modem status bits
+ * @{ */
+#define RTSER_MSR_DCTS			0x01
+#define RTSER_MSR_DDSR			0x02
+#define RTSER_MSR_TERI			0x04
+#define RTSER_MSR_DDCD			0x08
+#define RTSER_MSR_CTS			0x10
+#define RTSER_MSR_DSR			0x20
+#define RTSER_MSR_RI			0x40
+#define RTSER_MSR_DCD			0x80
+/** @} */
+
+
+/*!
+ * @anchor RTSER_MCR_xxx   @name RTSER_MCR_xxx
+ * Modem control bits
+ * @{ */
+#define RTSER_MCR_DTR			0x01
+#define RTSER_MCR_RTS			0x02
+#define RTSER_MCR_OUT1			0x04
+#define RTSER_MCR_OUT2			0x08
+#define RTSER_MCR_LOOP			0x10
+/** @} */
+
+
+/*!
+ * @anchor RTSER_BREAK_xxx   @name RTSER_BREAK_xxx
+ * Break control
+ * @{ */
+#define RTSER_BREAK_CLR			0x00
+#define RTSER_BREAK_SET			0x01
+
+
+/**
+ * Serial device configuration
+ */
+typedef struct rtser_config {
+	/** mask specifying valid fields, see @ref RTSER_SET_xxx */
+	int		config_mask;
+
+	/** baud rate, default @ref RTSER_DEF_BAUD */
+	int		baud_rate;
+
+	/** number of parity bits, see @ref RTSER_xxx_PARITY */
+	int		parity;
+
+	/** number of data bits, see @ref RTSER_xxx_BITS */
+	int		data_bits;
+
+	/** number of stop bits, see @ref RTSER_xxx_STOPB */
+	int		stop_bits;
+
+	/** handshake mechanisms, see @ref RTSER_xxx_HAND */
+	int		handshake;
+
+	/** reception FIFO interrupt threshold, see @ref RTSER_FIFO_xxx */
+	int		fifo_depth;
+
+	int		reserved;
+
+	/** reception timeout, see @ref RTSER_TIMEOUT_xxx for special
+	 *  values */
+	nanosecs_rel_t	rx_timeout;
+
+	/** transmission timeout, see @ref RTSER_TIMEOUT_xxx for special
+	 *  values */
+	nanosecs_rel_t	tx_timeout;
+
+	/** event timeout, see @ref RTSER_TIMEOUT_xxx for special values */
+	nanosecs_rel_t	event_timeout;
+
+	/** enable timestamp history, see @ref RTSER_xxx_TIMESTAMP_HISTORY */
+	int		timestamp_history;
+
+	/** event mask to be used with @ref RTSER_RTIOC_WAIT_EVENT, see
+	 *  @ref RTSER_EVENT_xxx */
+	int		event_mask;
+
+	/** enable RS485 mode, see @ref RTSER_RS485_xxx */
+	int		rs485;
+} rtser_config_t;
+
+/**
+ * Serial device status
+ */
+typedef struct rtser_status {
+	/** line status register, see @ref RTSER_LSR_xxx */
+	int		line_status;
+
+	/** modem status register, see @ref RTSER_MSR_xxx */
+	int		modem_status;
+} rtser_status_t;
+
+/**
+ * Additional information about serial device events
+ */
+typedef struct rtser_event {
+	/** signalled events, see @ref RTSER_EVENT_xxx */
+	int		events;
+
+	/** number of pending input characters */
+	int		rx_pending;
+
+	/** last interrupt timestamp */
+	nanosecs_abs_t	last_timestamp;
+
+	/** reception timestamp of oldest character in input queue */
+	nanosecs_abs_t	rxpend_timestamp;
+} rtser_event_t;
+
+
+#define RTIOC_TYPE_SERIAL		RTDM_CLASS_SERIAL
+
+
+/*!
+ * @name Sub-Classes of RTDM_CLASS_SERIAL
+ * @{ */
+#define RTDM_SUBCLASS_16550A		0
+/** @} */
+
+
+/*!
+ * @anchor SERIOCTLs @name IOCTLs
+ * Serial device IOCTLs
+ * @{ */
+
+/**
+ * Get serial device configuration
+ *
+ * @param[out] arg Pointer to configuration buffer (struct rtser_config)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTSER_RTIOC_GET_CONFIG	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x00, struct rtser_config)
+
+/**
+ * Set serial device configuration
+ *
+ * @param[in] arg Pointer to configuration buffer (struct rtser_config)
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EPERM is returned if the caller's context is invalid, see note below.
+ *
+ * - -ENOMEM is returned if a new history buffer for timestamps cannot be
+ * allocated.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note If rtser_config contains a valid timestamp_history and the
+ * addressed device has been opened in non-real-time context, this IOCTL must
+ * be issued in non-real-time context as well. Otherwise, this command will
+ * fail.
+ */
+#define RTSER_RTIOC_SET_CONFIG	\
+	_IOW(RTIOC_TYPE_SERIAL, 0x01, struct rtser_config)
+
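+/*
+ * Illustrative sketch: switching an already opened port (fd) to 115200
+ * bps with RTS/CTS handshake. Only the fields flagged in config_mask
+ * are applied; all other parameters keep their current setting.
+ *
+ *	struct rtser_config cfg = {
+ *		.config_mask = RTSER_SET_BAUD | RTSER_SET_HANDSHAKE,
+ *		.baud_rate = 115200,
+ *		.handshake = RTSER_RTSCTS_HAND,
+ *	};
+ *
+ *	int ret = ioctl(fd, RTSER_RTIOC_SET_CONFIG, &cfg);
+ */
+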
+/**
+ * Get serial device status
+ *
+ * @param[out] arg Pointer to status buffer (struct rtser_status)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note The error states @c RTSER_LSR_OVERRUN_ERR, @c RTSER_LSR_PARITY_ERR,
+ * @c RTSER_LSR_FRAMING_ERR, and @c RTSER_SOFT_OVERRUN_ERR that may have
+ * occurred during previous read accesses to the device will be saved for being
+ * reported via this IOCTL. Upon return from @c RTSER_RTIOC_GET_STATUS, the
+ * saved state will be cleared.
+ */
+#define RTSER_RTIOC_GET_STATUS	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x02, struct rtser_status)
+
+/**
+ * Get serial device's modem control register
+ *
+ * @param[out] arg Pointer to variable receiving the content (int, see
+ *             @ref RTSER_MCR_xxx)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTSER_RTIOC_GET_CONTROL	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x03, int)
+
+/**
+ * Set serial device's modem control register
+ *
+ * @param[in] arg New control register content (int, see @ref RTSER_MCR_xxx)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTSER_RTIOC_SET_CONTROL	\
+	_IOW(RTIOC_TYPE_SERIAL, 0x04, int)
+
+/**
+ * Wait on serial device events according to previously set mask
+ *
+ * @param[out] arg Pointer to event information buffer (struct rtser_event)
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EBUSY is returned if another task is already waiting on events of this
+ * device.
+ *
+ * - -EBADF is returned if the file descriptor is invalid or the device has
+ * just been closed.
+ *
+ * @coretags{mode-unrestricted}
+ */
+#define RTSER_RTIOC_WAIT_EVENT	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x05, struct rtser_event)
+/** @} */
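+
+/*
+ * Illustrative sketch: waiting for incoming characters after having
+ * selected RTSER_EVENT_RXPEND in event_mask (with RTSER_SET_EVENT_MASK
+ * flagged) through RTSER_RTIOC_SET_CONFIG. handle_rx() is hypothetical.
+ *
+ *	struct rtser_event ev;
+ *
+ *	if (ioctl(fd, RTSER_RTIOC_WAIT_EVENT, &ev) == 0 &&
+ *	    (ev.events & RTSER_EVENT_RXPEND))
+ *		handle_rx(fd, ev.rx_pending);
+ */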
+
+/**
+ * Set or clear break on UART output line
+ *
+ * @param[in] arg @c RTSER_BREAK_SET or @c RTSER_BREAK_CLR (int)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note A set break condition may also be cleared on UART line
+ * reconfiguration.
+ */
+#define RTSER_RTIOC_BREAK_CTL	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x06, int)
+/** @} */
+
+/*!
+ * @anchor SERutils @name RT Serial example and utility programs
+ * @{ */
+/** @example cross-link.c */
+/** @} */
+
+#endif /* !_RTDM_UAPI_SERIAL_H */
+++ linux-patched/include/xenomai/rtdm/uapi/rtdm.h	2022-03-21 12:58:32.254861024 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/gpiopwm.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, user API header.
+ *
+ * @note Copyright (C) 2005, 2006 Jan Kiszka <jan.kiszka@web.de>
+ * @note Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ * @ingroup rtdm_user_api
+ */
+#ifndef _RTDM_UAPI_RTDM_H
+#define _RTDM_UAPI_RTDM_H
+
+/*!
+ * @addtogroup rtdm
+ * @{
+ */
+
+/*!
+ * @anchor rtdm_api_versioning @name API Versioning
+ * @{ */
+/** Common user and driver API version */
+#define RTDM_API_VER			9
+
+/** Minimum API revision compatible with the current release */
+#define RTDM_API_MIN_COMPAT_VER		9
+/** @} API Versioning */
+
+/** RTDM type for representing absolute dates. Its base type is a 64 bit
+ *  unsigned integer. The unit is 1 nanosecond. */
+typedef uint64_t nanosecs_abs_t;
+
+/** RTDM type for representing relative intervals. Its base type is a 64 bit
+ *  signed integer. The unit is 1 nanosecond. Relative intervals can also
+ *  encode the special timeouts "infinite" and "non-blocking", see
+ *  @ref RTDM_TIMEOUT_xxx. */
+typedef int64_t nanosecs_rel_t;
+
+/*!
+ * @anchor RTDM_TIMEOUT_xxx @name RTDM_TIMEOUT_xxx
+ * Special timeout values
+ * @{ */
+/** Block forever. */
+#define RTDM_TIMEOUT_INFINITE		0
+
+/** Any negative timeout means non-blocking. */
+#define RTDM_TIMEOUT_NONE		(-1)
+/** @} RTDM_TIMEOUT_xxx */
+/** @} rtdm */
+
+/*!
+ * @addtogroup rtdm_profiles
+ * @{
+ */
+
+/*!
+ * @anchor RTDM_CLASS_xxx   @name RTDM_CLASS_xxx
+ * Device classes
+ * @{ */
+#define RTDM_CLASS_PARPORT		1
+#define RTDM_CLASS_SERIAL		2
+#define RTDM_CLASS_CAN			3
+#define RTDM_CLASS_NETWORK		4
+#define RTDM_CLASS_RTMAC		5
+#define RTDM_CLASS_TESTING		6
+#define RTDM_CLASS_RTIPC		7
+#define RTDM_CLASS_COBALT		8
+#define RTDM_CLASS_UDD			9
+#define RTDM_CLASS_MEMORY		10
+#define RTDM_CLASS_GPIO			11
+#define RTDM_CLASS_SPI			12
+#define RTDM_CLASS_PWM			13
+
+#define RTDM_CLASS_MISC			223
+#define RTDM_CLASS_EXPERIMENTAL		224
+#define RTDM_CLASS_MAX			255
+/** @} RTDM_CLASS_xxx */
+
+#define RTDM_SUBCLASS_GENERIC		(-1)
+
+#define RTIOC_TYPE_COMMON		0
+
+/*!
+ * @anchor device_naming    @name Device Naming
+ * Maximum length of device names (excluding the final null character)
+ * @{
+ */
+#define RTDM_MAX_DEVNAME_LEN		31
+/** @} Device Naming */
+
+/**
+ * Device information
+ */
+typedef struct rtdm_device_info {
+	/** Device flags, see @ref dev_flags "Device Flags" for details */
+	int device_flags;
+
+	/** Device class ID, see @ref RTDM_CLASS_xxx */
+	int device_class;
+
+	/** Device sub-class, either RTDM_SUBCLASS_GENERIC or a
+	 *  RTDM_SUBCLASS_xxx definition of the related @ref rtdm_profiles
+	 *  "Device Profile" */
+	int device_sub_class;
+
+	/** Supported device profile version */
+	int profile_version;
+} rtdm_device_info_t;
+
+/*!
+ * @anchor RTDM_PURGE_xxx_BUFFER    @name RTDM_PURGE_xxx_BUFFER
+ * Flags selecting buffers to be purged
+ * @{ */
+#define RTDM_PURGE_RX_BUFFER		0x0001
+#define RTDM_PURGE_TX_BUFFER		0x0002
+/** @} RTDM_PURGE_xxx_BUFFER*/
+
+/*!
+ * @anchor common_IOCTLs    @name Common IOCTLs
+ * The following IOCTLs are common to all device rtdm_profiles.
+ * @{
+ */
+
+/**
+ * Retrieve information about a device or socket.
+ * @param[out] arg Pointer to information buffer (struct rtdm_device_info)
+ */
+#define RTIOC_DEVICE_INFO \
+	_IOR(RTIOC_TYPE_COMMON, 0x00, struct rtdm_device_info)
+
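+/*
+ * Illustrative sketch: querying class and profile information from an
+ * RTDM file descriptor before relying on a particular device profile.
+ *
+ *	struct rtdm_device_info info;
+ *
+ *	if (ioctl(fd, RTIOC_DEVICE_INFO, &info) == 0 &&
+ *	    info.device_class == RTDM_CLASS_SERIAL &&
+ *	    info.profile_version >= 3)
+ *		;	// the rtserial profile documented in serial.h applies
+ */
+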
+/**
+ * Purge internal device or socket buffers.
+ * @param[in] arg Purge mask, see @ref RTDM_PURGE_xxx_BUFFER
+ */
+#define RTIOC_PURGE		_IOW(RTIOC_TYPE_COMMON, 0x10, int)
+/** @} Common IOCTLs */
+/** @} rtdm */
+
+/* Internally used for mapping socket functions on IOCTLs */
+struct _rtdm_getsockopt_args {
+	int level;
+	int optname;
+	void *optval;
+	socklen_t *optlen;
+};
+
+struct _rtdm_setsockopt_args {
+	int level;
+	int optname;
+	const void *optval;
+	socklen_t optlen;
+};
+
+struct _rtdm_getsockaddr_args {
+	struct sockaddr *addr;
+	socklen_t *addrlen;
+};
+
+struct _rtdm_setsockaddr_args {
+	const struct sockaddr *addr;
+	socklen_t addrlen;
+};
+
+#define _RTIOC_GETSOCKOPT	_IOW(RTIOC_TYPE_COMMON, 0x20,		\
+				     struct _rtdm_getsockopt_args)
+#define _RTIOC_SETSOCKOPT	_IOW(RTIOC_TYPE_COMMON, 0x21,		\
+				     struct _rtdm_setsockopt_args)
+#define _RTIOC_BIND		_IOW(RTIOC_TYPE_COMMON, 0x22,		\
+				     struct _rtdm_setsockaddr_args)
+#define _RTIOC_CONNECT		_IOW(RTIOC_TYPE_COMMON, 0x23,		\
+				     struct _rtdm_setsockaddr_args)
+#define _RTIOC_LISTEN		_IOW(RTIOC_TYPE_COMMON, 0x24,		\
+				     int)
+#define _RTIOC_ACCEPT		_IOW(RTIOC_TYPE_COMMON, 0x25,		\
+				     struct _rtdm_getsockaddr_args)
+#define _RTIOC_GETSOCKNAME	_IOW(RTIOC_TYPE_COMMON, 0x26,		\
+				     struct _rtdm_getsockaddr_args)
+#define _RTIOC_GETPEERNAME	_IOW(RTIOC_TYPE_COMMON, 0x27,		\
+				     struct _rtdm_getsockaddr_args)
+#define _RTIOC_SHUTDOWN		_IOW(RTIOC_TYPE_COMMON, 0x28,		\
+				     int)
+
+/* Internally used for mmap() */
+struct _rtdm_mmap_request {
+	__u64 offset;
+	size_t length;
+	int prot;
+	int flags;
+};
+
+#endif /* !_RTDM_UAPI_RTDM_H */
+++ linux-patched/include/xenomai/rtdm/uapi/gpiopwm.h	2022-03-21 12:58:32.247861092 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/can.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, pwm header
+ *
+ * @note Copyright (C) 2015 Jorge Ramirez <jro@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * @ingroup rttesting
+ */
+#ifndef _RTDM_UAPI_PWM_H
+#define _RTDM_UAPI_PWM_H
+
+#include <linux/types.h>
+
+#define RTPWM_PROFILE_VER			1
+
+struct gpiopwm {
+	unsigned int duty_cycle;
+	unsigned int range_min;
+	unsigned int range_max;
+	unsigned int period;
+	unsigned int gpio;
+};
+
+#define RTIOC_TYPE_PWM		RTDM_CLASS_PWM
+
+#define GPIOPWM_RTIOC_SET_CONFIG \
+	_IOW(RTIOC_TYPE_PWM, 0x00, struct gpiopwm)
+
+#define GPIOPWM_RTIOC_GET_CONFIG \
+	_IOR(RTIOC_TYPE_PWM, 0x10, struct gpiopwm)
+
+#define GPIOPWM_RTIOC_START \
+	_IO(RTIOC_TYPE_PWM, 0x20)
+
+#define GPIOPWM_RTIOC_STOP \
+	_IO(RTIOC_TYPE_PWM, 0x30)
+
+#define GPIOPWM_RTIOC_CHANGE_DUTY_CYCLE \
+	_IOW(RTIOC_TYPE_PWM, 0x40, unsigned int)
+
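+/*
+ * Illustrative sketch (gpio number, device path and units are
+ * assumptions): configuring a 20 ms PWM period on a pin and starting
+ * the generator.
+ *
+ *	struct gpiopwm conf = {
+ *		.gpio = 24,
+ *		.period = 20000000,	// assumed ns, i.e. 20 ms
+ *		.range_min = 0,
+ *		.range_max = 100,
+ *		.duty_cycle = 50,	// within [range_min .. range_max]
+ *	};
+ *	int fd = open("/dev/rtdm/gpiopwm0", O_RDWR);	// assumed node
+ *
+ *	ioctl(fd, GPIOPWM_RTIOC_SET_CONFIG, &conf);
+ *	ioctl(fd, GPIOPWM_RTIOC_START);
+ *	...
+ *	ioctl(fd, GPIOPWM_RTIOC_STOP);
+ */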
+
+#endif /* !_RTDM_UAPI_PWM_H */
+++ linux-patched/include/xenomai/rtdm/uapi/can.h	2022-03-21 12:58:32.240861160 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/net.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Real-Time Driver Model for RT-Socket-CAN, CAN device profile header
+ *
+ * @note Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * @note Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                         <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * This RTDM CAN device profile header is based on:
+ *
+ * include/linux/can.h, include/linux/socket.h, net/can/pf_can.h in
+ * linux-can.patch, a CAN socket framework for Linux
+ *
+ * Copyright (C) 2004, 2005,
+ * Robert Schwebel, Benedikt Spranger, Marc Kleine-Budde, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_UAPI_CAN_H
+#define _RTDM_UAPI_CAN_H
+
+/**
+ * @addtogroup rtdm_can
+ * @{
+ */
+
+#define RTCAN_PROFILE_VER  2
+
+#ifndef AF_CAN
+
+/** CAN address family */
+#define AF_CAN	29
+
+/** CAN protocol family */
+#define PF_CAN	AF_CAN
+
+#endif
+
+/** CAN socket levels
+ *
+ *  Used for @ref Sockopts for the particular protocols.
+ */
+#define SOL_CAN_RAW  103
+
+/** Type of CAN id (see @ref CAN_xxx_MASK and @ref CAN_xxx_FLAG) */
+typedef uint32_t can_id_t;
+typedef uint32_t canid_t;
+
+/** Type of CAN error mask */
+typedef can_id_t can_err_mask_t;
+
+/*!
+ * @anchor CAN_xxx_MASK @name CAN ID masks
+ * Bit masks for masking CAN IDs
+ * @{ */
+
+/** Bit mask for extended CAN IDs */
+#define CAN_EFF_MASK  0x1FFFFFFF
+
+/** Bit mask for standard CAN IDs */
+#define CAN_SFF_MASK  0x000007FF
+
+/** @} */
+
+/*!
+ * @anchor CAN_xxx_FLAG @name CAN ID flags
+ * Flags within a CAN ID indicating special CAN frame attributes
+ * @{ */
+/** Extended frame */
+#define CAN_EFF_FLAG  0x80000000
+/** Remote transmission frame */
+#define CAN_RTR_FLAG  0x40000000
+/** Error frame (see @ref Errors), not valid in struct can_filter */
+#define CAN_ERR_FLAG  0x20000000
+/** Invert CAN filter definition, only valid in struct can_filter */
+#define CAN_INV_FILTER CAN_ERR_FLAG
+
+/** @} */
+
+/*!
+ * @anchor CAN_PROTO @name Particular CAN protocols
+ * Possible protocols for the PF_CAN protocol family
+ *
+ * Currently only the RAW protocol is supported.
+ * @{ */
+/** Raw protocol of @c PF_CAN, applicable to socket type @c SOCK_RAW */
+#define CAN_RAW  1
+/** @} */
+
+#define CAN_BAUDRATE_UNKNOWN       ((uint32_t)-1)
+#define CAN_BAUDRATE_UNCONFIGURED  0
+
+/**
+ * Baudrate definition in bits per second
+ */
+typedef uint32_t can_baudrate_t;
+
+/**
+ * Supported CAN bit-time types
+ */
+enum CAN_BITTIME_TYPE {
+	/** Standard bit-time definition according to Bosch */
+	CAN_BITTIME_STD,
+	/** Hardware-specific BTR bit-time definition */
+	CAN_BITTIME_BTR
+};
+
+/**
+ * See @ref CAN_BITTIME_TYPE
+ */
+typedef enum CAN_BITTIME_TYPE can_bittime_type_t;
+
+/**
+ * Standard bit-time parameters according to Bosch
+ */
+struct can_bittime_std {
+	uint32_t brp;		/**< Baud rate prescaler */
+	uint8_t prop_seg;	/**< from 1 to 8 */
+	uint8_t phase_seg1;	/**< from 1 to 8 */
+	uint8_t phase_seg2;	/**< from 1 to 8 */
+	uint8_t sjw:7;		/**< from 1 to 4 */
+	uint8_t sam:1;		/**< 1 - enable triple sampling */
+};
+
+/**
+ * Hardware-specific BTR bit-times
+ */
+struct can_bittime_btr {
+
+	uint8_t btr0;		/**< Bus timing register 0 */
+	uint8_t btr1;		/**< Bus timing register 1 */
+};
+
+/**
+ * Custom CAN bit-time definition
+ */
+struct can_bittime {
+	/** Type of bit-time definition */
+	can_bittime_type_t type;
+
+	union {
+		/** Standard bit-time */
+		struct can_bittime_std std;
+		/** Hardware-specific BTR bit-time */
+		struct can_bittime_btr btr;
+	};
+};
+
+/*!
+ * @anchor CAN_MODE @name CAN operation modes
+ * Modes into which CAN controllers can be set
+ * @{ */
+enum CAN_MODE {
+	/*! Set controller in Stop mode (no reception / transmission possible) */
+	CAN_MODE_STOP = 0,
+
+	/*! Set controller into normal operation. @n
+	 *  Coming from stopped mode or bus off, the controller begins with no
+	 *  errors in @ref CAN_STATE_ACTIVE. */
+	CAN_MODE_START,
+
+	/*! Set controller into Sleep mode. @n
+	 *  This is only possible if the controller is not stopped or bus-off. @n
+	 *  Notice that sleep mode will only be entered when there is no bus
+	 *  activity. If the controller detects bus activity while "sleeping"
+	 *  it will go into operating mode again. @n
+	 *  To actively leave sleep mode again trigger @c CAN_MODE_START. */
+	CAN_MODE_SLEEP
+};
+/** @} */
+
+/** See @ref CAN_MODE */
+typedef enum CAN_MODE can_mode_t;
+
+/*!
+ * @anchor CAN_CTRLMODE @name CAN controller modes
+ * Special CAN controllers modes, which can be or'ed together.
+ *
+ * @note These modes are hardware-dependent. Please consult the hardware
+ * manual of the CAN controller for more detailed information.
+ *
+ * @{ */
+
+/*! Listen-Only mode
+ *
+ *  In this mode the CAN controller gives no acknowledgement on the CAN bus,
+ *  even if a message is received successfully, and messages are not
+ *  transmitted. This mode might be useful for bus-monitoring, hot-plugging
+ *  or throughput analysis. */
+#define CAN_CTRLMODE_LISTENONLY 0x1
+
+/*! Loopback mode
+ *
+ * In this mode the CAN controller does an internal loop-back: a message is
+ * transmitted and simultaneously received. That mode can be used for
+ * self-test operation. */
+#define CAN_CTRLMODE_LOOPBACK   0x2
+
+/*! Triple sampling mode
+ *
+ * In this mode the CAN controller uses Triple sampling. */
+#define CAN_CTRLMODE_3_SAMPLES  0x4
+
+/** @} */
+
+/** See @ref CAN_CTRLMODE */
+typedef int can_ctrlmode_t;
+
+/*!
+ * @anchor CAN_STATE @name CAN controller states
+ * States a CAN controller can be in.
+ * @{ */
+enum CAN_STATE {
+	/** CAN controller is error active */
+	CAN_STATE_ERROR_ACTIVE = 0,
+	/** CAN controller is active */
+	CAN_STATE_ACTIVE = 0,
+
+	/** CAN controller is error active, warning level is reached */
+	CAN_STATE_ERROR_WARNING = 1,
+	/** CAN controller is error active, warning level is reached */
+	CAN_STATE_BUS_WARNING = 1,
+
+	/** CAN controller is error passive */
+	CAN_STATE_ERROR_PASSIVE = 2,
+	/** CAN controller is error passive */
+	CAN_STATE_BUS_PASSIVE = 2,
+
+	/** CAN controller went into Bus Off */
+	CAN_STATE_BUS_OFF,
+
+	/** CAN controller is scanning to get the baudrate */
+	CAN_STATE_SCANNING_BAUDRATE,
+
+	/** CAN controller is in stopped mode */
+	CAN_STATE_STOPPED,
+
+	/** CAN controller is in Sleep mode */
+	CAN_STATE_SLEEPING,
+};
+/** @} */
+
+/** See @ref CAN_STATE */
+typedef enum CAN_STATE can_state_t;
+
+#define CAN_STATE_OPERATING(state) ((state) < CAN_STATE_BUS_OFF)
+
+/**
+ * Filter for reception of CAN messages.
+ *
+ * This filter works as follows:
+ * A received CAN ID is AND'ed bitwise with @c can_mask and then compared to
+ * @c can_id. This also includes the @ref CAN_EFF_FLAG and @ref CAN_RTR_FLAG
+ * of @ref CAN_xxx_FLAG. If this comparison is true, the message will be
+ * received by the socket. The logic can be inverted with the @c can_id flag
+ * @ref CAN_INV_FILTER :
+ *
+ * @code
+ * if (can_id & CAN_INV_FILTER) {
+ *    if ((received_can_id & can_mask) != (can_id & ~CAN_INV_FILTER))
+ *       accept-message;
+ * } else {
+ *    if ((received_can_id & can_mask) == can_id)
+ *       accept-message;
+ * }
+ * @endcode
+ *
+ * Multiple filters can be arranged in a filter list and set with
+ * @ref Sockopts. If one of these filters matches a CAN ID upon reception
+ * of a CAN frame, this frame is accepted.
+ *
+ */
+typedef struct can_filter {
+	/** CAN ID which must match with incoming IDs after passing the mask.
+	 *  The filter logic can be inverted with the flag @ref CAN_INV_FILTER. */
+	uint32_t can_id;
+
+	/** Mask which is applied to incoming IDs. See @ref CAN_xxx_MASK
+	 *  "CAN ID masks" if exactly one CAN ID should come through. */
+	uint32_t can_mask;
+} can_filter_t;
+
+/**
+ * Socket address structure for the CAN address family
+ */
+struct sockaddr_can {
+	/** CAN address family, must be @c AF_CAN */
+	sa_family_t can_family;
+
+	/** Interface index of CAN controller. See @ref SIOCGIFINDEX. */
+	int can_ifindex;
+};
+
+/**
+ * Raw CAN frame
+ *
+ * Central structure for receiving and sending CAN frames.
+ */
+typedef struct can_frame {
+	/** CAN ID of the frame
+	 *
+	 *  See @ref CAN_xxx_FLAG "CAN ID flags" for special bits.
+	 */
+	can_id_t can_id;
+
+	/** Size of the payload in bytes */
+	uint8_t can_dlc;
+
+	/** Payload data bytes */
+	uint8_t data[8] __attribute__ ((aligned(8)));
+} can_frame_t;
+
+/**
+ * CAN interface request descriptor
+ *
+ * Parameter block for submitting CAN control requests.
+ */
+struct can_ifreq {
+	union {
+		char	ifrn_name[IFNAMSIZ];
+	} ifr_ifrn;
+	
+	union {
+		struct can_bittime bittime;
+		can_baudrate_t baudrate;
+		can_ctrlmode_t ctrlmode;
+		can_mode_t mode;
+		can_state_t state;
+		int ifru_ivalue;
+	} ifr_ifru;
+};
+
+/*!
+ * @anchor RTCAN_TIMESTAMPS   @name Timestamp switches
+ * Arguments to pass to @ref RTCAN_RTIOC_TAKE_TIMESTAMP
+ * @{ */
+#define RTCAN_TAKE_NO_TIMESTAMPS	0  /**< Switch off taking timestamps */
+#define RTCAN_TAKE_TIMESTAMPS		1  /**< Do take timestamps */
+/** @} */
+
+#define RTIOC_TYPE_CAN  RTDM_CLASS_CAN
+
+/*!
+ * @anchor Rawsockopts @name RAW socket options
+ * Setting and getting CAN RAW socket options.
+ * @{ */
+
+/**
+ * CAN filter definition
+ *
+ * A CAN raw filter list with elements of struct can_filter can be installed
+ * with @c setsockopt. This list is used upon reception of CAN frames to
+ * decide whether the bound socket will receive a frame. An empty filter list
+ * can also be defined using optlen = 0, which is recommended for write-only
+ * sockets.
+ * @n
+ * If the socket was already bound with @ref Bind, the old filter list
+ * gets replaced with the new one. Be aware that already received, but
+ * not read out CAN frames may stay in the socket buffer.
+ * @n
+ * @n
+ * @param [in] level @b SOL_CAN_RAW
+ *
+ * @param [in] optname @b CAN_RAW_FILTER
+ *
+ * @param [in] optval Pointer to array of struct can_filter.
+ *
+ * @param [in] optlen Size of filter list: count * sizeof( struct can_filter).
+ * @n
+ * @coretags{task-unrestricted}
+ * @n
+ * Specific return values:
+ * - -EFAULT (It was not possible to access user space memory area at the
+ *            specified address.)
+ * - -ENOMEM (Not enough memory to fulfill the operation)
+ * - -EINVAL (Invalid length "optlen")
+ * - -ENOSPC (No space to store filter list, check RT-Socket-CAN kernel
+ *            parameters)
+ * .
+ */
+#define CAN_RAW_FILTER		0x1
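+
+/*
+ * Illustrative sketch: accepting only standard data frames with CAN ID
+ * 0x123 on an already bound CAN_RAW socket (sockfd).
+ *
+ *	struct can_filter flt = {
+ *		.can_id = 0x123,
+ *		.can_mask = CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG,
+ *	};
+ *
+ *	setsockopt(sockfd, SOL_CAN_RAW, CAN_RAW_FILTER, &flt, sizeof(flt));
+ */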
+
+/**
+ * CAN error mask
+ *
+ * A CAN error mask (see @ref Errors) can be set with @c setsockopt. This
+ * mask is then used to decide if error frames are delivered to this socket
+ * in case of error conditions. The error frames are marked with the
+ * @ref CAN_ERR_FLAG of @ref CAN_xxx_FLAG and must be handled by the
+ * application properly. A detailed description of the errors can be
+ * found in the @c can_id and the @c data fields of struct can_frame
+ * (see @ref Errors for further details).
+ *
+ * @n
+ * @param [in] level @b SOL_CAN_RAW
+ *
+ * @param [in] optname @b CAN_RAW_ERR_FILTER
+ *
+ * @param [in] optval Pointer to error mask of type can_err_mask_t.
+ *
+ * @param [in] optlen Size of error mask: sizeof(can_err_mask_t).
+ *
+ * @coretags{task-unrestricted}
+ * @n
+ * Specific return values:
+ * - -EFAULT (It was not possible to access user space memory area at the
+ *            specified address.)
+ * - -EINVAL (Invalid length "optlen")
+ * .
+ */
+#define CAN_RAW_ERR_FILTER	0x2
+
+/**
+ * CAN TX loopback
+ *
+ * The TX loopback to other local sockets can be selected with this
+ * @c setsockopt.
+ *
+ * @note The TX loopback feature must be enabled in the kernel and then
+ * the loopback to other local TX sockets is enabled by default.
+ *
+ * @n
+ * @param [in] level @b SOL_CAN_RAW
+ *
+ * @param [in] optname @b CAN_RAW_LOOPBACK
+ *
+ * @param [in] optval Pointer to integer value.
+ *
+ * @param [in] optlen Size of int: sizeof(int).
+ *
+ * @coretags{task-unrestricted}
+ * @n
+ * Specific return values:
+ * - -EFAULT (It was not possible to access user space memory area at the
+ *            specified address.)
+ * - -EINVAL (Invalid length "optlen")
+ * - -EOPNOTSUPP (not supported, check RT-Socket-CAN kernel parameters).
+ */
+#define CAN_RAW_LOOPBACK	0x3
+
+/**
+ * CAN receive own messages
+ *
+ * Not supported by RT-Socket-CAN, but defined for compatibility with
+ * Socket-CAN.
+ */
+#define CAN_RAW_RECV_OWN_MSGS   0x4
+
+/** @} */
+
+/*!
+ * @anchor CANIOCTLs @name IOCTLs
+ * CAN device IOCTLs
+ *
+ * @deprecated Passing \c struct \c ifreq as a request descriptor
+ * for CAN IOCTLs is still accepted for backward compatibility,
+ * however it is recommended to switch to \c struct \c can_ifreq at
+ * the first opportunity.
+ *
+ * @{ */
+
+/**
+ * Get CAN interface index by name
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                     (<TT>struct can_ifreq</TT>). If
+ *                     <TT>ifr_name</TT> holds a valid CAN interface
+ *                     name <TT>ifr_ifindex</TT> will be filled with
+ *                     the corresponding interface index.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ *
+ * @coretags{task-unrestricted}
+ */
+#ifdef DOXYGEN_CPP /* For Doxygen only, already defined by kernel headers */
+#define SIOCGIFINDEX defined_by_kernel_header_file
+#endif
+
+/**
+ * Set baud rate
+ *
+ * The baudrate must be specified in bits per second. The driver will
+ * try to calculate reasonable CAN bit-timing parameters. You can use
+ * @ref SIOCSCANCUSTOMBITTIME to set custom bit-timing.
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 @ref can_baudrate_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No valid baud rate, see @ref can_baudrate_t.
+ * - -EDOM  : Baud rate not possible.
+ * - -EAGAIN: Request could not be successfully fulfilled. Try again.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting the baud rate is a configuration task. It should
+ * be done deliberately or otherwise CAN messages will likely be lost.
+ */
+#define SIOCSCANBAUDRATE	_IOW(RTIOC_TYPE_CAN, 0x01, struct can_ifreq)
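+
+/*
+ * Illustrative sketch (not part of the original header): setting 500 kbit/s
+ * on "rtcan0" via a RAW CAN socket "s" (hypothetical descriptor); the
+ * ifr_ifru.baudrate member name is assumed from the struct can_ifreq
+ * declaration earlier in this file:
+ *
+ *   struct can_ifreq ifr;
+ *
+ *   strncpy(ifr.ifr_name, "rtcan0", IFNAMSIZ);
+ *   ifr.ifr_ifru.baudrate = 500000;
+ *   ioctl(s, SIOCSCANBAUDRATE, &ifr);
+ */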
+
+/**
+ * Get baud rate
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                    (<TT>struct can_ifreq</TT>).
+ *                    <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                    <TT>ifr_ifru</TT> will be filled with an instance of
+ *                    @ref can_baudrate_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No baud rate was set yet.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define SIOCGCANBAUDRATE	_IOWR(RTIOC_TYPE_CAN, 0x02, struct can_ifreq)
+
+/**
+ * Set custom bit time parameter
+ *
+ * Custom bit-time parameters can be defined in various formats (see
+ * struct can_bittime).
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 struct can_bittime.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No valid baud rate, see @ref can_baudrate_t.
+ * - -EAGAIN: Request could not be successfully fulfilled. Try again.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting the bit-time is a configuration task. It should
+ * be done deliberately or otherwise CAN messages will likely be lost.
+ */
+#define SIOCSCANCUSTOMBITTIME	_IOW(RTIOC_TYPE_CAN, 0x03, struct can_ifreq)
+
+/**
+ * Get custom bit-time parameters
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                    (<TT>struct can_ifreq</TT>).
+ *                    <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                    <TT>ifr_ifru</TT> will be filled with an instance of
+ *                    struct can_bittime.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No baud rate was set yet.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define SIOCGCANCUSTOMBITTIME	_IOWR(RTIOC_TYPE_CAN, 0x04, struct can_ifreq)
+
+/**
+ * Set operation mode of CAN controller
+ *
+ * See @ref CAN_MODE "CAN controller modes" for available modes.
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 @ref can_mode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EAGAIN: (@ref CAN_MODE_START, @ref CAN_MODE_STOP) Could not successfully
+ *            set mode, hardware is busy. Try again.
+ * - -EINVAL: (@ref CAN_MODE_START) Cannot start controller,
+ *            set baud rate first.
+ * - -ENETDOWN: (@ref CAN_MODE_SLEEP) Cannot go into sleep mode because
+ *              controller is stopped or bus off.
+ * - -EOPNOTSUPP: unknown mode
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting a CAN controller into normal operation after a bus-off can
+ * take some time (128 occurrences of 11 consecutive recessive bits).
+ * In such a case, although this IOCTL will return immediately with success
+ * and @ref SIOCGCANSTATE will report @ref CAN_STATE_ACTIVE,
+ * bus-off recovery may still be in progress. @n
+ * If a controller is bus-off, setting it into stop mode will return no error
+ * but the controller remains bus-off.
+ */
+#define SIOCSCANMODE		_IOW(RTIOC_TYPE_CAN, 0x05, struct can_ifreq)
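+
+/*
+ * Illustrative sketch (not part of the original header): restarting a
+ * controller, e.g. after a bus-off condition, through socket "s"
+ * (hypothetical descriptor); the ifr_ifru.mode member name is assumed
+ * from the struct can_ifreq declaration earlier in this file:
+ *
+ *   struct can_ifreq ifr;
+ *
+ *   strncpy(ifr.ifr_name, "rtcan0", IFNAMSIZ);
+ *   ifr.ifr_ifru.mode = CAN_MODE_START;
+ *   ioctl(s, SIOCSCANMODE, &ifr);
+ */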
+
+/**
+ * Get current state of CAN controller
+ *
+ * States are divided into main states and additional error indicators. A CAN
+ * controller is always in exactly one main state. CAN bus errors are
+ * registered by the CAN hardware and collected by the driver. There is one
+ * error indicator (bit) per error type. If this IOCTL is triggered the error
+ * types which occurred since the last call of this IOCTL are reported and
+ * thereafter the error indicators are cleared. See also
+ * @ref CAN_STATE "CAN controller states".
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                    (<TT>struct can_ifreq</TT>).
+ *                    <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                    <TT>ifr_ifru</TT> will be filled with an instance of
+ *                    @ref can_mode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+#define SIOCGCANSTATE		_IOWR(RTIOC_TYPE_CAN, 0x06, struct can_ifreq)
+
+/**
+ * Set special controller modes
+ *
+ * Various special controller modes can be OR'ed together (see
+ * @ref CAN_CTRLMODE for further information).
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 @ref can_ctrlmode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No valid baud rate, see @ref can_baudrate_t.
+ * - -EAGAIN: Request could not be successfully fulfilled. Try again.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting special controller modes is a configuration task. It should
+ * be done deliberately or otherwise CAN messages will likely be lost.
+ */
+#define SIOCSCANCTRLMODE	_IOW(RTIOC_TYPE_CAN, 0x07, struct can_ifreq)
+
+/**
+ * Get special controller modes
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> will be filled with an instance of
+ *                 @ref can_ctrlmode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No baud rate was set yet.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+#define SIOCGCANCTRLMODE	_IOWR(RTIOC_TYPE_CAN, 0x08, struct can_ifreq)
+
+/**
+ * Enable or disable storing a high precision timestamp upon reception of
+ * a CAN frame.
+ *
+ * A newly created socket takes no timestamps by default.
+ *
+ * @param [in] arg int variable, see @ref RTCAN_TIMESTAMPS "Timestamp switches"
+ *
+ * @return 0 on success.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note Activating timestamp taking only has an effect on CAN messages newly
+ * received from the bus. Frames that are already in the socket buffer carry
+ * no timestamp if timestamping was deactivated before. See @ref Recv "Receive"
+ * for more details.
+ */
+#define RTCAN_RTIOC_TAKE_TIMESTAMP _IOW(RTIOC_TYPE_CAN, 0x09, int)
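+
+/*
+ * Illustrative sketch (not part of the original header): enabling timestamps
+ * on socket "s" (hypothetical descriptor) with one of the timestamp switches
+ * referenced above; the switch is assumed to be passed by value, as suggested
+ * by the int request type:
+ *
+ *   ioctl(s, RTCAN_RTIOC_TAKE_TIMESTAMP, RTCAN_TAKE_TIMESTAMPS);
+ */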
+
+/**
+ * Specify a reception timeout for a socket
+ *
+ * Defines a timeout for all receive operations via a
+ * socket which will take effect when one of the @ref Recv "receive functions"
+ * is called without the @c MSG_DONTWAIT flag set.
+ *
+ * The default value for a newly created socket is an infinite timeout.
+ *
+ * @note The setting of the timeout value is not done atomically to avoid
+ * locks. Please set the value before receiving messages from the socket.
+ *
+ * @param [in] arg Pointer to @ref nanosecs_rel_t variable. The value is
+ *                interpreted as relative timeout in nanoseconds in case
+ *                of a positive value.
+ *                See @ref RTDM_TIMEOUT_xxx "Timeouts" for special timeouts.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTCAN_RTIOC_RCV_TIMEOUT	_IOW(RTIOC_TYPE_CAN, 0x0A, nanosecs_rel_t)
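+
+/*
+ * Illustrative sketch (not part of the original header): limiting blocking
+ * receive calls on socket "s" (hypothetical descriptor) to 100 ms:
+ *
+ *   nanosecs_rel_t timeout = 100000000;   (100 ms, in nanoseconds)
+ *
+ *   ioctl(s, RTCAN_RTIOC_RCV_TIMEOUT, &timeout);
+ */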
+
+/**
+ * Specify a transmission timeout for a socket
+ *
+ * Defines a timeout for all send operations via a
+ * socket which will take effect when one of the @ref Send "send functions"
+ * is called without the @c MSG_DONTWAIT flag set.
+ *
+ * The default value for a newly created socket is an infinite timeout.
+ *
+ * @note The setting of the timeout value is not done atomically to avoid
+ * locks. Please set the value before sending messages to the socket.
+ *
+ * @param [in] arg Pointer to @ref nanosecs_rel_t variable. The value is
+ *                interpreted as relative timeout in nanoseconds in case
+ *                of a positive value.
+ *                See @ref RTDM_TIMEOUT_xxx "Timeouts" for special timeouts.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTCAN_RTIOC_SND_TIMEOUT	_IOW(RTIOC_TYPE_CAN, 0x0B, nanosecs_rel_t)
+/** @} */
+
+#define CAN_ERR_DLC  8	/* dlc for error frames */
+
+/*!
+ * @anchor Errors @name Error mask
+ * Error class (mask) in @c can_id field of struct can_frame to
+ * be used with @ref CAN_RAW_ERR_FILTER.
+ *
+ * @b Note: Error reporting is hardware dependent and most CAN controllers
+ * report less detailed error conditions than the SJA1000.
+ *
+ * @b Note: In case of a bus-off error condition (@ref CAN_ERR_BUSOFF), the
+ * CAN controller is @b not restarted automatically. It is the application's
+ * responsibility to react appropriately, e.g. calling @ref CAN_MODE_START.
+ *
+ * @b Note: Bus error interrupts (@ref CAN_ERR_BUSERROR) are enabled when an
+ * application is calling a @ref Recv function on a socket listening
+ * on bus errors (using @ref CAN_RAW_ERR_FILTER). After one bus error has
+ * occurred, the interrupt will be disabled to allow the application time for
+ * error processing and to efficiently avoid bus error interrupt flooding.
+ * @{ */
+
+/** TX timeout (netdevice driver) */
+#define CAN_ERR_TX_TIMEOUT	0x00000001U
+
+/** Lost arbitration (see @ref Error0 "data[0]") */
+#define CAN_ERR_LOSTARB		0x00000002U
+
+/** Controller problems (see @ref Error1 "data[1]") */
+#define CAN_ERR_CRTL		0x00000004U
+
+/** Protocol violations (see @ref Error2 "data[2]",
+			     @ref Error3 "data[3]") */
+#define CAN_ERR_PROT		0x00000008U
+
+/** Transceiver status (see @ref Error4 "data[4]")    */
+#define CAN_ERR_TRX		0x00000010U
+
+/** Received no ACK on transmission */
+#define CAN_ERR_ACK		0x00000020U
+
+/** Bus off */
+#define CAN_ERR_BUSOFF		0x00000040U
+
+/** Bus error (may flood!) */
+#define CAN_ERR_BUSERROR	0x00000080U
+
+/** Controller restarted */
+#define CAN_ERR_RESTARTED	0x00000100U
+
+/** Omit EFF, RTR, ERR flags */
+#define CAN_ERR_MASK		0x1FFFFFFFU
+
+/** @} */
+
+/*!
+ * @anchor Error0 @name Arbitration lost error
+ * Error in the data[0] field of struct can_frame.
+ * @{ */
+/* arbitration lost in bit ... / data[0] */
+#define CAN_ERR_LOSTARB_UNSPEC	0x00 /**< unspecified */
+				     /**< else bit number in bitstream */
+/** @} */
+
+/*!
+ * @anchor Error1 @name Controller problems
+ * Error in the data[1] field of struct can_frame.
+ * @{ */
+/* error status of CAN-controller / data[1] */
+#define CAN_ERR_CRTL_UNSPEC	 0x00 /**< unspecified */
+#define CAN_ERR_CRTL_RX_OVERFLOW 0x01 /**< RX buffer overflow */
+#define CAN_ERR_CRTL_TX_OVERFLOW 0x02 /**< TX buffer overflow */
+#define CAN_ERR_CRTL_RX_WARNING	 0x04 /**< reached warning level for RX errors */
+#define CAN_ERR_CRTL_TX_WARNING	 0x08 /**< reached warning level for TX errors */
+#define CAN_ERR_CRTL_RX_PASSIVE	 0x10 /**< reached passive level for RX errors */
+#define CAN_ERR_CRTL_TX_PASSIVE	 0x20 /**< reached passive level for TX errors */
+/** @} */
+
+/*!
+ * @anchor Error2 @name Protocol error type
+ * Error in the data[2] field of struct can_frame.
+ * @{ */
+/* error in CAN protocol (type) / data[2] */
+#define CAN_ERR_PROT_UNSPEC	0x00 /**< unspecified */
+#define CAN_ERR_PROT_BIT	0x01 /**< single bit error */
+#define CAN_ERR_PROT_FORM	0x02 /**< frame format error */
+#define CAN_ERR_PROT_STUFF	0x04 /**< bit stuffing error */
+#define CAN_ERR_PROT_BIT0	0x08 /**< unable to send dominant bit */
+#define CAN_ERR_PROT_BIT1	0x10 /**< unable to send recessive bit */
+#define CAN_ERR_PROT_OVERLOAD	0x20 /**< bus overload */
+#define CAN_ERR_PROT_ACTIVE	0x40 /**< active error announcement */
+#define CAN_ERR_PROT_TX		0x80 /**< error occurred on transmission */
+/** @} */
+
+/*!
+ * @anchor Error3 @name Protocol error location
+ * Error in the data[3] field of struct can_frame.
+ * @{ */
+/* error in CAN protocol (location) / data[3] */
+#define CAN_ERR_PROT_LOC_UNSPEC	 0x00 /**< unspecified */
+#define CAN_ERR_PROT_LOC_SOF	 0x03 /**< start of frame */
+#define CAN_ERR_PROT_LOC_ID28_21 0x02 /**< ID bits 28 - 21 (SFF: 10 - 3) */
+#define CAN_ERR_PROT_LOC_ID20_18 0x06 /**< ID bits 20 - 18 (SFF: 2 - 0) */
+#define CAN_ERR_PROT_LOC_SRTR	 0x04 /**< substitute RTR (SFF: RTR) */
+#define CAN_ERR_PROT_LOC_IDE	 0x05 /**< identifier extension */
+#define CAN_ERR_PROT_LOC_ID17_13 0x07 /**< ID bits 17-13 */
+#define CAN_ERR_PROT_LOC_ID12_05 0x0F /**< ID bits 12-5 */
+#define CAN_ERR_PROT_LOC_ID04_00 0x0E /**< ID bits 4-0 */
+#define CAN_ERR_PROT_LOC_RTR	 0x0C /**< RTR */
+#define CAN_ERR_PROT_LOC_RES1	 0x0D /**< reserved bit 1 */
+#define CAN_ERR_PROT_LOC_RES0	 0x09 /**< reserved bit 0 */
+#define CAN_ERR_PROT_LOC_DLC	 0x0B /**< data length code */
+#define CAN_ERR_PROT_LOC_DATA	 0x0A /**< data section */
+#define CAN_ERR_PROT_LOC_CRC_SEQ 0x08 /**< CRC sequence */
+#define CAN_ERR_PROT_LOC_CRC_DEL 0x18 /**< CRC delimiter */
+#define CAN_ERR_PROT_LOC_ACK	 0x19 /**< ACK slot */
+#define CAN_ERR_PROT_LOC_ACK_DEL 0x1B /**< ACK delimiter */
+#define CAN_ERR_PROT_LOC_EOF	 0x1A /**< end of frame */
+#define CAN_ERR_PROT_LOC_INTERM	 0x12 /**< intermission */
+/** @} */
+
+/*!
+ * @anchor Error4 @name Transceiver status
+ * Error in the data[4] field of struct can_frame.
+ * @{ */
+/* error status of CAN-transceiver / data[4] */
+/*                                               CANH CANL */
+#define CAN_ERR_TRX_UNSPEC		0x00 /**< 0000 0000 */
+#define CAN_ERR_TRX_CANH_NO_WIRE	0x04 /**< 0000 0100 */
+#define CAN_ERR_TRX_CANH_SHORT_TO_BAT	0x05 /**< 0000 0101 */
+#define CAN_ERR_TRX_CANH_SHORT_TO_VCC	0x06 /**< 0000 0110 */
+#define CAN_ERR_TRX_CANH_SHORT_TO_GND	0x07 /**< 0000 0111 */
+#define CAN_ERR_TRX_CANL_NO_WIRE	0x40 /**< 0100 0000 */
+#define CAN_ERR_TRX_CANL_SHORT_TO_BAT	0x50 /**< 0101 0000 */
+#define CAN_ERR_TRX_CANL_SHORT_TO_VCC	0x60 /**< 0110 0000 */
+#define CAN_ERR_TRX_CANL_SHORT_TO_GND	0x70 /**< 0111 0000 */
+#define CAN_ERR_TRX_CANL_SHORT_TO_CANH	0x80 /**< 1000 0000 */
+/** @} */
+
+/** @} */
+
+#endif /* !_RTDM_UAPI_CAN_H */
+++ linux-patched/include/xenomai/rtdm/uapi/net.h	2022-03-21 12:58:32.232861238 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/spi.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2005-2011 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *  As a special exception to the GNU General Public license, the RTnet
+ *  project allows you to use this header file in unmodified form to produce
+ *  application programs executing in user-space which use RTnet services by
+ *  normal system calls. The resulting executable will not be covered by the
+ *  GNU General Public License merely as a result of this header file use.
+ *  Instead, this header file use will be considered normal use of RTnet and
+ *  not a "derived work" in the sense of the GNU General Public License.
+ *
+ *  This exception does not apply when the application code is built as a
+ *  static or dynamically loadable portion of the Linux kernel nor does the
+ *  exception override other reasons justifying application of the GNU General
+ *  Public License.
+ *
+ *  This exception applies only to the code released by the RTnet project
+ *  under the name RTnet and bearing this exception notice. If you copy code
+ *  from other sources into a copy of RTnet, the exception does not apply to
+ *  the code that you add in this way.
+ *
+ */
+
+#ifndef _RTDM_UAPI_NET_H
+#define _RTDM_UAPI_NET_H
+
+/* sub-classes: RTDM_CLASS_NETWORK */
+#define RTDM_SUBCLASS_RTNET     0
+
+#define RTIOC_TYPE_NETWORK      RTDM_CLASS_NETWORK
+
+/* RTnet-specific IOCTLs */
+#define RTNET_RTIOC_XMITPARAMS  _IOW(RTIOC_TYPE_NETWORK, 0x10, unsigned int)
+#define RTNET_RTIOC_PRIORITY    RTNET_RTIOC_XMITPARAMS  /* legacy */
+#define RTNET_RTIOC_TIMEOUT     _IOW(RTIOC_TYPE_NETWORK, 0x11, int64_t)
+/* RTNET_RTIOC_CALLBACK         _IOW(RTIOC_TYPE_NETWORK, 0x12, ...
+ * IOCTL only usable inside the kernel. */
+/* RTNET_RTIOC_NONBLOCK         _IOW(RTIOC_TYPE_NETWORK, 0x13, unsigned int)
+ * This IOCTL is no longer supported (and it was buggy anyway).
+ * Use RTNET_RTIOC_TIMEOUT with any negative timeout value instead. */
+#define RTNET_RTIOC_EXTPOOL     _IOW(RTIOC_TYPE_NETWORK, 0x14, unsigned int)
+#define RTNET_RTIOC_SHRPOOL     _IOW(RTIOC_TYPE_NETWORK, 0x15, unsigned int)
+
+/* socket transmission priorities */
+#define SOCK_MAX_PRIO           0
+#define SOCK_DEF_PRIO           SOCK_MAX_PRIO + \
+				    (SOCK_MIN_PRIO-SOCK_MAX_PRIO+1)/2
+#define SOCK_MIN_PRIO           SOCK_NRT_PRIO - 1
+#define SOCK_NRT_PRIO           31
+
+/* socket transmission channels */
+#define SOCK_DEF_RT_CHANNEL     0           /* default rt xmit channel     */
+#define SOCK_DEF_NRT_CHANNEL    1           /* default non-rt xmit channel */
+#define SOCK_USER_CHANNEL       2           /* first user-defined channel  */
+
+/* argument construction for RTNET_RTIOC_XMITPARAMS */
+#define SOCK_XMIT_PARAMS(priority, channel) ((priority) | ((channel) << 16))
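+
+/*
+ * Illustrative sketch (not part of the original header): requesting the
+ * highest transmission priority on the default real-time channel for
+ * socket "s" (hypothetical descriptor):
+ *
+ *   unsigned int xmit_params =
+ *       SOCK_XMIT_PARAMS(SOCK_MAX_PRIO, SOCK_DEF_RT_CHANNEL);
+ *
+ *   ioctl(s, RTNET_RTIOC_XMITPARAMS, &xmit_params);
+ */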
+
+#endif  /* !_RTDM_UAPI_NET_H */
+++ linux-patched/include/xenomai/rtdm/uapi/spi.h	2022-03-21 12:58:32.225861306 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/autotune.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_UAPI_SPI_H
+#define _RTDM_UAPI_SPI_H
+
+#include <linux/types.h>
+
+struct rtdm_spi_config {
+	__u32 speed_hz;
+	__u16 mode;
+	__u8 bits_per_word;
+};
+
+struct rtdm_spi_iobufs {
+	__u32 io_len;
+	__u32 i_offset;
+	__u32 o_offset;
+	__u32 map_len;
+};
+
+#define SPI_RTIOC_SET_CONFIG		_IOW(RTDM_CLASS_SPI, 0, struct rtdm_spi_config)
+#define SPI_RTIOC_GET_CONFIG		_IOR(RTDM_CLASS_SPI, 1, struct rtdm_spi_config)
+#define SPI_RTIOC_SET_IOBUFS		_IOR(RTDM_CLASS_SPI, 2, struct rtdm_spi_iobufs)
+#define SPI_RTIOC_TRANSFER		_IO(RTDM_CLASS_SPI, 3)
+#define SPI_RTIOC_TRANSFER_N		_IOR(RTDM_CLASS_SPI, 4, int)
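+
+/*
+ * Illustrative sketch (not part of the original header): configuring a SPI
+ * slave device through an already opened RTDM file descriptor "fd"
+ * (hypothetical), then triggering a transfer over buffers previously set up
+ * with SPI_RTIOC_SET_IOBUFS:
+ *
+ *   struct rtdm_spi_config config = {
+ *       .speed_hz = 1000000,
+ *       .mode = 0,
+ *       .bits_per_word = 8,
+ *   };
+ *
+ *   ioctl(fd, SPI_RTIOC_SET_CONFIG, &config);
+ *   ioctl(fd, SPI_RTIOC_TRANSFER);
+ */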
+
+#endif /* !_RTDM_UAPI_SPI_H */
+++ linux-patched/include/xenomai/rtdm/uapi/autotune.h	2022-03-21 12:58:32.217861384 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/ipc.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_UAPI_AUTOTUNE_H
+#define _RTDM_UAPI_AUTOTUNE_H
+
+#include <linux/types.h>
+
+#define RTDM_CLASS_AUTOTUNE		RTDM_CLASS_MISC
+#define RTDM_SUBCLASS_AUTOTUNE		0
+
+struct autotune_setup {
+	__u32 period;
+	__u32 quiet;
+};
+
+#define AUTOTUNE_RTIOC_IRQ		_IOW(RTDM_CLASS_AUTOTUNE, 0, struct autotune_setup)
+#define AUTOTUNE_RTIOC_KERN		_IOW(RTDM_CLASS_AUTOTUNE, 1, struct autotune_setup)
+#define AUTOTUNE_RTIOC_USER		_IOW(RTDM_CLASS_AUTOTUNE, 2, struct autotune_setup)
+#define AUTOTUNE_RTIOC_PULSE		_IOW(RTDM_CLASS_AUTOTUNE, 3, __u64)
+#define AUTOTUNE_RTIOC_RUN		_IOR(RTDM_CLASS_AUTOTUNE, 4, __u32)
+#define AUTOTUNE_RTIOC_RESET		_IO(RTDM_CLASS_AUTOTUNE, 5)
+
+#endif /* !_RTDM_UAPI_AUTOTUNE_H */
+++ linux-patched/include/xenomai/rtdm/ipc.h	2022-03-21 12:58:31.927864212 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/udd.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_RTDM_IPC_H
+#define _COBALT_RTDM_IPC_H
+
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/if.h>
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/ipc.h>
+
+#endif /* !_COBALT_RTDM_IPC_H */
+++ linux-patched/include/xenomai/rtdm/udd.h	2022-03-21 12:58:31.920864280 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/testing.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_UDD_H
+#define _COBALT_RTDM_UDD_H
+
+#include <linux/list.h>
+#include <rtdm/driver.h>
+#include <rtdm/uapi/udd.h>
+
+/**
+ * @ingroup rtdm_profiles
+ * @defgroup rtdm_udd User-space driver core
+ *
+ * This profile includes all mini-drivers sitting on top of the
+ * User-space Device Driver framework (UDD). The generic UDD core
+ * driver enables interrupt control and I/O memory access interfaces
+ * to user-space device drivers, as defined by the mini-drivers when
+ * registering.
+ *
+ * A mini-driver supplements the UDD core with ancillary functions for
+ * dealing with @ref udd_memory_region "memory mappings" and @ref
+ * udd_irq_handler "interrupt control" for a particular I/O
+ * card/device.
+ *
+ * UDD-compliant mini-drivers only have to provide the basic support
+ * for dealing with the interrupt sources present in the device, so
+ * that most of the device requests can be handled from a Xenomai
+ * application running in user-space. Typically, a mini-driver would
+ * handle the interrupt top-half, and the user-space application would
+ * handle the bottom-half.
+ *
+ * This profile is reminiscent of the UIO framework available with the
+ * Linux kernel, adapted to the dual kernel Cobalt environment.
+ *
+ * @{
+ */
+
+/**
+ * @anchor udd_irq_special
+ * Special IRQ values for udd_device.irq
+ *
+ * @{
+ */
+/**
+ * No IRQ managed. Passing this code implicitly disables all
+ * interrupt-related services, including control (disable/enable) and
+ * notification.
+ */
+#define UDD_IRQ_NONE     0
+/**
+ * IRQ directly managed from the mini-driver on top of the UDD
+ * core. The mini-driver is in charge of attaching the handler(s) to
+ * the IRQ(s) it manages, notifying the Cobalt threads waiting for IRQ
+ * events by calling the udd_notify_event() service.
+ */
+#define UDD_IRQ_CUSTOM   (-1)
+/** @} */
+
+/**
+ * @anchor udd_memory_types  @name Memory types for mapping
+ * Types of memory for mapping
+ *
+ * The UDD core implements a default ->mmap() handler which first
+ * attempts to hand over the request to the corresponding handler
+ * defined by the mini-driver. If not present, the UDD core
+ * establishes the mapping automatically, depending on the memory
+ * type defined for the region.
+ *
+ * @{
+ */
+/**
+ * No memory region. Use this type code to disable an entry in the
+ * array of memory mappings, i.e. udd_device.mem_regions[].
+ */
+#define UDD_MEM_NONE     0
+/**
+ * Physical I/O memory region. By default, the UDD core maps such
+ * memory to a virtual user range by calling the rtdm_mmap_iomem()
+ * service.
+ */
+#define UDD_MEM_PHYS     1
+/**
+ * Kernel logical memory region (e.g. kmalloc()). By default, the UDD
+ * core maps such memory to a virtual user range by calling the
+ * rtdm_mmap_kmem() service. */
+#define UDD_MEM_LOGICAL  2
+/**
+ * Virtual memory region with no direct physical mapping
+ * (e.g. vmalloc()). By default, the UDD core maps such memory to a
+ * virtual user range by calling the rtdm_mmap_vmem() service.
+ */
+#define UDD_MEM_VIRTUAL  3
+/** @} */
+
+#define UDD_NR_MAPS  5
+
+/**
+ * @anchor udd_memory_region
+ * UDD memory region descriptor.
+ *
+ * This descriptor defines the characteristics of a memory region
+ * declared to the UDD core by the mini-driver. All valid regions
+ * should be declared in the udd_device.mem_regions[] array,
+ * invalid/unassigned ones should bear the UDD_MEM_NONE type.
+ *
+ * The UDD core exposes each region via the mmap(2) interface to the
+ * application. To this end, a companion mapper device is created
+ * automatically when registering the mini-driver.
+ *
+ * The mapper device creates special files in the RTDM namespace for
+ * reaching the individual regions, which the application can open
+ * then map to its address space via the mmap(2) system call.
+ *
+ * For instance, declaring a region of physical memory at index #2 of
+ * the memory region array could be done as follows:
+ *
+ * @code
+ * static struct udd_device udd;
+ *
+ * static int foocard_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ * {
+ *      udd.device_name = "foocard";
+ *      ...
+ *      udd.mem_regions[2].name = "ADC";
+ *      udd.mem_regions[2].addr = pci_resource_start(dev, 1);
+ *      udd.mem_regions[2].len = pci_resource_len(dev, 1);
+ *      udd.mem_regions[2].type = UDD_MEM_PHYS;
+ *      ...
+ *      return udd_register_device(&udd);
+ * }
+ * @endcode
+ *
+ * This will make such region accessible via the mapper device using
+ * the following sequence of code (see note), via the default
+ * ->mmap() handler from the UDD core:
+ *
+ * @code
+ * int fd, fdm;
+ * void *p;
+ *
+ * fd = open("/dev/rtdm/foocard", O_RDWR);
+ * fdm = open("/dev/rtdm/foocard,mapper2", O_RDWR);
+ * p = mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_SHARED, fdm, 0);
+ * @endcode
+ *
+ * If no valid region has been declared in the
+ * udd_device.mem_regions[] array, no mapper device is created.
+ *
+ * @note The example code assumes that @ref cobalt_api POSIX symbol
+ * wrapping is in effect, so that RTDM performs the memory mapping
+ * operation (not the regular kernel).
+ */
+struct udd_memregion {
+	/** Name of the region (informational but required) */
+	const char *name;
+	/**
+	 * Start address of the region. This may be a physical or
+	 * virtual address, depending on the @ref udd_memory_types
+	 * "memory type".
+	 */
+	unsigned long addr;
+	/**
+	 * Length (in bytes) of the region. This value must be
+	 * PAGE_SIZE aligned.
+	 */
+	size_t len;
+	/**
+	 * Type of the region. See the discussion about @ref
+	 * udd_memory_types "UDD memory types" for possible values.
+	 */
+	int type;
+};
+
+/**
+ * @anchor udd_device
+ * UDD device descriptor.
+ *
+ * This descriptor defines the characteristics of a UDD-based
+ * mini-driver when registering via a call to udd_register_device().
+ */
+struct udd_device {
+	/**
+	 * Name of the device managed by the mini-driver, appears
+	 * automatically in the /dev/rtdm namespace upon creation.
+	 */
+	const char *device_name;
+	/**
+	 * Additional device flags (e.g. RTDM_EXCLUSIVE;
+	 * RTDM_NAMED_DEVICE may be omitted).
+	 */
+	int device_flags;
+	/**
+	 * Subclass code of the device managed by the mini-driver (see
+	 * RTDM_SUBCLASS_xxx definition in the @ref rtdm_profiles
+	 * "Device Profiles"). The main class code is pre-set to
+	 * RTDM_CLASS_UDD.
+	 */
+	int device_subclass;
+	struct {
+		/**
+		 * Ancillary open() handler, optional. See
+		 * rtdm_open_handler().
+		 *
+		 * @note This handler is called from secondary mode
+		 * only.
+		 */
+		int (*open)(struct rtdm_fd *fd, int oflags);
+		/**
+		 * Ancillary close() handler, optional. See
+		 * rtdm_close_handler().
+		 *
+		 * @note This handler is called from secondary mode
+		 * only.
+		 */
+		void (*close)(struct rtdm_fd *fd);
+		/**
+		 * Ancillary ioctl() handler, optional. See
+		 * rtdm_ioctl_handler().
+		 *
+		 * If this routine returns -ENOSYS, the default action
+		 * implemented by the UDD core for the corresponding
+		 * request will be applied, as if no ioctl handler had
+		 * been defined.
+		 *
+		 * @note This handler is called from primary mode
+		 * only.
+		 */
+		int (*ioctl)(struct rtdm_fd *fd,
+			     unsigned int request, void *arg);
+		/**
+		 * Ancillary mmap() handler for the mapper device,
+		 * optional. See rtdm_mmap_handler(). The mapper
+		 * device operates on a valid region defined in the @a
+		 * mem_regions[] array. A pointer to the region 
+		 * can be obtained by a call to udd_get_region().
+		 *
+		 * If this handler is NULL, the UDD core establishes
+		 * the mapping automatically, depending on the memory
+		 * type defined for the region.
+		 *
+		 * @note This handler is called from secondary mode
+		 * only.
+		 */
+		int (*mmap)(struct rtdm_fd *fd,
+			    struct vm_area_struct *vma);
+		/**
+		 * @anchor udd_irq_handler
+		 *
+		 * Ancillary handler for receiving interrupts. This
+		 * handler must be provided if the mini-driver hands
+		 * over IRQ handling to the UDD core, by setting the
+		 * @a irq field to a valid value, different from
+		 * UDD_IRQ_CUSTOM and UDD_IRQ_NONE.
+		 *
+		 * The ->interrupt() handler shall return one of the
+		 * following status codes:
+		 *
+		 * - RTDM_IRQ_HANDLED, if the mini-driver successfully
+		 * handled the IRQ. This flag can be combined with
+		 * RTDM_IRQ_DISABLE to prevent the Cobalt kernel from
+		 * re-enabling the interrupt line upon return,
+		 * otherwise it is re-enabled automatically.
+		 *
+		 * - RTDM_IRQ_NONE, if the interrupt does not match
+		 * any IRQ the mini-driver can handle.
+		 *
+		 * Once the ->interrupt() handler has returned, the
+		 * UDD core notifies user-space Cobalt threads waiting
+		 * for IRQ events (if any).
+		 *
+		 * @note This handler is called from primary mode
+		 * only.
+		 */
+		int (*interrupt)(struct udd_device *udd);
+	} ops;
+	/**
+	 * IRQ number. If valid, the UDD core manages the
+	 * corresponding interrupt line, installing a base handler.
+	 * Otherwise, a special value can be passed for declaring
+	 * @ref udd_irq_special "unmanaged IRQs".
+	 */
+	int irq;
+	/**
+	 * Array of memory regions defined by the device. The array
+	 * can be sparse, with some entries bearing the UDD_MEM_NONE
+	 * type interleaved with valid ones.  See the discussion about
+	 * @ref udd_memory_region "UDD memory regions".
+	 */
+	struct udd_memregion mem_regions[UDD_NR_MAPS];
+	/** Reserved to the UDD core. */
+	struct udd_reserved {
+		rtdm_irq_t irqh;
+		u32 event_count;
+		struct udd_signotify signfy;
+		struct rtdm_event pulse;
+		struct rtdm_driver driver;
+		struct rtdm_device device;
+		struct rtdm_driver mapper_driver;
+		struct udd_mapper {
+			struct udd_device *udd;
+			struct rtdm_device dev;
+		} mapdev[UDD_NR_MAPS];
+		char *mapper_name;
+		int nr_maps;
+	} __reserved;
+};
+
+int udd_register_device(struct udd_device *udd);
+
+int udd_unregister_device(struct udd_device *udd);
+
+struct udd_device *udd_get_device(struct rtdm_fd *fd);
+
+void udd_notify_event(struct udd_device *udd);
+
+void udd_enable_irq(struct udd_device *udd,
+		    rtdm_event_t *done);
+
+void udd_disable_irq(struct udd_device *udd,
+		     rtdm_event_t *done);
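+
+/*
+ * Illustrative registration sketch (all "foocard" names are hypothetical):
+ * a mini-driver which lets the UDD core manage a single interrupt line.
+ * foocard_interrupt() runs from primary mode; the UDD core notifies waiting
+ * Cobalt threads once it returns.
+ *
+ *   static int foocard_interrupt(struct udd_device *udd)
+ *   {
+ *       foocard_ack_irq();      (hypothetical device-level acknowledge)
+ *       return RTDM_IRQ_HANDLED;
+ *   }
+ *
+ *   static struct udd_device foocard_udd = {
+ *       .device_name = "foocard",
+ *       .irq = 42,              (board-specific IRQ number, made up here)
+ *       .ops = {
+ *           .interrupt = foocard_interrupt,
+ *       },
+ *   };
+ *
+ *   ...
+ *   ret = udd_register_device(&foocard_udd);
+ */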
+
+/** @} */
+
+#endif /* !_COBALT_RTDM_UDD_H */
+++ linux-patched/include/xenomai/rtdm/testing.h	2022-03-21 12:58:31.912864359 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/gpio.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_TESTING_H
+#define _COBALT_RTDM_TESTING_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/testing.h>
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+#include <rtdm/compat.h>
+
+struct compat_rttst_overall_bench_res {
+	struct rttst_bench_res result;
+	compat_uptr_t histogram_avg;
+	compat_uptr_t histogram_min;
+	compat_uptr_t histogram_max;
+};
+
+struct compat_rttst_heap_stathdr {
+	int nrstats;
+	compat_uptr_t buf;
+};
+
+#define RTTST_RTIOC_TMBENCH_STOP_COMPAT \
+	_IOWR(RTIOC_TYPE_TESTING, 0x11, struct compat_rttst_overall_bench_res)
+
+#endif	/* CONFIG_XENO_ARCH_SYS3264 */
+
+#endif /* !_COBALT_RTDM_TESTING_H */
+++ linux-patched/include/xenomai/rtdm/gpio.h	2022-03-21 12:58:31.905864427 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/compat.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_GPIO_H
+#define _COBALT_RTDM_GPIO_H
+
+#include <linux/list.h>
+#include <rtdm/driver.h>
+#include <rtdm/uapi/gpio.h>
+
+struct class;
+struct device_node;
+struct gpio_desc;
+
+struct rtdm_gpio_pin {
+	struct rtdm_device dev;
+	struct list_head next;
+	rtdm_irq_t irqh;
+	rtdm_event_t event;
+	char *name;
+	struct gpio_desc *desc;
+	nanosecs_abs_t timestamp;
+	bool monotonic_timestamp;
+};
+
+struct rtdm_gpio_chip {
+	struct gpio_chip *gc;
+	struct rtdm_driver driver;
+	struct class *devclass;
+	struct list_head next;
+	rtdm_lock_t lock;
+	struct rtdm_gpio_pin pins[0];
+};
+
+int rtdm_gpiochip_add(struct rtdm_gpio_chip *rgc,
+		      struct gpio_chip *gc,
+		      int gpio_subclass);
+
+struct rtdm_gpio_chip *
+rtdm_gpiochip_alloc(struct gpio_chip *gc,
+		    int gpio_subclass);
+
+void rtdm_gpiochip_remove(struct rtdm_gpio_chip *rgc);
+
+int rtdm_gpiochip_add_by_name(struct rtdm_gpio_chip *rgc,
+			      const char *label, int gpio_subclass);
+
+int rtdm_gpiochip_post_event(struct rtdm_gpio_chip *rgc,
+			     unsigned int offset);
+
+int rtdm_gpiochip_find(struct device_node *from, const char *label, int type);
+
+int rtdm_gpiochip_array_find(struct device_node *from, const char *label[],
+			     int nentries, int type);
+
+#ifdef CONFIG_OF
+
+int rtdm_gpiochip_scan_of(struct device_node *from,
+			  const char *compat, int type);
+
+int rtdm_gpiochip_scan_array_of(struct device_node *from,
+				const char *compat[],
+				int nentries, int type);
+#endif
+
+void rtdm_gpiochip_remove_by_type(int type);
+
+#endif /* !_COBALT_RTDM_GPIO_H */
+++ linux-patched/include/xenomai/rtdm/compat.h	2022-03-21 12:58:31.894864534 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/serial.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_COMPAT_H
+#define _COBALT_RTDM_COMPAT_H
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+#include <cobalt/kernel/compat.h>
+#include <rtdm/rtdm.h>
+
+struct compat_rtdm_getsockopt_args {
+	int level;
+	int optname;
+	compat_uptr_t optval;
+	compat_uptr_t optlen;
+};
+
+struct compat_rtdm_setsockopt_args {
+	int level;
+	int optname;
+	const compat_uptr_t optval;
+	socklen_t optlen;
+};
+
+struct compat_rtdm_getsockaddr_args {
+	compat_uptr_t addr;
+	compat_uptr_t addrlen;
+};
+
+struct compat_rtdm_setsockaddr_args {
+	const compat_uptr_t addr;
+	socklen_t addrlen;
+};
+
+#define _RTIOC_GETSOCKOPT_COMPAT	_IOW(RTIOC_TYPE_COMMON, 0x20,	\
+					     struct compat_rtdm_getsockopt_args)
+#define _RTIOC_SETSOCKOPT_COMPAT	_IOW(RTIOC_TYPE_COMMON, 0x21,	\
+					     struct compat_rtdm_setsockopt_args)
+#define _RTIOC_BIND_COMPAT		_IOW(RTIOC_TYPE_COMMON, 0x22,	\
+					     struct compat_rtdm_setsockaddr_args)
+#define _RTIOC_CONNECT_COMPAT		_IOW(RTIOC_TYPE_COMMON, 0x23,	\
+					     struct compat_rtdm_setsockaddr_args)
+#define _RTIOC_ACCEPT_COMPAT		_IOW(RTIOC_TYPE_COMMON, 0x25,	\
+					     struct compat_rtdm_getsockaddr_args)
+#define _RTIOC_GETSOCKNAME_COMPAT	_IOW(RTIOC_TYPE_COMMON, 0x26,	\
+					     struct compat_rtdm_getsockaddr_args)
+#define _RTIOC_GETPEERNAME_COMPAT	_IOW(RTIOC_TYPE_COMMON, 0x27,	\
+					     struct compat_rtdm_getsockaddr_args)
+
+#define __COMPAT_CASE(__op)		: case __op
+
+#else	/* !CONFIG_XENO_ARCH_SYS3264 */
+
+#define __COMPAT_CASE(__op)
+
+#endif	/* !CONFIG_XENO_ARCH_SYS3264 */
+
+#define COMPAT_CASE(__op)	case __op __COMPAT_CASE(__op  ## _COMPAT)
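+
+/*
+ * Illustrative sketch: inside an ioctl handler, COMPAT_CASE() emits both the
+ * native and the _COMPAT case labels when CONFIG_XENO_ARCH_SYS3264 is
+ * enabled, and only the native one otherwise (handle_getsockopt() is a
+ * hypothetical helper):
+ *
+ *   switch (request) {
+ *   COMPAT_CASE(_RTIOC_GETSOCKOPT):
+ *           ret = handle_getsockopt(fd, arg);
+ *           break;
+ *   default:
+ *           ret = -EINVAL;
+ *   }
+ */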
+
+#endif /* !_COBALT_RTDM_COMPAT_H */
+++ linux-patched/include/xenomai/rtdm/serial.h	2022-03-21 12:58:31.887864602 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/driver.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_SERIAL_H
+#define _COBALT_RTDM_SERIAL_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/serial.h>
+
+#endif /* !_COBALT_RTDM_SERIAL_H */
+++ linux-patched/include/xenomai/rtdm/driver.h	2022-03-21 12:58:31.879864680 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/rtdm.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, driver API header
+ *
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * @ingroup driverapi
+ */
+#ifndef _COBALT_RTDM_DRIVER_H
+#define _COBALT_RTDM_DRIVER_H
+
+#include <asm/atomic.h>
+#include <linux/cpumask.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/wait.h>
+#include <linux/notifier.h>
+#include <pipeline/lock.h>
+#include <pipeline/inband_work.h>
+#include <xenomai/version.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/select.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/init.h>
+#include <cobalt/kernel/ancillaries.h>
+#include <cobalt/kernel/tree.h>
+#include <rtdm/fd.h>
+#include <rtdm/rtdm.h>
+
+/* debug support */
+#include <cobalt/kernel/assert.h>
+#include <trace/events/cobalt-rtdm.h>
+#ifdef CONFIG_PCI
+#include <asm-generic/xenomai/pci_ids.h>
+#endif /* CONFIG_PCI */
+#include <asm/xenomai/syscall.h>
+
+struct class;
+typedef struct xnselector rtdm_selector_t;
+enum rtdm_selecttype;
+
+/*!
+ * @addtogroup rtdm_device_register
+ * @{
+ */
+
+/*!
+ * @anchor dev_flags @name Device Flags
+ * Static flags describing a RTDM device
+ * @{
+ */
+/** If set, only a single instance of the device can be requested by an
+ *  application. */
+#define RTDM_EXCLUSIVE			0x0001
+
+/**
+ * Use fixed minor provided in the rtdm_device description for
+ * registering. If this flag is absent, the RTDM core assigns minor
+ * numbers to devices managed by a driver in order of registration.
+ */
+#define RTDM_FIXED_MINOR		0x0002
+
+/** If set, the device is addressed via a clear-text name. */
+#define RTDM_NAMED_DEVICE		0x0010
+
+/** If set, the device is addressed via a combination of protocol ID and
+ *  socket type. */
+#define RTDM_PROTOCOL_DEVICE		0x0020
+
+/** Mask selecting the device type. */
+#define RTDM_DEVICE_TYPE_MASK		0x00F0
+
+/** Flag indicating a secure variant of RTDM (not supported here) */
+#define RTDM_SECURE_DEVICE		0x80000000
+/** @} Device Flags */
+
+/** Maximum number of named devices per driver. */
+#define RTDM_MAX_MINOR	4096
+
+/** @} rtdm_device_register */
+
+/*!
+ * @addtogroup rtdm_sync
+ * @{
+ */
+
+/*!
+ * @anchor RTDM_SELECTTYPE_xxx   @name RTDM_SELECTTYPE_xxx
+ * Event types select can bind to
+ * @{
+ */
+enum rtdm_selecttype {
+	/** Select input data availability events */
+	RTDM_SELECTTYPE_READ = XNSELECT_READ,
+
+	/** Select output buffer availability events */
+	RTDM_SELECTTYPE_WRITE = XNSELECT_WRITE,
+
+	/** Select exceptional events */
+	RTDM_SELECTTYPE_EXCEPT = XNSELECT_EXCEPT
+};
+/** @} RTDM_SELECTTYPE_xxx */
+
+/** @} rtdm_sync */
+
+/**
+ * @brief Device context
+ *
+ * A device context structure is associated with every open device instance.
+ * RTDM takes care of its creation and destruction and passes it to the
+ * operation handlers when being invoked.
+ *
+ * Drivers can attach arbitrary data immediately after the official
+ * structure.  The size of this data is provided via
+ * rtdm_driver.context_size during device registration.
+ */
+struct rtdm_dev_context {
+	struct rtdm_fd fd;
+
+	/** Reference to owning device */
+	struct rtdm_device *device;
+
+	/** Begin of driver defined context data structure */
+	char dev_private[0];
+};
+
+static inline struct rtdm_dev_context *rtdm_fd_to_context(struct rtdm_fd *fd)
+{
+	return container_of(fd, struct rtdm_dev_context, fd);
+}
+
+/**
+ * Locate the driver private area associated to a device context structure
+ *
+ * @param[in] fd File descriptor structure associated with opened
+ * device instance
+ *
+ * @return The address of the private driver area associated to @a
+ * file descriptor.
+ */
+static inline void *rtdm_fd_to_private(struct rtdm_fd *fd)
+{
+	return &rtdm_fd_to_context(fd)->dev_private[0];
+}
+
+/**
+ * Locate a device file descriptor structure from its driver private area
+ *
+ * @param[in] dev_private Address of a private context area
+ *
+ * @return The address of the file descriptor structure defining @a
+ * dev_private.
+ */
+static inline struct rtdm_fd *rtdm_private_to_fd(void *dev_private)
+{
+	struct rtdm_dev_context *ctx;
+	ctx = container_of(dev_private, struct rtdm_dev_context, dev_private);
+	return &ctx->fd;
+}
+
+/**
+ * Tell whether the passed file descriptor belongs to an application.
+ *
+ * @param[in] fd File descriptor
+ *
+ * @return true if passed file descriptor belongs to an application,
+ * false otherwise.
+ */
+static inline bool rtdm_fd_is_user(struct rtdm_fd *fd)
+{
+	return rtdm_fd_owner(fd) != &cobalt_kernel_ppd;
+}
+
+/**
+ * Locate a device structure from a file descriptor.
+ *
+ * @param[in] fd File descriptor
+ *
+ * @return The address of the device structure to which this file
+ * descriptor is attached.
+ */
+static inline struct rtdm_device *rtdm_fd_device(struct rtdm_fd *fd)
+{
+	return rtdm_fd_to_context(fd)->device;
+}
+
+/**
+ * @brief RTDM profile information
+ *
+ * This descriptor details the profile information associated to a
+ * RTDM class of device managed by a driver.
+ *
+ * @anchor rtdm_profile_info
+ */
+struct rtdm_profile_info {
+	/** Device class name */
+	const char *name;
+	/** Device class ID, see @ref RTDM_CLASS_xxx */
+	int class_id;
+	/** Device sub-class, see RTDM_SUBCLASS_xxx definition in the
+	    @ref rtdm_profiles "Device Profiles" */
+	int subclass_id;
+	/** Supported device profile version */
+	int version;
+	/** Reserved */
+	unsigned int magic;
+	struct module *owner;
+	struct class *kdev_class;
+};
+
+struct rtdm_driver;
+
+/**
+ * @brief RTDM state management handlers
+ */
+struct rtdm_sm_ops {
+	/** Handler called upon transition to COBALT_STATE_WARMUP */ 
+	int (*start)(struct rtdm_driver *drv);
+	/** Handler called upon transition to COBALT_STATE_TEARDOWN */ 
+	int (*stop)(struct rtdm_driver *drv);
+};
+
+/**
+ * @brief RTDM driver
+ *
+ * This descriptor describes a RTDM device driver. The structure holds
+ * runtime data, therefore it must reside in writable memory.
+ */
+struct rtdm_driver {
+	/**
+	 * Class profile information. The RTDM_PROFILE_INFO() macro @b
+	 * must be used for filling up this field.
+	 * @anchor rtdm_driver_profile
+	 */
+	struct rtdm_profile_info profile_info;
+	/**
+	 * Device flags, see @ref dev_flags "Device Flags" for details
+	 * @anchor rtdm_driver_flags
+	 */
+	int device_flags;
+	/**
+	 * Size of the private memory area the core should
+	 * automatically allocate for each open file descriptor, which
+	 * is usable for storing the context data associated to each
+	 * connection. The allocated memory is zero-initialized. The
+	 * start of this area can be retrieved by a call to
+	 * rtdm_fd_to_private().
+	 */
+	size_t context_size;
+	/** Protocol device identification: protocol family (PF_xxx) */
+	int protocol_family;
+	/** Protocol device identification: socket type (SOCK_xxx) */
+	int socket_type;
+	/** I/O operation handlers */
+	struct rtdm_fd_ops ops;
+	/** State management handlers */
+	struct rtdm_sm_ops smops;
+	/**
+	 * Count of devices this driver manages. This value is used to
+	 * allocate a chrdev region for named devices.
+	 */
+	int device_count;
+	/** Base minor for named devices. */
+	int base_minor;
+	/** Reserved area */
+	struct {
+		union {
+			struct {
+				struct cdev cdev;
+				int major;
+			} named;
+		};
+		atomic_t refcount;
+		struct notifier_block nb_statechange;
+		DECLARE_BITMAP(minor_map, RTDM_MAX_MINOR);
+	};
+};
+
+#define RTDM_CLASS_MAGIC	0x8284636c
+
+/**
+ * @brief Initializer for class profile information.
+ *
+ * This macro must be used to fill in the @ref rtdm_profile_info
+ * "class profile information" field from a RTDM driver.
+ *
+ * @param __name Class name (unquoted).
+ *
+ * @param __id Class major identification number
+ * (profile_version.class_id).
+ *
+ * @param __subid Class minor identification number
+ * (profile_version.subclass_id).
+ *
+ * @param __version Profile version number.
+ *
+ * @note See @ref rtdm_profiles "Device Profiles".
+ */
+#define RTDM_PROFILE_INFO(__name, __id, __subid, __version)	\
+{								\
+	.name = ( # __name ),					\
+	.class_id = (__id),					\
+	.subclass_id = (__subid),				\
+	.version = (__version),					\
+	.magic = ~RTDM_CLASS_MAGIC,				\
+	.owner = THIS_MODULE,					\
+	.kdev_class = NULL,					\
+}
+
+int rtdm_drv_set_sysclass(struct rtdm_driver *drv, struct class *cls);
+
+/**
+ * @brief RTDM device
+ *
+ * This descriptor describes a RTDM device instance. The structure
+ * holds runtime data, therefore it must reside in writable memory.
+ */
+struct rtdm_device {
+	/** Device driver. */
+	struct rtdm_driver *driver;
+	/** Driver definable device data */
+	void *device_data;
+	/**
+	 * Device label template for composing the device name. A
+	 * limited printf-like format string is assumed, with a
+	 * provision for replacing the first %d/%i placeholder found
+	 * in the string by the device minor number.  It is up to the
+	 * driver to actually mention this placeholder or not,
+	 * depending on the naming convention for its devices.  For
+	 * named devices, the corresponding device node will
+	 * automatically appear in the /dev/rtdm hierarchy with
+	 * hotplug-enabled device filesystems (DEVTMPFS).
+	 */
+	const char *label;
+	/**
+	 * Minor number of the device. If RTDM_FIXED_MINOR is present
+	 * in the driver flags, the value stored in this field is used
+	 * verbatim by rtdm_dev_register(). Otherwise, the RTDM core
+	 * automatically assigns minor numbers to all devices managed
+	 * by the driver referred to by @a driver, in order of
+	 * registration, storing the resulting values into this field.
+	 *
+	 * Device nodes created for named devices in the Linux /dev
+	 * hierarchy are assigned this minor number.
+	 *
+	 * The minor number of the current device handling an I/O
+	 * request can be retrieved by a call to rtdm_fd_minor().
+	 */
+	int minor;
+	/** Reserved area. */
+	struct {
+		unsigned int magic;
+		char *name;
+		union {
+			struct {
+				xnhandle_t handle;
+			} named;
+			struct {
+				struct xnid id;
+			} proto;
+		};
+		dev_t rdev;
+		struct device *kdev;
+		struct class *kdev_class;
+		atomic_t refcount;
+		struct rtdm_fd_ops ops;
+		wait_queue_head_t putwq;
+		struct list_head openfd_list;
+	};
+};
+
+/* --- device registration --- */
+
+int rtdm_dev_register(struct rtdm_device *device);
+
+void rtdm_dev_unregister(struct rtdm_device *device);
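+
+/*
+ * Minimal registration sketch (illustrative only, every "foo" name is made
+ * up): a named-device driver exposing a single node, with per-fd context
+ * storage sized from a hypothetical struct foo_context:
+ *
+ *   static struct rtdm_driver foo_driver = {
+ *       .profile_info = RTDM_PROFILE_INFO(foo, RTDM_CLASS_EXPERIMENTAL, 0, 1),
+ *       .device_flags = RTDM_NAMED_DEVICE,
+ *       .device_count = 1,
+ *       .context_size = sizeof(struct foo_context),
+ *       .ops = {
+ *           .open = foo_open,
+ *           .close = foo_close,
+ *           .ioctl_rt = foo_ioctl,
+ *       },
+ *   };
+ *
+ *   static struct rtdm_device foo_device = {
+ *       .driver = &foo_driver,
+ *       .label = "foo%d",
+ *   };
+ *
+ *   ...
+ *   ret = rtdm_dev_register(&foo_device);
+ */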
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+
+static inline struct device *rtdm_dev_to_kdev(struct rtdm_device *device)
+{
+	return device->kdev;
+}
+
+/* --- clock services --- */
+static inline nanosecs_abs_t rtdm_clock_read(void)
+{
+	return xnclock_read_realtime(&nkclock);
+}
+
+static inline nanosecs_abs_t rtdm_clock_read_monotonic(void)
+{
+	return xnclock_read_monotonic(&nkclock);
+}
+#endif /* !DOXYGEN_CPP */
+
+/* --- timeout sequences */
+
+typedef nanosecs_abs_t rtdm_toseq_t;
+
+void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout);
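+
+/*
+ * Usage sketch (hypothetical driver state): a timeout sequence keeps a
+ * single deadline consistent across several consecutive blocking calls.
+ *
+ *	rtdm_toseq_t toseq;
+ *	int ret;
+ *
+ *	rtdm_toseq_init(&toseq, timeout);
+ *	ret = rtdm_event_timedwait(&ctx->evt, timeout, &toseq);
+ *	if (ret == 0)
+ *		ret = rtdm_sem_timeddown(&ctx->sem, timeout, &toseq);
+ */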
+
+/*!
+ * @addtogroup rtdm_sync
+ * @{
+ */
+
+/*!
+ * @defgroup rtdm_sync_biglock Big dual kernel lock
+ * @{
+ */
+
+/**
+ * @brief Enter atomic section (dual kernel only)
+ *
+ * This call opens a fully atomic section, serializing execution with
+ * respect to all interrupt handlers (including for real-time IRQs)
+ * and Xenomai threads running on all CPUs.
+ *
+ * @param __context name of local variable to store the context
+ * in. This variable, updated by the real-time core, will hold the
+ * information required to leave the atomic section properly.
+ *
+ * @note Atomic sections may be nested. The caller is allowed to sleep
+ * on a blocking Xenomai service from primary mode within an atomic
+ * section delimited by cobalt_atomic_enter/cobalt_atomic_leave calls.
+ * Conversely, sleeping on a regular Linux kernel service while
+ * holding such a lock is NOT valid.
+ *
+ * @note Since the strongest lock is acquired by this service, it can
+ * be used to synchronize real-time and non-real-time contexts.
+ *
+ * @warning This service is not portable to the Mercury core, and
+ * should be restricted to Cobalt-specific use cases, mainly for the
+ * purpose of porting existing dual-kernel drivers which still depend
+ * on the obsolete RTDM_EXECUTE_ATOMICALLY() construct.
+ */
+#define cobalt_atomic_enter(__context)				\
+	do {							\
+		xnlock_get_irqsave(&nklock, (__context));	\
+		xnsched_lock();					\
+	} while (0)
+
+/**
+ * @brief Leave atomic section (dual kernel only)
+ *
+ * This call closes an atomic section previously opened by a call to
+ * cobalt_atomic_enter(), restoring the preemption and interrupt state
+ * which prevailed prior to entering the exited section.
+ *
+ * @param __context name of local variable which stored the context.
+ *
+ * @warning This service is not portable to the Mercury core, and
+ * should be restricted to Cobalt-specific use cases.
+ */
+#define cobalt_atomic_leave(__context)				\
+	do {							\
+		xnsched_unlock();				\
+		xnlock_put_irqrestore(&nklock, (__context));	\
+	} while (0)
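+
+/*
+ * Minimal usage sketch (hypothetical driver code); the context
+ * variable is a plain spl_t local:
+ *
+ *	spl_t s;
+ *
+ *	cobalt_atomic_enter(s);
+ *	// ... update driver state, possibly sleep on a Xenomai
+ *	// service from primary mode ...
+ *	cobalt_atomic_leave(s);
+ */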
+
+/**
+ * @brief Execute code block atomically (DEPRECATED)
+ *
+ * Generally, it is illegal to suspend the current task by calling
+ * rtdm_task_sleep(), rtdm_event_wait(), etc. while holding a spinlock. In
+ * contrast, this macro allows combining several operations, including
+ * potentially rescheduling calls, into a code block that is atomic with
+ * respect to other RTDM_EXECUTE_ATOMICALLY() blocks. The macro is a
+ * light-weight alternative to protecting code blocks via mutexes, and it
+ * can even be used to synchronise real-time and non-real-time contexts.
+ *
+ * @param code_block Commands to be executed atomically
+ *
+ * @note It is not allowed to leave the code block explicitly by using
+ * @c break, @c return, @c goto, etc. This would leave the global lock,
+ * which is held during the code block execution, in an inconsistent
+ * state. Moreover, do not embed complex operations in the code block.
+ * Consider that they will be executed under preemption lock with
+ * interrupts switched off. Also note that invoking rescheduling calls
+ * may break the atomicity until the task gains the CPU again.
+ *
+ * @coretags{unrestricted}
+ *
+ * @deprecated This construct will be phased out in Xenomai
+ * 3.0. Please use rtdm_waitqueue services instead.
+ *
+ * @see cobalt_atomic_enter().
+ */
+#ifdef DOXYGEN_CPP /* Beautify doxygen output */
+#define RTDM_EXECUTE_ATOMICALLY(code_block)	\
+{						\
+	<ENTER_ATOMIC_SECTION>			\
+	code_block;				\
+	<LEAVE_ATOMIC_SECTION>			\
+}
+#else /* This is how it really works */
+static inline __attribute__((deprecated)) void
+rtdm_execute_atomically(void) { }
+
+#define RTDM_EXECUTE_ATOMICALLY(code_block)		\
+{							\
+	spl_t __rtdm_s;					\
+							\
+	rtdm_execute_atomically();			\
+	xnlock_get_irqsave(&nklock, __rtdm_s);		\
+	xnsched_lock();					\
+	code_block;					\
+	xnsched_unlock();				\
+	xnlock_put_irqrestore(&nklock, __rtdm_s);	\
+}
+#endif
+
+/** @} Big dual kernel lock */
+
+/**
+ * @defgroup rtdm_sync_spinlock Spinlock with preemption deactivation
+ * @{
+ */
+
+/**
+ * Static lock initialisation
+ */
+#define RTDM_LOCK_UNLOCKED(__name)	PIPELINE_SPIN_LOCK_UNLOCKED(__name)
+
+#define DEFINE_RTDM_LOCK(__name)		\
+	rtdm_lock_t __name = RTDM_LOCK_UNLOCKED(__name)
+
+/** Lock variable */
+typedef pipeline_spinlock_t rtdm_lock_t;
+
+/** Variable to save the context while holding a lock */
+typedef unsigned long rtdm_lockctx_t;
+
+/**
+ * Dynamic lock initialisation
+ *
+ * @param lock Address of lock variable
+ *
+ * @coretags{task-unrestricted}
+ */
+static inline void rtdm_lock_init(rtdm_lock_t *lock)
+{
+	raw_spin_lock_init(lock);
+}
+
+/**
+ * Acquire lock from non-preemptible contexts
+ *
+ * @param lock Address of lock variable
+ *
+ * @coretags{unrestricted}
+ */
+static inline void rtdm_lock_get(rtdm_lock_t *lock)
+{
+	XENO_BUG_ON(COBALT, !spltest());
+	raw_spin_lock(lock);
+	xnsched_lock();
+}
+
+/**
+ * Release lock without preemption restoration
+ *
+ * @param lock Address of lock variable
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+static inline void rtdm_lock_put(rtdm_lock_t *lock)
+{
+	raw_spin_unlock(lock);
+	xnsched_unlock();
+}
+
+/**
+ * Acquire lock and disable preemption by stalling the head domain.
+ *
+ * @param __lock Address of lock variable
+ * @param __context name of local variable to store the context in
+ *
+ * @coretags{unrestricted}
+ */
+#define rtdm_lock_get_irqsave(__lock, __context)	\
+	((__context) = __rtdm_lock_get_irqsave(__lock))
+
+static inline rtdm_lockctx_t __rtdm_lock_get_irqsave(rtdm_lock_t *lock)
+{
+	rtdm_lockctx_t context;
+
+	splhigh(context);
+	raw_spin_lock(lock);
+	xnsched_lock();
+
+	return context;
+}
+
+/**
+ * Release lock and restore preemption state
+ *
+ * @param lock Address of lock variable
+ * @param context name of local variable which stored the context
+ *
+ * @coretags{unrestricted}
+ */
+static inline
+void rtdm_lock_put_irqrestore(rtdm_lock_t *lock, rtdm_lockctx_t context)
+{
+	raw_spin_unlock(lock);
+	xnsched_unlock();
+	splexit(context);
+}
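+
+/*
+ * Usage sketch (hypothetical names): protecting data shared with an
+ * interrupt handler.
+ *
+ *	static DEFINE_RTDM_LOCK(foo_lock);
+ *
+ *	rtdm_lockctx_t ctx;
+ *
+ *	rtdm_lock_get_irqsave(&foo_lock, ctx);
+ *	// ... update shared state ...
+ *	rtdm_lock_put_irqrestore(&foo_lock, ctx);
+ */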
+
+/**
+ * Disable preemption locally
+ *
+ * @param __context name of local variable to store the context in
+ *
+ * @coretags{unrestricted}
+ */
+#define rtdm_lock_irqsave(__context)	\
+	splhigh(__context)
+
+/**
+ * Restore preemption state
+ *
+ * @param __context name of local variable which stored the context
+ *
+ * @coretags{unrestricted}
+ */
+#define rtdm_lock_irqrestore(__context)	\
+	splexit(__context)
+
+/** @} Spinlock with Preemption Deactivation */
+
+#ifndef DOXYGEN_CPP
+
+struct rtdm_waitqueue {
+	struct xnsynch wait;
+};
+typedef struct rtdm_waitqueue rtdm_waitqueue_t;
+
+#define RTDM_WAITQUEUE_INITIALIZER(__name) {		 \
+	    .wait = XNSYNCH_WAITQUEUE_INITIALIZER((__name).wait), \
+	}
+
+#define DEFINE_RTDM_WAITQUEUE(__name)				\
+	struct rtdm_waitqueue __name = RTDM_WAITQUEUE_INITIALIZER(__name)
+
+#define DEFINE_RTDM_WAITQUEUE_ONSTACK(__name)	\
+	DEFINE_RTDM_WAITQUEUE(__name)
+
+static inline void rtdm_waitqueue_init(struct rtdm_waitqueue *wq)
+{
+	*wq = (struct rtdm_waitqueue)RTDM_WAITQUEUE_INITIALIZER(*wq);
+}
+
+static inline void rtdm_waitqueue_destroy(struct rtdm_waitqueue *wq)
+{
+	xnsynch_destroy(&wq->wait);
+}
+
+static inline int __rtdm_dowait(struct rtdm_waitqueue *wq,
+				nanosecs_rel_t timeout, xntmode_t timeout_mode)
+{
+	int ret;
+	
+	ret = xnsynch_sleep_on(&wq->wait, timeout, timeout_mode);
+	if (ret & XNBREAK)
+		return -EINTR;
+	if (ret & XNTIMEO)
+		return -ETIMEDOUT;
+	if (ret & XNRMID)
+		return -EIDRM;
+	return 0;
+}
+
+static inline int __rtdm_timedwait(struct rtdm_waitqueue *wq,
+				   nanosecs_rel_t timeout, rtdm_toseq_t *toseq)
+{
+	if (toseq && timeout > 0)
+		return __rtdm_dowait(wq, *toseq, XN_ABSOLUTE);
+
+	return __rtdm_dowait(wq, timeout, XN_RELATIVE);
+}
+
+#define rtdm_timedwait_condition_locked(__wq, __cond, __timeout, __toseq) \
+	({								\
+		int __ret = 0;						\
+		while (__ret == 0 && !(__cond))				\
+			__ret = __rtdm_timedwait(__wq, __timeout, __toseq); \
+		__ret;							\
+	})
+
+#define rtdm_wait_condition_locked(__wq, __cond)			\
+	({								\
+		int __ret = 0;						\
+		while (__ret == 0 && !(__cond))				\
+			__ret = __rtdm_dowait(__wq,			\
+					      XN_INFINITE, XN_RELATIVE); \
+		__ret;							\
+	})
+
+#define rtdm_timedwait_condition(__wq, __cond, __timeout, __toseq)	\
+	({								\
+		spl_t __s;						\
+		int __ret;						\
+		xnlock_get_irqsave(&nklock, __s);			\
+		__ret = rtdm_timedwait_condition_locked(__wq, __cond,	\
+					      __timeout, __toseq);	\
+		xnlock_put_irqrestore(&nklock, __s);			\
+		__ret;							\
+	})
+
+#define rtdm_timedwait(__wq, __timeout, __toseq)			\
+	__rtdm_timedwait(__wq, __timeout, __toseq)
+
+#define rtdm_timedwait_locked(__wq, __timeout, __toseq)			\
+	rtdm_timedwait(__wq, __timeout, __toseq)
+
+#define rtdm_wait_condition(__wq, __cond)				\
+	({								\
+		spl_t __s;						\
+		int __ret;						\
+		xnlock_get_irqsave(&nklock, __s);			\
+		__ret = rtdm_wait_condition_locked(__wq, __cond);	\
+		xnlock_put_irqrestore(&nklock, __s);			\
+		__ret;							\
+	})
+
+#define rtdm_wait(__wq)							\
+	__rtdm_dowait(__wq, XN_INFINITE, XN_RELATIVE)
+
+#define rtdm_wait_locked(__wq)  rtdm_wait(__wq)
+
+#define rtdm_waitqueue_lock(__wq, __context)  cobalt_atomic_enter(__context)
+
+#define rtdm_waitqueue_unlock(__wq, __context)  cobalt_atomic_leave(__context)
+
+#define rtdm_waitqueue_signal(__wq)					\
+	({								\
+		struct xnthread *__waiter;				\
+		__waiter = xnsynch_wakeup_one_sleeper(&(__wq)->wait);	\
+		xnsched_run();						\
+		__waiter != NULL;					\
+	})
+
+#define __rtdm_waitqueue_flush(__wq, __reason)				\
+	({								\
+		int __ret;						\
+		__ret = xnsynch_flush(&(__wq)->wait, __reason);		\
+		xnsched_run();						\
+		__ret == XNSYNCH_RESCHED;				\
+	})
+
+#define rtdm_waitqueue_broadcast(__wq)	\
+	__rtdm_waitqueue_flush(__wq, 0)
+
+#define rtdm_waitqueue_flush(__wq)	\
+	__rtdm_waitqueue_flush(__wq, XNBREAK)
+
+#define rtdm_waitqueue_wakeup(__wq, __waiter)				\
+	do {								\
+		xnsynch_wakeup_this_sleeper(&(__wq)->wait, __waiter);	\
+		xnsched_run();						\
+	} while (0)
+
+#define rtdm_for_each_waiter(__pos, __wq)		\
+	xnsynch_for_each_sleeper(__pos, &(__wq)->wait)
+
+#define rtdm_for_each_waiter_safe(__pos, __tmp, __wq)	\
+	xnsynch_for_each_sleeper_safe(__pos, __tmp, &(__wq)->wait)
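+
+/*
+ * Waiter/waker sketch (hypothetical driver state; the waiting side
+ * must run from primary mode):
+ *
+ *	static DEFINE_RTDM_WAITQUEUE(foo_wq);
+ *	static int foo_ready;
+ *
+ *	// waiter
+ *	spl_t s;
+ *	int ret;
+ *
+ *	rtdm_waitqueue_lock(&foo_wq, s);
+ *	ret = rtdm_wait_condition_locked(&foo_wq, foo_ready);
+ *	rtdm_waitqueue_unlock(&foo_wq, s);
+ *
+ *	// waker
+ *	rtdm_waitqueue_lock(&foo_wq, s);
+ *	foo_ready = 1;
+ *	rtdm_waitqueue_broadcast(&foo_wq);
+ *	rtdm_waitqueue_unlock(&foo_wq, s);
+ */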
+
+#endif /* !DOXYGEN_CPP */
+
+/** @} rtdm_sync */
+
+/* --- Interrupt management services --- */
+/*!
+ * @addtogroup rtdm_irq
+ * @{
+ */
+
+typedef struct xnintr rtdm_irq_t;
+
+/*!
+ * @anchor RTDM_IRQTYPE_xxx   @name RTDM_IRQTYPE_xxx
+ * Interrupt registrations flags
+ * @{
+ */
+/** Enable IRQ-sharing with other real-time drivers */
+#define RTDM_IRQTYPE_SHARED		XN_IRQTYPE_SHARED
+/** Mark IRQ as edge-triggered, relevant for correct handling of shared
+ *  edge-triggered IRQs */
+#define RTDM_IRQTYPE_EDGE		XN_IRQTYPE_EDGE
+/** @} RTDM_IRQTYPE_xxx */
+
+/**
+ * Interrupt handler
+ *
+ * @param[in] irq_handle IRQ handle as returned by rtdm_irq_request()
+ *
+ * @return 0 or a combination of @ref RTDM_IRQ_xxx flags
+ */
+typedef int (*rtdm_irq_handler_t)(rtdm_irq_t *irq_handle);
+
+/*!
+ * @anchor RTDM_IRQ_xxx   @name RTDM_IRQ_xxx
+ * Return flags of interrupt handlers
+ * @{
+ */
+/** Unhandled interrupt */
+#define RTDM_IRQ_NONE			XN_IRQ_NONE
+/** Denote handled interrupt */
+#define RTDM_IRQ_HANDLED		XN_IRQ_HANDLED
+/** Request interrupt disabling on exit */
+#define RTDM_IRQ_DISABLE		XN_IRQ_DISABLE
+/** @} RTDM_IRQ_xxx */
+
+/**
+ * Retrieve IRQ handler argument
+ *
+ * @param irq_handle IRQ handle
+ * @param type Type of the pointer to return
+ *
+ * @return The argument pointer registered on rtdm_irq_request() is returned,
+ * type-casted to the specified @a type.
+ *
+ * @coretags{unrestricted}
+ */
+#define rtdm_irq_get_arg(irq_handle, type)	((type *)irq_handle->cookie)
+/** @} rtdm_irq */
+
+int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no,
+		     rtdm_irq_handler_t handler, unsigned long flags,
+		     const char *device_name, void *arg);
+
+int rtdm_irq_request_affine(rtdm_irq_t *irq_handle, unsigned int irq_no,
+			    rtdm_irq_handler_t handler, unsigned long flags,
+			    const char *device_name, void *arg,
+			    const cpumask_t *cpumask);
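+
+/*
+ * Handler/registration sketch (hypothetical names): the cookie passed
+ * to rtdm_irq_request() is retrieved with rtdm_irq_get_arg().
+ *
+ *	static int foo_isr(rtdm_irq_t *irq_handle)
+ *	{
+ *		struct foo_ctx *ctx = rtdm_irq_get_arg(irq_handle, struct foo_ctx);
+ *
+ *		// ... acknowledge the device ...
+ *		return RTDM_IRQ_HANDLED;
+ *	}
+ *
+ *	ret = rtdm_irq_request(&ctx->irq_handle, irq_no, foo_isr,
+ *			       RTDM_IRQTYPE_SHARED, "foo", ctx);
+ */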
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+static inline int rtdm_irq_free(rtdm_irq_t *irq_handle)
+{
+	if (!XENO_ASSERT(COBALT, xnsched_root_p()))
+		return -EPERM;
+	xnintr_destroy(irq_handle);
+	return 0;
+}
+
+static inline int rtdm_irq_enable(rtdm_irq_t *irq_handle)
+{
+	xnintr_enable(irq_handle);
+	return 0;
+}
+
+static inline int rtdm_irq_disable(rtdm_irq_t *irq_handle)
+{
+	xnintr_disable(irq_handle);
+	return 0;
+}
+
+static inline int rtdm_irq_set_affinity(rtdm_irq_t *irq_handle,
+					const cpumask_t *cpumask)
+{
+	return xnintr_affinity(irq_handle, cpumask);
+}
+#endif /* !DOXYGEN_CPP */
+
+/* --- non-real-time signalling services --- */
+
+/*!
+ * @addtogroup rtdm_nrtsignal
+ * @{
+ */
+
+typedef struct rtdm_nrtsig rtdm_nrtsig_t;
+/**
+ * Non-real-time signal handler
+ *
+ * @param[in] nrt_sig Signal handle pointer as passed to rtdm_nrtsig_init()
+ * @param[in] arg Argument as passed to rtdm_nrtsig_init()
+ *
+ * @note The signal handler will run in the soft-IRQ context of the
+ * non-real-time subsystem. Note the implications of this context,
+ * e.g. blocking operations must not be invoked.
+ */
+typedef void (*rtdm_nrtsig_handler_t)(rtdm_nrtsig_t *nrt_sig, void *arg);
+
+struct rtdm_nrtsig {
+	struct pipeline_inband_work inband_work; /* Must be first */
+	rtdm_nrtsig_handler_t handler;
+	void *arg;
+};
+
+void rtdm_schedule_nrt_work(struct work_struct *lostage_work);
+/** @} rtdm_nrtsignal */
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+void __rtdm_nrtsig_execute(struct pipeline_inband_work *inband_work);
+
+static inline void rtdm_nrtsig_init(rtdm_nrtsig_t *nrt_sig,
+				    rtdm_nrtsig_handler_t handler, void *arg)
+{
+	nrt_sig->inband_work = (struct pipeline_inband_work)
+		PIPELINE_INBAND_WORK_INITIALIZER(*nrt_sig,
+						 __rtdm_nrtsig_execute);
+	nrt_sig->handler = handler;
+	nrt_sig->arg = arg;
+}
+
+static inline void rtdm_nrtsig_destroy(rtdm_nrtsig_t *nrt_sig)
+{
+	nrt_sig->handler = NULL;
+	nrt_sig->arg = NULL;
+}
+
+void rtdm_nrtsig_pend(rtdm_nrtsig_t *nrt_sig);
+#endif /* !DOXYGEN_CPP */
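+
+/*
+ * Typical pattern (sketch, hypothetical names): defer work to the
+ * non-real-time stage from a real-time context.
+ *
+ *	static void foo_nrt_handler(rtdm_nrtsig_t *nrt_sig, void *arg)
+ *	{
+ *		// runs in in-band (Linux) soft-IRQ context
+ *	}
+ *
+ *	rtdm_nrtsig_init(&ctx->nrt_sig, foo_nrt_handler, ctx);
+ *	// ... later, from a real-time context:
+ *	rtdm_nrtsig_pend(&ctx->nrt_sig);
+ */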
+
+/* --- timer services --- */
+
+/*!
+ * @addtogroup rtdm_timer
+ * @{
+ */
+
+typedef struct xntimer rtdm_timer_t;
+
+/**
+ * Timer handler
+ *
+ * @param[in] timer Timer handle as returned by rtdm_timer_init()
+ */
+typedef void (*rtdm_timer_handler_t)(rtdm_timer_t *timer);
+
+/*!
+ * @anchor RTDM_TIMERMODE_xxx   @name RTDM_TIMERMODE_xxx
+ * Timer operation modes
+ * @{
+ */
+enum rtdm_timer_mode {
+	/** Monotonic timer with relative timeout */
+	RTDM_TIMERMODE_RELATIVE = XN_RELATIVE,
+
+	/** Monotonic timer with absolute timeout */
+	RTDM_TIMERMODE_ABSOLUTE = XN_ABSOLUTE,
+
+	/** Adjustable timer with absolute timeout */
+	RTDM_TIMERMODE_REALTIME = XN_REALTIME
+};
+/** @} RTDM_TIMERMODE_xxx */
+
+/** @} rtdm_timer */
+
+int rtdm_timer_init(rtdm_timer_t *timer, rtdm_timer_handler_t handler,
+		    const char *name);
+
+void rtdm_timer_destroy(rtdm_timer_t *timer);
+
+int rtdm_timer_start(rtdm_timer_t *timer, nanosecs_abs_t expiry,
+		     nanosecs_rel_t interval, enum rtdm_timer_mode mode);
+
+void rtdm_timer_stop(rtdm_timer_t *timer);
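+
+/*
+ * One-shot timer sketch (hypothetical names): fire 1 ms from now,
+ * with no recurring interval.
+ *
+ *	static void foo_timeout(rtdm_timer_t *timer)
+ *	{
+ *		// runs in real-time, interrupt-like context
+ *	}
+ *
+ *	rtdm_timer_init(&ctx->timer, foo_timeout, "foo-timer");
+ *	rtdm_timer_start(&ctx->timer, 1000000ULL, 0,
+ *			 RTDM_TIMERMODE_RELATIVE);
+ */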
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+static inline int rtdm_timer_start_in_handler(rtdm_timer_t *timer,
+					      nanosecs_abs_t expiry,
+					      nanosecs_rel_t interval,
+					      enum rtdm_timer_mode mode)
+{
+	return xntimer_start(timer, expiry, interval, (xntmode_t)mode);
+}
+
+static inline void rtdm_timer_stop_in_handler(rtdm_timer_t *timer)
+{
+	xntimer_stop(timer);
+}
+#endif /* !DOXYGEN_CPP */
+
+/* --- task services --- */
+/*!
+ * @addtogroup rtdm_task
+ * @{
+ */
+
+typedef struct xnthread rtdm_task_t;
+
+/**
+ * Real-time task procedure
+ *
+ * @param[in,out] arg argument as passed to rtdm_task_init()
+ */
+typedef void (*rtdm_task_proc_t)(void *arg);
+
+/**
+ * @anchor rtdmtaskprio @name Task Priority Range
+ * Maximum and minimum task priorities
+ * @{ */
+#define RTDM_TASK_LOWEST_PRIORITY	0
+#define RTDM_TASK_HIGHEST_PRIORITY	99
+/** @} Task Priority Range */
+
+/**
+ * @anchor rtdmchangetaskprio @name Task Priority Modification
+ * Raise or lower task priorities by one level
+ * @{ */
+#define RTDM_TASK_RAISE_PRIORITY	(+1)
+#define RTDM_TASK_LOWER_PRIORITY	(-1)
+/** @} Task Priority Modification */
+
+/** @} rtdm_task */
+
+int rtdm_task_init(rtdm_task_t *task, const char *name,
+		   rtdm_task_proc_t task_proc, void *arg,
+		   int priority, nanosecs_rel_t period);
+int __rtdm_task_sleep(xnticks_t timeout, xntmode_t mode);
+void rtdm_task_busy_sleep(nanosecs_rel_t delay);
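+
+/*
+ * Periodic worker sketch (hypothetical names, 1 ms period):
+ *
+ *	static void foo_task_proc(void *arg)
+ *	{
+ *		while (!rtdm_task_should_stop()) {
+ *			// ... one period of work ...
+ *			rtdm_task_wait_period(NULL);
+ *		}
+ *	}
+ *
+ *	ret = rtdm_task_init(&ctx->task, "foo-worker", foo_task_proc,
+ *			     ctx, RTDM_TASK_HIGHEST_PRIORITY, 1000000);
+ */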
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+static inline void rtdm_task_destroy(rtdm_task_t *task)
+{
+	xnthread_cancel(task);
+	xnthread_join(task, true);
+}
+
+static inline int rtdm_task_should_stop(void)
+{
+	return xnthread_test_info(xnthread_current(), XNCANCELD);
+}
+
+void rtdm_task_join(rtdm_task_t *task);
+
+static inline void __deprecated rtdm_task_join_nrt(rtdm_task_t *task,
+						   unsigned int poll_delay)
+{
+	rtdm_task_join(task);
+}
+
+static inline void rtdm_task_set_priority(rtdm_task_t *task, int priority)
+{
+	union xnsched_policy_param param = { .rt = { .prio = priority } };
+	spl_t s;
+
+	splhigh(s);
+	xnthread_set_schedparam(task, &xnsched_class_rt, &param);
+	xnsched_run();
+	splexit(s);
+}
+
+static inline int rtdm_task_set_period(rtdm_task_t *task,
+				       nanosecs_abs_t start_date,
+				       nanosecs_rel_t period)
+{
+	if (period < 0)
+		period = 0;
+	if (start_date == 0)
+		start_date = XN_INFINITE;
+
+	return xnthread_set_periodic(task, start_date, XN_ABSOLUTE, period);
+}
+
+static inline int rtdm_task_unblock(rtdm_task_t *task)
+{
+	spl_t s;
+	int res;
+
+	splhigh(s);
+	res = xnthread_unblock(task);
+	xnsched_run();
+	splexit(s);
+
+	return res;
+}
+
+static inline rtdm_task_t *rtdm_task_current(void)
+{
+	return xnthread_current();
+}
+
+static inline int rtdm_task_wait_period(unsigned long *overruns_r)
+{
+	if (!XENO_ASSERT(COBALT, !xnsched_unblockable_p()))
+		return -EPERM;
+	return xnthread_wait_period(overruns_r);
+}
+
+static inline int rtdm_task_sleep(nanosecs_rel_t delay)
+{
+	return __rtdm_task_sleep(delay, XN_RELATIVE);
+}
+
+static inline int
+rtdm_task_sleep_abs(nanosecs_abs_t wakeup_date, enum rtdm_timer_mode mode)
+{
+	/* For the sake of a consistent API usage... */
+	if (mode != RTDM_TIMERMODE_ABSOLUTE && mode != RTDM_TIMERMODE_REALTIME)
+		return -EINVAL;
+	return __rtdm_task_sleep(wakeup_date, (xntmode_t)mode);
+}
+
+/* rtdm_task_sleep_abs shall be used instead */
+static inline int __deprecated rtdm_task_sleep_until(nanosecs_abs_t wakeup_time)
+{
+	return __rtdm_task_sleep(wakeup_time, XN_REALTIME);
+}
+
+#define rtdm_task_busy_wait(__condition, __spin_ns, __sleep_ns)			\
+	({									\
+		__label__ done;							\
+		nanosecs_abs_t __end;						\
+		int __ret = 0;							\
+		for (;;) {							\
+			__end = rtdm_clock_read_monotonic() + __spin_ns;	\
+			for (;;) {						\
+				if (__condition)				\
+					goto done;				\
+				if (rtdm_clock_read_monotonic() >= __end)	\
+					break;					\
+			}							\
+			__ret = rtdm_task_sleep(__sleep_ns);			\
+			if (__ret)						\
+				break;						\
+		}								\
+	done:									\
+		__ret;								\
+	})
+
+#define rtdm_wait_context	xnthread_wait_context
+
+static inline
+void rtdm_wait_complete(struct rtdm_wait_context *wc)
+{
+	xnthread_complete_wait(wc);
+}
+
+static inline
+int rtdm_wait_is_completed(struct rtdm_wait_context *wc)
+{
+	return xnthread_wait_complete_p(wc);
+}
+
+static inline void rtdm_wait_prepare(struct rtdm_wait_context *wc)
+{
+	xnthread_prepare_wait(wc);
+}
+
+static inline
+struct rtdm_wait_context *rtdm_wait_get_context(rtdm_task_t *task)
+{
+	return xnthread_get_wait_context(task);
+}
+
+#endif /* !DOXYGEN_CPP */
+
+/* --- event services --- */
+
+typedef struct rtdm_event {
+	struct xnsynch synch_base;
+	DECLARE_XNSELECT(select_block);
+} rtdm_event_t;
+
+#define RTDM_EVENT_PENDING		XNSYNCH_SPARE1
+
+void rtdm_event_init(rtdm_event_t *event, unsigned long pending);
+int rtdm_event_select(rtdm_event_t *event, rtdm_selector_t *selector,
+		      enum rtdm_selecttype type, unsigned fd_index);
+int rtdm_event_wait(rtdm_event_t *event);
+int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
+			 rtdm_toseq_t *timeout_seq);
+void rtdm_event_signal(rtdm_event_t *event);
+
+void rtdm_event_clear(rtdm_event_t *event);
+
+void rtdm_event_pulse(rtdm_event_t *event);
+
+void rtdm_event_destroy(rtdm_event_t *event);
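+
+/*
+ * Event sketch (hypothetical driver state): an IRQ handler wakes up a
+ * reader blocked in primary mode.
+ *
+ *	rtdm_event_init(&ctx->rx_event, 0);
+ *
+ *	// reader side
+ *	ret = rtdm_event_wait(&ctx->rx_event);
+ *
+ *	// IRQ handler
+ *	rtdm_event_signal(&ctx->rx_event);
+ */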
+
+/* --- semaphore services --- */
+
+typedef struct rtdm_sem {
+	unsigned long value;
+	struct xnsynch synch_base;
+	DECLARE_XNSELECT(select_block);
+} rtdm_sem_t;
+
+void rtdm_sem_init(rtdm_sem_t *sem, unsigned long value);
+int rtdm_sem_select(rtdm_sem_t *sem, rtdm_selector_t *selector,
+		    enum rtdm_selecttype type, unsigned fd_index);
+int rtdm_sem_down(rtdm_sem_t *sem);
+int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
+		       rtdm_toseq_t *timeout_seq);
+void rtdm_sem_up(rtdm_sem_t *sem);
+
+void rtdm_sem_destroy(rtdm_sem_t *sem);
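+
+/*
+ * Semaphore sketch (hypothetical names): a counting semaphore tracking
+ * available items.
+ *
+ *	rtdm_sem_init(&ctx->sem, 0);
+ *
+ *	// consumer
+ *	ret = rtdm_sem_down(&ctx->sem);
+ *
+ *	// producer
+ *	rtdm_sem_up(&ctx->sem);
+ */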
+
+/* --- mutex services --- */
+
+typedef struct rtdm_mutex {
+	struct xnsynch synch_base;
+	atomic_t fastlock;
+} rtdm_mutex_t;
+
+void rtdm_mutex_init(rtdm_mutex_t *mutex);
+int rtdm_mutex_lock(rtdm_mutex_t *mutex);
+int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout,
+			 rtdm_toseq_t *timeout_seq);
+void rtdm_mutex_unlock(rtdm_mutex_t *mutex);
+void rtdm_mutex_destroy(rtdm_mutex_t *mutex);
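+
+/*
+ * Mutex sketch (hypothetical name); mutexes may only be taken from
+ * real-time task context.
+ *
+ *	rtdm_mutex_init(&ctx->lock);
+ *
+ *	rtdm_mutex_lock(&ctx->lock);
+ *	// ... critical section ...
+ *	rtdm_mutex_unlock(&ctx->lock);
+ */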
+
+/* --- utility functions --- */
+
+#define rtdm_printk(format, ...)	printk(format, ##__VA_ARGS__)
+
+#define rtdm_printk_ratelimited(fmt, ...)  do {				\
+	if (xnclock_ratelimit())					\
+		printk(fmt, ##__VA_ARGS__);				\
+} while (0)
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+static inline void *rtdm_malloc(size_t size)
+{
+	return xnmalloc(size);
+}
+
+static inline void rtdm_free(void *ptr)
+{
+	xnfree(ptr);
+}
+
+int rtdm_mmap_to_user(struct rtdm_fd *fd,
+		      void *src_addr, size_t len,
+		      int prot, void **pptr,
+		      struct vm_operations_struct *vm_ops,
+		      void *vm_private_data);
+
+int rtdm_iomap_to_user(struct rtdm_fd *fd,
+		       phys_addr_t src_addr, size_t len,
+		       int prot, void **pptr,
+		       struct vm_operations_struct *vm_ops,
+		       void *vm_private_data);
+
+int rtdm_mmap_kmem(struct vm_area_struct *vma, void *va);
+
+int rtdm_mmap_vmem(struct vm_area_struct *vma, void *va);
+
+int rtdm_mmap_iomem(struct vm_area_struct *vma, phys_addr_t pa);
+
+int rtdm_munmap(void *ptr, size_t len);
+
+static inline int rtdm_read_user_ok(struct rtdm_fd *fd,
+				    const void __user *ptr, size_t size)
+{
+	return access_rok(ptr, size);
+}
+
+static inline int rtdm_rw_user_ok(struct rtdm_fd *fd,
+				  const void __user *ptr, size_t size)
+{
+	return access_wok(ptr, size);
+}
+
+static inline int rtdm_copy_from_user(struct rtdm_fd *fd,
+				      void *dst, const void __user *src,
+				      size_t size)
+{
+	return __xn_copy_from_user(dst, src, size) ? -EFAULT : 0;
+}
+
+static inline int rtdm_safe_copy_from_user(struct rtdm_fd *fd,
+					   void *dst, const void __user *src,
+					   size_t size)
+{
+	return cobalt_copy_from_user(dst, src, size);
+}
+
+static inline int rtdm_copy_to_user(struct rtdm_fd *fd,
+				    void __user *dst, const void *src,
+				    size_t size)
+{
+	return __xn_copy_to_user(dst, src, size) ? -EFAULT : 0;
+}
+
+static inline int rtdm_safe_copy_to_user(struct rtdm_fd *fd,
+					 void __user *dst, const void *src,
+					 size_t size)
+{
+	return cobalt_copy_to_user(dst, src, size);
+}
+
+static inline int rtdm_strncpy_from_user(struct rtdm_fd *fd,
+					 char *dst,
+					 const char __user *src, size_t count)
+{
+	return cobalt_strncpy_from_user(dst, src, count);
+}
+
+static inline bool rtdm_available(void)
+{
+	return realtime_core_enabled();
+}
+
+static inline int rtdm_rt_capable(struct rtdm_fd *fd)
+{
+	if (!XENO_ASSERT(COBALT, !xnsched_interrupt_p()))
+		return 0;
+
+	if (!rtdm_fd_is_user(fd))
+		return !xnsched_root_p();
+
+	return xnthread_current() != NULL;
+}
+
+static inline int rtdm_in_rt_context(void)
+{
+	return is_primary_domain();
+}
+
+#define RTDM_IOV_FASTMAX  16
+
+int rtdm_get_iovec(struct rtdm_fd *fd, struct iovec **iov,
+		   const struct user_msghdr *msg,
+		   struct iovec *iov_fast);
+
+int rtdm_put_iovec(struct rtdm_fd *fd, struct iovec *iov,
+		   const struct user_msghdr *msg,
+		   struct iovec *iov_fast);
+
+static inline
+void rtdm_drop_iovec(struct iovec *iov, struct iovec *iov_fast)
+{
+	if (iov != iov_fast)
+		xnfree(iov);
+}
+
+ssize_t rtdm_get_iov_flatlen(struct iovec *iov, int iovlen);
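+
+/*
+ * Typical get/flatten/put sequence in a sendmsg handler (sketch,
+ * hypothetical names, error handling trimmed):
+ *
+ *	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+ *	ssize_t len;
+ *	int ret;
+ *
+ *	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+ *	len = rtdm_get_iov_flatlen(iov, msg->msg_iovlen);
+ *	// ... build and send the datagram from iov[] ...
+ *	rtdm_put_iovec(fd, iov, msg, iov_fast);
+ */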
+
+#endif /* !DOXYGEN_CPP */
+
+#endif /* _COBALT_RTDM_DRIVER_H */
+++ linux-patched/include/xenomai/rtdm/rtdm.h	2022-03-21 12:58:31.872864749 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/gpiopwm.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005, 2006 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_RTDM_H
+#define _COBALT_RTDM_RTDM_H
+
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/ioctl.h>
+#include <linux/sched.h>
+#include <linux/socket.h>
+#include <cobalt/kernel/ppd.h>
+#include <rtdm/fd.h>
+
+typedef __u32 socklen_t;
+
+#include <rtdm/uapi/rtdm.h>
+
+int __rtdm_dev_open(const char *path, int oflag);
+
+int __rtdm_dev_socket(int protocol_family,
+		      int socket_type, int protocol);
+
+static inline int rtdm_open(const char *path, int oflag, ...)
+{
+	return __rtdm_dev_open(path, oflag);
+}
+
+static inline int rtdm_socket(int protocol_family,
+			      int socket_type, int protocol)
+{
+	return __rtdm_dev_socket(protocol_family, socket_type, protocol);
+}
+
+static inline int rtdm_close(int fd)
+{
+	return rtdm_fd_close(fd, RTDM_FD_MAGIC);
+}
+
+#define rtdm_fcntl(__fd, __cmd, __args...)	\
+	rtdm_fd_fcntl(__fd, __cmd, ##__args)
+
+#define rtdm_ioctl(__fd, __request, __args...)	\
+	rtdm_fd_ioctl(__fd, __request, ##__args)
+
+static inline ssize_t rtdm_read(int fd, void *buf, size_t count)
+{
+	return rtdm_fd_read(fd, buf, count);
+}
+
+static inline ssize_t rtdm_write(int fd, const void *buf, size_t count)
+{
+	return rtdm_fd_write(fd, buf, count);
+}
+
+static inline ssize_t rtdm_recvmsg(int s, struct user_msghdr *msg, int flags)
+{
+	return rtdm_fd_recvmsg(s, msg, flags);
+}
+
+static inline ssize_t rtdm_sendmsg(int s, const struct user_msghdr *msg, int flags)
+{
+	return rtdm_fd_sendmsg(s, msg, flags);
+}
+
+static inline
+ssize_t rtdm_recvfrom(int s, void *buf, size_t len, int flags,
+		      struct sockaddr *from,
+		      socklen_t *fromlen)
+{
+	struct user_msghdr msg;
+	struct iovec iov;
+	ssize_t ret;
+
+	iov.iov_base = buf;
+	iov.iov_len = len;
+	msg.msg_name = from;
+	msg.msg_namelen = from ? *fromlen : 0;
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+	msg.msg_control = NULL;
+	msg.msg_controllen = 0;
+
+	ret = rtdm_recvmsg(s, &msg, flags);
+	if (ret < 0)
+		return ret;
+
+	if (from)
+		*fromlen = msg.msg_namelen;
+
+	return ret;
+}
+
+static inline ssize_t rtdm_recv(int s, void *buf, size_t len, int flags)
+{
+	return rtdm_recvfrom(s, buf, len, flags, NULL, NULL);
+}
+
+static inline ssize_t rtdm_sendto(int s, const void *buf, size_t len,
+				  int flags, const struct sockaddr *to,
+				  socklen_t tolen)
+{
+	struct user_msghdr msg;
+	struct iovec iov;
+
+	iov.iov_base = (void *)buf;
+	iov.iov_len = len;
+	msg.msg_name = (struct sockaddr *)to;
+	msg.msg_namelen = tolen;
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+	msg.msg_control = NULL;
+	msg.msg_controllen = 0;
+
+	return rtdm_sendmsg(s, &msg, flags);
+}
+
+static inline ssize_t rtdm_send(int s, const void *buf, size_t len, int flags)
+{
+	return rtdm_sendto(s, buf, len, flags, NULL, 0);
+}
+
+static inline int rtdm_getsockopt(int s, int level, int optname,
+				  void *optval, socklen_t *optlen)
+{
+	struct _rtdm_getsockopt_args args = {
+		level, optname, optval, optlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_GETSOCKOPT, &args);
+}
+
+static inline int rtdm_setsockopt(int s, int level, int optname,
+				  const void *optval, socklen_t optlen)
+{
+	struct _rtdm_setsockopt_args args = {
+		level, optname, (void *)optval, optlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_SETSOCKOPT, &args);
+}
+
+static inline int rtdm_bind(int s, const struct sockaddr *my_addr,
+			    socklen_t addrlen)
+{
+	struct _rtdm_setsockaddr_args args = {
+		my_addr, addrlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_BIND, &args);
+}
+
+static inline int rtdm_connect(int s, const struct sockaddr *serv_addr,
+			       socklen_t addrlen)
+{
+	struct _rtdm_setsockaddr_args args = {
+		serv_addr, addrlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_CONNECT, &args);
+}
+
+static inline int rtdm_listen(int s, int backlog)
+{
+	return rtdm_ioctl(s, _RTIOC_LISTEN, backlog);
+}
+
+static inline int rtdm_accept(int s, struct sockaddr *addr,
+			      socklen_t *addrlen)
+{
+	struct _rtdm_getsockaddr_args args = {
+		addr, addrlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_ACCEPT, &args);
+}
+
+static inline int rtdm_getsockname(int s, struct sockaddr *name,
+				   socklen_t *namelen)
+{
+	struct _rtdm_getsockaddr_args args = {
+		name, namelen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_GETSOCKNAME, &args);
+}
+
+static inline int rtdm_getpeername(int s, struct sockaddr *name,
+				   socklen_t *namelen)
+{
+	struct _rtdm_getsockaddr_args args = {
+		name, namelen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_GETPEERNAME, &args);
+}
+
+static inline int rtdm_shutdown(int s, int how)
+{
+	return rtdm_ioctl(s, _RTIOC_SHUTDOWN, how);
+}
+
+#endif /* _COBALT_RTDM_RTDM_H */
+++ linux-patched/include/xenomai/rtdm/gpiopwm.h	2022-03-21 12:58:31.864864827 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/fd.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2015 Jorge Ramirez <jro@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_PWM_H
+#define _COBALT_RTDM_PWM_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/gpiopwm.h>
+
+#endif /* !_COBALT_RTDM_PWM_H */
+++ linux-patched/include/xenomai/rtdm/fd.h	2022-03-21 12:58:31.857864895 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/cobalt.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2008,2013,2014 Gilles Chanteperdrix <gch@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_FD_H
+#define _COBALT_KERNEL_FD_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/file.h>
+#include <cobalt/kernel/tree.h>
+#include <asm-generic/xenomai/syscall.h>
+
+struct vm_area_struct;
+struct rtdm_fd;
+struct _rtdm_mmap_request;
+struct xnselector;
+struct cobalt_ppd;
+struct rtdm_device;
+
+/**
+ * @file
+ * @anchor File operation handlers
+ * @addtogroup rtdm_device_register
+ * @{
+ */
+
+/**
+ * Open handler for named devices
+ *
+ * @param[in] fd File descriptor associated with opened device instance
+ * @param[in] oflags Open flags as passed by the user
+ *
+ * The file descriptor carries the device minor number, which can be
+ * retrieved by a call to rtdm_fd_minor(fd). The minor number can be
+ * used to distinguish between devices managed by the same driver.
+ *
+ * @return 0 on success. On failure, a negative error code is returned.
+ *
+ * @see @c open() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+int rtdm_open_handler(struct rtdm_fd *fd, int oflags);
+
+/**
+ * Socket creation handler for protocol devices
+ *
+ * @param[in] fd File descriptor associated with opened device instance
+ * @param[in] protocol Protocol number as passed by the user
+ *
+ * @return 0 on success. On failure, a negative error code is returned.
+ *
+ * @see @c socket() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+int rtdm_socket_handler(struct rtdm_fd *fd, int protocol);
+
+/**
+ * Close handler
+ *
+ * @param[in] fd File descriptor associated with opened
+ * device instance.
+ *
+ * @see @c close() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+void rtdm_close_handler(struct rtdm_fd *fd);
+
+/**
+ * IOCTL handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in] request Request number as passed by the user
+ * @param[in,out] arg Request argument as passed by the user
+ *
+ * @return A positive value or 0 on success. On failure return either
+ * -ENOSYS, to request that the function be called again from the opposite
+ * realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c ioctl() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+int rtdm_ioctl_handler(struct rtdm_fd *fd, unsigned int request, void __user *arg);
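+
+/*
+ * Sketch of the -ENOSYS demotion pattern (hypothetical request
+ * codes): handle time-critical requests in ioctl_rt and let the
+ * core retry everything else from the non-real-time handler.
+ *
+ *	static int foo_ioctl_rt(struct rtdm_fd *fd, unsigned int request,
+ *				void __user *arg)
+ *	{
+ *		switch (request) {
+ *		case FOO_RTIOC_FAST_OP:
+ *			return foo_do_fast_op(fd, arg);
+ *		default:
+ *			return -ENOSYS;	// retried from nrt context
+ *		}
+ *	}
+ */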
+
+/**
+ * Read handler
+ *
+ * @param[in] fd File descriptor
+ * @param[out] buf Input buffer as passed by the user
+ * @param[in] size Number of bytes the user requests to read
+ *
+ * @return On success, the number of bytes read. On failure return either
+ * -ENOSYS, to request that this handler be called again from the opposite
+ * realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c read() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+ssize_t rtdm_read_handler(struct rtdm_fd *fd, void __user *buf, size_t size);
+
+/**
+ * Write handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in] buf Output buffer as passed by the user
+ * @param[in] size Number of bytes the user requests to write
+ *
+ * @return On success, the number of bytes written. On failure return
+ * either -ENOSYS, to request that this handler be called again from the
+ * opposite realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c write() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+ssize_t rtdm_write_handler(struct rtdm_fd *fd, const void __user *buf, size_t size);
+
+/**
+ * Receive message handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in,out] msg Message descriptor as passed by the user, automatically
+ * mirrored to safe kernel memory in case of user mode call
+ * @param[in] flags Message flags as passed by the user
+ *
+ * @return On success, the number of bytes received. On failure return
+ * either -ENOSYS, to request that this handler be called again from the
+ * opposite realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c recvmsg() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+ssize_t rtdm_recvmsg_handler(struct rtdm_fd *fd, struct user_msghdr *msg, int flags);
+
+/**
+ * Transmit message handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in] msg Message descriptor as passed by the user, automatically
+ * mirrored to safe kernel memory in case of user mode call
+ * @param[in] flags Message flags as passed by the user
+ *
+ * @return On success, the number of bytes transmitted. On failure return
+ * either -ENOSYS, to request that this handler be called again from the
+ * opposite realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c sendmsg() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+ssize_t rtdm_sendmsg_handler(struct rtdm_fd *fd, const struct user_msghdr *msg, int flags);
+
+/**
+ * Select handler
+ *
+ * @param[in] fd File descriptor
+ * @param selector Pointer to the selector structure
+ * @param type Type of events (@a XNSELECT_READ, @a XNSELECT_WRITE, or @a
+ * XNSELECT_EXCEPT)
+ * @param index Index of the file descriptor
+ *
+ * @return 0 on success. On failure, a negative error code is
+ * returned.
+ *
+ * @see @c select() in POSIX.1-2001,
+ * http://pubs.opengroup.org/onlinepubs/007908799/xsh/select.html
+ */
+int rtdm_select_handler(struct rtdm_fd *fd, struct xnselector *selector,
+			unsigned int type, unsigned int index);
+
+/**
+ * Memory mapping handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in] vma Virtual memory area descriptor
+ *
+ * @return 0 on success. On failure, a negative error code is
+ * returned.
+ *
+ * @see @c mmap() in POSIX.1-2001,
+ * http://pubs.opengroup.org/onlinepubs/7908799/xsh/mmap.html
+ *
+ * @note The address hint passed to the mmap() request is deliberately
+ * ignored by RTDM.
+ */
+int rtdm_mmap_handler(struct rtdm_fd *fd, struct vm_area_struct *vma);
+
+/**
+ * Allocate mapping region in address space
+ *
+ * When present, this optional handler should return the start address
+ * of a free region in the process's address space, large enough to
+ * cover the ongoing mmap() operation. If unspecified, the default
+ * architecture-defined handler is invoked.
+ *
+ * Most drivers can omit this handler, except on MMU-less platforms
+ * (see second note).
+ *
+ * @param[in] fd File descriptor
+ * @param[in] len Length of the requested region
+ * @param[in] pgoff Page frame number to map to (see second note).
+ * @param[in] flags Requested mapping flags
+ *
+ * @return The start address of the mapping region on success. On
+ * failure, a negative error code should be returned, with -ENOSYS
+ * meaning that the driver does not want to provide such information,
+ * in which case the ongoing mmap() operation will fail.
+ *
+ * @note The address hint passed to the mmap() request is deliberately
+ * ignored by RTDM, and therefore not passed to this handler.
+ *
+ * @note On MMU-less platforms, this handler is required because RTDM
+ * issues mapping requests over a shareable character device
+ * internally. In such context, the RTDM core may pass a null @a pgoff
+ * argument to the handler, for probing for the logical start address
+ * of the memory region to map to. Otherwise, when @a pgoff is
+ * non-zero, pgoff << PAGE_SHIFT is usually returned.
+ */
+unsigned long
+rtdm_get_unmapped_area_handler(struct rtdm_fd *fd,
+			       unsigned long len, unsigned long pgoff,
+			       unsigned long flags);
+/**
+ * @anchor rtdm_fd_ops
+ * @brief RTDM file operation descriptor.
+ *
+ * This structure describes the operations available with a RTDM
+ * device, defining handlers for submitting I/O requests. Those
+ * handlers are implemented by RTDM device drivers.
+ */
+struct rtdm_fd_ops {
+	/** See rtdm_open_handler(). */
+	int (*open)(struct rtdm_fd *fd, int oflags);
+	/** See rtdm_socket_handler(). */
+	int (*socket)(struct rtdm_fd *fd, int protocol);
+	/** See rtdm_close_handler(). */
+	void (*close)(struct rtdm_fd *fd);
+	/** See rtdm_ioctl_handler(). */
+	int (*ioctl_rt)(struct rtdm_fd *fd,
+			unsigned int request, void __user *arg);
+	/** See rtdm_ioctl_handler(). */
+	int (*ioctl_nrt)(struct rtdm_fd *fd,
+			 unsigned int request, void __user *arg);
+	/** See rtdm_read_handler(). */
+	ssize_t (*read_rt)(struct rtdm_fd *fd,
+			   void __user *buf, size_t size);
+	/** See rtdm_read_handler(). */
+	ssize_t (*read_nrt)(struct rtdm_fd *fd,
+			    void __user *buf, size_t size);
+	/** See rtdm_write_handler(). */
+	ssize_t (*write_rt)(struct rtdm_fd *fd,
+			    const void __user *buf, size_t size);
+	/** See rtdm_write_handler(). */
+	ssize_t (*write_nrt)(struct rtdm_fd *fd,
+			     const void __user *buf, size_t size);
+	/** See rtdm_recvmsg_handler(). */
+	ssize_t (*recvmsg_rt)(struct rtdm_fd *fd,
+			      struct user_msghdr *msg, int flags);
+	/** See rtdm_recvmsg_handler(). */
+	ssize_t (*recvmsg_nrt)(struct rtdm_fd *fd,
+			       struct user_msghdr *msg, int flags);
+	/** See rtdm_sendmsg_handler(). */
+	ssize_t (*sendmsg_rt)(struct rtdm_fd *fd,
+			      const struct user_msghdr *msg, int flags);
+	/** See rtdm_sendmsg_handler(). */
+	ssize_t (*sendmsg_nrt)(struct rtdm_fd *fd,
+			       const struct user_msghdr *msg, int flags);
+	/** See rtdm_select_handler(). */
+	int (*select)(struct rtdm_fd *fd,
+		      struct xnselector *selector,
+		      unsigned int type, unsigned int index);
+	/** See rtdm_mmap_handler(). */
+	int (*mmap)(struct rtdm_fd *fd,
+		    struct vm_area_struct *vma);
+	/** See rtdm_get_unmapped_area_handler(). */
+	unsigned long (*get_unmapped_area)(struct rtdm_fd *fd,
+					   unsigned long len,
+					   unsigned long pgoff,
+					   unsigned long flags);
+};
+
+/** @} File operation handlers */
+
+struct rtdm_fd {
+	unsigned int magic;
+	struct rtdm_fd_ops *ops;
+	struct cobalt_ppd *owner;
+	unsigned int refs;
+	int ufd;
+	int minor;
+	int oflags;
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	int compat;
+#endif
+	bool stale;
+	struct list_head cleanup;
+	struct list_head next;	/* in dev->openfd_list */
+};
+
+#define RTDM_FD_MAGIC 0x52544446
+
+#define RTDM_FD_COMPAT	__COBALT_COMPAT_BIT
+#define RTDM_FD_COMPATX	__COBALT_COMPATX_BIT
+
+int __rtdm_anon_getfd(const char *name, int flags);
+
+void __rtdm_anon_putfd(int ufd);
+
+static inline struct cobalt_ppd *rtdm_fd_owner(const struct rtdm_fd *fd)
+{
+	return fd->owner;
+}
+
+static inline int rtdm_fd_ufd(const struct rtdm_fd *fd)
+{
+	return fd->ufd;
+}
+
+static inline int rtdm_fd_minor(const struct rtdm_fd *fd)
+{
+	return fd->minor;
+}
+
+static inline int rtdm_fd_flags(const struct rtdm_fd *fd)
+{
+	return fd->oflags;
+}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+static inline int rtdm_fd_is_compat(const struct rtdm_fd *fd)
+{
+	return fd->compat;
+}
+#else
+static inline int rtdm_fd_is_compat(const struct rtdm_fd *fd)
+{
+	return 0;
+}
+#endif
+
+int rtdm_fd_enter(struct rtdm_fd *rtdm_fd, int ufd,
+		  unsigned int magic, struct rtdm_fd_ops *ops);
+
+int rtdm_fd_register(struct rtdm_fd *fd, int ufd);
+
+struct rtdm_fd *rtdm_fd_get(int ufd, unsigned int magic);
+
+int rtdm_fd_lock(struct rtdm_fd *fd);
+
+void rtdm_fd_put(struct rtdm_fd *fd);
+
+void rtdm_fd_unlock(struct rtdm_fd *fd);
+
+int rtdm_fd_fcntl(int ufd, int cmd, ...);
+
+int rtdm_fd_ioctl(int ufd, unsigned int request, ...);
+
+ssize_t rtdm_fd_read(int ufd, void __user *buf, size_t size);
+
+ssize_t rtdm_fd_write(int ufd, const void __user *buf, size_t size);
+
+int rtdm_fd_close(int ufd, unsigned int magic);
+
+ssize_t rtdm_fd_recvmsg(int ufd, struct user_msghdr *msg, int flags);
+
+int __rtdm_fd_recvmmsg(int ufd, void __user *u_msgvec, unsigned int vlen,
+		       unsigned int flags, void __user *u_timeout,
+		       int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg),
+		       int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg),
+		       int (*get_timespec)(struct timespec64 *ts, const void __user *u_ts));
+
+int __rtdm_fd_recvmmsg64(int ufd, void __user *u_msgvec, unsigned int vlen,
+			 unsigned int flags, void __user *u_timeout,
+			 int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg),
+			 int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg));
+
+ssize_t rtdm_fd_sendmsg(int ufd, const struct user_msghdr *msg,
+			int flags);
+
+int __rtdm_fd_sendmmsg(int ufd, void __user *u_msgvec, unsigned int vlen,
+		       unsigned int flags,
+		       int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg),
+		       int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg));
+
+int rtdm_fd_mmap(int ufd, struct _rtdm_mmap_request *rma,
+		 void **u_addrp);
+
+int rtdm_fd_valid_p(int ufd);
+
+int rtdm_fd_select(int ufd, struct xnselector *selector,
+		   unsigned int type);
+
+int rtdm_device_new_fd(struct rtdm_fd *fd, int ufd,
+		struct rtdm_device *dev);
+
+void rtdm_device_flush_fds(struct rtdm_device *dev);
+
+void rtdm_fd_cleanup(struct cobalt_ppd *p);
+
+void rtdm_fd_init(void);
+
+#endif /* _COBALT_KERNEL_FD_H */
+++ linux-patched/include/xenomai/rtdm/cobalt.h	2022-03-21 12:58:31.849864973 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/rtdm_helpers.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_COBALT_H
+#define _COBALT_RTDM_COBALT_H
+
+#include <xenomai/posix/process.h>
+#include <xenomai/posix/extension.h>
+#include <xenomai/posix/thread.h>
+#include <xenomai/posix/signal.h>
+#include <xenomai/posix/timer.h>
+#include <xenomai/posix/clock.h>
+#include <xenomai/posix/event.h>
+#include <xenomai/posix/monitor.h>
+#include <xenomai/posix/corectl.h>
+
+#endif /* !_COBALT_RTDM_COBALT_H */
+++ linux-patched/include/xenomai/rtdm/analogy/rtdm_helpers.h	2022-03-21 12:58:31.842865041 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/subdevice.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, Operation system facilities
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_RTDM_HELPERS_H
+#define _COBALT_RTDM_ANALOGY_RTDM_HELPERS_H
+
+#include <linux/fs.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <rtdm/driver.h>
+
+/* --- Trace section  --- */
+#define A4L_PROMPT "Analogy: "
+
+#define RTDM_SUBCLASS_ANALOGY 0
+
+#define __a4l_err(fmt, args...)  rtdm_printk(KERN_ERR A4L_PROMPT fmt, ##args)
+#define __a4l_warn(fmt, args...) rtdm_printk(KERN_WARNING A4L_PROMPT fmt, ##args)
+
+#ifdef  CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_FTRACE
+#define __a4l_info(fmt, args...) trace_printk(fmt, ##args)
+#else
+#define __a4l_info(fmt, args...) 						\
+        rtdm_printk(KERN_INFO A4L_PROMPT "%s: " fmt, __FUNCTION__, ##args)
+#endif
+
+#ifdef CONFIG_XENO_DRIVERS_ANALOGY_DEBUG
+#ifdef CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_FTRACE
+#define __a4l_dbg(level, debug, fmt, args...)				\
+	do {								\
+	if ((debug) >= (level))						\
+		trace_printk(fmt, ##args); 				\
+	} while (0)
+#else
+#define __a4l_dbg(level, debug, fmt, args...)						\
+	do {										\
+	if ((debug) >= (level))								\
+		rtdm_printk(KERN_DEBUG A4L_PROMPT "%s: " fmt, __FUNCTION__ , ##args);	\
+	} while (0)
+#endif
+
+#define core_dbg CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_LEVEL
+#define drv_dbg CONFIG_XENO_DRIVERS_ANALOGY_DRIVER_DEBUG_LEVEL
+
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_DEBUG */
+
+#define __a4l_dbg(level, debug, fmt, args...)
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_DEBUG */
+
+#define __a4l_dev_name(dev) 						\
+	(dev->driver == NULL) ? "unattached dev" : dev->driver->board_name
+
+#define a4l_err(dev, fmt, args...) 					\
+	__a4l_err("%s: " fmt, __a4l_dev_name(dev), ##args)
+
+#define a4l_warn(dev, fmt, args...) 					\
+	__a4l_warn("%s: " fmt, __a4l_dev_name(dev), ##args)
+
+#define a4l_info(dev, fmt, args...) 					\
+	__a4l_info("%s: " fmt, __a4l_dev_name(dev), ##args)
+
+#define a4l_dbg(level, debug, dev, fmt, args...)			\
+	__a4l_dbg(level, debug, "%s: " fmt, __a4l_dev_name(dev), ##args)
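+
+/*
+ * Example (sketch, hypothetical message): a driver-level debug trace
+ * gated by the configured driver debug level.
+ *
+ *	a4l_dbg(1, drv_dbg, dev, "channel %d armed\n", chan);
+ */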
+
+
+/* --- Time section --- */
+static inline void a4l_udelay(unsigned int us)
+{
+	rtdm_task_busy_sleep(((nanosecs_rel_t) us) * 1000);
+}
+
+/* Function which gives absolute time */
+nanosecs_abs_t a4l_get_time(void);
+
+/* Function for setting up the absolute time recovery */
+void a4l_init_time(void);
+
+/* --- IRQ section --- */
+#define A4L_IRQ_DISABLED 0
+
+typedef int (*a4l_irq_hdlr_t) (unsigned int irq, void *d);
+
+struct a4l_irq_descriptor {
+	/* These fields are needed to invoke the IRQ trampoline;
+	   that is why a dedicated structure is defined. */
+	a4l_irq_hdlr_t handler;
+	unsigned int irq;
+	void *cookie;
+	rtdm_irq_t rtdm_desc;
+};
+
+int __a4l_request_irq(struct a4l_irq_descriptor * dsc,
+		      unsigned int irq,
+		      a4l_irq_hdlr_t handler,
+		      unsigned long flags, void *cookie);
+int __a4l_free_irq(struct a4l_irq_descriptor * dsc);
+
+/* --- Synchronization section --- */
+#define __NRT_WAITER 1
+#define __RT_WAITER 2
+#define __EVT_PDING 3
+
+struct a4l_sync {
+	unsigned long status;
+	rtdm_event_t rtdm_evt;
+	rtdm_nrtsig_t nrt_sig;
+	wait_queue_head_t wq;
+};
+
+#define a4l_select_sync(snc, slr, type, fd) \
+	rtdm_event_select(&((snc)->rtdm_evt), slr, type, fd)
+
+int a4l_init_sync(struct a4l_sync * snc);
+void a4l_cleanup_sync(struct a4l_sync * snc);
+void a4l_flush_sync(struct a4l_sync * snc);
+int a4l_wait_sync(struct a4l_sync * snc, int rt);
+int a4l_timedwait_sync(struct a4l_sync * snc,
+		       int rt, unsigned long long ns_timeout);
+void a4l_signal_sync(struct a4l_sync * snc);
+
+#endif /* !_COBALT_RTDM_ANALOGY_RTDM_HELPERS_H */
+++ linux-patched/include/xenomai/rtdm/analogy/subdevice.h	2022-03-21 12:58:31.835865109 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/context.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Analogy for Linux, subdevice related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_SUBDEVICE_H
+#define _COBALT_RTDM_ANALOGY_SUBDEVICE_H
+
+#include <linux/list.h>
+#include <rtdm/analogy/instruction.h>
+#include <rtdm/analogy/command.h>
+#include <rtdm/analogy/channel_range.h>
+
+/* --- Subdevice descriptor structure --- */
+
+struct a4l_device;
+struct a4l_buffer;
+
+/*!
+ * @brief Structure describing the subdevice
+ * @see a4l_add_subd()
+ */
+
+struct a4l_subdevice {
+
+	struct list_head list;
+			   /**< Entry in the device's subdevice list */
+
+	struct a4l_device *dev;
+			       /**< Containing device */
+
+	unsigned int idx;
+		      /**< Subdevice index */
+
+	struct a4l_buffer *buf;
+			       /**< Linked buffer */
+
+	/* Subdevice's status (busy, linked?) */
+	unsigned long status;
+			     /**< Subdevice's status */
+
+	/* Descriptors stuff */
+	unsigned long flags;
+			 /**< Type flags */
+	struct a4l_channels_desc *chan_desc;
+				/**< Tab of channels descriptors pointers */
+	struct a4l_rngdesc *rng_desc;
+				/**< Tab of ranges descriptors pointers */
+	struct a4l_cmd_desc *cmd_mask;
+			    /**< Command capabilities mask */
+
+	/* Functions stuff */
+	int (*insn_read) (struct a4l_subdevice *, struct a4l_kernel_instruction *);
+							/**< Callback for the instruction "read" */
+	int (*insn_write) (struct a4l_subdevice *, struct a4l_kernel_instruction *);
+							 /**< Callback for the instruction "write" */
+	int (*insn_bits) (struct a4l_subdevice *, struct a4l_kernel_instruction *);
+							/**< Callback for the instruction "bits" */
+	int (*insn_config) (struct a4l_subdevice *, struct a4l_kernel_instruction *);
+							  /**< Callback for the configuration instruction */
+	int (*do_cmd) (struct a4l_subdevice *, struct a4l_cmd_desc *);
+					/**< Callback for command handling */
+	int (*do_cmdtest) (struct a4l_subdevice *, struct a4l_cmd_desc *);
+						       /**< Callback for command checking */
+	void (*cancel) (struct a4l_subdevice *);
+					 /**< Callback for asynchronous transfer cancellation */
+	void (*munge) (struct a4l_subdevice *, void *, unsigned long);
+								/**< Callback for munge operation */
+	int (*trigger) (struct a4l_subdevice *, lsampl_t);
+					      /**< Callback for trigger operation */
+
+	char priv[0];
+		  /**< Private data */
+};
+
+/* --- Subdevice related functions and macros --- */
+
+struct a4l_channel *a4l_get_chfeat(struct a4l_subdevice * sb, int idx);
+struct a4l_range *a4l_get_rngfeat(struct a4l_subdevice * sb, int chidx, int rngidx);
+int a4l_check_chanlist(struct a4l_subdevice * subd,
+		       unsigned char nb_chan, unsigned int *chans);
+
+#define a4l_subd_is_input(x) ((A4L_SUBD_MASK_READ & (x)->flags) != 0)
+/* The following macro considers a DIO subdevice to be primarily an
+   output subdevice */
+#define a4l_subd_is_output(x) \
+	((A4L_SUBD_MASK_WRITE & (x)->flags) != 0 || \
+	 (A4L_SUBD_DIO & (x)->flags) != 0)
+
+/* --- Upper layer functions --- */
+
+struct a4l_subdevice * a4l_get_subd(struct a4l_device *dev, int idx);
+struct a4l_subdevice * a4l_alloc_subd(int sizeof_priv,
+			    void (*setup)(struct a4l_subdevice *));
+int a4l_add_subd(struct a4l_device *dev, struct a4l_subdevice * subd);
+int a4l_ioctl_subdinfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_chaninfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_rnginfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_nbchaninfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_nbrnginfo(struct a4l_device_context * cxt, void *arg);
+
+#endif /* !_COBALT_RTDM_ANALOGY_SUBDEVICE_H */
+++ linux-patched/include/xenomai/rtdm/analogy/context.h	2022-03-21 12:58:31.827865187 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/instruction.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, context structure / macros declarations
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_CONTEXT_H
+#define _COBALT_RTDM_ANALOGY_CONTEXT_H
+
+#include <rtdm/driver.h>
+
+struct a4l_device;
+struct a4l_buffer;
+
+struct a4l_device_context {
+	/* The device pointer
+	   (retrieved from the minor number at open time) */
+	struct a4l_device *dev;
+
+	/* The buffer structure contains everything to transfer data
+	   from asynchronous acquisition operations on a specific
+	   subdevice */
+	struct a4l_buffer *buffer;
+};
+
+static inline int a4l_get_minor(struct a4l_device_context *cxt)
+{
+	/* Get a pointer on the container structure */
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	/* Get the minor index */
+	return rtdm_fd_minor(fd);
+}
+
+#endif /* !_COBALT_RTDM_ANALOGY_CONTEXT_H */
+++ linux-patched/include/xenomai/rtdm/analogy/instruction.h	2022-03-21 12:58:31.820865256 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/channel_range.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, instruction related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_INSTRUCTION_H
+#define _COBALT_RTDM_ANALOGY_INSTRUCTION_H
+
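+/*
+ * Kernel-side view of a single instruction: data points to the kernel
+ * copy of the instruction payload, while __udata presumably keeps the
+ * original user-space pointer it was copied from (judging by the __u
+ * prefix, as with __uinsns below).
+ */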
+struct a4l_kernel_instruction {
+	unsigned int type;
+	unsigned int idx_subd;
+	unsigned int chan_desc;
+	unsigned int data_size;
+	void *data;
+	void *__udata;
+};
+
+struct a4l_kernel_instruction_list {
+	unsigned int count;
+	struct a4l_kernel_instruction *insns;
+	a4l_insn_t *__uinsns;
+};
+
+/* Instruction related functions */
+
+/* Upper layer functions */
+int a4l_ioctl_insnlist(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_insn(struct a4l_device_context * cxt, void *arg);
+
+#endif /* !_COBALT_RTDM_ANALOGY_INSTRUCTION_H */
+++ linux-patched/include/xenomai/rtdm/analogy/channel_range.h	2022-03-21 12:58:31.812865334 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/driver.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Analogy for Linux, channel, range related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_CHANNEL_RANGE_H
+#define _COBALT_RTDM_ANALOGY_CHANNEL_RANGE_H
+
+#include <rtdm/uapi/analogy.h>
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_channel_range Channels and ranges
+ *
+ * Channels
+ *
+ * According to the Analogy nomenclature, the channel is the elementary
+ * acquisition entity. One channel is expected to acquire one data
+ * item at a time. A channel can be:
+ * - an analog input or an analog output;
+ * - a digital input or a digital output;
+ *
+ * Channels are defined by their type and by some other
+ * characteristics like:
+ * - their resolutions for analog channels (which usually range from
+     8 to 32 bits);
+ * - their references;
+ *
+ * Such parameters must be declared for each channel composing a
+ * subdevice. The structure a4l_channel (struct a4l_channel) is used to
+ * define one channel.
+ *
+ * Another structure named a4l_channels_desc (struct a4l_channels_desc)
+ * gathers all channels for a specific subdevice. This latter
+ * structure also stores:
+ * - the channels count;
+ * - the channels declaration mode (A4L_CHAN_GLOBAL_CHANDESC or
+     A4L_CHAN_PERCHAN_CHANDESC): if all the channels composing a
+     subdevice are identical, there is no need to declare the
+     parameters for each channel; the global declaration mode eases
+     the structure composition.
+ *
+ * Usually the channels descriptor looks like this:
+ * <tt> @verbatim
+struct a4l_channels_desc example_chan = {
+	mode: A4L_CHAN_GLOBAL_CHANDESC, -> Global declaration
+					      mode is set
+	length: 8, -> 8 channels
+	chans: {
+		{A4L_CHAN_AREF_GROUND, 16}, -> Each channel is 16 bits
+						  wide with the ground as
+						  reference
+	},
+};
+@endverbatim </tt>
+ *
+ * Ranges
+ *
+ * So as to perform conversion from logical values acquired by the
+ * device to physical units, some range structure(s) must be declared
+ * on the driver side.
+ *
+ * Such structures contain:
+ * - the physical unit type (Volt, Ampere, none);
+ * - the minimal and maximal values;
+ *
+ * These range structures must be associated with the channels at
+ * subdevice registration time as a channel can work with many
+ * ranges. At configuration time (thanks to an Analogy command), one
+ * range will be selected for each enabled channel.
+ *
+ * Consequently, for each channel, the developer must declare all the
+ * possible ranges in a structure called struct a4l_rngtab. Here is an
+ * example:
+ * <tt> @verbatim
+struct a4l_rngtab example_tab = {
+    length: 2,
+    rngs: {
+	RANGE_V(-5,5),
+	RANGE_V(-10,10),
+    },
+};
+@endverbatim </tt>
+ *
+ * For each subdevice, a specific structure is designed to gather all
+ * the ranges tabs of all the channels. In this structure, called
+ * struct a4l_rngdesc, three fields must be filled:
+ * - the declaration mode (A4L_RNG_GLOBAL_RNGDESC or
+ *   A4L_RNG_PERCHAN_RNGDESC);
+ * - the number of ranges tabs;
+ * - the tab of ranges tabs pointers;
+ *
+ * Most of the time, the channels which belong to the same subdevice
+ * use the same set of ranges. So, there is no need to declare the
+ * same ranges for each channel. A macro is defined to prevent
+ * redundant declarations: RNG_GLOBAL().
+ *
+ * Here is an example:
+ * <tt> @verbatim
+struct a4l_rngdesc example_rng = RNG_GLOBAL(example_tab);
+@endverbatim </tt>
+ *
+ * @{
+ */
+
+
+/* --- Channel section --- */
+
+/*!
+ * @anchor A4L_CHAN_AREF_xxx @name Channel reference
+ * @brief Flags to define the channel's reference
+ * @{
+ */
+
+/**
+ * Ground reference
+ */
+#define A4L_CHAN_AREF_GROUND 0x1
+/**
+ * Common reference
+ */
+#define A4L_CHAN_AREF_COMMON 0x2
+/**
+ * Differential reference
+ */
+#define A4L_CHAN_AREF_DIFF 0x4
+/**
+ * Misc reference
+ */
+#define A4L_CHAN_AREF_OTHER 0x8
+
+	  /*! @} A4L_CHAN_AREF_xxx */
+
+/**
+ * Internal use flag (must not be used by driver developer)
+ */
+#define A4L_CHAN_GLOBAL 0x10
+
+/*!
+ * @brief Structure describing some channel's characteristics
+ */
+
+struct a4l_channel {
+	unsigned long flags; /*!< Channel flags to define the reference. */
+	unsigned long nb_bits; /*!< Channel resolution. */
+};
+
+/*!
+ * @anchor A4L_CHAN_xxx @name Channels declaration mode
+ * @brief Constant to define whether the channels in a descriptor are
+ * identical
+ * @{
+ */
+
+/**
+ * Global declaration, the set contains channels with similar
+ * characteristics
+ */
+#define A4L_CHAN_GLOBAL_CHANDESC 0
+/**
+ * Per channel declaration, the descriptor gathers different channels
+ */
+#define A4L_CHAN_PERCHAN_CHANDESC 1
+
+	  /*! @} A4L_CHAN_xxx */
+
+/*!
+ * @brief Structure describing a channels set
+ */
+
+struct a4l_channels_desc {
+	unsigned long mode; /*!< Declaration mode (global or per channel) */
+	unsigned long length; /*!< Channels count */
+	struct a4l_channel chans[]; /*!< Channels tab */
+};
+
+/**
+ * Internal use flag (must not be used by driver developer)
+ */
+#define A4L_RNG_GLOBAL 0x8
+
+/*!
+ * @brief Structure describing a (unique) range
+ */
+
+struct a4l_range {
+	long min; /*!< Minimal value */
+	long max; /*!< Maximal value */
+	unsigned long flags; /*!< Range flags (unit, etc.) */
+};
+
+/**
+ * Macro to declare a (unique) range with no unit defined
+ */
+#define RANGE(x,y) {(x * A4L_RNG_FACTOR), (y * A4L_RNG_FACTOR),	\
+			A4L_RNG_NO_UNIT}
+/**
+ * Macro to declare a (unique) range in Volt
+ */
+#define RANGE_V(x,y) {(x * A4L_RNG_FACTOR),(y * A4L_RNG_FACTOR), \
+			A4L_RNG_VOLT_UNIT}
+/**
+ * Macro to declare a (unique) range in milliAmpere
+ */
+#define RANGE_mA(x,y) {(x * A4L_RNG_FACTOR),(y * A4L_RNG_FACTOR), \
+			A4L_RNG_MAMP_UNIT}
+/**
+ * Macro to declare a (unique) range in some external reference
+ */
+#define RANGE_ext(x,y) {(x * A4L_RNG_FACTOR),(y * A4L_RNG_FACTOR), \
+			A4L_RNG_EXT_UNIT}
+
+
+/* Ranges tab descriptor */
+struct a4l_rngtab {
+	unsigned char length;
+	struct a4l_range rngs[];
+};
+
+/**
+ * Constant to define a ranges descriptor as global (inter-channel)
+ */
+#define A4L_RNG_GLOBAL_RNGDESC 0
+/**
+ * Constant to define a ranges descriptor as specific for a channel
+ */
+#define A4L_RNG_PERCHAN_RNGDESC 1
+
+/* Global ranges descriptor */
+struct a4l_rngdesc {
+	unsigned char mode;
+	unsigned char length;
+	struct a4l_rngtab *rngtabs[];
+};
+
+/**
+ * Macro to declare a ranges global descriptor in one line
+ */
+#define RNG_GLOBAL(x) {			\
+	.mode = A4L_RNG_GLOBAL_RNGDESC,	\
+	.length =  1,			\
+	.rngtabs = {&(x)},		\
+}
+
+extern struct a4l_rngdesc a4l_range_bipolar10;
+extern struct a4l_rngdesc a4l_range_bipolar5;
+extern struct a4l_rngdesc a4l_range_unipolar10;
+extern struct a4l_rngdesc a4l_range_unipolar5;
+extern struct a4l_rngdesc a4l_range_unknown;
+extern struct a4l_rngdesc a4l_range_fake;
+
+#define range_digital a4l_range_unipolar5
+
+/*! @} channelrange */
+
+#endif /* !_COBALT_RTDM_ANALOGY_CHANNEL_RANGE_H */
+++ linux-patched/include/xenomai/rtdm/analogy/driver.h	2022-03-21 12:58:31.805865402 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/device.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Analogy for Linux, driver facilities
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_DRIVER_H
+#define _COBALT_RTDM_ANALOGY_DRIVER_H
+
+#include <linux/list.h>
+#include <rtdm/analogy/rtdm_helpers.h>
+#include <rtdm/analogy/context.h>
+#include <rtdm/analogy/buffer.h>
+
+struct seq_file;
+struct a4l_link_desc;
+struct a4l_device;
+
+/** Structure containing driver declaration data.
+ *
+ *  @see a4l_register_drv()
+ */
+/* Analogy driver descriptor */
+struct a4l_driver {
+
+	/* List stuff */
+	struct list_head list;
+			   /**< List stuff */
+
+	/* Visible description stuff */
+	struct module *owner;
+	               /**< Pointer to module containing the code */
+	unsigned int flags;
+	               /**< Type / status driver's flags */
+	char *board_name;
+		       /**< Board name */
+	char *driver_name;
+	               /**< driver name */
+	int privdata_size;
+		       /**< Size of the driver's private data */
+
+	/* Init/destroy procedures */
+	int (*attach) (struct a4l_device *, struct a4l_link_desc *);
+								      /**< Attach procedure */
+	int (*detach) (struct a4l_device *);
+				   /**< Detach procedure */
+
+};
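+
+/*
+ * Illustrative declaration sketch (hypothetical my_* names):
+ *
+ *	static struct a4l_driver my_drv = {
+ *		.owner = THIS_MODULE,
+ *		.board_name = "my_board",
+ *		.driver_name = "my_drv",
+ *		.privdata_size = sizeof(struct my_priv),
+ *		.attach = my_attach,
+ *		.detach = my_detach,
+ *	};
+ *
+ * Such a descriptor would typically be passed to a4l_register_drv()
+ * from the module init routine and removed with a4l_unregister_drv()
+ * on exit.
+ */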
+
+/* Driver list related functions */
+
+int a4l_register_drv(struct a4l_driver * drv);
+int a4l_unregister_drv(struct a4l_driver * drv);
+int a4l_lct_drv(char *pin, struct a4l_driver ** pio);
+#ifdef CONFIG_PROC_FS
+int a4l_rdproc_drvs(struct seq_file *p, void *data);
+#endif /* CONFIG_PROC_FS */
+
+#endif /* !_COBALT_RTDM_ANALOGY_DRIVER_H */
+++ linux-patched/include/xenomai/rtdm/analogy/device.h	2022-03-21 12:58:31.797865480 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/buffer.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, device related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_DEVICE_H
+#define _COBALT_RTDM_ANALOGY_DEVICE_H
+
+#include <rtdm/analogy/rtdm_helpers.h>
+#include <rtdm/analogy/transfer.h>
+#include <rtdm/analogy/driver.h>
+
+#define A4L_NB_DEVICES 10
+
+#define A4L_DEV_ATTACHED_NR 0
+
+struct a4l_device {
+
+	/* Spinlock for global device use */
+	rtdm_lock_t lock;
+
+	/* Device specific flags */
+	unsigned long flags;
+
+	/* Driver assigned to this device by the attach
+	   procedure */
+	struct a4l_driver *driver;
+
+	/* Hidden description stuff */
+	struct list_head subdvsq;
+
+	/* Context-dependent stuff */
+	struct a4l_transfer transfer;
+
+	/* Private data for the driver's own use */
+	void *priv;
+};
+
+/* --- Devices tab related functions --- */
+void a4l_init_devs(void);
+int a4l_check_cleanup_devs(void);
+int a4l_rdproc_devs(struct seq_file *p, void *data);
+
+/* --- Context related function / macro --- */
+void a4l_set_dev(struct a4l_device_context *cxt);
+#define a4l_get_dev(x) ((x)->dev)
+
+/* --- Upper layer functions --- */
+int a4l_ioctl_devcfg(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_devinfo(struct a4l_device_context * cxt, void *arg);
+
+#endif /* !_COBALT_RTDM_ANALOGY_DEVICE_H */
+++ linux-patched/include/xenomai/rtdm/analogy/buffer.h	2022-03-21 12:58:31.790865548 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/command.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, buffer related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_BUFFER_H
+#define _COBALT_RTDM_ANALOGY_BUFFER_H
+
+#include <linux/version.h>
+#include <linux/mm.h>
+#include <rtdm/driver.h>
+#include <rtdm/uapi/analogy.h>
+#include <rtdm/analogy/rtdm_helpers.h>
+#include <rtdm/analogy/context.h>
+#include <rtdm/analogy/command.h>
+#include <rtdm/analogy/subdevice.h>
+
+/* --- Events bits / flags --- */
+
+#define A4L_BUF_EOBUF_NR 0
+#define A4L_BUF_EOBUF (1 << A4L_BUF_EOBUF_NR)
+
+#define A4L_BUF_ERROR_NR 1
+#define A4L_BUF_ERROR (1 << A4L_BUF_ERROR_NR)
+
+#define A4L_BUF_EOA_NR 2
+#define A4L_BUF_EOA (1 << A4L_BUF_EOA_NR)
+
+/* --- Status bits / flags --- */
+
+#define A4L_BUF_BULK_NR 8
+#define A4L_BUF_BULK (1 << A4L_BUF_BULK_NR)
+
+#define A4L_BUF_MAP_NR 9
+#define A4L_BUF_MAP (1 << A4L_BUF_MAP_NR)
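+
+/*
+ * Note: the event bits above (EOBUF, ERROR, EOA) are typically raised
+ * by drivers through a4l_buf_evt() and translated into error codes
+ * for the client by __handle_event() below; the status bits qualify
+ * the buffer itself (A4L_BUF_BULK and A4L_BUF_MAP, presumably bulk
+ * transfer mode and mmap'ed buffer).
+ */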
+
+
+/* Buffer descriptor structure */
+struct a4l_buffer {
+
+	/* Subdevice to which this buffer is attached */
+	struct a4l_subdevice *subd;
+
+	/* Buffer's first virtual page pointer */
+	void *buf;
+
+	/* Buffer's global size */
+	unsigned long size;
+	/* Tab containing buffer's pages pointers */
+	unsigned long *pg_list;
+
+	/* RT/NRT synchronization element */
+	struct a4l_sync sync;
+
+	/* Counters needed for transfer */
+	unsigned long end_count;
+	unsigned long prd_count;
+	unsigned long cns_count;
+	unsigned long tmp_count;
+
+	/* Status + events occurring during transfer */
+	unsigned long flags;
+
+	/* Command in progress */
+	struct a4l_cmd_desc *cur_cmd;
+
+	/* Munge counter */
+	unsigned long mng_count;
+
+	/* Threshold below which the user process should not be
+	   awakened */
+	unsigned long wake_count;
+};
+
+static inline void __dump_buffer_counters(struct a4l_buffer *buf)
+{
+	__a4l_dbg(1, core_dbg, "a4l_buffer=0x%p, p=0x%p \n", buf, buf->buf);
+	__a4l_dbg(1, core_dbg, "end=%06ld, prd=%06ld, cns=%06ld, tmp=%06ld \n",
+		buf->end_count, buf->prd_count, buf->cns_count, buf->tmp_count);
+}
+
+/* --- Static inline functions related with
+   user<->kernel data transfers --- */
+
+/* The inline function __produce copies data into the asynchronous
+   buffer, handling wrap-around by looping over contiguous chunks.
+   It is used in both read and write operations */
+static inline int __produce(struct a4l_device_context *cxt,
+			    struct a4l_buffer *buf, void *pin, unsigned long count)
+{
+	unsigned long start_ptr = (buf->prd_count % buf->size);
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	unsigned long tmp_cnt = count;
+	int ret = 0;
+
+	while (ret == 0 && tmp_cnt != 0) {
+		/* Check the data copy can be performed contiguously */
+		unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
+			buf->size - start_ptr : tmp_cnt;
+
+		/* Perform the copy */
+		if (cxt == NULL)
+			memcpy(buf->buf + start_ptr, pin, blk_size);
+		else
+			ret = rtdm_safe_copy_from_user(fd,
+						       buf->buf + start_ptr,
+						       pin, blk_size);
+
+		/* Update pointers/counts */
+		pin += blk_size;
+		tmp_cnt -= blk_size;
+		start_ptr = 0;
+	}
+
+	return ret;
+}
+
+/* The inline function __consume copies data from the asynchronous
+   buffer, handling wrap-around by looping over contiguous chunks.
+   It is used in both read and write operations */
+static inline int __consume(struct a4l_device_context *cxt,
+			    struct a4l_buffer *buf, void *pout, unsigned long count)
+{
+	unsigned long start_ptr = (buf->cns_count % buf->size);
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	unsigned long tmp_cnt = count;
+	int ret = 0;
+
+	while (ret == 0 && tmp_cnt != 0) {
+		/* Check the data copy can be performed contiguously */
+		unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
+			buf->size - start_ptr : tmp_cnt;
+
+		/* Perform the copy */
+		if (cxt == NULL)
+			memcpy(pout, buf->buf + start_ptr, blk_size);
+		else
+			ret = rtdm_safe_copy_to_user(fd,
+						     pout,
+						     buf->buf + start_ptr,
+						     blk_size);
+
+		/* Update pointers/counts */
+		pout += blk_size;
+		tmp_cnt -= blk_size;
+		start_ptr = 0;
+	}
+
+	return ret;
+}
+
+/* The function __munge is an inline function which calls the
+   subdevice specific munge callback on contiguous windows within the
+   whole buffer. This function is used in read and write operations */
+static inline void __munge(struct a4l_subdevice * subd,
+			   void (*munge) (struct a4l_subdevice *,
+					  void *, unsigned long),
+			   struct a4l_buffer * buf, unsigned long count)
+{
+	unsigned long start_ptr = (buf->mng_count % buf->size);
+	unsigned long tmp_cnt = count;
+
+	while (tmp_cnt != 0) {
+		/* Check the data copy can be performed contiguously */
+		unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
+			buf->size - start_ptr : tmp_cnt;
+
+		/* Perform the munge operation */
+		munge(subd, buf->buf + start_ptr, blk_size);
+
+		/* Update the start pointer and the count */
+		tmp_cnt -= blk_size;
+		start_ptr = 0;
+	}
+}
+
+/* The function __handle_event can only be called from process context
+   (not interrupt service routine). It allows the client process to
+   retrieve the buffer status which has been updated by the driver */
+static inline int __handle_event(struct a4l_buffer * buf)
+{
+	int ret = 0;
+
+	/* The event "End of acquisition" must not be cleared
+	   before the buffer has been completely flushed */
+	if (test_bit(A4L_BUF_EOA_NR, &buf->flags))
+		ret = -ENOENT;
+
+	if (test_bit(A4L_BUF_ERROR_NR, &buf->flags))
+		ret = -EPIPE;
+
+	return ret;
+}
+
+/* --- Counters management functions --- */
+
+/* Here, we may wonder why we need more than two counters / pointers.
+
+   Theoretically, we only need two counters (or two pointers):
+   - one which tells where the reader should be within the buffer
+   - one which tells where the writer should be within the buffer
+
+   With these two counters (or pointers), we just have to check that
+   the writer does not overtake the reader inside the ring buffer
+   BEFORE any read / write operations.
+
+   However, if one element is a DMA controller, we have to be more
+   careful. Generally a DMA transfer occurs like this:
+   DMA shot
+      |-> then DMA interrupt
+	 |-> then DMA soft handler which checks the counter
+
+   So, the checks occur AFTER the write operations.
+
+   Let's take an example: the reader is a software task and the writer
+   is a DMA controller. At the end of the DMA shot, the write counter
+   is higher than the read counter. Unfortunately, a read operation
+   occurs between the DMA shot and the DMA interrupt, so the handler
+   will not notice that an overflow occurred.
+
+   That is why tmp_count comes into play: tmp_count records the
+   read/consumer current counter before the next DMA shot and once the
+   next DMA shot is done, we check that the updated writer/producer
+   counter is not higher than tmp_count. Thus we are sure that the DMA
+   writer has not overtaken the reader because it was not able to
+   overtake the n-1 value. */
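+
+/* For instance, with a 100-byte buffer: if the consumer position
+   recorded in tmp_count was 50 and the next DMA shot would bring the
+   absolute producer count to 160, then 160 - 50 > 100 and
+   __pre_abs_put() below flags the overrun (A4L_BUF_ERROR, -EPIPE). */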
+
+static inline int __pre_abs_put(struct a4l_buffer * buf, unsigned long count)
+{
+	if (count - buf->tmp_count > buf->size) {
+		set_bit(A4L_BUF_ERROR_NR, &buf->flags);
+		return -EPIPE;
+	}
+
+	buf->tmp_count = buf->cns_count;
+
+	return 0;
+}
+
+static inline int __pre_put(struct a4l_buffer * buf, unsigned long count)
+{
+	return __pre_abs_put(buf, buf->tmp_count + count);
+}
+
+static inline int __pre_abs_get(struct a4l_buffer * buf, unsigned long count)
+{
+	/* The first time, we expect the buffer to be properly filled
+	before the trigger occurrence; in any case, tmp_count needs to
+	have been initialized, and it is updated right here */
+	if (buf->tmp_count == 0 || buf->cns_count == 0)
+		goto out;
+
+	/* At the end of the acquisition, the user application has
+	written the defined amount of data into the buffer; so the
+	last time, the DMA channel can easily overtake the tmp
+	frontier because no more data were sent from user space;
+	therefore no useless alarm should be sent */
+	if (buf->end_count != 0 && (long)(count - buf->end_count) > 0)
+		goto out;
+
+	/* Once these exceptions are passed, we check that the DMA
+	transfer has not overtaken the last record of the production
+	count (tmp_count was updated with prd_count the last time
+	__pre_abs_get was called). We must understand that we cannot
+	compare the current DMA count with the current production
+	count because even if, right now, the production count is
+	higher than the DMA count, it does not mean that the DMA count
+	was not greater a few cycles before; in such case, the DMA
+	channel would have retrieved the wrong data */
+	if ((long)(count - buf->tmp_count) > 0) {
+		set_bit(A4L_BUF_ERROR_NR, &buf->flags);
+		return -EPIPE;
+	}
+
+out:
+	buf->tmp_count = buf->prd_count;
+
+	return 0;
+}
+
+static inline int __pre_get(struct a4l_buffer * buf, unsigned long count)
+{
+	return __pre_abs_get(buf, buf->tmp_count + count);
+}
+
+static inline int __abs_put(struct a4l_buffer * buf, unsigned long count)
+{
+	unsigned long old = buf->prd_count;
+
+	if ((long)(buf->prd_count - count) >= 0)
+		return -EINVAL;
+
+	buf->prd_count = count;
+
+	if ((old / buf->size) != (count / buf->size))
+		set_bit(A4L_BUF_EOBUF_NR, &buf->flags);
+
+	if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0)
+		set_bit(A4L_BUF_EOA_NR, &buf->flags);
+
+	return 0;
+}
+
+static inline int __put(struct a4l_buffer * buf, unsigned long count)
+{
+	return __abs_put(buf, buf->prd_count + count);
+}
+
+static inline int __abs_get(struct a4l_buffer * buf, unsigned long count)
+{
+	unsigned long old = buf->cns_count;
+
+	if ((long)(buf->cns_count - count) >= 0)
+		return -EINVAL;
+
+	buf->cns_count = count;
+
+	if ((old / buf->size) != count / buf->size)
+		set_bit(A4L_BUF_EOBUF_NR, &buf->flags);
+
+	if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0)
+		set_bit(A4L_BUF_EOA_NR, &buf->flags);
+
+	return 0;
+}
+
+static inline int __get(struct a4l_buffer * buf, unsigned long count)
+{
+	return __abs_get(buf, buf->cns_count + count);
+}
+
+static inline unsigned long __count_to_put(struct a4l_buffer * buf)
+{
+	unsigned long ret;
+
+	if ((long) (buf->size + buf->cns_count - buf->prd_count) > 0)
+		ret = buf->size + buf->cns_count - buf->prd_count;
+	else
+		ret = 0;
+
+	return ret;
+}
+
+static inline unsigned long __count_to_get(struct a4l_buffer * buf)
+{
+	unsigned long ret;
+
+	/* If the acquisition is unlimited (end_count == 0), we must
+	   not take into account end_count */
+	if (buf->end_count == 0 || (long)(buf->end_count - buf->prd_count) > 0)
+		ret = buf->prd_count;
+	else
+		ret = buf->end_count;
+
+	if ((long)(ret - buf->cns_count) > 0)
+		ret -= buf->cns_count;
+	else
+		ret = 0;
+
+	return ret;
+}
+
+static inline unsigned long __count_to_end(struct a4l_buffer * buf)
+{
+	unsigned long ret = buf->end_count - buf->cns_count;
+
+	if (buf->end_count == 0)
+		return ULONG_MAX;
+
+	return ((long)ret) < 0 ? 0 : ret;
+}
+
+/* --- Buffer internal functions --- */
+
+int a4l_alloc_buffer(struct a4l_buffer *buf_desc, int buf_size);
+
+void a4l_free_buffer(struct a4l_buffer *buf_desc);
+
+void a4l_init_buffer(struct a4l_buffer * buf_desc);
+
+void a4l_cleanup_buffer(struct a4l_buffer * buf_desc);
+
+int a4l_setup_buffer(struct a4l_device_context *cxt, struct a4l_cmd_desc *cmd);
+
+void a4l_cancel_buffer(struct a4l_device_context *cxt);
+
+int a4l_buf_prepare_absput(struct a4l_subdevice *subd,
+			   unsigned long count);
+
+int a4l_buf_commit_absput(struct a4l_subdevice *subd,
+			  unsigned long count);
+
+int a4l_buf_prepare_put(struct a4l_subdevice *subd,
+			unsigned long count);
+
+int a4l_buf_commit_put(struct a4l_subdevice *subd,
+		       unsigned long count);
+
+int a4l_buf_put(struct a4l_subdevice *subd,
+		void *bufdata, unsigned long count);
+
+int a4l_buf_prepare_absget(struct a4l_subdevice *subd,
+			   unsigned long count);
+
+int a4l_buf_commit_absget(struct a4l_subdevice *subd,
+			  unsigned long count);
+
+int a4l_buf_prepare_get(struct a4l_subdevice *subd,
+			unsigned long count);
+
+int a4l_buf_commit_get(struct a4l_subdevice *subd,
+		       unsigned long count);
+
+int a4l_buf_get(struct a4l_subdevice *subd,
+		void *bufdata, unsigned long count);
+
+int a4l_buf_evt(struct a4l_subdevice *subd, unsigned long evts);
+
+unsigned long a4l_buf_count(struct a4l_subdevice *subd);
+
+/* --- Current Command management function --- */
+
+static inline struct a4l_cmd_desc *a4l_get_cmd(struct a4l_subdevice *subd)
+{
+	return (subd->buf) ? subd->buf->cur_cmd : NULL;
+}
+
+/* --- Munge related function --- */
+
+int a4l_get_chan(struct a4l_subdevice *subd);
+
+/* --- IOCTL / FOPS functions --- */
+
+int a4l_ioctl_mmap(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_bufcfg(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_bufcfg2(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_bufinfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_bufinfo2(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_poll(struct a4l_device_context * cxt, void *arg);
+ssize_t a4l_read_buffer(struct a4l_device_context * cxt, void *bufdata, size_t nbytes);
+ssize_t a4l_write_buffer(struct a4l_device_context * cxt, const void *bufdata, size_t nbytes);
+int a4l_select(struct a4l_device_context *cxt,
+	       rtdm_selector_t *selector,
+	       enum rtdm_selecttype type, unsigned fd_index);
+
+#endif /* !_COBALT_RTDM_ANALOGY_BUFFER_H */
+++ linux-patched/include/xenomai/rtdm/analogy/command.h	2022-03-21 12:58:31.783865616 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/transfer.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_COMMAND_H
+#define _COBALT_RTDM_ANALOGY_COMMAND_H
+
+#include <rtdm/uapi/analogy.h>
+#include <rtdm/analogy/context.h>
+
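+/*
+ * Decoding helpers for a packed channel descriptor: judging by the
+ * shifts and masks below, the range index sits in bits 16-23 and the
+ * analog reference in bits 24-27, while CR_CHAN() retrieves the
+ * channel index itself.
+ */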
+#define CR_CHAN(a) CHAN(a)
+#define CR_RNG(a) (((a)>>16)&0xff)
+#define CR_AREF(a) (((a)>>24)&0xf)
+
+/* --- Command related function --- */
+void a4l_free_cmddesc(struct a4l_cmd_desc * desc);
+
+/* --- Upper layer functions --- */
+int a4l_check_cmddesc(struct a4l_device_context * cxt, struct a4l_cmd_desc * desc);
+int a4l_ioctl_cmd(struct a4l_device_context * cxt, void *arg);
+
+#endif /* !_COBALT_RTDM_ANALOGY_COMMAND_H */
+++ linux-patched/include/xenomai/rtdm/analogy/transfer.h	2022-03-21 12:58:31.775865694 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/can.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, transfer related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_TRANSFER_H
+#define _COBALT_RTDM_ANALOGY_TRANSFER_H
+
+#include <rtdm/analogy/buffer.h>
+
+/* IRQ types */
+#define A4L_IRQ_DISABLED 0
+
+/* Fields init values */
+#define A4L_IRQ_UNUSED (unsigned int)((unsigned short)(~0))
+#define A4L_IDX_UNUSED (unsigned int)(~0)
+
+/* TODO: IRQ handling must leave transfer for os_facilities */
+
+struct a4l_device;
+/* Analogy transfer descriptor */
+struct a4l_transfer {
+
+	/* Subdevices desc */
+	unsigned int nb_subd;
+	struct a4l_subdevice **subds;
+
+	/* Buffer stuff: the default size */
+	unsigned int default_bufsize;
+
+	/* IRQ in use */
+	/* TODO: irq_desc should vanish */
+	struct a4l_irq_descriptor irq_desc;
+};
+
+/* --- Proc function --- */
+
+int a4l_rdproc_transfer(struct seq_file *p, void *data);
+
+/* --- Upper layer functions --- */
+
+void a4l_presetup_transfer(struct a4l_device_context * cxt);
+int a4l_setup_transfer(struct a4l_device_context * cxt);
+int a4l_precleanup_transfer(struct a4l_device_context * cxt);
+int a4l_cleanup_transfer(struct a4l_device_context * cxt);
+int a4l_reserve_transfer(struct a4l_device_context * cxt, int idx_subd);
+int a4l_init_transfer(struct a4l_device_context * cxt, struct a4l_cmd_desc * cmd);
+int a4l_cancel_transfer(struct a4l_device_context * cxt, int idx_subd);
+int a4l_cancel_transfers(struct a4l_device_context * cxt);
+
+ssize_t a4l_put(struct a4l_device_context * cxt, void *buf, size_t nbytes);
+ssize_t a4l_get(struct a4l_device_context * cxt, void *buf, size_t nbytes);
+
+int a4l_request_irq(struct a4l_device *dev,
+		    unsigned int irq,
+		    a4l_irq_hdlr_t handler,
+		    unsigned long flags, void *cookie);
+int a4l_free_irq(struct a4l_device *dev, unsigned int irq);
+unsigned int a4l_get_irq(struct a4l_device *dev);
+
+int a4l_ioctl_cancel(struct a4l_device_context * cxt, void *arg);
+
+#endif /* !_COBALT_RTDM_ANALOGY_TRANSFER_H */
+++ linux-patched/include/xenomai/rtdm/can.h	2022-03-21 12:58:31.768865763 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/net.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                    <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_CAN_H
+#define _COBALT_RTDM_CAN_H
+
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/if.h>
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/can.h>
+
+#endif /* _COBALT_RTDM_CAN_H */
+++ linux-patched/include/xenomai/rtdm/net.h	2022-03-21 12:58:31.761865831 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/autotune.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2005-2011 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _COBALT_RTDM_NET_H
+#define _COBALT_RTDM_NET_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/net.h>
+#include <rtdm/driver.h>
+
+struct rtnet_callback {
+    void    (*func)(struct rtdm_fd *, void *);
+    void    *arg;
+};
+
+#define RTNET_RTIOC_CALLBACK    _IOW(RTIOC_TYPE_NETWORK, 0x12, \
+				     struct rtnet_callback)
+
+/* utility functions */
+
+/* provided by rt_ipv4 */
+unsigned long rt_inet_aton(const char *ip);
+
+/* provided by rt_packet */
+int rt_eth_aton(unsigned char *addr_buf, const char *mac);
+
+#define RTNET_RTDM_VER 914
+
+#endif  /* _COBALT_RTDM_NET_H */
+++ linux-patched/include/xenomai/rtdm/autotune.h	2022-03-21 12:58:31.753865909 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/sched.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_AUTOTUNE_H
+#define _COBALT_RTDM_AUTOTUNE_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/autotune.h>
+
+#endif /* !_COBALT_RTDM_AUTOTUNE_H */
+++ linux-patched/include/xenomai/cobalt/uapi/sched.h	2022-03-21 12:58:32.206861492 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/mutex.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_SCHED_H
+#define _COBALT_UAPI_SCHED_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+#define SCHED_COBALT		42
+#define SCHED_WEAK		43
+
+#ifndef SCHED_SPORADIC
+#define SCHED_SPORADIC		10
+#define sched_ss_low_priority	sched_u.ss.__sched_low_priority
+#define sched_ss_repl_period	sched_u.ss.__sched_repl_period
+#define sched_ss_init_budget	sched_u.ss.__sched_init_budget
+#define sched_ss_max_repl	sched_u.ss.__sched_max_repl
+#endif	/* !SCHED_SPORADIC */
+
+struct __sched_ss_param {
+	int __sched_low_priority;
+	struct __user_old_timespec __sched_repl_period;
+	struct __user_old_timespec __sched_init_budget;
+	int __sched_max_repl;
+};
+
+#define sched_rr_quantum	sched_u.rr.__sched_rr_quantum
+
+struct __sched_rr_param {
+	struct __user_old_timespec __sched_rr_quantum;
+};
+
+#ifndef SCHED_TP
+#define SCHED_TP		11
+#define sched_tp_partition	sched_u.tp.__sched_partition
+#endif	/* !SCHED_TP */
+
+struct __sched_tp_param {
+	int __sched_partition;
+};
+
+struct sched_tp_window {
+	struct __user_old_timespec offset;
+	struct __user_old_timespec duration;
+	int ptid;
+};
+
+enum {
+	sched_tp_install,
+	sched_tp_uninstall,
+	sched_tp_start,
+	sched_tp_stop,
+};
+
+struct __sched_config_tp {
+	int op;
+	int nr_windows;
+	struct sched_tp_window windows[0];
+};
+
+#define sched_tp_confsz(nr_win) \
+  (sizeof(struct __sched_config_tp) + nr_win * sizeof(struct sched_tp_window))
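+
+/*
+ * For example, a TP configuration carrying 4 windows occupies
+ * sched_tp_confsz(4) bytes, i.e. the fixed header followed by four
+ * sched_tp_window slots.
+ */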
+
+#ifndef SCHED_QUOTA
+#define SCHED_QUOTA		12
+#define sched_quota_group	sched_u.quota.__sched_group
+#endif	/* !SCHED_QUOTA */
+
+struct __sched_quota_param {
+	int __sched_group;
+};
+
+enum {
+	sched_quota_add,
+	sched_quota_remove,
+	sched_quota_force_remove,
+	sched_quota_set,
+	sched_quota_get,
+};
+
+struct __sched_config_quota {
+	int op;
+	union {
+		struct {
+			int pshared;
+		} add;
+		struct {
+			int tgid;
+		} remove;
+		struct {
+			int tgid;
+			int quota;
+			int quota_peak;
+		} set;
+		struct {
+			int tgid;
+		} get;
+	};
+	struct __sched_quota_info {
+		int tgid;
+		int quota;
+		int quota_peak;
+		int quota_sum;
+	} info;
+};
+
+#define sched_quota_confsz()  sizeof(struct __sched_config_quota)
+
+struct sched_param_ex {
+	int sched_priority;
+	union {
+		struct __sched_ss_param ss;
+		struct __sched_rr_param rr;
+		struct __sched_tp_param tp;
+		struct __sched_quota_param quota;
+	} sched_u;
+};
+
+union sched_config {
+	struct __sched_config_tp tp;
+	struct __sched_config_quota quota;
+};
+
+#endif /* !_COBALT_UAPI_SCHED_H */
+++ linux-patched/include/xenomai/cobalt/uapi/mutex.h	2022-03-21 12:58:32.199861560 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/synch.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_MUTEX_H
+#define _COBALT_UAPI_MUTEX_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+#define COBALT_MUTEX_MAGIC  0x86860303
+
+struct cobalt_mutex_state {
+	atomic_t owner;
+	__u32 flags;
+#define COBALT_MUTEX_COND_SIGNAL 0x00000001
+#define COBALT_MUTEX_ERRORCHECK  0x00000002
+	__u32 ceiling;
+};
+
+union cobalt_mutex_union {
+	pthread_mutex_t native_mutex;
+	struct cobalt_mutex_shadow {
+		__u32 magic;
+		__u32 lockcnt;
+		__u32 state_offset;
+		xnhandle_t handle;
+		struct cobalt_mutexattr attr;
+	} shadow_mutex;
+};
+
+#endif /* !_COBALT_UAPI_MUTEX_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/synch.h	2022-03-21 12:58:32.192861628 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/limits.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2008, 2009 Jan Kiszka <jan.kiszka@siemens.com>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_SYNCH_H
+#define _COBALT_UAPI_KERNEL_SYNCH_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+/* Creation flags */
+#define XNSYNCH_FIFO    0x0
+#define XNSYNCH_PRIO    0x1
+#define XNSYNCH_PI      0x2
+#define XNSYNCH_DREORD  0x4
+#define XNSYNCH_OWNER   0x8
+#define XNSYNCH_PP      0x10
+
+/* Fast lock API */
+static inline int xnsynch_fast_is_claimed(xnhandle_t handle)
+{
+	return (handle & XNSYNCH_FLCLAIM) != 0;
+}
+
+static inline xnhandle_t xnsynch_fast_claimed(xnhandle_t handle)
+{
+	return handle | XNSYNCH_FLCLAIM;
+}
+
+static inline xnhandle_t xnsynch_fast_ceiling(xnhandle_t handle)
+{
+	return handle | XNSYNCH_FLCEIL;
+}
+
+static inline int
+xnsynch_fast_owner_check(atomic_t *fastlock, xnhandle_t ownerh)
+{
+	return (xnhandle_get_id(atomic_read(fastlock)) == ownerh) ?
+		0 : -EPERM;
+}
+
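+/*
+ * Single-cmpxchg fast path: the lock is granted only if the fastlock
+ * word was XN_NO_HANDLE beforehand; -EBUSY means the caller already
+ * owns the lock, -EAGAIN that some other owner does.
+ */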
+static inline
+int xnsynch_fast_acquire(atomic_t *fastlock, xnhandle_t new_ownerh)
+{
+	xnhandle_t h;
+
+	h = atomic_cmpxchg(fastlock, XN_NO_HANDLE, new_ownerh);
+	if (h != XN_NO_HANDLE) {
+		if (xnhandle_get_id(h) == new_ownerh)
+			return -EBUSY;
+
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static inline
+int xnsynch_fast_release(atomic_t *fastlock, xnhandle_t cur_ownerh)
+{
+	return (xnhandle_t)atomic_cmpxchg(fastlock, cur_ownerh, XN_NO_HANDLE)
+		== cur_ownerh;
+}
+
+/* Local/shared property */
+static inline int xnsynch_is_shared(xnhandle_t handle)
+{
+	return (handle & XNSYNCH_PSHARED) != 0;
+}
+
+#endif /* !_COBALT_UAPI_KERNEL_SYNCH_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/limits.h	2022-03-21 12:58:32.184861706 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/types.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_LIMITS_H
+#define _COBALT_UAPI_KERNEL_LIMITS_H
+
+#define XNOBJECT_NAME_LEN 32
+
+#endif /* !_COBALT_UAPI_KERNEL_LIMITS_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/types.h	2022-03-21 12:58:32.177861774 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/urw.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_TYPES_H
+#define _COBALT_UAPI_KERNEL_TYPES_H
+
+#include <linux/types.h>
+#include <cobalt/uapi/kernel/limits.h>
+
+typedef __u64 xnticks_t;
+
+typedef __s64 xnsticks_t;
+
+typedef __u32 xnhandle_t;
+
+#define XN_NO_HANDLE		((xnhandle_t)0)
+#define XN_HANDLE_INDEX_MASK	((xnhandle_t)0xf0000000)
+
+/* Fixed bits (part of the identifier) */
+#define XNSYNCH_PSHARED		((xnhandle_t)0x40000000)
+
+/* Transient bits (expressing a status) */
+#define XNSYNCH_FLCLAIM		((xnhandle_t)0x80000000) /* Contended. */
+#define XNSYNCH_FLCEIL		((xnhandle_t)0x20000000) /* Ceiling active. */
+
+#define XN_HANDLE_TRANSIENT_MASK	(XNSYNCH_FLCLAIM|XNSYNCH_FLCEIL)
+
+/*
+ * Strip all special bits from the handle, only retaining the object
+ * index value in the registry.
+ */
+static inline xnhandle_t xnhandle_get_index(xnhandle_t handle)
+{
+	return handle & ~XN_HANDLE_INDEX_MASK;
+}
+
+/*
+ * Strip the transient bits from the handle, only retaining the fixed
+ * part making the identifier.
+ */
+static inline xnhandle_t xnhandle_get_id(xnhandle_t handle)
+{
+	return handle & ~XN_HANDLE_TRANSIENT_MASK;
+}
+
+/*
+ * Our representation of time specs at the kernel<->user interface
+ * boundary at the moment, until we have fully transitioned to a
+ * y2038-safe implementation in libcobalt. Once done, those legacy
+ * types will be removed.
+ */
+struct __user_old_timespec {
+	long  tv_sec;
+	long  tv_nsec;
+};
+
+struct __user_old_itimerspec {
+	struct __user_old_timespec it_interval;
+	struct __user_old_timespec it_value;
+};
+
+struct __user_old_timeval {
+	long  tv_sec;
+	long  tv_usec;
+};
+
+/* Lifted from include/uapi/linux/timex.h. */
+struct __user_old_timex {
+	unsigned int modes;	/* mode selector */
+	__kernel_long_t offset;	/* time offset (usec) */
+	__kernel_long_t freq;	/* frequency offset (scaled ppm) */
+	__kernel_long_t maxerror;/* maximum error (usec) */
+	__kernel_long_t esterror;/* estimated error (usec) */
+	int status;		/* clock command/status */
+	__kernel_long_t constant;/* pll time constant */
+	__kernel_long_t precision;/* clock precision (usec) (read only) */
+	__kernel_long_t tolerance;/* clock frequency tolerance (ppm)
+				   * (read only)
+				   */
+	struct __user_old_timeval time;	/* (read only, except for ADJ_SETOFFSET) */
+	__kernel_long_t tick;	/* (modified) usecs between clock ticks */
+
+	__kernel_long_t ppsfreq;/* pps frequency (scaled ppm) (ro) */
+	__kernel_long_t jitter; /* pps jitter (us) (ro) */
+	int shift;              /* interval duration (s) (shift) (ro) */
+	__kernel_long_t stabil;            /* pps stability (scaled ppm) (ro) */
+	__kernel_long_t jitcnt; /* jitter limit exceeded (ro) */
+	__kernel_long_t calcnt; /* calibration intervals (ro) */
+	__kernel_long_t errcnt; /* calibration errors (ro) */
+	__kernel_long_t stbcnt; /* stability limit exceeded (ro) */
+
+	int tai;		/* TAI offset (ro) */
+
+	int  :32; int  :32; int  :32; int  :32;
+	int  :32; int  :32; int  :32; int  :32;
+	int  :32; int  :32; int  :32;
+};
+
+#endif /* !_COBALT_UAPI_KERNEL_TYPES_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/urw.h	2022-03-21 12:58:32.170861843 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/vdso.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_URW_H
+#define _COBALT_UAPI_KERNEL_URW_H
+
+#include <linux/types.h>
+
+/*
+ * A restricted version of the kernel seqlocks with a slightly
+ * different interface, allowing for unsynced reads with concurrent
+ * write detection, without serializing writers.  Caller should
+ * provide for proper locking to deal with concurrent updates.
+ *
+ * urw_t lock = URW_INITIALIZER;
+ * urwstate_t tmp;
+ *
+ * unsynced_read_block(&tmp, &lock) {
+ *          (will redo until clean read)...
+ * }
+ *
+ * unsynced_write_block(&tmp, &lock) {
+ *          ...
+ * }
+ *
+ * This code was inspired by Wolfgang Mauerer's linux/seqlock.h
+ * adaptation for Xenomai 2.6 to support the VDSO feature.
+ */
+
+typedef struct {
+	__u32 sequence;
+} urw_t;
+
+typedef struct {
+	__u32 token;
+	__u32 dirty;
+} urwstate_t;
+
+#define URW_INITIALIZER     { 0 }
+#define DEFINE_URW(__name)  urw_t __name = URW_INITIALIZER
+
+#ifndef READ_ONCE
+#define READ_ONCE ACCESS_ONCE
+#endif
+
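+/*
+ * The writer bumps the sequence count on both sides of an update, so
+ * an odd value means an update is in flight: __try_read_start() spins
+ * until the count turns even, and __try_read_end() rechecks it to
+ * detect a write that raced with the read.
+ */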
+static inline void __try_read_start(const urw_t *urw, urwstate_t *tmp)
+{
+	__u32 token;
+repeat:
+	token = READ_ONCE(urw->sequence);
+	smp_rmb();
+	if (token & 1) {
+		cpu_relax();
+		goto repeat;
+	}
+
+	tmp->token = token;
+	tmp->dirty = 1;
+}
+
+static inline void __try_read_end(const urw_t *urw, urwstate_t *tmp)
+{
+	smp_rmb();
+	if (urw->sequence != tmp->token) {
+		__try_read_start(urw, tmp);
+		return;
+	}
+
+	tmp->dirty = 0;
+}
+
+static inline void __do_write_start(urw_t *urw, urwstate_t *tmp)
+{
+	urw->sequence++;
+	tmp->dirty = 1;
+	smp_wmb();
+}
+
+static inline void __do_write_end(urw_t *urw, urwstate_t *tmp)
+{
+	smp_wmb();
+	tmp->dirty = 0;
+	urw->sequence++;
+}
+
+static inline void unsynced_rw_init(urw_t *urw)
+{
+	urw->sequence = 0;
+}
+
+#define unsynced_read_block(__tmp, __urw)		\
+	for (__try_read_start(__urw, __tmp);		\
+	     (__tmp)->dirty; __try_read_end(__urw, __tmp))
+
+#define unsynced_write_block(__tmp, __urw)		\
+	for (__do_write_start(__urw, __tmp);		\
+	     (__tmp)->dirty; __do_write_end(__urw, __tmp))
+
+#endif /* !_COBALT_UAPI_KERNEL_URW_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/vdso.h	2022-03-21 12:58:32.162861921 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/pipe.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Wolfgang Mauerer <wolfgang.mauerer@siemens.com>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_VDSO_H
+#define _COBALT_UAPI_KERNEL_VDSO_H
+
+#include <cobalt/uapi/kernel/urw.h>
+
+/*
+ * I-pipe only. Dovetail enables the common vDSO for getting
+ * CLOCK_REALTIME timestamps from the out-of-band stage
+ * (XNVDSO_FEAT_HOST_REALTIME is cleared in this case).
+ */
+struct xnvdso_hostrt_data {
+	__u64 wall_sec;
+	__u64 wtom_sec;
+	__u64 cycle_last;
+	__u64 mask;
+	__u32 wall_nsec;
+	__u32 wtom_nsec;
+	__u32 mult;
+	__u32 shift;
+	__u32 live;
+	urw_t lock;
+};
+
+/*
+ * Data shared between the Cobalt kernel and applications, which lives
+ * in the shared memory heap (COBALT_MEMDEV_SHARED).
+ * xnvdso.features tells which data is present. Notice
+ * that struct xnvdso may only grow, but never shrink.
+ */
+struct xnvdso {
+	__u64 features;
+	/* XNVDSO_FEAT_HOST_REALTIME */
+	struct xnvdso_hostrt_data hostrt_data;
+	/* XNVDSO_FEAT_WALLCLOCK_OFFSET */
+	__u64 wallclock_offset;
+};
+
+/* For each shared feature, add a flag below. */
+
+#define XNVDSO_FEAT_HOST_REALTIME	0x0000000000000001ULL
+#define XNVDSO_FEAT_WALLCLOCK_OFFSET	0x0000000000000002ULL
+
+static inline int xnvdso_test_feature(struct xnvdso *vdso,
+				      __u64 feature)
+{
+	return (vdso->features & feature) != 0;
+}
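+
+/*
+ * Illustrative usage, assuming vdso points at the shared struct
+ * xnvdso instance exported through COBALT_MEMDEV_SHARED: a feature
+ * bit must be tested before the matching field is read, since older
+ * cores may not provide it.
+ *
+ * if (xnvdso_test_feature(vdso, XNVDSO_FEAT_WALLCLOCK_OFFSET))
+ *	offset = vdso->wallclock_offset;
+ */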
+
+#endif /* !_COBALT_UAPI_KERNEL_VDSO_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/pipe.h	2022-03-21 12:58:32.155861989 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/thread.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_PIPE_H
+#define _COBALT_UAPI_KERNEL_PIPE_H
+
+#define	XNPIPE_IOCTL_BASE	'p'
+
+#define XNPIPEIOC_GET_NRDEV	_IOW(XNPIPE_IOCTL_BASE, 0, int)
+#define XNPIPEIOC_IFLUSH	_IO(XNPIPE_IOCTL_BASE, 1)
+#define XNPIPEIOC_OFLUSH	_IO(XNPIPE_IOCTL_BASE, 2)
+#define XNPIPEIOC_FLUSH		XNPIPEIOC_OFLUSH
+#define XNPIPEIOC_SETSIG	_IO(XNPIPE_IOCTL_BASE, 3)
+
+#define XNPIPE_NORMAL	0x0
+#define XNPIPE_URGENT	0x1
+
+#define XNPIPE_IFLUSH	0x1
+#define XNPIPE_OFLUSH	0x2
+
+#define XNPIPE_MINOR_AUTO  (-1)
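+
+/*
+ * Illustrative only: on the regular Linux side, these requests are
+ * issued via ioctl(2) on the message pipe device node (assuming the
+ * usual /dev/rtp<minor> naming), e.g. to flush pending output:
+ *
+ * int fd = open("/dev/rtp0", O_RDWR);
+ * ioctl(fd, XNPIPEIOC_OFLUSH);
+ */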
+
+#endif /* !_COBALT_UAPI_KERNEL_PIPE_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/thread.h	2022-03-21 12:58:32.147862067 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/heap.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_THREAD_H
+#define _COBALT_UAPI_KERNEL_THREAD_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+/**
+ * @ingroup cobalt_core_thread
+ * @defgroup cobalt_core_thread_states Thread state flags
+ * @brief Bits reporting permanent or transient states of threads
+ * @{
+ */
+
+/* State flags (shared) */
+
+#define XNSUSP    0x00000001 /**< Suspended. */
+#define XNPEND    0x00000002 /**< Sleep-wait for a resource. */
+#define XNDELAY   0x00000004 /**< Delayed */
+#define XNREADY   0x00000008 /**< Linked to the ready queue. */
+#define XNDORMANT 0x00000010 /**< Not started yet */
+#define XNZOMBIE  0x00000020 /**< Zombie thread in deletion process */
+#define XNMAPPED  0x00000040 /**< Thread is mapped to a linux task */
+#define XNRELAX   0x00000080 /**< Relaxed shadow thread (blocking bit) */
+#define XNHELD    0x00000200 /**< Thread is held to process emergency. */
+#define XNBOOST   0x00000400 /**< PI/PP boost undergoing */
+#define XNSSTEP   0x00000800 /**< Single-stepped by debugger */
+#define XNLOCK    0x00001000 /**< Scheduler lock control (pseudo-bit, not in ->state) */
+#define XNRRB     0x00002000 /**< Undergoes a round-robin scheduling */
+#define XNWARN    0x00004000 /**< Issue SIGDEBUG on error detection */
+#define XNFPU     0x00008000 /**< Thread uses FPU */
+#define XNROOT    0x00010000 /**< Root thread (that is, Linux/IDLE) */
+#define XNWEAK    0x00020000 /**< Non real-time shadow (from the WEAK class) */
+#define XNUSER    0x00040000 /**< Shadow thread running in userland */
+#define XNJOINED  0x00080000 /**< Another thread waits to join this thread */
+#define XNTRAPLB  0x00100000 /**< Trap lock break (i.e. may not sleep with sched lock) */
+#define XNDEBUG   0x00200000 /**< User-level debugging enabled */
+#define XNDBGSTOP 0x00400000 /**< Stopped for synchronous debugging */
+
+/** @} */
+
+/**
+ * @ingroup cobalt_core_thread
+ * @defgroup cobalt_core_thread_info Thread information flags
+ * @brief Bits reporting events notified to threads
+ * @{
+ */
+
+/* Information flags (shared) */
+
+#define XNTIMEO   0x00000001 /**< Woken up due to a timeout condition */
+#define XNRMID    0x00000002 /**< Pending on a removed resource */
+#define XNBREAK   0x00000004 /**< Forcibly awakened from a wait state */
+#define XNKICKED  0x00000008 /**< Forced out of primary mode */
+#define XNWAKEN   0x00000010 /**< Thread woken up upon resource availability */
+#define XNROBBED  0x00000020 /**< Robbed from resource ownership */
+#define XNCANCELD 0x00000040 /**< Cancellation request is pending */
+#define XNPIALERT 0x00000080 /**< Priority inversion alert (SIGDEBUG sent) */
+#define XNSCHEDP  0x00000100 /**< schedparam propagation is pending */
+#define XNCONTHI  0x00000200 /**< Continue in primary mode after debugging */
+
+/* Local information flags (private to current thread) */
+
+#define XNMOVED   0x00000001 /**< CPU migration in primary mode occurred */
+#define XNLBALERT 0x00000002 /**< Scheduler lock break alert (SIGDEBUG sent) */
+#define XNDESCENT 0x00000004 /**< Adaptive transitioning to secondary mode */
+#define XNSYSRST  0x00000008 /**< Thread awaiting syscall restart after signal */
+#define XNHICCUP  0x00000010 /**< Just left from ptracing */
+
+/** @} */
+
+/*
+ * Must follow strictly the declaration order of the state flags
+ * defined above. Status symbols are defined as follows:
+ *
+ * 'S' -> Forcibly suspended.
+ * 'w'/'W' -> Waiting for a resource, with or without timeout.
+ * 'D' -> Delayed (without any other wait condition).
+ * 'R' -> Runnable.
+ * 'U' -> Unstarted or dormant.
+ * 'X' -> Relaxed shadow.
+ * 'H' -> Held in emergency.
+ * 'b' -> Priority boost undergoing.
+ * 'T' -> Ptraced and stopped.
+ * 'l' -> Locks scheduler.
+ * 'r' -> Undergoes round-robin.
+ * 't' -> Runtime mode errors notified.
+ * 'L' -> Lock breaks trapped.
+ * 's' -> Ptraced, stopped synchronously.
+ */
+#define XNTHREAD_STATE_LABELS  "SWDRU..X.HbTlrt.....L.s"
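+
+/*
+ * Illustrative sketch of the intended mapping (bit position to label,
+ * '.' meaning no label for that bit); the actual formatting code also
+ * handles the 'w'/'W' timeout distinction documented above:
+ *
+ * for (i = 0; XNTHREAD_STATE_LABELS[i]; i++)
+ *	if ((state & (1UL << i)) && XNTHREAD_STATE_LABELS[i] != '.')
+ *		buf[n++] = XNTHREAD_STATE_LABELS[i];
+ */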
+
+struct xnthread_user_window {
+	__u32 state;
+	__u32 info;
+	__u32 grant_value;
+	__u32 pp_pending;
+};
+
+#endif /* !_COBALT_UAPI_KERNEL_THREAD_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/heap.h	2022-03-21 12:58:32.140862135 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/trace.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_HEAP_H
+#define _COBALT_UAPI_KERNEL_HEAP_H
+
+#include <linux/types.h>
+
+#define COBALT_MEMDEV_PRIVATE  "memdev-private"
+#define COBALT_MEMDEV_SHARED   "memdev-shared"
+#define COBALT_MEMDEV_SYS      "memdev-sys"
+
+struct cobalt_memdev_stat {
+	__u32 size;
+	__u32 free;
+};
+
+#define MEMDEV_RTIOC_STAT	_IOR(RTDM_CLASS_MEMORY, 0, struct cobalt_memdev_stat)
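+
+/*
+ * Illustrative only, assuming the default RTDM device naming under
+ * /dev/rtdm: querying the shared heap usage boils down to
+ *
+ * struct cobalt_memdev_stat statbuf;
+ * int fd = open("/dev/rtdm/" COBALT_MEMDEV_SHARED, O_RDONLY);
+ * ioctl(fd, MEMDEV_RTIOC_STAT, &statbuf);
+ */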
+
+#endif /* !_COBALT_UAPI_KERNEL_HEAP_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/trace.h	2022-03-21 12:58:32.133862204 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/signal.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_TRACE_H
+#define _COBALT_UAPI_KERNEL_TRACE_H
+
+#define __xntrace_op_max_begin		0
+#define __xntrace_op_max_end		1
+#define __xntrace_op_max_reset		2
+#define __xntrace_op_user_start		3
+#define __xntrace_op_user_stop		4
+#define __xntrace_op_user_freeze	5
+#define __xntrace_op_special		6
+#define __xntrace_op_special_u64	7
+#define __xntrace_op_latpeak_freeze	8
+
+#endif /* !_COBALT_UAPI_KERNEL_TRACE_H */
+++ linux-patched/include/xenomai/cobalt/uapi/signal.h	2022-03-21 12:58:32.125862281 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/sem.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_SIGNAL_H
+#define _COBALT_UAPI_SIGNAL_H
+
+/*
+ * Those are pseudo-signals only available with pthread_kill() to
+ * suspend/resume/unblock threads synchronously, force them out of
+ * primary mode or even demote them to the SCHED_OTHER class via the
+ * low-level nucleus interface. Can't block those signals, queue them,
+ * or even set them in a sigset. Those are nasty, strictly anti-POSIX
+ * things; we do provide them nevertheless only because we are mean
+ * people doing harmful code for no valid reason. Can't go against
+ * your nature, right?  Nah... (this said, don't blame us for POSIX,
+ * we are not _that_ mean).
+ */
+#define SIGSUSP (SIGRTMAX + 1)
+#define SIGRESM (SIGRTMAX + 2)
+#define SIGRELS (SIGRTMAX + 3)
+#define SIGKICK (SIGRTMAX + 4)
+#define SIGDEMT (SIGRTMAX + 5)
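+
+/*
+ * As stated above, these are only meaningful as the signal argument
+ * to pthread_kill(). For instance (illustrative):
+ *
+ * pthread_kill(tid, SIGSUSP);	suspends the target thread
+ * pthread_kill(tid, SIGRESM);	resumes it
+ */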
+
+/*
+ * Regular POSIX signals with specific handling by Xenomai.
+ */
+#define SIGSHADOW			SIGWINCH
+#define sigshadow_action(code)		((code) & 0xff)
+#define sigshadow_arg(code)		(((code) >> 8) & 0xff)
+#define sigshadow_int(action, arg)	((action) | ((arg) << 8))
+
+/* SIGSHADOW action codes. */
+#define SIGSHADOW_ACTION_HARDEN		1
+#define SIGSHADOW_ACTION_BACKTRACE	2
+#define SIGSHADOW_ACTION_HOME		3
+#define SIGSHADOW_BACKTRACE_DEPTH	16
+
+#define SIGDEBUG			SIGXCPU
+#define sigdebug_code(si)		((si)->si_value.sival_int)
+#define sigdebug_reason(si)		(sigdebug_code(si) & 0xff)
+#define sigdebug_marker			0xfccf0000
+#define sigdebug_marked(si)		\
+	((sigdebug_code(si) & 0xffff0000) == sigdebug_marker)
+
+/* Possible values of sigdebug_reason() */
+#define SIGDEBUG_UNDEFINED		0
+#define SIGDEBUG_MIGRATE_SIGNAL		1
+#define SIGDEBUG_MIGRATE_SYSCALL	2
+#define SIGDEBUG_MIGRATE_FAULT		3
+#define SIGDEBUG_MIGRATE_PRIOINV	4
+#define SIGDEBUG_NOMLOCK		5
+#define SIGDEBUG_WATCHDOG		6
+#define SIGDEBUG_RESCNT_IMBALANCE	7
+#define SIGDEBUG_LOCK_BREAK		8
+#define SIGDEBUG_MUTEX_SLEEP		9
+
+#define COBALT_DELAYMAX			2147483647U
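+
+/*
+ * Illustrative SIGDEBUG handler skeleton on the application side
+ * (installed via sigaction() with SA_SIGINFO), decoding the
+ * notification:
+ *
+ * static void sigdebug_handler(int sig, siginfo_t *si, void *context)
+ * {
+ *	if (sigdebug_marked(si) &&
+ *	    sigdebug_reason(si) == SIGDEBUG_MIGRATE_SYSCALL)
+ *		... report an unexpected switch to secondary mode ...
+ * }
+ */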
+
+/*
+ * Internal accessors to extra siginfo/sigevent fields, extending some
+ * existing base field. The extra data should be grouped in a
+ * dedicated struct type. The extra space is taken from the padding
+ * area available from the original structure definitions.
+ *
+ * e.g. getting the address of the following extension to
+ * _sifields._rt from siginfo_t,
+ *
+ * struct bar {
+ *    int foo;
+ * };
+ *
+ * would be noted as:
+ *
+ * siginfo_t si;
+ * struct bar *p = __cobalt_si_extra(&si, _rt, struct bar);
+ *
+ * This code is shared between kernel and user space. Proper
+ * definitions of siginfo_t and sigevent_t should have been read prior
+ * to including this file.
+ *
+ * CAUTION: this macro does not handle alignment issues for the extra
+ * data. The extra type definition should take care of this.
+ */
+#ifdef __OPTIMIZE__
+extern void *__siginfo_overflow(void);
+static inline
+const void *__check_si_overflow(size_t fldsz, size_t extrasz, const void *p)
+{
+	siginfo_t *si __attribute__((unused));
+
+	if (fldsz + extrasz <= sizeof(si->_sifields))
+		return p;
+
+	return __siginfo_overflow();
+}
+#define __cobalt_si_extra(__si, __basefield, __type)				\
+	((__type *)__check_si_overflow(sizeof(__si->_sifields.__basefield),	\
+	       sizeof(__type), &(__si->_sifields.__basefield) + 1))
+#else
+#define __cobalt_si_extra(__si, __basefield, __type)				\
+	((__type *)((&__si->_sifields.__basefield) + 1))
+#endif
+
+/* Same approach, this time for extending sigevent_t. */
+
+#ifdef __OPTIMIZE__
+extern void *__sigevent_overflow(void);
+static inline
+const void *__check_sev_overflow(size_t fldsz, size_t extrasz, const void *p)
+{
+	sigevent_t *sev __attribute__((unused));
+
+	if (fldsz + extrasz <= sizeof(sev->_sigev_un))
+		return p;
+
+	return __sigevent_overflow();
+}
+#define __cobalt_sev_extra(__sev, __basefield, __type)				\
+	((__type *)__check_sev_overflow(sizeof(__sev->_sigev_un.__basefield),	\
+	       sizeof(__type), &(__sev->_sigev_un.__basefield) + 1))
+#else
+#define __cobalt_sev_extra(__sev, __basefield, __type)				\
+	((__type *)((&__sev->_sigev_un.__basefield) + 1))
+#endif
+
+#endif /* !_COBALT_UAPI_SIGNAL_H */
+++ linux-patched/include/xenomai/cobalt/uapi/sem.h	2022-03-21 12:58:32.118862350 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/corectl.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_SEM_H
+#define _COBALT_UAPI_SEM_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+#define COBALT_SEM_MAGIC (0x86860707)
+#define COBALT_NAMED_SEM_MAGIC (0x86860D0D)
+
+struct cobalt_sem;
+
+struct cobalt_sem_state {
+	atomic_t value;
+	__u32 flags;
+};
+
+union cobalt_sem_union {
+	sem_t native_sem;
+	struct cobalt_sem_shadow {
+		__u32 magic;
+		__s32 state_offset;
+		xnhandle_t handle;
+	} shadow_sem;
+};
+
+struct cobalt_sem_info {
+	unsigned int value;
+	int flags;
+	int nrwait;
+};
+
+#define SEM_FIFO       0x1
+#define SEM_PULSE      0x2
+#define SEM_PSHARED    0x4
+#define SEM_REPORT     0x8
+#define SEM_WARNDEL    0x10
+#define SEM_RAWCLOCK   0x20
+#define SEM_NOBUSYDEL  0x40
+
+#endif /* !_COBALT_UAPI_SEM_H */
+++ linux-patched/include/xenomai/cobalt/uapi/corectl.h	2022-03-21 12:58:32.110862428 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/syscall.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_CORECTL_H
+#define _COBALT_UAPI_CORECTL_H
+
+#define _CC_COBALT_GET_VERSION		0
+#define _CC_COBALT_GET_NR_PIPES		1
+#define _CC_COBALT_GET_NR_TIMERS	2
+
+#define _CC_COBALT_GET_DEBUG			3
+#   define _CC_COBALT_DEBUG_ASSERT		1
+#   define _CC_COBALT_DEBUG_CONTEXT		2
+#   define _CC_COBALT_DEBUG_LOCKING		4
+#   define _CC_COBALT_DEBUG_USER		8
+#   define _CC_COBALT_DEBUG_MUTEX_RELAXED	16
+#   define _CC_COBALT_DEBUG_MUTEX_SLEEP		32
+/* bit 6 (64) formerly used for DEBUG_POSIX_SYNCHRO */
+#   define _CC_COBALT_DEBUG_LEGACY		128
+#   define _CC_COBALT_DEBUG_TRACE_RELAX		256
+#   define _CC_COBALT_DEBUG_NET			512
+
+#define _CC_COBALT_GET_POLICIES		4
+#   define _CC_COBALT_SCHED_FIFO	1
+#   define _CC_COBALT_SCHED_RR		2
+#   define _CC_COBALT_SCHED_WEAK	4
+#   define _CC_COBALT_SCHED_SPORADIC	8
+#   define _CC_COBALT_SCHED_QUOTA	16
+#   define _CC_COBALT_SCHED_TP		32
+
+#define _CC_COBALT_GET_WATCHDOG		5
+#define _CC_COBALT_GET_CORE_STATUS	6
+#define _CC_COBALT_START_CORE		7
+#define _CC_COBALT_STOP_CORE		8
+
+#define _CC_COBALT_GET_NET_CONFIG	9
+#   define _CC_COBALT_NET		0x00000001
+#   define _CC_COBALT_NET_ETH_P_ALL	0x00000002
+#   define _CC_COBALT_NET_IPV4		0x00000004
+#   define _CC_COBALT_NET_ICMP		0x00000008
+#   define _CC_COBALT_NET_NETROUTING	0x00000010
+#   define _CC_COBALT_NET_ROUTER	0x00000020
+#   define _CC_COBALT_NET_UDP		0x00000040
+#   define _CC_COBALT_NET_AF_PACKET	0x00000080
+#   define _CC_COBALT_NET_TDMA		0x00000100
+#   define _CC_COBALT_NET_NOMAC		0x00000200
+#   define _CC_COBALT_NET_CFG		0x00000400
+#   define _CC_COBALT_NET_CAP		0x00000800
+#   define _CC_COBALT_NET_PROXY		0x00001000
+
+enum cobalt_run_states {
+	COBALT_STATE_DISABLED,
+	COBALT_STATE_RUNNING,
+	COBALT_STATE_STOPPED,
+	COBALT_STATE_TEARDOWN,
+	COBALT_STATE_WARMUP,
+};
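+
+/*
+ * Illustrative sketch, assuming a userland helper relaying corectl
+ * requests as (request, buffer, buffer length) - e.g. retrieving the
+ * core state:
+ *
+ * int state;
+ * cobalt_corectl(_CC_COBALT_GET_CORE_STATUS, &state, sizeof(state));
+ * running = (state == COBALT_STATE_RUNNING);
+ */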
+
+#endif /* !_COBALT_UAPI_CORECTL_H */
+++ linux-patched/include/xenomai/cobalt/uapi/syscall.h	2022-03-21 12:58:32.103862496 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/time.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_SYSCALL_H
+#define _COBALT_UAPI_SYSCALL_H
+
+#include <cobalt/uapi/asm-generic/syscall.h>
+
+#define sc_cobalt_bind				0
+#define sc_cobalt_thread_create			1
+#define sc_cobalt_thread_getpid			2
+#define sc_cobalt_thread_setmode		3
+#define sc_cobalt_thread_setname		4
+#define sc_cobalt_thread_join			5
+#define sc_cobalt_thread_kill			6
+#define sc_cobalt_thread_setschedparam_ex	7
+#define sc_cobalt_thread_getschedparam_ex	8
+#define sc_cobalt_thread_getstat		9
+#define sc_cobalt_sem_init			10
+#define sc_cobalt_sem_destroy			11
+#define sc_cobalt_sem_post			12
+#define sc_cobalt_sem_wait			13
+#define sc_cobalt_sem_trywait			14
+#define sc_cobalt_sem_getvalue			15
+#define sc_cobalt_sem_open			16
+#define sc_cobalt_sem_close			17
+#define sc_cobalt_sem_unlink			18
+#define sc_cobalt_sem_timedwait			19
+#define sc_cobalt_sem_inquire			20
+#define sc_cobalt_sem_broadcast_np		21
+#define sc_cobalt_clock_getres			22
+#define sc_cobalt_clock_gettime			23
+#define sc_cobalt_clock_settime			24
+#define sc_cobalt_clock_nanosleep		25
+#define sc_cobalt_mutex_init			26
+#define sc_cobalt_mutex_check_init		27
+#define sc_cobalt_mutex_destroy			28
+#define sc_cobalt_mutex_lock			29
+#define sc_cobalt_mutex_timedlock		30
+#define sc_cobalt_mutex_trylock			31
+#define sc_cobalt_mutex_unlock			32
+#define sc_cobalt_cond_init			33
+#define sc_cobalt_cond_destroy			34
+#define sc_cobalt_cond_wait_prologue		35
+#define sc_cobalt_cond_wait_epilogue		36
+#define sc_cobalt_mq_open			37
+#define sc_cobalt_mq_close			38
+#define sc_cobalt_mq_unlink			39
+#define sc_cobalt_mq_getattr			40
+#define sc_cobalt_mq_timedsend			41
+#define sc_cobalt_mq_timedreceive		42
+#define sc_cobalt_mq_notify			43
+#define sc_cobalt_sched_minprio			44
+#define sc_cobalt_sched_maxprio			45
+#define sc_cobalt_sched_weightprio		46
+#define sc_cobalt_sched_yield			47
+#define sc_cobalt_sched_setscheduler_ex		48
+#define sc_cobalt_sched_getscheduler_ex		49
+#define sc_cobalt_sched_setconfig_np		50
+#define sc_cobalt_sched_getconfig_np		51
+#define sc_cobalt_timer_create			52
+#define sc_cobalt_timer_delete			53
+#define sc_cobalt_timer_settime			54
+#define sc_cobalt_timer_gettime			55
+#define sc_cobalt_timer_getoverrun		56
+#define sc_cobalt_timerfd_create		57
+#define sc_cobalt_timerfd_settime		58
+#define sc_cobalt_timerfd_gettime		59
+#define sc_cobalt_sigwait			60
+#define sc_cobalt_sigwaitinfo			61
+#define sc_cobalt_sigtimedwait			62
+#define sc_cobalt_sigpending			63
+#define sc_cobalt_kill				64
+#define sc_cobalt_sigqueue			65
+#define sc_cobalt_monitor_init			66
+#define sc_cobalt_monitor_destroy		67
+#define sc_cobalt_monitor_enter			68
+#define sc_cobalt_monitor_wait			69
+#define sc_cobalt_monitor_sync			70
+#define sc_cobalt_monitor_exit			71
+#define sc_cobalt_event_init			72
+#define sc_cobalt_event_wait			73
+#define sc_cobalt_event_sync			74
+#define sc_cobalt_event_destroy			75
+#define sc_cobalt_event_inquire			76
+#define sc_cobalt_open				77
+#define sc_cobalt_socket			78
+#define sc_cobalt_close				79
+#define sc_cobalt_ioctl				80
+#define sc_cobalt_read				81
+#define sc_cobalt_write				82
+#define sc_cobalt_recvmsg			83
+#define sc_cobalt_sendmsg			84
+#define sc_cobalt_mmap				85
+#define sc_cobalt_select			86
+#define sc_cobalt_fcntl				87
+#define sc_cobalt_migrate			88
+#define sc_cobalt_archcall			89
+#define sc_cobalt_trace				90
+#define sc_cobalt_corectl			91
+#define sc_cobalt_get_current			92
+/* 93: formerly mayday */
+#define sc_cobalt_backtrace			94
+#define sc_cobalt_serialdbg			95
+#define sc_cobalt_extend			96
+#define sc_cobalt_ftrace_puts			97
+#define sc_cobalt_recvmmsg			98
+#define sc_cobalt_sendmmsg			99
+#define sc_cobalt_clock_adjtime			100
+#define sc_cobalt_thread_setschedprio		101
+#define sc_cobalt_sem_timedwait64		102
+#define sc_cobalt_clock_gettime64		103
+#define sc_cobalt_clock_settime64		104
+#define sc_cobalt_clock_nanosleep64		105
+#define sc_cobalt_clock_getres64		106
+#define sc_cobalt_clock_adjtime64		107
+#define sc_cobalt_mutex_timedlock64		108
+#define sc_cobalt_mq_timedsend64		109
+#define sc_cobalt_mq_timedreceive64		110
+#define sc_cobalt_sigtimedwait64		111
+#define sc_cobalt_monitor_wait64		112
+#define sc_cobalt_event_wait64			113
+#define sc_cobalt_recvmmsg64			114
+
+#define __NR_COBALT_SYSCALLS			128 /* Power of 2 */
+
+#endif /* !_COBALT_UAPI_SYSCALL_H */
+++ linux-patched/include/xenomai/cobalt/uapi/time.h	2022-03-21 12:58:32.096862564 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/event.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_TIME_H
+#define _COBALT_UAPI_TIME_H
+
+#ifndef CLOCK_MONOTONIC_RAW
+#define CLOCK_MONOTONIC_RAW  4
+#endif
+
+/*
+ * Additional clock ids we manage are supposed not to collide with any
+ * of the POSIX and Linux kernel definitions so that no ambiguities
+ * arise when porting applications in both directions.
+ *
+ * 0  .. 31   regular POSIX/linux clock ids.
+ * 32 .. 63   statically reserved Cobalt clocks
+ * 64 .. 127  dynamically registered Cobalt clocks (external)
+ *
+ * CAUTION: clock ids must fit within a 7bit value, see
+ * include/cobalt/uapi/thread.h (e.g. cobalt_condattr).
+ */
+#define __COBALT_CLOCK_STATIC(nr)	((clockid_t)(nr + 32))
+
+#define CLOCK_HOST_REALTIME  __COBALT_CLOCK_STATIC(0)
+
+#define COBALT_MAX_EXTCLOCKS  64
+
+#define __COBALT_CLOCK_EXT(nr)		((clockid_t)(nr) | (1 << 6))
+#define __COBALT_CLOCK_EXT_P(id)	((int)(id) >= 64 && (int)(id) < 128)
+#define __COBALT_CLOCK_EXT_INDEX(id)	((int)(id) & ~(1 << 6))
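+
+/*
+ * For instance: CLOCK_HOST_REALTIME maps to clock id 32, the first
+ * dynamically registered clock is __COBALT_CLOCK_EXT(0), i.e. id 64,
+ * and __COBALT_CLOCK_EXT_INDEX() retrieves the 0-based slot back from
+ * such an id.
+ */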
+
+/*
+ * Additional timerfd defines
+ *
+ * When passing TFD_WAKEUP to timer_settime(), any timer expiration
+ * unblocks the thread that issued the timer_settime() call.
+ */
+#define TFD_WAKEUP	(1 << 2)
+
+#endif /* !_COBALT_UAPI_TIME_H */
+++ linux-patched/include/xenomai/cobalt/uapi/event.h	2022-03-21 12:58:32.088862642 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/monitor.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_EVENT_H
+#define _COBALT_UAPI_EVENT_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+struct cobalt_event_state {
+	__u32 value;
+	__u32 flags;
+#define COBALT_EVENT_PENDED  0x1
+	__u32 nwaiters;
+};
+
+struct cobalt_event;
+
+/* Creation flags. */
+#define COBALT_EVENT_FIFO    0x0
+#define COBALT_EVENT_PRIO    0x1
+#define COBALT_EVENT_SHARED  0x2
+
+/* Wait mode. */
+#define COBALT_EVENT_ALL  0x0
+#define COBALT_EVENT_ANY  0x1
+
+struct cobalt_event_shadow {
+	__u32 state_offset;
+	__u32 flags;
+	xnhandle_t handle;
+};
+
+struct cobalt_event_info {
+	unsigned int value;
+	int flags;
+	int nrwait;
+};
+
+typedef struct cobalt_event_shadow cobalt_event_t;
+
+#endif /* !_COBALT_UAPI_EVENT_H */
+++ linux-patched/include/xenomai/cobalt/uapi/monitor.h	2022-03-21 12:58:32.081862711 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/asm-generic/arith.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_MONITOR_H
+#define _COBALT_UAPI_MONITOR_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+struct cobalt_monitor_state {
+	atomic_t owner;
+	__u32 flags;
+#define COBALT_MONITOR_GRANTED    0x01
+#define COBALT_MONITOR_DRAINED    0x02
+#define COBALT_MONITOR_SIGNALED   0x03 /* i.e. GRANTED or DRAINED */
+#define COBALT_MONITOR_BROADCAST  0x04
+#define COBALT_MONITOR_PENDED     0x08
+};
+
+struct cobalt_monitor;
+
+struct cobalt_monitor_shadow {
+	__u32 state_offset;
+	__u32 flags;
+	xnhandle_t handle;
+#define COBALT_MONITOR_SHARED     0x1
+#define COBALT_MONITOR_WAITGRANT  0x0
+#define COBALT_MONITOR_WAITDRAIN  0x1
+};
+
+typedef struct cobalt_monitor_shadow cobalt_monitor_t;
+
+#endif /* !_COBALT_UAPI_MONITOR_H */
+++ linux-patched/include/xenomai/cobalt/uapi/asm-generic/arith.h	2022-03-21 12:58:32.074862779 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/asm-generic/syscall.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ *   Generic arithmetic/conversion routines.
+ *   Copyright &copy; 2005 Stelian Pop.
+ *   Copyright &copy; 2005 Gilles Chanteperdrix.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_ASM_GENERIC_ARITH_H
+#define _COBALT_UAPI_ASM_GENERIC_ARITH_H
+
+#ifndef xnarch_u64tou32
+#define xnarch_u64tou32(ull, h, l) ({		\
+      union {					\
+	      unsigned long long _ull;		\
+	      struct endianstruct _s;		\
+      } _u;					\
+      _u._ull = (ull);				\
+      (h) = _u._s._h;				\
+      (l) = _u._s._l;				\
+})
+#endif /* !xnarch_u64tou32 */
+
+#ifndef xnarch_u64fromu32
+#define xnarch_u64fromu32(h, l) ({		\
+	union {					\
+		unsigned long long _ull;	\
+		struct endianstruct _s;		\
+	} _u;					\
+	_u._s._h = (h);				\
+	_u._s._l = (l);				\
+	_u._ull;				\
+})
+#endif /* !xnarch_u64fromu32 */
+
+#ifndef xnarch_ullmul
+static inline __attribute__((__const__)) unsigned long long
+xnarch_generic_ullmul(const unsigned m0, const unsigned m1)
+{
+	return (unsigned long long) m0 * m1;
+}
+#define xnarch_ullmul(m0,m1) xnarch_generic_ullmul((m0),(m1))
+#endif /* !xnarch_ullmul */
+
+#ifndef xnarch_ulldiv
+static inline unsigned long long xnarch_generic_ulldiv (unsigned long long ull,
+							const unsigned uld,
+							unsigned long *const rp)
+{
+	const unsigned r = do_div(ull, uld);
+
+	if (rp)
+		*rp = r;
+
+	return ull;
+}
+#define xnarch_ulldiv(ull,uld,rp) xnarch_generic_ulldiv((ull),(uld),(rp))
+#endif /* !xnarch_ulldiv */
+
+#ifndef xnarch_uldivrem
+#define xnarch_uldivrem(ull,ul,rp) ((unsigned) xnarch_ulldiv((ull),(ul),(rp)))
+#endif /* !xnarch_uldivrem */
+
+#ifndef xnarch_divmod64
+static inline unsigned long long
+xnarch_generic_divmod64(unsigned long long a,
+			unsigned long long b,
+			unsigned long long *rem)
+{
+	unsigned long long q;
+#if defined(__KERNEL__) && BITS_PER_LONG < 64
+	unsigned long long
+		xnarch_generic_full_divmod64(unsigned long long a,
+					     unsigned long long b,
+					     unsigned long long *rem);
+	if (b <= 0xffffffffULL) {
+		unsigned long r;
+		q = xnarch_ulldiv(a, b, &r);
+		if (rem)
+			*rem = r;
+	} else {
+		if (a < b) {
+			if (rem)
+				*rem = a;
+			return 0;
+		}
+
+		return xnarch_generic_full_divmod64(a, b, rem);
+	}
+#else /* !(__KERNEL__ && BITS_PER_LONG < 64) */
+	q = a / b;
+	if (rem)
+		*rem = a % b;
+#endif  /* !(__KERNEL__ && BITS_PER_LONG < 64) */
+	return q;
+}
+#define xnarch_divmod64(a,b,rp) xnarch_generic_divmod64((a),(b),(rp))
+#endif /* !xnarch_divmod64 */
+
+#ifndef xnarch_imuldiv
+static inline __attribute__((__const__)) int xnarch_generic_imuldiv(int i,
+								    int mult,
+								    int div)
+{
+	/* (int)i = (unsigned long long)i*(unsigned)(mult)/(unsigned)div. */
+	const unsigned long long ull = xnarch_ullmul(i, mult);
+	return xnarch_uldivrem(ull, div, NULL);
+}
+#define xnarch_imuldiv(i,m,d) xnarch_generic_imuldiv((i),(m),(d))
+#endif /* !xnarch_imuldiv */
+
+#ifndef xnarch_imuldiv_ceil
+static inline __attribute__((__const__)) int xnarch_generic_imuldiv_ceil(int i,
+									 int mult,
+									 int div)
+{
+	/* Same as xnarch_generic_imuldiv, rounding up. */
+	const unsigned long long ull = xnarch_ullmul(i, mult);
+	return xnarch_uldivrem(ull + (unsigned)div - 1, div, NULL);
+}
+#define xnarch_imuldiv_ceil(i,m,d) xnarch_generic_imuldiv_ceil((i),(m),(d))
+#endif /* !xnarch_imuldiv_ceil */
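+
+/*
+ * Worked example: xnarch_imuldiv(1000, 3, 7) computes
+ * 1000 * 3 / 7 = 428 (truncated), while xnarch_imuldiv_ceil()
+ * rounds the same quotient up to 429.
+ */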
+
+/* Division of an unsigned 96-bit value ((h << 32) + l) by an unsigned 32-bit
+   divisor. Building block for llimd. Without the const qualifiers, gcc reloads
+   registers after each call to uldivrem. */
+static inline unsigned long long
+xnarch_generic_div96by32(const unsigned long long h,
+			 const unsigned l,
+			 const unsigned d,
+			 unsigned long *const rp)
+{
+	unsigned long rh;
+	const unsigned qh = xnarch_uldivrem(h, d, &rh);
+	const unsigned long long t = xnarch_u64fromu32(rh, l);
+	const unsigned ql = xnarch_uldivrem(t, d, rp);
+
+	return xnarch_u64fromu32(qh, ql);
+}
+
+#ifndef xnarch_llimd
+static inline __attribute__((__const__))
+unsigned long long xnarch_generic_ullimd(const unsigned long long op,
+					 const unsigned m,
+					 const unsigned d)
+{
+	unsigned int oph, opl, tlh, tll;
+	unsigned long long th, tl;
+
+	xnarch_u64tou32(op, oph, opl);
+	tl = xnarch_ullmul(opl, m);
+	xnarch_u64tou32(tl, tlh, tll);
+	th = xnarch_ullmul(oph, m);
+	th += tlh;
+
+	return xnarch_generic_div96by32(th, tll, d, NULL);
+}
+
+static inline __attribute__((__const__)) long long
+xnarch_generic_llimd (long long op, unsigned m, unsigned d)
+{
+	long long ret;
+	int sign = 0;
+
+	if (op < 0LL) {
+		sign = 1;
+		op = -op;
+	}
+	ret = xnarch_generic_ullimd(op, m, d);
+
+	return sign ? -ret : ret;
+}
+#define xnarch_llimd(ll,m,d) xnarch_generic_llimd((ll),(m),(d))
+#endif /* !xnarch_llimd */
+
+#ifndef xnarch_u96shift
+#define xnarch_u96shift(h, m, l, s) ({		\
+	unsigned int _l = (l);			\
+	unsigned int _m = (m);			\
+	unsigned int _s = (s);			\
+	_l >>= _s;				\
+	_l |= (_m << (32 - _s));		\
+	_m >>= _s;				\
+	_m |= ((h) << (32 - _s));		\
+	xnarch_u64fromu32(_m, _l);		\
+})
+#endif /* !xnarch_u96shift */
+
+static inline long long xnarch_llmi(int i, int j)
+{
+	/* Fast 32x32->64 signed multiplication */
+	return (long long) i * j;
+}
+
+#ifndef xnarch_llmulshft
+/* Fast scaled-math-based replacement for long long multiply-divide */
+static inline long long
+xnarch_generic_llmulshft(const long long op,
+			  const unsigned m,
+			  const unsigned s)
+{
+	unsigned int oph, opl, tlh, tll, thh, thl;
+	unsigned long long th, tl;
+
+	xnarch_u64tou32(op, oph, opl);
+	tl = xnarch_ullmul(opl, m);
+	xnarch_u64tou32(tl, tlh, tll);
+	th = xnarch_llmi(oph, m);
+	th += tlh;
+	xnarch_u64tou32(th, thh, thl);
+
+	return xnarch_u96shift(thh, thl, tll, s);
+}
+#define xnarch_llmulshft(ll, m, s) xnarch_generic_llmulshft((ll), (m), (s))
+#endif /* !xnarch_llmulshft */
+
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+
+/* Representation of a 32 bits fraction. */
+struct xnarch_u32frac {
+	unsigned long long frac;
+	unsigned integ;
+};
+
+static inline void xnarch_init_u32frac(struct xnarch_u32frac *const f,
+				       const unsigned m,
+				       const unsigned d)
+{
+	/*
+	 * Prevent clever compiler optimizations from kicking in when
+	 * d is known at compile time. The performance of this
+	 * function is not critical since it is only called at init
+	 * time.
+	 */
+	volatile unsigned vol_d = d;
+	f->integ = m / d;
+	f->frac = xnarch_generic_div96by32
+		(xnarch_u64fromu32(m % d, 0), 0, vol_d, NULL);
+}
+
+#ifndef xnarch_nodiv_imuldiv
+static inline __attribute__((__const__)) unsigned
+xnarch_generic_nodiv_imuldiv(unsigned op, const struct xnarch_u32frac f)
+{
+	return (xnarch_ullmul(op, f.frac >> 32) >> 32) + f.integ * op;
+}
+#define xnarch_nodiv_imuldiv(op, f) xnarch_generic_nodiv_imuldiv((op),(f))
+#endif /* xnarch_nodiv_imuldiv */
+
+#ifndef xnarch_nodiv_imuldiv_ceil
+static inline __attribute__((__const__)) unsigned
+xnarch_generic_nodiv_imuldiv_ceil(unsigned op, const struct xnarch_u32frac f)
+{
+	unsigned long long full = xnarch_ullmul(op, f.frac >> 32) + ~0U;
+	return (full >> 32) + f.integ * op;
+}
+#define xnarch_nodiv_imuldiv_ceil(op, f) \
+	xnarch_generic_nodiv_imuldiv_ceil((op),(f))
+#endif /* xnarch_nodiv_imuldiv_ceil */
+
+#ifndef xnarch_nodiv_ullimd
+
+#ifndef xnarch_add96and64
+#error "xnarch_add96and64 must be implemented."
+#endif
+
+static inline __attribute__((__const__)) unsigned long long
+xnarch_mul64by64_high(const unsigned long long op, const unsigned long long m)
+{
+	/* Compute high 64 bits of multiplication 64 bits x 64 bits. */
+	register unsigned long long t0, t1, t2, t3;
+	register unsigned int oph, opl, mh, ml, t0h, t0l, t1h, t1l, t2h, t2l, t3h, t3l;
+
+	xnarch_u64tou32(op, oph, opl);
+	xnarch_u64tou32(m, mh, ml);
+	t0 = xnarch_ullmul(opl, ml);
+	xnarch_u64tou32(t0, t0h, t0l);
+	t3 = xnarch_ullmul(oph, mh);
+	xnarch_u64tou32(t3, t3h, t3l);
+	xnarch_add96and64(t3h, t3l, t0h, 0, t0l >> 31);
+	t1 = xnarch_ullmul(oph, ml);
+	xnarch_u64tou32(t1, t1h, t1l);
+	xnarch_add96and64(t3h, t3l, t0h, t1h, t1l);
+	t2 = xnarch_ullmul(opl, mh);
+	xnarch_u64tou32(t2, t2h, t2l);
+	xnarch_add96and64(t3h, t3l, t0h, t2h, t2l);
+
+	return xnarch_u64fromu32(t3h, t3l);
+}
+
+static inline unsigned long long
+xnarch_generic_nodiv_ullimd(const unsigned long long op,
+			    const unsigned long long frac,
+			    unsigned int integ)
+{
+	return xnarch_mul64by64_high(op, frac) + integ * op;
+}
+#define xnarch_nodiv_ullimd(op, f, i)  xnarch_generic_nodiv_ullimd((op),(f), (i))
+#endif /* !xnarch_nodiv_ullimd */
+
+#ifndef xnarch_nodiv_llimd
+static inline __attribute__((__const__)) long long
+xnarch_generic_nodiv_llimd(long long op, unsigned long long frac,
+			   unsigned int integ)
+{
+	long long ret;
+	int sign = 0;
+
+	if (op < 0LL) {
+		sign = 1;
+		op = -op;
+	}
+	ret = xnarch_nodiv_ullimd(op, frac, integ);
+
+	return sign ? -ret : ret;
+}
+#define xnarch_nodiv_llimd(ll,frac,integ) xnarch_generic_nodiv_llimd((ll),(frac),(integ))
+#endif /* !xnarch_nodiv_llimd */
+
+#endif /* XNARCH_HAVE_NODIV_LLIMD */
+
+static inline void xnarch_init_llmulshft(const unsigned m_in,
+					 const unsigned d_in,
+					 unsigned *m_out,
+					 unsigned *s_out)
+{
+	/*
+	 * Prevent clever compiler optimizations from kicking in when
+	 * d is known at compile time. The performance of this
+	 * function is not critical since it is only called at init
+	 * time.
+	 */
+	volatile unsigned int vol_d = d_in;
+	unsigned long long mult;
+
+	*s_out = 31;
+	while (1) {
+		mult = ((unsigned long long)m_in) << *s_out;
+		do_div(mult, vol_d);
+		if (mult <= 0x7FFFFFFF)
+			break;
+		(*s_out)--;
+	}
+	*m_out = (unsigned int)mult;
+}
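+
+/*
+ * Illustrative use of the scaled-math helpers: to apply a constant
+ * ratio such as 3/7 without dividing on the hot path, compute the
+ * (multiplier, shift) pair once, then reuse it:
+ *
+ * unsigned int mul, shft;
+ * xnarch_init_llmulshft(3, 7, &mul, &shft);
+ * scaled = xnarch_llmulshft(value, mul, shft);  approximates value * 3 / 7
+ */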
+
+#define xnarch_ullmod(ull,uld,rem)   ({ xnarch_ulldiv(ull,uld,rem); (*rem); })
+#define xnarch_uldiv(ull, d)         xnarch_uldivrem(ull, d, NULL)
+#define xnarch_ulmod(ull, d)         ({ unsigned long _rem;	\
+					xnarch_uldivrem(ull,d,&_rem); _rem; })
+
+#define xnarch_div64(a,b)            xnarch_divmod64((a),(b),NULL)
+#define xnarch_mod64(a,b)            ({ unsigned long long _rem; \
+					xnarch_divmod64((a),(b),&_rem); _rem; })
+
+#endif /* _COBALT_UAPI_ASM_GENERIC_ARITH_H */
+++ linux-patched/include/xenomai/cobalt/uapi/asm-generic/syscall.h	2022-03-21 12:58:32.066862857 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/asm-generic/features.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_ASM_GENERIC_SYSCALL_H
+#define _COBALT_UAPI_ASM_GENERIC_SYSCALL_H
+
+#include <linux/types.h>
+#include <asm/xenomai/uapi/features.h>
+#include <asm/xenomai/uapi/syscall.h>
+
+#define __COBALT_SYSCALL_BIT	0x10000000
+
+struct cobalt_bindreq {
+	/** Features userland requires. */
+	__u32 feat_req;
+	/** ABI revision userland uses. */
+	__u32 abi_rev;
+	/** Features the Cobalt core provides. */
+	struct cobalt_featinfo feat_ret;
+};
+
+#define COBALT_SECONDARY  0
+#define COBALT_PRIMARY    1
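+
+/*
+ * Illustrative: Cobalt syscall numbers are conveyed to the kernel
+ * with the marker bit set, e.g. (__COBALT_SYSCALL_BIT | sc_cobalt_bind),
+ * which is how Cobalt requests are told apart from regular Linux
+ * syscalls sharing the same entry path.
+ */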
+
+#endif /* !_COBALT_UAPI_ASM_GENERIC_SYSCALL_H */
+++ linux-patched/include/xenomai/cobalt/uapi/asm-generic/features.h	2022-03-21 12:58:32.059862925 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/cond.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_ASM_GENERIC_FEATURES_H
+#define _COBALT_UAPI_ASM_GENERIC_FEATURES_H
+
+#include <linux/types.h>
+
+#define XNFEAT_STRING_LEN 64
+
+struct cobalt_featinfo {
+	/** Real-time clock frequency */
+	__u64 clock_freq;
+	/** Offset of nkvdso in the sem heap. */
+	__u32 vdso_offset;
+	/** ABI revision level. */
+	__u32 feat_abirev;
+	/** Available feature set. */
+	__u32 feat_all;
+	/** Mandatory features (when requested). */
+	__u32 feat_man;
+	/** Requested feature set. */
+	__u32 feat_req;
+	/** Missing features. */
+	__u32 feat_mis;
+	char feat_all_s[XNFEAT_STRING_LEN];
+	char feat_man_s[XNFEAT_STRING_LEN];
+	char feat_req_s[XNFEAT_STRING_LEN];
+	char feat_mis_s[XNFEAT_STRING_LEN];
+	/* Architecture-specific features. */
+	struct cobalt_featinfo_archdep feat_arch;
+};
+
+#define __xn_feat_smp         0x80000000
+#define __xn_feat_nosmp       0x40000000
+#define __xn_feat_fastsynch   0x20000000
+#define __xn_feat_nofastsynch 0x10000000
+#define __xn_feat_control     0x08000000
+#define __xn_feat_prioceiling 0x04000000
+
+#ifdef CONFIG_SMP
+#define __xn_feat_smp_mask __xn_feat_smp
+#else
+#define __xn_feat_smp_mask __xn_feat_nosmp
+#endif
+
+/*
+ * Revisit: all archs currently support fast locking, and there is no
+ * reason for any future port not to provide this. This will be
+ * written in stone at the next ABI update, when fastsynch support is
+ * dropped from the optional feature set.
+ */
+#define __xn_feat_fastsynch_mask __xn_feat_fastsynch
+
+/* List of generic features kernel or userland may support */
+#define __xn_feat_generic_mask			\
+	(__xn_feat_smp_mask		|	\
+	 __xn_feat_fastsynch_mask 	|	\
+	 __xn_feat_prioceiling)
+
+/*
+ * List of features both sides have to agree on: If userland supports
+ * it, the kernel has to provide it, too. This means backward
+ * compatibility between older userland and newer kernel may be
+ * supported for those features, but forward compatibility between
+ * newer userland and older kernel cannot.
+ */
+#define __xn_feat_generic_man_mask		\
+	(__xn_feat_fastsynch		|	\
+	 __xn_feat_nofastsynch		|	\
+	 __xn_feat_nosmp		|	\
+	 __xn_feat_prioceiling)
+
+static inline
+const char *get_generic_feature_label(unsigned int feature)
+{
+	switch (feature) {
+	case __xn_feat_smp:
+		return "smp";
+	case __xn_feat_nosmp:
+		return "nosmp";
+	case __xn_feat_fastsynch:
+		return "fastsynch";
+	case __xn_feat_nofastsynch:
+		return "nofastsynch";
+	case __xn_feat_control:
+		return "control";
+	case __xn_feat_prioceiling:
+		return "prioceiling";
+	default:
+		return 0;
+	}
+}
+
+static inline int check_abi_revision(unsigned long abirev)
+{
+	return abirev == XENOMAI_ABI_REV;
+}
+
+#endif /* !_COBALT_UAPI_ASM_GENERIC_FEATURES_H */
+++ linux-patched/include/xenomai/cobalt/uapi/cond.h	2022-03-21 12:58:32.052862993 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/thread.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_COND_H
+#define _COBALT_UAPI_COND_H
+
+#include <cobalt/uapi/mutex.h>
+
+#define COBALT_COND_MAGIC 0x86860505
+
+struct cobalt_cond_state {
+	__u32 pending_signals;
+	__u32 mutex_state_offset;
+};
+
+union cobalt_cond_union {
+	pthread_cond_t native_cond;
+	struct cobalt_cond_shadow {
+		__u32 magic;
+		__u32 state_offset;
+		xnhandle_t handle;
+	} shadow_cond;
+};
+
+#endif /* !_COBALT_UAPI_COND_H */
+++ linux-patched/include/xenomai/cobalt/uapi/thread.h	2022-03-21 12:58:32.044863071 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/schedparam.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_THREAD_H
+#define _COBALT_UAPI_THREAD_H
+
+#include <cobalt/uapi/kernel/thread.h>
+
+#define PTHREAD_WARNSW             XNWARN
+#define PTHREAD_LOCK_SCHED         XNLOCK
+#define PTHREAD_DISABLE_LOCKBREAK  XNTRAPLB
+#define PTHREAD_CONFORMING     0
+
+struct cobalt_mutexattr {
+	int type : 3;
+	int protocol : 3;
+	int pshared : 1;
+	int __pad : 1;
+	int ceiling : 8;  /* prio-1, (XN)SCHED_FIFO range. */
+};
+
+struct cobalt_condattr {
+	int clock : 7;
+	int pshared : 1;
+};
+
+struct cobalt_threadstat {
+	__u64 xtime;
+	__u64 timeout;
+	__u64 msw;
+	__u64 csw;
+	__u64 xsc;
+	__u32 status;
+	__u32 pf;
+	int cpu;
+	int cprio;
+	char name[XNOBJECT_NAME_LEN];
+	char personality[XNOBJECT_NAME_LEN];
+};
+
+#endif /* !_COBALT_UAPI_THREAD_H */
+++ linux-patched/include/xenomai/cobalt/kernel/schedparam.h	2022-03-21 12:58:31.742866016 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/vfile.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHEDPARAM_H
+#define _COBALT_KERNEL_SCHEDPARAM_H
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+struct xnsched_idle_param {
+	int prio;
+};
+
+struct xnsched_weak_param {
+	int prio;
+};
+
+struct xnsched_rt_param {
+	int prio;
+};
+
+struct xnsched_tp_param {
+	int prio;
+	int ptid;	/* partition id. */
+};
+
+struct xnsched_sporadic_param {
+	xnticks_t init_budget;
+	xnticks_t repl_period;
+	int max_repl;
+	int low_prio;
+	int normal_prio;
+	int current_prio;
+};
+
+struct xnsched_quota_param {
+	int prio;
+	int tgid;	/* thread group id. */
+};
+
+union xnsched_policy_param {
+	struct xnsched_idle_param idle;
+	struct xnsched_rt_param rt;
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+	struct xnsched_weak_param weak;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	struct xnsched_tp_param tp;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	struct xnsched_sporadic_param pss;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	struct xnsched_quota_param quota;
+#endif
+};
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHEDPARAM_H */
+++ linux-patched/include/xenomai/cobalt/kernel/vfile.h	2022-03-21 12:58:31.735866084 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/synch.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_KERNEL_VFILE_H
+#define _COBALT_KERNEL_VFILE_H
+
+#if defined(CONFIG_XENO_OPT_VFILE) || defined(DOXYGEN_CPP)
+
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <cobalt/kernel/lock.h>
+
+/**
+ * @addtogroup cobalt_core_vfile
+ * @{
+ */
+
+struct xnvfile_directory;
+struct xnvfile_regular_iterator;
+struct xnvfile_snapshot_iterator;
+struct xnvfile_lock_ops;
+
+struct xnvfile {
+	struct proc_dir_entry *pde;
+	struct file *file;
+	struct xnvfile_lock_ops *lockops;
+	int refcnt;
+	void *private;
+};
+
+/**
+ * @brief Vfile locking operations
+ * @anchor vfile_lockops
+ *
+ * This structure describes the operations to be provided for
+ * implementing locking support on vfiles. They apply to both
+ * snapshot-driven and regular vfiles.
+ */
+struct xnvfile_lock_ops {
+	/**
+	 * @anchor lockops_get
+	 * This handler should grab the desired lock.
+	 *
+	 * @param vfile A pointer to the virtual file which needs
+	 * locking.
+	 *
+	 * @return zero should be returned if the call
+	 * succeeds. Otherwise, a negative error code can be returned;
+	 * upon error, the current vfile operation is aborted, and the
+	 * user-space caller is passed back the error value.
+	 */
+	int (*get)(struct xnvfile *vfile);
+	/**
+	 * @anchor lockops_put This handler should release the lock
+	 * previously grabbed by the @ref lockops_get "get() handler".
+	 *
+	 * @param vfile A pointer to the virtual file which currently
+	 * holds the lock to release.
+	 */
+	void (*put)(struct xnvfile *vfile);
+};
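+
+/*
+ * Sketch of a lock operation descriptor built around a regular kernel
+ * mutex (illustrative only; the example_* names are hypothetical). This
+ * is essentially what the host lock class below provides out of the box.
+ *
+ *	static DEFINE_MUTEX(example_mutex);
+ *
+ *	static int example_lock_get(struct xnvfile *vfile)
+ *	{
+ *		return mutex_lock_interruptible(&example_mutex);
+ *	}
+ *
+ *	static void example_lock_put(struct xnvfile *vfile)
+ *	{
+ *		mutex_unlock(&example_mutex);
+ *	}
+ *
+ *	static struct xnvfile_lock_ops example_lockops = {
+ *		.get = example_lock_get,
+ *		.put = example_lock_put,
+ *	};
+ */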
+
+struct xnvfile_hostlock_class {
+	struct xnvfile_lock_ops ops;
+	struct mutex mutex;
+};
+
+struct xnvfile_nklock_class {
+	struct xnvfile_lock_ops ops;
+	spl_t s;
+};
+
+struct xnvfile_input {
+	const char __user *u_buf;
+	size_t size;
+	struct xnvfile *vfile;
+};
+
+/**
+ * @brief Regular vfile operation descriptor
+ * @anchor regular_ops
+ *
+ * This structure describes the operations available with a regular
+ * vfile. It defines handlers for sending back formatted kernel data
+ * upon a user-space read request, and for obtaining user data upon a
+ * user-space write request.
+ */
+struct xnvfile_regular_ops {
+	/**
+	 * @anchor regular_rewind This handler is called only once,
+	 * when the virtual file is opened, before the @ref
+	 * regular_begin "begin() handler" is invoked.
+	 *
+	 * @param it A pointer to the vfile iterator which will be
+	 * used to read the file contents.
+	 *
+	 * @return Zero should be returned upon success. Otherwise, a
+	 * negative error code aborts the operation, and is passed
+	 * back to the reader.
+	 *
+	 * @note This handler is optional. It should not be used to
+	 * allocate resources but rather to perform consistency
+	 * checks, since no closure call is issued in case the open
+	 * sequence subsequently fails.
+	 */
+	int (*rewind)(struct xnvfile_regular_iterator *it);
+	/**
+	 * @anchor regular_begin
+	 * This handler should prepare for iterating over the records
+	 * upon a read request, starting from the specified position.
+	 *
+	 * @param it A pointer to the current vfile iterator. On
+	 * entry, it->pos is set to the (0-based) position of the
+	 * first record to output. This handler may be called multiple
+	 * times with different position requests.
+	 *
+	 * @return A pointer to the first record to format and output,
+	 * to be passed to the @ref regular_show "show() handler" as
+	 * its @a data parameter, if the call succeeds. Otherwise:
+	 *
+	 * - NULL in case no record is available, in which case the
+	 * read operation will terminate immediately with no output.
+	 *
+	 * - VFILE_SEQ_START, a special value indicating that @ref
+	 * regular_show "the show() handler" should receive a NULL
+	 * data pointer first, in order to output a header.
+	 *
+	 * - ERR_PTR(errno), where errno is a negative error code;
+	 * upon error, the current operation will be aborted
+	 * immediately.
+	 *
+	 * @note This handler is optional; if none is given in the
+	 * operation descriptor (i.e. NULL value), the @ref
+	 * regular_show "show() handler()" will be called only once
+	 * for a read operation, with a NULL @a data parameter. This
+	 * particular setting is convenient for simple regular vfiles
+	 * having a single, fixed record to output.
+	 */
+	void *(*begin)(struct xnvfile_regular_iterator *it);
+	/**
+	 * @anchor regular_next
+	 * This handler should return the address of the next record
+	 * to format and output by the @ref regular_show "show()
+	 * handler".
+	 *
+	 * @param it A pointer to the current vfile iterator. On
+	 * entry, it->pos is set to the (0-based) position of the
+	 * next record to output.
+	 *
+	 * @return A pointer to the next record to format and output,
+	 * to be passed to the @ref regular_show "show() handler" as
+	 * its @a data parameter, if the call succeeds. Otherwise:
+	 *
+	 * - NULL in case no record is available, in which case the
+	 * read operation will terminate immediately with no output.
+	 *
+	 * - ERR_PTR(errno), where errno is a negative error code;
+	 * upon error, the current operation will be aborted
+	 * immediately.
+	 *
+	 * @note This handler is optional; if none is given in the
+	 * operation descriptor (i.e. NULL value), the read operation
+	 * will stop after the first invocation of the @ref regular_show
+	 * "show() handler".
+	 */
+	void *(*next)(struct xnvfile_regular_iterator *it);
+	/**
+	 * @anchor regular_end
+	 * This handler is called after all records have been output.
+	 *
+	 * @param it A pointer to the current vfile iterator.
+	 *
+	 * @note This handler is optional and the pointer may be NULL.
+	 */
+	void (*end)(struct xnvfile_regular_iterator *it);
+	/**
+	 * @anchor regular_show
+	 * This handler should format and output a record.
+	 *
+	 * xnvfile_printf(), xnvfile_write(), xnvfile_puts() and
+	 * xnvfile_putc() are available to format and/or emit the
+	 * output. All routines take the iterator argument @a it as
+	 * their first parameter.
+	 *
+	 * @param it A pointer to the current vfile iterator.
+	 *
+	 * @param data A pointer to the record to format then
+	 * output. The first call to the handler may receive a NULL @a
+	 * data pointer, depending on the presence and/or return value of a
+	 * @ref regular_begin "begin() handler"; the show handler should test
+	 * this special value to output any header that fits, prior to
+	 * receiving more calls with actual records.
+	 *
+	 * @return zero if the call succeeds, also indicating that the
+	 * handler should be called for the next record if
+	 * any. Otherwise:
+	 *
+	 * - A negative error code. This will abort the output phase,
+	 * and return this status to the reader.
+	 *
+	 * - VFILE_SEQ_SKIP, a special value indicating that the
+	 * current record should be skipped and will not be output.
+	 */
+	int (*show)(struct xnvfile_regular_iterator *it, void *data);
+	/**
+	 * @anchor regular_store
+	 * This handler receives data written to the vfile, likely for
+	 * updating some kernel setting, or triggering any other
+	 * action which fits. This is the only handler which deals
+	 * with the write-side of a vfile.  It is called when writing
+	 * to the /proc entry of the vfile from a user-space process.
+	 *
+	 * The input data is described by a descriptor passed to the
+	 * handler, which may be subsequently passed to parsing helper
+	 * routines.  For instance, xnvfile_get_string() will accept
+	 * the input descriptor for returning the written data as a
+	 * null-terminated character string. On the other hand,
+	 * xnvfile_get_integer() will attempt to return a long integer
+	 * from the input data.
+	 *
+	 * @param input A pointer to an input descriptor. It refers to
+	 * an opaque data from the handler's standpoint.
+	 *
+	 * @return the number of bytes read from the input descriptor
+	 * if the call succeeds. Otherwise, a negative error code.
+	 * Return values from parsing helper routines are commonly
+	 * passed back to the caller by the @ref regular_store
+	 * "store() handler".
+	 *
+	 * @note This handler is optional, and may be omitted for
+	 * read-only vfiles.
+	 */
+	ssize_t (*store)(struct xnvfile_input *input);
+};
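+
+/*
+ * Usage sketch (illustrative only; the example_* names are hypothetical):
+ * a simple regular vfile exposing a single fixed record. With no
+ * begin()/next() handlers, show() runs exactly once with a NULL data
+ * pointer; store() parses an integer written to the /proc entry.
+ *
+ *	static int example_value;
+ *
+ *	static int example_show(struct xnvfile_regular_iterator *it, void *data)
+ *	{
+ *		xnvfile_printf(it, "%d\n", example_value);
+ *		return 0;
+ *	}
+ *
+ *	static ssize_t example_store(struct xnvfile_input *input)
+ *	{
+ *		long val;
+ *		ssize_t ret = xnvfile_get_integer(input, &val);
+ *
+ *		if (ret < 0)
+ *			return ret;
+ *
+ *		example_value = (int)val;
+ *		return ret;	// number of bytes consumed
+ *	}
+ *
+ *	static struct xnvfile_regular_ops example_vfile_ops = {
+ *		.show = example_show,
+ *		.store = example_store,
+ *	};
+ *
+ *	static struct xnvfile_regular example_vfile = {
+ *		.ops = &example_vfile_ops,
+ *	};
+ *
+ * The vfile would then be exported with
+ * xnvfile_init_regular("example", &example_vfile, &cobalt_vfroot).
+ */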
+
+struct xnvfile_regular {
+	struct xnvfile entry;
+	size_t privsz;
+	struct xnvfile_regular_ops *ops;
+};
+
+struct xnvfile_regular_template {
+	size_t privsz;
+	struct xnvfile_regular_ops *ops;
+	struct xnvfile_lock_ops *lockops;
+};
+
+/**
+ * @brief Regular vfile iterator
+ * @anchor regular_iterator
+ *
+ * This structure defines an iterator over a regular vfile.
+ */
+struct xnvfile_regular_iterator {
+	/** Current record position while iterating. */
+	loff_t pos;
+	/** Backlink to the host sequential file supporting the vfile. */
+	struct seq_file *seq;
+	/** Backlink to the vfile being read. */
+	struct xnvfile_regular *vfile;
+	/**
+	 * Start of private area. Use xnvfile_iterator_priv() to
+	 * address it.
+	 */
+	char private[0];
+};
+
+/**
+ * @brief Snapshot vfile operation descriptor
+ * @anchor snapshot_ops
+ *
+ * This structure describes the operations available with a
+ * snapshot-driven vfile. It defines handlers for returning a
+ * printable snapshot of some Xenomai object contents upon a
+ * user-space read request, and for updating this object upon a
+ * user-space write request.
+ */
+struct xnvfile_snapshot_ops {
+	/**
+	 * @anchor snapshot_rewind
+	 * This handler (re-)initializes the data collection, moving
+	 * the seek pointer at the first record. When the file
+	 * revision tag is touched while collecting data, the current
+	 * reading is aborted, all collected data dropped, and the
+	 * vfile is eventually rewound.
+	 *
+	 * @param it A pointer to the current snapshot iterator. Two
+	 * useful pieces of information can be retrieved from this
+	 * iterator in this context:
+	 *
+	 * - it->vfile is a pointer to the descriptor of the virtual
+	 * file being rewound.
+	 *
+	 * - xnvfile_iterator_priv(it) returns a pointer to the
+	 * private data area, available from the descriptor, whose
+	 * size is vfile->privsz. If the latter size is zero, the
+	 * returned pointer is meaningless and should not be used.
+	 *
+	 * @return A negative error code aborts the data collection,
+	 * and is passed back to the reader. Otherwise:
+	 *
+	 * - a strictly positive value is interpreted as the total
+	 * number of records which will be returned by the @ref
+	 * snapshot_next "next() handler" during the data collection
+	 * phase. If no @ref snapshot_begin "begin() handler" is
+	 * provided in the @ref snapshot_ops "operation descriptor",
+	 * this value is used to allocate the snapshot buffer
+	 * internally. The size of this buffer would then be
+	 * vfile->datasz * value.
+	 *
+	 * - zero leaves the allocation to the @ref snapshot_begin
+	 * "begin() handler" if present, or indicates that no record
+	 * is to be output in case such handler is not given.
+	 *
+	 * @note This handler is optional; a NULL value indicates that
+	 * nothing needs to be done for rewinding the vfile.  It is
+	 * called with the vfile lock held.
+	 */
+	int (*rewind)(struct xnvfile_snapshot_iterator *it);
+	/**
+	 * @anchor snapshot_begin
+	 * This handler should allocate the snapshot buffer to hold
+	 * records during the data collection phase.  When specified,
+	 * all records collected via the @ref snapshot_next "next()
+	 * handler" will be written to a cell from the memory area
+	 * returned by begin().
+	 *
+	 * @param it A pointer to the current snapshot iterator.
+	 *
+	 * @return A pointer to the record buffer, if the call
+	 * succeeds. Otherwise:
+	 *
+	 * - NULL in case of allocation error. This will abort the data
+	 * collection, and return -ENOMEM to the reader.
+	 *
+	 * - VFILE_SEQ_EMPTY, a special value indicating that no
+	 * record will be output. In such a case, the @ref
+	 * snapshot_next "next() handler" will not be called, and the
+	 * data collection will stop immediately. However, the @ref
+	 * snapshot_show "show() handler" will still be called once,
+	 * with a NULL data pointer (i.e. header display request).
+	 *
+	 * @note This handler is optional; if none is given, the
+	 * snapshot buffer is allocated internally, based on the value
+	 * returned by the @ref snapshot_rewind "rewind() handler".
+	 */
+	void *(*begin)(struct xnvfile_snapshot_iterator *it);
+	/**
+	 * @anchor snapshot_end
+	 * This handler releases the memory buffer previously obtained
+	 * from begin(). It is usually called after the snapshot data
+	 * has been output by show(), but it may also be called before
+	 * rewinding the vfile after a revision change, to release the
+	 * dropped buffer.
+	 *
+	 * @param it A pointer to the current snapshot iterator.
+	 *
+	 * @param buf A pointer to the buffer to release.
+	 *
+	 * @note This routine is optional and the pointer may be
+	 * NULL. It is not needed upon internal buffer allocation;
+	 * see the description of the @ref snapshot_rewind "rewind()
+	 * handler".
+	 */
+	void (*end)(struct xnvfile_snapshot_iterator *it, void *buf);
+	/**
+	 * @anchor snapshot_next
+	 * This handler fetches the next record, as part of the
+	 * snapshot data to be sent back to the reader via the @ref
+	 * snapshot_show "show() handler".
+	 *
+	 * @param it A pointer to the current snapshot iterator.
+	 *
+	 * @param data A pointer to the record to fill in.
+	 *
+	 * @return a strictly positive value, if the call succeeds and
+	 * leaves a valid record into @a data, which should be passed
+	 * to the @ref snapshot_show "show() handler()" during the
+	 * formatting and output phase. Otherwise:
+	 *
+	 * - A negative error code. This will abort the data
+	 * collection, and return this status to the reader.
+	 *
+	 * - VFILE_SEQ_SKIP, a special value indicating that the
+	 * current record should be skipped. In such a case, the @a
+	 * data pointer is not advanced to the next position before
+	 * the @ref snapshot_next "next() handler" is called anew.
+	 *
+	 * @note This handler is called with the vfile lock
+	 * held. Before each invocation of this handler, the vfile
+	 * core checks whether the revision tag has been touched, in
+	 * which case the data collection is restarted from scratch. A
+	 * data collection phase succeeds whenever all records can be
+	 * fetched via the @ref snapshot_next "next() handler", while
+	 * the revision tag remains unchanged, which indicates that a
+	 * consistent snapshot of the object state was taken.
+	 */
+	int (*next)(struct xnvfile_snapshot_iterator *it, void *data);
+	/**
+	 * @anchor snapshot_show
+	 * This handler should format and output a record from the
+	 * collected data.
+	 *
+	 * xnvfile_printf(), xnvfile_write(), xnvfile_puts() and
+	 * xnvfile_putc() are available to format and/or emit the
+	 * output. All routines take the iterator argument @a it as
+	 * their first parameter.
+	 *
+	 * @param it A pointer to the current snapshot iterator.
+	 *
+	 * @param data A pointer to the record to format then
+	 * output. The first call to the handler is always passed a
+	 * NULL @a data pointer; the show handler should test this
+	 * special value to output any header that fits, prior to
+	 * receiving more calls with actual records.
+	 *
+	 * @return zero if the call succeeds, also indicating that the
+	 * handler should be called for the next record if
+	 * any. Otherwise:
+	 *
+	 * - A negative error code. This will abort the output phase,
+	 * and return this status to the reader.
+	 *
+	 * - VFILE_SEQ_SKIP, a special value indicating that the
+	 * current record should be skipped and will not be output.
+	 */
+	int (*show)(struct xnvfile_snapshot_iterator *it, void *data);
+	/**
+	 * @anchor snapshot_store
+	 * This handler receives data written to the vfile, likely for
+	 * updating the associated Xenomai object's state, or
+	 * triggering any other action which fits. This is the only
+	 * handler which deals with the write-side of a vfile.  It is
+	 * called when writing to the /proc entry of the vfile
+	 * from a user-space process.
+	 *
+	 * The input data is described by a descriptor passed to the
+	 * handler, which may be subsequently passed to parsing helper
+	 * routines.  For instance, xnvfile_get_string() will accept
+	 * the input descriptor for returning the written data as a
+	 * null-terminated character string. On the other hand,
+	 * xnvfile_get_integer() will attempt to return a long integer
+	 * from the input data.
+	 *
+	 * @param input A pointer to an input descriptor. It refers to
+	 * an opaque data from the handler's standpoint.
+	 *
+	 * @return the number of bytes read from the input descriptor
+	 * if the call succeeds. Otherwise, a negative error code.
+	 * Return values from parsing helper routines are commonly
+	 * passed back to the caller by the @ref snapshot_store
+	 * "store() handler".
+	 *
+	 * @note This handler is optional, and may be omitted for
+	 * read-only vfiles.
+	 */
+	ssize_t (*store)(struct xnvfile_input *input);
+};
+
+/**
+ * @brief Snapshot revision tag
+ * @anchor revision_tag
+ *
+ * This structure defines a revision tag to be used with @ref
+ * snapshot_vfile "snapshot-driven vfiles".
+ */
+struct xnvfile_rev_tag {
+	/** Current revision number. */
+	int rev;
+};
+
+struct xnvfile_snapshot_template {
+	size_t privsz;
+	size_t datasz;
+	struct xnvfile_rev_tag *tag;
+	struct xnvfile_snapshot_ops *ops;
+	struct xnvfile_lock_ops *lockops;
+};
+
+/**
+ * @brief Snapshot vfile descriptor
+ * @anchor snapshot_vfile
+ *
+ * This structure describes a snapshot-driven vfile.  Reading from
+ * such a vfile involves a preliminary data collection phase under
+ * lock protection, and a subsequent formatting and output phase of
+ * the collected data records. Locking is done in a way that does not
+ * increase worst-case latency, regardless of the number of records to
+ * be collected for output.
+ */
+struct xnvfile_snapshot {
+	struct xnvfile entry;
+	size_t privsz;
+	size_t datasz;
+	struct xnvfile_rev_tag *tag;
+	struct xnvfile_snapshot_ops *ops;
+};
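+
+/*
+ * Usage sketch (illustrative only; the example_* names are hypothetical):
+ * a snapshot vfile dumping a fixed-size array of integers. rewind()
+ * returns the record count so the core can size the snapshot buffer
+ * (datasz * count), next() fills one record per call (returning 0 once
+ * the collection is done is assumed here), and show() formats each
+ * record, emitting a header when passed a NULL pointer.
+ *
+ *	struct example_rec {
+ *		int value;
+ *	};
+ *
+ *	struct example_priv {
+ *		int curr;	// next index to collect
+ *	};
+ *
+ *	static int example_values[16];
+ *	static struct xnvfile_rev_tag example_tag;
+ *
+ *	static int example_rewind(struct xnvfile_snapshot_iterator *it)
+ *	{
+ *		struct example_priv *priv = xnvfile_iterator_priv(it);
+ *
+ *		priv->curr = 0;
+ *		return ARRAY_SIZE(example_values);
+ *	}
+ *
+ *	static int example_next(struct xnvfile_snapshot_iterator *it, void *data)
+ *	{
+ *		struct example_priv *priv = xnvfile_iterator_priv(it);
+ *		struct example_rec *rec = data;
+ *
+ *		if (priv->curr >= ARRAY_SIZE(example_values))
+ *			return 0;	// all records collected
+ *
+ *		rec->value = example_values[priv->curr++];
+ *		return 1;		// one valid record stored
+ *	}
+ *
+ *	static int example_show(struct xnvfile_snapshot_iterator *it, void *data)
+ *	{
+ *		struct example_rec *rec = data;
+ *
+ *		if (rec == NULL) {	// header request
+ *			xnvfile_puts(it, "VALUE\n");
+ *			return 0;
+ *		}
+ *
+ *		xnvfile_printf(it, "%d\n", rec->value);
+ *		return 0;
+ *	}
+ *
+ *	static struct xnvfile_snapshot_ops example_snapshot_ops = {
+ *		.rewind = example_rewind,
+ *		.next = example_next,
+ *		.show = example_show,
+ *	};
+ *
+ *	static struct xnvfile_snapshot example_snapshot_vfile = {
+ *		.privsz = sizeof(struct example_priv),
+ *		.datasz = sizeof(struct example_rec),
+ *		.tag = &example_tag,
+ *		.ops = &example_snapshot_ops,
+ *	};
+ *
+ * The vfile would be exported with xnvfile_init_snapshot("example",
+ * &example_snapshot_vfile, &cobalt_vfroot), and xnvfile_touch() called
+ * whenever example_values changes, so that in-flight readers restart
+ * their collection.
+ */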
+
+/**
+ * @brief Snapshot-driven vfile iterator
+ * @anchor snapshot_iterator
+ *
+ * This structure defines an iterator over a snapshot-driven vfile.
+ */
+struct xnvfile_snapshot_iterator {
+	/** Number of collected records. */
+	int nrdata;
+	/** Address of record buffer. */
+	caddr_t databuf;
+	/** Backlink to the host sequential file supporting the vfile. */
+	struct seq_file *seq;
+	/** Backlink to the vfile being read. */
+	struct xnvfile_snapshot *vfile;
+	/** Buffer release handler. */
+	void (*endfn)(struct xnvfile_snapshot_iterator *it, void *buf);
+	/**
+	 * Start of private area. Use xnvfile_iterator_priv() to
+	 * address it.
+	 */
+	char private[0];
+};
+
+struct xnvfile_directory {
+	struct xnvfile entry;
+};
+
+struct xnvfile_link {
+	struct xnvfile entry;
+};
+
+/* vfile.begin()=> */
+#define VFILE_SEQ_EMPTY			((void *)-1)
+/* =>vfile.show() */
+#define VFILE_SEQ_START			SEQ_START_TOKEN
+/* vfile.next/show()=> */
+#define VFILE_SEQ_SKIP			2
+
+#define xnvfile_printf(it, args...)	seq_printf((it)->seq, ##args)
+#define xnvfile_write(it, data, len)	seq_write((it)->seq, (data),(len))
+#define xnvfile_puts(it, s)		seq_puts((it)->seq, (s))
+#define xnvfile_putc(it, c)		seq_putc((it)->seq, (c))
+
+static inline void xnvfile_touch_tag(struct xnvfile_rev_tag *tag)
+{
+	tag->rev++;
+}
+
+static inline void xnvfile_touch(struct xnvfile_snapshot *vfile)
+{
+	xnvfile_touch_tag(vfile->tag);
+}
+
+#define xnvfile_noentry			\
+	{				\
+		.pde = NULL,		\
+		.private = NULL,	\
+		.file = NULL,		\
+		.refcnt = 0,		\
+	}
+
+#define xnvfile_nodir	{ .entry = xnvfile_noentry }
+#define xnvfile_nolink	{ .entry = xnvfile_noentry }
+#define xnvfile_nofile	{ .entry = xnvfile_noentry }
+
+#define xnvfile_priv(e)			((e)->entry.private)
+#define xnvfile_nref(e)			((e)->entry.refcnt)
+#define xnvfile_file(e)			((e)->entry.file)
+#define xnvfile_iterator_priv(it)	((void *)(&(it)->private))
+
+extern struct xnvfile_nklock_class xnvfile_nucleus_lock;
+
+extern struct xnvfile_directory cobalt_vfroot;
+
+int xnvfile_init_root(void);
+
+void xnvfile_destroy_root(void);
+
+int xnvfile_init_snapshot(const char *name,
+			  struct xnvfile_snapshot *vfile,
+			  struct xnvfile_directory *parent);
+
+int xnvfile_init_regular(const char *name,
+			 struct xnvfile_regular *vfile,
+			 struct xnvfile_directory *parent);
+
+int xnvfile_init_dir(const char *name,
+		     struct xnvfile_directory *vdir,
+		     struct xnvfile_directory *parent);
+
+int xnvfile_init_link(const char *from,
+		      const char *to,
+		      struct xnvfile_link *vlink,
+		      struct xnvfile_directory *parent);
+
+void xnvfile_destroy(struct xnvfile *vfile);
+
+ssize_t xnvfile_get_blob(struct xnvfile_input *input,
+			 void *data, size_t size);
+
+ssize_t xnvfile_get_string(struct xnvfile_input *input,
+			   char *s, size_t maxlen);
+
+ssize_t xnvfile_get_integer(struct xnvfile_input *input, long *valp);
+
+int __vfile_hostlock_get(struct xnvfile *vfile);
+
+void __vfile_hostlock_put(struct xnvfile *vfile);
+
+static inline
+void xnvfile_destroy_snapshot(struct xnvfile_snapshot *vfile)
+{
+	xnvfile_destroy(&vfile->entry);
+}
+
+static inline
+void xnvfile_destroy_regular(struct xnvfile_regular *vfile)
+{
+	xnvfile_destroy(&vfile->entry);
+}
+
+static inline
+void xnvfile_destroy_dir(struct xnvfile_directory *vdir)
+{
+	xnvfile_destroy(&vdir->entry);
+}
+
+static inline
+void xnvfile_destroy_link(struct xnvfile_link *vlink)
+{
+	xnvfile_destroy(&vlink->entry);
+}
+
+#define DEFINE_VFILE_HOSTLOCK(name)					\
+	struct xnvfile_hostlock_class name = {				\
+		.ops = {						\
+			.get = __vfile_hostlock_get,			\
+			.put = __vfile_hostlock_put,			\
+		},							\
+		.mutex = __MUTEX_INITIALIZER(name.mutex),		\
+	}
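+
+/*
+ * Usage sketch (illustrative only; the example_* names are hypothetical):
+ * a host lock serializing accesses to a vfile, hooked via the lockops
+ * pointer of its entry.
+ *
+ *	static DEFINE_VFILE_HOSTLOCK(example_vfile_lock);
+ *
+ *	static struct xnvfile_regular example_vfile = {
+ *		.entry = { .lockops = &example_vfile_lock.ops },
+ *		.ops = &example_vfile_ops,
+ *	};
+ */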
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+#define xnvfile_touch_tag(tag)	do { } while (0)
+
+#define xnvfile_touch(vfile)	do { } while (0)
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_VFILE_H */
+++ linux-patched/include/xenomai/cobalt/kernel/synch.h	2022-03-21 12:58:31.727866163 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SYNCH_H
+#define _COBALT_KERNEL_SYNCH_H
+
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/uapi/kernel/synch.h>
+#include <cobalt/uapi/kernel/thread.h>
+
+/**
+ * @addtogroup cobalt_core_synch
+ * @{
+ */
+#define XNSYNCH_CLAIMED  0x100	/* Claimed by other thread(s) (PI) */
+#define XNSYNCH_CEILING  0x200	/* Actively boosting (PP) */
+
+/* Spare flags usable by upper interfaces */
+#define XNSYNCH_SPARE0  0x01000000
+#define XNSYNCH_SPARE1  0x02000000
+#define XNSYNCH_SPARE2  0x04000000
+#define XNSYNCH_SPARE3  0x08000000
+#define XNSYNCH_SPARE4  0x10000000
+#define XNSYNCH_SPARE5  0x20000000
+#define XNSYNCH_SPARE6  0x40000000
+#define XNSYNCH_SPARE7  0x80000000
+
+/* Statuses */
+#define XNSYNCH_DONE    0	/* Resource available / operation complete */
+#define XNSYNCH_WAIT    1	/* Calling thread blocked -- start rescheduling */
+#define XNSYNCH_RESCHED 2	/* Force rescheduling */
+
+struct xnthread;
+struct xnsynch;
+
+struct xnsynch {
+	/** wait (weighted) prio in thread->boosters */
+	int wprio;
+	/** thread->boosters */
+	struct list_head next;
+	/**
+	 *  &variable holding the current priority ceiling value
+	 *  (xnsched_class_rt-based, [1..255], XNSYNCH_PP).
+	 */
+	u32 *ceiling_ref;
+	/** Status word */
+	unsigned long status;
+	/** Pending threads */
+	struct list_head pendq;
+	/** Thread which owns the resource */
+	struct xnthread *owner;
+	/** Pointer to fast lock word */
+	atomic_t *fastlock;
+	/** Cleanup handler */
+	void (*cleanup)(struct xnsynch *synch);
+};
+
+#define XNSYNCH_WAITQUEUE_INITIALIZER(__name) {		\
+		.status = XNSYNCH_PRIO,			\
+		.wprio = -1,				\
+		.pendq = LIST_HEAD_INIT((__name).pendq),	\
+		.owner = NULL,				\
+		.cleanup = NULL,			\
+		.fastlock = NULL,			\
+	}
+
+#define DEFINE_XNWAITQ(__name)	\
+	struct xnsynch __name = XNSYNCH_WAITQUEUE_INITIALIZER(__name)
+
+static inline void xnsynch_set_status(struct xnsynch *synch, int bits)
+{
+	synch->status |= bits;
+}
+
+static inline void xnsynch_clear_status(struct xnsynch *synch, int bits)
+{
+	synch->status &= ~bits;
+}
+
+#define xnsynch_for_each_sleeper(__pos, __synch)		\
+	list_for_each_entry(__pos, &(__synch)->pendq, plink)
+
+#define xnsynch_for_each_sleeper_safe(__pos, __tmp, __synch)	\
+	list_for_each_entry_safe(__pos, __tmp, &(__synch)->pendq, plink)
+
+static inline int xnsynch_pended_p(struct xnsynch *synch)
+{
+	return !list_empty(&synch->pendq);
+}
+
+static inline struct xnthread *xnsynch_owner(struct xnsynch *synch)
+{
+	return synch->owner;
+}
+
+#define xnsynch_fastlock(synch)		((synch)->fastlock)
+#define xnsynch_fastlock_p(synch)	((synch)->fastlock != NULL)
+#define xnsynch_owner_check(synch, thread) \
+	xnsynch_fast_owner_check((synch)->fastlock, thread->handle)
+
+#ifdef CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED
+
+void xnsynch_detect_relaxed_owner(struct xnsynch *synch,
+				  struct xnthread *sleeper);
+
+void xnsynch_detect_boosted_relax(struct xnthread *owner);
+
+#else /* !CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED */
+
+static inline void xnsynch_detect_relaxed_owner(struct xnsynch *synch,
+				  struct xnthread *sleeper) { }
+
+static inline void xnsynch_detect_boosted_relax(struct xnthread *owner) { }
+
+#endif /* !CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED */
+
+void xnsynch_init(struct xnsynch *synch, int flags,
+		  atomic_t *fastlock);
+
+void xnsynch_init_protect(struct xnsynch *synch, int flags,
+			  atomic_t *fastlock, u32 *ceiling_ref);
+
+int xnsynch_destroy(struct xnsynch *synch);
+
+void xnsynch_commit_ceiling(struct xnthread *curr);
+
+static inline void xnsynch_register_cleanup(struct xnsynch *synch,
+					    void (*handler)(struct xnsynch *))
+{
+	synch->cleanup = handler;
+}
+
+int __must_check xnsynch_sleep_on(struct xnsynch *synch,
+				  xnticks_t timeout,
+				  xntmode_t timeout_mode);
+
+struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch);
+
+int xnsynch_wakeup_many_sleepers(struct xnsynch *synch, int nr);
+
+void xnsynch_wakeup_this_sleeper(struct xnsynch *synch,
+				 struct xnthread *sleeper);
+
+int __must_check xnsynch_acquire(struct xnsynch *synch,
+				 xnticks_t timeout,
+				 xntmode_t timeout_mode);
+
+int __must_check xnsynch_try_acquire(struct xnsynch *synch);
+
+bool xnsynch_release(struct xnsynch *synch, struct xnthread *thread);
+
+struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch);
+
+int xnsynch_flush(struct xnsynch *synch, int reason);
+
+void xnsynch_requeue_sleeper(struct xnthread *thread);
+
+void xnsynch_forget_sleeper(struct xnthread *thread);
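+
+/*
+ * Usage sketch (illustrative only; the example_* names are hypothetical):
+ * a plain wait queue a thread pends on until another one posts a wakeup.
+ * Both sides are assumed to run on the Cobalt core with the nklock held
+ * and hard irqs off, and XN_INFINITE/XN_RELATIVE are assumed from the
+ * timer/type headers.
+ *
+ *	static DEFINE_XNWAITQ(example_waitq);
+ *	int info;
+ *
+ *	// waiting side (nklock held, hard irqs off):
+ *	info = xnsynch_sleep_on(&example_waitq, XN_INFINITE, XN_RELATIVE);
+ *	if (info)
+ *		;	// unblocked by flush, timeout or forced break
+ *
+ *	// waking side (nklock held, hard irqs off):
+ *	if (xnsynch_wakeup_one_sleeper(&example_waitq))
+ *		xnsched_run();
+ */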
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SYNCH_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched.h	2022-03-21 12:58:31.720866231 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/map.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_H
+#define _COBALT_KERNEL_SCHED_H
+
+#include <linux/percpu.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/schedqueue.h>
+#include <cobalt/kernel/sched-tp.h>
+#include <cobalt/kernel/sched-weak.h>
+#include <cobalt/kernel/sched-sporadic.h>
+#include <cobalt/kernel/sched-quota.h>
+#include <cobalt/kernel/vfile.h>
+#include <cobalt/kernel/assert.h>
+#include <asm/xenomai/machine.h>
+#include <pipeline/sched.h>
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+/* Sched status flags */
+#define XNRESCHED	0x10000000	/* Needs rescheduling */
+#define XNINSW		0x20000000	/* In context switch */
+#define XNINTCK		0x40000000	/* In master tick handler context */
+
+/* Sched local flags */
+#define XNIDLE		0x00010000	/* Idle (no outstanding timer) */
+#define XNHTICK		0x00008000	/* Host tick pending  */
+#define XNINIRQ		0x00004000	/* In IRQ handling context */
+#define XNHDEFER	0x00002000	/* Host tick deferred */
+
+/*
+ * Hardware timer is stopped.
+ */
+#define XNTSTOP		0x00000800
+
+struct xnsched_rt {
+	xnsched_queue_t runnable;	/*!< Runnable thread queue. */
+};
+
+/*!
+ * \brief Scheduling information structure.
+ */
+
+struct xnsched {
+	/*!< Scheduler specific status bitmask. */
+	unsigned long status;
+	/*!< Scheduler specific local flags bitmask. */
+	unsigned long lflags;
+	/*!< Current thread. */
+	struct xnthread *curr;
+#ifdef CONFIG_SMP
+	/*!< Owner CPU id. */
+	int cpu;
+	/*!< Mask of CPUs needing rescheduling. */
+	cpumask_t resched;
+#endif
+	/*!< Context of built-in real-time class. */
+	struct xnsched_rt rt;
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+	/*!< Context of weak scheduling class. */
+	struct xnsched_weak weak;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	/*!< Context of TP class. */
+	struct xnsched_tp tp;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	/*!< Context of sporadic scheduling class. */
+	struct xnsched_sporadic pss;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	/*!< Context of runtime quota scheduling. */
+	struct xnsched_quota quota;
+#endif
+	/*!< Interrupt nesting level. */
+	volatile unsigned inesting;
+	/*!< Host timer. */
+	struct xntimer htimer;
+	/*!< Round-robin timer. */
+	struct xntimer rrbtimer;
+	/*!< Root thread control block. */
+	struct xnthread rootcb;
+#ifdef CONFIG_XENO_ARCH_FPU
+	/*!< Thread owning the current FPU context. */
+	struct xnthread *fpuholder;
+#endif
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	/*!< Watchdog timer object. */
+	struct xntimer wdtimer;
+#endif
+#ifdef CONFIG_XENO_OPT_STATS
+	/*!< Last account switch date (ticks). */
+	xnticks_t last_account_switch;
+	/*!< Currently active account */
+	xnstat_exectime_t *current_account;
+#endif
+};
+
+DECLARE_PER_CPU(struct xnsched, nksched);
+
+extern cpumask_t cobalt_cpu_affinity;
+
+extern struct list_head nkthreadq;
+
+extern int cobalt_nrthreads;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+extern struct xnvfile_rev_tag nkthreadlist_tag;
+#endif
+
+union xnsched_policy_param;
+
+struct xnsched_class {
+	void (*sched_init)(struct xnsched *sched);
+	void (*sched_enqueue)(struct xnthread *thread);
+	void (*sched_dequeue)(struct xnthread *thread);
+	void (*sched_requeue)(struct xnthread *thread);
+	struct xnthread *(*sched_pick)(struct xnsched *sched);
+	void (*sched_tick)(struct xnsched *sched);
+	void (*sched_rotate)(struct xnsched *sched,
+			     const union xnsched_policy_param *p);
+	void (*sched_migrate)(struct xnthread *thread,
+			      struct xnsched *sched);
+	int (*sched_chkparam)(struct xnthread *thread,
+			      const union xnsched_policy_param *p);
+	/**
+	 * Set base scheduling parameters. This routine is indirectly
+	 * called upon a change of base scheduling settings through
+	 * __xnthread_set_schedparam() -> xnsched_set_policy(),
+	 * exclusively.
+	 *
+	 * The scheduling class implementation should do the necessary
+	 * housekeeping to comply with the new settings.
+	 * thread->base_class is up to date before the call is made,
+	 * and should be considered for the new weighted priority
+	 * calculation. On the contrary, thread->sched_class should
+	 * NOT be referred to by this handler.
+	 *
+	 * sched_setparam() is NEVER involved in PI or PP
+	 * management. However it must deny a priority update if it
+	 * contradicts an ongoing boost for @a thread. This is
+	 * typically what the xnsched_set_effective_priority() helper
+	 * does for such handler.
+	 *
+	 * @param thread Affected thread.
+	 * @param p New base policy settings.
+	 *
+	 * @return True if the effective priority was updated
+	 * (thread->cprio).
+	 */
+	bool (*sched_setparam)(struct xnthread *thread,
+			       const union xnsched_policy_param *p);
+	void (*sched_getparam)(struct xnthread *thread,
+			       union xnsched_policy_param *p);
+	void (*sched_trackprio)(struct xnthread *thread,
+				const union xnsched_policy_param *p);
+	void (*sched_protectprio)(struct xnthread *thread, int prio);
+	int (*sched_declare)(struct xnthread *thread,
+			     const union xnsched_policy_param *p);
+	void (*sched_forget)(struct xnthread *thread);
+	void (*sched_kick)(struct xnthread *thread);
+#ifdef CONFIG_XENO_OPT_VFILE
+	int (*sched_init_vfile)(struct xnsched_class *schedclass,
+				struct xnvfile_directory *vfroot);
+	void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
+#endif
+	int nthreads;
+	struct xnsched_class *next;
+	int weight;
+	int policy;
+	const char *name;
+};
+
+#define XNSCHED_CLASS_WEIGHT(n)		((n) * XNSCHED_CLASS_WEIGHT_FACTOR)
+
+/* Placeholder for current thread priority */
+#define XNSCHED_RUNPRIO   0x80000000
+
+#define xnsched_for_each_thread(__thread)	\
+	list_for_each_entry(__thread, &nkthreadq, glink)
+
+#ifdef CONFIG_SMP
+static inline int xnsched_cpu(struct xnsched *sched)
+{
+	return sched->cpu;
+}
+#else /* !CONFIG_SMP */
+static inline int xnsched_cpu(struct xnsched *sched)
+{
+	return 0;
+}
+#endif /* CONFIG_SMP */
+
+static inline struct xnsched *xnsched_struct(int cpu)
+{
+	return &per_cpu(nksched, cpu);
+}
+
+static inline struct xnsched *xnsched_current(void)
+{
+	/* IRQs off */
+	return raw_cpu_ptr(&nksched);
+}
+
+static inline struct xnthread *xnsched_current_thread(void)
+{
+	return xnsched_current()->curr;
+}
+
+/* Test resched flag of given sched. */
+static inline int xnsched_resched_p(struct xnsched *sched)
+{
+	return sched->status & XNRESCHED;
+}
+
+/* Set self resched flag for the current scheduler. */
+static inline void xnsched_set_self_resched(struct xnsched *sched)
+{
+	sched->status |= XNRESCHED;
+}
+
+/* Set resched flag for the given scheduler. */
+#ifdef CONFIG_SMP
+
+static inline void xnsched_set_resched(struct xnsched *sched)
+{
+	struct xnsched *current_sched = xnsched_current();
+
+	if (current_sched == sched)
+		current_sched->status |= XNRESCHED;
+	else if (!xnsched_resched_p(sched)) {
+		cpumask_set_cpu(xnsched_cpu(sched), &current_sched->resched);
+		sched->status |= XNRESCHED;
+		current_sched->status |= XNRESCHED;
+	}
+}
+
+#define xnsched_realtime_cpus    cobalt_pipeline.supported_cpus
+
+static inline int xnsched_supported_cpu(int cpu)
+{
+	return cpumask_test_cpu(cpu, &xnsched_realtime_cpus);
+}
+
+static inline int xnsched_threading_cpu(int cpu)
+{
+	return cpumask_test_cpu(cpu, &cobalt_cpu_affinity);
+}
+
+#else /* !CONFIG_SMP */
+
+static inline void xnsched_set_resched(struct xnsched *sched)
+{
+	xnsched_set_self_resched(sched);
+}
+
+#define xnsched_realtime_cpus CPU_MASK_ALL
+
+static inline int xnsched_supported_cpu(int cpu)
+{
+	return 1;
+}
+
+static inline int xnsched_threading_cpu(int cpu)
+{
+	return 1;
+}
+
+#endif /* !CONFIG_SMP */
+
+#define for_each_realtime_cpu(cpu)		\
+	for_each_online_cpu(cpu)		\
+		if (xnsched_supported_cpu(cpu))	\
+
+int ___xnsched_run(struct xnsched *sched);
+
+void __xnsched_run_handler(void);
+
+static inline int __xnsched_run(struct xnsched *sched)
+{
+	/*
+	 * Reschedule if XNRESCHED is pending, but never over an IRQ
+	 * handler or in the middle of unlocked context switch.
+	 */
+	if (((sched->status|sched->lflags) &
+	     (XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED)
+		return 0;
+
+	return pipeline_schedule(sched);
+}
+
+static inline int xnsched_run(void)
+{
+	struct xnsched *sched = xnsched_current();
+	/*
+	 * sched->curr is shared locklessly with ___xnsched_run().
+	 * READ_ONCE() makes sure the compiler never uses load tearing
+	 * for reading this pointer piecemeal, so that multiple stores
+	 * occurring concurrently on remote CPUs never yield a
+	 * spurious merged value on the local one.
+	 */
+	struct xnthread *curr = READ_ONCE(sched->curr);
+
+	/*
+	 * If running over the root thread, hard irqs must be off
+	 * (asserted out of line in ___xnsched_run()).
+	 */
+	return curr->lock_count > 0 ? 0 : __xnsched_run(sched);
+}
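+
+/*
+ * Usage sketch (illustrative only): the typical pattern is to alter the
+ * scheduler state under nklock protection, then call xnsched_run(),
+ * which is a no-op unless XNRESCHED was raised in the meantime. The
+ * nklock/spl_t helpers are assumed from cobalt/kernel/lock.h.
+ *
+ *	spl_t s;
+ *
+ *	xnlock_get_irqsave(&nklock, s);
+ *	// ... wake up or suspend some thread(s), raising XNRESCHED ...
+ *	xnsched_run();
+ *	xnlock_put_irqrestore(&nklock, s);
+ */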
+
+void xnsched_lock(void);
+
+void xnsched_unlock(void);
+
+static inline int xnsched_interrupt_p(void)
+{
+	return xnsched_current()->lflags & XNINIRQ;
+}
+
+static inline int xnsched_root_p(void)
+{
+	return xnthread_test_state(xnsched_current_thread(), XNROOT);
+}
+
+static inline int xnsched_unblockable_p(void)
+{
+	return xnsched_interrupt_p() || xnsched_root_p();
+}
+
+static inline int xnsched_primary_p(void)
+{
+	return !xnsched_unblockable_p();
+}
+
+bool xnsched_set_effective_priority(struct xnthread *thread,
+				    int prio);
+
+#include <cobalt/kernel/sched-idle.h>
+#include <cobalt/kernel/sched-rt.h>
+
+int xnsched_init_proc(void);
+
+void xnsched_cleanup_proc(void);
+
+void xnsched_register_classes(void);
+
+void xnsched_init_all(void);
+
+void xnsched_destroy_all(void);
+
+struct xnthread *xnsched_pick_next(struct xnsched *sched);
+
+void xnsched_putback(struct xnthread *thread);
+
+int xnsched_set_policy(struct xnthread *thread,
+		       struct xnsched_class *sched_class,
+		       const union xnsched_policy_param *p);
+
+void xnsched_track_policy(struct xnthread *thread,
+			  struct xnthread *target);
+
+void xnsched_protect_priority(struct xnthread *thread,
+			      int prio);
+
+void xnsched_migrate(struct xnthread *thread,
+		     struct xnsched *sched);
+
+void xnsched_migrate_passive(struct xnthread *thread,
+			     struct xnsched *sched);
+
+/**
+ * @fn void xnsched_rotate(struct xnsched *sched, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param)
+ * @brief Rotate a scheduler runqueue.
+ *
+ * The specified scheduling class is requested to rotate its runqueue
+ * for the given scheduler. Rotation is performed according to the
+ * scheduling parameter specified by @a sched_param.
+ *
+ * @note The nucleus supports round-robin scheduling for the members
+ * of the RT class.
+ *
+ * @param sched The per-CPU scheduler hosting the target scheduling
+ * class.
+ *
+ * @param sched_class The scheduling class which should rotate its
+ * runqueue.
+ *
+ * @param sched_param The scheduling parameter providing rotation
+ * information to the specified scheduling class.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+static inline void xnsched_rotate(struct xnsched *sched,
+				  struct xnsched_class *sched_class,
+				  const union xnsched_policy_param *sched_param)
+{
+	sched_class->sched_rotate(sched, sched_param);
+}
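+
+/*
+ * Usage sketch (illustrative only, assuming the RT class from
+ * cobalt/kernel/sched-rt.h honors the XNSCHED_RUNPRIO placeholder):
+ * round-robin the runqueue level matching the current thread priority.
+ *
+ *	union xnsched_policy_param param = {
+ *		.rt = { .prio = XNSCHED_RUNPRIO },
+ *	};
+ *
+ *	xnsched_rotate(sched, &xnsched_class_rt, &param);
+ */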
+
+static inline int xnsched_init_thread(struct xnthread *thread)
+{
+	int ret = 0;
+
+	xnsched_idle_init_thread(thread);
+	xnsched_rt_init_thread(thread);
+
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	ret = xnsched_tp_init_thread(thread);
+	if (ret)
+		return ret;
+#endif /* CONFIG_XENO_OPT_SCHED_TP */
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	ret = xnsched_sporadic_init_thread(thread);
+	if (ret)
+		return ret;
+#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	ret = xnsched_quota_init_thread(thread);
+	if (ret)
+		return ret;
+#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */
+
+	return ret;
+}
+
+static inline int xnsched_root_priority(struct xnsched *sched)
+{
+	return sched->rootcb.cprio;
+}
+
+static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
+{
+	return sched->rootcb.sched_class;
+}
+
+static inline void xnsched_tick(struct xnsched *sched)
+{
+	struct xnthread *curr = sched->curr;
+	struct xnsched_class *sched_class = curr->sched_class;
+	/*
+	 * A thread that undergoes round-robin scheduling only
+	 * consumes its time slice when it runs within its own
+	 * scheduling class, which excludes temporary PI boosts, and
+	 * does not hold the scheduler lock.
+	 */
+	if (sched_class == curr->base_class &&
+	    sched_class->sched_tick &&
+	    xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNRRB) == XNRRB &&
+		curr->lock_count == 0)
+		sched_class->sched_tick(sched);
+}
+
+static inline int xnsched_chkparam(struct xnsched_class *sched_class,
+				   struct xnthread *thread,
+				   const union xnsched_policy_param *p)
+{
+	if (sched_class->sched_chkparam)
+		return sched_class->sched_chkparam(thread, p);
+
+	return 0;
+}
+
+static inline int xnsched_declare(struct xnsched_class *sched_class,
+				  struct xnthread *thread,
+				  const union xnsched_policy_param *p)
+{
+	int ret;
+
+	if (sched_class->sched_declare) {
+		ret = sched_class->sched_declare(thread, p);
+		if (ret)
+			return ret;
+	}
+	if (sched_class != thread->base_class)
+		sched_class->nthreads++;
+
+	return 0;
+}
+
+static inline int xnsched_calc_wprio(struct xnsched_class *sched_class,
+				     int prio)
+{
+	return prio + sched_class->weight;
+}
+
+#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
+
+static inline void xnsched_enqueue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		sched_class->sched_enqueue(thread);
+}
+
+static inline void xnsched_dequeue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		sched_class->sched_dequeue(thread);
+}
+
+static inline void xnsched_requeue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		sched_class->sched_requeue(thread);
+}
+
+static inline
+bool xnsched_setparam(struct xnthread *thread,
+		      const union xnsched_policy_param *p)
+{
+	return thread->base_class->sched_setparam(thread, p);
+}
+
+static inline void xnsched_getparam(struct xnthread *thread,
+				    union xnsched_policy_param *p)
+{
+	thread->sched_class->sched_getparam(thread, p);
+}
+
+static inline void xnsched_trackprio(struct xnthread *thread,
+				     const union xnsched_policy_param *p)
+{
+	thread->sched_class->sched_trackprio(thread, p);
+	thread->wprio = xnsched_calc_wprio(thread->sched_class, thread->cprio);
+}
+
+static inline void xnsched_protectprio(struct xnthread *thread, int prio)
+{
+	thread->sched_class->sched_protectprio(thread, prio);
+	thread->wprio = xnsched_calc_wprio(thread->sched_class, thread->cprio);
+}
+
+static inline void xnsched_forget(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->base_class;
+
+	--sched_class->nthreads;
+
+	if (sched_class->sched_forget)
+		sched_class->sched_forget(thread);
+}
+
+static inline void xnsched_kick(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->base_class;
+
+	xnthread_set_info(thread, XNKICKED);
+
+	if (sched_class->sched_kick)
+		sched_class->sched_kick(thread);
+
+	xnsched_set_resched(thread->sched);
+}
+
+#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */
+
+/*
+ * If only the RT and IDLE scheduling classes are compiled in, we can
+ * fully inline common helpers for dealing with those.
+ */
+
+static inline void xnsched_enqueue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		__xnsched_rt_enqueue(thread);
+}
+
+static inline void xnsched_dequeue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		__xnsched_rt_dequeue(thread);
+}
+
+static inline void xnsched_requeue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		__xnsched_rt_requeue(thread);
+}
+
+static inline bool xnsched_setparam(struct xnthread *thread,
+				    const union xnsched_policy_param *p)
+{
+	struct xnsched_class *sched_class = thread->base_class;
+
+	if (sched_class == &xnsched_class_idle)
+		return __xnsched_idle_setparam(thread, p);
+
+	return __xnsched_rt_setparam(thread, p);
+}
+
+static inline void xnsched_getparam(struct xnthread *thread,
+				    union xnsched_policy_param *p)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class == &xnsched_class_idle)
+		__xnsched_idle_getparam(thread, p);
+	else
+		__xnsched_rt_getparam(thread, p);
+}
+
+static inline void xnsched_trackprio(struct xnthread *thread,
+				     const union xnsched_policy_param *p)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class == &xnsched_class_idle)
+		__xnsched_idle_trackprio(thread, p);
+	else
+		__xnsched_rt_trackprio(thread, p);
+
+	thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
+}
+
+static inline void xnsched_protectprio(struct xnthread *thread, int prio)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class == &xnsched_class_idle)
+		__xnsched_idle_protectprio(thread, prio);
+	else
+		__xnsched_rt_protectprio(thread, prio);
+
+	thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
+}
+
+static inline void xnsched_forget(struct xnthread *thread)
+{
+	--thread->base_class->nthreads;
+	__xnsched_rt_forget(thread);
+}
+
+static inline void xnsched_kick(struct xnthread *thread)
+{
+	xnthread_set_info(thread, XNKICKED);
+	xnsched_set_resched(thread->sched);
+}
+
+#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_H */
+++ linux-patched/include/xenomai/cobalt/kernel/map.h	2022-03-21 12:58:31.713866299 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched-idle.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2007 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_MAP_H
+#define _COBALT_KERNEL_MAP_H
+
+#include <asm/bitsperlong.h>
+
+/**
+ * @addtogroup cobalt_core_map
+ * @{
+ */
+
+#define XNMAP_MAX_KEYS	(BITS_PER_LONG * BITS_PER_LONG)
+
+struct xnmap {
+	int nkeys;
+	int ukeys;
+	int offset;
+	unsigned long himask;
+	unsigned long himap;
+#define __IDMAP_LONGS	((XNMAP_MAX_KEYS+BITS_PER_LONG-1)/BITS_PER_LONG)
+	unsigned long lomap[__IDMAP_LONGS];
+#undef __IDMAP_LONGS
+	void *objarray[1];
+};
+
+struct xnmap *xnmap_create(int nkeys,
+			   int reserve,
+			   int offset);
+
+void xnmap_delete(struct xnmap *map);
+
+int xnmap_enter(struct xnmap *map,
+		int key,
+		void *objaddr);
+
+int xnmap_remove(struct xnmap *map,
+		 int key);
+
+static inline void *xnmap_fetch_nocheck(struct xnmap *map, int key)
+{
+	int ofkey = key - map->offset;
+	return map->objarray[ofkey];
+}
+
+static inline void *xnmap_fetch(struct xnmap *map, int key)
+{
+	int ofkey = key - map->offset;
+
+	if (ofkey < 0 || ofkey >= map->nkeys)
+		return NULL;
+
+	return map->objarray[ofkey];
+}
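+
+/*
+ * Usage sketch (illustrative only; the sizes and key values are
+ * hypothetical): a map indexing up to 256 objects on keys starting at 1,
+ * with no reserved slots.
+ *
+ *	struct xnmap *map;
+ *	int ret;
+ *
+ *	map = xnmap_create(256, 0, 1);
+ *	if (map == NULL)
+ *		return -ENOMEM;
+ *
+ *	ret = xnmap_enter(map, 7, obj);		// bind obj to key 7
+ *	if (ret < 0)
+ *		goto fail;
+ *
+ *	obj = xnmap_fetch(map, 7);		// NULL if out of range
+ *	xnmap_remove(map, 7);
+ *	xnmap_delete(map);
+ */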
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_MAP_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched-idle.h	2022-03-21 12:58:31.705866377 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched-weak.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_IDLE_H
+#define _COBALT_KERNEL_SCHED_IDLE_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-idle.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+/* Idle priority level - actually never used for indexing. */
+#define XNSCHED_IDLE_PRIO	-1
+
+extern struct xnsched_class xnsched_class_idle;
+
+static inline bool __xnsched_idle_setparam(struct xnthread *thread,
+					   const union xnsched_policy_param *p)
+{
+	xnthread_clear_state(thread, XNWEAK);
+	return xnsched_set_effective_priority(thread, p->idle.prio);
+}
+
+static inline void __xnsched_idle_getparam(struct xnthread *thread,
+					   union xnsched_policy_param *p)
+{
+	p->idle.prio = thread->cprio;
+}
+
+static inline void __xnsched_idle_trackprio(struct xnthread *thread,
+					    const union xnsched_policy_param *p)
+{
+	if (p)
+		/* Inheriting a priority-less class makes no sense. */
+		XENO_WARN_ON_ONCE(COBALT, 1);
+	else
+		thread->cprio = XNSCHED_IDLE_PRIO;
+}
+
+static inline void __xnsched_idle_protectprio(struct xnthread *thread, int prio)
+{
+	XENO_WARN_ON_ONCE(COBALT, 1);
+}
+
+static inline int xnsched_idle_init_thread(struct xnthread *thread)
+{
+	return 0;
+}
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_IDLE_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched-weak.h	2022-03-21 12:58:31.698866445 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/arith.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_WEAK_H
+#define _COBALT_KERNEL_SCHED_WEAK_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-weak.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+
+#define XNSCHED_WEAK_MIN_PRIO	0
+#define XNSCHED_WEAK_MAX_PRIO	99
+#define XNSCHED_WEAK_NR_PRIO	\
+	(XNSCHED_WEAK_MAX_PRIO - XNSCHED_WEAK_MIN_PRIO + 1)
+
+#if XNSCHED_WEAK_NR_PRIO > XNSCHED_CLASS_WEIGHT_FACTOR ||	\
+	(defined(CONFIG_XENO_OPT_SCALABLE_SCHED) &&		\
+	 XNSCHED_WEAK_NR_PRIO > XNSCHED_MLQ_LEVELS)
+#error "WEAK class has too many priority levels"
+#endif
+
+extern struct xnsched_class xnsched_class_weak;
+
+struct xnsched_weak {
+	xnsched_queue_t runnable;	/*!< Runnable thread queue. */
+};
+
+static inline int xnsched_weak_init_thread(struct xnthread *thread)
+{
+	return 0;
+}
+
+#endif /* CONFIG_XENO_OPT_SCHED_WEAK */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_WEAK_H */
+++ linux-patched/include/xenomai/cobalt/kernel/arith.h	2022-03-21 12:58:31.691866513 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched-tp.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ *   Generic arithmetic/conversion routines.
+ *   Copyright &copy; 2005 Stelian Pop.
+ *   Copyright &copy; 2005 Gilles Chanteperdrix.
+ *
+ *   Xenomai is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_ARITH_H
+#define _COBALT_KERNEL_ARITH_H
+
+#include <asm/byteorder.h>
+#include <asm/div64.h>
+
+#ifdef __BIG_ENDIAN
+#define endianstruct { unsigned int _h; unsigned int _l; }
+#else /* __LITTLE_ENDIAN */
+#define endianstruct { unsigned int _l; unsigned int _h; }
+#endif
+
+#include <asm/xenomai/uapi/arith.h>
+
+#endif /* _COBALT_KERNEL_ARITH_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched-tp.h	2022-03-21 12:58:31.683866592 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/ppd.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_TP_H
+#define _COBALT_KERNEL_SCHED_TP_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-tp.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+
+#define XNSCHED_TP_MIN_PRIO	1
+#define XNSCHED_TP_MAX_PRIO	255
+#define XNSCHED_TP_NR_PRIO	\
+	(XNSCHED_TP_MAX_PRIO - XNSCHED_TP_MIN_PRIO + 1)
+
+extern struct xnsched_class xnsched_class_tp;
+
+struct xnsched_tp_window {
+	xnticks_t w_offset;
+	int w_part;
+};
+
+struct xnsched_tp_schedule {
+	int pwin_nr;
+	xnticks_t tf_duration;
+	atomic_t refcount;
+	struct xnsched_tp_window pwins[0];
+};
+
+struct xnsched_tp {
+	struct xnsched_tpslot {
+		/** Per-partition runqueue. */
+		xnsched_queue_t runnable;
+	} partitions[CONFIG_XENO_OPT_SCHED_TP_NRPART];
+	/** Idle slot for passive windows. */
+	struct xnsched_tpslot idle;
+	/** Active partition slot */
+	struct xnsched_tpslot *tps;
+	/** Time frame timer */
+	struct xntimer tf_timer;
+	/** Global partition schedule */
+	struct xnsched_tp_schedule *gps;
+	/** Window index of next partition */
+	int wnext;
+	/** Start of next time frame */
+	xnticks_t tf_start;
+	/** Assigned thread queue */
+	struct list_head threads;
+};
+
+static inline int xnsched_tp_init_thread(struct xnthread *thread)
+{
+	thread->tps = NULL;
+
+	return 0;
+}
+
+struct xnsched_tp_schedule *
+xnsched_tp_set_schedule(struct xnsched *sched,
+			struct xnsched_tp_schedule *gps);
+
+void xnsched_tp_start_schedule(struct xnsched *sched);
+
+void xnsched_tp_stop_schedule(struct xnsched *sched);
+
+int xnsched_tp_get_partition(struct xnsched *sched);
+
+struct xnsched_tp_schedule *
+xnsched_tp_get_schedule(struct xnsched *sched);
+
+void xnsched_tp_put_schedule(struct xnsched_tp_schedule *gps);
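+
+/*
+ * Usage sketch (illustrative only; sizes, offsets and durations are
+ * hypothetical and assumed to be expressed in nanoseconds): build a
+ * two-window global schedule, install it on a per-CPU scheduler, then
+ * start the time frame timer. Locking (nklock) and error unwinding are
+ * omitted for brevity.
+ *
+ *	struct xnsched_tp_schedule *gps, *old;
+ *
+ *	gps = kzalloc(sizeof(*gps) + 2 * sizeof(gps->pwins[0]), GFP_KERNEL);
+ *	if (gps == NULL)
+ *		return -ENOMEM;
+ *
+ *	gps->pwin_nr = 2;
+ *	gps->tf_duration = 2000000;		// 2 ms time frame
+ *	gps->pwins[0].w_offset = 0;		// partition 0: first ms
+ *	gps->pwins[0].w_part = 0;
+ *	gps->pwins[1].w_offset = 1000000;	// partition 1: second ms
+ *	gps->pwins[1].w_part = 1;
+ *	atomic_set(&gps->refcount, 1);
+ *
+ *	old = xnsched_tp_set_schedule(sched, gps);
+ *	if (old)
+ *		xnsched_tp_put_schedule(old);
+ *
+ *	xnsched_tp_start_schedule(sched);
+ */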
+
+#endif /* CONFIG_XENO_OPT_SCHED_TP */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_TP_H */
+++ linux-patched/include/xenomai/cobalt/kernel/ppd.h	2022-03-21 12:58:31.676866660 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/compat.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright &copy; 2006 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_PPD_H
+#define _COBALT_KERNEL_PPD_H
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/rbtree.h>
+#include <cobalt/kernel/heap.h>
+
+struct cobalt_umm {
+	struct xnheap heap;
+	atomic_t refcount;
+	void (*release)(struct cobalt_umm *umm);
+};
+
+struct cobalt_ppd {
+	struct cobalt_umm umm;
+	atomic_t refcnt;
+	char *exe_path;
+	struct rb_root fds;
+};
+
+extern struct cobalt_ppd cobalt_kernel_ppd;
+
+#endif /* _COBALT_KERNEL_PPD_H */
+++ linux-patched/include/xenomai/cobalt/kernel/compat.h	2022-03-21 12:58:31.668866738 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/assert.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_COMPAT_H
+#define _COBALT_KERNEL_COMPAT_H
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+#include <linux/compat.h>
+#include <net/compat.h>
+#include <asm/xenomai/wrappers.h>
+#include <cobalt/uapi/sched.h>
+
+struct mq_attr;
+
+struct __compat_sched_ss_param {
+	int __sched_low_priority;
+	struct old_timespec32 __sched_repl_period;
+	struct old_timespec32 __sched_init_budget;
+	int __sched_max_repl;
+};
+
+struct __compat_sched_rr_param {
+	struct old_timespec32 __sched_rr_quantum;
+};
+
+struct compat_sched_param_ex {
+	int sched_priority;
+	union {
+		struct __compat_sched_ss_param ss;
+		struct __compat_sched_rr_param rr;
+		struct __sched_tp_param tp;
+		struct __sched_quota_param quota;
+	} sched_u;
+};
+
+struct compat_mq_attr {
+	compat_long_t mq_flags;
+	compat_long_t mq_maxmsg;
+	compat_long_t mq_msgsize;
+	compat_long_t mq_curmsgs;
+};
+
+struct compat_sched_tp_window {
+	struct old_timespec32 offset;
+	struct old_timespec32 duration;
+	int ptid;
+};
+
+struct __compat_sched_config_tp {
+	int op;
+	int nr_windows;
+	struct compat_sched_tp_window windows[0];
+};
+
+union compat_sched_config {
+	struct __compat_sched_config_tp tp;
+	struct __sched_config_quota quota;
+};
+
+#define compat_sched_tp_confsz(nr_win) \
+  (sizeof(struct __compat_sched_config_tp) + nr_win * sizeof(struct compat_sched_tp_window))
+
+typedef struct {
+	compat_ulong_t fds_bits[__FD_SETSIZE / (8 * sizeof(compat_long_t))];
+} compat_fd_set;
+
+struct compat_rtdm_mmap_request {
+	u64 offset;
+	compat_size_t length;
+	int prot;
+	int flags;
+};
+
+int sys32_get_timespec(struct timespec64 *ts,
+		       const struct old_timespec32 __user *cts);
+
+int sys32_put_timespec(struct old_timespec32 __user *cts,
+		       const struct timespec64 *ts);
+
+int sys32_get_itimerspec(struct itimerspec64 *its,
+			 const struct old_itimerspec32 __user *cits);
+
+int sys32_put_itimerspec(struct old_itimerspec32 __user *cits,
+			 const struct itimerspec64 *its);
+
+int sys32_get_timeval(struct __kernel_old_timeval *tv,
+		      const struct old_timeval32 __user *ctv);
+
+int sys32_put_timeval(struct old_timeval32 __user *ctv,
+		      const struct __kernel_old_timeval *tv);
+
+int sys32_get_timex(struct __kernel_timex *tx,
+		    const struct old_timex32 __user *ctx);
+
+int sys32_put_timex(struct old_timex32 __user *ctx,
+		    const struct __kernel_timex *tx);
+
+int sys32_get_fdset(fd_set *fds, const compat_fd_set __user *cfds,
+		    size_t cfdsize);
+
+int sys32_put_fdset(compat_fd_set __user *cfds, const fd_set *fds,
+		    size_t fdsize);
+
+int sys32_get_param_ex(int policy,
+		       struct sched_param_ex *p,
+		       const struct compat_sched_param_ex __user *u_cp);
+
+int sys32_put_param_ex(int policy,
+		       struct compat_sched_param_ex __user *u_cp,
+		       const struct sched_param_ex *p);
+
+int sys32_get_mqattr(struct mq_attr *ap,
+		     const struct compat_mq_attr __user *u_cap);
+
+int sys32_put_mqattr(struct compat_mq_attr __user *u_cap,
+		     const struct mq_attr *ap);
+
+int sys32_get_sigevent(struct sigevent *ev,
+		       const struct compat_sigevent *__user u_cev);
+
+int sys32_get_sigset(sigset_t *set, const compat_sigset_t *u_cset);
+
+int sys32_put_sigset(compat_sigset_t *u_cset, const sigset_t *set);
+
+int sys32_get_sigval(union sigval *val, const union compat_sigval *u_cval);
+
+int sys32_put_siginfo(void __user *u_si, const struct siginfo *si,
+		      int overrun);
+
+int sys32_get_msghdr(struct user_msghdr *msg,
+		     const struct compat_msghdr __user *u_cmsg);
+
+int sys32_get_mmsghdr(struct mmsghdr *mmsg,
+		      const struct compat_mmsghdr __user *u_cmmsg);
+
+int sys32_put_msghdr(struct compat_msghdr __user *u_cmsg,
+		     const struct user_msghdr *msg);
+
+int sys32_put_mmsghdr(struct compat_mmsghdr __user *u_cmmsg,
+		     const struct mmsghdr *mmsg);
+
+int sys32_get_iovec(struct iovec *iov,
+		    const struct compat_iovec __user *ciov,
+		    int ciovlen);
+
+int sys32_put_iovec(struct compat_iovec __user *u_ciov,
+		    const struct iovec *iov,
+		    int iovlen);
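+
+/*
+ * Usage sketch (added for illustration, not part of the original
+ * header): a 32-bit compat entry point converts user structures with
+ * the sys32_* helpers before and after calling the native code path.
+ * u_cts and u_cts_out are hypothetical __user pointers.
+ *
+ *	struct timespec64 ts;
+ *	int ret;
+ *
+ *	ret = sys32_get_timespec(&ts, u_cts);
+ *	if (ret)
+ *		return ret;
+ *	// ... work on the native timespec64 ...
+ *	return sys32_put_timespec(u_cts_out, &ts);
+ */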
+
+#endif /* CONFIG_XENO_ARCH_SYS3264 */
+
+#endif /* !_COBALT_KERNEL_COMPAT_H */
+++ linux-patched/include/xenomai/cobalt/kernel/assert.h	2022-03-21 12:58:31.661866806 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/timer.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_ASSERT_H
+#define _COBALT_KERNEL_ASSERT_H
+
+#include <linux/kconfig.h>
+
+#define XENO_INFO	KERN_INFO    "[Xenomai] "
+#define XENO_WARNING	KERN_WARNING "[Xenomai] "
+#define XENO_ERR	KERN_ERR     "[Xenomai] "
+
+#define XENO_DEBUG(__subsys)				\
+	IS_ENABLED(CONFIG_XENO_OPT_DEBUG_##__subsys)
+#define XENO_ASSERT(__subsys, __cond)			\
+	(!WARN_ON(XENO_DEBUG(__subsys) && !(__cond)))
+#define XENO_BUG(__subsys)				\
+	BUG_ON(XENO_DEBUG(__subsys))
+#define XENO_BUG_ON(__subsys, __cond)			\
+	BUG_ON(XENO_DEBUG(__subsys) && (__cond))
+#define XENO_WARN(__subsys, __cond, __fmt...)		\
+	WARN(XENO_DEBUG(__subsys) && (__cond), __fmt)
+#define XENO_WARN_ON(__subsys, __cond)			\
+	WARN_ON(XENO_DEBUG(__subsys) && (__cond))
+#define XENO_WARN_ON_ONCE(__subsys, __cond)		\
+	WARN_ON_ONCE(XENO_DEBUG(__subsys) && (__cond))
+#ifdef CONFIG_SMP
+#define XENO_BUG_ON_SMP(__subsys, __cond)		\
+	XENO_BUG_ON(__subsys, __cond)
+#define XENO_WARN_ON_SMP(__subsys, __cond)		\
+	XENO_WARN_ON(__subsys, __cond)
+#define XENO_WARN_ON_ONCE_SMP(__subsys, __cond)		\
+	XENO_WARN_ON_ONCE(__subsys, __cond)
+#else
+#define XENO_BUG_ON_SMP(__subsys, __cond)		\
+	do { } while (0)
+#define XENO_WARN_ON_SMP(__subsys, __cond)		\
+	do { } while (0)
+#define XENO_WARN_ON_ONCE_SMP(__subsys, __cond)		\
+	do { } while (0)
+#endif
+
+#define TODO()    BUILD_BUG_ON(IS_ENABLED(CONFIG_XENO_TODO))
+
+#define primary_mode_only()	XENO_BUG_ON(CONTEXT, is_secondary_domain())
+#define secondary_mode_only()	XENO_BUG_ON(CONTEXT, !is_secondary_domain())
+#define interrupt_only()	XENO_BUG_ON(CONTEXT, !xnsched_interrupt_p())
+#define realtime_cpu_only()	XENO_BUG_ON(CONTEXT, !xnsched_supported_cpu(raw_smp_processor_id()))
+#define thread_only()		XENO_BUG_ON(CONTEXT, xnsched_interrupt_p())
+#define irqoff_only()		XENO_BUG_ON(CONTEXT, hard_irqs_disabled() == 0)
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+#define atomic_only()		XENO_BUG_ON(CONTEXT, (xnlock_is_owner(&nklock) && hard_irqs_disabled()) == 0)
+#define preemptible_only()	XENO_BUG_ON(CONTEXT, xnlock_is_owner(&nklock) || hard_irqs_disabled())
+#else
+#define atomic_only()		XENO_BUG_ON(CONTEXT, hard_irqs_disabled() == 0)
+#define preemptible_only()	XENO_BUG_ON(CONTEXT, hard_irqs_disabled() != 0)
+#endif
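+
+/*
+ * Usage sketch (added for illustration, not part of the original
+ * header): the context checkers are typically placed at the entry of
+ * services with execution-context requirements. some_rt_service() is
+ * hypothetical.
+ *
+ *	void some_rt_service(void)
+ *	{
+ *		primary_mode_only();	// must run from the real-time domain
+ *		atomic_only();		// hard irqs off (and nklock held
+ *					// with CONFIG_XENO_OPT_DEBUG_LOCKING)
+ *		...
+ *	}
+ *
+ * When CONFIG_XENO_OPT_DEBUG_CONTEXT is off, XENO_DEBUG(CONTEXT)
+ * evaluates to 0 and these checks compile out.
+ */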
+
+#endif /* !_COBALT_KERNEL_ASSERT_H */
+++ linux-patched/include/xenomai/cobalt/kernel/timer.h	2022-03-21 12:58:31.654866874 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/init.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#ifndef _COBALT_KERNEL_TIMER_H
+#define _COBALT_KERNEL_TIMER_H
+
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/stat.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/ancillaries.h>
+#include <asm/xenomai/wrappers.h>
+
+/**
+ * @addtogroup cobalt_core_timer
+ * @{
+ */
+#define XN_INFINITE   ((xnticks_t)0)
+#define XN_NONBLOCK   ((xnticks_t)-1)
+
+/* Timer modes */
+typedef enum xntmode {
+	XN_RELATIVE,
+	XN_ABSOLUTE,
+	XN_REALTIME
+} xntmode_t;
+
+/* Timer status */
+#define XNTIMER_DEQUEUED  0x00000001
+#define XNTIMER_KILLED    0x00000002
+#define XNTIMER_PERIODIC  0x00000004
+#define XNTIMER_REALTIME  0x00000008
+#define XNTIMER_FIRED     0x00000010
+#define XNTIMER_RUNNING   0x00000020
+#define XNTIMER_KGRAVITY  0x00000040
+#define XNTIMER_UGRAVITY  0x00000080
+#define XNTIMER_IGRAVITY  0	     /* most conservative */
+
+#define XNTIMER_GRAVITY_MASK	(XNTIMER_KGRAVITY|XNTIMER_UGRAVITY)
+#define XNTIMER_INIT_MASK	XNTIMER_GRAVITY_MASK
+
+/* These flags are available to the real-time interfaces */
+#define XNTIMER_SPARE0  0x01000000
+#define XNTIMER_SPARE1  0x02000000
+#define XNTIMER_SPARE2  0x04000000
+#define XNTIMER_SPARE3  0x08000000
+#define XNTIMER_SPARE4  0x10000000
+#define XNTIMER_SPARE5  0x20000000
+#define XNTIMER_SPARE6  0x40000000
+#define XNTIMER_SPARE7  0x80000000
+
+/* Timer priorities */
+#define XNTIMER_LOPRIO  (-999999999)
+#define XNTIMER_STDPRIO 0
+#define XNTIMER_HIPRIO  999999999
+
+struct xntlholder {
+	struct list_head link;
+	xnticks_t key;
+	int prio;
+};
+
+#define xntlholder_date(h)	((h)->key)
+#define xntlholder_prio(h)	((h)->prio)
+#define xntlist_init(q)		INIT_LIST_HEAD(q)
+#define xntlist_empty(q)	list_empty(q)
+
+static inline struct xntlholder *xntlist_head(struct list_head *q)
+{
+	if (list_empty(q))
+		return NULL;
+
+	return list_first_entry(q, struct xntlholder, link);
+}
+
+static inline struct xntlholder *xntlist_next(struct list_head *q,
+					      struct xntlholder *h)
+{
+	if (list_is_last(&h->link, q))
+		return NULL;
+
+	return list_entry(h->link.next, struct xntlholder, link);
+}
+
+static inline struct xntlholder *xntlist_second(struct list_head *q,
+	struct xntlholder *h)
+{
+	return xntlist_next(q, h);
+}
+
+static inline void xntlist_insert(struct list_head *q, struct xntlholder *holder)
+{
+	struct xntlholder *p;
+
+	if (list_empty(q)) {
+		list_add(&holder->link, q);
+		return;
+	}
+
+	/*
+	 * Insert the new timer at the proper place in the single
+	 * queue. O(N) here, but this is the price for the increased
+	 * flexibility...
+	 */
+	list_for_each_entry_reverse(p, q, link) {
+		if ((xnsticks_t) (holder->key - p->key) > 0 ||
+		    (holder->key == p->key && holder->prio <= p->prio))
+		  break;
+	}
+
+	list_add(&holder->link, &p->link);
+}
+
+#define xntlist_remove(q, h)			\
+	do {					\
+		(void)(q);			\
+		list_del(&(h)->link);		\
+	} while (0)
+
+#if defined(CONFIG_XENO_OPT_TIMER_RBTREE)
+
+#include <linux/rbtree.h>
+
+typedef struct {
+	unsigned long long date;
+	unsigned prio;
+	struct rb_node link;
+} xntimerh_t;
+
+#define xntimerh_date(h) ((h)->date)
+#define xntimerh_prio(h) ((h)->prio)
+#define xntimerh_init(h) do { } while (0)
+
+typedef struct {
+	struct rb_root root;
+	xntimerh_t *head;
+} xntimerq_t;
+
+#define xntimerq_init(q)			\
+	({					\
+		xntimerq_t *_q = (q);		\
+		_q->root = RB_ROOT;		\
+		_q->head = NULL;		\
+	})
+
+#define xntimerq_destroy(q) do { } while (0)
+#define xntimerq_empty(q) ((q)->head == NULL)
+
+#define xntimerq_head(q) ((q)->head)
+
+#define xntimerq_next(q, h)						\
+	({								\
+		struct rb_node *_node = rb_next(&(h)->link);		\
+		_node ? (container_of(_node, xntimerh_t, link)) : NULL; \
+	})
+
+#define xntimerq_second(q, h) xntimerq_next(q, h)
+
+void xntimerq_insert(xntimerq_t *q, xntimerh_t *holder);
+
+static inline void xntimerq_remove(xntimerq_t *q, xntimerh_t *holder)
+{
+	if (holder == q->head)
+		q->head = xntimerq_second(q, holder);
+
+	rb_erase(&holder->link, &q->root);
+}
+
+typedef struct { } xntimerq_it_t;
+
+#define xntimerq_it_begin(q,i)	((void) (i), xntimerq_head(q))
+#define xntimerq_it_next(q,i,h) ((void) (i), xntimerq_next((q),(h)))
+
+#else /* CONFIG_XENO_OPT_TIMER_LIST */
+
+typedef struct xntlholder xntimerh_t;
+
+#define xntimerh_date(h)       xntlholder_date(h)
+#define xntimerh_prio(h)       xntlholder_prio(h)
+#define xntimerh_init(h)       do { } while (0)
+
+typedef struct list_head xntimerq_t;
+
+#define xntimerq_init(q)        xntlist_init(q)
+#define xntimerq_destroy(q)     do { } while (0)
+#define xntimerq_empty(q)       xntlist_empty(q)
+#define xntimerq_head(q)        xntlist_head(q)
+#define xntimerq_second(q, h)   xntlist_second((q),(h))
+#define xntimerq_insert(q, h)   xntlist_insert((q),(h))
+#define xntimerq_remove(q, h)   xntlist_remove((q),(h))
+
+typedef struct { } xntimerq_it_t;
+
+#define xntimerq_it_begin(q,i)  ((void) (i), xntlist_head(q))
+#define xntimerq_it_next(q,i,h) ((void) (i), xntlist_next((q),(h)))
+
+#endif /* CONFIG_XENO_OPT_TIMER_LIST */
+
+struct xnsched;
+
+struct xntimerdata {
+	xntimerq_t q;
+};
+
+static inline struct xntimerdata *
+xnclock_percpu_timerdata(struct xnclock *clock, int cpu)
+{
+	return per_cpu_ptr(clock->timerdata, cpu);
+}
+
+static inline struct xntimerdata *
+xnclock_this_timerdata(struct xnclock *clock)
+{
+	return raw_cpu_ptr(clock->timerdata);
+}
+
+struct xntimer {
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+	struct xnclock *clock;
+#endif
+	/** Link in timers list. */
+	xntimerh_t aplink;
+	struct list_head adjlink;
+	/** Timer status. */
+	unsigned long status;
+	/** Periodic interval (clock ticks, 0 == one shot). */
+	xnticks_t interval;
+	/** Periodic interval (nanoseconds, 0 == one shot). */
+	xnticks_t interval_ns;
+	/** Count of timer ticks in periodic mode. */
+	xnticks_t periodic_ticks;
+	/** First tick date in periodic mode. */
+	xnticks_t start_date;
+	/** Date of next periodic release point (timer ticks). */
+	xnticks_t pexpect_ticks;
+	/** Sched structure to which the timer is attached. */
+	struct xnsched *sched;
+	/** Timeout handler. */
+	void (*handler)(struct xntimer *timer);
+#ifdef CONFIG_XENO_OPT_STATS
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+	struct xnclock *tracker;
+#endif
+	/** Timer name to be displayed. */
+	char name[XNOBJECT_NAME_LEN];
+	/** Timer holder in timebase. */
+	struct list_head next_stat;
+	/** Number of timer schedules. */
+	xnstat_counter_t scheduled;
+	/** Number of timer events. */
+	xnstat_counter_t fired;
+#endif /* CONFIG_XENO_OPT_STATS */
+};
+
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+
+static inline struct xnclock *xntimer_clock(struct xntimer *timer)
+{
+	return timer->clock;
+}
+
+void xntimer_set_clock(struct xntimer *timer,
+		       struct xnclock *newclock);
+
+#else /* !CONFIG_XENO_OPT_EXTCLOCK */
+
+static inline struct xnclock *xntimer_clock(struct xntimer *timer)
+{
+	return &nkclock;
+}
+
+static inline void xntimer_set_clock(struct xntimer *timer,
+				     struct xnclock *newclock)
+{
+	XENO_BUG_ON(COBALT, newclock != &nkclock);
+}
+
+#endif /* !CONFIG_XENO_OPT_EXTCLOCK */
+
+#ifdef CONFIG_SMP
+static inline struct xnsched *xntimer_sched(struct xntimer *timer)
+{
+	return timer->sched;
+}
+#else /* !CONFIG_SMP */
+#define xntimer_sched(t)	xnsched_current()
+#endif /* !CONFIG_SMP */
+
+#define xntimer_percpu_queue(__timer)					\
+	({								\
+		struct xntimerdata *tmd;				\
+		int cpu = xnsched_cpu((__timer)->sched);		\
+		tmd = xnclock_percpu_timerdata(xntimer_clock(__timer), cpu); \
+		&tmd->q;						\
+	})
+
+static inline unsigned long xntimer_gravity(struct xntimer *timer)
+{
+	struct xnclock *clock = xntimer_clock(timer);
+
+	if (timer->status & XNTIMER_KGRAVITY)
+		return clock->gravity.kernel;
+
+	if (timer->status & XNTIMER_UGRAVITY)
+		return clock->gravity.user;
+
+	return clock->gravity.irq;
+}
+
+static inline void xntimer_update_date(struct xntimer *timer)
+{
+	xntimerh_date(&timer->aplink) = timer->start_date
+		+ xnclock_ns_to_ticks(xntimer_clock(timer),
+			timer->periodic_ticks * timer->interval_ns)
+		- xntimer_gravity(timer);
+}
+
+static inline xnticks_t xntimer_pexpect(struct xntimer *timer)
+{
+	return timer->start_date +
+		xnclock_ns_to_ticks(xntimer_clock(timer),
+				timer->pexpect_ticks * timer->interval_ns);
+}
+
+static inline void xntimer_set_priority(struct xntimer *timer,
+					int prio)
+{
+	xntimerh_prio(&timer->aplink) = prio;
+}
+
+static inline int xntimer_active_p(struct xntimer *timer)
+{
+	return timer->sched != NULL;
+}
+
+static inline int xntimer_running_p(struct xntimer *timer)
+{
+	return (timer->status & XNTIMER_RUNNING) != 0;
+}
+
+static inline int xntimer_fired_p(struct xntimer *timer)
+{
+	return (timer->status & XNTIMER_FIRED) != 0;
+}
+
+static inline int xntimer_periodic_p(struct xntimer *timer)
+{
+	return (timer->status & XNTIMER_PERIODIC) != 0;
+}
+
+void __xntimer_init(struct xntimer *timer,
+		    struct xnclock *clock,
+		    void (*handler)(struct xntimer *timer),
+		    struct xnsched *sched,
+		    int flags);
+
+void xntimer_set_gravity(struct xntimer *timer,
+			 int gravity);
+
+#ifdef CONFIG_XENO_OPT_STATS
+
+#define xntimer_init(__timer, __clock, __handler, __sched, __flags)	\
+do {									\
+	__xntimer_init(__timer, __clock, __handler, __sched, __flags);	\
+	xntimer_set_name(__timer, #__handler);				\
+} while (0)
+
+static inline void xntimer_reset_stats(struct xntimer *timer)
+{
+	xnstat_counter_set(&timer->scheduled, 0);
+	xnstat_counter_set(&timer->fired, 0);
+}
+
+static inline void xntimer_account_scheduled(struct xntimer *timer)
+{
+	xnstat_counter_inc(&timer->scheduled);
+}
+
+static inline void xntimer_account_fired(struct xntimer *timer)
+{
+	xnstat_counter_inc(&timer->fired);
+}
+
+static inline void xntimer_set_name(struct xntimer *timer, const char *name)
+{
+	knamecpy(timer->name, name);
+}
+
+#else /* !CONFIG_XENO_OPT_STATS */
+
+#define xntimer_init	__xntimer_init
+
+static inline void xntimer_reset_stats(struct xntimer *timer) { }
+
+static inline void xntimer_account_scheduled(struct xntimer *timer) { }
+
+static inline void xntimer_account_fired(struct xntimer *timer) { }
+
+static inline void xntimer_set_name(struct xntimer *timer, const char *name) { }
+
+#endif /* !CONFIG_XENO_OPT_STATS */
+
+#if defined(CONFIG_XENO_OPT_EXTCLOCK) && defined(CONFIG_XENO_OPT_STATS)
+void xntimer_switch_tracking(struct xntimer *timer,
+			     struct xnclock *newclock);
+#else
+static inline
+void xntimer_switch_tracking(struct xntimer *timer,
+			     struct xnclock *newclock) { }
+#endif
+
+void xntimer_destroy(struct xntimer *timer);
+
+/**
+ * @fn xnticks_t xntimer_interval(struct xntimer *timer)
+ *
+ * @brief Return the timer interval value.
+ *
+ * Return the timer interval value in nanoseconds.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @return The duration of a period in nanoseconds. The special value
+ * XN_INFINITE is returned if @a timer is currently disabled or
+ * one-shot.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+static inline xnticks_t xntimer_interval(struct xntimer *timer)
+{
+	return timer->interval_ns;
+}
+
+static inline xnticks_t xntimer_expiry(struct xntimer *timer)
+{
+	/* Real expiry date in ticks without anticipation (no gravity) */
+	return xntimerh_date(&timer->aplink) + xntimer_gravity(timer);
+}
+
+int xntimer_start(struct xntimer *timer,
+		xnticks_t value,
+		xnticks_t interval,
+		xntmode_t mode);
+
+void __xntimer_stop(struct xntimer *timer);
+
+xnticks_t xntimer_get_date(struct xntimer *timer);
+
+xnticks_t __xntimer_get_timeout(struct xntimer *timer);
+
+xnticks_t xntimer_get_interval(struct xntimer *timer);
+
+int xntimer_heading_p(struct xntimer *timer);
+
+static inline void xntimer_stop(struct xntimer *timer)
+{
+	if (timer->status & XNTIMER_RUNNING)
+		__xntimer_stop(timer);
+}
+
+static inline xnticks_t xntimer_get_timeout(struct xntimer *timer)
+{
+	if (!xntimer_running_p(timer))
+		return XN_INFINITE;
+
+	return __xntimer_get_timeout(timer);
+}
+
+static inline xnticks_t xntimer_get_timeout_stopped(struct xntimer *timer)
+{
+	return __xntimer_get_timeout(timer);
+}
+
+static inline void xntimer_enqueue(struct xntimer *timer,
+				   xntimerq_t *q)
+{
+	xntimerq_insert(q, &timer->aplink);
+	timer->status &= ~XNTIMER_DEQUEUED;
+	xntimer_account_scheduled(timer);
+}
+
+static inline void xntimer_dequeue(struct xntimer *timer,
+				   xntimerq_t *q)
+{
+	xntimerq_remove(q, &timer->aplink);
+	timer->status |= XNTIMER_DEQUEUED;
+}
+
+unsigned long long xntimer_get_overruns(struct xntimer *timer,
+					struct xnthread *waiter,
+					xnticks_t now);
+
+#ifdef CONFIG_SMP
+
+void __xntimer_migrate(struct xntimer *timer, struct xnsched *sched);
+
+static inline
+void xntimer_migrate(struct xntimer *timer, struct xnsched *sched)
+{				/* nklocked, IRQs off */
+	if (timer->sched != sched)
+		__xntimer_migrate(timer, sched);
+}
+
+void __xntimer_set_affinity(struct xntimer *timer,
+			    struct xnsched *sched);
+
+static inline void xntimer_set_affinity(struct xntimer *timer,
+					struct xnsched *sched)
+{
+	if (sched != xntimer_sched(timer))
+		__xntimer_set_affinity(timer, sched);
+}
+
+#else /* ! CONFIG_SMP */
+
+static inline void xntimer_migrate(struct xntimer *timer,
+				   struct xnsched *sched)
+{
+	timer->sched = sched;
+}
+
+static inline void xntimer_set_affinity(struct xntimer *timer,
+					struct xnsched *sched)
+{
+	xntimer_migrate(timer, sched);
+}
+
+#endif /* CONFIG_SMP */
+
+char *xntimer_format_time(xnticks_t ns,
+			  char *buf, size_t bufsz);
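+
+/*
+ * Usage sketch (added for illustration, not part of the original
+ * header): arming a periodic timer on the core clock for the current
+ * CPU. mytimer and mytimer_handler are hypothetical.
+ *
+ *	static struct xntimer mytimer;
+ *
+ *	static void mytimer_handler(struct xntimer *timer)
+ *	{
+ *		// runs on every tick of the 1 ms period
+ *	}
+ *
+ *	xntimer_init(&mytimer, &nkclock, mytimer_handler,
+ *		     xnsched_current(), XNTIMER_IGRAVITY);
+ *	xntimer_start(&mytimer, 1000000, 1000000, XN_RELATIVE);
+ *	...
+ *	xntimer_stop(&mytimer);
+ *	xntimer_destroy(&mytimer);
+ */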
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_TIMER_H */
+++ linux-patched/include/xenomai/cobalt/kernel/init.h	2022-03-21 12:58:31.646866952 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/registry.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_INIT_H
+#define _COBALT_KERNEL_INIT_H
+
+#include <linux/atomic.h>
+#include <linux/notifier.h>
+#include <cobalt/uapi/corectl.h>
+
+extern atomic_t cobalt_runstate;
+
+static inline enum cobalt_run_states realtime_core_state(void)
+{
+	return atomic_read(&cobalt_runstate);
+}
+
+static inline int realtime_core_enabled(void)
+{
+	return atomic_read(&cobalt_runstate) != COBALT_STATE_DISABLED;
+}
+
+static inline int realtime_core_running(void)
+{
+	return atomic_read(&cobalt_runstate) == COBALT_STATE_RUNNING;
+}
+
+static inline void set_realtime_core_state(enum cobalt_run_states state)
+{
+	atomic_set(&cobalt_runstate, state);
+}
+
+void cobalt_add_state_chain(struct notifier_block *nb);
+
+void cobalt_remove_state_chain(struct notifier_block *nb);
+
+void cobalt_call_state_chain(enum cobalt_run_states newstate);
+
+#endif /* !_COBALT_KERNEL_INIT_H */
+++ linux-patched/include/xenomai/cobalt/kernel/registry.h	2022-03-21 12:58:31.639867021 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched-rt.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2004 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_REGISTRY_H
+#define _COBALT_KERNEL_REGISTRY_H
+
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/vfile.h>
+
+/**
+ * @addtogroup cobalt_core_registry
+ *
+ * @{
+ */
+struct xnpnode;
+
+struct xnobject {
+	void *objaddr;
+	const char *key;	  /* !< Hash key. May be NULL if anonymous. */
+	unsigned long cstamp;		  /* !< Creation stamp. */
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct xnpnode *pnode;	/* !< v-file information class. */
+	union {
+		struct {
+			struct xnvfile_rev_tag tag;
+			struct xnvfile_snapshot file;
+		} vfsnap; /* !< virtual snapshot file. */
+		struct xnvfile_regular vfreg; /* !< virtual regular file */
+		struct xnvfile_link link;     /* !< virtual link. */
+	} vfile_u;
+	struct xnvfile *vfilp;
+#endif /* CONFIG_XENO_OPT_VFILE */
+	struct hlist_node hlink; /* !< Link in h-table */
+	struct list_head link;
+};
+
+int xnregistry_init(void);
+
+void xnregistry_cleanup(void);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+#define XNOBJECT_EXPORT_SCHEDULED  ((struct xnvfile *)1L)
+#define XNOBJECT_EXPORT_INPROGRESS ((struct xnvfile *)2L)
+#define XNOBJECT_EXPORT_ABORTED    ((struct xnvfile *)3L)
+
+struct xnptree {
+	const char *dirname;
+	/* hidden */
+	int entries;
+	struct xnvfile_directory vdir;
+};
+
+#define DEFINE_XNPTREE(__var, __name)		\
+	struct xnptree __var = {		\
+		.dirname = __name,		\
+		.entries = 0,			\
+		.vdir = xnvfile_nodir,		\
+	}
+
+struct xnpnode_ops {
+	int (*export)(struct xnobject *object, struct xnpnode *pnode);
+	void (*unexport)(struct xnobject *object, struct xnpnode *pnode);
+	void (*touch)(struct xnobject *object);
+};
+
+struct xnpnode {
+	const char *dirname;
+	struct xnptree *root;
+	struct xnpnode_ops *ops;
+	/* hidden */
+	int entries;
+	struct xnvfile_directory vdir;
+};
+
+struct xnpnode_snapshot {
+	struct xnpnode node;
+	struct xnvfile_snapshot_template vfile;
+};
+
+struct xnpnode_regular {
+	struct xnpnode node;
+	struct xnvfile_regular_template vfile;
+};
+
+struct xnpnode_link {
+	struct xnpnode node;
+	char *(*target)(void *obj);
+};
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+#define DEFINE_XNPTREE(__var, __name);
+
+/* Placeholders. */
+
+struct xnpnode {
+	const char *dirname;
+};
+
+struct xnpnode_snapshot {
+	struct xnpnode node;
+};
+
+struct xnpnode_regular {
+	struct xnpnode node;
+};
+
+struct xnpnode_link {
+	struct xnpnode node;
+};
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+/* Public interface. */
+
+extern struct xnobject *registry_obj_slots;
+
+static inline struct xnobject *xnregistry_validate(xnhandle_t handle)
+{
+	struct xnobject *object;
+	/*
+	 * Careful: a removed object which is still in flight to be
+	 * unexported carries a NULL objaddr, so we have to check this
+	 * as well.
+	 */
+	handle = xnhandle_get_index(handle);
+	if (likely(handle && handle < CONFIG_XENO_OPT_REGISTRY_NRSLOTS)) {
+		object = &registry_obj_slots[handle];
+		return object->objaddr ? object : NULL;
+	}
+
+	return NULL;
+}
+
+static inline const char *xnregistry_key(xnhandle_t handle)
+{
+	struct xnobject *object = xnregistry_validate(handle);
+	return object ? object->key : NULL;
+}
+
+int xnregistry_enter(const char *key,
+		     void *objaddr,
+		     xnhandle_t *phandle,
+		     struct xnpnode *pnode);
+
+static inline int
+xnregistry_enter_anon(void *objaddr, xnhandle_t *phandle)
+{
+	return xnregistry_enter(NULL, objaddr, phandle, NULL);
+}
+
+int xnregistry_bind(const char *key,
+		    xnticks_t timeout,
+		    int timeout_mode,
+		    xnhandle_t *phandle);
+
+int xnregistry_remove(xnhandle_t handle);
+
+static inline
+void *xnregistry_lookup(xnhandle_t handle,
+			unsigned long *cstamp_r)
+{
+	struct xnobject *object = xnregistry_validate(handle);
+
+	if (object == NULL)
+		return NULL;
+
+	if (cstamp_r)
+		*cstamp_r = object->cstamp;
+
+	return object->objaddr;
+}
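+
+/*
+ * Usage sketch (added for illustration, not part of the original
+ * header): registering an object, resolving its handle back to an
+ * address, then dropping the entry. myobj/obj are hypothetical and
+ * error handling is omitted.
+ *
+ *	xnhandle_t handle;
+ *
+ *	xnregistry_enter("myobj", obj, &handle, NULL);	// no vfile export
+ *	...
+ *	obj = xnregistry_lookup(handle, NULL);		// NULL if stale
+ *	...
+ *	xnregistry_remove(handle);
+ */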
+
+int xnregistry_unlink(const char *key);
+
+unsigned xnregistry_hash_size(void);
+
+extern struct xnpnode_ops xnregistry_vfsnap_ops;
+
+extern struct xnpnode_ops xnregistry_vlink_ops;
+
+extern struct xnpnode_ops xnregistry_vfreg_ops;
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_REGISTRY_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched-rt.h	2022-03-21 12:58:31.631867099 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/time.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_RT_H
+#define _COBALT_KERNEL_SCHED_RT_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-rt.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+/*
+ * Global priority scale for Xenomai's core scheduling class,
+ * available to SCHED_COBALT members.
+ */
+#define XNSCHED_CORE_MIN_PRIO	0
+#define XNSCHED_CORE_MAX_PRIO	259
+#define XNSCHED_CORE_NR_PRIO	\
+	(XNSCHED_CORE_MAX_PRIO - XNSCHED_CORE_MIN_PRIO + 1)
+
+/*
+ * Priority range for SCHED_FIFO, and all other classes Cobalt
+ * implements except SCHED_COBALT.
+ */
+#define XNSCHED_FIFO_MIN_PRIO	1
+#define XNSCHED_FIFO_MAX_PRIO	256
+
+#if XNSCHED_CORE_NR_PRIO > XNSCHED_CLASS_WEIGHT_FACTOR ||	\
+  (defined(CONFIG_XENO_OPT_SCALABLE_SCHED) &&			\
+   XNSCHED_CORE_NR_PRIO > XNSCHED_MLQ_LEVELS)
+#error "XNSCHED_MLQ_LEVELS is too low"
+#endif
+
+extern struct xnsched_class xnsched_class_rt;
+
+static inline void __xnsched_rt_requeue(struct xnthread *thread)
+{
+	xnsched_addq(&thread->sched->rt.runnable, thread);
+}
+
+static inline void __xnsched_rt_enqueue(struct xnthread *thread)
+{
+	xnsched_addq_tail(&thread->sched->rt.runnable, thread);
+}
+
+static inline void __xnsched_rt_dequeue(struct xnthread *thread)
+{
+	xnsched_delq(&thread->sched->rt.runnable, thread);
+}
+
+static inline void __xnsched_rt_track_weakness(struct xnthread *thread)
+{
+	/*
+	 * We have to track threads exiting weak scheduling, i.e. any
+	 * thread leaving the WEAK class code if compiled in, or
+	 * assigned a zero priority if weak threads are hosted by the
+	 * RT class.
+	 *
+	 * CAUTION: since we need to check the effective priority
+	 * level for determining the weakness state, this can only
+	 * apply to non-boosted threads.
+	 */
+	if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_WEAK) || thread->cprio)
+		xnthread_clear_state(thread, XNWEAK);
+	else
+		xnthread_set_state(thread, XNWEAK);
+}
+
+static inline bool __xnsched_rt_setparam(struct xnthread *thread,
+					 const union xnsched_policy_param *p)
+{
+	bool ret = xnsched_set_effective_priority(thread, p->rt.prio);
+
+	if (!xnthread_test_state(thread, XNBOOST))
+		__xnsched_rt_track_weakness(thread);
+
+	return ret;
+}
+
+static inline void __xnsched_rt_getparam(struct xnthread *thread,
+					 union xnsched_policy_param *p)
+{
+	p->rt.prio = thread->cprio;
+}
+
+static inline void __xnsched_rt_trackprio(struct xnthread *thread,
+					  const union xnsched_policy_param *p)
+{
+	if (p)
+		thread->cprio = p->rt.prio; /* Force update. */
+	else {
+		thread->cprio = thread->bprio;
+		/* Leaving PI/PP, so non-boosted by definition. */
+		__xnsched_rt_track_weakness(thread);
+	}
+}
+
+static inline void __xnsched_rt_protectprio(struct xnthread *thread, int prio)
+{
+	/*
+	 * The RT class supports the widest priority range from
+	 * XNSCHED_CORE_MIN_PRIO to XNSCHED_CORE_MAX_PRIO inclusive,
+	 * no need to cap the input value which is guaranteed to be in
+	 * the range [1..XNSCHED_CORE_MAX_PRIO].
+	 */
+	thread->cprio = prio;
+}
+
+static inline void __xnsched_rt_forget(struct xnthread *thread)
+{
+}
+
+static inline int xnsched_rt_init_thread(struct xnthread *thread)
+{
+	return 0;
+}
+
+#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
+struct xnthread *xnsched_rt_pick(struct xnsched *sched);
+#else
+static inline struct xnthread *xnsched_rt_pick(struct xnsched *sched)
+{
+	return xnsched_getq(&sched->rt.runnable);
+}
+#endif
+
+void xnsched_rt_tick(struct xnsched *sched);
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_RT_H */
+++ linux-patched/include/xenomai/cobalt/kernel/time.h	2022-03-21 12:58:31.624867167 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/intr.h	1970-01-01 01:00:00.000000000 +0100
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _COBALT_KERNEL_TIME_H
+#define _COBALT_KERNEL_TIME_H
+
+#include <linux/time.h>
+#include <linux/time64.h>
+
+/**
+ * Read struct __kernel_timespec from userspace and convert to
+ * struct timespec64
+ *
+ * @param ts The destination, will be filled
+ * @param uts The source, provided by an application
+ * @return 0 on success, -EFAULT otherwise
+ */
+int cobalt_get_timespec64(struct timespec64 *ts,
+			  const struct __kernel_timespec __user *uts);
+
+/**
+ * Convert struct timespec64 to struct __kernel_timespec
+ * and copy to userspace
+ *
+ * @param ts The source, provided by kernel
+ * @param uts The destination, will be filled
+ * @return 0 on success, -EFAULT otherwise
+ */
+int cobalt_put_timespec64(const struct timespec64 *ts,
+			   struct __kernel_timespec __user *uts);
+
+#endif //_COBALT_KERNEL_TIME_H
+++ linux-patched/include/xenomai/cobalt/kernel/intr.h	2022-03-21 12:58:31.617867235 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/schedqueue.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_INTR_H
+#define _COBALT_KERNEL_INTR_H
+
+#include <linux/spinlock.h>
+#include <cobalt/kernel/stat.h>
+#include <pipeline/irq.h>
+
+/**
+ * @addtogroup cobalt_core_irq
+ * @{
+ */
+
+/* Possible return values of a handler. */
+#define XN_IRQ_NONE	 0x1
+#define XN_IRQ_HANDLED	 0x2
+#define XN_IRQ_STATMASK	 (XN_IRQ_NONE|XN_IRQ_HANDLED)
+#define XN_IRQ_PROPAGATE 0x100
+#define XN_IRQ_DISABLE   0x200
+
+/* Init flags. */
+#define XN_IRQTYPE_SHARED  0x1
+#define XN_IRQTYPE_EDGE    0x2
+
+/* Status bits. */
+#define XN_IRQSTAT_ATTACHED   0
+#define _XN_IRQSTAT_ATTACHED  (1 << XN_IRQSTAT_ATTACHED)
+#define XN_IRQSTAT_DISABLED   1
+#define _XN_IRQSTAT_DISABLED  (1 << XN_IRQSTAT_DISABLED)
+
+struct xnintr;
+struct xnsched;
+
+typedef int (*xnisr_t)(struct xnintr *intr);
+
+typedef void (*xniack_t)(unsigned irq, void *arg);
+
+struct xnirqstat {
+	/** Number of handled receipts since attachment. */
+	xnstat_counter_t hits;
+	/** Runtime accounting entity */
+	xnstat_exectime_t account;
+	/** Accumulated accounting entity */
+	xnstat_exectime_t sum;
+};
+
+struct xnintr {
+#ifdef CONFIG_XENO_OPT_SHIRQ
+	/** Next object in the IRQ-sharing chain. */
+	struct xnintr *next;
+#endif
+	/** Number of consecutive unhandled interrupts */
+	unsigned int unhandled;
+	/** Interrupt service routine. */
+	xnisr_t isr;
+	/** User-defined cookie value. */
+	void *cookie;
+	/** runtime status */
+	unsigned long status;
+	/** Creation flags. */
+	int flags;
+	/** IRQ number. */
+	unsigned int irq;
+	/** Interrupt acknowledge routine. */
+	xniack_t iack;
+	/** Symbolic name. */
+	const char *name;
+	/** Descriptor maintenance lock. */
+	raw_spinlock_t lock;
+#ifdef CONFIG_XENO_OPT_STATS_IRQS
+	/** Statistics. */
+	struct xnirqstat *stats;
+#endif
+};
+
+struct xnintr_iterator {
+    int cpu;		/** Current CPU in iteration. */
+    unsigned long hits;	/** Current hit counter. */
+    xnticks_t exectime_period;	/** Used CPU time in current accounting period. */
+    xnticks_t account_period; /** Length of accounting period. */
+    xnticks_t exectime_total;	/** Overall CPU time consumed. */
+    int list_rev;	/** System-wide xnintr list revision (internal use). */
+    struct xnintr *prev;	/** Previously visited xnintr object (internal use). */
+};
+
+void xnintr_core_clock_handler(void);
+
+void xnintr_host_tick(struct xnsched *sched);
+
+    /* Public interface. */
+
+int xnintr_init(struct xnintr *intr,
+		const char *name,
+		unsigned irq,
+		xnisr_t isr,
+		xniack_t iack,
+		int flags);
+
+void xnintr_destroy(struct xnintr *intr);
+
+int xnintr_attach(struct xnintr *intr,
+		  void *cookie, const cpumask_t *cpumask);
+
+void xnintr_detach(struct xnintr *intr);
+
+void xnintr_enable(struct xnintr *intr);
+
+void xnintr_disable(struct xnintr *intr);
+
+int xnintr_affinity(struct xnintr *intr, const cpumask_t *cpumask);
+
+#ifdef CONFIG_XENO_OPT_STATS_IRQS
+
+int xnintr_query_init(struct xnintr_iterator *iterator);
+
+int xnintr_get_query_lock(void);
+
+void xnintr_put_query_lock(void);
+
+int xnintr_query_next(int irq, struct xnintr_iterator *iterator,
+		      char *name_buf);
+
+#else /* !CONFIG_XENO_OPT_STATS_IRQS */
+
+static inline int xnintr_query_init(struct xnintr_iterator *iterator)
+{
+	return 0;
+}
+
+static inline int xnintr_get_query_lock(void)
+{
+	return 0;
+}
+
+static inline void xnintr_put_query_lock(void) {}
+#endif /* !CONFIG_XENO_OPT_STATS_IRQS */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_INTR_H */
+++ linux-patched/include/xenomai/cobalt/kernel/schedqueue.h	2022-03-21 12:58:31.609867313 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/vdso.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHEDQUEUE_H
+#define _COBALT_KERNEL_SCHEDQUEUE_H
+
+#include <cobalt/kernel/list.h>
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#define XNSCHED_CLASS_WEIGHT_FACTOR	1024
+
+#ifdef CONFIG_XENO_OPT_SCALABLE_SCHED
+
+#include <linux/bitmap.h>
+
+/*
+ * Multi-level priority queue, suitable for handling the runnable
+ * thread queue of the core scheduling class with O(1) property. We
+ * only manage a descending queuing order, i.e. highest numbered
+ * priorities come first.
+ */
+#define XNSCHED_MLQ_LEVELS  260	/* i.e. XNSCHED_CORE_NR_PRIO */
+
+struct xnsched_mlq {
+	int elems;
+	DECLARE_BITMAP(prio_map, XNSCHED_MLQ_LEVELS);
+	struct list_head heads[XNSCHED_MLQ_LEVELS];
+};
+
+struct xnthread;
+
+void xnsched_initq(struct xnsched_mlq *q);
+
+void xnsched_addq(struct xnsched_mlq *q,
+		  struct xnthread *thread);
+
+void xnsched_addq_tail(struct xnsched_mlq *q, 
+		       struct xnthread *thread);
+
+void xnsched_delq(struct xnsched_mlq *q,
+		  struct xnthread *thread);
+
+struct xnthread *xnsched_getq(struct xnsched_mlq *q);
+
+static inline int xnsched_emptyq_p(struct xnsched_mlq *q)
+{
+	return q->elems == 0;
+}
+
+static inline int xnsched_weightq(struct xnsched_mlq *q)
+{
+	return find_first_bit(q->prio_map, XNSCHED_MLQ_LEVELS);
+}
+
+typedef struct xnsched_mlq xnsched_queue_t;
+
+#else /* ! CONFIG_XENO_OPT_SCALABLE_SCHED */
+
+typedef struct list_head xnsched_queue_t;
+
+#define xnsched_initq(__q)			INIT_LIST_HEAD(__q)
+#define xnsched_emptyq_p(__q)			list_empty(__q)
+#define xnsched_addq(__q, __t)			list_add_prilf(__t, __q, cprio, rlink)
+#define xnsched_addq_tail(__q, __t)		list_add_priff(__t, __q, cprio, rlink)
+#define xnsched_delq(__q, __t)			(void)(__q), list_del(&(__t)->rlink)
+#define xnsched_getq(__q)							\
+	({									\
+		struct xnthread *__t = NULL;					\
+		if (!list_empty(__q))						\
+			__t = list_get_entry(__q, struct xnthread, rlink);	\
+		__t;								\
+	})
+#define xnsched_weightq(__q)						\
+	({								\
+		struct xnthread *__t;					\
+		__t = list_first_entry(__q, struct xnthread, rlink);	\
+		__t->cprio;						\
+	})
+
+
+#endif /* !CONFIG_XENO_OPT_SCALABLE_SCHED */
+
+struct xnthread *xnsched_findq(xnsched_queue_t *q, int prio);
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHEDQUEUE_H */
+++ linux-patched/include/xenomai/cobalt/kernel/vdso.h	2022-03-21 12:58:31.602867381 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/pipe.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Wolfgang Mauerer <wolfgang.mauerer@siemens.com>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_VDSO_H
+#define _COBALT_KERNEL_VDSO_H
+
+#include <linux/time.h>
+#include <asm/barrier.h>
+#include <asm/atomic.h>
+#include <asm/processor.h>
+#include <cobalt/uapi/kernel/vdso.h>
+
+extern struct xnvdso *nkvdso;
+
+/*
+ * Define the available feature set here. We have a single feature
+ * defined for now, only in the I-pipe case.
+ */
+#ifdef CONFIG_IPIPE_HAVE_HOSTRT
+
+#define XNVDSO_FEATURES XNVDSO_FEAT_HOST_REALTIME
+
+static inline struct xnvdso_hostrt_data *get_hostrt_data(void)
+{
+	return &nkvdso->hostrt_data;
+}
+
+#else
+
+#define XNVDSO_FEATURES 0
+
+#endif
+
+#endif /* _COBALT_KERNEL_VDSO_H */
+++ linux-patched/include/xenomai/cobalt/kernel/pipe.h	2022-03-21 12:58:31.595867450 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/ancillaries.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA
+ * 02139, USA; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_PIPE_H
+#define _COBALT_KERNEL_PIPE_H
+
+#include <linux/types.h>
+#include <linux/poll.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/uapi/kernel/pipe.h>
+
+#define XNPIPE_NDEVS      CONFIG_XENO_OPT_PIPE_NRDEV
+#define XNPIPE_DEV_MAJOR  150
+
+#define XNPIPE_KERN_CONN         0x1
+#define XNPIPE_KERN_LCLOSE       0x2
+#define XNPIPE_USER_CONN         0x4
+#define XNPIPE_USER_SIGIO        0x8
+#define XNPIPE_USER_WREAD        0x10
+#define XNPIPE_USER_WREAD_READY  0x20
+#define XNPIPE_USER_WSYNC        0x40
+#define XNPIPE_USER_WSYNC_READY  0x80
+#define XNPIPE_USER_LCONN        0x100
+
+#define XNPIPE_USER_ALL_WAIT \
+(XNPIPE_USER_WREAD|XNPIPE_USER_WSYNC)
+
+#define XNPIPE_USER_ALL_READY \
+(XNPIPE_USER_WREAD_READY|XNPIPE_USER_WSYNC_READY)
+
+struct xnpipe_mh {
+	size_t size;
+	size_t rdoff;
+	struct list_head link;
+};
+
+struct xnpipe_state;
+
+struct xnpipe_operations {
+	void (*output)(struct xnpipe_mh *mh, void *xstate);
+	int (*input)(struct xnpipe_mh *mh, int retval, void *xstate);
+	void *(*alloc_ibuf)(size_t size, void *xstate);
+	void (*free_ibuf)(void *buf, void *xstate);
+	void (*free_obuf)(void *buf, void *xstate);
+	void (*release)(void *xstate);
+};
+
+struct xnpipe_state {
+	struct list_head slink;	/* Link on sleep queue */
+	struct list_head alink;	/* Link on async queue */
+
+	struct list_head inq;		/* From user-space to kernel */
+	int nrinq;
+	struct list_head outq;		/* From kernel to user-space */
+	int nroutq;
+	struct xnsynch synchbase;
+	struct xnpipe_operations ops;
+	void *xstate;		/* Extra state managed by caller */
+
+	/* Linux kernel part */
+	unsigned long status;
+	struct fasync_struct *asyncq;
+	wait_queue_head_t readq;	/* open/read/poll waiters */
+	wait_queue_head_t syncq;	/* sync waiters */
+	int wcount;			/* number of waiters on this minor */
+	size_t ionrd;
+};
+
+extern struct xnpipe_state xnpipe_states[];
+
+#define xnminor_from_state(s) (s - xnpipe_states)
+
+#ifdef CONFIG_XENO_OPT_PIPE
+int xnpipe_mount(void);
+void xnpipe_umount(void);
+#else /* !CONFIG_XENO_OPT_PIPE */
+static inline int xnpipe_mount(void) { return 0; }
+static inline void xnpipe_umount(void) { }
+#endif /* !CONFIG_XENO_OPT_PIPE */
+
+/* Entry points of the kernel interface. */
+
+int xnpipe_connect(int minor,
+		   struct xnpipe_operations *ops, void *xstate);
+
+int xnpipe_disconnect(int minor);
+
+ssize_t xnpipe_send(int minor,
+		    struct xnpipe_mh *mh, size_t size, int flags);
+
+ssize_t xnpipe_mfixup(int minor, struct xnpipe_mh *mh, ssize_t size);
+
+ssize_t xnpipe_recv(int minor,
+		    struct xnpipe_mh **pmh, xnticks_t timeout);
+
+int xnpipe_flush(int minor, int mode);
+
+int xnpipe_pollstate(int minor, unsigned int *mask_r);
+
+static inline unsigned int __xnpipe_pollstate(int minor)
+{
+	struct xnpipe_state *state = xnpipe_states + minor;
+	unsigned int mask = POLLOUT;
+
+	if (!list_empty(&state->inq))
+		mask |= POLLIN;
+
+	return mask;
+}
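+
+/*
+ * Usage sketch (added for illustration, not part of the original
+ * header): a kernel-side endpoint connects a minor to its callback
+ * table, then pushes messages toward user space. my_ops, mh and the
+ * sizes are hypothetical; the message payload typically follows the
+ * xnpipe_mh header (see xnpipe_m_data()).
+ *
+ *	minor = xnpipe_connect(minor, &my_ops, NULL);
+ *	...
+ *	xnpipe_send(minor, mh, sizeof(*mh) + datalen, 0);
+ *	...
+ *	xnpipe_disconnect(minor);
+ */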
+
+static inline char *xnpipe_m_data(struct xnpipe_mh *mh)
+{
+	return (char *)(mh + 1);
+}
+
+#define xnpipe_m_size(mh) ((mh)->size)
+
+#define xnpipe_m_rdoff(mh) ((mh)->rdoff)
+
+#endif /* !_COBALT_KERNEL_PIPE_H */
+++ linux-patched/include/xenomai/cobalt/kernel/ancillaries.h	2022-03-21 12:58:31.587867528 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/bufd.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_ANCILLARIES_H
+#define _COBALT_KERNEL_ANCILLARIES_H
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/uidgid.h>
+#include <cobalt/uapi/kernel/limits.h>
+
+#define ksformat(__dst, __len, __fmt, __args...)			\
+	({								\
+		size_t __ret;						\
+		__ret = snprintf(__dst, __len, __fmt, ##__args);	\
+		if (__ret >= __len)					\
+			__dst[__len-1] = '\0';				\
+		__ret;							\
+	})
+
+#define kasformat(__fmt, __args...)					\
+	({								\
+		kasprintf(GFP_KERNEL, __fmt, ##__args);			\
+	})
+
+#define kvsformat(__dst, __len, __fmt, __ap)				\
+	({								\
+		size_t __ret;						\
+		__ret = vsnprintf(__dst, __len, __fmt, __ap);		\
+		if (__ret >= __len)					\
+			__dst[__len-1] = '\0';				\
+		__ret;							\
+	})
+
+#define kvasformat(__fmt, __ap)						\
+	({								\
+		kvasprintf(GFP_KERNEL, __fmt, __ap);			\
+	})
+
+void __knamecpy_requires_character_array_as_destination(void);
+
+#define knamecpy(__dst, __src)						\
+	({								\
+		if (!__builtin_types_compatible_p(typeof(__dst), char[])) \
+			__knamecpy_requires_character_array_as_destination();	\
+		strncpy((__dst), __src, sizeof(__dst));			\
+		__dst[sizeof(__dst) - 1] = '\0';			\
+		__dst;							\
+	 })
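+
+/*
+ * Usage sketch (added for illustration, not part of the original
+ * header): knamecpy() only accepts a character array destination so
+ * that sizeof() yields the real buffer length; passing a plain char *
+ * triggers a link error on purpose.
+ *
+ *	char name[XNOBJECT_NAME_LEN];
+ *
+ *	knamecpy(name, src);	// truncates and always NUL-terminates
+ */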
+
+#define get_current_uuid() from_kuid_munged(current_user_ns(), current_uid())
+
+#endif /* !_COBALT_KERNEL_ANCILLARIES_H */
+++ linux-patched/include/xenomai/cobalt/kernel/bufd.h	2022-03-21 12:58:31.580867596 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched-quota.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_BUFD_H
+#define _COBALT_KERNEL_BUFD_H
+
+#include <linux/types.h>
+
+/**
+ * @addtogroup cobalt_core_bufd
+ *
+ * @{
+ */
+
+struct mm_struct;
+
+struct xnbufd {
+	caddr_t b_ptr;		/* src/dst buffer address */
+	size_t b_len;		/* total length of buffer */
+	off_t b_off;		/* # of bytes read/written */
+	struct mm_struct *b_mm;	/* src/dst address space */
+	caddr_t b_carry;	/* pointer to carry over area */
+	char b_buf[64];		/* fast carry over area */
+};
+
+void xnbufd_map_umem(struct xnbufd *bufd,
+		     void __user *ptr, size_t len);
+
+static inline void xnbufd_map_uread(struct xnbufd *bufd,
+				    const void __user *ptr, size_t len)
+{
+	xnbufd_map_umem(bufd, (void __user *)ptr, len);
+}
+
+static inline void xnbufd_map_uwrite(struct xnbufd *bufd,
+				     void __user *ptr, size_t len)
+{
+	xnbufd_map_umem(bufd, ptr, len);
+}
+
+ssize_t xnbufd_unmap_uread(struct xnbufd *bufd);
+
+ssize_t xnbufd_unmap_uwrite(struct xnbufd *bufd);
+
+void xnbufd_map_kmem(struct xnbufd *bufd,
+		     void *ptr, size_t len);
+
+static inline void xnbufd_map_kread(struct xnbufd *bufd,
+				    const void *ptr, size_t len)
+{
+	xnbufd_map_kmem(bufd, (void *)ptr, len);
+}
+
+static inline void xnbufd_map_kwrite(struct xnbufd *bufd,
+				     void *ptr, size_t len)
+{
+	xnbufd_map_kmem(bufd, ptr, len);
+}
+
+ssize_t xnbufd_unmap_kread(struct xnbufd *bufd);
+
+ssize_t xnbufd_unmap_kwrite(struct xnbufd *bufd);
+
+ssize_t xnbufd_copy_to_kmem(void *ptr,
+			    struct xnbufd *bufd, size_t len);
+
+ssize_t xnbufd_copy_from_kmem(struct xnbufd *bufd,
+			      void *from, size_t len);
+
+void xnbufd_invalidate(struct xnbufd *bufd);
+
+static inline void xnbufd_reset(struct xnbufd *bufd)
+{
+	bufd->b_off = 0;
+}
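+
+/*
+ * Usage sketch (added for illustration, not part of the original
+ * header): draining a user-space buffer into kernel memory through a
+ * buffer descriptor. u_buf, len and kmem are hypothetical.
+ *
+ *	struct xnbufd bufd;
+ *	char kmem[64];
+ *
+ *	xnbufd_map_uread(&bufd, u_buf, len);
+ *	xnbufd_copy_to_kmem(kmem, &bufd, sizeof(kmem));
+ *	xnbufd_unmap_uread(&bufd);
+ */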
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_BUFD_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched-quota.h	2022-03-21 12:58:31.572867674 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/tree.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_QUOTA_H
+#define _COBALT_KERNEL_SCHED_QUOTA_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-quota.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+
+#define XNSCHED_QUOTA_MIN_PRIO	1
+#define XNSCHED_QUOTA_MAX_PRIO	255
+#define XNSCHED_QUOTA_NR_PRIO	\
+	(XNSCHED_QUOTA_MAX_PRIO - XNSCHED_QUOTA_MIN_PRIO + 1)
+
+extern struct xnsched_class xnsched_class_quota;
+
+struct xnsched_quota_group {
+	struct xnsched *sched;
+	xnticks_t quota_ns;
+	xnticks_t quota_peak_ns;
+	xnticks_t run_start_ns;
+	xnticks_t run_budget_ns;
+	xnticks_t run_credit_ns;
+	struct list_head members;
+	struct list_head expired;
+	struct list_head next;
+	int nr_active;
+	int nr_threads;
+	int tgid;
+	int quota_percent;
+	int quota_peak_percent;
+};
+
+struct xnsched_quota {
+	xnticks_t period_ns;
+	struct xntimer refill_timer;
+	struct xntimer limit_timer;
+	struct list_head groups;
+};
+
+static inline int xnsched_quota_init_thread(struct xnthread *thread)
+{
+	thread->quota = NULL;
+	INIT_LIST_HEAD(&thread->quota_expired);
+
+	return 0;
+}
+
+int xnsched_quota_create_group(struct xnsched_quota_group *tg,
+			       struct xnsched *sched,
+			       int *quota_sum_r);
+
+int xnsched_quota_destroy_group(struct xnsched_quota_group *tg,
+				int force,
+				int *quota_sum_r);
+
+void xnsched_quota_set_limit(struct xnsched_quota_group *tg,
+			     int quota_percent, int quota_peak_percent,
+			     int *quota_sum_r);
+
+struct xnsched_quota_group *
+xnsched_quota_find_group(struct xnsched *sched, int tgid);
+
+int xnsched_quota_sum_all(struct xnsched *sched);
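+
+/*
+ * Minimal usage sketch (illustrative, based only on the declarations
+ * above, given the per-CPU scheduler slot @sched): a group is created
+ * on a scheduler slot, then granted a runtime share and an allowed
+ * peak, both expressed as a percentage of the global period.
+ *
+ *	struct xnsched_quota_group tg;
+ *	int quota_sum;
+ *
+ *	xnsched_quota_create_group(&tg, sched, &quota_sum);
+ *	xnsched_quota_set_limit(&tg, 10, 15, &quota_sum);
+ *	...
+ *	xnsched_quota_destroy_group(&tg, 0, &quota_sum);
+ */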
+
+#endif /* !CONFIG_XENO_OPT_SCHED_QUOTA */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_QUOTA_H */
+++ linux-patched/include/xenomai/cobalt/kernel/tree.h	2022-03-21 12:58:31.565867742 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched-sporadic.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_TREE_H
+#define _COBALT_KERNEL_TREE_H
+
+#include <linux/errno.h>
+#include <linux/rbtree.h>
+#include <cobalt/kernel/assert.h>
+
+typedef unsigned long long xnkey_t;
+
+static inline xnkey_t PTR_KEY(void *p)
+{
+	return (xnkey_t)(long)p;
+}
+
+struct xnid {
+	xnkey_t key;
+	struct rb_node link;
+};
+
+#define xnid_entry(ptr, type, member)					\
+	({								\
+		typeof(ptr) _ptr = (ptr);				\
+		(_ptr ? container_of(_ptr, type, member.link) : NULL);	\
+	})
+
+#define xnid_next_entry(ptr, member)				\
+	xnid_entry(rb_next(&ptr->member.link), typeof(*ptr), member)
+
+static inline void xntree_init(struct rb_root *t)
+{
+	*t = RB_ROOT;
+}
+
+#define xntree_for_each_entry(pos, root, member)			\
+	for (pos = xnid_entry(rb_first(root), typeof(*pos), member);	\
+	     pos; pos = xnid_next_entry(pos, member))
+
+void xntree_cleanup(struct rb_root *t, void *cookie,
+		void (*destroy)(void *cookie, struct xnid *id));
+
+int xnid_enter(struct rb_root *t, struct xnid *xnid, xnkey_t key);
+
+static inline xnkey_t xnid_key(struct xnid *i)
+{
+	return i->key;
+}
+
+static inline
+struct xnid *xnid_fetch(struct rb_root *t, xnkey_t key)
+{
+	struct rb_node *node = t->rb_node;
+
+	while (node) {
+		struct xnid *i = container_of(node, struct xnid, link);
+
+		if (key < i->key)
+			node = node->rb_left;
+		else if (key > i->key)
+			node = node->rb_right;
+		else
+			return i;
+	}
+
+	return NULL;
+}
+
+static inline int xnid_remove(struct rb_root *t, struct xnid *xnid)
+{
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	if (xnid_fetch(t, xnid->key) != xnid)
+		return -ENOENT;
+#endif
+	rb_erase(&xnid->link, t);
+	return 0;
+}
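+
+/*
+ * Embedding sketch (illustrative): an xnid is nested into a keyed
+ * object, entered into an rbtree, then fetched back by key.
+ *
+ *	struct myobj {
+ *		struct xnid xid;
+ *		int payload;
+ *	};
+ *
+ *	struct rb_root root;
+ *	struct myobj obj;
+ *	struct xnid *id;
+ *
+ *	xntree_init(&root);
+ *	xnid_enter(&root, &obj.xid, PTR_KEY(&obj));
+ *	id = xnid_fetch(&root, PTR_KEY(&obj));
+ *	if (id)
+ *		container_of(id, struct myobj, xid)->payload = 1;
+ */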
+
+#endif /* _COBALT_KERNEL_TREE_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched-sporadic.h	2022-03-21 12:58:31.558867811 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/stat.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_SPORADIC_H
+#define _COBALT_KERNEL_SCHED_SPORADIC_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-sporadic.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+
+#define XNSCHED_SPORADIC_MIN_PRIO	1
+#define XNSCHED_SPORADIC_MAX_PRIO	255
+#define XNSCHED_SPORADIC_NR_PRIO	\
+	(XNSCHED_SPORADIC_MAX_PRIO - XNSCHED_SPORADIC_MIN_PRIO + 1)
+
+extern struct xnsched_class xnsched_class_sporadic;
+
+struct xnsched_sporadic_repl {
+	xnticks_t date;
+	xnticks_t amount;
+};
+
+struct xnsched_sporadic_data {
+	xnticks_t resume_date;
+	xnticks_t budget;
+	int repl_in;
+	int repl_out;
+	int repl_pending;
+	struct xntimer repl_timer;
+	struct xntimer drop_timer;
+	struct xnsched_sporadic_repl repl_data[CONFIG_XENO_OPT_SCHED_SPORADIC_MAXREPL];
+	struct xnsched_sporadic_param param;
+	struct xnthread *thread;
+};
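+
+/*
+ * Operating model (as suggested by the fields above, sporadic-server
+ * style): the running budget is consumed while the thread executes at
+ * its foreground priority; drop_timer demotes it once the budget is
+ * exhausted, while repl_timer posts the replenishments queued in
+ * repl_data[] (a small ring indexed by repl_in/repl_out) to restore
+ * the budget at the recorded dates.
+ */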
+
+struct xnsched_sporadic {
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	unsigned long drop_retries;
+#endif
+};
+
+static inline int xnsched_sporadic_init_thread(struct xnthread *thread)
+{
+	thread->pss = NULL;
+
+	return 0;
+}
+
+#endif /* !CONFIG_XENO_OPT_SCHED_SPORADIC */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_SPORADIC_H */
+++ linux-patched/include/xenomai/cobalt/kernel/stat.h	2022-03-21 12:58:31.550867889 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/thread.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2006 Dmitry Adamushko <dmitry.adamushko@gmail.com>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_STAT_H
+#define _COBALT_KERNEL_STAT_H
+
+#include <cobalt/kernel/clock.h>
+
+/**
+ * @ingroup cobalt_core_thread
+ * @defgroup cobalt_core_stat Thread runtime statistics
+ * @{
+ */
+#ifdef CONFIG_XENO_OPT_STATS
+
+typedef struct xnstat_exectime {
+
+	xnticks_t start;   /* Start of execution time accumulation */
+
+	xnticks_t total; /* Accumulated execution time */
+
+} xnstat_exectime_t;
+
+/* Return current date which can be passed to other xnstat services for
+   immediate or lazy accounting. */
+#define xnstat_exectime_now() xnclock_core_read_raw()
+
+/* Accumulate exectime of the current account until the given date. */
+#define xnstat_exectime_update(sched, date) \
+do { \
+	xnticks_t __date = date; \
+	(sched)->current_account->total += \
+		__date - (sched)->last_account_switch; \
+	(sched)->last_account_switch = __date; \
+	/* All changes must be committed before changing the current_account \
+	   reference in sched (required for xnintr_sync_stat_references) */ \
+	smp_wmb(); \
+} while (0)
+
+/* Update the current account reference, returning the previous one. */
+#define xnstat_exectime_set_current(sched, new_account) \
+({ \
+	xnstat_exectime_t *__prev; \
+	__prev = (xnstat_exectime_t *) \
+		atomic_long_xchg((atomic_long_t *)&(sched)->current_account, \
+				 (long)(new_account)); \
+	__prev; \
+})
+
+/* Return the currently active accounting entity. */
+#define xnstat_exectime_get_current(sched) ((sched)->current_account)
+
+/* Finalize an account (no need to accumulate the exectime, just mark the
+   switch date and set the new account). */
+#define xnstat_exectime_finalize(sched, new_account) \
+do { \
+	(sched)->last_account_switch = xnclock_core_read_raw(); \
+	(sched)->current_account = (new_account); \
+} while (0)
+
+/* Obtain content of xnstat_exectime_t */
+#define xnstat_exectime_get_start(account)	((account)->start)
+#define xnstat_exectime_get_total(account)	((account)->total)
+
+/* Obtain last account switch date of considered sched */
+#define xnstat_exectime_get_last_switch(sched)	((sched)->last_account_switch)
+
+/* Reset statistics from inside the accounted entity (e.g. after CPU
+   migration). */
+#define xnstat_exectime_reset_stats(stat) \
+do { \
+	(stat)->total = 0; \
+	(stat)->start = xnclock_core_read_raw(); \
+} while (0)
+
+typedef struct xnstat_counter {
+	unsigned long counter;
+} xnstat_counter_t;
+
+static inline unsigned long xnstat_counter_inc(xnstat_counter_t *c)
+{
+	return c->counter++;
+}
+
+static inline unsigned long xnstat_counter_get(xnstat_counter_t *c)
+{
+	return c->counter;
+}
+
+static inline void xnstat_counter_set(xnstat_counter_t *c, unsigned long value)
+{
+	c->counter = value;
+}
+
+#else /* !CONFIG_XENO_OPT_STATS */
+typedef struct xnstat_exectime {
+} xnstat_exectime_t;
+
+#define xnstat_exectime_now()					({ 0; })
+#define xnstat_exectime_update(sched, date)			do { } while (0)
+#define xnstat_exectime_set_current(sched, new_account)		({ (void)sched; NULL; })
+#define xnstat_exectime_get_current(sched)			({ (void)sched; NULL; })
+#define xnstat_exectime_finalize(sched, new_account)		do { } while (0)
+#define xnstat_exectime_get_start(account)			({ 0; })
+#define xnstat_exectime_get_total(account)			({ 0; })
+#define xnstat_exectime_get_last_switch(sched)			({ 0; })
+#define xnstat_exectime_reset_stats(account)			do { } while (0)
+
+typedef struct xnstat_counter {
+} xnstat_counter_t;
+
+#define xnstat_counter_inc(c) ({ do { } while(0); 0; })
+#define xnstat_counter_get(c) ({ 0; })
+#define xnstat_counter_set(c, value) do { } while (0)
+#endif /* CONFIG_XENO_OPT_STATS */
+
+/* Account the exectime of the current account until now, switch to
+   new_account, and return the previous one. */
+#define xnstat_exectime_switch(sched, new_account) \
+({ \
+	xnstat_exectime_update(sched, xnstat_exectime_now()); \
+	xnstat_exectime_set_current(sched, new_account); \
+})
+
+/* Account the exectime of the current account until given start time, switch
+   to new_account, and return the previous one. */
+#define xnstat_exectime_lazy_switch(sched, new_account, date) \
+({ \
+	xnstat_exectime_update(sched, date); \
+	xnstat_exectime_set_current(sched, new_account); \
+})
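+
+/*
+ * Context-switch accounting sketch (illustrative, given the per-CPU
+ * scheduler @sched and the incoming thread @next): the scheduler
+ * closes the outgoing accounting interval and makes the newcomer's
+ * entity current in one step.
+ *
+ *	xnstat_exectime_t *prev;
+ *
+ *	prev = xnstat_exectime_switch(sched, &next->stat.account);
+ *	... @prev points at the previously active entity ...
+ */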
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_STAT_H */
+++ linux-patched/include/xenomai/cobalt/kernel/thread.h	2022-03-21 12:58:31.543867957 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/select.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_THREAD_H
+#define _COBALT_KERNEL_THREAD_H
+
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <pipeline/thread.h>
+#include <pipeline/inband_work.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/stat.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/registry.h>
+#include <cobalt/kernel/schedparam.h>
+#include <cobalt/kernel/trace.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/uapi/kernel/thread.h>
+#include <cobalt/uapi/signal.h>
+#include <asm/xenomai/machine.h>
+#include <asm/xenomai/thread.h>
+
+/**
+ * @addtogroup cobalt_core_thread
+ * @{
+ */
+#define XNTHREAD_BLOCK_BITS   (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNHELD|XNDBGSTOP)
+#define XNTHREAD_MODE_BITS    (XNRRB|XNWARN|XNTRAPLB)
+
+#define XNTHREAD_SIGDEBUG		0
+#define XNTHREAD_SIGSHADOW_HARDEN	1
+#define XNTHREAD_SIGSHADOW_BACKTRACE	2
+#define XNTHREAD_SIGSHADOW_HOME		3
+#define XNTHREAD_SIGTERM		4
+#define XNTHREAD_MAX_SIGNALS		5
+
+struct xnthread;
+struct xnsched;
+struct xnselector;
+struct xnsched_class;
+struct xnsched_tpslot;
+struct xnthread_personality;
+struct completion;
+
+struct lostage_signal {
+	struct pipeline_inband_work inband_work; /* Must be first. */
+	struct task_struct *task;
+	int signo, sigval;
+	struct lostage_signal *self; /* Revisit: I-pipe requirement */
+};
+
+struct xnthread_init_attr {
+	struct xnthread_personality *personality;
+	cpumask_t affinity;
+	int flags;
+	const char *name;
+};
+
+struct xnthread_start_attr {
+	int mode;
+	void (*entry)(void *cookie);
+	void *cookie;
+};
+
+struct xnthread_wait_context {
+	int posted;
+};
+
+struct xnthread_personality {
+	const char *name;
+	unsigned int magic;
+	int xid;
+	atomic_t refcnt;
+	struct {
+		void *(*attach_process)(void);
+		void (*detach_process)(void *arg);
+		void (*map_thread)(struct xnthread *thread);
+		struct xnthread_personality *(*relax_thread)(struct xnthread *thread);
+		struct xnthread_personality *(*harden_thread)(struct xnthread *thread);
+		struct xnthread_personality *(*move_thread)(struct xnthread *thread,
+							    int dest_cpu);
+		struct xnthread_personality *(*exit_thread)(struct xnthread *thread);
+		struct xnthread_personality *(*finalize_thread)(struct xnthread *thread);
+	} ops;
+	struct module *module;
+};
+
+struct xnthread {
+	struct xnarchtcb tcb;	/* Architecture-dependent block */
+
+	__u32 state;		/* Thread state flags */
+	__u32 info;		/* Thread information flags */
+	__u32 local_info;	/* Local thread information flags */
+
+	struct xnsched *sched;		/* Thread scheduler */
+	struct xnsched_class *sched_class; /* Current scheduling class */
+	struct xnsched_class *base_class; /* Base scheduling class */
+
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	struct xnsched_tpslot *tps;	/* Current partition slot for TP scheduling */
+	struct list_head tp_link;	/* Link in per-sched TP thread queue */
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	struct xnsched_sporadic_data *pss; /* Sporadic scheduling data. */
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	struct xnsched_quota_group *quota; /* Quota scheduling group. */
+	struct list_head quota_expired;
+	struct list_head quota_next;
+#endif
+	cpumask_t affinity;	/* Processor affinity. */
+
+	/** Base priority (before PI/PP boost) */
+	int bprio;
+
+	/** Current (effective) priority */
+	int cprio;
+
+	/**
+	 * Weighted priority (cprio + scheduling class weight).
+	 */
+	int wprio;
+
+	int lock_count;	/** Scheduler lock count. */
+
+	/**
+	 * Thread holder in xnsched run queue. Ordered by
+	 * thread->cprio.
+	 */
+	struct list_head rlink;
+
+	/**
+	 * Thread holder in xnsynch pendq. Prioritized by
+	 * thread->cprio + scheduling class weight.
+	 */
+	struct list_head plink;
+
+	/** Thread holder in global queue. */
+	struct list_head glink;
+
+	/**
+	 * List of xnsynch owned by this thread which cause a priority
+	 * boost due to one of the following reasons:
+	 *
+	 * - they are currently claimed by other thread(s) when
+	 * enforcing the priority inheritance protocol (XNSYNCH_PI).
+	 *
+	 * - they require immediate priority ceiling (XNSYNCH_PP).
+	 *
+	 * This list is ordered by decreasing (weighted) thread
+	 * priorities.
+	 */
+	struct list_head boosters;
+
+	struct xnsynch *wchan;		/* Resource the thread pends on */
+
+	struct xnsynch *wwake;		/* Wait channel the thread was resumed from */
+
+	int res_count;			/* Held resources count */
+
+	struct xntimer rtimer;		/* Resource timer */
+
+	struct xntimer ptimer;		/* Periodic timer */
+
+	xnticks_t rrperiod;		/* Allotted round-robin period (ns) */
+
+	struct xnthread_wait_context *wcontext;	/* Active wait context. */
+
+	struct {
+		xnstat_counter_t ssw;	/* Primary -> secondary mode switch count */
+		xnstat_counter_t csw;	/* Context switches (includes secondary -> primary switches) */
+		xnstat_counter_t xsc;	/* Xenomai syscalls */
+		xnstat_counter_t pf;	/* Number of page faults */
+		xnstat_exectime_t account; /* Execution time accounting entity */
+		xnstat_exectime_t lastperiod; /* Interval marker for execution time reports */
+	} stat;
+
+	struct xnselector *selector;    /* For select. */
+
+	xnhandle_t handle;	/* Handle in registry */
+
+	char name[XNOBJECT_NAME_LEN]; /* Symbolic name of thread */
+
+	void (*entry)(void *cookie); /* Thread entry routine */
+	void *cookie;		/* Cookie to pass to the entry routine */
+
+	/**
+	 * Thread data visible from userland through a window on the
+	 * global heap.
+	 */
+	struct xnthread_user_window *u_window;
+
+	struct xnthread_personality *personality;
+
+	struct completion exited;
+
+#ifdef CONFIG_XENO_OPT_DEBUG
+	const char *exe_path;	/* Executable path */
+	u32 proghash;		/* Hash value for exe_path */
+#endif
+	struct lostage_signal sigarray[XNTHREAD_MAX_SIGNALS];
+};
+
+static inline int xnthread_get_state(const struct xnthread *thread)
+{
+	return thread->state;
+}
+
+static inline int xnthread_test_state(struct xnthread *thread, int bits)
+{
+	return thread->state & bits;
+}
+
+static inline void xnthread_set_state(struct xnthread *thread, int bits)
+{
+	thread->state |= bits;
+}
+
+static inline void xnthread_clear_state(struct xnthread *thread, int bits)
+{
+	thread->state &= ~bits;
+}
+
+static inline int xnthread_test_info(struct xnthread *thread, int bits)
+{
+	return thread->info & bits;
+}
+
+static inline void xnthread_set_info(struct xnthread *thread, int bits)
+{
+	thread->info |= bits;
+}
+
+static inline void xnthread_clear_info(struct xnthread *thread, int bits)
+{
+	thread->info &= ~bits;
+}
+
+static inline int xnthread_test_localinfo(struct xnthread *curr, int bits)
+{
+	return curr->local_info & bits;
+}
+
+static inline void xnthread_set_localinfo(struct xnthread *curr, int bits)
+{
+	curr->local_info |= bits;
+}
+
+static inline void xnthread_clear_localinfo(struct xnthread *curr, int bits)
+{
+	curr->local_info &= ~bits;
+}
+
+static inline struct xnarchtcb *xnthread_archtcb(struct xnthread *thread)
+{
+	return &thread->tcb;
+}
+
+static inline int xnthread_base_priority(const struct xnthread *thread)
+{
+	return thread->bprio;
+}
+
+static inline int xnthread_current_priority(const struct xnthread *thread)
+{
+	return thread->cprio;
+}
+
+static inline struct task_struct *xnthread_host_task(struct xnthread *thread)
+{
+	return xnarch_host_task(xnthread_archtcb(thread));
+}
+
+#define xnthread_for_each_booster(__pos, __thread)		\
+	list_for_each_entry(__pos, &(__thread)->boosters, next)
+
+#define xnthread_for_each_booster_safe(__pos, __tmp, __thread)	\
+	list_for_each_entry_safe(__pos, __tmp, &(__thread)->boosters, next)
+
+#define xnthread_run_handler(__t, __h, __a...)				\
+	do {								\
+		struct xnthread_personality *__p__ = (__t)->personality;	\
+		if ((__p__)->ops.__h)					\
+			(__p__)->ops.__h(__t, ##__a);			\
+	} while (0)
+
+#define xnthread_run_handler_stack(__t, __h, __a...)			\
+	do {								\
+		struct xnthread_personality *__p__ = (__t)->personality;	\
+		do {							\
+			if ((__p__)->ops.__h == NULL)			\
+				break;					\
+			__p__ = (__p__)->ops.__h(__t, ##__a);		\
+		} while (__p__);					\
+	} while (0)
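+
+/*
+ * Note on the two dispatchers above: xnthread_run_handler() invokes
+ * the handler of the thread's current personality only, whereas
+ * xnthread_run_handler_stack() walks the personality chain, each
+ * handler returning the next personality to call (or NULL to stop),
+ * so that stacked personalities all get to see the event.
+ */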
+
+static inline
+struct xnthread_wait_context *xnthread_get_wait_context(struct xnthread *thread)
+{
+	return thread->wcontext;
+}
+
+static inline
+int xnthread_register(struct xnthread *thread, const char *name)
+{
+	return xnregistry_enter(name, thread, &thread->handle, NULL);
+}
+
+static inline
+struct xnthread *xnthread_lookup(xnhandle_t threadh)
+{
+	struct xnthread *thread = xnregistry_lookup(threadh, NULL);
+	return thread && thread->handle == xnhandle_get_index(threadh) ? thread : NULL;
+}
+
+static inline void xnthread_sync_window(struct xnthread *thread)
+{
+	if (thread->u_window) {
+		thread->u_window->state = thread->state;
+		thread->u_window->info = thread->info;
+	}
+}
+
+static inline
+void xnthread_clear_sync_window(struct xnthread *thread, int state_bits)
+{
+	if (thread->u_window) {
+		thread->u_window->state = thread->state & ~state_bits;
+		thread->u_window->info = thread->info;
+	}
+}
+
+static inline
+void xnthread_set_sync_window(struct xnthread *thread, int state_bits)
+{
+	if (thread->u_window) {
+		thread->u_window->state = thread->state | state_bits;
+		thread->u_window->info = thread->info;
+	}
+}
+
+static inline int normalize_priority(int prio)
+{
+	return prio < MAX_RT_PRIO ? prio : MAX_RT_PRIO - 1;
+}
+
+int __xnthread_init(struct xnthread *thread,
+		    const struct xnthread_init_attr *attr,
+		    struct xnsched *sched,
+		    struct xnsched_class *sched_class,
+		    const union xnsched_policy_param *sched_param);
+
+void __xnthread_test_cancel(struct xnthread *curr);
+
+void __xnthread_cleanup(struct xnthread *curr);
+
+void __xnthread_discard(struct xnthread *thread);
+
+/**
+ * @fn struct xnthread *xnthread_current(void)
+ * @brief Retrieve the current Cobalt core TCB.
+ *
+ * Returns the address of the current Cobalt core thread descriptor,
+ * or NULL if running over a regular Linux task. This call is not
+ * affected by the current runtime mode of the core thread.
+ *
+ * @note The returned value may differ from xnsched_current_thread()
+ * called from the same context, since the latter returns the root
+ * thread descriptor for the current CPU if the caller is running in
+ * secondary mode.
+ *
+ * @coretags{unrestricted}
+ */
+static inline struct xnthread *xnthread_current(void)
+{
+	return pipeline_current()->thread;
+}
+
+/**
+ * @fn struct xnthread *xnthread_from_task(struct task_struct *p)
+ * @brief Retrieve the Cobalt core TCB attached to a Linux task.
+ *
+ * Returns the address of the Cobalt core thread descriptor attached
+ * to the Linux task @a p, or NULL if @a p is a regular Linux
+ * task. This call is not affected by the current runtime mode of the
+ * core thread.
+ *
+ * @coretags{unrestricted}
+ */
+static inline struct xnthread *xnthread_from_task(struct task_struct *p)
+{
+	return pipeline_thread_from_task(p);
+}
+
+/**
+ * @fn void xnthread_test_cancel(void)
+ * @brief Introduce a thread cancellation point.
+ *
+ * Terminates the current thread if a cancellation request is pending
+ * for it, i.e. if xnthread_cancel() was called.
+ *
+ * @coretags{mode-unrestricted}
+ */
+static inline void xnthread_test_cancel(void)
+{
+	struct xnthread *curr = xnthread_current();
+
+	if (curr && xnthread_test_info(curr, XNCANCELD))
+		__xnthread_test_cancel(curr);
+}
+
+static inline
+void xnthread_complete_wait(struct xnthread_wait_context *wc)
+{
+	wc->posted = 1;
+}
+
+static inline
+int xnthread_wait_complete_p(struct xnthread_wait_context *wc)
+{
+	return wc->posted;
+}
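+
+/*
+ * Wait-context sketch (illustrative): the sleeper publishes a context
+ * before blocking, which the waker retrieves to post its result.
+ *
+ *	struct my_wait {
+ *		struct xnthread_wait_context wc;
+ *		int result;
+ *	} wait;
+ *
+ *	Sleeper:
+ *		xnthread_prepare_wait(&wait.wc);
+ *		... block on a synchronization object ...
+ *
+ *	Waker (given the sleeping thread @sleeper):
+ *		struct xnthread_wait_context *wc;
+ *		wc = xnthread_get_wait_context(sleeper);
+ *		container_of(wc, struct my_wait, wc)->result = 0;
+ *		xnthread_complete_wait(wc);
+ */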
+
+#ifdef CONFIG_XENO_ARCH_FPU
+void xnthread_switch_fpu(struct xnsched *sched);
+#else
+static inline void xnthread_switch_fpu(struct xnsched *sched) { }
+#endif /* CONFIG_XENO_ARCH_FPU */
+
+void xnthread_deregister(struct xnthread *thread);
+
+char *xnthread_format_status(unsigned long status,
+			     char *buf, int size);
+
+pid_t xnthread_host_pid(struct xnthread *thread);
+
+int xnthread_set_clock(struct xnthread *thread,
+		       struct xnclock *newclock);
+
+xnticks_t xnthread_get_timeout(struct xnthread *thread,
+			       xnticks_t ns);
+
+xnticks_t xnthread_get_period(struct xnthread *thread);
+
+void xnthread_prepare_wait(struct xnthread_wait_context *wc);
+
+int xnthread_init(struct xnthread *thread,
+		  const struct xnthread_init_attr *attr,
+		  struct xnsched_class *sched_class,
+		  const union xnsched_policy_param *sched_param);
+
+int xnthread_start(struct xnthread *thread,
+		   const struct xnthread_start_attr *attr);
+
+int xnthread_set_mode(int clrmask,
+		      int setmask);
+
+void xnthread_suspend(struct xnthread *thread,
+		      int mask,
+		      xnticks_t timeout,
+		      xntmode_t timeout_mode,
+		      struct xnsynch *wchan);
+
+void xnthread_resume(struct xnthread *thread,
+		     int mask);
+
+int xnthread_unblock(struct xnthread *thread);
+
+int xnthread_set_periodic(struct xnthread *thread,
+			  xnticks_t idate,
+			  xntmode_t timeout_mode,
+			  xnticks_t period);
+
+int xnthread_wait_period(unsigned long *overruns_r);
+
+int xnthread_set_slice(struct xnthread *thread,
+		       xnticks_t quantum);
+
+void xnthread_cancel(struct xnthread *thread);
+
+int xnthread_join(struct xnthread *thread, bool uninterruptible);
+
+int xnthread_harden(void);
+
+void xnthread_relax(int notify, int reason);
+
+void __xnthread_kick(struct xnthread *thread);
+
+void xnthread_kick(struct xnthread *thread);
+
+void __xnthread_demote(struct xnthread *thread);
+
+void xnthread_demote(struct xnthread *thread);
+
+void __xnthread_signal(struct xnthread *thread, int sig, int arg);
+
+void xnthread_signal(struct xnthread *thread, int sig, int arg);
+
+void xnthread_pin_initial(struct xnthread *thread);
+
+void xnthread_call_mayday(struct xnthread *thread, int reason);
+
+static inline void xnthread_get_resource(struct xnthread *curr)
+{
+	if (xnthread_test_state(curr, XNWEAK|XNDEBUG))
+		curr->res_count++;
+}
+
+static inline int xnthread_put_resource(struct xnthread *curr)
+{
+	if (xnthread_test_state(curr, XNWEAK) ||
+	    IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP)) {
+		if (unlikely(curr->res_count == 0)) {
+			if (xnthread_test_state(curr, XNWARN))
+				xnthread_signal(curr, SIGDEBUG,
+						SIGDEBUG_RESCNT_IMBALANCE);
+			return -EPERM;
+		}
+		curr->res_count--;
+	}
+
+	return 0;
+}
+
+static inline void xnthread_commit_ceiling(struct xnthread *curr)
+{
+	if (curr->u_window->pp_pending)
+		xnsynch_commit_ceiling(curr);
+}
+
+#ifdef CONFIG_SMP
+
+void xnthread_migrate_passive(struct xnthread *thread,
+			      struct xnsched *sched);
+#else
+
+static inline void xnthread_migrate_passive(struct xnthread *thread,
+					    struct xnsched *sched)
+{ }
+
+#endif
+
+int __xnthread_set_schedparam(struct xnthread *thread,
+			      struct xnsched_class *sched_class,
+			      const union xnsched_policy_param *sched_param);
+
+int xnthread_set_schedparam(struct xnthread *thread,
+			    struct xnsched_class *sched_class,
+			    const union xnsched_policy_param *sched_param);
+
+int xnthread_killall(int grace, int mask);
+
+void __xnthread_propagate_schedparam(struct xnthread *curr);
+
+static inline void xnthread_propagate_schedparam(struct xnthread *curr)
+{
+	if (xnthread_test_info(curr, XNSCHEDP))
+		__xnthread_propagate_schedparam(curr);
+}
+
+extern struct xnthread_personality xenomai_personality;
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_THREAD_H */
+++ linux-patched/include/xenomai/cobalt/kernel/select.h	2022-03-21 12:58:31.535868035 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/lock.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Efixo <gilles.chanteperdrix@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SELECT_H
+#define _COBALT_KERNEL_SELECT_H
+
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/thread.h>
+
+/**
+ * @addtogroup cobalt_core_select
+ * @{
+ */
+
+#define XNSELECT_READ      0
+#define XNSELECT_WRITE     1
+#define XNSELECT_EXCEPT    2
+#define XNSELECT_MAX_TYPES 3
+
+struct xnselector {
+	struct xnsynch synchbase;
+	struct fds {
+		fd_set expected;
+		fd_set pending;
+	} fds [XNSELECT_MAX_TYPES];
+	struct list_head destroy_link;
+	struct list_head bindings; /* only used by xnselector_destroy */
+};
+
+#define __NFDBITS__	(8 * sizeof(unsigned long))
+#define __FDSET_LONGS__	(__FD_SETSIZE/__NFDBITS__)
+#define	__FDELT__(d)	((d) / __NFDBITS__)
+#define	__FDMASK__(d)	(1UL << ((d) % __NFDBITS__))
+
+static inline void __FD_SET__(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+        unsigned long __tmp = __fd / __NFDBITS__;
+        unsigned long __rem = __fd % __NFDBITS__;
+        __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
+}
+
+static inline void __FD_CLR__(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+        unsigned long __tmp = __fd / __NFDBITS__;
+        unsigned long __rem = __fd % __NFDBITS__;
+        __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
+}
+
+static inline int __FD_ISSET__(unsigned long __fd, const __kernel_fd_set *__p)
+{
+        unsigned long __tmp = __fd / __NFDBITS__;
+        unsigned long __rem = __fd % __NFDBITS__;
+        return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
+}
+
+static inline void __FD_ZERO__(__kernel_fd_set *__p)
+{
+	unsigned long *__tmp = __p->fds_bits;
+	int __i;
+
+	__i = __FDSET_LONGS__;
+	while (__i) {
+		__i--;
+		*__tmp = 0;
+		__tmp++;
+	}
+}
+
+struct xnselect {
+	struct list_head bindings;
+};
+
+#define DECLARE_XNSELECT(name) struct xnselect name
+
+struct xnselect_binding {
+	struct xnselector *selector;
+	struct xnselect *fd;
+	unsigned int type;
+	unsigned int bit_index;
+	struct list_head link;  /* link in selected fds list. */
+	struct list_head slink; /* link in selector list */
+};
+
+void xnselect_init(struct xnselect *select_block);
+
+int xnselect_bind(struct xnselect *select_block,
+		  struct xnselect_binding *binding,
+		  struct xnselector *selector,
+		  unsigned int type,
+		  unsigned int bit_index,
+		  unsigned int state);
+
+int __xnselect_signal(struct xnselect *select_block, unsigned int state);
+
+/**
+ * Signal a file descriptor state change.
+ *
+ * @param select_block pointer to an @a xnselect structure representing the file
+ * descriptor whose state changed;
+ * @param state new value of the state.
+ *
+ * @retval 1 if rescheduling is needed;
+ * @retval 0 otherwise.
+ */
+static inline int
+xnselect_signal(struct xnselect *select_block, unsigned int state)
+{
+	if (!list_empty(&select_block->bindings))
+		return __xnselect_signal(select_block, state);
+
+	return 0;
+}
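+
+/*
+ * Driver-side sketch (illustrative; xnsched_run() is assumed to be the
+ * rescheduling entry point here): a pollable resource embeds one
+ * xnselect block per event type and signals state changes on it.
+ *
+ *	DECLARE_XNSELECT(read_select);
+ *	...
+ *	xnselect_init(&read_select);
+ *	...
+ *	if (xnselect_signal(&read_select, data_available))
+ *		xnsched_run();
+ */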
+
+void xnselect_destroy(struct xnselect *select_block);
+
+int xnselector_init(struct xnselector *selector);
+
+int xnselect(struct xnselector *selector,
+	     fd_set *out_fds[XNSELECT_MAX_TYPES],
+	     fd_set *in_fds[XNSELECT_MAX_TYPES],
+	     int nfds,
+	     xnticks_t timeout, xntmode_t timeout_mode);
+
+void xnselector_destroy(struct xnselector *selector);
+
+int xnselect_mount(void);
+
+int xnselect_umount(void);
+
+/** @} */
+
+#endif /* _COBALT_KERNEL_SELECT_H */
+++ linux-patched/include/xenomai/cobalt/kernel/lock.h	2022-03-21 12:58:31.528868103 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/heap.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2008,2012 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004,2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_LOCK_H
+#define _COBALT_KERNEL_LOCK_H
+
+#include <pipeline/lock.h>
+#include <linux/percpu.h>
+#include <cobalt/kernel/assert.h>
+#include <pipeline/pipeline.h>
+
+/**
+ * @addtogroup cobalt_core_lock
+ *
+ * @{
+ */
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+
+struct xnlock {
+	unsigned owner;
+	arch_spinlock_t alock;
+	const char *file;
+	const char *function;
+	unsigned int line;
+	int cpu;
+	unsigned long long spin_time;
+	unsigned long long lock_date;
+};
+
+struct xnlockinfo {
+	unsigned long long spin_time;
+	unsigned long long lock_time;
+	const char *file;
+	const char *function;
+	unsigned int line;
+};
+
+#define XNARCH_LOCK_UNLOCKED (struct xnlock) {	\
+	~0,					\
+	__ARCH_SPIN_LOCK_UNLOCKED,		\
+	NULL,					\
+	NULL,					\
+	0,					\
+	-1,					\
+	0LL,					\
+	0LL,					\
+}
+
+#define XNLOCK_DBG_CONTEXT		, __FILE__, __LINE__, __FUNCTION__
+#define XNLOCK_DBG_CONTEXT_ARGS					\
+	, const char *file, int line, const char *function
+#define XNLOCK_DBG_PASS_CONTEXT		, file, line, function
+
+void xnlock_dbg_prepare_acquire(unsigned long long *start);
+void xnlock_dbg_prepare_spin(unsigned int *spin_limit);
+void xnlock_dbg_acquired(struct xnlock *lock, int cpu,
+			 unsigned long long *start,
+			 const char *file, int line,
+			 const char *function);
+int xnlock_dbg_release(struct xnlock *lock,
+			 const char *file, int line,
+			 const char *function);
+
+DECLARE_PER_CPU(struct xnlockinfo, xnlock_stats);
+
+#else /* !CONFIG_XENO_OPT_DEBUG_LOCKING */
+
+struct xnlock {
+	unsigned owner;
+	arch_spinlock_t alock;
+};
+
+#define XNARCH_LOCK_UNLOCKED			\
+	(struct xnlock) {			\
+		~0,				\
+		__ARCH_SPIN_LOCK_UNLOCKED,	\
+	}
+
+#define XNLOCK_DBG_CONTEXT
+#define XNLOCK_DBG_CONTEXT_ARGS
+#define XNLOCK_DBG_PASS_CONTEXT
+
+static inline
+void xnlock_dbg_prepare_acquire(unsigned long long *start)
+{
+}
+
+static inline
+void xnlock_dbg_prepare_spin(unsigned int *spin_limit)
+{
+}
+
+static inline void
+xnlock_dbg_acquired(struct xnlock *lock, int cpu,
+		    unsigned long long *start)
+{
+}
+
+static inline int xnlock_dbg_release(struct xnlock *lock)
+{
+	return 0;
+}
+
+#endif /* !CONFIG_XENO_OPT_DEBUG_LOCKING */
+
+#if defined(CONFIG_SMP) || defined(CONFIG_XENO_OPT_DEBUG_LOCKING)
+
+#define xnlock_get(lock)		__xnlock_get(lock  XNLOCK_DBG_CONTEXT)
+#define xnlock_put(lock)		__xnlock_put(lock  XNLOCK_DBG_CONTEXT)
+#define xnlock_get_irqsave(lock,x) \
+	((x) = __xnlock_get_irqsave(lock  XNLOCK_DBG_CONTEXT))
+#define xnlock_put_irqrestore(lock,x) \
+	__xnlock_put_irqrestore(lock,x  XNLOCK_DBG_CONTEXT)
+#define xnlock_clear_irqoff(lock)	xnlock_put_irqrestore(lock, 1)
+#define xnlock_clear_irqon(lock)	xnlock_put_irqrestore(lock, 0)
+
+static inline void xnlock_init (struct xnlock *lock)
+{
+	*lock = XNARCH_LOCK_UNLOCKED;
+}
+
+#define DECLARE_XNLOCK(lock)		struct xnlock lock
+#define DECLARE_EXTERN_XNLOCK(lock)	extern struct xnlock lock
+#define DEFINE_XNLOCK(lock)		struct xnlock lock = XNARCH_LOCK_UNLOCKED
+#define DEFINE_PRIVATE_XNLOCK(lock)	static DEFINE_XNLOCK(lock)
+
+static inline int ____xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	int cpu = raw_smp_processor_id();
+	unsigned long long start;
+
+	if (lock->owner == cpu)
+		return 2;
+
+	xnlock_dbg_prepare_acquire(&start);
+
+	arch_spin_lock(&lock->alock);
+	lock->owner = cpu;
+
+	xnlock_dbg_acquired(lock, cpu, &start /*, */ XNLOCK_DBG_PASS_CONTEXT);
+
+	return 0;
+}
+
+static inline void ____xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	if (xnlock_dbg_release(lock /*, */ XNLOCK_DBG_PASS_CONTEXT))
+		return;
+
+	lock->owner = ~0U;
+	arch_spin_unlock(&lock->alock);
+}
+
+#ifndef CONFIG_XENO_ARCH_OUTOFLINE_XNLOCK
+#define ___xnlock_get ____xnlock_get
+#define ___xnlock_put ____xnlock_put
+#else /* out of line xnlock */
+int ___xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS);
+
+void ___xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS);
+#endif /* out of line xnlock */
+
+static inline spl_t
+__xnlock_get_irqsave(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	unsigned long flags;
+
+	splhigh(flags);
+
+	if (__locking_active__)
+		flags |= ___xnlock_get(lock /*, */ XNLOCK_DBG_PASS_CONTEXT);
+
+	return flags;
+}
+
+static inline void __xnlock_put_irqrestore(struct xnlock *lock, spl_t flags
+					   /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	/* Only release the lock if we didn't take it recursively. */
+	if (__locking_active__ && !(flags & 2))
+		___xnlock_put(lock /*, */ XNLOCK_DBG_PASS_CONTEXT);
+
+	splexit(flags & 1);
+}
+
+static inline int xnlock_is_owner(struct xnlock *lock)
+{
+	if (__locking_active__)
+		return lock->owner == raw_smp_processor_id();
+
+	return 1;
+}
+
+static inline int __xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	if (__locking_active__)
+		return ___xnlock_get(lock /* , */ XNLOCK_DBG_PASS_CONTEXT);
+
+	return 0;
+}
+
+static inline void __xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	if (__locking_active__)
+		___xnlock_put(lock /*, */ XNLOCK_DBG_PASS_CONTEXT);
+}
+
+#undef __locking_active__
+
+#else /* !(CONFIG_SMP || CONFIG_XENO_OPT_DEBUG_LOCKING) */
+
+#define xnlock_init(lock)		do { } while(0)
+#define xnlock_get(lock)		do { } while(0)
+#define xnlock_put(lock)		do { } while(0)
+#define xnlock_get_irqsave(lock,x)	splhigh(x)
+#define xnlock_put_irqrestore(lock,x)	splexit(x)
+#define xnlock_clear_irqoff(lock)	splmax()
+#define xnlock_clear_irqon(lock)	splnone()
+#define xnlock_is_owner(lock)		1
+
+#define DECLARE_XNLOCK(lock)
+#define DECLARE_EXTERN_XNLOCK(lock)
+#define DEFINE_XNLOCK(lock)
+#define DEFINE_PRIVATE_XNLOCK(lock)
+
+#endif /* !(CONFIG_SMP || CONFIG_XENO_OPT_DEBUG_LOCKING) */
+
+DECLARE_EXTERN_XNLOCK(nklock);
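+
+/*
+ * Critical section sketch (illustrative): the big Cobalt lock is
+ * grabbed with interrupts (virtually) masked, then restored on exit.
+ *
+ *	spl_t s;
+ *
+ *	xnlock_get_irqsave(&nklock, s);
+ *	... access core state ...
+ *	xnlock_put_irqrestore(&nklock, s);
+ */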
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_LOCK_H */
+++ linux-patched/include/xenomai/cobalt/kernel/heap.h	2022-03-21 12:58:31.521868171 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/trace.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_HEAP_H
+#define _COBALT_KERNEL_HEAP_H
+
+#include <linux/string.h>
+#include <linux/rbtree.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/uapi/kernel/types.h>
+#include <cobalt/uapi/kernel/heap.h>
+
+/**
+ * @addtogroup cobalt_core_heap
+ * @{
+ */
+
+#define XNHEAP_PAGE_SHIFT	9 /* 2^9 => 512 bytes */
+#define XNHEAP_PAGE_SIZE	(1UL << XNHEAP_PAGE_SHIFT)
+#define XNHEAP_PAGE_MASK	(~(XNHEAP_PAGE_SIZE - 1))
+#define XNHEAP_MIN_LOG2		4 /* 16 bytes */
+/*
+ * Use bucketed memory for sizes between 2^XNHEAP_MIN_LOG2 and
+ * 2^(XNHEAP_PAGE_SHIFT-1).
+ */
+#define XNHEAP_MAX_BUCKETS	(XNHEAP_PAGE_SHIFT - XNHEAP_MIN_LOG2)
+#define XNHEAP_MIN_ALIGN	(1U << XNHEAP_MIN_LOG2)
+/* Maximum size of a heap (4GB - PAGE_SIZE). */
+#define XNHEAP_MAX_HEAPSZ	(4294967295U - PAGE_SIZE + 1)
+/* Bits we need for encoding a page # */
+#define XNHEAP_PGENT_BITS      (32 - XNHEAP_PAGE_SHIFT)
+/* Each page is represented by a page map entry. */
+#define XNHEAP_PGMAP_BYTES	sizeof(struct xnheap_pgentry)
+
+struct xnheap_pgentry {
+	/* Linkage in bucket list. */
+	unsigned int prev : XNHEAP_PGENT_BITS;
+	unsigned int next : XNHEAP_PGENT_BITS;
+	/*  page_list or log2. */
+	unsigned int type : 6;
+	/*
+	 * We hold either a spatial map of busy blocks within the page
+	 * for bucketed memory (up to 32 blocks per page), or the
+	 * overall size of the multi-page block if entry.type ==
+	 * page_list.
+	 */
+	union {
+		u32 map;
+		u32 bsize;
+	};
+};
+
+/*
+ * A range descriptor is stored at the beginning of the first page of
+ * a range of free pages. xnheap_range.size is nrpages *
+ * XNHEAP_PAGE_SIZE. Ranges are indexed by address and size in
+ * rbtrees.
+ */
+struct xnheap_range {
+	struct rb_node addr_node;
+	struct rb_node size_node;
+	size_t size;
+};
+
+struct xnheap {
+	void *membase;
+	struct rb_root addr_tree;
+	struct rb_root size_tree;
+	struct xnheap_pgentry *pagemap;
+	size_t usable_size;
+	size_t used_size;
+	u32 buckets[XNHEAP_MAX_BUCKETS];
+	char name[XNOBJECT_NAME_LEN];
+	DECLARE_XNLOCK(lock);
+	struct list_head next;
+};
+
+extern struct xnheap cobalt_heap;
+
+#define xnmalloc(size)     xnheap_alloc(&cobalt_heap, size)
+#define xnfree(ptr)        xnheap_free(&cobalt_heap, ptr)
+
+static inline void *xnheap_get_membase(const struct xnheap *heap)
+{
+	return heap->membase;
+}
+
+static inline
+size_t xnheap_get_size(const struct xnheap *heap)
+{
+	return heap->usable_size;
+}
+
+static inline
+size_t xnheap_get_used(const struct xnheap *heap)
+{
+	return heap->used_size;
+}
+
+static inline
+size_t xnheap_get_free(const struct xnheap *heap)
+{
+	return heap->usable_size - heap->used_size;
+}
+
+int xnheap_init(struct xnheap *heap,
+		void *membase, size_t size);
+
+void xnheap_destroy(struct xnheap *heap);
+
+void *xnheap_alloc(struct xnheap *heap, size_t size);
+
+void xnheap_free(struct xnheap *heap, void *block);
+
+ssize_t xnheap_check_block(struct xnheap *heap, void *block);
+
+void xnheap_set_name(struct xnheap *heap,
+		     const char *name, ...);
+
+void *xnheap_vmalloc(size_t size);
+
+void xnheap_vfree(void *p);
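+
+/*
+ * Setup sketch (illustrative): a heap is overlaid on a caller-provided
+ * memory range; xnheap_vmalloc()/xnheap_vfree() may provide the
+ * backing store.
+ *
+ *	struct xnheap heap;
+ *	void *mem = xnheap_vmalloc(SZ_1M);
+ *
+ *	if (mem && xnheap_init(&heap, mem, SZ_1M) == 0) {
+ *		void *p = xnheap_alloc(&heap, 128);
+ *		...
+ *		xnheap_free(&heap, p);
+ *		xnheap_destroy(&heap);
+ *	}
+ *	xnheap_vfree(mem);
+ */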
+
+static inline void *xnheap_zalloc(struct xnheap *heap, size_t size)
+{
+	void *p;
+
+	p = xnheap_alloc(heap, size);
+	if (p)
+		memset(p, 0, size);
+
+	return p;
+}
+
+static inline char *xnstrdup(const char *s)
+{
+	char *p;
+
+	p = xnmalloc(strlen(s) + 1);
+	if (p == NULL)
+		return NULL;
+
+	return strcpy(p, s);
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+void xnheap_init_proc(void);
+void xnheap_cleanup_proc(void);
+#else /* !CONFIG_XENO_OPT_VFILE */
+static inline void xnheap_init_proc(void) { }
+static inline void xnheap_cleanup_proc(void) { }
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_HEAP_H */
+++ linux-patched/include/xenomai/cobalt/kernel/trace.h	2022-03-21 12:58:31.513868249 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/clock.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_TRACE_H
+#define _COBALT_KERNEL_TRACE_H
+
+#include <pipeline/trace.h>
+
+#endif /* !_COBALT_KERNEL_TRACE_H */
+++ linux-patched/include/xenomai/cobalt/kernel/clock.h	2022-03-21 12:58:31.506868318 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/list.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006,2007 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_CLOCK_H
+#define _COBALT_KERNEL_CLOCK_H
+
+#include <pipeline/pipeline.h>
+#include <pipeline/clock.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/vfile.h>
+#include <cobalt/uapi/kernel/types.h>
+#include <asm/xenomai/wrappers.h>
+
+/**
+ * @addtogroup cobalt_core_clock
+ * @{
+ */
+
+struct xnsched;
+struct xntimerdata;
+struct __kernel_timex;
+
+struct xnclock_gravity {
+	unsigned long irq;
+	unsigned long kernel;
+	unsigned long user;
+};
+
+struct xnclock {
+	/** (ns) */
+	xnsticks_t wallclock_offset;
+	/** (ns) */
+	xnticks_t resolution;
+	/** (raw clock ticks). */
+	struct xnclock_gravity gravity;
+	/** Clock name. */
+	const char *name;
+	struct {
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+		xnticks_t (*read_raw)(struct xnclock *clock);
+		xnticks_t (*read_monotonic)(struct xnclock *clock);
+		int (*set_time)(struct xnclock *clock,
+				const struct timespec64 *ts);
+		xnsticks_t (*ns_to_ticks)(struct xnclock *clock,
+					  xnsticks_t ns);
+		xnsticks_t (*ticks_to_ns)(struct xnclock *clock,
+					  xnsticks_t ticks);
+		xnsticks_t (*ticks_to_ns_rounded)(struct xnclock *clock,
+						  xnsticks_t ticks);
+		void (*program_local_shot)(struct xnclock *clock,
+					   struct xnsched *sched);
+		void (*program_remote_shot)(struct xnclock *clock,
+					    struct xnsched *sched);
+#endif
+		int (*adjust_time)(struct xnclock *clock,
+				   struct __kernel_timex *tx);
+		int (*set_gravity)(struct xnclock *clock,
+				   const struct xnclock_gravity *p);
+		void (*reset_gravity)(struct xnclock *clock);
+#ifdef CONFIG_XENO_OPT_VFILE
+		void (*print_status)(struct xnclock *clock,
+				     struct xnvfile_regular_iterator *it);
+#endif
+	} ops;
+	/* Private section. */
+	struct xntimerdata *timerdata;
+	int id;
+#ifdef CONFIG_SMP
+	/** Possible CPU affinity of clock beat. */
+	cpumask_t affinity;
+#endif
+#ifdef CONFIG_XENO_OPT_STATS
+	struct xnvfile_snapshot timer_vfile;
+	struct xnvfile_rev_tag timer_revtag;
+	struct list_head timerq;
+	int nrtimers;
+#endif /* CONFIG_XENO_OPT_STATS */
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct xnvfile_regular vfile;
+#endif
+};
+
+struct xnclock_ratelimit_state {
+	xnticks_t interval;
+	xnticks_t begin;
+	int burst;
+	int printed;
+	int missed;
+};
+
+extern struct xnclock nkclock;
+
+int xnclock_register(struct xnclock *clock,
+		     const cpumask_t *affinity);
+
+void xnclock_deregister(struct xnclock *clock);
+
+void xnclock_tick(struct xnclock *clock);
+
+void xnclock_core_local_shot(struct xnsched *sched);
+
+void xnclock_core_remote_shot(struct xnsched *sched);
+
+xnsticks_t xnclock_core_ns_to_ticks(xnsticks_t ns);
+
+xnsticks_t xnclock_core_ticks_to_ns(xnsticks_t ticks);
+
+xnsticks_t xnclock_core_ticks_to_ns_rounded(xnsticks_t ticks);
+
+xnticks_t xnclock_core_read_monotonic(void);
+
+static inline xnticks_t xnclock_core_read_raw(void)
+{
+	return pipeline_read_cycle_counter();
+}
+
+/* We use the Linux defaults */
+#define XN_RATELIMIT_INTERVAL	5000000000LL
+#define XN_RATELIMIT_BURST	10
+
+int __xnclock_ratelimit(struct xnclock_ratelimit_state *rs, const char *func);
+
+#define xnclock_ratelimit()	({					\
+	static struct xnclock_ratelimit_state __state = {		\
+		.interval	= XN_RATELIMIT_INTERVAL,		\
+		.burst		= XN_RATELIMIT_BURST,			\
+	};								\
+	__xnclock_ratelimit(&__state, __func__);			\
+})
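+
+/*
+ * Usage sketch (illustrative, mirroring printk_ratelimit()): guard a
+ * diagnostic which may trigger from a hot path.
+ *
+ *	if (xnclock_ratelimit())
+ *		printk(KERN_WARNING "spurious clock event\n");
+ */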
+
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+
+static inline void xnclock_program_shot(struct xnclock *clock,
+					struct xnsched *sched)
+{
+	if (likely(clock == &nkclock))
+		xnclock_core_local_shot(sched);
+	else if (clock->ops.program_local_shot)
+		clock->ops.program_local_shot(clock, sched);
+}
+
+static inline void xnclock_remote_shot(struct xnclock *clock,
+				       struct xnsched *sched)
+{
+#ifdef CONFIG_SMP
+	if (likely(clock == &nkclock))
+		xnclock_core_remote_shot(sched);
+	else if (clock->ops.program_remote_shot)
+		clock->ops.program_remote_shot(clock, sched);
+#endif
+}
+
+static inline xnticks_t xnclock_read_raw(struct xnclock *clock)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_read_raw();
+
+	return clock->ops.read_raw(clock);
+}
+
+static inline xnsticks_t xnclock_ns_to_ticks(struct xnclock *clock,
+					     xnsticks_t ns)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_ns_to_ticks(ns);
+
+	return clock->ops.ns_to_ticks(clock, ns);
+}
+
+static inline xnsticks_t xnclock_ticks_to_ns(struct xnclock *clock,
+					     xnsticks_t ticks)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_ticks_to_ns(ticks);
+
+	return clock->ops.ticks_to_ns(clock, ticks);
+}
+
+static inline xnsticks_t xnclock_ticks_to_ns_rounded(struct xnclock *clock,
+						     xnsticks_t ticks)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_ticks_to_ns_rounded(ticks);
+
+	return clock->ops.ticks_to_ns_rounded(clock, ticks);
+}
+
+static inline xnticks_t xnclock_read_monotonic(struct xnclock *clock)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_read_monotonic();
+
+	return clock->ops.read_monotonic(clock);
+}
+
+static inline int xnclock_set_time(struct xnclock *clock,
+				   const struct timespec64 *ts)
+{
+	if (likely(clock == &nkclock))
+		return -EINVAL;
+
+	return clock->ops.set_time(clock, ts);
+}
+
+#else /* !CONFIG_XENO_OPT_EXTCLOCK */
+
+static inline void xnclock_program_shot(struct xnclock *clock,
+					struct xnsched *sched)
+{
+	xnclock_core_local_shot(sched);
+}
+
+static inline void xnclock_remote_shot(struct xnclock *clock,
+				       struct xnsched *sched)
+{
+#ifdef CONFIG_SMP
+	xnclock_core_remote_shot(sched);
+#endif
+}
+
+static inline xnticks_t xnclock_read_raw(struct xnclock *clock)
+{
+	return xnclock_core_read_raw();
+}
+
+static inline xnsticks_t xnclock_ns_to_ticks(struct xnclock *clock,
+					     xnsticks_t ns)
+{
+	return xnclock_core_ns_to_ticks(ns);
+}
+
+static inline xnsticks_t xnclock_ticks_to_ns(struct xnclock *clock,
+					     xnsticks_t ticks)
+{
+	return xnclock_core_ticks_to_ns(ticks);
+}
+
+static inline xnsticks_t xnclock_ticks_to_ns_rounded(struct xnclock *clock,
+						     xnsticks_t ticks)
+{
+	return xnclock_core_ticks_to_ns_rounded(ticks);
+}
+
+static inline xnticks_t xnclock_read_monotonic(struct xnclock *clock)
+{
+	return xnclock_core_read_monotonic();
+}
+
+static inline int xnclock_set_time(struct xnclock *clock,
+				   const struct timespec64 *ts)
+{
+	/*
+	 * There is no way to change the core clock's idea of time.
+	 */
+	return -EINVAL;
+}
+
+#endif /* !CONFIG_XENO_OPT_EXTCLOCK */
+
+static inline int xnclock_adjust_time(struct xnclock *clock,
+				      struct __kernel_timex *tx)
+{
+	if (clock->ops.adjust_time == NULL)
+		return -EOPNOTSUPP;
+
+	return clock->ops.adjust_time(clock, tx);
+}
+
+static inline xnticks_t xnclock_get_offset(struct xnclock *clock)
+{
+	return clock->wallclock_offset;
+}
+
+static inline xnticks_t xnclock_get_resolution(struct xnclock *clock)
+{
+	return clock->resolution; /* ns */
+}
+
+static inline void xnclock_set_resolution(struct xnclock *clock,
+					  xnticks_t resolution)
+{
+	clock->resolution = resolution; /* ns */
+}
+
+static inline int xnclock_set_gravity(struct xnclock *clock,
+				      const struct xnclock_gravity *gravity)
+{
+	if (clock->ops.set_gravity)
+		return clock->ops.set_gravity(clock, gravity);
+
+	return -EINVAL;
+}
+
+static inline void xnclock_reset_gravity(struct xnclock *clock)
+{
+	if (clock->ops.reset_gravity)
+		clock->ops.reset_gravity(clock);
+}
+
+#define xnclock_get_gravity(__clock, __type)  ((__clock)->gravity.__type)
+
+static inline xnticks_t xnclock_read_realtime(struct xnclock *clock)
+{
+	if (likely(clock == &nkclock))
+		return pipeline_read_wallclock();
+	/*
+	 * Return an adjusted value of the monotonic time with the
+	 * translated system wallclock offset.
+	 */
+	return xnclock_read_monotonic(clock) + xnclock_get_offset(clock);
+}
+
+void xnclock_apply_offset(struct xnclock *clock,
+			  xnsticks_t delta_ns);
+
+void xnclock_set_wallclock(xnticks_t epoch_ns);
+
+unsigned long long xnclock_divrem_billion(unsigned long long value,
+					  unsigned long *rem);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+void xnclock_init_proc(void);
+
+void xnclock_cleanup_proc(void);
+
+static inline void xnclock_print_status(struct xnclock *clock,
+					struct xnvfile_regular_iterator *it)
+{
+	if (clock->ops.print_status)
+		clock->ops.print_status(clock, it);
+}
+
+#else
+static inline void xnclock_init_proc(void) { }
+static inline void xnclock_cleanup_proc(void) { }
+#endif
+
+int xnclock_init(void);
+
+void xnclock_cleanup(void);
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_CLOCK_H */
+++ linux-patched/include/xenomai/cobalt/kernel/list.h	2022-03-21 12:58:31.498868396 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/linux/xenomai/wrappers.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_LIST_H
+#define _COBALT_KERNEL_LIST_H
+
+#include <linux/list.h>
+
+#define __list_add_pri(__new, __head, __member_pri, __member_next, __relop)	\
+do {										\
+	typeof(*__new) *__pos;							\
+	if (list_empty(__head))							\
+		list_add(&(__new)->__member_next, __head);		 	\
+	else {									\
+		list_for_each_entry_reverse(__pos, __head, __member_next) {	\
+			if ((__new)->__member_pri __relop __pos->__member_pri)	\
+				break;						\
+		}								\
+		list_add(&(__new)->__member_next, &__pos->__member_next); 	\
+	}									\
+} while (0)
+
+#define list_add_priff(__new, __head, __member_pri, __member_next)		\
+	__list_add_pri(__new, __head, __member_pri, __member_next, <=)
+
+#define list_add_prilf(__new, __head, __member_pri, __member_next)		\
+	__list_add_pri(__new, __head, __member_pri, __member_next, <)
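+
+/*
+ * Insertion sketch (illustrative): keep a queue ordered by decreasing
+ * priority value; _priff queues FIFO among equal priorities, _prilf
+ * queues LIFO.
+ *
+ *	struct item {
+ *		int prio;
+ *		struct list_head link;
+ *	};
+ *
+ *	LIST_HEAD(queue);
+ *	struct item a = { .prio = 3 };
+ *
+ *	list_add_priff(&a, &queue, prio, link);
+ */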
+
+#define list_get_entry(__head, __type, __member)		\
+  ({								\
+	  __type *__item;					\
+	  __item = list_first_entry(__head, __type, __member);	\
+	  list_del(&__item->__member);				\
+	  __item;						\
+  })
+
+#define list_get_entry_init(__head, __type, __member)		\
+  ({								\
+	  __type *__item;					\
+	  __item = list_first_entry(__head, __type, __member);	\
+	  list_del_init(&__item->__member);			\
+	  __item;						\
+  })
+
+#ifndef list_next_entry
+#define list_next_entry(__item, __member)			\
+	list_entry((__item)->__member.next, typeof(*(__item)), __member)
+#endif
+
+#endif /* !_COBALT_KERNEL_LIST_H */
+++ linux-patched/include/linux/xenomai/wrappers.h	2022-03-21 12:58:28.942893320 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/asm-generic/xenomai/wrappers.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_LINUX_WRAPPERS_H
+#define _COBALT_LINUX_WRAPPERS_H
+
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)
+#include <linux/signal.h>
+typedef siginfo_t kernel_siginfo_t;
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#else
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/rt.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <uapi/linux/sched/types.h>
+#endif
+
+#include <pipeline/wrappers.h>
+
+#endif /* !_COBALT_LINUX_WRAPPERS_H */
+++ linux-patched/include/asm-generic/xenomai/wrappers.h	2022-03-21 12:58:28.937893369 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/asm-generic/xenomai/syscall.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005-2012 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_WRAPPERS_H
+#define _COBALT_ASM_GENERIC_WRAPPERS_H
+
+#include <linux/xenomai/wrappers.h>
+
+#define COBALT_BACKPORT(__sym) __cobalt_backport_ ##__sym
+
+/*
+ * To keep the #ifdefery as readable as possible, please:
+ *
+ * - keep the conditional structure flat, no nesting (e.g. do not fold
+ *   the pre-3.11 conditions into the pre-3.14 ones).
+ * - group all wrappers for a single kernel revision.
+ * - list conditional blocks in order of kernel release, latest first
+ * - identify the first kernel release for which the wrapper should
+ *   be defined, instead of testing the existence of a preprocessor
+ *   symbol, so that obsolete wrappers can be spotted.
+ */
+
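+/*
+ * For example (illustrative pattern only, the symbol below is made
+ * up), a wrapper needed by kernels older than some release v would be
+ * added as its own flat block, in release order:
+ *
+ *	#if LINUX_VERSION_CODE < KERNEL_VERSION(v,0,0)
+ *	#define some_new_api(args...)	older_equivalent(args)
+ *	#endif
+ */
+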
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)
+#define raw_copy_to_user(__to, __from, __n)	__copy_to_user_inatomic(__to, __from, __n)
+#define raw_copy_from_user(__to, __from, __n)	__copy_from_user_inatomic(__to, __from, __n)
+#define raw_put_user(__from, __to)		__put_user_inatomic(__from, __to)
+#define raw_get_user(__to, __from)		__get_user_inatomic(__to, __from)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)
+#define in_ia32_syscall() (current_thread_info()->status & TS_COMPAT)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)
+#define cobalt_gpiochip_dev(__gc)	((__gc)->dev)
+#else
+#define cobalt_gpiochip_dev(__gc)	((__gc)->parent)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0)
+#define cobalt_get_restart_block(p)	(&task_thread_info(p)->restart_block)
+#else
+#define cobalt_get_restart_block(p)	(&(p)->restart_block)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)
+#define user_msghdr msghdr
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+#include <linux/netdevice.h>
+
+#undef alloc_netdev
+#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
+	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
+
+#include <linux/trace_seq.h>
+
+static inline unsigned char *
+trace_seq_buffer_ptr(struct trace_seq *s)
+{
+	return s->buffer + s->len;
+}
+
+#endif /* < 3.17 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
+#define smp_mb__before_atomic()  smp_mb()
+#define smp_mb__after_atomic()   smp_mb()
+#endif /* < 3.16 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)
+#define raw_cpu_ptr(v)	__this_cpu_ptr(v)
+#endif /* < 3.15 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
+#include <linux/pci.h>
+
+#ifdef CONFIG_PCI
+#define pci_enable_msix_range COBALT_BACKPORT(pci_enable_msix_range)
+#ifdef CONFIG_PCI_MSI
+int pci_enable_msix_range(struct pci_dev *dev,
+			  struct msix_entry *entries,
+			  int minvec, int maxvec);
+#else /* !CONFIG_PCI_MSI */
+static inline
+int pci_enable_msix_range(struct pci_dev *dev,
+			  struct msix_entry *entries,
+			  int minvec, int maxvec)
+{
+	return -ENOSYS;
+}
+#endif /* !CONFIG_PCI_MSI */
+#endif /* CONFIG_PCI */
+#endif /* < 3.14 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0)
+#include <linux/dma-mapping.h>
+#include <linux/hwmon.h>
+
+#define dma_set_mask_and_coherent COBALT_BACKPORT(dma_set_mask_and_coherent)
+static inline
+int dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+	int rc = dma_set_mask(dev, mask);
+	if (rc == 0)
+		dma_set_coherent_mask(dev, mask);
+	return rc;
+}
+
+#ifdef CONFIG_HWMON
+#define hwmon_device_register_with_groups \
+	COBALT_BACKPORT(hwmon_device_register_with_groups)
+struct device *
+hwmon_device_register_with_groups(struct device *dev, const char *name,
+				void *drvdata,
+				const struct attribute_group **groups);
+
+#define devm_hwmon_device_register_with_groups \
+	COBALT_BACKPORT(devm_hwmon_device_register_with_groups)
+struct device *
+devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
+				void *drvdata,
+				const struct attribute_group **groups);
+#endif /* CONFIG_HWMON */
+
+#define reinit_completion(__x)	INIT_COMPLETION(*(__x))
+
+#endif /* < 3.13 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
+#define DEVICE_ATTR_RW(_name)	__ATTR_RW(_name)
+#define DEVICE_ATTR_RO(_name)	__ATTR_RO(_name)
+#define DEVICE_ATTR_WO(_name)	__ATTR_WO(_name)
+#endif /* < 3.11 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
+#error "Xenomai/cobalt requires Linux kernel 3.10 or above"
+#endif /* < 3.10 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)
+#define __kernel_timex		timex
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)
+#define old_timex32		compat_timex
+#define SO_RCVTIMEO_OLD		SO_RCVTIMEO
+#define SO_SNDTIMEO_OLD		SO_SNDTIMEO
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0)
+#define mmiowb()		do { } while (0)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+#define __kernel_old_timeval	timeval
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)
+#define mmap_read_lock(__mm)	down_read(&(__mm)->mmap_sem)
+#define mmap_read_unlock(__mm)	up_read(&(__mm)->mmap_sem)
+#define mmap_write_lock(__mm)	down_write(&(__mm)->mmap_sem)
+#define mmap_write_unlock(__mm)	up_write(&(__mm)->mmap_sem)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)
+#define DEFINE_PROC_OPS(__name, __open, __release, __read, __write) \
+	struct file_operations __name = {			    \
+		.open = (__open),				    \
+		.release = (__release),				    \
+		.read = (__read),				    \
+		.write = (__write),				    \
+		.llseek = seq_lseek,				    \
+}
+#else
+#define DEFINE_PROC_OPS(__name, __open, __release, __read, __write)	\
+	struct proc_ops __name = {					\
+		.proc_open = (__open),					\
+		.proc_release = (__release),				\
+		.proc_read = (__read),					\
+		.proc_write = (__write),				\
+		.proc_lseek = seq_lseek,				\
+}
+#endif
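+
+/*
+ * Illustrative use (names hypothetical): a single
+ *	static DEFINE_PROC_OPS(foo_proc_ops, foo_open, single_release,
+ *			       seq_read, NULL);
+ * declaration yields a struct file_operations on pre-5.6 kernels and
+ * a struct proc_ops from 5.6 onward, so the same object can be handed
+ * to proc_create() on either kernel.
+ */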
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)
+#define vmalloc_kernel(__size, __flags)	__vmalloc(__size, GFP_KERNEL|__flags, PAGE_KERNEL)
+#else
+#define vmalloc_kernel(__size, __flags)	__vmalloc(__size, GFP_KERNEL|__flags)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)
+#define pci_aer_clear_nonfatal_status	pci_cleanup_aer_uncorrect_error_status
+#define old_timespec32    compat_timespec
+#define old_itimerspec32  compat_itimerspec
+#define old_timeval32     compat_timeval
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)
+#define read_file_from_kernel(__file, __buf, __buf_size, __file_size, __id) \
+	({								\
+		loff_t ___file_size;					\
+		int __ret;						\
+		__ret = kernel_read_file(__file, __buf, &___file_size,	\
+				__buf_size, __id);			\
+		(*__file_size) = ___file_size;				\
+		__ret;							\
+	})
+#else
+#define read_file_from_kernel(__file, __buf, __buf_size, __file_size, __id) \
+	kernel_read_file(__file, 0, __buf, __buf_size, __file_size, __id)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+#if __has_attribute(__fallthrough__)
+# define fallthrough			__attribute__((__fallthrough__))
+#else
+# define fallthrough			do {} while (0)  /* fallthrough */
+#endif
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)
+#define IRQ_WORK_INIT(_func) (struct irq_work) {	\
+	.flags = ATOMIC_INIT(0),			\
+	.func = (_func),				\
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,14,0)
+#define close_fd(__ufd)	__close_fd(current->files, __ufd)
+#endif
+
+#endif /* _COBALT_ASM_GENERIC_WRAPPERS_H */
+++ linux-patched/include/asm-generic/xenomai/syscall.h	2022-03-21 12:58:28.933893408 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/asm-generic/xenomai/pci_ids.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003,2004,2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_SYSCALL_H
+#define _COBALT_ASM_GENERIC_SYSCALL_H
+
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <asm/xenomai/features.h>
+#include <asm/xenomai/wrappers.h>
+#include <asm/xenomai/machine.h>
+#include <cobalt/uapi/asm-generic/syscall.h>
+#include <cobalt/uapi/kernel/types.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
+#define access_rok(addr, size)	access_ok((addr), (size))
+#define access_wok(addr, size)	access_ok((addr), (size))
+#else
+#define access_rok(addr, size)	access_ok(VERIFY_READ, (addr), (size))
+#define access_wok(addr, size)	access_ok(VERIFY_WRITE, (addr), (size))
+#endif
+
+#define __xn_copy_from_user(dstP, srcP, n)	raw_copy_from_user(dstP, srcP, n)
+#define __xn_copy_to_user(dstP, srcP, n)	raw_copy_to_user(dstP, srcP, n)
+#define __xn_put_user(src, dstP)		__put_user(src, dstP)
+#define __xn_get_user(dst, srcP)		__get_user(dst, srcP)
+#define __xn_strncpy_from_user(dstP, srcP, n)	strncpy_from_user(dstP, srcP, n)
+
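+/*
+ * Inlined uaccess helpers: cobalt_copy_from_user() zero-fills the part
+ * of the destination it could not copy and returns -EFAULT on any
+ * fault, cobalt_copy_to_user() returns -EFAULT on fault, and
+ * cobalt_strncpy_from_user() follows the strncpy_from_user() return
+ * convention.
+ */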
+static inline int cobalt_copy_from_user(void *dst, const void __user *src,
+					size_t size)
+{
+	size_t remaining = size;
+
+	if (likely(access_rok(src, size)))
+		remaining = __xn_copy_from_user(dst, src, size);
+
+	if (unlikely(remaining > 0)) {
+		memset(dst + (size - remaining), 0, remaining);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static inline int cobalt_copy_to_user(void __user *dst, const void *src,
+				      size_t size)
+{
+	if (unlikely(!access_wok(dst, size) ||
+	    __xn_copy_to_user(dst, src, size)))
+		return -EFAULT;
+	return 0;
+}
+
+static inline int cobalt_strncpy_from_user(char *dst, const char __user *src,
+					   size_t count)
+{
+	if (unlikely(!access_rok(src, 1)))
+		return -EFAULT;
+
+	return __xn_strncpy_from_user(dst, src, count);
+}
+
+/*
+ * NOTE: those copy helpers won't work in compat mode: use
+ * sys32_get_*(), sys32_put_*() instead.
+ */
+
+static inline int cobalt_get_u_timespec(struct timespec64 *dst,
+			const struct __user_old_timespec __user *src)
+{
+	struct __user_old_timespec u_ts;
+	int ret;
+
+	ret = cobalt_copy_from_user(&u_ts, src, sizeof(u_ts));
+	if (ret)
+		return ret;
+
+	dst->tv_sec = u_ts.tv_sec;
+	dst->tv_nsec = u_ts.tv_nsec;
+
+	return 0;
+}
+
+static inline int cobalt_put_u_timespec(
+	struct __user_old_timespec __user *dst,
+	const struct timespec64 *src)
+{
+	struct __user_old_timespec u_ts;
+	int ret;
+
+	u_ts.tv_sec = src->tv_sec;
+	u_ts.tv_nsec = src->tv_nsec;
+
+	ret = cobalt_copy_to_user(dst, &u_ts, sizeof(*dst));
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static inline int cobalt_get_u_itimerspec(struct itimerspec64 *dst,
+			const struct __user_old_itimerspec __user *src)
+{
+	struct __user_old_itimerspec u_its;
+	int ret;
+
+	ret = cobalt_copy_from_user(&u_its, src, sizeof(u_its));
+	if (ret)
+		return ret;
+
+	dst->it_interval.tv_sec = u_its.it_interval.tv_sec;
+	dst->it_interval.tv_nsec = u_its.it_interval.tv_nsec;
+	dst->it_value.tv_sec = u_its.it_value.tv_sec;
+	dst->it_value.tv_nsec = u_its.it_value.tv_nsec;
+
+	return 0;
+}
+
+static inline int cobalt_put_u_itimerspec(
+	struct __user_old_itimerspec __user *dst,
+	const struct itimerspec64 *src)
+{
+	struct __user_old_itimerspec u_its;
+
+	u_its.it_interval.tv_sec = src->it_interval.tv_sec;
+	u_its.it_interval.tv_nsec = src->it_interval.tv_nsec;
+	u_its.it_value.tv_sec = src->it_value.tv_sec;
+	u_its.it_value.tv_nsec = src->it_value.tv_nsec;
+
+	return cobalt_copy_to_user(dst, &u_its, sizeof(*dst));
+}
+
+/* 32bit syscall emulation */
+#define __COBALT_COMPAT_BIT	0x1
+/* 32bit syscall emulation - extended form */
+#define __COBALT_COMPATX_BIT	0x2
+
+#endif /* !_COBALT_ASM_GENERIC_SYSCALL_H */
+++ linux-patched/include/asm-generic/xenomai/pci_ids.h	2022-03-21 12:58:28.930893437 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/asm-generic/xenomai/ipipe/thread.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_PCI_IDS_H
+#define _COBALT_ASM_GENERIC_PCI_IDS_H
+
+#include <linux/pci_ids.h>
+
+/* SMI */
+#ifndef PCI_DEVICE_ID_INTEL_ESB2_0
+#define PCI_DEVICE_ID_INTEL_ESB2_0 0x2670
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_ICH7_0
+#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_ICH7_1
+#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_ICH8_4
+#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_ICH9_1
+#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_ICH9_5
+#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_ICH10_1
+#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_PCH_LPC_MIN
+#define PCI_DEVICE_ID_INTEL_PCH_LPC_MIN 0x3b00
+#endif
+
+/* RTCAN */
+#ifndef PCI_VENDOR_ID_ESDGMBH
+#define PCI_VENDOR_ID_ESDGMBH 0x12fe
+#endif
+#ifndef PCI_DEVICE_ID_PLX_9030
+#define PCI_DEVICE_ID_PLX_9030 0x9030
+#endif
+#ifndef PCI_DEVICE_ID_PLX_9056
+#define PCI_DEVICE_ID_PLX_9056 0x9056
+#endif
+
+#endif /* _COBALT_ASM_GENERIC_PCI_IDS_H */
+++ linux-patched/include/asm-generic/xenomai/ipipe/thread.h	2022-03-21 12:58:28.926893476 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/asm-generic/xenomai/machine.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_IPIPE_THREAD_H
+#define _COBALT_ASM_GENERIC_IPIPE_THREAD_H
+
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+
+struct task_struct;
+
+struct xntcb {
+	struct task_struct *host_task;
+	struct thread_struct *tsp;
+	struct mm_struct *mm;
+	struct mm_struct *active_mm;
+	struct thread_struct ts;
+	struct thread_info *tip;
+#ifdef CONFIG_XENO_ARCH_FPU
+	struct task_struct *user_fpu_owner;
+#endif
+};
+
+#endif /* !_COBALT_ASM_GENERIC_IPIPE_THREAD_H */
+++ linux-patched/include/asm-generic/xenomai/machine.h	2022-03-21 12:58:28.923893505 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/asm-generic/xenomai/dovetail/thread.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ *   Copyright &copy; 2012 Philippe Gerum.
+ *
+ *   Xenomai is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_MACHINE_H
+#define _COBALT_ASM_GENERIC_MACHINE_H
+
+#include <pipeline/machine.h>
+
+#ifndef xnarch_cache_aliasing
+#define xnarch_cache_aliasing()  0
+#endif
+
+#endif /* !_COBALT_ASM_GENERIC_MACHINE_H */
+++ linux-patched/include/asm-generic/xenomai/dovetail/thread.h	2022-03-21 12:58:28.919893545 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/asm-generic/xenomai/syscall32.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2021 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _COBALT_ASM_GENERIC_DOVETAIL_THREAD_H
+#define _COBALT_ASM_GENERIC_DOVETAIL_THREAD_H
+
+#include <linux/dovetail.h>
+
+struct xnarchtcb {
+	struct dovetail_altsched_context altsched;
+};
+
+static inline
+struct task_struct *xnarch_host_task(struct xnarchtcb *tcb)
+{
+	return tcb->altsched.task;
+}
+
+#endif /* !_COBALT_ASM_GENERIC_DOVETAIL_THREAD_H */
+++ linux-patched/include/asm-generic/xenomai/syscall32.h	2022-03-21 12:58:28.916893574 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/trace/events/cobalt-posix.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_SYSCALL32_H
+#define _COBALT_ASM_GENERIC_SYSCALL32_H
+
+#define __COBALT_CALL32_INITHAND(__handler)
+
+#define __COBALT_CALL32_INITMODE(__mode)
+
+#define __COBALT_CALL32_ENTRY(__name, __handler)
+
+#define __COBALT_CALL_COMPAT(__reg)	0
+
+#endif /* !_COBALT_ASM_GENERIC_SYSCALL32_H */
+++ linux-patched/include/trace/events/cobalt-posix.h	2022-03-21 12:58:28.910893632 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/trace/events/cobalt-rtdm.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Jan Kiszka <jan.kiszka@siemens.com>.
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cobalt_posix
+
+#if !defined(_TRACE_COBALT_POSIX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_COBALT_POSIX_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+#include <xenomai/posix/cond.h>
+#include <xenomai/posix/mqueue.h>
+#include <xenomai/posix/event.h>
+
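+/*
+ * Helpers flattening a struct timespec64 into two per-event fields, so
+ * timestamps are stored in the trace ring buffer by value and printed
+ * back as <sec>.<nsec>.
+ */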
+#define __timespec_fields(__name)				\
+	__field(time64_t, tv_sec_##__name)			\
+	__field(long, tv_nsec_##__name)
+
+#define __assign_timespec(__to, __from)				\
+	do {							\
+		__entry->tv_sec_##__to = (__from)->tv_sec;	\
+		__entry->tv_nsec_##__to = (__from)->tv_nsec;	\
+	} while (0)
+
+#define __timespec_args(__name)					\
+	(long long)__entry->tv_sec_##__name, __entry->tv_nsec_##__name
+
+#ifdef CONFIG_IA32_EMULATION
+#define __sc_compat(__name)	, { sc_cobalt_##__name + __COBALT_IA32_BASE, "compat-" #__name }
+#else
+#define __sc_compat(__name)
+#endif
+
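+/*
+ * Map a Cobalt syscall number to its symbolic name; with IA32
+ * emulation enabled, the compat entry (same syscall offset by
+ * __COBALT_IA32_BASE) is matched as well, under a "compat-" prefix.
+ */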
+#define __cobalt_symbolic_syscall(__name)				\
+	{ sc_cobalt_##__name, #__name }					\
+	__sc_compat(__name)						\
+
+#define __cobalt_syscall_name(__nr)					\
+	__print_symbolic((__nr),					\
+		__cobalt_symbolic_syscall(bind),			\
+		__cobalt_symbolic_syscall(thread_create),		\
+		__cobalt_symbolic_syscall(thread_getpid),		\
+		__cobalt_symbolic_syscall(thread_setmode),		\
+		__cobalt_symbolic_syscall(thread_setname),		\
+		__cobalt_symbolic_syscall(thread_join),			\
+		__cobalt_symbolic_syscall(thread_kill),			\
+		__cobalt_symbolic_syscall(thread_setschedparam_ex),	\
+		__cobalt_symbolic_syscall(thread_getschedparam_ex),	\
+		__cobalt_symbolic_syscall(thread_setschedprio),		\
+		__cobalt_symbolic_syscall(thread_getstat),		\
+		__cobalt_symbolic_syscall(sem_init),			\
+		__cobalt_symbolic_syscall(sem_destroy),			\
+		__cobalt_symbolic_syscall(sem_post),			\
+		__cobalt_symbolic_syscall(sem_wait),			\
+		__cobalt_symbolic_syscall(sem_trywait),			\
+		__cobalt_symbolic_syscall(sem_getvalue),		\
+		__cobalt_symbolic_syscall(sem_open),			\
+		__cobalt_symbolic_syscall(sem_close),			\
+		__cobalt_symbolic_syscall(sem_unlink),			\
+		__cobalt_symbolic_syscall(sem_timedwait),		\
+		__cobalt_symbolic_syscall(sem_inquire),			\
+		__cobalt_symbolic_syscall(sem_broadcast_np),		\
+		__cobalt_symbolic_syscall(clock_getres),		\
+		__cobalt_symbolic_syscall(clock_gettime),		\
+		__cobalt_symbolic_syscall(clock_settime),		\
+		__cobalt_symbolic_syscall(clock_nanosleep),		\
+		__cobalt_symbolic_syscall(mutex_init),			\
+		__cobalt_symbolic_syscall(mutex_check_init),		\
+		__cobalt_symbolic_syscall(mutex_destroy),		\
+		__cobalt_symbolic_syscall(mutex_lock),			\
+		__cobalt_symbolic_syscall(mutex_timedlock),		\
+		__cobalt_symbolic_syscall(mutex_trylock),		\
+		__cobalt_symbolic_syscall(mutex_unlock),		\
+		__cobalt_symbolic_syscall(cond_init),			\
+		__cobalt_symbolic_syscall(cond_destroy),		\
+		__cobalt_symbolic_syscall(cond_wait_prologue),		\
+		__cobalt_symbolic_syscall(cond_wait_epilogue),		\
+		__cobalt_symbolic_syscall(mq_open),			\
+		__cobalt_symbolic_syscall(mq_close),			\
+		__cobalt_symbolic_syscall(mq_unlink),			\
+		__cobalt_symbolic_syscall(mq_getattr),			\
+		__cobalt_symbolic_syscall(mq_timedsend),		\
+		__cobalt_symbolic_syscall(mq_timedreceive),		\
+		__cobalt_symbolic_syscall(mq_notify),			\
+		__cobalt_symbolic_syscall(sched_minprio),		\
+		__cobalt_symbolic_syscall(sched_maxprio),		\
+		__cobalt_symbolic_syscall(sched_weightprio),		\
+		__cobalt_symbolic_syscall(sched_yield),			\
+		__cobalt_symbolic_syscall(sched_setscheduler_ex),	\
+		__cobalt_symbolic_syscall(sched_getscheduler_ex),	\
+		__cobalt_symbolic_syscall(sched_setconfig_np),		\
+		__cobalt_symbolic_syscall(sched_getconfig_np),		\
+		__cobalt_symbolic_syscall(timer_create),		\
+		__cobalt_symbolic_syscall(timer_delete),		\
+		__cobalt_symbolic_syscall(timer_settime),		\
+		__cobalt_symbolic_syscall(timer_gettime),		\
+		__cobalt_symbolic_syscall(timer_getoverrun),		\
+		__cobalt_symbolic_syscall(timerfd_create),		\
+		__cobalt_symbolic_syscall(timerfd_settime),		\
+		__cobalt_symbolic_syscall(timerfd_gettime),		\
+		__cobalt_symbolic_syscall(sigwait),			\
+		__cobalt_symbolic_syscall(sigwaitinfo),			\
+		__cobalt_symbolic_syscall(sigtimedwait),		\
+		__cobalt_symbolic_syscall(sigpending),			\
+		__cobalt_symbolic_syscall(kill),			\
+		__cobalt_symbolic_syscall(sigqueue),			\
+		__cobalt_symbolic_syscall(monitor_init),		\
+		__cobalt_symbolic_syscall(monitor_destroy),		\
+		__cobalt_symbolic_syscall(monitor_enter),		\
+		__cobalt_symbolic_syscall(monitor_wait),		\
+		__cobalt_symbolic_syscall(monitor_sync),		\
+		__cobalt_symbolic_syscall(monitor_exit),		\
+		__cobalt_symbolic_syscall(event_init),			\
+		__cobalt_symbolic_syscall(event_wait),			\
+		__cobalt_symbolic_syscall(event_sync),			\
+		__cobalt_symbolic_syscall(event_destroy),		\
+		__cobalt_symbolic_syscall(event_inquire),		\
+		__cobalt_symbolic_syscall(open),			\
+		__cobalt_symbolic_syscall(socket),			\
+		__cobalt_symbolic_syscall(close),			\
+		__cobalt_symbolic_syscall(ioctl),			\
+		__cobalt_symbolic_syscall(read),			\
+		__cobalt_symbolic_syscall(write),			\
+		__cobalt_symbolic_syscall(recvmsg),			\
+		__cobalt_symbolic_syscall(sendmsg),			\
+		__cobalt_symbolic_syscall(mmap),			\
+		__cobalt_symbolic_syscall(select),			\
+		__cobalt_symbolic_syscall(fcntl),			\
+		__cobalt_symbolic_syscall(migrate),			\
+		__cobalt_symbolic_syscall(archcall),			\
+		__cobalt_symbolic_syscall(trace),			\
+		__cobalt_symbolic_syscall(corectl),			\
+		__cobalt_symbolic_syscall(get_current),			\
+		__cobalt_symbolic_syscall(backtrace),			\
+		__cobalt_symbolic_syscall(serialdbg),			\
+		__cobalt_symbolic_syscall(extend),			\
+		__cobalt_symbolic_syscall(ftrace_puts),			\
+		__cobalt_symbolic_syscall(recvmmsg),			\
+		__cobalt_symbolic_syscall(sendmmsg),			\
+		__cobalt_symbolic_syscall(clock_adjtime),		\
+		__cobalt_symbolic_syscall(sem_timedwait64),		\
+		__cobalt_symbolic_syscall(clock_gettime64),		\
+		__cobalt_symbolic_syscall(clock_settime64),		\
+		__cobalt_symbolic_syscall(clock_nanosleep64),		\
+		__cobalt_symbolic_syscall(clock_getres64),		\
+		__cobalt_symbolic_syscall(clock_adjtime64),		\
+		__cobalt_symbolic_syscall(mutex_timedlock64),		\
+		__cobalt_symbolic_syscall(mq_timedsend64),  		\
+		__cobalt_symbolic_syscall(mq_timedreceive64),		\
+		__cobalt_symbolic_syscall(sigtimedwait64),		\
+		__cobalt_symbolic_syscall(monitor_wait64),		\
+		__cobalt_symbolic_syscall(event_wait64),		\
+		__cobalt_symbolic_syscall(recvmmsg64))
+
+DECLARE_EVENT_CLASS(cobalt_syscall_entry,
+	TP_PROTO(unsigned int nr),
+	TP_ARGS(nr),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, nr)
+	),
+
+	TP_fast_assign(
+		__entry->nr = nr;
+	),
+
+	TP_printk("syscall=%s", __cobalt_syscall_name(__entry->nr))
+);
+
+DECLARE_EVENT_CLASS(cobalt_syscall_exit,
+	TP_PROTO(long result),
+	TP_ARGS(result),
+
+	TP_STRUCT__entry(
+		__field(long, result)
+	),
+
+	TP_fast_assign(
+		__entry->result = result;
+	),
+
+	TP_printk("result=%ld", __entry->result)
+);
+
+#define cobalt_print_sched_policy(__policy)			\
+	__print_symbolic(__policy,				\
+			 {SCHED_NORMAL, "normal"},		\
+			 {SCHED_FIFO, "fifo"},			\
+			 {SCHED_RR, "rr"},			\
+			 {SCHED_TP, "tp"},			\
+			 {SCHED_QUOTA, "quota"},		\
+			 {SCHED_SPORADIC, "sporadic"},		\
+			 {SCHED_COBALT, "cobalt"},		\
+			 {SCHED_WEAK, "weak"})
+
+const char *cobalt_trace_parse_sched_params(struct trace_seq *, int,
+					    struct sched_param_ex *);
+
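+/*
+ * Render the policy-specific members of sched_param_ex at output time,
+ * using the trace_seq pointer 'p' that TP_printk() makes available to
+ * its format arguments.
+ */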
+#define __parse_sched_params(policy, params)			\
+	cobalt_trace_parse_sched_params(p, policy,		\
+					(struct sched_param_ex *)(params))
+
+DECLARE_EVENT_CLASS(cobalt_posix_schedparam,
+	TP_PROTO(unsigned long pth, int policy,
+		 const struct sched_param_ex *param_ex),
+	TP_ARGS(pth, policy, param_ex),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+		__field(int, policy)
+		__dynamic_array(char, param_ex, sizeof(struct sched_param_ex))
+	),
+
+	TP_fast_assign(
+		__entry->pth = pth;
+		__entry->policy = policy;
+		memcpy(__get_dynamic_array(param_ex), param_ex, sizeof(*param_ex));
+	),
+
+	TP_printk("pth=%p policy=%s param={ %s }",
+		  (void *)__entry->pth,
+		  cobalt_print_sched_policy(__entry->policy),
+		  __parse_sched_params(__entry->policy,
+				       __get_dynamic_array(param_ex))
+	)
+);
+
+DECLARE_EVENT_CLASS(cobalt_posix_scheduler,
+	TP_PROTO(pid_t pid, int policy,
+		 const struct sched_param_ex *param_ex),
+	TP_ARGS(pid, policy, param_ex),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__field(int, policy)
+		__dynamic_array(char, param_ex, sizeof(struct sched_param_ex))
+	),
+
+	TP_fast_assign(
+		__entry->pid = pid;
+		__entry->policy = policy;
+		memcpy(__get_dynamic_array(param_ex), param_ex, sizeof(*param_ex));
+	),
+
+	TP_printk("pid=%d policy=%s param={ %s }",
+		  __entry->pid,
+		  cobalt_print_sched_policy(__entry->policy),
+		  __parse_sched_params(__entry->policy,
+				       __get_dynamic_array(param_ex))
+	)
+);
+
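+/*
+ * Event classes cannot have an empty prototype, so parameter-less
+ * events are declared from this class with a dummy int argument.
+ */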
+DECLARE_EVENT_CLASS(cobalt_void,
+	TP_PROTO(int dummy),
+	TP_ARGS(dummy),
+	TP_STRUCT__entry(
+		__field(int, dummy)
+	),
+	TP_fast_assign(
+		(void)dummy;
+	),
+	TP_printk("%s", "")
+);
+
+DEFINE_EVENT(cobalt_syscall_entry, cobalt_head_sysentry,
+	TP_PROTO(unsigned int nr),
+	TP_ARGS(nr)
+);
+
+DEFINE_EVENT(cobalt_syscall_exit, cobalt_head_sysexit,
+	TP_PROTO(long result),
+	TP_ARGS(result)
+);
+
+DEFINE_EVENT(cobalt_syscall_entry, cobalt_root_sysentry,
+	TP_PROTO(unsigned int nr),
+	TP_ARGS(nr)
+);
+
+DEFINE_EVENT(cobalt_syscall_exit, cobalt_root_sysexit,
+	TP_PROTO(long result),
+	TP_ARGS(result)
+);
+
+DEFINE_EVENT(cobalt_posix_schedparam, cobalt_pthread_create,
+	TP_PROTO(unsigned long pth, int policy,
+		 const struct sched_param_ex *param_ex),
+	TP_ARGS(pth, policy, param_ex)
+);
+
+DEFINE_EVENT(cobalt_posix_schedparam, cobalt_pthread_setschedparam,
+	TP_PROTO(unsigned long pth, int policy,
+		 const struct sched_param_ex *param_ex),
+	TP_ARGS(pth, policy, param_ex)
+);
+
+DEFINE_EVENT(cobalt_posix_schedparam, cobalt_pthread_getschedparam,
+	TP_PROTO(unsigned long pth, int policy,
+		 const struct sched_param_ex *param_ex),
+	TP_ARGS(pth, policy, param_ex)
+);
+
+TRACE_EVENT(cobalt_pthread_setschedprio,
+	TP_PROTO(unsigned long pth, int prio),
+	TP_ARGS(pth, prio),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+		__field(int, prio)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+		__entry->prio = prio;
+	),
+	TP_printk("pth=%p prio=%d", (void *)__entry->pth, __entry->prio)
+);
+
+#define cobalt_print_thread_mode(__mode)			\
+	__print_flags(__mode, "|",				\
+		      {PTHREAD_WARNSW, "warnsw"},		\
+		      {PTHREAD_LOCK_SCHED, "lock"},		\
+		      {PTHREAD_DISABLE_LOCKBREAK, "nolockbreak"})
+
+TRACE_EVENT(cobalt_pthread_setmode,
+	TP_PROTO(int clrmask, int setmask),
+	TP_ARGS(clrmask, setmask),
+	TP_STRUCT__entry(
+		__field(int, clrmask)
+		__field(int, setmask)
+	),
+	TP_fast_assign(
+		__entry->clrmask = clrmask;
+		__entry->setmask = setmask;
+	),
+	TP_printk("clrmask=%#x(%s) setmask=%#x(%s)",
+		  __entry->clrmask, cobalt_print_thread_mode(__entry->clrmask),
+		  __entry->setmask, cobalt_print_thread_mode(__entry->setmask))
+);
+
+TRACE_EVENT(cobalt_pthread_setname,
+	TP_PROTO(unsigned long pth, const char *name),
+	TP_ARGS(pth, name),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+		__string(name, name)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+		__assign_str(name, name);
+	),
+	TP_printk("pth=%p name=%s", (void *)__entry->pth, __get_str(name))
+);
+
+DECLARE_EVENT_CLASS(cobalt_posix_pid,
+	TP_PROTO(pid_t pid),
+	TP_ARGS(pid),
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+	),
+	TP_fast_assign(
+		__entry->pid = pid;
+	),
+	TP_printk("pid=%d", __entry->pid)
+);
+
+DEFINE_EVENT(cobalt_posix_pid, cobalt_pthread_stat,
+	TP_PROTO(pid_t pid),
+	TP_ARGS(pid)
+);
+
+TRACE_EVENT(cobalt_pthread_kill,
+	TP_PROTO(unsigned long pth, int sig),
+	TP_ARGS(pth, sig),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+		__field(int, sig)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+		__entry->sig = sig;
+	),
+	TP_printk("pth=%p sig=%d", (void *)__entry->pth, __entry->sig)
+);
+
+TRACE_EVENT(cobalt_pthread_join,
+	TP_PROTO(unsigned long pth),
+	TP_ARGS(pth),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+	),
+	TP_printk("pth=%p", (void *)__entry->pth)
+);
+
+TRACE_EVENT(cobalt_pthread_pid,
+	TP_PROTO(unsigned long pth),
+	TP_ARGS(pth),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+	),
+	TP_printk("pth=%p", (void *)__entry->pth)
+);
+
+TRACE_EVENT(cobalt_pthread_extend,
+	TP_PROTO(unsigned long pth, const char *name),
+	TP_ARGS(pth, name),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+		__string(name, name)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+		__assign_str(name, name);
+	),
+	TP_printk("pth=%p +personality=%s", (void *)__entry->pth, __get_str(name))
+);
+
+TRACE_EVENT(cobalt_pthread_restrict,
+	TP_PROTO(unsigned long pth, const char *name),
+	TP_ARGS(pth, name),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+		__string(name, name)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+		__assign_str(name, name);
+	),
+	TP_printk("pth=%p -personality=%s", (void *)__entry->pth, __get_str(name))
+);
+
+DEFINE_EVENT(cobalt_void, cobalt_pthread_yield,
+	TP_PROTO(int dummy),
+	TP_ARGS(dummy)
+);
+
+TRACE_EVENT(cobalt_sched_setconfig,
+	TP_PROTO(int cpu, int policy, size_t len),
+	TP_ARGS(cpu, policy, len),
+	TP_STRUCT__entry(
+		__field(int, cpu)
+		__field(int, policy)
+		__field(size_t, len)
+	),
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->policy = policy;
+		__entry->len = len;
+	),
+	TP_printk("cpu=%d policy=%d(%s) len=%zu",
+		  __entry->cpu, __entry->policy,
+		  cobalt_print_sched_policy(__entry->policy),
+		  __entry->len)
+);
+
+TRACE_EVENT(cobalt_sched_get_config,
+	TP_PROTO(int cpu, int policy, size_t rlen),
+	TP_ARGS(cpu, policy, rlen),
+	TP_STRUCT__entry(
+		__field(int, cpu)
+		__field(int, policy)
+		__field(ssize_t, rlen)
+	),
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->policy = policy;
+		__entry->rlen = rlen;
+	),
+	TP_printk("cpu=%d policy=%d(%s) rlen=%zd",
+		  __entry->cpu, __entry->policy,
+		  cobalt_print_sched_policy(__entry->policy),
+		  __entry->rlen)
+);
+
+DEFINE_EVENT(cobalt_posix_scheduler, cobalt_sched_setscheduler,
+	TP_PROTO(pid_t pid, int policy,
+		 const struct sched_param_ex *param_ex),
+	TP_ARGS(pid, policy, param_ex)
+);
+
+DEFINE_EVENT(cobalt_posix_pid, cobalt_sched_getscheduler,
+	TP_PROTO(pid_t pid),
+	TP_ARGS(pid)
+);
+
+DECLARE_EVENT_CLASS(cobalt_posix_prio_bound,
+	TP_PROTO(int policy, int prio),
+	TP_ARGS(policy, prio),
+	TP_STRUCT__entry(
+		__field(int, policy)
+		__field(int, prio)
+	),
+	TP_fast_assign(
+		__entry->policy = policy;
+		__entry->prio = prio;
+	),
+	TP_printk("policy=%d(%s) prio=%d",
+		  __entry->policy,
+		  cobalt_print_sched_policy(__entry->policy),
+		  __entry->prio)
+);
+
+DEFINE_EVENT(cobalt_posix_prio_bound, cobalt_sched_min_prio,
+	TP_PROTO(int policy, int prio),
+	TP_ARGS(policy, prio)
+);
+
+DEFINE_EVENT(cobalt_posix_prio_bound, cobalt_sched_max_prio,
+	TP_PROTO(int policy, int prio),
+	TP_ARGS(policy, prio)
+);
+
+DECLARE_EVENT_CLASS(cobalt_posix_sem,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle),
+	TP_STRUCT__entry(
+		__field(xnhandle_t, handle)
+	),
+	TP_fast_assign(
+		__entry->handle = handle;
+	),
+	TP_printk("sem=%#x", __entry->handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_wait,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_trywait,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_timedwait,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_post,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_destroy,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_broadcast,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_inquire,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+TRACE_EVENT(cobalt_psem_getvalue,
+	TP_PROTO(xnhandle_t handle, int value),
+	TP_ARGS(handle, value),
+	TP_STRUCT__entry(
+		__field(xnhandle_t, handle)
+		__field(int, value)
+	),
+	TP_fast_assign(
+		__entry->handle = handle;
+		__entry->value = value;
+	),
+	TP_printk("sem=%#x value=%d", __entry->handle, __entry->value)
+);
+
+#define cobalt_print_sem_flags(__flags)				\
+  	__print_flags(__flags, "|",				\
+			 {SEM_FIFO, "fifo"},			\
+			 {SEM_PULSE, "pulse"},			\
+			 {SEM_PSHARED, "pshared"},		\
+			 {SEM_REPORT, "report"},		\
+			 {SEM_WARNDEL, "warndel"},		\
+			 {SEM_RAWCLOCK, "rawclock"},		\
+			 {SEM_NOBUSYDEL, "nobusydel"})
+
+TRACE_EVENT(cobalt_psem_init,
+	TP_PROTO(const char *name, xnhandle_t handle,
+		 int flags, unsigned int value),
+	TP_ARGS(name, handle, flags, value),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(xnhandle_t, handle)
+		__field(int, flags)
+		__field(unsigned int, value)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->handle = handle;
+		__entry->flags = flags;
+		__entry->value = value;
+	),
+	TP_printk("sem=%#x(%s) flags=%#x(%s) value=%u",
+		  __entry->handle,
+		  __get_str(name),
+		  __entry->flags,
+		  cobalt_print_sem_flags(__entry->flags),
+		  __entry->value)
+);
+
+TRACE_EVENT(cobalt_psem_init_failed,
+	TP_PROTO(const char *name, int flags, unsigned int value, int status),
+	TP_ARGS(name, flags, value, status),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(int, flags)
+		__field(unsigned int, value)
+		__field(int, status)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->flags = flags;
+		__entry->value = value;
+		__entry->status = status;
+	),
+	TP_printk("name=%s flags=%#x(%s) value=%u error=%d",
+		  __get_str(name),
+		  __entry->flags,
+		  cobalt_print_sem_flags(__entry->flags),
+		  __entry->value, __entry->status)
+);
+
+#define cobalt_print_oflags(__flags)		\
+	__print_flags(__flags,  "|", 		\
+		      {O_RDONLY, "rdonly"},	\
+		      {O_WRONLY, "wronly"},	\
+		      {O_RDWR, "rdwr"},		\
+		      {O_CREAT, "creat"},	\
+		      {O_EXCL, "excl"},		\
+		      {O_DIRECT, "direct"},	\
+		      {O_NONBLOCK, "nonblock"},	\
+		      {O_TRUNC, "trunc"})
+
+TRACE_EVENT(cobalt_psem_open,
+	TP_PROTO(const char *name, xnhandle_t handle,
+		 int oflags, mode_t mode, unsigned int value),
+	TP_ARGS(name, handle, oflags, mode, value),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(xnhandle_t, handle)
+		__field(int, oflags)
+		__field(mode_t, mode)
+		__field(unsigned int, value)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->handle = handle;
+		__entry->oflags = oflags;
+		if (oflags & O_CREAT) {
+			__entry->mode = mode;
+			__entry->value = value;
+		} else {
+			__entry->mode = 0;
+			__entry->value = 0;
+		}
+	),
+	TP_printk("named_sem=%#x(%s) oflags=%#x(%s) mode=%o value=%u",
+		  __entry->handle, __get_str(name),
+		  __entry->oflags, cobalt_print_oflags(__entry->oflags),
+		  __entry->mode, __entry->value)
+);
+
+TRACE_EVENT(cobalt_psem_open_failed,
+	TP_PROTO(const char *name, int oflags, mode_t mode,
+		 unsigned int value, int status),
+	TP_ARGS(name, oflags, mode, value, status),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(int, oflags)
+		__field(mode_t, mode)
+		__field(unsigned int, value)
+		__field(int, status)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->oflags = oflags;
+		__entry->status = status;
+		if (oflags & O_CREAT) {
+			__entry->mode = mode;
+			__entry->value = value;
+		} else {
+			__entry->mode = 0;
+			__entry->value = 0;
+		}
+	),
+	TP_printk("named_sem=%s oflags=%#x(%s) mode=%o value=%u error=%d",
+		  __get_str(name),
+		  __entry->oflags, cobalt_print_oflags(__entry->oflags),
+		  __entry->mode, __entry->value, __entry->status)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_close,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+TRACE_EVENT(cobalt_psem_unlink,
+	TP_PROTO(const char *name),
+	TP_ARGS(name),
+	TP_STRUCT__entry(
+		__string(name, name)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+	),
+	TP_printk("name=%s", __get_str(name))
+);
+
+DECLARE_EVENT_CLASS(cobalt_clock_timespec,
+	TP_PROTO(clockid_t clk_id, const struct timespec64 *val),
+	TP_ARGS(clk_id, val),
+
+	TP_STRUCT__entry(
+		__field(clockid_t, clk_id)
+		__timespec_fields(val)
+	),
+
+	TP_fast_assign(
+		__entry->clk_id = clk_id;
+		__assign_timespec(val, val);
+	),
+
+	TP_printk("clock_id=%d timeval=(%lld.%09ld)",
+		  __entry->clk_id,
+		  __timespec_args(val)
+	)
+);
+
+DEFINE_EVENT(cobalt_clock_timespec, cobalt_clock_getres,
+	TP_PROTO(clockid_t clk_id, const struct timespec64 *res),
+	TP_ARGS(clk_id, res)
+);
+
+DEFINE_EVENT(cobalt_clock_timespec, cobalt_clock_gettime,
+	TP_PROTO(clockid_t clk_id, const struct timespec64 *time),
+	TP_ARGS(clk_id, time)
+);
+
+DEFINE_EVENT(cobalt_clock_timespec, cobalt_clock_settime,
+	TP_PROTO(clockid_t clk_id, const struct timespec64 *time),
+	TP_ARGS(clk_id, time)
+);
+
+TRACE_EVENT(cobalt_clock_adjtime,
+	TP_PROTO(clockid_t clk_id, struct __kernel_timex *tx),
+	TP_ARGS(clk_id, tx),
+
+	TP_STRUCT__entry(
+		__field(clockid_t, clk_id)
+		__field(struct __kernel_timex *, tx)
+	),
+
+	TP_fast_assign(
+		__entry->clk_id = clk_id;
+		__entry->tx = tx;
+	),
+
+	TP_printk("clock_id=%d timex=%p",
+		  __entry->clk_id,
+		  __entry->tx
+	)
+);
+
+#define cobalt_print_timer_flags(__flags)			\
+	__print_flags(__flags, "|",				\
+		      {TIMER_ABSTIME, "TIMER_ABSTIME"})
+
+TRACE_EVENT(cobalt_clock_nanosleep,
+	TP_PROTO(clockid_t clk_id, int flags, const struct timespec64 *time),
+	TP_ARGS(clk_id, flags, time),
+
+	TP_STRUCT__entry(
+		__field(clockid_t, clk_id)
+		__field(int, flags)
+		__timespec_fields(time)
+	),
+
+	TP_fast_assign(
+		__entry->clk_id = clk_id;
+		__entry->flags = flags;
+		__assign_timespec(time, time);
+	),
+
+	TP_printk("clock_id=%d flags=%#x(%s) rqt=(%lld.%09ld)",
+		  __entry->clk_id,
+		  __entry->flags, cobalt_print_timer_flags(__entry->flags),
+		  __timespec_args(time)
+	)
+);
+
+DECLARE_EVENT_CLASS(cobalt_clock_ident,
+	TP_PROTO(const char *name, clockid_t clk_id),
+	TP_ARGS(name, clk_id),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(clockid_t, clk_id)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->clk_id = clk_id;
+	),
+	TP_printk("name=%s, id=%#x", __get_str(name), __entry->clk_id)
+);
+
+DEFINE_EVENT(cobalt_clock_ident, cobalt_clock_register,
+	TP_PROTO(const char *name, clockid_t clk_id),
+	TP_ARGS(name, clk_id)
+);
+
+DEFINE_EVENT(cobalt_clock_ident, cobalt_clock_deregister,
+	TP_PROTO(const char *name, clockid_t clk_id),
+	TP_ARGS(name, clk_id)
+);
+
+#define cobalt_print_clock(__clk_id)					\
+	__print_symbolic(__clk_id,					\
+			 {CLOCK_MONOTONIC, "CLOCK_MONOTONIC"},		\
+			 {CLOCK_MONOTONIC_RAW, "CLOCK_MONOTONIC_RAW"},	\
+			 {CLOCK_REALTIME, "CLOCK_REALTIME"})
+
+TRACE_EVENT(cobalt_cond_init,
+	TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd,
+		 const struct cobalt_condattr *attr),
+	TP_ARGS(u_cnd, attr),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_cond_shadow __user *, u_cnd)
+		__field(clockid_t, clk_id)
+		__field(int, pshared)
+	),
+	TP_fast_assign(
+		__entry->u_cnd = u_cnd;
+		__entry->clk_id = attr->clock;
+		__entry->pshared = attr->pshared;
+	),
+	TP_printk("cond=%p attr={ .clock=%s, .pshared=%d }",
+		  __entry->u_cnd,
+		  cobalt_print_clock(__entry->clk_id),
+		  __entry->pshared)
+);
+
+TRACE_EVENT(cobalt_cond_destroy,
+	TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd),
+	TP_ARGS(u_cnd),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_cond_shadow __user *, u_cnd)
+	),
+	TP_fast_assign(
+		__entry->u_cnd = u_cnd;
+	),
+	TP_printk("cond=%p", __entry->u_cnd)
+);
+
+TRACE_EVENT(cobalt_cond_timedwait,
+	TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd,
+		 const struct cobalt_mutex_shadow __user *u_mx,
+		 const struct timespec64 *timeout),
+	TP_ARGS(u_cnd, u_mx, timeout),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_cond_shadow __user *, u_cnd)
+		__field(const struct cobalt_mutex_shadow __user *, u_mx)
+		__timespec_fields(timeout)
+	),
+	TP_fast_assign(
+		__entry->u_cnd = u_cnd;
+		__entry->u_mx = u_mx;
+		__assign_timespec(timeout, timeout);
+	),
+	TP_printk("cond=%p, mutex=%p, timeout=(%lld.%09ld)",
+		  __entry->u_cnd, __entry->u_mx, __timespec_args(timeout))
+);
+
+TRACE_EVENT(cobalt_cond_wait,
+	TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd,
+		 const struct cobalt_mutex_shadow __user *u_mx),
+	TP_ARGS(u_cnd, u_mx),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_cond_shadow __user *, u_cnd)
+		__field(const struct cobalt_mutex_shadow __user *, u_mx)
+	),
+	TP_fast_assign(
+		__entry->u_cnd = u_cnd;
+		__entry->u_mx = u_mx;
+	),
+	TP_printk("cond=%p, mutex=%p",
+		  __entry->u_cnd, __entry->u_mx)
+);
+
+TRACE_EVENT(cobalt_mq_open,
+	TP_PROTO(const char *name, int oflags, mode_t mode),
+	TP_ARGS(name, oflags, mode),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(int, oflags)
+		__field(mode_t, mode)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->oflags = oflags;
+		__entry->mode = (oflags & O_CREAT) ? mode : 0;
+	),
+
+	TP_printk("name=%s oflags=%#x(%s) mode=%o",
+		  __get_str(name),
+		  __entry->oflags, cobalt_print_oflags(__entry->oflags),
+		  __entry->mode)
+);
+
+TRACE_EVENT(cobalt_mq_notify,
+	TP_PROTO(mqd_t mqd, const struct sigevent *sev),
+	TP_ARGS(mqd, sev),
+
+	TP_STRUCT__entry(
+		__field(mqd_t, mqd)
+		__field(int, signo)
+	),
+
+	TP_fast_assign(
+		__entry->mqd = mqd;
+		__entry->signo = sev && sev->sigev_notify != SIGEV_NONE ?
+			sev->sigev_signo : 0;
+	),
+
+	TP_printk("mqd=%d signo=%d",
+		  __entry->mqd, __entry->signo)
+);
+
+TRACE_EVENT(cobalt_mq_close,
+	TP_PROTO(mqd_t mqd),
+	TP_ARGS(mqd),
+
+	TP_STRUCT__entry(
+		__field(mqd_t, mqd)
+	),
+
+	TP_fast_assign(
+		__entry->mqd = mqd;
+	),
+
+	TP_printk("mqd=%d", __entry->mqd)
+);
+
+TRACE_EVENT(cobalt_mq_unlink,
+	TP_PROTO(const char *name),
+	TP_ARGS(name),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+	),
+
+	TP_printk("name=%s", __get_str(name))
+);
+
+TRACE_EVENT(cobalt_mq_send,
+	TP_PROTO(mqd_t mqd, const void __user *u_buf, size_t len,
+		 unsigned int prio),
+	TP_ARGS(mqd, u_buf, len, prio),
+	TP_STRUCT__entry(
+		__field(mqd_t, mqd)
+		__field(const void __user *, u_buf)
+		__field(size_t, len)
+		__field(unsigned int, prio)
+	),
+	TP_fast_assign(
+		__entry->mqd = mqd;
+		__entry->u_buf = u_buf;
+		__entry->len = len;
+		__entry->prio = prio;
+	),
+	TP_printk("mqd=%d buf=%p len=%zu prio=%u",
+		  __entry->mqd, __entry->u_buf, __entry->len,
+		  __entry->prio)
+);
+
+TRACE_EVENT(cobalt_mq_timedreceive,
+	TP_PROTO(mqd_t mqd, const void __user *u_buf, size_t len,
+		 const struct timespec64 *timeout),
+	TP_ARGS(mqd, u_buf, len, timeout),
+	TP_STRUCT__entry(
+		__field(mqd_t, mqd)
+		__field(const void __user *, u_buf)
+		__field(size_t, len)
+		__timespec_fields(timeout)
+	),
+	TP_fast_assign(
+		__entry->mqd = mqd;
+		__entry->u_buf = u_buf;
+		__entry->len = len;
+		__assign_timespec(timeout, timeout);
+	),
+	TP_printk("mqd=%d buf=%p len=%zu timeout=(%lld.%09ld)",
+		  __entry->mqd, __entry->u_buf, __entry->len,
+		  __timespec_args(timeout))
+);
+
+TRACE_EVENT(cobalt_mq_receive,
+	TP_PROTO(mqd_t mqd, const void __user *u_buf, size_t len),
+	TP_ARGS(mqd, u_buf, len),
+	TP_STRUCT__entry(
+		__field(mqd_t, mqd)
+		__field(const void __user *, u_buf)
+		__field(size_t, len)
+	),
+	TP_fast_assign(
+		__entry->mqd = mqd;
+		__entry->u_buf = u_buf;
+		__entry->len = len;
+	),
+	TP_printk("mqd=%d buf=%p len=%zu",
+		  __entry->mqd, __entry->u_buf, __entry->len)
+);
+
+DECLARE_EVENT_CLASS(cobalt_posix_mqattr,
+	TP_PROTO(mqd_t mqd, const struct mq_attr *attr),
+	TP_ARGS(mqd, attr),
+	TP_STRUCT__entry(
+		__field(mqd_t, mqd)
+		__field(long, flags)
+		__field(long, curmsgs)
+		__field(long, msgsize)
+		__field(long, maxmsg)
+	),
+	TP_fast_assign(
+		__entry->mqd = mqd;
+		__entry->flags = attr->mq_flags;
+		__entry->curmsgs = attr->mq_curmsgs;
+		__entry->msgsize = attr->mq_msgsize;
+		__entry->maxmsg = attr->mq_maxmsg;
+	),
+	TP_printk("mqd=%d flags=%#lx(%s) curmsgs=%ld msgsize=%ld maxmsg=%ld",
+		  __entry->mqd,
+		  __entry->flags, cobalt_print_oflags(__entry->flags),
+		  __entry->curmsgs,
+		  __entry->msgsize,
+		  __entry->maxmsg
+	)
+);
+
+DEFINE_EVENT(cobalt_posix_mqattr, cobalt_mq_getattr,
+	TP_PROTO(mqd_t mqd, const struct mq_attr *attr),
+	TP_ARGS(mqd, attr)
+);
+
+DEFINE_EVENT(cobalt_posix_mqattr, cobalt_mq_setattr,
+	TP_PROTO(mqd_t mqd, const struct mq_attr *attr),
+	TP_ARGS(mqd, attr)
+);
+
+#define cobalt_print_evflags(__flags)			\
+	__print_flags(__flags,  "|",			\
+		      {COBALT_EVENT_SHARED, "shared"},	\
+		      {COBALT_EVENT_PRIO, "prio"})
+
+TRACE_EVENT(cobalt_event_init,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event,
+		 unsigned long value, int flags),
+	TP_ARGS(u_event, value, flags),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_event_shadow __user *, u_event)
+		__field(unsigned long, value)
+		__field(int, flags)
+	),
+	TP_fast_assign(
+		__entry->u_event = u_event;
+		__entry->value = value;
+		__entry->flags = flags;
+	),
+	TP_printk("event=%p value=%lu flags=%#x(%s)",
+		  __entry->u_event, __entry->value,
+		  __entry->flags, cobalt_print_evflags(__entry->flags))
+);
+
+#define cobalt_print_evmode(__mode)			\
+	__print_symbolic(__mode,			\
+			 {COBALT_EVENT_ANY, "any"},	\
+			 {COBALT_EVENT_ALL, "all"})
+
+TRACE_EVENT(cobalt_event_timedwait,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event,
+		 unsigned long bits, int mode,
+		 const struct timespec64 *timeout),
+	TP_ARGS(u_event, bits, mode, timeout),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_event_shadow __user *, u_event)
+		__field(unsigned long, bits)
+		__field(int, mode)
+		__timespec_fields(timeout)
+	),
+	TP_fast_assign(
+		__entry->u_event = u_event;
+		__entry->bits = bits;
+		__entry->mode = mode;
+		__assign_timespec(timeout, timeout);
+	),
+	TP_printk("event=%p bits=%#lx mode=%#x(%s) timeout=(%lld.%09ld)",
+		  __entry->u_event, __entry->bits, __entry->mode,
+		  cobalt_print_evmode(__entry->mode),
+		  __timespec_args(timeout))
+);
+
+TRACE_EVENT(cobalt_event_wait,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event,
+		 unsigned long bits, int mode),
+	TP_ARGS(u_event, bits, mode),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_event_shadow __user *, u_event)
+		__field(unsigned long, bits)
+		__field(int, mode)
+	),
+	TP_fast_assign(
+		__entry->u_event = u_event;
+		__entry->bits = bits;
+		__entry->mode = mode;
+	),
+	TP_printk("event=%p bits=%#lx mode=%#x(%s)",
+		  __entry->u_event, __entry->bits, __entry->mode,
+		  cobalt_print_evmode(__entry->mode))
+);
+
+DECLARE_EVENT_CLASS(cobalt_event_ident,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event),
+	TP_ARGS(u_event),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_event_shadow __user *, u_event)
+	),
+	TP_fast_assign(
+		__entry->u_event = u_event;
+	),
+	TP_printk("event=%p", __entry->u_event)
+);
+
+DEFINE_EVENT(cobalt_event_ident, cobalt_event_destroy,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event),
+	TP_ARGS(u_event)
+);
+
+DEFINE_EVENT(cobalt_event_ident, cobalt_event_sync,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event),
+	TP_ARGS(u_event)
+);
+
+DEFINE_EVENT(cobalt_event_ident, cobalt_event_inquire,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event),
+	TP_ARGS(u_event)
+);
+
+#endif /* _TRACE_COBALT_POSIX_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cobalt-posix
+#include <trace/define_trace.h>
+++ linux-patched/include/trace/events/cobalt-rtdm.h	2022-03-21 12:58:28.904893691 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/trace/events/cobalt-core.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Jan Kiszka <jan.kiszka@siemens.com>.
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cobalt_rtdm
+
+#if !defined(_TRACE_COBALT_RTDM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_COBALT_RTDM_H
+
+#include <linux/tracepoint.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+
+struct rtdm_fd;
+struct rtdm_event;
+struct rtdm_sem;
+struct rtdm_mutex;
+struct xnthread;
+struct rtdm_device;
+struct rtdm_dev_context;
+struct _rtdm_mmap_request;
+
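+/*
+ * Generic event classes keyed on an RTDM file descriptor: each one
+ * resolves the rtdm_fd to its owning device via rtdm_fd_to_context(),
+ * so per-device activity can be correlated in the trace output.
+ */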
+DECLARE_EVENT_CLASS(fd_event,
+	TP_PROTO(struct rtdm_fd *fd, int ufd),
+	TP_ARGS(fd, ufd),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_device *, dev)
+		__field(int, ufd)
+	),
+
+	TP_fast_assign(
+		__entry->dev = rtdm_fd_to_context(fd)->device;
+		__entry->ufd = ufd;
+	),
+
+	TP_printk("device=%p fd=%d",
+		  __entry->dev, __entry->ufd)
+);
+
+DECLARE_EVENT_CLASS(fd_request,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd, unsigned long arg),
+	TP_ARGS(task, fd, ufd, arg),
+
+	TP_STRUCT__entry(
+		__array(char, comm, TASK_COMM_LEN)
+		__field(pid_t, pid)
+		__field(struct rtdm_device *, dev)
+		__field(int, ufd)
+		__field(unsigned long, arg)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+		__entry->pid = task_pid_nr(task);
+		__entry->dev = rtdm_fd_to_context(fd)->device;
+		__entry->ufd = ufd;
+		__entry->arg = arg;
+	),
+
+	TP_printk("device=%p fd=%d arg=%#lx pid=%d comm=%s",
+		  __entry->dev, __entry->ufd, __entry->arg,
+		  __entry->pid, __entry->comm)
+);
+
+DECLARE_EVENT_CLASS(fd_request_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd, int status),
+	TP_ARGS(task, fd, ufd, status),
+
+	TP_STRUCT__entry(
+		__array(char, comm, TASK_COMM_LEN)
+		__field(pid_t, pid)
+		__field(struct rtdm_device *, dev)
+		__field(int, ufd)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+		__entry->pid = task_pid_nr(task);
+		__entry->dev =
+			!IS_ERR(fd) ? rtdm_fd_to_context(fd)->device : NULL;
+		__entry->ufd = ufd;
+	),
+
+	TP_printk("device=%p fd=%d pid=%d comm=%s",
+		  __entry->dev, __entry->ufd, __entry->pid, __entry->comm)
+);
+
+DECLARE_EVENT_CLASS(task_op,
+	TP_PROTO(struct xnthread *task),
+	TP_ARGS(task),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, task)
+		__string(task_name, task->name)
+	),
+
+	TP_fast_assign(
+		__entry->task = task;
+		__assign_str(task_name, task->name);
+	),
+
+	TP_printk("task %p(%s)", __entry->task, __get_str(task_name))
+);
+
+DECLARE_EVENT_CLASS(event_op,
+	TP_PROTO(struct rtdm_event *ev),
+	TP_ARGS(ev),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_event *, ev)
+	),
+
+	TP_fast_assign(
+		__entry->ev = ev;
+	),
+
+	TP_printk("event=%p", __entry->ev)
+);
+
+DECLARE_EVENT_CLASS(sem_op,
+	TP_PROTO(struct rtdm_sem *sem),
+	TP_ARGS(sem),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_sem *, sem)
+	),
+
+	TP_fast_assign(
+		__entry->sem = sem;
+	),
+
+	TP_printk("sem=%p", __entry->sem)
+);
+
+DECLARE_EVENT_CLASS(mutex_op,
+	TP_PROTO(struct rtdm_mutex *mutex),
+	TP_ARGS(mutex),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_mutex *, mutex)
+	),
+
+	TP_fast_assign(
+		__entry->mutex = mutex;
+	),
+
+	TP_printk("mutex=%p", __entry->mutex)
+);
+
+TRACE_EVENT(cobalt_device_register,
+	TP_PROTO(struct rtdm_device *dev),
+	TP_ARGS(dev),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_device *, dev)
+		__string(device_name, dev->name)
+		__field(int, flags)
+		__field(int, class_id)
+		__field(int, subclass_id)
+		__field(int, profile_version)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= dev;
+		__assign_str(device_name, dev->name);
+		__entry->flags = dev->driver->device_flags;
+		__entry->class_id = dev->driver->profile_info.class_id;
+		__entry->subclass_id = dev->driver->profile_info.subclass_id;
+		__entry->profile_version = dev->driver->profile_info.version;
+	),
+
+	TP_printk("%s device %s=%p flags=0x%x, class=%d.%d profile=%d",
+		  (__entry->flags & RTDM_DEVICE_TYPE_MASK)
+		  == RTDM_NAMED_DEVICE ? "named" : "protocol",
+		  __get_str(device_name), __entry->dev,
+		  __entry->flags, __entry->class_id, __entry->subclass_id,
+		  __entry->profile_version)
+);
+
+TRACE_EVENT(cobalt_device_unregister,
+	TP_PROTO(struct rtdm_device *dev),
+	TP_ARGS(dev),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_device *, dev)
+		__string(device_name, dev->name)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= dev;
+		__assign_str(device_name, dev->name);
+	),
+
+	TP_printk("device %s=%p",
+		  __get_str(device_name), __entry->dev)
+);
+
+DEFINE_EVENT(fd_event, cobalt_fd_created,
+	TP_PROTO(struct rtdm_fd *fd, int ufd),
+	TP_ARGS(fd, ufd)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_open,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long oflags),
+	TP_ARGS(task, fd, ufd, oflags)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_close,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long lock_count),
+	TP_ARGS(task, fd, ufd, lock_count)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_socket,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long protocol_family),
+	TP_ARGS(task, fd, ufd, protocol_family)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_read,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long len),
+	TP_ARGS(task, fd, ufd, len)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_write,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long len),
+	TP_ARGS(task, fd, ufd, len)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_ioctl,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long request),
+	TP_ARGS(task, fd, ufd, request)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_sendmsg,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long flags),
+	TP_ARGS(task, fd, ufd, flags)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_sendmmsg,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long flags),
+	TP_ARGS(task, fd, ufd, flags)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_recvmsg,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long flags),
+	TP_ARGS(task, fd, ufd, flags)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_recvmmsg,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long flags),
+	TP_ARGS(task, fd, ufd, flags)
+);
+
+#define cobalt_print_protbits(__prot)		\
+	__print_flags(__prot,  "|", 		\
+		      {PROT_EXEC, "exec"},	\
+		      {PROT_READ, "read"},	\
+		      {PROT_WRITE, "write"})
+
+#define cobalt_print_mapbits(__flags)		\
+	__print_flags(__flags,  "|", 		\
+		      {MAP_SHARED, "shared"},	\
+		      {MAP_PRIVATE, "private"},	\
+		      {MAP_ANONYMOUS, "anon"},	\
+		      {MAP_FIXED, "fixed"},	\
+		      {MAP_HUGETLB, "huge"},	\
+		      {MAP_NONBLOCK, "nonblock"},	\
+		      {MAP_NORESERVE, "noreserve"},	\
+		      {MAP_POPULATE, "populate"},	\
+		      {MAP_UNINITIALIZED, "uninit"})
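+
+/*
+ * Editor's note (illustrative, not part of the original patch): with
+ * the helpers above, a request for PROT_READ|PROT_WRITE and
+ * MAP_SHARED|MAP_POPULATE would typically be rendered by
+ * cobalt_fd_mmap as "prot=0x3(read|write) flags=0x8001(shared|populate)",
+ * the exact numeric values being architecture-dependent.
+ */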
+
+TRACE_EVENT(cobalt_fd_mmap,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd, struct _rtdm_mmap_request *rma),
+	TP_ARGS(task, fd, ufd, rma),
+
+	TP_STRUCT__entry(
+		__array(char, comm, TASK_COMM_LEN)
+		__field(pid_t, pid)
+		__field(struct rtdm_device *, dev)
+		__field(int, ufd)
+		__field(size_t, length)
+		__field(off_t, offset)
+		__field(int, prot)
+		__field(int, flags)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+		__entry->pid = task_pid_nr(task);
+		__entry->dev = rtdm_fd_to_context(fd)->device;
+		__entry->ufd = ufd;
+		__entry->length = rma->length;
+		__entry->offset = rma->offset;
+		__entry->prot = rma->prot;
+		__entry->flags = rma->flags;
+	),
+
+	TP_printk("device=%p fd=%d area={ len:%zu, off:%Lu }"
+		  " prot=%#x(%s) flags=%#x(%s) pid=%d comm=%s",
+		  __entry->dev, __entry->ufd, __entry->length,
+		  (unsigned long long)__entry->offset,
+		  __entry->prot, cobalt_print_protbits(__entry->prot),
+		  __entry->flags, cobalt_print_mapbits(__entry->flags),
+		  __entry->pid, __entry->comm)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_ioctl_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_read_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_write_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_recvmsg_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_recvmmsg_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_sendmsg_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_sendmmsg_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_mmap_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(task_op, cobalt_driver_task_join,
+	TP_PROTO(struct xnthread *task),
+	TP_ARGS(task)
+);
+
+TRACE_EVENT(cobalt_driver_event_init,
+	TP_PROTO(struct rtdm_event *ev, unsigned long pending),
+	TP_ARGS(ev, pending),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_event *, ev)
+		__field(unsigned long,	pending)
+	),
+
+	TP_fast_assign(
+		__entry->ev = ev;
+		__entry->pending = pending;
+	),
+
+	TP_printk("event=%p pending=%#lx",
+		  __entry->ev, __entry->pending)
+);
+
+TRACE_EVENT(cobalt_driver_event_wait,
+	TP_PROTO(struct rtdm_event *ev, struct xnthread *task),
+	TP_ARGS(ev, task),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, task)
+		__string(task_name, task->name)
+		__field(struct rtdm_event *, ev)
+	),
+
+	TP_fast_assign(
+		__entry->task = task;
+		__assign_str(task_name, task->name);
+		__entry->ev = ev;
+	),
+
+	TP_printk("event=%p task=%p(%s)",
+		  __entry->ev, __entry->task, __get_str(task_name))
+);
+
+DEFINE_EVENT(event_op, cobalt_driver_event_signal,
+	TP_PROTO(struct rtdm_event *ev),
+	TP_ARGS(ev)
+);
+
+DEFINE_EVENT(event_op, cobalt_driver_event_clear,
+	TP_PROTO(struct rtdm_event *ev),
+	TP_ARGS(ev)
+);
+
+DEFINE_EVENT(event_op, cobalt_driver_event_pulse,
+	TP_PROTO(struct rtdm_event *ev),
+	TP_ARGS(ev)
+);
+
+DEFINE_EVENT(event_op, cobalt_driver_event_destroy,
+	TP_PROTO(struct rtdm_event *ev),
+	TP_ARGS(ev)
+);
+
+TRACE_EVENT(cobalt_driver_sem_init,
+	TP_PROTO(struct rtdm_sem *sem, unsigned long value),
+	TP_ARGS(sem, value),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_sem *, sem)
+		__field(unsigned long, value)
+	),
+
+	TP_fast_assign(
+		__entry->sem = sem;
+		__entry->value = value;
+	),
+
+	TP_printk("sem=%p value=%lu",
+		  __entry->sem, __entry->value)
+);
+
+TRACE_EVENT(cobalt_driver_sem_wait,
+	TP_PROTO(struct rtdm_sem *sem, struct xnthread *task),
+	TP_ARGS(sem, task),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, task)
+		__string(task_name, task->name)
+		__field(struct rtdm_sem *, sem)
+	),
+
+	TP_fast_assign(
+		__entry->task = task;
+		__assign_str(task_name, task->name);
+		__entry->sem = sem;
+	),
+
+	TP_printk("sem=%p task=%p(%s)",
+		  __entry->sem, __entry->task, __get_str(task_name))
+);
+
+DEFINE_EVENT(sem_op, cobalt_driver_sem_up,
+	TP_PROTO(struct rtdm_sem *sem),
+	TP_ARGS(sem)
+);
+
+DEFINE_EVENT(sem_op, cobalt_driver_sem_destroy,
+	TP_PROTO(struct rtdm_sem *sem),
+	TP_ARGS(sem)
+);
+
+DEFINE_EVENT(mutex_op, cobalt_driver_mutex_init,
+	TP_PROTO(struct rtdm_mutex *mutex),
+	TP_ARGS(mutex)
+);
+
+DEFINE_EVENT(mutex_op, cobalt_driver_mutex_release,
+	TP_PROTO(struct rtdm_mutex *mutex),
+	TP_ARGS(mutex)
+);
+
+DEFINE_EVENT(mutex_op, cobalt_driver_mutex_destroy,
+	TP_PROTO(struct rtdm_mutex *mutex),
+	TP_ARGS(mutex)
+);
+
+TRACE_EVENT(cobalt_driver_mutex_wait,
+	TP_PROTO(struct rtdm_mutex *mutex, struct xnthread *task),
+	TP_ARGS(mutex, task),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, task)
+		__string(task_name, task->name)
+		__field(struct rtdm_mutex *, mutex)
+	),
+
+	TP_fast_assign(
+		__entry->task = task;
+		__assign_str(task_name, task->name);
+		__entry->mutex = mutex;
+	),
+
+	TP_printk("mutex=%p task=%p(%s)",
+		  __entry->mutex, __entry->task, __get_str(task_name))
+);
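+
+/*
+ * Editor's note: the driver-visible tracepoints above are emitted
+ * from the RTDM core services, not from drivers directly. As a rough
+ * sketch (call site assumed for illustration, not taken from this
+ * patch), signalling an event would trace as follows:
+ *
+ *	#include <trace/events/cobalt-rtdm.h>
+ *
+ *	void rtdm_event_signal(struct rtdm_event *event)
+ *	{
+ *		trace_cobalt_driver_event_signal(event);
+ *		... wake up waiters ...
+ *	}
+ */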
+
+#endif /* _TRACE_COBALT_RTDM_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cobalt-rtdm
+#include <trace/define_trace.h>
+++ linux-patched/include/trace/events/cobalt-core.h	2022-03-21 12:58:28.896893769 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/pipeline/init.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Jan Kiszka <jan.kiszka@siemens.com>.
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cobalt_core
+
+#if !defined(_TRACE_COBALT_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_COBALT_CORE_H
+
+#include <linux/tracepoint.h>
+#include <linux/math64.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/registry.h>
+#include <cobalt/uapi/kernel/types.h>
+
+struct xnsched;
+struct xnthread;
+struct xnsynch;
+struct xnsched_class;
+struct xnsched_quota_group;
+struct xnthread_init_attr;
+
+DECLARE_EVENT_CLASS(thread_event,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__field(unsigned long, state)
+		__field(unsigned long, info)
+	),
+
+	TP_fast_assign(
+		__entry->state = thread->state;
+		__entry->info = thread->info;
+		__entry->pid = xnthread_host_pid(thread);
+	),
+
+	TP_printk("pid=%d state=0x%lx info=0x%lx",
+		  __entry->pid, __entry->state, __entry->info)
+);
+
+DECLARE_EVENT_CLASS(curr_thread_event,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, thread)
+		__field(unsigned long, state)
+		__field(unsigned long, info)
+	),
+
+	TP_fast_assign(
+		__entry->state = thread->state;
+		__entry->info = thread->info;
+	),
+
+	TP_printk("state=0x%lx info=0x%lx",
+		  __entry->state, __entry->info)
+);
+
+DECLARE_EVENT_CLASS(synch_wait_event,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch),
+
+	TP_STRUCT__entry(
+		__field(struct xnsynch *, synch)
+	),
+
+	TP_fast_assign(
+		__entry->synch = synch;
+	),
+
+	TP_printk("synch=%p", __entry->synch)
+);
+
+DECLARE_EVENT_CLASS(synch_post_event,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch),
+
+	TP_STRUCT__entry(
+		__field(struct xnsynch *, synch)
+	),
+
+	TP_fast_assign(
+		__entry->synch = synch;
+	),
+
+	TP_printk("synch=%p", __entry->synch)
+);
+
+DECLARE_EVENT_CLASS(irq_event,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, irq)
+	),
+
+	TP_fast_assign(
+		__entry->irq = irq;
+	),
+
+	TP_printk("irq=%u", __entry->irq)
+);
+
+DECLARE_EVENT_CLASS(clock_event,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, irq)
+	),
+
+	TP_fast_assign(
+		__entry->irq = irq;
+	),
+
+	TP_printk("clock_irq=%u", __entry->irq)
+);
+
+DECLARE_EVENT_CLASS(timer_event,
+	TP_PROTO(struct xntimer *timer),
+	TP_ARGS(timer),
+
+	TP_STRUCT__entry(
+		__field(struct xntimer *, timer)
+	),
+
+	TP_fast_assign(
+		__entry->timer = timer;
+	),
+
+	TP_printk("timer=%p", __entry->timer)
+);
+
+DECLARE_EVENT_CLASS(registry_event,
+	TP_PROTO(const char *key, void *addr),
+	TP_ARGS(key, addr),
+
+	TP_STRUCT__entry(
+		__string(key, key ?: "(anon)")
+		__field(void *, addr)
+	),
+
+	TP_fast_assign(
+		__assign_str(key, key ?: "(anon)");
+		__entry->addr = addr;
+	),
+
+	TP_printk("key=%s, addr=%p", __get_str(key), __entry->addr)
+);
+
+TRACE_EVENT(cobalt_schedule,
+	TP_PROTO(struct xnsched *sched),
+	TP_ARGS(sched),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, status)
+	),
+
+	TP_fast_assign(
+		__entry->status = sched->status;
+	),
+
+	TP_printk("status=0x%lx", __entry->status)
+);
+
+TRACE_EVENT(cobalt_schedule_remote,
+	TP_PROTO(struct xnsched *sched),
+	TP_ARGS(sched),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, status)
+	),
+
+	TP_fast_assign(
+		__entry->status = sched->status;
+	),
+
+	TP_printk("status=0x%lx", __entry->status)
+);
+
+TRACE_EVENT(cobalt_switch_context,
+	TP_PROTO(struct xnthread *prev, struct xnthread *next),
+	TP_ARGS(prev, next),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, prev)
+		__string(prev_name, prev->name)
+		__field(pid_t, prev_pid)
+		__field(int, prev_prio)
+		__field(unsigned long, prev_state)
+		__field(struct xnthread *, next)
+		__string(next_name, next->name)
+		__field(pid_t, next_pid)
+		__field(int, next_prio)
+	),
+
+	TP_fast_assign(
+		__entry->prev = prev;
+		__assign_str(prev_name, prev->name);
+		__entry->prev_pid = xnthread_host_pid(prev);
+		__entry->prev_prio = xnthread_current_priority(prev);
+		__entry->prev_state = prev->state;
+		__entry->next = next;
+		__assign_str(next_name, next->name);
+		__entry->next_pid = xnthread_host_pid(next);
+		__entry->next_prio = xnthread_current_priority(next);
+	),
+
+	TP_printk("prev_name=%s prev_pid=%d prev_prio=%d prev_state=0x%lx ==> next_name=%s next_pid=%d next_prio=%d",
+		  __get_str(prev_name), __entry->prev_pid,
+		  __entry->prev_prio, __entry->prev_state,
+		  __get_str(next_name), __entry->next_pid, __entry->next_prio)
+);
+
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+
+TRACE_EVENT(cobalt_schedquota_refill,
+	TP_PROTO(int dummy),
+	TP_ARGS(dummy),
+
+	TP_STRUCT__entry(
+		__field(int, dummy)
+	),
+
+	TP_fast_assign(
+		(void)dummy;
+	),
+
+	TP_printk("%s", "")
+);
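+
+/*
+ * Editor's note: cobalt_schedquota_refill carries no payload; the
+ * dummy argument is discarded by the (void) cast above, so the record
+ * is effectively a bare timestamped marker for quota refill points.
+ */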
+
+DECLARE_EVENT_CLASS(schedquota_group_event,
+	TP_PROTO(struct xnsched_quota_group *tg),
+	TP_ARGS(tg),
+
+	TP_STRUCT__entry(
+		__field(int, tgid)
+	),
+
+	TP_fast_assign(
+		__entry->tgid = tg->tgid;
+	),
+
+	TP_printk("tgid=%d",
+		  __entry->tgid)
+);
+
+DEFINE_EVENT(schedquota_group_event, cobalt_schedquota_create_group,
+	TP_PROTO(struct xnsched_quota_group *tg),
+	TP_ARGS(tg)
+);
+
+DEFINE_EVENT(schedquota_group_event, cobalt_schedquota_destroy_group,
+	TP_PROTO(struct xnsched_quota_group *tg),
+	TP_ARGS(tg)
+);
+
+TRACE_EVENT(cobalt_schedquota_set_limit,
+	TP_PROTO(struct xnsched_quota_group *tg,
+		 int percent,
+		 int peak_percent),
+	TP_ARGS(tg, percent, peak_percent),
+
+	TP_STRUCT__entry(
+		__field(int, tgid)
+		__field(int, percent)
+		__field(int, peak_percent)
+	),
+
+	TP_fast_assign(
+		__entry->tgid = tg->tgid;
+		__entry->percent = percent;
+		__entry->peak_percent = peak_percent;
+	),
+
+	TP_printk("tgid=%d percent=%d peak_percent=%d",
+		  __entry->tgid, __entry->percent, __entry->peak_percent)
+);
+
+DECLARE_EVENT_CLASS(schedquota_thread_event,
+	TP_PROTO(struct xnsched_quota_group *tg,
+		 struct xnthread *thread),
+	TP_ARGS(tg, thread),
+
+	TP_STRUCT__entry(
+		__field(int, tgid)
+		__field(struct xnthread *, thread)
+		__field(pid_t, pid)
+	),
+
+	TP_fast_assign(
+		__entry->tgid = tg->tgid;
+		__entry->thread = thread;
+		__entry->pid = xnthread_host_pid(thread);
+	),
+
+	TP_printk("tgid=%d thread=%p pid=%d",
+		  __entry->tgid, __entry->thread, __entry->pid)
+);
+
+DEFINE_EVENT(schedquota_thread_event, cobalt_schedquota_add_thread,
+	TP_PROTO(struct xnsched_quota_group *tg,
+		 struct xnthread *thread),
+	TP_ARGS(tg, thread)
+);
+
+DEFINE_EVENT(schedquota_thread_event, cobalt_schedquota_remove_thread,
+	TP_PROTO(struct xnsched_quota_group *tg,
+		 struct xnthread *thread),
+	TP_ARGS(tg, thread)
+);
+
+#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */
+
+TRACE_EVENT(cobalt_thread_init,
+	TP_PROTO(struct xnthread *thread,
+		 const struct xnthread_init_attr *attr,
+		 struct xnsched_class *sched_class),
+	TP_ARGS(thread, attr, sched_class),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, thread)
+		__string(thread_name, thread->name)
+		__string(class_name, sched_class->name)
+		__field(unsigned long, flags)
+		__field(int, cprio)
+	),
+
+	TP_fast_assign(
+		__entry->thread = thread;
+		__assign_str(thread_name, thread->name);
+		__entry->flags = attr->flags;
+		__assign_str(class_name, sched_class->name);
+		__entry->cprio = thread->cprio;
+	),
+
+	TP_printk("thread=%p name=%s flags=0x%lx class=%s prio=%d",
+		   __entry->thread, __get_str(thread_name), __entry->flags,
+		   __get_str(class_name), __entry->cprio)
+);
+
+TRACE_EVENT(cobalt_thread_suspend,
+	TP_PROTO(struct xnthread *thread, unsigned long mask, xnticks_t timeout,
+		 xntmode_t timeout_mode, struct xnsynch *wchan),
+	TP_ARGS(thread, mask, timeout, timeout_mode, wchan),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__field(unsigned long, mask)
+		__field(xnticks_t, timeout)
+		__field(xntmode_t, timeout_mode)
+		__field(struct xnsynch *, wchan)
+	),
+
+	TP_fast_assign(
+		__entry->pid = xnthread_host_pid(thread);
+		__entry->mask = mask;
+		__entry->timeout = timeout;
+		__entry->timeout_mode = timeout_mode;
+		__entry->wchan = wchan;
+	),
+
+	TP_printk("pid=%d mask=0x%lx timeout=%Lu timeout_mode=%d wchan=%p",
+		  __entry->pid, __entry->mask,
+		  __entry->timeout, __entry->timeout_mode, __entry->wchan)
+);
+
+TRACE_EVENT(cobalt_thread_resume,
+	TP_PROTO(struct xnthread *thread, unsigned long mask),
+	TP_ARGS(thread, mask),
+
+	TP_STRUCT__entry(
+		__string(name, thread->name)
+		__field(pid_t, pid)
+		__field(unsigned long, mask)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, thread->name);
+		__entry->pid = xnthread_host_pid(thread);
+		__entry->mask = mask;
+	),
+
+	TP_printk("name=%s pid=%d mask=0x%lx",
+		  __get_str(name), __entry->pid, __entry->mask)
+);
+
+TRACE_EVENT(cobalt_thread_fault,
+	TP_PROTO(unsigned long ip, unsigned int type),
+	TP_ARGS(ip, type),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, ip)
+		__field(unsigned int, type)
+	),
+
+	TP_fast_assign(
+		__entry->ip = ip;
+		__entry->type = type;
+	),
+
+	TP_printk("ip=%#lx type=%#x",
+		  __entry->ip, __entry->type)
+);
+
+TRACE_EVENT(cobalt_thread_set_current_prio,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, thread)
+		__field(pid_t, pid)
+		__field(int, cprio)
+	),
+
+	TP_fast_assign(
+		__entry->thread = thread;
+		__entry->pid = xnthread_host_pid(thread);
+		__entry->cprio = xnthread_current_priority(thread);
+	),
+
+	TP_printk("thread=%p pid=%d prio=%d",
+		  __entry->thread, __entry->pid, __entry->cprio)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_start,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_cancel,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_join,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_unblock,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_thread_wait_period,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_thread_missed_period,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_thread_set_mode,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+TRACE_EVENT(cobalt_thread_migrate,
+	TP_PROTO(unsigned int cpu),
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+	),
+
+	TP_printk("cpu=%u", __entry->cpu)
+);
+
+TRACE_EVENT(cobalt_thread_migrate_passive,
+	TP_PROTO(struct xnthread *thread, unsigned int cpu),
+	TP_ARGS(thread, cpu),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, thread)
+		__field(pid_t, pid)
+		__field(unsigned int, cpu)
+	),
+
+	TP_fast_assign(
+		__entry->thread = thread;
+		__entry->pid = xnthread_host_pid(thread);
+		__entry->cpu = cpu;
+	),
+
+	TP_printk("thread=%p pid=%d cpu=%u",
+		  __entry->thread, __entry->pid, __entry->cpu)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_shadow_gohard,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_watchdog_signal,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_shadow_hardened,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+#define cobalt_print_relax_reason(reason)				\
+	__print_symbolic(reason,					\
+			 { SIGDEBUG_UNDEFINED,		"undefined" },	\
+			 { SIGDEBUG_MIGRATE_SIGNAL,	"signal" },	\
+			 { SIGDEBUG_MIGRATE_SYSCALL,	"syscall" },	\
+			 { SIGDEBUG_MIGRATE_FAULT,	"fault" })
+
+TRACE_EVENT(cobalt_shadow_gorelax,
+	TP_PROTO(int reason),
+	TP_ARGS(reason),
+
+	TP_STRUCT__entry(
+		__field(int, reason)
+	),
+
+	TP_fast_assign(
+		__entry->reason = reason;
+	),
+
+	TP_printk("reason=%s", cobalt_print_relax_reason(__entry->reason))
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_shadow_relaxed,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_shadow_entry,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+TRACE_EVENT(cobalt_shadow_map,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, thread)
+		__field(pid_t, pid)
+		__field(int, prio)
+	),
+
+	TP_fast_assign(
+		__entry->thread = thread;
+		__entry->pid = xnthread_host_pid(thread);
+		__entry->prio = xnthread_base_priority(thread);
+	),
+
+	TP_printk("thread=%p pid=%d prio=%d",
+		  __entry->thread, __entry->pid, __entry->prio)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_shadow_unmap,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+TRACE_EVENT(cobalt_lostage_request,
+	TP_PROTO(const char *type, struct task_struct *task),
+	TP_ARGS(type, task),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__array(char, comm, TASK_COMM_LEN)
+		__field(const char *, type)
+	),
+
+	TP_fast_assign(
+		__entry->type = type;
+		__entry->pid = task_pid_nr(task);
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+	),
+
+	TP_printk("request=%s pid=%d comm=%s",
+		  __entry->type, __entry->pid, __entry->comm)
+);
+
+TRACE_EVENT(cobalt_lostage_wakeup,
+	TP_PROTO(struct task_struct *task),
+	TP_ARGS(task),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__array(char, comm, TASK_COMM_LEN)
+	),
+
+	TP_fast_assign(
+		__entry->pid = task_pid_nr(task);
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+	),
+
+	TP_printk("pid=%d comm=%s",
+		  __entry->pid, __entry->comm)
+);
+
+TRACE_EVENT(cobalt_lostage_signal,
+	TP_PROTO(struct task_struct *task, int sig),
+	TP_ARGS(task, sig),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__array(char, comm, TASK_COMM_LEN)
+		__field(int, sig)
+	),
+
+	TP_fast_assign(
+		__entry->pid = task_pid_nr(task);
+		__entry->sig = sig;
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+	),
+
+	TP_printk("pid=%d comm=%s sig=%d",
+		  __entry->pid, __entry->comm, __entry->sig)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_entry,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_exit,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_attach,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_detach,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_enable,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_disable,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(clock_event, cobalt_clock_entry,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(clock_event, cobalt_clock_exit,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(timer_event, cobalt_timer_stop,
+	TP_PROTO(struct xntimer *timer),
+	TP_ARGS(timer)
+);
+
+DEFINE_EVENT(timer_event, cobalt_timer_expire,
+	TP_PROTO(struct xntimer *timer),
+	TP_ARGS(timer)
+);
+
+#define cobalt_print_timer_mode(mode)			\
+	__print_symbolic(mode,				\
+			 { XN_RELATIVE, "rel" },	\
+			 { XN_ABSOLUTE, "abs" },	\
+			 { XN_REALTIME, "rt" })
+
+TRACE_EVENT(cobalt_timer_start,
+	TP_PROTO(struct xntimer *timer, xnticks_t value, xnticks_t interval,
+		 xntmode_t mode),
+	TP_ARGS(timer, value, interval, mode),
+
+	TP_STRUCT__entry(
+		__field(struct xntimer *, timer)
+#ifdef CONFIG_XENO_OPT_STATS
+		__string(name, timer->name)
+#endif
+		__field(xnticks_t, value)
+		__field(xnticks_t, interval)
+		__field(xntmode_t, mode)
+	),
+
+	TP_fast_assign(
+		__entry->timer = timer;
+#ifdef CONFIG_XENO_OPT_STATS
+		__assign_str(name, timer->name);
+#endif
+		__entry->value = value;
+		__entry->interval = interval;
+		__entry->mode = mode;
+	),
+
+	TP_printk("timer=%p(%s) value=%Lu interval=%Lu mode=%s",
+		  __entry->timer,
+#ifdef CONFIG_XENO_OPT_STATS
+		  __get_str(name),
+#else
+		  "(anon)",
+#endif
+		  __entry->value, __entry->interval,
+		  cobalt_print_timer_mode(__entry->mode))
+);
+
+#ifdef CONFIG_SMP
+
+TRACE_EVENT(cobalt_timer_migrate,
+	TP_PROTO(struct xntimer *timer, unsigned int cpu),
+	TP_ARGS(timer, cpu),
+
+	TP_STRUCT__entry(
+		__field(struct xntimer *, timer)
+		__field(unsigned int, cpu)
+	),
+
+	TP_fast_assign(
+		__entry->timer = timer;
+		__entry->cpu = cpu;
+	),
+
+	TP_printk("timer=%p cpu=%u",
+		  __entry->timer, __entry->cpu)
+);
+
+#endif /* CONFIG_SMP */
+
+DEFINE_EVENT(synch_wait_event, cobalt_synch_sleepon,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_wait_event, cobalt_synch_try_acquire,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_wait_event, cobalt_synch_acquire,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_release,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup_many,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_flush,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_forget,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(registry_event, cobalt_registry_enter,
+	TP_PROTO(const char *key, void *addr),
+	TP_ARGS(key, addr)
+);
+
+DEFINE_EVENT(registry_event, cobalt_registry_remove,
+	TP_PROTO(const char *key, void *addr),
+	TP_ARGS(key, addr)
+);
+
+DEFINE_EVENT(registry_event, cobalt_registry_unlink,
+	TP_PROTO(const char *key, void *addr),
+	TP_ARGS(key, addr)
+);
+
+TRACE_EVENT(cobalt_tick_shot,
+	TP_PROTO(s64 delta),
+	TP_ARGS(delta),
+
+	TP_STRUCT__entry(
+		__field(u64, secs)
+		__field(u32, nsecs)
+		__field(s64, delta)
+	),
+
+	TP_fast_assign(
+		__entry->delta = delta;
+		__entry->secs = div_u64_rem(trace_clock_local() + delta,
+					    NSEC_PER_SEC, &__entry->nsecs);
+	),
+
+	TP_printk("next tick at %Lu.%06u (delay: %Ld us)",
+		  (unsigned long long)__entry->secs,
+		  __entry->nsecs / 1000, div_s64(__entry->delta, 1000))
+);
+
+TRACE_EVENT(cobalt_trace,
+	TP_PROTO(const char *msg),
+	TP_ARGS(msg),
+	TP_STRUCT__entry(
+		__string(msg, msg)
+	),
+	TP_fast_assign(
+		__assign_str(msg, msg);
+	),
+	TP_printk("%s", __get_str(msg))
+);
+
+TRACE_EVENT(cobalt_trace_longval,
+	TP_PROTO(int id, u64 val),
+	TP_ARGS(id, val),
+	TP_STRUCT__entry(
+		__field(int, id)
+		__field(u64, val)
+	),
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->val = val;
+	),
+	TP_printk("id=%#x, v=%llu", __entry->id, __entry->val)
+);
+
+TRACE_EVENT(cobalt_trace_pid,
+	TP_PROTO(pid_t pid, int prio),
+	TP_ARGS(pid, prio),
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__field(int, prio)
+	),
+	TP_fast_assign(
+		__entry->pid = pid;
+		__entry->prio = prio;
+	),
+	TP_printk("pid=%d, prio=%d", __entry->pid, __entry->prio)
+);
+
+TRACE_EVENT(cobalt_latpeak,
+	TP_PROTO(int latmax_ns),
+	TP_ARGS(latmax_ns),
+	TP_STRUCT__entry(
+		 __field(int, latmax_ns)
+	),
+	TP_fast_assign(
+		__entry->latmax_ns = latmax_ns;
+	),
+	TP_printk("** latency peak: %d.%.3d us **",
+		  __entry->latmax_ns / 1000,
+		  __entry->latmax_ns % 1000)
+);
+
+/* Basically cobalt_trace() + trigger point */
+TRACE_EVENT(cobalt_trigger,
+	TP_PROTO(const char *issuer),
+	TP_ARGS(issuer),
+	TP_STRUCT__entry(
+		__string(issuer, issuer)
+	),
+	TP_fast_assign(
+		__assign_str(issuer, issuer);
+	),
+	TP_printk("%s", __get_str(issuer))
+);
+
+#endif /* _TRACE_COBALT_CORE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cobalt-core
+#include <trace/define_trace.h>
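+
+/*
+ * Editor's note (usage sketch, not part of the original patch): once
+ * built in, these events show up under the standard tracefs hierarchy
+ * and can be enabled individually or per subsystem, e.g. by writing 1
+ * to /sys/kernel/debug/tracing/events/cobalt_core/enable (or the
+ * equivalent tracefs mount point) and reading the records back from
+ * trace_pipe.
+ */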
+++ linux-patched/kernel/xenomai/pipeline/init.c	2022-03-21 12:58:29.144891350 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/pipeline/sched.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#include <linux/init.h>
+#include <pipeline/machine.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/assert.h>
+
+int __init pipeline_init(void)
+{
+	int ret;
+
+	if (cobalt_machine.init) {
+		ret = cobalt_machine.init();
+		if (ret)
+			return ret;
+	}
+
+	/* Enable the Xenomai out-of-band stage */
+	enable_oob_stage("Xenomai");
+
+	ret = xnclock_init();
+	if (ret)
+		goto fail_clock;
+
+	return 0;
+
+fail_clock:
+	if (cobalt_machine.cleanup)
+		cobalt_machine.cleanup();
+
+	return ret;
+}
+
+int __init pipeline_late_init(void)
+{
+	if (cobalt_machine.late_init)
+		return cobalt_machine.late_init();
+
+	return 0;
+}
+
+__init void pipeline_cleanup(void)
+{
+	/* Disable the Xenomai stage */
+	disable_oob_stage();
+
+	xnclock_cleanup();
+}
+++ linux-patched/kernel/xenomai/pipeline/sched.c	2022-03-21 12:58:29.141891380 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/pipeline/intr.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2001-2020 Philippe Gerum <rpm@xenomai.org>.
+ */
+
+#include <linux/cpuidle.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/sched.h>
+#include <pipeline/sched.h>
+#include <trace/events/cobalt-core.h>
+
+/* in-band stage, hard_irqs_disabled() */
+bool irq_cpuidle_control(struct cpuidle_device *dev,
+			struct cpuidle_state *state)
+{
+	/*
+	 * Deny entering sleep state if this entails stopping the
+	 * timer (i.e. C3STOP misfeature).
+	 */
+	if (state && (state->flags & CPUIDLE_FLAG_TIMER_STOP))
+		return false;
+
+	return true;
+}
+
+bool pipeline_switch_to(struct xnthread *prev, struct xnthread *next,
+			bool leaving_inband)
+{
+	return dovetail_context_switch(&xnthread_archtcb(prev)->altsched,
+			&xnthread_archtcb(next)->altsched, leaving_inband);
+}
+
+void pipeline_init_shadow_tcb(struct xnthread *thread)
+{
+	/*
+	 * Initialize the alternate scheduling control block.
+	 */
+	dovetail_init_altsched(&xnthread_archtcb(thread)->altsched);
+
+	trace_cobalt_shadow_map(thread);
+}
+
+void pipeline_init_root_tcb(struct xnthread *thread)
+{
+	/*
+	 * Initialize the alternate scheduling control block.
+	 */
+	dovetail_init_altsched(&xnthread_archtcb(thread)->altsched);
+}
+
+int pipeline_leave_inband(void)
+{
+	return dovetail_leave_inband();
+}
+
+int pipeline_leave_oob_prepare(void)
+{
+	int suspmask = XNRELAX;
+	struct xnthread *curr = xnthread_current();
+
+	dovetail_leave_oob();
+	/*
+	 * If current is being debugged, record that it should migrate
+	 * back in case it resumes in userspace. If it resumes in
+	 * kernel space, i.e. over a restarting syscall, the
+	 * associated hardening will clear XNCONTHI.
+	 */
+	if (xnthread_test_state(curr, XNSSTEP)) {
+		xnthread_set_info(curr, XNCONTHI);
+		dovetail_request_ucall(current);
+		suspmask |= XNDBGSTOP;
+	}
+	return suspmask;
+}
+
+void pipeline_leave_oob_finish(void)
+{
+	dovetail_resume_inband();
+}
+
+void pipeline_raise_mayday(struct task_struct *tsk)
+{
+	dovetail_send_mayday(tsk);
+}
+
+void pipeline_clear_mayday(void) /* May solely affect current. */
+{
+	clear_thread_flag(TIF_MAYDAY);
+}
+
+irqreturn_t pipeline_reschedule_ipi_handler(int irq, void *dev_id)
+{
+	trace_cobalt_schedule_remote(xnsched_current());
+
+	/* Will reschedule from irq_exit_pipeline(). */
+
+	return IRQ_HANDLED;
+}
+++ linux-patched/kernel/xenomai/pipeline/intr.c	2022-03-21 12:58:29.137891419 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/pipeline/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq_pipeline.h>
+#include <linux/tick.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/intr.h>
+
+void xnintr_host_tick(struct xnsched *sched) /* hard irqs off */
+{
+	sched->lflags &= ~XNHTICK;
+	tick_notify_proxy();
+}
+
+/*
+ * Low-level core clock irq handler. This one forwards ticks from the
+ * Xenomai platform timer to nkclock exclusively.
+ */
+void xnintr_core_clock_handler(void)
+{
+	struct xnsched *sched;
+
+	xnlock_get(&nklock);
+	xnclock_tick(&nkclock);
+	xnlock_put(&nklock);
+
+	/*
+	 * If the core clock interrupt preempted a real-time thread,
+	 * any transition to the root thread has already triggered a
+	 * host tick propagation from xnsched_run(), so at this point,
+	 * we only need to propagate the host tick in case the
+	 * interrupt preempted the root thread.
+	 */
+	sched = xnsched_current();
+	if ((sched->lflags & XNHTICK) &&
+	    xnthread_test_state(sched->curr, XNROOT))
+		xnintr_host_tick(sched);
+}
+
+static irqreturn_t xnintr_irq_handler(int irq, void *dev_id)
+{
+	struct xnintr *intr = dev_id;
+	int ret;
+
+	ret = intr->isr(intr);
+	XENO_WARN_ON_ONCE(USER, (ret & XN_IRQ_STATMASK) == 0);
+
+	if (ret & XN_IRQ_DISABLE)
+		disable_irq(irq);
+	else if (ret & XN_IRQ_PROPAGATE)
+		irq_post_inband(irq);
+
+	return ret & XN_IRQ_NONE ? IRQ_NONE : IRQ_HANDLED;
+}
+
+int xnintr_init(struct xnintr *intr, const char *name,
+		unsigned int irq, xnisr_t isr, xniack_t iack,
+		int flags)
+{
+	secondary_mode_only();
+
+	intr->irq = irq;
+	intr->isr = isr;
+	intr->iack = NULL;	/* unused */
+	intr->cookie = NULL;
+	intr->name = name ? : "<unknown>";
+	intr->flags = flags;
+	intr->status = 0;
+	intr->unhandled = 0;	/* unused */
+	raw_spin_lock_init(&intr->lock); /* unused */
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnintr_init);
+
+void xnintr_destroy(struct xnintr *intr)
+{
+	secondary_mode_only();
+	xnintr_detach(intr);
+}
+EXPORT_SYMBOL_GPL(xnintr_destroy);
+
+int xnintr_attach(struct xnintr *intr, void *cookie, const cpumask_t *cpumask)
+{
+	cpumask_t tmp_mask, *effective_mask;
+	int ret;
+
+	secondary_mode_only();
+
+	intr->cookie = cookie;
+
+	if (!cpumask) {
+		effective_mask = &xnsched_realtime_cpus;
+	} else {
+		effective_mask = &tmp_mask;
+		cpumask_and(effective_mask, &xnsched_realtime_cpus, cpumask);
+		if (cpumask_empty(effective_mask))
+			return -EINVAL;
+	}
+	ret = irq_set_affinity_hint(intr->irq, effective_mask);
+	if (ret)
+		return ret;
+
+	return request_irq(intr->irq, xnintr_irq_handler, IRQF_OOB,
+			intr->name, intr);
+}
+EXPORT_SYMBOL_GPL(xnintr_attach);
+
+void xnintr_detach(struct xnintr *intr)
+{
+	secondary_mode_only();
+	irq_set_affinity_hint(intr->irq, NULL);
+	free_irq(intr->irq, intr);
+}
+EXPORT_SYMBOL_GPL(xnintr_detach);
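+
+/*
+ * Editor's note: minimal usage sketch for the interface above
+ * (hypothetical driver code, not part of the original patch; the
+ * device name, IRQ number and XN_IRQ_HANDLED return code are
+ * assumptions for illustration):
+ *
+ *	static struct xnintr my_intr;
+ *
+ *	static int my_isr(struct xnintr *intr)
+ *	{
+ *		... acknowledge the device, runs from the oob stage ...
+ *		return XN_IRQ_HANDLED;
+ *	}
+ *
+ *	xnintr_init(&my_intr, "my-device", irq, my_isr, NULL, 0);
+ *	ret = xnintr_attach(&my_intr, NULL, NULL);
+ *	...
+ *	xnintr_detach(&my_intr);
+ */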
+
+void xnintr_enable(struct xnintr *intr)
+{
+}
+EXPORT_SYMBOL_GPL(xnintr_enable);
+
+void xnintr_disable(struct xnintr *intr)
+{
+}
+EXPORT_SYMBOL_GPL(xnintr_disable);
+
+int xnintr_affinity(struct xnintr *intr, const cpumask_t *cpumask)
+{
+	cpumask_t effective_mask;
+
+	secondary_mode_only();
+
+	cpumask_and(&effective_mask, &xnsched_realtime_cpus, cpumask);
+	if (cpumask_empty(&effective_mask))
+		return -EINVAL;
+
+	return irq_set_affinity_hint(intr->irq, &effective_mask);
+}
+EXPORT_SYMBOL_GPL(xnintr_affinity);
+++ linux-patched/kernel/xenomai/pipeline/Makefile	2022-03-21 12:58:29.134891448 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:2 @
--- linux/kernel/xenomai/pipeline/syscall.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/kernel
+
+obj-y +=	pipeline.o
+
+pipeline-y :=	init.o kevents.o sched.o tick.o syscall.o intr.o
+++ linux-patched/kernel/xenomai/pipeline/syscall.c	2022-03-21 12:58:29.130891487 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/pipeline/tick.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>
+ * Copyright (C) 2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ */
+
+#include <linux/irqstage.h>
+#include <pipeline/pipeline.h>
+#include <pipeline/kevents.h>
+#include <cobalt/kernel/assert.h>
+#include <xenomai/posix/syscall.h>
+
+int handle_pipelined_syscall(struct irq_stage *stage, struct pt_regs *regs)
+{
+	if (unlikely(running_inband()))
+		return handle_root_syscall(regs);
+
+	return handle_head_syscall(stage == &inband_stage, regs);
+}
+
+int handle_oob_syscall(struct pt_regs *regs)
+{
+	return handle_head_syscall(false, regs);
+}
+++ linux-patched/kernel/xenomai/pipeline/tick.c	2022-03-21 12:58:29.127891516 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/pipeline/kevents.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2001,2002,2003,2007,2012 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ */
+
+#include <linux/tick.h>
+#include <linux/clockchips.h>
+#include <cobalt/kernel/intr.h>
+#include <pipeline/tick.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/timer.h>
+
+static DEFINE_PER_CPU(struct clock_proxy_device *, proxy_device);
+
+const char *pipeline_timer_name(void)
+{
+	struct clock_proxy_device *dev = per_cpu(proxy_device, 0);
+	struct clock_event_device *real_dev = dev->real_device;
+
+	/*
+	 * Return the name of the current clock event chip, which is
+	 * the real device controlled by the proxy tick device.
+	 */
+	return real_dev->name;
+}
+
+void pipeline_set_timer_shot(unsigned long delay) /* ns */
+{
+	struct clock_proxy_device *dev = __this_cpu_read(proxy_device);
+	struct clock_event_device *real_dev = dev->real_device;
+	u64 cycles;
+	ktime_t t;
+	int ret;
+
+	if (real_dev->features & CLOCK_EVT_FEAT_KTIME) {
+		t = ktime_add(delay, xnclock_core_read_raw());
+		real_dev->set_next_ktime(t, real_dev);
+	} else {
+		if (delay <= 0) {
+			delay = real_dev->min_delta_ns;
+		} else {
+			delay = min_t(int64_t, delay,
+				real_dev->max_delta_ns);
+			delay = max_t(int64_t, delay,
+				real_dev->min_delta_ns);
+		}
+		cycles = ((u64)delay * real_dev->mult) >> real_dev->shift;
+		ret = real_dev->set_next_event(cycles, real_dev);
+		if (ret)
+			real_dev->set_next_event(real_dev->min_delta_ticks,
+						real_dev);
+	}
+}
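+
+/*
+ * Editor's note (worked example, numbers assumed for illustration):
+ * the mult/shift pair scales nanoseconds to device cycles as
+ * cycles = (delay_ns * mult) >> shift. For a timer clocked at 24 MHz,
+ * mult/2^shift is roughly 0.024 cycles per nanosecond, so a 1 ms
+ * (1,000,000 ns) delay programs about 24,000 cycles.
+ */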
+
+static int proxy_set_next_ktime(ktime_t expires,
+				struct clock_event_device *proxy_dev) /* hard irqs on/off */
+{
+	struct xnsched *sched;
+	unsigned long flags;
+	ktime_t delta;
+	int ret;
+
+	/*
+	 * Expiration dates of in-band timers are based on the common
+	 * monotonic time base. If the timeout date has already
+	 * elapsed, make sure xntimer_start() does not fail with
+	 * -ETIMEDOUT but programs the hardware for ticking
+	 * immediately instead.
+	 */
+	delta = ktime_sub(expires, ktime_get());
+	if (delta < 0)
+		delta = 0;
+
+	xnlock_get_irqsave(&nklock, flags);
+	sched = xnsched_current();
+	ret = xntimer_start(&sched->htimer, delta, XN_INFINITE, XN_RELATIVE);
+	xnlock_put_irqrestore(&nklock, flags);
+
+	return ret ? -ETIME : 0;
+}
+
+bool pipeline_must_force_program_tick(struct xnsched *sched)
+{
+	return sched->lflags & XNTSTOP;
+}
+
+static int proxy_set_oneshot_stopped(struct clock_event_device *proxy_dev)
+{
+	struct clock_event_device *real_dev;
+	struct clock_proxy_device *dev;
+	struct xnsched *sched;
+	spl_t s;
+
+	dev = container_of(proxy_dev, struct clock_proxy_device, proxy_device);
+
+	/*
+	 * In-band wants to disable the clock hardware on entering a
+	 * tickless state, so we have to stop our in-band tick
+	 * emulation. Propagate the request for shutting down the
+	 * hardware to the real device only if we have no outstanding
+	 * OOB timers. CAUTION: the in-band timer is counted when
+	 * assessing the XNIDLE condition, so we need to stop it
+	 * prior to testing the latter.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+	sched = xnsched_current();
+	xntimer_stop(&sched->htimer);
+	sched->lflags |= XNTSTOP;
+
+	if (sched->lflags & XNIDLE) {
+		real_dev = dev->real_device;
+		real_dev->set_state_oneshot_stopped(real_dev);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+static void setup_proxy(struct clock_proxy_device *dev)
+{
+	struct clock_event_device *proxy_dev = &dev->proxy_device;
+
+	dev->handle_oob_event = (typeof(dev->handle_oob_event))
+		xnintr_core_clock_handler;
+	proxy_dev->features |= CLOCK_EVT_FEAT_KTIME;
+	proxy_dev->set_next_ktime = proxy_set_next_ktime;
+	if (proxy_dev->set_state_oneshot_stopped)
+		proxy_dev->set_state_oneshot_stopped = proxy_set_oneshot_stopped;
+	__this_cpu_write(proxy_device, dev);
+}
+
+#ifdef CONFIG_SMP
+static irqreturn_t tick_ipi_handler(int irq, void *dev_id)
+{
+	xnintr_core_clock_handler();
+
+	return IRQ_HANDLED;
+}
+#endif
+
+int pipeline_install_tick_proxy(void)
+{
+	int ret;
+
+#ifdef CONFIG_SMP
+	/*
+	 * We may be running an SMP kernel on a uniprocessor machine
+	 * whose interrupt controller provides no IPI: attempt to hook
+	 * the timer IPI only if the hardware can support multiple
+	 * CPUs.
+	 */
+	if (num_possible_cpus() > 1) {
+		ret = __request_percpu_irq(TIMER_OOB_IPI,
+					tick_ipi_handler,
+					IRQF_OOB, "Xenomai timer IPI",
+					&cobalt_machine_cpudata);
+		if (ret)
+			return ret;
+	}
+#endif
+
+	/* Install the proxy tick device */
+	ret = tick_install_proxy(setup_proxy, &xnsched_realtime_cpus);
+	if (ret)
+		goto fail_proxy;
+
+	return 0;
+
+fail_proxy:
+#ifdef CONFIG_SMP
+	if (num_possible_cpus() > 1)
+		free_percpu_irq(TIMER_OOB_IPI, &cobalt_machine_cpudata);
+#endif
+
+	return ret;
+}
+
+void pipeline_uninstall_tick_proxy(void)
+{
+	/* Uninstall the proxy tick device. */
+	tick_uninstall_proxy(&xnsched_realtime_cpus);
+
+#ifdef CONFIG_SMP
+	if (num_possible_cpus() > 1)
+		free_percpu_irq(TIMER_OOB_IPI, &cobalt_machine_cpudata);
+#endif
+}
+++ linux-patched/kernel/xenomai/pipeline/kevents.c	2022-03-21 12:58:29.123891555 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/rtdm/fd.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2001-2014 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2001-2014 The Xenomai project <http://www.xenomai.org>
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * SMP support Copyright (C) 2004 The HYADES project <http://www.hyades-itea.org>
+ * RTAI/fusion Copyright (C) 2004 The RTAI project <http://www.rtai.org>
+ */
+
+#include <linux/ptrace.h>
+#include <pipeline/kevents.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/vdso.h>
+#include <cobalt/kernel/init.h>
+#include <rtdm/driver.h>
+#include <trace/events/cobalt-core.h>
+#include "../posix/process.h"
+#include "../posix/thread.h"
+#include "../posix/memory.h"
+
+void arch_inband_task_init(struct task_struct *tsk)
+{
+	struct cobalt_threadinfo *p = dovetail_task_state(tsk);
+
+	p->thread = NULL;
+	p->process = NULL;
+}
+
+void handle_oob_trap_entry(unsigned int trapnr, struct pt_regs *regs)
+{
+	struct xnthread *thread;
+	struct xnsched *sched;
+	spl_t s;
+
+	sched = xnsched_current();
+	thread = sched->curr;
+
+	/*
+	 * Enable back tracing.
+	 */
+	trace_cobalt_thread_fault(xnarch_fault_pc(regs), trapnr);
+
+	if (xnthread_test_state(thread, XNROOT))
+		return;
+
+	if (xnarch_fault_bp_p(trapnr) && user_mode(regs)) {
+		XENO_WARN_ON(CORE, xnthread_test_state(thread, XNRELAX));
+		xnlock_get_irqsave(&nklock, s);
+		xnthread_set_info(thread, XNCONTHI);
+		dovetail_request_ucall(current);
+		cobalt_stop_debugged_process(thread);
+		xnlock_put_irqrestore(&nklock, s);
+		xnsched_run();
+	}
+
+	/*
+	 * If we experienced a trap on behalf of a shadow thread
+	 * running in primary mode, move it to the Linux domain,
+	 * letting the kernel process the exception.
+	 */
+#if defined(CONFIG_XENO_OPT_DEBUG_COBALT) || defined(CONFIG_XENO_OPT_DEBUG_USER)
+	if (!user_mode(regs)) {
+		xntrace_panic_freeze();
+		printk(XENO_WARNING
+		       "switching %s to secondary mode after exception #%u in "
+		       "kernel-space at 0x%lx (pid %d)\n", thread->name,
+		       trapnr,
+		       xnarch_fault_pc(regs),
+		       xnthread_host_pid(thread));
+		xntrace_panic_dump();
+	} else if (xnarch_fault_notify(trapnr)) /* Don't report debug traps */
+		printk(XENO_WARNING
+		       "switching %s to secondary mode after exception #%u from "
+		       "user-space at 0x%lx (pid %d)\n", thread->name,
+		       trapnr,
+		       xnarch_fault_pc(regs),
+		       xnthread_host_pid(thread));
+#endif
+
+	if (xnarch_fault_pf_p(trapnr))
+		/*
+		 * The page fault counter is not SMP-safe, but it's a
+		 * simple indicator that something went wrong wrt
+		 * memory locking anyway.
+		 */
+		xnstat_counter_inc(&thread->stat.pf);
+
+	xnthread_relax(xnarch_fault_notify(trapnr), SIGDEBUG_MIGRATE_FAULT);
+}
+
+static inline int handle_setaffinity_event(struct dovetail_migration_data *d)
+{
+	return cobalt_handle_setaffinity_event(d->task);
+}
+
+static inline int handle_taskexit_event(struct task_struct *p)
+{
+	return cobalt_handle_taskexit_event(p);
+}
+
+static inline int handle_user_return(struct task_struct *task)
+{
+	return cobalt_handle_user_return(task);
+}
+
+void handle_oob_mayday(struct pt_regs *regs)
+{
+	XENO_BUG_ON(COBALT, !xnthread_test_state(xnthread_current(), XNUSER));
+
+	xnthread_relax(0, 0);
+}
+
+static int handle_sigwake_event(struct task_struct *p)
+{
+	struct xnthread *thread;
+	sigset_t pending;
+	spl_t s;
+
+	thread = xnthread_from_task(p);
+	if (thread == NULL)
+		return KEVENT_PROPAGATE;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/*
+	 * CAUTION: __TASK_TRACED is not set in p->state yet. This
+	 * state bit will be set right after we return, when the task
+	 * is woken up.
+	 */
+	if ((p->ptrace & PT_PTRACED) && !xnthread_test_state(thread, XNSSTEP)) {
+		/* We already own the siglock. */
+		sigorsets(&pending,
+			  &p->pending.signal,
+			  &p->signal->shared_pending.signal);
+
+		if (sigismember(&pending, SIGTRAP) ||
+		    sigismember(&pending, SIGSTOP)
+		    || sigismember(&pending, SIGINT))
+			cobalt_register_debugged_thread(thread);
+	}
+
+	if (xnthread_test_state(thread, XNRELAX))
+		goto out;
+
+	/*
+	 * Allow a thread stopped for debugging to resume briefly in order to
+	 * migrate to secondary mode. xnthread_relax will reapply XNDBGSTOP.
+	 */
+	if (xnthread_test_state(thread, XNDBGSTOP))
+		xnthread_resume(thread, XNDBGSTOP);
+
+	__xnthread_kick(thread);
+out:
+	xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return KEVENT_PROPAGATE;
+}
+
+static inline int handle_cleanup_event(struct mm_struct *mm)
+{
+	return cobalt_handle_cleanup_event(mm);
+}
+
+void pipeline_cleanup_process(void)
+{
+	dovetail_stop_altsched();
+}
+
+int handle_ptrace_resume(struct task_struct *tracee)
+{
+	struct xnthread *thread;
+	spl_t s;
+
+	thread = xnthread_from_task(tracee);
+	if (thread == NULL)
+		return KEVENT_PROPAGATE;
+
+	if (xnthread_test_state(thread, XNSSTEP)) {
+		xnlock_get_irqsave(&nklock, s);
+
+		xnthread_resume(thread, XNDBGSTOP);
+		cobalt_unregister_debugged_thread(thread);
+
+		xnlock_put_irqrestore(&nklock, s);
+	}
+
+	return KEVENT_PROPAGATE;
+}
+
+static void handle_ptrace_cont(void)
+{
+	struct xnthread *curr = xnthread_current();
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnthread_test_state(curr, XNSSTEP)) {
+		if (!xnthread_test_info(curr, XNCONTHI))
+			cobalt_unregister_debugged_thread(curr);
+
+		xnthread_set_localinfo(curr, XNHICCUP);
+
+		dovetail_request_ucall(current);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+void handle_inband_event(enum inband_event_type event, void *data)
+{
+	switch (event) {
+	case INBAND_TASK_SIGNAL:
+		handle_sigwake_event(data);
+		break;
+	case INBAND_TASK_MIGRATION:
+		handle_setaffinity_event(data);
+		break;
+	case INBAND_TASK_EXIT:
+		if (xnthread_current())
+			handle_taskexit_event(current);
+		break;
+	case INBAND_TASK_RETUSER:
+		handle_user_return(data);
+		break;
+	case INBAND_TASK_PTSTEP:
+		handle_ptrace_resume(data);
+		break;
+	case INBAND_TASK_PTCONT:
+		handle_ptrace_cont();
+		break;
+	case INBAND_TASK_PTSTOP:
+		break;
+	case INBAND_PROCESS_CLEANUP:
+		handle_cleanup_event(data);
+		break;
+	}
+}
+
+/*
+ * Called by the in-band kernel when the CLOCK_REALTIME epoch changes.
+ */
+void inband_clock_was_set(void)
+{
+	if (realtime_core_enabled())
+		xnclock_set_wallclock(ktime_get_real_fast_ns());
+}
+
+#ifdef CONFIG_MMU
+
+int pipeline_prepare_current(void)
+{
+	struct task_struct *p = current;
+	kernel_siginfo_t si;
+
+	if ((p->mm->def_flags & VM_LOCKED) == 0) {
+		memset(&si, 0, sizeof(si));
+		si.si_signo = SIGDEBUG;
+		si.si_code = SI_QUEUE;
+		si.si_int = SIGDEBUG_NOMLOCK | sigdebug_marker;
+		send_sig_info(SIGDEBUG, &si, p);
+	}
+
+	return 0;
+}
+
+static inline int get_mayday_prot(void)
+{
+	return PROT_READ|PROT_EXEC;
+}
+
+#else /* !CONFIG_MMU */
+
+int pipeline_prepare_current(void)
+{
+	return 0;
+}
+
+static inline int get_mayday_prot(void)
+{
+	/*
+	 * Until we stop backing /dev/mem with the mayday page, we
+	 * can't ask for PROT_EXEC since the former does not define
+	 * mmap capabilities, and default ones won't allow an
+	 * executable mapping with MAP_SHARED. In the NOMMU case, this
+	 * is (currently) not an issue.
+	 */
+	return PROT_READ;
+}
+
+#endif /* !CONFIG_MMU */
+
+void resume_oob_task(struct task_struct *p) /* inband, oob stage stalled */
+{
+	struct xnthread *thread = xnthread_from_task(p);
+
+	xnlock_get(&nklock);
+
+	/*
+	 * We fire the handler before the thread is migrated, so that
+	 * thread->sched does not change between paired invocations of
+	 * relax_thread/harden_thread handlers.
+	 */
+	xnthread_run_handler_stack(thread, harden_thread);
+
+	cobalt_adjust_affinity(p);
+
+	xnthread_resume(thread, XNRELAX);
+
+	/*
+	 * In case we migrated independently of the user return notifier, clear
+	 * XNCONTHI here and also disable the notifier - we are already done.
+	 */
+	if (unlikely(xnthread_test_info(thread, XNCONTHI))) {
+		xnthread_clear_info(thread, XNCONTHI);
+		dovetail_clear_ucall();
+	}
+
+	/* Unregister as debugged thread in case we postponed this. */
+	if (unlikely(xnthread_test_state(thread, XNSSTEP)))
+		cobalt_unregister_debugged_thread(thread);
+
+	xnlock_put(&nklock);
+
+	xnsched_run();
+}
+
+void pipeline_attach_current(struct xnthread *thread)
+{
+	struct cobalt_threadinfo *p;
+
+	p = pipeline_current();
+	p->thread = thread;
+	p->process = cobalt_search_process(current->mm);
+	dovetail_init_altsched(&xnthread_archtcb(thread)->altsched);
+}
+
+int pipeline_trap_kevents(void)
+{
+	dovetail_start();
+	return 0;
+}
+
+void pipeline_enable_kevents(void)
+{
+	dovetail_start_altsched();
+}
+++ linux-patched/kernel/xenomai/rtdm/fd.c	2022-03-21 12:58:29.118891604 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/rtdm/internal.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2013,2014 Gilles Chanteperdrix <gch@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/kthread.h>
+#include <linux/fdtable.h>
+#include <cobalt/kernel/registry.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/time.h>
+#include <pipeline/inband_work.h>
+#include <trace/events/cobalt-rtdm.h>
+#include <rtdm/fd.h>
+#include "internal.h"
+#include "posix/process.h"
+#include "posix/syscall.h"
+#include "posix/clock.h"
+
+#define RTDM_SETFL_MASK (O_NONBLOCK)
+
+DEFINE_PRIVATE_XNLOCK(fdtree_lock);
+static LIST_HEAD(rtdm_fd_cleanup_queue);
+static struct semaphore rtdm_fd_cleanup_sem;
+
+struct rtdm_fd_index {
+	struct xnid id;
+	struct rtdm_fd *fd;
+};
+
+static int enosys(void)
+{
+	return -ENOSYS;
+}
+
+static int enotty(void)
+{
+	return -ENOTTY;
+}
+
+static int ebadf(void)
+{
+	return -EBADF;
+}
+
+static int enodev(void)
+{
+	return -ENODEV;
+}
+
+static inline struct rtdm_fd_index *
+fetch_fd_index(struct cobalt_ppd *p, int ufd)
+{
+	struct xnid *id = xnid_fetch(&p->fds, ufd);
+	if (id == NULL)
+		return NULL;
+
+	return container_of(id, struct rtdm_fd_index, id);
+}
+
+static struct rtdm_fd *fetch_fd(struct cobalt_ppd *p, int ufd)
+{
+	struct rtdm_fd_index *idx = fetch_fd_index(p, ufd);
+	if (idx == NULL)
+		return NULL;
+
+	return idx->fd;
+}
+
+#define assign_invalid_handler(__handler, __invalid)			\
+	do								\
+		(__handler) = (typeof(__handler))__invalid;		\
+	while (0)
+
+/* Calling this handler should beget ENOSYS if not implemented. */
+#define assign_switch_handler(__handler)				\
+	do								\
+		if ((__handler) == NULL)				\
+			assign_invalid_handler(__handler, enosys);	\
+	while (0)
+
+#define assign_default_handler(__handler, __invalid)			\
+	do								\
+		if ((__handler) == NULL)				\
+			assign_invalid_handler(__handler, __invalid);	\
+	while (0)
+
+#define __rt(__handler)		__handler ## _rt
+#define __nrt(__handler)	__handler ## _nrt
+
+/*
+ * If neither of the dual (rt/nrt) handlers is implemented, install
+ * the given fallback handler for both; otherwise install an -ENOSYS
+ * placeholder for any NULL handler, so that the adaptive switch to
+ * the converse mode is triggered.
+ */
+#define assign_default_dual_handlers(__handler, __invalid_handler)	\
+	do								\
+		if (__rt(__handler) || __nrt(__handler)) {		\
+			assign_switch_handler(__rt(__handler));		\
+			assign_switch_handler(__nrt(__handler));	\
+		} else {						\
+			assign_invalid_handler(__rt(__handler),		\
+					       __invalid_handler);	\
+			assign_invalid_handler(__nrt(__handler),	\
+					       __invalid_handler);	\
+		}							\
+	while (0)
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+static inline void set_compat_bit(struct rtdm_fd *fd)
+{
+	struct pt_regs *regs;
+
+	if (cobalt_ppd_get(0) == &cobalt_kernel_ppd)
+		fd->compat = 0;
+	else {
+		regs = task_pt_regs(current);
+		XENO_BUG_ON(COBALT, !__xn_syscall_p(regs));
+		fd->compat = __COBALT_CALL_COMPAT(__xn_reg_sys(regs));
+	}
+}
+
+#else	/* !CONFIG_XENO_ARCH_SYS3264 */
+
+static inline void set_compat_bit(struct rtdm_fd *fd)
+{
+}
+
+#endif	/* !CONFIG_XENO_ARCH_SYS3264 */
+
+int rtdm_fd_enter(struct rtdm_fd *fd, int ufd, unsigned int magic,
+		  struct rtdm_fd_ops *ops)
+{
+	struct cobalt_ppd *ppd;
+
+	secondary_mode_only();
+
+	if (magic == 0)
+		return -EINVAL;
+
+	assign_default_dual_handlers(ops->ioctl, enotty);
+	assign_default_dual_handlers(ops->read, ebadf);
+	assign_default_dual_handlers(ops->write, ebadf);
+	assign_default_dual_handlers(ops->recvmsg, ebadf);
+	assign_default_dual_handlers(ops->sendmsg, ebadf);
+	assign_default_handler(ops->select, ebadf);
+	assign_default_handler(ops->mmap, enodev);
+
+	ppd = cobalt_ppd_get(0);
+	fd->magic = magic;
+	fd->ops = ops;
+	fd->owner = ppd;
+	fd->ufd = ufd;
+	fd->refs = 1;
+	fd->stale = false;
+	set_compat_bit(fd);
+	INIT_LIST_HEAD(&fd->next);
+
+	return 0;
+}
+
+int rtdm_fd_register(struct rtdm_fd *fd, int ufd)
+{
+	struct rtdm_fd_index *idx;
+	struct cobalt_ppd *ppd;
+	spl_t s;
+	int ret = 0;
+
+	ppd = cobalt_ppd_get(0);
+	idx = kmalloc(sizeof(*idx), GFP_KERNEL);
+	if (idx == NULL)
+		return -ENOMEM;
+
+	idx->fd = fd;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	ret = xnid_enter(&ppd->fds, &idx->id, ufd);
+	xnlock_put_irqrestore(&fdtree_lock, s);
+	if (ret < 0) {
+		kfree(idx);
+		ret = -EBUSY;
+	}
+
+	return ret;
+}
+
+int rtdm_device_new_fd(struct rtdm_fd *fd, int ufd,
+			struct rtdm_device *device)
+{
+	spl_t s;
+	int ret;
+
+	ret = rtdm_fd_register(fd, ufd);
+	if (ret < 0)
+		return ret;
+
+	trace_cobalt_fd_created(fd, ufd);
+	xnlock_get_irqsave(&fdtree_lock, s);
+	list_add(&fd->next, &device->openfd_list);
+	xnlock_put_irqrestore(&fdtree_lock, s);
+
+	return 0;
+}
+
+/**
+ * @brief Retrieve and lock an RTDM file descriptor
+ *
+ * @param[in] ufd User-side file descriptor
+ * @param[in] magic Magic word for lookup validation
+ *
+ * @return Pointer to the RTDM file descriptor matching @a
+ * ufd. Otherwise:
+ *
+ * - ERR_PTR(-EADV) if the user-space handle is either invalid or not
+ * managed by RTDM.
+ *
+ * - ERR_PTR(-EBADF) if the underlying device is being torn down at
+ * the time of the call.
+ *
+ * @note The file descriptor returned must be later released by a call
+ * to rtdm_fd_put().
+ *
+ * @coretags{unrestricted}
+ */
+struct rtdm_fd *rtdm_fd_get(int ufd, unsigned int magic)
+{
+	struct cobalt_ppd *p = cobalt_ppd_get(0);
+	struct rtdm_fd *fd;
+	spl_t s;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	fd = fetch_fd(p, ufd);
+	if (fd == NULL || (magic != 0 && fd->magic != magic)) {
+		fd = ERR_PTR(-EADV);
+		goto out;
+	}
+
+	if (fd->stale) {
+		fd = ERR_PTR(-EBADF);
+		goto out;
+	}
+
+	++fd->refs;
+out:
+	xnlock_put_irqrestore(&fdtree_lock, s);
+
+	return fd;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_get);
+
+static int fd_cleanup_thread(void *data)
+{
+	struct rtdm_fd *fd;
+	int err;
+	spl_t s;
+
+	for (;;) {
+		set_cpus_allowed_ptr(current, cpu_online_mask);
+
+		do {
+			err = down_interruptible(&rtdm_fd_cleanup_sem);
+			if (kthread_should_stop())
+				return 0;
+		} while (err);
+
+		xnlock_get_irqsave(&fdtree_lock, s);
+		fd = list_first_entry(&rtdm_fd_cleanup_queue,
+				struct rtdm_fd, cleanup);
+		list_del(&fd->cleanup);
+		xnlock_put_irqrestore(&fdtree_lock, s);
+
+		fd->ops->close(fd);
+	}
+
+	return 0;
+}
+
+static void lostage_trigger_close(struct pipeline_inband_work *inband_work)
+{
+	up(&rtdm_fd_cleanup_sem);
+}
+
+static struct lostage_trigger_close {
+	struct pipeline_inband_work inband_work; /* Must be first. */
+} fd_closework = {
+	.inband_work = PIPELINE_INBAND_WORK_INITIALIZER(fd_closework,
+						lostage_trigger_close),
+};
+
+static void __put_fd(struct rtdm_fd *fd, spl_t s)
+{
+	bool destroy, trigger;
+
+	XENO_WARN_ON(COBALT, fd->refs <= 0);
+	destroy = --fd->refs == 0;
+	if (destroy && !list_empty(&fd->next))
+		list_del_init(&fd->next);
+
+	xnlock_put_irqrestore(&fdtree_lock, s);
+
+	if (!destroy)
+		return;
+
+	if (is_secondary_domain())
+		fd->ops->close(fd);
+	else {
+		xnlock_get_irqsave(&fdtree_lock, s);
+		trigger = list_empty(&rtdm_fd_cleanup_queue);
+		list_add_tail(&fd->cleanup, &rtdm_fd_cleanup_queue);
+		xnlock_put_irqrestore(&fdtree_lock, s);
+
+		if (trigger)
+			pipeline_post_inband_work(&fd_closework);
+	}
+}
+
+void rtdm_device_flush_fds(struct rtdm_device *dev)
+{
+	struct rtdm_driver *drv = dev->driver;
+	struct rtdm_fd *fd;
+	spl_t s;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+
+	while (!list_empty(&dev->openfd_list)) {
+		fd = list_get_entry_init(&dev->openfd_list, struct rtdm_fd, next);
+		fd->stale = true;
+		if (drv->ops.close) {
+			rtdm_fd_get_light(fd);
+			xnlock_put_irqrestore(&fdtree_lock, s);
+			drv->ops.close(fd);
+			rtdm_fd_put(fd);
+			xnlock_get_irqsave(&fdtree_lock, s);
+		}
+	}
+
+	xnlock_put_irqrestore(&fdtree_lock, s);
+}
+
+/**
+ * @brief Release an RTDM file descriptor obtained via rtdm_fd_get()
+ *
+ * @param[in] fd RTDM file descriptor to release
+ *
+ * @note Every call to rtdm_fd_get() must be matched by a call to
+ * rtdm_fd_put().
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_fd_put(struct rtdm_fd *fd)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	__put_fd(fd, s);
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_put);
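+
+/*
+ * Illustrative usage sketch (not part of the original sources): a
+ * typical service entry point brackets its work on the descriptor
+ * with rtdm_fd_get()/rtdm_fd_put(). MY_FD_MAGIC and do_work_on_fd()
+ * are hypothetical placeholders.
+ *
+ *	struct rtdm_fd *fd;
+ *	int ret;
+ *
+ *	fd = rtdm_fd_get(ufd, MY_FD_MAGIC);
+ *	if (IS_ERR(fd))
+ *		return PTR_ERR(fd);
+ *
+ *	ret = do_work_on_fd(fd);
+ *
+ *	rtdm_fd_put(fd);
+ *
+ *	return ret;
+ */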
+
+/**
+ * @brief Hold a reference on an RTDM file descriptor
+ *
+ * @param[in] fd Target file descriptor
+ *
+ * @note rtdm_fd_lock() increments the reference counter of @a fd. You
+ * only need to call this function in special scenarios, e.g. when
+ * keeping additional references to the file descriptor that have
+ * different lifetimes. Only use rtdm_fd_lock() on descriptors that
+ * are currently locked via an earlier rtdm_fd_get()/rtdm_fd_lock() or
+ * while running a device operation handler.
+ *
+ * @coretags{unrestricted}
+ */
+int rtdm_fd_lock(struct rtdm_fd *fd)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	if (fd->refs == 0) {
+		xnlock_put_irqrestore(&fdtree_lock, s);
+		return -EIDRM;
+	}
+	++fd->refs;
+	xnlock_put_irqrestore(&fdtree_lock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_lock);
+
+/**
+ * @brief Drop a reference on an RTDM file descriptor
+ *
+ * @param[in] fd Target file descriptor
+ *
+ * @note Every call to rtdm_fd_lock() must be matched by a call to
+ * rtdm_fd_unlock().
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_fd_unlock(struct rtdm_fd *fd)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	__put_fd(fd, s);
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_unlock);
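+
+/*
+ * Illustrative sketch (assumption, not from the original sources):
+ * keeping an additional, longer-lived reference from within a device
+ * operation handler, where the descriptor is already locked. The
+ * stash/fetch helpers are hypothetical.
+ *
+ *	if (rtdm_fd_lock(fd) == 0)
+ *		stash_fd_for_later(fd);
+ *
+ *	... later, from the deferred path:
+ *
+ *	fd = fetch_stashed_fd();
+ *	do_deferred_work(fd);
+ *	rtdm_fd_unlock(fd);
+ */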
+
+int rtdm_fd_fcntl(int ufd, int cmd, ...)
+{
+	struct rtdm_fd *fd;
+	va_list ap;
+	long arg;
+	int ret;
+
+	fd = rtdm_fd_get(ufd, 0);
+	if (IS_ERR(fd))
+		return PTR_ERR(fd);
+
+	va_start(ap, cmd);
+	arg = va_arg(ap, long);
+	va_end(ap);
+
+	switch (cmd) {
+	case F_GETFL:
+		ret = fd->oflags;
+		break;
+	case F_SETFL:
+		fd->oflags = (fd->oflags & ~RTDM_SETFL_MASK) |
+			(arg & RTDM_SETFL_MASK);
+		ret = 0;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	rtdm_fd_put(fd);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_fcntl);
+
+static struct rtdm_fd *get_fd_fixup_mode(int ufd)
+{
+	struct xnthread *thread;
+	struct rtdm_fd *fd;
+
+	fd = rtdm_fd_get(ufd, 0);
+	if (IS_ERR(fd))
+		return fd;
+
+	/*
+	 * Mode is selected according to the following convention:
+	 *
+	 * - Cobalt threads must try running the syscall from primary
+	 * mode as a first attempt, regardless of their scheduling
+	 * class. The driver handler may ask for demoting the caller
+	 * to secondary mode by returning -ENOSYS.
+	 *
+	 * - Regular threads (i.e. not bound to Cobalt) may only run
+	 * the syscall from secondary mode.
+	 */
+	thread = xnthread_current();
+	if (unlikely(is_secondary_domain())) {
+		if (thread == NULL ||
+		    xnthread_test_localinfo(thread, XNDESCENT))
+			return fd;
+	} else if (likely(thread))
+		return fd;
+
+	/*
+	 * We need to switch to the converse mode. Since all callers
+	 * bear the "adaptive" tag, we just pass -ENOSYS back to the
+	 * syscall dispatcher to get switched to the next mode.
+	 */
+	rtdm_fd_put(fd);
+
+	return ERR_PTR(-ENOSYS);
+}
+
+int rtdm_fd_ioctl(int ufd, unsigned int request, ...)
+{
+	struct rtdm_fd *fd;
+	void __user *arg;
+	va_list args;
+	int err, ret;
+
+	fd = get_fd_fixup_mode(ufd);
+	if (IS_ERR(fd)) {
+		err = PTR_ERR(fd);
+		goto out;
+	}
+
+	va_start(args, request);
+	arg = va_arg(args, void __user *);
+	va_end(args);
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_ioctl(current, fd, ufd, request);
+
+	if (is_secondary_domain())
+		err = fd->ops->ioctl_nrt(fd, request, arg);
+	else
+		err = fd->ops->ioctl_rt(fd, request, arg);
+
+	if (!XENO_ASSERT(COBALT, !spltest()))
+		splnone();
+
+	if (err < 0) {
+		ret = __rtdm_dev_ioctl_core(fd, request, arg);
+		if (ret != -EADV)
+			err = ret;
+	}
+
+	rtdm_fd_put(fd);
+  out:
+	if (err < 0)
+		trace_cobalt_fd_ioctl_status(current, fd, ufd, err);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_ioctl);
+
+ssize_t
+rtdm_fd_read(int ufd, void __user *buf, size_t size)
+{
+	struct rtdm_fd *fd;
+	ssize_t ret;
+
+	fd = get_fd_fixup_mode(ufd);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_read(current, fd, ufd, size);
+
+	if (is_secondary_domain())
+		ret = fd->ops->read_nrt(fd, buf, size);
+	else
+		ret = fd->ops->read_rt(fd, buf, size);
+
+	if (!XENO_ASSERT(COBALT, !spltest()))
+		splnone();
+
+	rtdm_fd_put(fd);
+
+  out:
+	if (ret < 0)
+		trace_cobalt_fd_read_status(current, fd, ufd, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_read);
+
+ssize_t rtdm_fd_write(int ufd, const void __user *buf, size_t size)
+{
+	struct rtdm_fd *fd;
+	ssize_t ret;
+
+	fd = get_fd_fixup_mode(ufd);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_write(current, fd, ufd, size);
+
+	if (is_secondary_domain())
+		ret = fd->ops->write_nrt(fd, buf, size);
+	else
+		ret = fd->ops->write_rt(fd, buf, size);
+
+	if (!XENO_ASSERT(COBALT, !spltest()))
+		splnone();
+
+	rtdm_fd_put(fd);
+
+  out:
+	if (ret < 0)
+		trace_cobalt_fd_write_status(current, fd, ufd, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_write);
+
+ssize_t rtdm_fd_recvmsg(int ufd, struct user_msghdr *msg, int flags)
+{
+	struct rtdm_fd *fd;
+	ssize_t ret;
+
+	fd = get_fd_fixup_mode(ufd);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_recvmsg(current, fd, ufd, flags);
+
+	if (fd->oflags & O_NONBLOCK)
+		flags |= MSG_DONTWAIT;
+
+	if (is_secondary_domain())
+		ret = fd->ops->recvmsg_nrt(fd, msg, flags);
+	else
+		ret = fd->ops->recvmsg_rt(fd, msg, flags);
+
+	if (!XENO_ASSERT(COBALT, !spltest()))
+		splnone();
+
+	rtdm_fd_put(fd);
+out:
+	if (ret < 0)
+		trace_cobalt_fd_recvmsg_status(current, fd, ufd, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_recvmsg);
+
+struct cobalt_recvmmsg_timer {
+	struct xntimer timer;
+	struct xnthread *waiter;
+};
+
+static void recvmmsg_timeout_handler(struct xntimer *timer)
+{
+	struct cobalt_recvmmsg_timer *rq;
+
+	rq = container_of(timer, struct cobalt_recvmmsg_timer, timer);
+	xnthread_set_info(rq->waiter, XNTIMEO);
+	xnthread_resume(rq->waiter, XNDELAY);
+}
+
+int __rtdm_fd_recvmmsg(int ufd, void __user *u_msgvec, unsigned int vlen,
+		       unsigned int flags, void __user *u_timeout,
+		       int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg),
+		       int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg),
+		       int (*get_timespec)(struct timespec64 *ts, const void __user *u_ts))
+{
+	struct cobalt_recvmmsg_timer rq;
+	xntmode_t tmode = XN_RELATIVE;
+	struct timespec64 ts = { 0 };
+	int ret = 0, datagrams = 0;
+	xnticks_t timeout = 0;
+	struct mmsghdr mmsg;
+	struct rtdm_fd *fd;
+	void __user *u_p;
+	ssize_t len;
+	spl_t s;
+
+	fd = rtdm_fd_get(ufd, 0);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_recvmmsg(current, fd, ufd, flags);
+
+	if (u_timeout) {
+		ret = get_timespec(&ts, u_timeout);
+		if (ret)
+			goto fail;
+
+		if (!timespec64_valid(&ts)) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		tmode = XN_ABSOLUTE;
+		timeout = ts2ns(&ts);
+		if (timeout == 0)
+			flags |= MSG_DONTWAIT;
+		else {
+			timeout += xnclock_read_monotonic(&nkclock);
+			rq.waiter = xnthread_current();
+			xntimer_init(&rq.timer, &nkclock,
+				     recvmmsg_timeout_handler,
+				     NULL, XNTIMER_IGRAVITY);
+			xnlock_get_irqsave(&nklock, s);
+			ret = xntimer_start(&rq.timer, timeout,
+					    XN_INFINITE, tmode);
+			xnlock_put_irqrestore(&nklock, s);
+		}
+	}
+
+	if (fd->oflags & O_NONBLOCK)
+		flags |= MSG_DONTWAIT;
+
+	for (u_p = u_msgvec; vlen > 0; vlen--) {
+		ret = get_mmsg(&mmsg, u_p);
+		if (ret)
+			break;
+		len = fd->ops->recvmsg_rt(fd, &mmsg.msg_hdr, flags);
+		if (len < 0) {
+			ret = len;
+			break;
+		}
+		mmsg.msg_len = (unsigned int)len;
+		ret = put_mmsg(&u_p, &mmsg);
+		if (ret)
+			break;
+		datagrams++;
+		/* OOB data requires immediate handling. */
+		if (mmsg.msg_hdr.msg_flags & MSG_OOB)
+			break;
+		if (flags & MSG_WAITFORONE)
+			flags |= MSG_DONTWAIT;
+	}
+
+	if (timeout) {
+		xnlock_get_irqsave(&nklock, s);
+		xntimer_destroy(&rq.timer);
+		xnlock_put_irqrestore(&nklock, s);
+	}
+
+fail:
+	rtdm_fd_put(fd);
+
+	if (datagrams > 0)
+		ret = datagrams;
+
+out:
+	trace_cobalt_fd_recvmmsg_status(current, fd, ufd, ret);
+
+	return ret;
+}
+
+static inline int __rtdm_fetch_timeout64(struct timespec64 *ts,
+					 const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_timespec64(ts, u_ts);
+}
+
+int __rtdm_fd_recvmmsg64(int ufd, void __user *u_msgvec, unsigned int vlen,
+			 unsigned int flags, void __user *u_timeout,
+			 int (*get_mmsg)(struct mmsghdr *mmsg,
+					 void __user *u_mmsg),
+			 int (*put_mmsg)(void __user **u_mmsg_p,
+					 const struct mmsghdr *mmsg))
+{
+	return __rtdm_fd_recvmmsg(ufd, u_msgvec, vlen, flags, u_timeout,
+				  get_mmsg, put_mmsg, __rtdm_fetch_timeout64);
+}
+
+ssize_t rtdm_fd_sendmsg(int ufd, const struct user_msghdr *msg, int flags)
+{
+	struct rtdm_fd *fd;
+	ssize_t ret;
+
+	fd = get_fd_fixup_mode(ufd);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_sendmsg(current, fd, ufd, flags);
+
+	if (fd->oflags & O_NONBLOCK)
+		flags |= MSG_DONTWAIT;
+
+	if (is_secondary_domain())
+		ret = fd->ops->sendmsg_nrt(fd, msg, flags);
+	else
+		ret = fd->ops->sendmsg_rt(fd, msg, flags);
+
+	if (!XENO_ASSERT(COBALT, !spltest()))
+		splnone();
+
+	rtdm_fd_put(fd);
+out:
+	if (ret < 0)
+		trace_cobalt_fd_sendmsg_status(current, fd, ufd, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_sendmsg);
+
+int __rtdm_fd_sendmmsg(int ufd, void __user *u_msgvec, unsigned int vlen,
+		       unsigned int flags,
+		       int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg),
+		       int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg))
+{
+	int ret = 0, datagrams = 0;
+	struct mmsghdr mmsg;
+	struct rtdm_fd *fd;
+	void __user *u_p;
+	ssize_t len;
+
+	fd = rtdm_fd_get(ufd, 0);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_sendmmsg(current, fd, ufd, flags);
+
+	if (fd->oflags & O_NONBLOCK)
+		flags |= MSG_DONTWAIT;
+
+	for (u_p = u_msgvec; vlen > 0; vlen--) {
+		ret = get_mmsg(&mmsg, u_p);
+		if (ret)
+			break;
+		len = fd->ops->sendmsg_rt(fd, &mmsg.msg_hdr, flags);
+		if (len < 0) {
+			ret = len;
+			break;
+		}
+		mmsg.msg_len = (unsigned int)len;
+		ret = put_mmsg(&u_p, &mmsg);
+		if (ret)
+			break;
+		datagrams++;
+	}
+
+	rtdm_fd_put(fd);
+
+	if (datagrams > 0)
+		ret = datagrams;
+
+out:
+	trace_cobalt_fd_sendmmsg_status(current, fd, ufd, ret);
+
+	return ret;
+}
+
+static void
+__fd_close(struct cobalt_ppd *p, struct rtdm_fd_index *idx, spl_t s)
+{
+	xnid_remove(&p->fds, &idx->id);
+	__put_fd(idx->fd, s);
+
+	kfree(idx);
+}
+
+int rtdm_fd_close(int ufd, unsigned int magic)
+{
+	struct rtdm_fd_index *idx;
+	struct cobalt_ppd *ppd;
+	struct rtdm_fd *fd;
+	spl_t s;
+
+	secondary_mode_only();
+
+	ppd = cobalt_ppd_get(0);
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	idx = fetch_fd_index(ppd, ufd);
+	if (idx == NULL)
+		goto eadv;
+
+	fd = idx->fd;
+	if (magic != 0 && fd->magic != magic) {
+eadv:
+		xnlock_put_irqrestore(&fdtree_lock, s);
+		return -EADV;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_close(current, fd, ufd, fd->refs);
+
+	/*
+	 * In dual kernel mode, the linux-side fdtable and the RTDM
+	 * ->close() handler are managed asynchronously, i.e. the
+	 * handler execution may be deferred until after the regular file
+	 * descriptor has been removed from the fdtable, if some refs on
+	 * rtdm_fd are still pending.
+	 */
+	__fd_close(ppd, idx, s);
+	close_fd(ufd);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_close);
+
+int rtdm_fd_mmap(int ufd, struct _rtdm_mmap_request *rma,
+		 void **u_addrp)
+{
+	struct rtdm_fd *fd;
+	int ret;
+
+	secondary_mode_only();
+
+	fd = rtdm_fd_get(ufd, 0);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_mmap(current, fd, ufd, rma);
+
+	if (rma->flags & (MAP_FIXED|MAP_ANONYMOUS)) {
+		ret = -EADV;
+		goto unlock;
+	}
+
+	ret = __rtdm_mmap_from_fdop(fd, rma->length, rma->offset,
+				    rma->prot, rma->flags, u_addrp);
+unlock:
+	rtdm_fd_put(fd);
+out:
+	if (ret)
+		trace_cobalt_fd_mmap_status(current, fd, ufd, ret);
+
+	return ret;
+}
+
+int rtdm_fd_valid_p(int ufd)
+{
+	struct rtdm_fd *fd;
+	spl_t s;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	fd = fetch_fd(cobalt_ppd_get(0), ufd);
+	xnlock_put_irqrestore(&fdtree_lock, s);
+
+	return fd != NULL;
+}
+
+/**
+ * @brief Bind a selector to specified event types of a given file descriptor
+ * @internal
+ *
+ * This function is invoked by higher RTOS layers implementing select-like
+ * services. It shall not be called directly by RTDM drivers.
+ *
+ * @param[in] ufd User-side file descriptor to bind to
+ * @param[in,out] selector Selector object that shall be bound to the given
+ * event
+ * @param[in] type Event type the caller is interested in
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EADV is returned if the file descriptor @a ufd cannot be resolved.
+ *
+ * - -EBADF is returned if the underlying device is being torn down at the time
+ *   of the call.
+ *
+ * - -EINVAL is returned if @a type is invalid.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_fd_select(int ufd, struct xnselector *selector,
+		   unsigned int type)
+{
+	struct rtdm_fd *fd;
+	int ret;
+
+	fd = rtdm_fd_get(ufd, 0);
+	if (IS_ERR(fd))
+		return PTR_ERR(fd);
+
+	set_compat_bit(fd);
+
+	ret = fd->ops->select(fd, selector, type, ufd);
+
+	if (!XENO_ASSERT(COBALT, !spltest()))
+		splnone();
+
+	rtdm_fd_put(fd);
+
+	return ret;
+}
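+
+/*
+ * Illustrative sketch (assumption): how a select()-like service built
+ * on top of RTDM might bind a selector to the descriptors of a read
+ * set. XNSELECT_READ is assumed to be the read event type defined by
+ * the Cobalt select core; nfds and the ufds[] array are hypothetical.
+ *
+ *	for (i = 0; i < nfds; i++) {
+ *		ret = rtdm_fd_select(ufds[i], selector, XNSELECT_READ);
+ *		if (ret)
+ *			break;	// e.g. -EADV or -EBADF
+ *	}
+ */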
+
+static void destroy_fd(void *cookie, struct xnid *id)
+{
+	struct cobalt_ppd *p = cookie;
+	struct rtdm_fd_index *idx;
+	spl_t s;
+
+	idx = container_of(id, struct rtdm_fd_index, id);
+	xnlock_get_irqsave(&fdtree_lock, s);
+	__fd_close(p, idx, 0);
+}
+
+void rtdm_fd_cleanup(struct cobalt_ppd *p)
+{
+	/*
+	 * This is called on behalf of a (userland) task exit handler,
+	 * so we don't have to deal with the regular file descriptors;
+	 * we only have to empty our own index.
+	 */
+	xntree_cleanup(&p->fds, p, destroy_fd);
+}
+
+void rtdm_fd_init(void)
+{
+	sema_init(&rtdm_fd_cleanup_sem, 0);
+	kthread_run(fd_cleanup_thread, NULL, "rtdm_fd");
+}
+
+static inline void warn_user(struct file *file, const char *call)
+{
+	struct dentry *dentry = file->f_path.dentry;
+	
+	printk(XENO_WARNING
+	       "%s[%d] called regular %s() on /dev/rtdm/%s\n",
+	       current->comm, task_pid_nr(current), call + 5, dentry->d_name.name);
+}
+
+static ssize_t dumb_read(struct file *file, char  __user *buf,
+			 size_t count, loff_t __user *ppos)
+{
+	warn_user(file, __func__);
+	return -EINVAL;
+}
+
+static ssize_t dumb_write(struct file *file,  const char __user *buf,
+			  size_t count, loff_t __user *ppos)
+{
+	warn_user(file, __func__);
+	return -EINVAL;
+}
+
+static unsigned int dumb_poll(struct file *file, poll_table *pt)
+{
+	warn_user(file, __func__);
+	return -EINVAL;
+}
+
+static long dumb_ioctl(struct file *file, unsigned int cmd,
+		       unsigned long arg)
+{
+	warn_user(file, __func__);
+	return -EINVAL;
+}
+
+const struct file_operations rtdm_dumb_fops = {
+	.read		= dumb_read,
+	.write		= dumb_write,
+	.poll		= dumb_poll,
+	.unlocked_ioctl	= dumb_ioctl,
+};
+++ linux-patched/kernel/xenomai/rtdm/internal.h	2022-03-21 12:58:29.114891643 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/rtdm/core.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _RTDM_INTERNAL_H
+#define _RTDM_INTERNAL_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/sem.h>
+#include <linux/file.h>
+#include <linux/atomic.h>
+#include <cobalt/kernel/tree.h>
+#include <cobalt/kernel/lock.h>
+#include <rtdm/driver.h>
+
+static inline void __rtdm_get_device(struct rtdm_device *device)
+{
+	atomic_inc(&device->refcount);
+}
+
+void __rtdm_put_device(struct rtdm_device *device);
+
+struct rtdm_device *__rtdm_get_namedev(const char *path);
+
+struct rtdm_device *__rtdm_get_protodev(int protocol_family,
+					int socket_type);
+
+void __rtdm_dev_close(struct rtdm_fd *fd);
+
+int __rtdm_dev_ioctl_core(struct rtdm_fd *fd,
+			  unsigned int request, void __user *arg);
+
+int __rtdm_mmap_from_fdop(struct rtdm_fd *fd, size_t len, off_t offset,
+			  int prot, int flags, void **pptr);
+
+/* nklock held, irqs off. */
+static inline void rtdm_fd_get_light(struct rtdm_fd *fd)
+{
+	++fd->refs;
+}
+
+int rtdm_init(void);
+
+void rtdm_cleanup(void);
+
+extern const struct file_operations rtdm_dumb_fops;
+
+#endif /* _RTDM_INTERNAL_H */
+++ linux-patched/kernel/xenomai/rtdm/core.c	2022-03-21 12:58:29.111891672 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/rtdm/drvlib.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/fdtable.h>
+#include <linux/anon_inodes.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/heap.h>
+#include "rtdm/internal.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/cobalt-rtdm.h>
+#include "posix/process.h"
+
+/**
+ * @ingroup rtdm
+ * @defgroup rtdm_driver_interface Driver programming interface
+ * RTDM driver programming interface
+ * @{
+ */
+
+static void cleanup_instance(struct rtdm_device *dev,
+			     struct rtdm_dev_context *context)
+{
+	if (context)
+		kfree(context);
+
+	__rtdm_put_device(dev);
+}
+
+void __rtdm_dev_close(struct rtdm_fd *fd)
+{
+	struct rtdm_dev_context *context = rtdm_fd_to_context(fd);
+	struct rtdm_device *dev = context->device;
+	struct rtdm_driver *drv = dev->driver;
+
+	if (!fd->stale && drv->ops.close)
+		drv->ops.close(fd);
+
+	cleanup_instance(dev, context);
+}
+
+int __rtdm_anon_getfd(const char *name, int flags)
+{
+	return anon_inode_getfd(name, &rtdm_dumb_fops, NULL, flags);
+}
+
+void __rtdm_anon_putfd(int ufd)
+{
+	close_fd(ufd);
+}
+
+static int create_instance(int ufd, struct rtdm_device *dev,
+			   struct rtdm_dev_context **context_ptr)
+{
+	struct rtdm_driver *drv = dev->driver;
+	struct rtdm_dev_context *context;
+
+	/*
+	 * Reset to NULL so that cleanup_instance() can always be used to
+	 * revert partially successful allocations as well.
+	 */
+	*context_ptr = NULL;
+
+	if ((drv->device_flags & RTDM_EXCLUSIVE) != 0 &&
+	    atomic_read(&dev->refcount) > 1)
+		return -EBUSY;
+
+	context = kzalloc(sizeof(struct rtdm_dev_context) +
+			  drv->context_size, GFP_KERNEL);
+	if (unlikely(context == NULL))
+		return -ENOMEM;
+
+	context->device = dev;
+	*context_ptr = context;
+
+	return rtdm_fd_enter(&context->fd, ufd, RTDM_FD_MAGIC, &dev->ops);
+}
+
+#ifdef CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE
+
+static inline struct file *
+open_devnode(struct rtdm_device *dev, const char *path, int oflag)
+{
+	struct file *filp;
+	char *filename;
+
+	if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_LEGACY) &&
+	    strncmp(path, "/dev/rtdm/", 10))
+		printk(XENO_WARNING
+		       "%s[%d] opens obsolete device path: %s\n",
+		       current->comm, task_pid_nr(current), path);
+
+	filename = kasprintf(GFP_KERNEL, "/dev/rtdm/%s", dev->name);
+	if (filename == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	filp = filp_open(filename, oflag, 0);
+	kfree(filename);
+
+	return filp;
+}
+
+#else /* !CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE */
+
+static inline struct file *
+open_devnode(struct rtdm_device *dev, const char *path, int oflag)
+{
+	return filp_open(path, oflag, 0);
+}
+
+#endif /* !CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE */
+
+int __rtdm_dev_open(const char *path, int oflag)
+{
+	struct rtdm_dev_context *context;
+	struct rtdm_device *dev;
+	struct file *filp;
+	int ufd, ret;
+
+	secondary_mode_only();
+
+	/*
+	 * CAUTION: we do want a lookup into the registry to happen
+	 * before any attempt is made to open the devnode, so that we
+	 * don't inadvertently open a regular (i.e. non-RTDM) device.
+	 * Reason is that opening, then closing a device - because we
+	 * don't manage it - may incur side-effects we don't want,
+	 * e.g. opening then closing one end of a pipe would cause the
+	 * other side to read the EOF condition.  This is basically
+	 * why we keep a RTDM registry for named devices, so that we
+	 * can figure out whether an open() request is going to be
+	 * valid, without having to open the devnode yet.
+	 */
+	dev = __rtdm_get_namedev(path);
+	if (dev == NULL)
+		return -EADV;
+
+	ufd = get_unused_fd_flags(oflag);
+	if (ufd < 0) {
+		ret = ufd;
+		goto fail_fd;
+	}
+
+	filp = open_devnode(dev, path, oflag);
+	if (IS_ERR(filp)) {
+		ret = PTR_ERR(filp);
+		goto fail_fopen;
+	}
+
+	ret = create_instance(ufd, dev, &context);
+	if (ret < 0)
+		goto fail_create;
+
+	context->fd.minor = dev->minor;
+	context->fd.oflags = oflag;
+
+	trace_cobalt_fd_open(current, &context->fd, ufd, oflag);
+
+	if (dev->ops.open) {
+		ret = dev->ops.open(&context->fd, oflag);
+		if (!XENO_ASSERT(COBALT, !spltest()))
+			splnone();
+		if (ret < 0)
+			goto fail_open;
+	}
+
+	ret = rtdm_device_new_fd(&context->fd, ufd, context->device);
+	if (ret < 0)
+		goto fail_open;
+
+	fd_install(ufd, filp);
+
+	return ufd;
+
+fail_open:
+	cleanup_instance(dev, context);
+fail_create:
+	filp_close(filp, current->files);
+fail_fopen:
+	put_unused_fd(ufd);
+fail_fd:
+	__rtdm_put_device(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__rtdm_dev_open);
+
+int __rtdm_dev_socket(int protocol_family, int socket_type,
+		      int protocol)
+{
+	struct rtdm_dev_context *context;
+	struct rtdm_device *dev;
+	int ufd, ret;
+
+	secondary_mode_only();
+
+	dev = __rtdm_get_protodev(protocol_family, socket_type);
+	if (dev == NULL)
+		return -EAFNOSUPPORT;
+
+	ufd = __rtdm_anon_getfd("[rtdm-socket]", O_RDWR);
+	if (ufd < 0) {
+		ret = ufd;
+		goto fail_getfd;
+	}
+
+	ret = create_instance(ufd, dev, &context);
+	if (ret < 0)
+		goto fail_create;
+
+	trace_cobalt_fd_socket(current, &context->fd, ufd, protocol_family);
+
+	if (dev->ops.socket) {
+		ret = dev->ops.socket(&context->fd, protocol);
+		if (!XENO_ASSERT(COBALT, !spltest()))
+			splnone();
+		if (ret < 0)
+			goto fail_socket;
+	}
+
+	ret = rtdm_device_new_fd(&context->fd, ufd, context->device);
+	if (ret < 0)
+		goto fail_socket;
+
+	return ufd;
+
+fail_socket:
+	cleanup_instance(dev, context);
+fail_create:
+	close_fd(ufd);
+fail_getfd:
+	__rtdm_put_device(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__rtdm_dev_socket);
+
+int __rtdm_dev_ioctl_core(struct rtdm_fd *fd, unsigned int request,
+			  void __user *arg)
+{
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	struct rtdm_driver *drv;
+	struct rtdm_device_info dev_info;
+
+	if (fd->magic != RTDM_FD_MAGIC || request != RTIOC_DEVICE_INFO)
+		return -EADV;
+
+	drv = dev->driver;
+	dev_info.device_flags = drv->device_flags;
+	dev_info.device_class = drv->profile_info.class_id;
+	dev_info.device_sub_class = drv->profile_info.subclass_id;
+	dev_info.profile_version = drv->profile_info.version;
+
+	return rtdm_safe_copy_to_user(fd, arg, &dev_info,  sizeof(dev_info));
+}
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+
+/**
+ * @addtogroup rtdm_sync
+ *@{
+ */
+
+/**
+ * @fn void rtdm_waitqueue_init(struct rtdm_waitqueue *wq)
+ * @brief Initialize an RTDM wait queue
+ *
+ * Sets up a wait queue structure for further use.
+ *
+ * @param wq waitqueue to initialize.
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_waitqueue_init(struct rtdm_waitqueue *wq);
+
+/**
+ * @fn void rtdm_waitqueue_destroy(struct rtdm_waitqueue *wq)
+ * @brief Delete an RTDM wait queue
+ *
+ * Dismantles a wait queue structure, releasing all resources attached
+ * to it.
+ *
+ * @param wq waitqueue to delete.
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_waitqueue_destroy(struct rtdm_waitqueue *wq);
+
+/**
+ * @fn rtdm_timedwait_condition_locked(struct rtdm_wait_queue *wq, C_expr condition, nanosecs_rel_t timeout, rtdm_toseq_t *toseq)
+ * @brief Timed sleep on a locked waitqueue until a condition gets true
+ *
+ * The calling task is put to sleep until @a condition evaluates to
+ * true or a timeout occurs. The condition is checked each time the
+ * waitqueue @a wq is signaled.
+ *
+ * The waitqueue must have been locked by a call to
+ * rtdm_waitqueue_lock() prior to calling this service.
+ *
+ * @param wq locked waitqueue to wait on. The waitqueue lock is
+ * dropped when sleeping, then reacquired before this service returns
+ * to the caller.
+ *
+ * @param condition C expression for the event to wait for.
+ *
+ * @param timeout relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values.
+ * 
+ * @param[in,out] toseq handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has received a Linux signal or
+ * has been forcibly unblocked by a call to rtdm_task_unblock().
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * @note rtdm_waitqueue_signal() has to be called after changing any
+ * variable that could change the result of the wait condition.
+ *
+ * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for
+ * such a service, and might cause unexpected behavior.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+rtdm_timedwait_condition_locked(struct rtdm_wait_queue *wq, C_expr condition,
+				nanosecs_rel_t timeout, rtdm_toseq_t *toseq);
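+
+/*
+ * Illustrative usage sketch (assumption, not from the original docs):
+ * a driver waiting for a condition under the waitqueue lock, with the
+ * producer path signaling the update. my_dev, its data_ready flag and
+ * MY_TIMEOUT_NS are hypothetical.
+ *
+ *	rtdm_lockctx_t ctx;
+ *	int ret;
+ *
+ *	rtdm_waitqueue_lock(&my_dev->wq, ctx);
+ *	ret = rtdm_timedwait_condition_locked(&my_dev->wq,
+ *					      my_dev->data_ready,
+ *					      MY_TIMEOUT_NS, NULL);
+ *	rtdm_waitqueue_unlock(&my_dev->wq, ctx);
+ *
+ *	producer side, e.g. after filling a buffer:
+ *
+ *	my_dev->data_ready = true;
+ *	rtdm_waitqueue_signal(&my_dev->wq);
+ */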
+
+/**
+ * @fn rtdm_wait_condition_locked(struct rtdm_wait_queue *wq, C_expr condition)
+ * @brief Sleep on a locked waitqueue until a condition gets true
+ *
+ * The calling task is put to sleep until @a condition evaluates to
+ * true. The condition is checked each time the waitqueue @a wq is
+ * signaled.
+ *
+ * The waitqueue must have been locked by a call to
+ * rtdm_waitqueue_lock() prior to calling this service.
+ *
+ * @param wq locked waitqueue to wait on. The waitqueue lock is
+ * dropped when sleeping, then reacquired before this service returns
+ * to the caller.
+ *
+ * @param condition C expression for the event to wait for.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has received a Linux signal or
+ * has been forcibly unblocked by a call to rtdm_task_unblock().
+ *
+ * @note rtdm_waitqueue_signal() has to be called after changing any
+ * variable that could change the result of the wait condition.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+rtdm_wait_condition_locked(struct rtdm_wait_queue *wq, C_expr condition);
+
+/**
+ * @fn rtdm_timedwait_condition(struct rtdm_wait_queue *wq, C_expr condition, nanosecs_rel_t timeout, rtdm_toseq_t *toseq)
+ * @brief Timed sleep on a waitqueue until a condition gets true
+ *
+ * The calling task is put to sleep until @a condition evaluates to
+ * true or a timeout occurs. The condition is checked each time the
+ * waitqueue @a wq is signaled.
+ *
+ * @param wq waitqueue to wait on.
+ *
+ * @param condition C expression for the event to wait for.
+ *
+ * @param timeout relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values.
+ * 
+ * @param[in,out] toseq handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has received a Linux signal or
+ * has been forcibly unblocked by a call to rtdm_task_unblock().
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * @note rtdm_waitqueue_signal() has to be called after changing any
+ * variable that could change the result of the wait condition.
+ *
+ * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for
+ * such a service, and might cause unexpected behavior.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+rtdm_timedwait_condition(struct rtdm_wait_queue *wq, C_expr condition,
+			 nanosecs_rel_t timeout, rtdm_toseq_t *toseq);
+
+/**
+ * @fn void rtdm_timedwait(struct rtdm_wait_queue *wq, nanosecs_rel_t timeout, rtdm_toseq_t *toseq)
+ * @brief Timed sleep on a waitqueue unconditionally
+ *
+ * The calling task is put to sleep until the waitqueue is signaled by
+ * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or
+ * flushed by a call to rtdm_waitqueue_flush(), or a timeout occurs.
+ *
+ * @param wq waitqueue to wait on.
+ *
+ * @param timeout relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values.
+ * 
+ * @param[in,out] toseq handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the waitqueue has been flushed, or the
+ * calling task has received a Linux signal or has been forcibly
+ * unblocked by a call to rtdm_task_unblock().
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for
+ * such a service, and might cause unexpected behavior.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+void rtdm_timedwait(struct rtdm_wait_queue *wq,
+		    nanosecs_rel_t timeout, rtdm_toseq_t *toseq);
+
+/**
+ * @fn void rtdm_timedwait_locked(struct rtdm_wait_queue *wq, nanosecs_rel_t timeout, rtdm_toseq_t *toseq)
+ * @brief Timed sleep on a locked waitqueue unconditionally
+ *
+ * The calling task is put to sleep until the waitqueue is signaled by
+ * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or
+ * flushed by a call to rtdm_waitqueue_flush(), or a timeout occurs.
+ *
+ * The waitqueue must have been locked by a call to
+ * rtdm_waitqueue_lock() prior to calling this service.
+ *
+ * @param wq locked waitqueue to wait on. The waitqueue lock is
+ * dropped when sleeping, then reacquired before this service returns
+ * to the caller.
+ *
+ * @param timeout relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values.
+ * 
+ * @param[in,out] toseq handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the waitqueue has been flushed, or the
+ * calling task has received a Linux signal or has been forcibly
+ * unblocked by a call to rtdm_task_unblock().
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for
+ * such a service, and might cause unexpected behavior.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+void rtdm_timedwait_locked(struct rtdm_wait_queue *wq,
+			   nanosecs_rel_t timeout, rtdm_toseq_t *toseq);
+
+/**
+ * @fn rtdm_wait_condition(struct rtdm_wait_queue *wq, C_expr condition)
+ * @brief Sleep on a waitqueue until a condition gets true
+ *
+ * The calling task is put to sleep until @a condition evaluates to
+ * true. The condition is checked each time the waitqueue @a wq is
+ * signaled.
+ *
+ * @param wq waitqueue to wait on
+ *
+ * @param condition C expression for the event to wait for.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has received a Linux signal or
+ * has been forcibly unblocked by a call to rtdm_task_unblock().
+ *
+ * @note rtdm_waitqueue_signal() has to be called after changing any
+ * variable that could change the result of the wait condition.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+rtdm_wait_condition(struct rtdm_wait_queue *wq, C_expr condition);
+
+/**
+ * @fn void rtdm_wait(struct rtdm_wait_queue *wq)
+ * @brief Sleep on a waitqueue unconditionally
+ *
+ * The calling task is put to sleep until the waitqueue is signaled by
+ * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or
+ * flushed by a call to rtdm_waitqueue_flush().
+ *
+ * @param wq waitqueue to wait on.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the waitqueue has been flushed, or the
+ * calling task has received a Linux signal or has been forcibly
+ * unblocked by a call to rtdm_task_unblock().
+ *
+ * @coretags{primary-only, might-switch}
+ */
+void rtdm_wait(struct rtdm_wait_queue *wq);
+
+/**
+ * @fn void rtdm_wait_locked(struct rtdm_wait_queue *wq)
+ * @brief Sleep on a locked waitqueue unconditionally
+ *
+ * The calling task is put to sleep until the waitqueue is signaled by
+ * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or
+ * flushed by a call to rtdm_waitqueue_flush().
+ *
+ * The waitqueue must have been locked by a call to
+ * rtdm_waitqueue_lock() prior to calling this service.
+ *
+ * @param wq locked waitqueue to wait on. The waitqueue lock is
+ * dropped when sleeping, then reacquired before this service returns
+ * to the caller.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the waitqueue has been flushed, or the
+ * calling task has received a Linux signal or has been forcibly
+ * unblocked by a call to rtdm_task_unblock().
+ *
+ * @coretags{primary-only, might-switch}
+ */
+void rtdm_wait_locked(struct rtdm_wait_queue *wq);
+
+/**
+ * @fn void rtdm_waitqueue_lock(struct rtdm_wait_queue *wq, rtdm_lockctx_t context)
+ * @brief Lock a waitqueue
+ *
+ * Acquires the lock on the waitqueue @a wq.
+ *
+ * @param wq waitqueue to lock.
+ *
+ * @param context name of local variable to store the context in.
+ *
+ * @note Recursive locking might lead to unexpected behavior,
+ * including lock up.
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_waitqueue_lock(struct rtdm_wait_queue *wq, rtdm_lockctx_t context);
+
+/**
+ * @fn void rtdm_waitqueue_unlock(struct rtdm_wait_queue *wq, rtdm_lockctx_t context)
+ * @brief Unlock a waitqueue
+ *
+ * Releases the lock on the waitqueue @a wq.
+ *
+ * @param wq waitqueue to unlock.
+ *
+ * @param context name of local variable to retrieve the context from.
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_waitqueue_unlock(struct rtdm_wait_queue *wq, rtdm_lockctx_t context);
+
+/**
+ * @fn void rtdm_waitqueue_signal(struct rtdm_wait_queue *wq)
+ * @brief Signal a waitqueue
+ *
+ * Signals the waitqueue @a wq, waking up a single waiter (if
+ * any).
+ *
+ * @param wq waitqueue to signal.
+ *
+ * @return non-zero if a task has been readied as a result of this
+ * call, zero otherwise.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_waitqueue_signal(struct rtdm_wait_queue *wq);
+
+/**
+ * @fn void rtdm_waitqueue_broadcast(struct rtdm_wait_queue *wq)
+ * @brief Broadcast a waitqueue
+ *
+ * Broadcast the waitqueue @a wq, waking up all waiters. Each
+ * readied task may assume to have received the wake up event.
+ *
+ * @param wq waitqueue to broadcast.
+ *
+ * @return non-zero if at least one task has been readied as a result
+ * of this call, zero otherwise.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_waitqueue_broadcast(struct rtdm_wait_queue *wq);
+
+/**
+ * @fn void rtdm_waitqueue_flush(struct rtdm_wait_queue *wq)
+ * @brief Flush a waitqueue
+ *
+ * Flushes the waitqueue @a wq, unblocking all waiters with an error
+ * status (-EINTR).
+ *
+ * @param wq waitqueue to flush.
+ *
+ * @return non-zero if at least one task has been readied as a result
+ * of this call, zero otherwise.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_waitqueue_flush(struct rtdm_wait_queue *wq);
+
+/**
+ * @fn void rtdm_waitqueue_wakeup(struct rtdm_wait_queue *wq, rtdm_task_t waiter)
+ * @brief Signal a particular waiter on a waitqueue
+ *
+ * Signals the waitqueue @a wq, waking up waiter @a waiter only,
+ * which must be currently sleeping on the waitqueue.
+ *
+ * @param wq waitqueue to signal.
+ *
+ * @param waiter RTDM task to wake up.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_waitqueue_wakeup(struct rtdm_wait_queue *wq, rtdm_task_t waiter);
+
+/**
+ * @fn rtdm_for_each_waiter(rtdm_task_t pos, struct rtdm_wait_queue *wq)
+ * @brief Simple iterator for waitqueues
+ *
+ * This construct traverses the wait list of a given waitqueue
+ * @a wq, assigning each RTDM task pointer to the cursor variable
+ * @a pos, which must be of type rtdm_task_t.
+ *
+ * @a wq must have been locked by a call to rtdm_waitqueue_lock()
+ * prior to traversing its wait list.
+ *
+ * @param pos cursor variable holding a pointer to the RTDM task
+ * being fetched.
+ *
+ * @param wq waitqueue to scan.
+ *
+ * @note The waitqueue should not be signaled, broadcast or flushed
+ * during the traversal, unless the loop is aborted immediately
+ * after. Should multiple waiters be readied while iterating, the safe
+ * form rtdm_for_each_waiter_safe() must be used for traversal
+ * instead.
+ *
+ * @coretags{unrestricted}
+ */
+rtdm_for_each_waiter(rtdm_task_t pos, struct rtdm_wait_queue *wq);
+
+/**
+ * @fn rtdm_for_each_waiter_safe(rtdm_task_t pos, rtdm_task_t tmp, struct rtdm_wait_queue *wq)
+ * @brief Safe iterator for waitqueues
+ *
+ * This construct traverses the wait list of a given waitqueue
+ * @a wq, assigning each RTDM task pointer to the cursor variable
+ * @a pos, which must be of type rtdm_task_t.
+ *
+ * Unlike with rtdm_for_each_waiter(), the waitqueue may be signaled,
+ * broadcast or flushed during the traversal.
+ *
+ * @a wq must have been locked by a call to rtdm_waitqueue_lock()
+ * prior to traversing its wait list.
+ *
+ * @param pos cursor variable holding a pointer to the RTDM task
+ * being fetched.
+ *
+ * @param tmp temporary cursor variable.
+ *
+ * @param wq waitqueue to scan.
+ *
+ * @coretags{unrestricted}
+ */
+rtdm_for_each_waiter_safe(rtdm_task_t pos, rtdm_task_t tmp, struct rtdm_wait_queue *wq);
+
+/** @} rtdm_sync */
+
+/**
+ * @defgroup rtdm_interdriver_api Driver to driver services
+ * Inter-driver interface
+ *@{
+ */
+
+/**
+ * @brief Open a device
+ *
+ * Refer to rtdm_open() for parameters and return values
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_open(const char *path, int oflag, ...);
+
+/**
+ * @brief Create a socket
+ *
+ * Refer to rtdm_socket() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_socket(int protocol_family, int socket_type, int protocol);
+
+/**
+ * @brief Close a device or socket
+ *
+ * Refer to rtdm_close() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_close(int fd);
+
+/**
+ * @brief Issue an IOCTL
+ *
+ * Refer to rtdm_ioctl() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_ioctl(int fd, int request, ...);
+
+/**
+ * @brief Read from device
+ *
+ * Refer to rtdm_read() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_read(int fd, void *buf, size_t nbyte);
+
+/**
+ * @brief Write to device
+ *
+ * Refer to rtdm_write() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_write(int fd, const void *buf, size_t nbyte);
+
+/**
+ * @brief Receive message from socket
+ *
+ * Refer to rtdm_recvmsg() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_recvmsg(int fd, struct user_msghdr *msg, int flags);
+
+/**
+ * @brief Receive message from socket
+ *
+ * Refer to rtdm_recvfrom() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_recvfrom(int fd, void *buf, size_t len, int flags,
+		      struct sockaddr *from, socklen_t *fromlen);
+
+/**
+ * @brief Receive message from socket
+ *
+ * Refer to rtdm_recv() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_recv(int fd, void *buf, size_t len, int flags);
+
+/**
+ * @brief Transmit message to socket
+ *
+ * Refer to rtdm_sendmsg() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_sendmsg(int fd, const struct user_msghdr *msg, int flags);
+
+/**
+ * @brief Transmit message to socket
+ *
+ * Refer to rtdm_sendto() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_sendto(int fd, const void *buf, size_t len, int flags,
+		    const struct sockaddr *to, socklen_t tolen);
+
+/**
+ * @brief Transmit message to socket
+ *
+ * Refer to rtdm_send() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_send(int fd, const void *buf, size_t len, int flags);
+
+/**
+ * @brief Bind to local address
+ *
+ * Refer to rtdm_bind() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_bind(int fd, const struct sockaddr *my_addr, socklen_t addrlen);
+
+/**
+ * @brief Connect to remote address
+ *
+ * Refer to rtdm_connect() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+int rtdm_connect(int fd, const struct sockaddr *serv_addr, socklen_t addrlen);
+
+/**
+ * @brief Listen to incoming connection requests
+ *
+ * Refer to rtdm_listen() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_listen(int fd, int backlog);
+
+/**
+ * @brief Accept a connection request
+ *
+ * Refer to rtdm_accept() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+int rtdm_accept(int fd, struct sockaddr *addr, socklen_t *addrlen);
+
+/**
+ * @brief Shut down parts of a connection
+ *
+ * Refer to rtdm_shutdown() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_shutdown(int fd, int how);
+
+/**
+ * @brief Get socket option
+ *
+ * Refer to rtdm_getsockopt() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_getsockopt(int fd, int level, int optname, void *optval,
+		    socklen_t *optlen);
+
+/**
+ * @brief Set socket option
+ *
+ * Refer to rtdm_setsockopt() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_setsockopt(int fd, int level, int optname, const void *optval,
+		    socklen_t optlen);
+
+/**
+ * @brief Get local socket address
+ *
+ * Refer to rtdm_getsockname() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_getsockname(int fd, struct sockaddr *name, socklen_t *namelen);
+
+/**
+ * @brief Get socket destination address
+ *
+ * Refer to rtdm_getpeername() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_getpeername(int fd, struct sockaddr *name, socklen_t *namelen);
+
+/** @} Inter-driver calls */
+
+/** @} */
+
+/*!
+ * @addtogroup rtdm_user_api
+ * @{
+ */
+
+/**
+ * @brief Open a device
+ *
+ * @param[in] path Device name
+ * @param[in] oflag Open flags
+ * @param ... Further parameters will be ignored.
+ *
+ * @return Positive file descriptor value on success, otherwise a negative
+ * error code.
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c open() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_open(const char *path, int oflag, ...);
+
+/**
+ * @brief Create a socket
+ *
+ * @param[in] protocol_family Protocol family (@c PF_xxx)
+ * @param[in] socket_type Socket type (@c SOCK_xxx)
+ * @param[in] protocol Protocol ID, 0 for default
+ *
+ * @return Positive file descriptor value on success, otherwise a negative
+ * error code.
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c socket() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_socket(int protocol_family, int socket_type, int protocol);
+
+/**
+ * @brief Close a device or socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_open() or rtdm_socket()
+ *
+ * @return 0 on success, otherwise a negative error code.
+ *
+ * @note If the matching rtdm_open() or rtdm_socket() call took place in
+ * non-real-time context, rtdm_close() must be issued within non-real-time
+ * as well. Otherwise, the call will fail.
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c close() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_close(int fd);
+
+/**
+ * @brief Issue an IOCTL
+ *
+ * @param[in] fd File descriptor as returned by rtdm_open() or rtdm_socket()
+ * @param[in] request IOCTL code
+ * @param ... Optional third argument, depending on IOCTL function
+ * (@c void @c * or @c unsigned @c long)
+ *
+ * @return Positive value on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c ioctl() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_ioctl(int fd, int request, ...);
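+
+/*
+ * Illustrative sketch (assumption): querying an RTDM device through
+ * the calls documented above. The device path "/dev/rtdm/mydev" is
+ * hypothetical; RTIOC_DEVICE_INFO and struct rtdm_device_info are the
+ * generic device query handled by __rtdm_dev_ioctl_core().
+ *
+ *	struct rtdm_device_info info;
+ *	int fd, ret;
+ *
+ *	fd = rtdm_open("/dev/rtdm/mydev", O_RDWR);
+ *	if (fd < 0)
+ *		return fd;
+ *
+ *	ret = rtdm_ioctl(fd, RTIOC_DEVICE_INFO, &info);
+ *
+ *	rtdm_close(fd);
+ *
+ *	return ret;
+ */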
+
+/**
+ * @brief Read from device
+ *
+ * @param[in] fd File descriptor as returned by rtdm_open()
+ * @param[out] buf Input buffer
+ * @param[in] nbyte Number of bytes to read
+ *
+ * @return Number of bytes read, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c read() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_read(int fd, void *buf, size_t nbyte);
+
+/**
+ * @brief Write to device
+ *
+ * @param[in] fd File descriptor as returned by rtdm_open()
+ * @param[in] buf Output buffer
+ * @param[in] nbyte Number of bytes to write
+ *
+ * @return Number of bytes written, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c write() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_write(int fd, const void *buf, size_t nbyte);
+
+/**
+ * @brief Receive message from socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in,out] msg Message descriptor
+ * @param[in] flags Message flags
+ *
+ * @return Number of bytes received, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c recvmsg() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_recvmsg(int fd, struct user_msghdr *msg, int flags);
+
+/**
+ * @brief Receive message from socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[out] buf Message buffer
+ * @param[in] len Message buffer size
+ * @param[in] flags Message flags
+ * @param[out] from Buffer for message sender address
+ * @param[in,out] fromlen Address buffer size
+ *
+ * @return Number of bytes received, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c recvfrom() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_recvfrom(int fd, void *buf, size_t len, int flags,
+		      struct sockaddr *from, socklen_t *fromlen);
+
+/**
+ * @brief Receive message from socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[out] buf Message buffer
+ * @param[in] len Message buffer size
+ * @param[in] flags Message flags
+ *
+ * @return Number of bytes received, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c recv() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_recv(int fd, void *buf, size_t len, int flags);
+
+/**
+ * @brief Transmit message to socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] msg Message descriptor
+ * @param[in] flags Message flags
+ *
+ * @return Number of bytes sent, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c sendmsg() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_sendmsg(int fd, const struct user_msghdr *msg, int flags);
+
+/**
+ * @brief Transmit message to socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] buf Message buffer
+ * @param[in] len Message buffer size
+ * @param[in] flags Message flags
+ * @param[in] to Buffer for message destination address
+ * @param[in] tolen Address buffer size
+ *
+ * @return Number of bytes sent, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c sendto() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_sendto(int fd, const void *buf, size_t len, int flags,
+		    const struct sockaddr *to, socklen_t tolen);
+
+/**
+ * @brief Transmit message to socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] buf Message buffer
+ * @param[in] len Message buffer size
+ * @param[in] flags Message flags
+ *
+ * @return Number of bytes sent, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c send() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_send(int fd, const void *buf, size_t len, int flags);
+
+/**
+ * @brief Bind to local address
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] my_addr Address buffer
+ * @param[in] addrlen Address buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c bind() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+int rtdm_bind(int fd, const struct sockaddr *my_addr, socklen_t addrlen);
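+
+/*
+ * A minimal sketch of a datagram-style exchange using the socket calls
+ * above. The PF_CAN/SOCK_RAW pair and the sockaddr_can setup are
+ * illustrative assumptions; actual values depend on the protocol
+ * driver in use:
+ *
+ *	struct sockaddr_can addr = { ... };
+ *	char buf[64];
+ *	int fd, ret;
+ *
+ *	fd = rtdm_socket(PF_CAN, SOCK_RAW, 0);
+ *	if (fd < 0)
+ *		return fd;
+ *
+ *	ret = rtdm_bind(fd, (struct sockaddr *)&addr, sizeof(addr));
+ *	if (ret < 0)
+ *		goto out;
+ *
+ *	ret = rtdm_recv(fd, buf, sizeof(buf), 0);
+ *	...
+ * out:
+ *	rtdm_close(fd);
+ */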
+
+/**
+ * @brief Connect to remote address
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] serv_addr Address buffer
+ * @param[in] addrlen Address buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c connect() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+int rtdm_connect(int fd, const struct sockaddr *serv_addr,
+		 socklen_t addrlen);
+
+/**
+ * @brief Listen for incoming connection requests
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] backlog Maximum queue length
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c listen() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_listen(int fd, int backlog);
+
+/**
+ * @brief Accept connection requests
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[out] addr Buffer for remote address
+ * @param[in,out] addrlen Address buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c accept() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+int rtdm_accept(int fd, struct sockaddr *addr, socklen_t *addrlen);
+
+/**
+ * @brief Shut down parts of a connection
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] how Specifies the part to be shut down (@c SHUT_xxx)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c shutdown() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_shutdown(int fd, int how);
+
+/**
+ * @brief Get socket option
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] level Addressed stack level
+ * @param[in] optname Option name ID
+ * @param[out] optval Value buffer
+ * @param[in,out] optlen Value buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c getsockopt() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_getsockopt(int fd, int level, int optname, void *optval,
+		      socklen_t *optlen);
+
+/**
+ * @brief Set socket option
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] level Addressed stack level
+ * @param[in] optname Option name ID
+ * @param[in] optval Value buffer
+ * @param[in] optlen Value buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c setsockopt() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_setsockopt(int fd, int level, int optname, const void *optval,
+		    socklen_t optlen);
+
+/**
+ * @brief Get local socket address
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[out] name Address buffer
+ * @param[in,out] namelen Address buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c getsockname() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_getsockname(int fd, struct sockaddr *name, socklen_t *namelen);
+
+/**
+ * @brief Get socket destination address
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[out] name Address buffer
+ * @param[in,out] namelen Address buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c getpeername() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_getpeername(int fd, struct sockaddr *name, socklen_t *namelen);
+
+#endif /* DOXYGEN_CPP */
+
+/** @} */
+++ linux-patched/kernel/xenomai/rtdm/drvlib.c	2022-03-21 12:58:29.107891711 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/rtdm/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Real-Time Driver Model for Xenomai, driver library
+ *
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/mman.h>
+#include <asm/page.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <linux/highmem.h>
+#include <linux/err.h>
+#include <linux/anon_inodes.h>
+#include <rtdm/driver.h>
+#include <rtdm/compat.h>
+#include <pipeline/inband_work.h>
+#include "internal.h"
+#include <trace/events/cobalt-rtdm.h>
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_clock Clock Services
+ * @{
+ */
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+
+/**
+ * @brief Get system time
+ *
+ * @return The system time in nanoseconds is returned
+ *
+ * @note The resolution of this service depends on the system timer. In
+ * particular, if the system timer is running in periodic mode, the return
+ * value will be limited to multiples of the timer tick period.
+ *
+ * @note The system timer may have to be started to obtain valid results.
+ * Whether this happens automatically (as on Xenomai) or is controlled by the
+ * application depends on the RTDM host environment.
+ *
+ * @coretags{unrestricted}
+ */
+nanosecs_abs_t rtdm_clock_read(void);
+
+/**
+ * @brief Get monotonic time
+ *
+ * @return The monotonic time in nanoseconds is returned
+ *
+ * @note The resolution of this service depends on the system timer. In
+ * particular, if the system timer is running in periodic mode, the return
+ * value will be limited to multiples of the timer tick period.
+ *
+ * @note The system timer may have to be started to obtain valid results.
+ * Whether this happens automatically (as on Xenomai) or is controlled by the
+ * application depends on the RTDM host environment.
+ *
+ * @coretags{unrestricted}
+ */
+nanosecs_abs_t rtdm_clock_read_monotonic(void);
+#endif /* DOXYGEN_CPP */
+/** @} */
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_task Task Services
+ * @{
+ */
+
+/**
+ * @brief Initialise and start a real-time task
+ *
+ * After initialising a task, the task handle remains valid and can be
+ * passed to RTDM services until either rtdm_task_destroy() or
+ * rtdm_task_join() has been invoked.
+ *
+ * @param[in,out] task Task handle
+ * @param[in] name Optional task name
+ * @param[in] task_proc Procedure to be executed by the task
+ * @param[in] arg Custom argument passed to @c task_proc() on entry
+ * @param[in] priority Priority of the task, see also
+ * @ref rtdmtaskprio "Task Priority Range"
+ * @param[in] period Period in nanoseconds of a cyclic task, 0 for non-cyclic
+ * mode. Waiting for the first and subsequent periodic events is
+ * done using rtdm_task_wait_period().
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_task_init(rtdm_task_t *task, const char *name,
+		   rtdm_task_proc_t task_proc, void *arg,
+		   int priority, nanosecs_rel_t period)
+{
+	union xnsched_policy_param param;
+	struct xnthread_start_attr sattr;
+	struct xnthread_init_attr iattr;
+	int err;
+
+	if (!realtime_core_enabled())
+		return -ENOSYS;
+
+	iattr.name = name;
+	iattr.flags = 0;
+	iattr.personality = &xenomai_personality;
+	iattr.affinity = CPU_MASK_ALL;
+	param.rt.prio = priority;
+
+	err = xnthread_init(task, &iattr, &xnsched_class_rt, &param);
+	if (err)
+		return err;
+
+	/* We need an anonymous registry entry to obtain a handle for fast
+	   mutex locking. */
+	err = xnthread_register(task, "");
+	if (err)
+		goto cleanup_out;
+
+	if (period > 0) {
+		err = xnthread_set_periodic(task, XN_INFINITE,
+					    XN_RELATIVE, period);
+		if (err)
+			goto cleanup_out;
+	}
+
+	sattr.mode = 0;
+	sattr.entry = task_proc;
+	sattr.cookie = arg;
+	err = xnthread_start(task, &sattr);
+	if (err)
+		goto cleanup_out;
+
+	return 0;
+
+      cleanup_out:
+	xnthread_cancel(task);
+	return err;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_task_init);
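+
+/*
+ * A minimal usage sketch, driver side. The names my_task, my_task_proc,
+ * struct my_dev and poll_hardware() are illustrative assumptions: spawn
+ * a 1 ms periodic worker at priority 50 which polls until told to stop.
+ *
+ *	static rtdm_task_t my_task;
+ *
+ *	static void my_task_proc(void *arg)
+ *	{
+ *		struct my_dev *dev = arg;
+ *
+ *		while (!rtdm_task_should_stop()) {
+ *			if (rtdm_task_wait_period(NULL) == -ETIMEDOUT)
+ *				continue;	// overrun, skip this cycle
+ *			poll_hardware(dev);
+ *		}
+ *	}
+ *
+ *	ret = rtdm_task_init(&my_task, "my-worker", my_task_proc, dev,
+ *			     50, 1000000);	// period in nanoseconds
+ */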
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+/**
+ * @brief Destroy a real-time task
+ *
+ * This call sends a termination request to @a task, then waits for it
+ * to exit. All RTDM tasks should check for pending termination
+ * requests by calling rtdm_task_should_stop() from their work loop.
+ *
+ * If @a task is current, rtdm_task_destroy() terminates the current
+ * context, and does not return to the caller.
+ *
+ * @param[in,out] task Task handle as returned by rtdm_task_init()
+ *
+ * @note Passing the same task handle to RTDM services after the completion of
+ * this function is not allowed.
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+void rtdm_task_destroy(rtdm_task_t *task);
+
+/**
+ * @brief Check for pending termination request
+ *
+ * Check whether a termination request was received by the current
+ * RTDM task. Termination requests are sent by calling
+ * rtdm_task_destroy().
+ *
+ * @return Non-zero indicates that a termination request is pending,
+ * in which case the caller should wrap up and exit.
+ *
+ * @coretags{rtdm-task, might-switch}
+ */
+int rtdm_task_should_stop(void);
+
+/**
+ * @brief Adjust real-time task priority
+ *
+ * @param[in,out] task Task handle as returned by rtdm_task_init()
+ * @param[in] priority New priority of the task, see also
+ * @ref rtdmtaskprio "Task Priority Range"
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+void rtdm_task_set_priority(rtdm_task_t *task, int priority);
+
+/**
+ * @brief Adjust real-time task period
+ *
+ * @param[in,out] task Task handle as returned by rtdm_task_init(), or
+ * NULL for referring to the current RTDM task or Cobalt thread.
+ *
+ * @param[in] start_date The initial (absolute) date of the first
+ * release point, expressed in nanoseconds.  @a task will be delayed
+ * by the first call to rtdm_task_wait_period() until this point is
+ * reached. If @a start_date is zero, the first release point is set
+ * to @a period nanoseconds after the current date.
+ *
+ * @param[in] period New period in nanoseconds of a cyclic task, zero
+ * to disable cyclic mode for @a task.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_task_set_period(rtdm_task_t *task, nanosecs_abs_t start_date,
+			 nanosecs_rel_t period);
+
+/**
+ * @brief Wait on next real-time task period
+ *
+ * @param[in] overruns_r Address of a long word receiving the count of
+ * overruns if -ETIMEDOUT is returned, or NULL if the caller does not
+ * need that information.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINVAL is returned if calling task is not in periodic mode.
+ *
+ * - -ETIMEDOUT is returned if a timer overrun occurred, which indicates
+ * that a previous release point has been missed by the calling task.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_task_wait_period(unsigned long *overruns_r);
+
+/**
+ * @brief Activate a blocked real-time task
+ *
+ * @return Non-zero is returned if the task was actually unblocked from a
+ * pending wait state, 0 otherwise.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+int rtdm_task_unblock(rtdm_task_t *task);
+
+/**
+ * @brief Get current real-time task
+ *
+ * @return Pointer to task handle
+ *
+ * @coretags{mode-unrestricted}
+ */
+rtdm_task_t *rtdm_task_current(void);
+
+/**
+ * @brief Sleep a specified amount of time
+ *
+ * @param[in] delay Delay in nanoseconds, see @ref RTDM_TIMEOUT_xxx for
+ * special values.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a signal or
+ * explicitly via rtdm_task_unblock().
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_task_sleep(nanosecs_rel_t delay);
+
+/**
+ * @brief Sleep until a specified absolute time
+ *
+ * @deprecated Use rtdm_task_sleep_abs() instead!
+ *
+ * @param[in] wakeup_time Absolute timeout in nanoseconds
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a signal or
+ * explicitly via rtdm_task_unblock().
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_task_sleep_until(nanosecs_abs_t wakeup_time);
+
+/**
+ * @brief Sleep until a specified absolute time
+ *
+ * @param[in] wakeup_time Absolute timeout in nanoseconds
+ * @param[in] mode Selects the timer mode, see RTDM_TIMERMODE_xxx for details
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a signal or
+ * explicitly via rtdm_task_unblock().
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * - -EINVAL is returned if an invalid parameter was passed.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_task_sleep_abs(nanosecs_abs_t wakeup_time, enum rtdm_timer_mode mode);
+
+/**
+ * @brief Safe busy waiting
+ *
+ * This service alternates active spinning and sleeping within a wait
+ * loop, until a condition is satisfied. While sleeping, a task is
+ * scheduled out and does not consume any CPU time.
+ *
+ * rtdm_task_busy_wait() is particularly useful for waiting for a
+ * state change detected by reading an I/O register, which usually happens
+ * shortly after the wait starts, without incurring the adverse effects of
+ * long busy waiting if it does not.
+ *
+ * @param[in] condition The C expression to be tested for detecting
+ * completion.
+ * @param[in] spin_ns The time to spin on @a condition before
+ * sleeping, expressed as a count of nanoseconds.
+ * @param[in] sleep_ns The time to sleep for before spinning again,
+ * expressed as a count of nanoseconds.
+ *
+ * @return 0 on success if @a condition is satisfied, otherwise:
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a
+ * Linux signal or explicitly via rtdm_task_unblock().
+ *
+ * - -EPERM may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_task_busy_wait(bool condition, nanosecs_rel_t spin_ns,
+			nanosecs_rel_t sleep_ns);
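+
+/*
+ * A minimal sketch, assuming a memory-mapped status register at
+ * dev->regs + MY_STATUS with a hypothetical MY_DONE ready bit: spin for
+ * up to 2 us per round, then sleep 100 us before checking again.
+ *
+ *	ret = rtdm_task_busy_wait(readl(dev->regs + MY_STATUS) & MY_DONE,
+ *				  2000, 100000);
+ */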
+
+/**
+ * @brief Register wait context
+ *
+ * rtdm_wait_prepare() registers a wait context structure for the
+ * caller, which can be later retrieved by a call to
+ * rtdm_wait_get_context(). This call is normally issued before the
+ * current task blocks on a wait object, waiting for some (producer)
+ * code to wake it up. Arbitrary data can be exchanged between both
+ * sites via the wait context structure, which is allocated by the
+ * waiter (consumer) side.
+ *
+ * @a wc is the address of an anchor object which is commonly embedded
+ * into a larger structure with arbitrary contents, which needs to be
+ * shared between the consumer (waiter) and the producer for
+ * implementing the wait code.
+ *
+ * A typical implementation pattern for the wait side is:
+ *
+ * @code
+ * struct rtdm_waitqueue wq;
+ * struct some_wait_context {
+ *    int input_value;
+ *    int output_value;
+ *    struct rtdm_wait_context wc;
+ * } wait_context;
+ *
+ * wait_context.input_value = 42;
+ * rtdm_wait_prepare(&wait_context);
+ * ret = rtdm_wait_condition(&wq, rtdm_wait_is_completed(&wait_context));
+ * if (ret)
+ *     goto wait_failed;
+ * handle_event(wait_context.output_value);
+ * @endcode
+ *
+ * On the producer side, the implementation would look like:
+ *
+ * @code
+ * struct rtdm_waitqueue wq;
+ * struct some_wait_context {
+ *    int input_value;
+ *    int output_value;
+ *    struct rtdm_wait_context wc;
+ * } *wait_context_ptr;
+ * struct rtdm_wait_context *wc;
+ * rtdm_task_t *task;
+ *
+ * rtdm_for_each_waiter(task, &wq) {
+ *    wc = rtdm_wait_get_context(task);
+ *    wait_context_ptr = container_of(wc, struct some_wait_context, wc);
+ *    wait_context_ptr->output_value = 12;
+ * }
+ * rtdm_waitqueue_broadcast(&wq);
+ * @endcode
+ *
+ * @param wc Wait context to register.
+ */
+void rtdm_wait_prepare(struct rtdm_wait_context *wc);
+
+/**
+ * @brief Mark completion for a wait context
+ *
+ * rtdm_complete_wait() marks a wait context as completed, so that
+ * rtdm_wait_is_completed() returns true for such context.
+ *
+ * @param wc Wait context to complete.
+ */
+void rtdm_wait_complete(struct rtdm_wait_context *wc);
+
+/**
+ * @brief Test completion of a wait context
+ *
+ * rtdm_wait_is_completed() returns true if rtdm_complete_wait() was
+ * called for @a wc. The completion mark is reset each time
+ * rtdm_wait_prepare() is called for a wait context.
+ *
+ * @param wc Wait context to check for completion.
+ *
+ * @return non-zero/true if rtdm_wait_complete() was called for @a wc,
+ * zero otherwise.
+ */
+int rtdm_wait_is_completed(struct rtdm_wait_context *wc);
+
+#endif /* DOXYGEN_CPP */
+
+int __rtdm_task_sleep(xnticks_t timeout, xntmode_t mode)
+{
+	struct xnthread *thread;
+
+	if (!XENO_ASSERT(COBALT, !xnsched_unblockable_p()))
+		return -EPERM;
+
+	thread = xnthread_current();
+	xnthread_suspend(thread, XNDELAY, timeout, mode, NULL);
+
+	return xnthread_test_info(thread, XNBREAK) ? -EINTR : 0;
+}
+
+EXPORT_SYMBOL_GPL(__rtdm_task_sleep);
+
+/**
+ * @brief Wait on a real-time task to terminate
+ *
+ * @param[in,out] task Task handle as returned by rtdm_task_init()
+ *
+ * @note Passing the same task handle to RTDM services after the
+ * completion of this function is not allowed.
+ *
+ * @note This service does not trigger the termination of the targeted
+ * task.  The user has to take care of this, otherwise rtdm_task_join()
+ * will never return.
+ *
+ * @coretags{mode-unrestricted}
+ */
+void rtdm_task_join(rtdm_task_t *task)
+{
+	trace_cobalt_driver_task_join(task);
+
+	xnthread_join(task, true);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_task_join);
+
+/**
+ * @brief Busy-wait a specified amount of time
+ *
+ * This service does not schedule out the caller, but rather spins in
+ * a tight loop, burning CPU cycles until the timeout elapses.
+ *
+ * @param[in] delay Delay in nanoseconds. Note that a zero delay does @b not
+ * have the meaning of @c RTDM_TIMEOUT_INFINITE here.
+ *
+ * @note The caller must not be migratable to different CPUs while executing
+ * this service. Otherwise, the actual delay will be undefined.
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_task_busy_sleep(nanosecs_rel_t delay)
+{
+	xnticks_t wakeup;
+
+	wakeup = xnclock_read_raw(&nkclock) +
+		xnclock_ns_to_ticks(&nkclock, delay);
+
+	while ((xnsticks_t)(xnclock_read_raw(&nkclock) - wakeup) < 0)
+		cpu_relax();
+}
+
+EXPORT_SYMBOL_GPL(rtdm_task_busy_sleep);
+/** @} */
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_timer Timer Services
+ * @{
+ */
+
+/**
+ * @brief Initialise a timer
+ *
+ * @param[in,out] timer Timer handle
+ * @param[in] handler Handler to be called on timer expiry
+ * @param[in] name Optional timer name
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_timer_init(rtdm_timer_t *timer, rtdm_timer_handler_t handler,
+		    const char *name)
+{
+	if (!realtime_core_enabled())
+		return -ENOSYS;
+
+	xntimer_init((timer), &nkclock, handler, NULL, XNTIMER_IGRAVITY);
+	xntimer_set_name((timer), (name));
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_timer_init);
+
+/**
+ * @brief Destroy a timer
+ *
+ * @param[in,out] timer Timer handle as returned by rtdm_timer_init()
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_timer_destroy(rtdm_timer_t *timer)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	xntimer_destroy(timer);
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_timer_destroy);
+
+/**
+ * @brief Start a timer
+ *
+ * @param[in,out] timer Timer handle as returned by rtdm_timer_init()
+ * @param[in] expiry Firing time of the timer, @c mode defines if relative or
+ * absolute
+ * @param[in] interval Relative reload value, > 0 if the timer shall work in
+ * periodic mode with the specific interval, 0 for one-shot timers
+ * @param[in] mode Defines the operation mode, see @ref RTDM_TIMERMODE_xxx for
+ * possible values
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ETIMEDOUT is returned if @c expiry describes an absolute date in
+ * the past. In such an event, the timer is nevertheless armed for the
+ * next shot in the timeline if @a interval is non-zero.
+ *
+ * @coretags{unrestricted}
+ */
+int rtdm_timer_start(rtdm_timer_t *timer, nanosecs_abs_t expiry,
+		     nanosecs_rel_t interval, enum rtdm_timer_mode mode)
+{
+	spl_t s;
+	int err;
+
+	xnlock_get_irqsave(&nklock, s);
+	err = xntimer_start(timer, expiry, interval, (xntmode_t)mode);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_timer_start);
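+
+/*
+ * A minimal sketch; my_timer, my_timer_handler and the 100 us period
+ * are illustrative assumptions: arm a periodic timer firing every
+ * 100 us, with the first shot one period from now.
+ *
+ *	static rtdm_timer_t my_timer;
+ *
+ *	static void my_timer_handler(rtdm_timer_t *timer)
+ *	{
+ *		// runs in core interrupt context on each expiry
+ *	}
+ *
+ *	ret = rtdm_timer_init(&my_timer, my_timer_handler, "my-timer");
+ *	if (ret == 0)
+ *		ret = rtdm_timer_start(&my_timer, 100000, 100000,
+ *				       RTDM_TIMERMODE_RELATIVE);
+ */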
+
+/**
+ * @brief Stop a timer
+ *
+ * @param[in,out] timer Timer handle as returned by rtdm_timer_init()
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_timer_stop(rtdm_timer_t *timer)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	xntimer_stop(timer);
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_timer_stop);
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+/**
+ * @brief Start a timer from inside a timer handler
+ *
+ * @param[in,out] timer Timer handle as returned by rtdm_timer_init()
+ * @param[in] expiry Firing time of the timer, @c mode defines if relative or
+ * absolute
+ * @param[in] interval Relative reload value, > 0 if the timer shall work in
+ * periodic mode with the specific interval, 0 for one-shot timers
+ * @param[in] mode Defines the operation mode, see @ref RTDM_TIMERMODE_xxx for
+ * possible values
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ETIMEDOUT is returned if @c expiry describes an absolute date in the
+ * past.
+ *
+ * @coretags{coreirq-only}
+ */
+int rtdm_timer_start_in_handler(rtdm_timer_t *timer, nanosecs_abs_t expiry,
+				nanosecs_rel_t interval,
+				enum rtdm_timer_mode mode);
+
+/**
+ * @brief Stop a timer from inside a timer handler
+ *
+ * @param[in,out] timer Timer handle as returned by rtdm_timer_init()
+ *
+ * @coretags{coreirq-only}
+ */
+void rtdm_timer_stop_in_handler(rtdm_timer_t *timer);
+#endif /* DOXYGEN_CPP */
+/** @} */
+
+/* --- IPC cleanup helper --- */
+
+#define RTDM_SYNCH_DELETED          XNSYNCH_SPARE0
+
+void __rtdm_synch_flush(struct xnsynch *synch, unsigned long reason)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (reason == XNRMID)
+		xnsynch_set_status(synch, RTDM_SYNCH_DELETED);
+
+	if (likely(xnsynch_flush(synch, reason) == XNSYNCH_RESCHED))
+		xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(__rtdm_synch_flush);
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_sync Synchronisation Services
+ * @{
+ */
+
+/*!
+ * @name Timeout Sequence Management
+ * @{
+ */
+
+/**
+ * @brief Initialise a timeout sequence
+ *
+ * This service initialises a timeout sequence handle according to the given
+ * timeout value. Timeout sequences allow maintaining a continuous @a timeout
+ * across multiple calls of blocking synchronisation services. A typical
+ * application scenario is given below.
+ *
+ * @param[in,out] timeout_seq Timeout sequence handle
+ * @param[in] timeout Relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values
+ *
+ * Application Scenario:
+ * @code
+int device_service_routine(...)
+{
+	rtdm_toseq_t timeout_seq;
+	...
+
+	rtdm_toseq_init(&timeout_seq, timeout);
+	...
+	while (received < requested) {
+		ret = rtdm_event_timedwait(&data_available, timeout, &timeout_seq);
+		if (ret < 0) // including -ETIMEDOUT
+			break;
+
+		// receive some data
+		...
+	}
+	...
+}
+ * @endcode
+ * Using a timeout sequence in such a scenario prevents the user-provided
+ * relative @c timeout from being restarted on every call to
+ * rtdm_event_timedwait(), which could otherwise cause an overall delay
+ * larger than specified by
+ * @c timeout. Moreover, all functions supporting timeout sequences also
+ * interpret special timeout values (infinite and non-blocking),
+ * disburdening the driver developer from handling them separately.
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout)
+{
+	XENO_WARN_ON(COBALT, xnsched_unblockable_p()); /* only warn here */
+
+	*timeout_seq = xnclock_read_monotonic(&nkclock) + timeout;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_toseq_init);
+
+/** @} */
+
+/**
+ * @ingroup rtdm_sync
+ * @defgroup rtdm_sync_event Event Services
+ * @{
+ */
+
+/**
+ * @brief Initialise an event
+ *
+ * @param[in,out] event Event handle
+ * @param[in] pending Non-zero if event shall be initialised as set, 0 otherwise
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_event_init(rtdm_event_t *event, unsigned long pending)
+{
+	spl_t s;
+
+	trace_cobalt_driver_event_init(event, pending);
+
+	/* Make atomic for re-initialisation support */
+	xnlock_get_irqsave(&nklock, s);
+
+	xnsynch_init(&event->synch_base, XNSYNCH_PRIO, NULL);
+	if (pending)
+		xnsynch_set_status(&event->synch_base, RTDM_EVENT_PENDING);
+	xnselect_init(&event->select_block);
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_event_init);
+
+/**
+ * @brief Destroy an event
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+void rtdm_event_destroy(rtdm_event_t *event)
+{
+	trace_cobalt_driver_event_destroy(event);
+	if (realtime_core_enabled()) {
+		__rtdm_synch_flush(&event->synch_base, XNRMID);
+		xnselect_destroy(&event->select_block);
+	}
+}
+EXPORT_SYMBOL_GPL(rtdm_event_destroy);
+
+/**
+ * @brief Signal an event occurrence to currently listening waiters
+ *
+ * This function wakes up all current waiters of the given event, but it does
+ * not change the event state. Subsequent callers of rtdm_event_wait() or
+ * rtdm_event_timedwait() will therefore block first.
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_event_pulse(rtdm_event_t *event)
+{
+	trace_cobalt_driver_event_pulse(event);
+	__rtdm_synch_flush(&event->synch_base, 0);
+}
+EXPORT_SYMBOL_GPL(rtdm_event_pulse);
+
+/**
+ * @brief Signal an event occurrence
+ *
+ * This function sets the given event and wakes up all current waiters. If no
+ * waiter is presently registered, the next call to rtdm_event_wait() or
+ * rtdm_event_timedwait() will return immediately.
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_event_signal(rtdm_event_t *event)
+{
+	int resched = 0;
+	spl_t s;
+
+	trace_cobalt_driver_event_signal(event);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	xnsynch_set_status(&event->synch_base, RTDM_EVENT_PENDING);
+	if (xnsynch_flush(&event->synch_base, 0))
+		resched = 1;
+	if (xnselect_signal(&event->select_block, 1))
+		resched = 1;
+	if (resched)
+		xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_event_signal);
+
+/**
+ * @brief Wait on event occurrence
+ *
+ * This is the light-weight version of rtdm_event_timedwait(), implying an
+ * infinite timeout.
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a signal or
+ * explicitly via rtdm_task_unblock().
+ *
+ * - -EIDRM is returned if @a event has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_event_wait(rtdm_event_t *event)
+{
+	return rtdm_event_timedwait(event, 0, NULL);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_event_wait);
+
+/**
+ * @brief Wait on event occurrence with timeout
+ *
+ * This function waits or tests for the occurrence of the given event, taking
+ * the provided timeout into account. On successful return, the event is
+ * reset.
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ * @param[in] timeout Relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values
+ * @param[in,out] timeout_seq Handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a signal or
+ * explicitly via rtdm_task_unblock().
+ *
+ * - -EIDRM is returned if @a event has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * - -EWOULDBLOCK is returned if a negative @a timeout (i.e., non-blocking
+ * operation) has been specified.
+ *
+ * @coretags{primary-timed, might-switch}
+ */
+int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
+			 rtdm_toseq_t *timeout_seq)
+{
+	struct xnthread *thread;
+	int err = 0, ret;
+	spl_t s;
+
+	if (!XENO_ASSERT(COBALT, timeout < 0 || !xnsched_unblockable_p()))
+		return -EPERM;
+
+	trace_cobalt_driver_event_wait(event, xnthread_current());
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (unlikely(event->synch_base.status & RTDM_SYNCH_DELETED))
+		err = -EIDRM;
+	else if (likely(event->synch_base.status & RTDM_EVENT_PENDING)) {
+		xnsynch_clear_status(&event->synch_base, RTDM_EVENT_PENDING);
+		xnselect_signal(&event->select_block, 0);
+	} else {
+		/* non-blocking mode */
+		if (timeout < 0) {
+			err = -EWOULDBLOCK;
+			goto unlock_out;
+		}
+
+		thread = xnthread_current();
+
+		if (timeout_seq && (timeout > 0))
+			/* timeout sequence */
+			ret = xnsynch_sleep_on(&event->synch_base, *timeout_seq,
+					       XN_ABSOLUTE);
+		else
+			/* infinite or relative timeout */
+			ret = xnsynch_sleep_on(&event->synch_base, timeout, XN_RELATIVE);
+
+		if (likely(ret == 0)) {
+			xnsynch_clear_status(&event->synch_base,
+					    RTDM_EVENT_PENDING);
+			xnselect_signal(&event->select_block, 0);
+		} else if (ret & XNTIMEO)
+			err = -ETIMEDOUT;
+		else if (ret & XNRMID)
+			err = -EIDRM;
+		else /* XNBREAK */
+			err = -EINTR;
+	}
+
+unlock_out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_event_timedwait);
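+
+/*
+ * A minimal sketch of the typical producer/consumer pattern. The names
+ * struct my_dev, its irq_event member and process_completion() are
+ * illustrative assumptions: the interrupt handler signals the event,
+ * a driver task waits for it with a 1 ms timeout.
+ *
+ *	// producer side, e.g. from the IRQ handler
+ *	rtdm_event_signal(&dev->irq_event);
+ *
+ *	// consumer side, from a driver task or handler in primary mode
+ *	ret = rtdm_event_timedwait(&dev->irq_event, 1000000, NULL);
+ *	if (ret == 0)
+ *		process_completion(dev);
+ */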
+
+/**
+ * @brief Clear event state
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_event_clear(rtdm_event_t *event)
+{
+	spl_t s;
+
+	trace_cobalt_driver_event_clear(event);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	xnsynch_clear_status(&event->synch_base, RTDM_EVENT_PENDING);
+	xnselect_signal(&event->select_block, 0);
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_event_clear);
+
+/**
+ * @brief Bind a selector to an event
+ *
+ * This function binds the given selector to an event so that the former is
+ * notified when the event state changes. Typically the select binding handler
+ * will invoke this service.
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ * @param[in,out] selector Selector as passed to the select binding handler
+ * @param[in] type Type of the bound event as passed to the select binding handler
+ * @param[in] fd_index File descriptor index as passed to the select binding
+ * handler
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ENOMEM is returned if there is insufficient memory to establish the
+ * dynamic binding.
+ *
+ * - -EINVAL is returned if @a type or @a fd_index are invalid.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_event_select(rtdm_event_t *event, rtdm_selector_t *selector,
+		      enum rtdm_selecttype type, unsigned int fd_index)
+{
+	struct xnselect_binding *binding;
+	int err;
+	spl_t s;
+
+	binding = xnmalloc(sizeof(*binding));
+	if (!binding)
+		return -ENOMEM;
+
+	xnlock_get_irqsave(&nklock, s);
+	err = xnselect_bind(&event->select_block,
+			    binding, selector, type, fd_index,
+			    event->synch_base.status & (RTDM_SYNCH_DELETED |
+						       RTDM_EVENT_PENDING));
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (err)
+		xnfree(binding);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(rtdm_event_select);
+
+/** @} */
+
+/**
+ * @ingroup rtdm_sync
+ * @defgroup rtdm_sync_sem Semaphore Services
+ * @{
+ */
+
+/**
+ * @brief Initialise a semaphore
+ *
+ * @param[in,out] sem Semaphore handle
+ * @param[in] value Initial value of the semaphore
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_sem_init(rtdm_sem_t *sem, unsigned long value)
+{
+	spl_t s;
+
+	trace_cobalt_driver_sem_init(sem, value);
+
+	/* Make atomic for re-initialisation support */
+	xnlock_get_irqsave(&nklock, s);
+
+	sem->value = value;
+	xnsynch_init(&sem->synch_base, XNSYNCH_PRIO, NULL);
+	xnselect_init(&sem->select_block);
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_sem_init);
+
+/**
+ * @brief Destroy a semaphore
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+void rtdm_sem_destroy(rtdm_sem_t *sem)
+{
+	trace_cobalt_driver_sem_destroy(sem);
+	if (realtime_core_enabled()) {
+		__rtdm_synch_flush(&sem->synch_base, XNRMID);
+		xnselect_destroy(&sem->select_block);
+	}
+}
+EXPORT_SYMBOL_GPL(rtdm_sem_destroy);
+
+/**
+ * @brief Decrement a semaphore
+ *
+ * This is the light-weight version of rtdm_sem_timeddown(), implying an
+ * infinite timeout.
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a signal or
+ * explicitly via rtdm_task_unblock().
+ *
+ * - -EIDRM is returned if @a sem has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_sem_down(rtdm_sem_t *sem)
+{
+	return rtdm_sem_timeddown(sem, 0, NULL);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_sem_down);
+
+/**
+ * @brief Decrement a semaphore with timeout
+ *
+ * This function tries to decrement the given semaphore's value if it is
+ * positive on entry. If not, the caller is blocked unless non-blocking
+ * operation was selected.
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ * @param[in] timeout Relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values
+ * @param[in,out] timeout_seq Handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is negative and the semaphore
+ * value is currently not positive.
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a signal or
+ * explicitly via rtdm_task_unblock().
+ *
+ * - -EIDRM is returned if @a sem has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-timed, might-switch}
+ */
+int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
+		       rtdm_toseq_t *timeout_seq)
+{
+	struct xnthread *thread;
+	int err = 0, ret;
+	spl_t s;
+
+	if (!XENO_ASSERT(COBALT, timeout < 0 || !xnsched_unblockable_p()))
+		return -EPERM;
+
+	trace_cobalt_driver_sem_wait(sem, xnthread_current());
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (unlikely(sem->synch_base.status & RTDM_SYNCH_DELETED))
+		err = -EIDRM;
+	else if (sem->value > 0) {
+		if (!--sem->value)
+			xnselect_signal(&sem->select_block, 0);
+	} else if (timeout < 0) /* non-blocking mode */
+		err = -EWOULDBLOCK;
+	else {
+		thread = xnthread_current();
+
+		if (timeout_seq && timeout > 0)
+			/* timeout sequence */
+			ret = xnsynch_sleep_on(&sem->synch_base, *timeout_seq,
+					       XN_ABSOLUTE);
+		else
+			/* infinite or relative timeout */
+			ret = xnsynch_sleep_on(&sem->synch_base, timeout, XN_RELATIVE);
+
+		if (ret) {
+			if (ret & XNTIMEO)
+				err = -ETIMEDOUT;
+			else if (ret & XNRMID)
+				err = -EIDRM;
+			else /* XNBREAK */
+				err = -EINTR;
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_sem_timeddown);
+
+/**
+ * @brief Increment a semaphore
+ *
+ * This function increments the given semaphore's value, waking up a potential
+ * waiter which was blocked upon rtdm_sem_down().
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_sem_up(rtdm_sem_t *sem)
+{
+	spl_t s;
+
+	trace_cobalt_driver_sem_up(sem);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnsynch_wakeup_one_sleeper(&sem->synch_base))
+		xnsched_run();
+	else
+		if (sem->value++ == 0
+		    && xnselect_signal(&sem->select_block, 1))
+			xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_sem_up);
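+
+/*
+ * A minimal sketch; dev->rx_sem counting received buffers and
+ * consume_buffer() are illustrative assumptions: the receive path posts
+ * one unit per buffer, the reader consumes them, blocking at most 5 ms
+ * per attempt.
+ *
+ *	rtdm_sem_init(&dev->rx_sem, 0);
+ *	...
+ *	rtdm_sem_up(&dev->rx_sem);	// producer: one more buffer ready
+ *	...
+ *	ret = rtdm_sem_timeddown(&dev->rx_sem, 5000000, NULL);
+ *	if (ret == 0)
+ *		consume_buffer(dev);
+ */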
+
+/**
+ * @brief Bind a selector to a semaphore
+ *
+ * This function binds the given selector to the semaphore so that the former
+ * is notified when the semaphore state changes. Typically the select binding
+ * handler will invoke this service.
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ * @param[in,out] selector Selector as passed to the select binding handler
+ * @param[in] type Type of the bound event as passed to the select binding handler
+ * @param[in] fd_index File descriptor index as passed to the select binding
+ * handler
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ENOMEM is returned if there is insufficient memory to establish the
+ * dynamic binding.
+ *
+ * - -EINVAL is returned if @a type or @a fd_index are invalid.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_sem_select(rtdm_sem_t *sem, rtdm_selector_t *selector,
+		    enum rtdm_selecttype type, unsigned int fd_index)
+{
+	struct xnselect_binding *binding;
+	int err;
+	spl_t s;
+
+	binding = xnmalloc(sizeof(*binding));
+	if (!binding)
+		return -ENOMEM;
+
+	xnlock_get_irqsave(&nklock, s);
+	err = xnselect_bind(&sem->select_block, binding, selector,
+			    type, fd_index,
+			    (sem->value > 0) ||
+			    sem->synch_base.status & RTDM_SYNCH_DELETED);
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (err)
+		xnfree(binding);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(rtdm_sem_select);
+
+/** @} */
+
+/**
+ * @ingroup rtdm_sync
+ * @defgroup rtdm_sync_mutex Mutex services
+ * @{
+ */
+
+/**
+ * @brief Initialise a mutex
+ *
+ * This function initialises a basic mutex with priority inversion protection.
+ * It is termed "basic" because it does not allow a mutex owner to lock the
+ * same mutex recursively.
+ *
+ * @param[in,out] mutex Mutex handle
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_mutex_init(rtdm_mutex_t *mutex)
+{
+	spl_t s;
+
+	/* Make atomic for re-initialisation support */
+	xnlock_get_irqsave(&nklock, s);
+	xnsynch_init(&mutex->synch_base, XNSYNCH_PI, &mutex->fastlock);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(rtdm_mutex_init);
+
+/**
+ * @brief Destroy a mutex
+ *
+ * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init()
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+void rtdm_mutex_destroy(rtdm_mutex_t *mutex)
+{
+	trace_cobalt_driver_mutex_destroy(mutex);
+
+	if (realtime_core_enabled())
+		__rtdm_synch_flush(&mutex->synch_base, XNRMID);
+}
+EXPORT_SYMBOL_GPL(rtdm_mutex_destroy);
+
+/**
+ * @brief Release a mutex
+ *
+ * This function releases the given mutex, waking up a potential waiter which
+ * was blocked upon rtdm_mutex_lock() or rtdm_mutex_timedlock().
+ *
+ * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init()
+ *
+ * @coretags{primary-only, might-switch}
+ */
+void rtdm_mutex_unlock(rtdm_mutex_t *mutex)
+{
+	if (!XENO_ASSERT(COBALT, !xnsched_interrupt_p()))
+		return;
+
+	trace_cobalt_driver_mutex_release(mutex);
+
+	if (unlikely(xnsynch_release(&mutex->synch_base,
+				     xnsched_current_thread())))
+		xnsched_run();
+}
+EXPORT_SYMBOL_GPL(rtdm_mutex_unlock);
+
+/**
+ * @brief Request a mutex
+ *
+ * This is the light-weight version of rtdm_mutex_timedlock(), implying an
+ * infinite timeout.
+ *
+ * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init()
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EIDRM is returned if @a mutex has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_mutex_lock(rtdm_mutex_t *mutex)
+{
+	return rtdm_mutex_timedlock(mutex, 0, NULL);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_mutex_lock);
+
+/**
+ * @brief Request a mutex with timeout
+ *
+ * This function tries to acquire the given mutex. If it is not available, the
+ * caller is blocked unless non-blocking operation was selected.
+ *
+ * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init()
+ * @param[in] timeout Relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values
+ * @param[in,out] timeout_seq Handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is negative and the mutex
+ * is not immediately available.
+ *
+ * - -EIDRM is returned if @a mutex has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout,
+			 rtdm_toseq_t *timeout_seq)
+{
+	struct xnthread *curr;
+	int ret;
+	spl_t s;
+
+	if (!XENO_ASSERT(COBALT, !xnsched_unblockable_p()))
+		return -EPERM;
+
+	curr = xnthread_current();
+	trace_cobalt_driver_mutex_wait(mutex, curr);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (unlikely(mutex->synch_base.status & RTDM_SYNCH_DELETED)) {
+		ret = -EIDRM;
+		goto out;
+	}
+
+	ret = xnsynch_try_acquire(&mutex->synch_base);
+	if (ret != -EBUSY)
+		goto out;
+
+	if (timeout < 0) {
+		ret = -EWOULDBLOCK;
+		goto out;
+	}
+
+	for (;;) {
+		if (timeout_seq && timeout > 0) /* timeout sequence */
+			ret = xnsynch_acquire(&mutex->synch_base, *timeout_seq,
+					      XN_ABSOLUTE);
+		else		/* infinite or relative timeout */
+			ret = xnsynch_acquire(&mutex->synch_base, timeout,
+					      XN_RELATIVE);
+		if (ret == 0)
+			break;
+		if (ret & XNBREAK)
+			continue;
+		ret = ret & XNTIMEO ? -ETIMEDOUT : -EIDRM;
+		break;
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_mutex_timedlock);
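+
+/*
+ * A minimal sketch; dev->lock guarding a hypothetical register sequence
+ * is an assumption: serialise access among driver tasks running in
+ * primary mode.
+ *
+ *	rtdm_mutex_init(&dev->lock);
+ *	...
+ *	ret = rtdm_mutex_lock(&dev->lock);
+ *	if (ret == 0) {
+ *		program_registers(dev);	// hypothetical helper
+ *		rtdm_mutex_unlock(&dev->lock);
+ *	}
+ */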
+/** @} */
+
+/** @} Synchronisation services */
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_irq Interrupt Management Services
+ * @{
+ */
+
+/**
+ * @brief Register an interrupt handler
+ *
+ * This function registers the provided handler with an IRQ line and enables
+ * the line.
+ *
+ * @param[in,out] irq_handle IRQ handle
+ * @param[in] irq_no Line number of the addressed IRQ
+ * @param[in] handler Interrupt handler
+ * @param[in] flags Registration flags, see @ref RTDM_IRQTYPE_xxx for details
+ * @param[in] device_name Device name to show up in real-time IRQ lists
+ * @param[in] arg Pointer to be passed to the interrupt handler on invocation
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINVAL is returned if an invalid parameter was passed.
+ *
+ * - -EBUSY is returned if the specified IRQ line is already in use.
+ *
+ * - -ENOSYS is returned if the real-time core is disabled.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no,
+		     rtdm_irq_handler_t handler, unsigned long flags,
+		     const char *device_name, void *arg)
+{
+	return rtdm_irq_request_affine(irq_handle, irq_no, handler, flags,
+				       device_name, arg, NULL);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_irq_request);
+
+/**
+ * @brief Register an interrupt handler
+ *
+ * This function registers the provided handler with an IRQ line and enables
+ * the line.
+ *
+ * @param[in,out] irq_handle IRQ handle
+ * @param[in] irq_no Line number of the addressed IRQ
+ * @param[in] handler Interrupt handler
+ * @param[in] flags Registration flags, see @ref RTDM_IRQTYPE_xxx for details
+ * @param[in] device_name Device name to show up in real-time IRQ lists
+ * @param[in] arg Pointer to be passed to the interrupt handler on invocation
+ * @param[in] cpumask CPU affinity of the interrupt
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINVAL is returned if an invalid parameter was passed.
+ *
+ * - -EBUSY is returned if the specified IRQ line is already in use.
+ *
+ * - -ENOSYS is returned if the real-time core is disabled.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_irq_request_affine(rtdm_irq_t *irq_handle, unsigned int irq_no,
+			    rtdm_irq_handler_t handler, unsigned long flags,
+			    const char *device_name, void *arg,
+			    const cpumask_t *cpumask)
+{
+	int err;
+
+	if (!realtime_core_enabled())
+		return -ENOSYS;
+
+	if (!XENO_ASSERT(COBALT, xnsched_root_p()))
+		return -EPERM;
+
+	err = xnintr_init(irq_handle, device_name, irq_no, handler, NULL, flags);
+	if (err)
+		return err;
+
+	err = xnintr_attach(irq_handle, arg, cpumask);
+	if (err) {
+		xnintr_destroy(irq_handle);
+		return err;
+	}
+
+	xnintr_enable(irq_handle);
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_irq_request_affine);
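+
+/*
+ * A minimal sketch; my_irq_handler, struct my_dev and its irq_handle
+ * and irq members are illustrative assumptions: attach a handler to the
+ * device interrupt line, typically from the probe or open path in
+ * secondary mode.
+ *
+ *	static int my_irq_handler(rtdm_irq_t *irq_handle)
+ *	{
+ *		struct my_dev *dev = rtdm_irq_get_arg(irq_handle, struct my_dev);
+ *
+ *		// acknowledge the device, signal waiters, etc.
+ *		return RTDM_IRQ_HANDLED;
+ *	}
+ *
+ *	ret = rtdm_irq_request(&dev->irq_handle, dev->irq, my_irq_handler,
+ *			       0, "my-device", dev);
+ */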
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+/**
+ * @brief Release an interrupt handler
+ *
+ * @param[in,out] irq_handle IRQ handle as returned by rtdm_irq_request()
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @note The caller is responsible for shutting down the IRQ source at device
+ * level before invoking this service. In turn, rtdm_irq_free ensures that any
+ * pending event on the given IRQ line is fully processed on return from this
+ * service.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_irq_free(rtdm_irq_t *irq_handle);
+
+/**
+ * @brief Enable interrupt line
+ *
+ * @param[in,out] irq_handle IRQ handle as returned by rtdm_irq_request()
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @note This service is for exceptional use only. Drivers should
+ * always prefer interrupt masking at device level (via corresponding
+ * control registers etc.)  over masking at line level. Keep in mind
+ * that the latter is incompatible with IRQ line sharing and can also
+ * be more costly as interrupt controller access requires broader
+ * synchronization. Also, such service is solely available from
+ * secondary mode. The caller is responsible for excluding such
+ * conflicts.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_irq_enable(rtdm_irq_t *irq_handle);
+
+/**
+ * @brief Disable interrupt line
+ *
+ * @param[in,out] irq_handle IRQ handle as returned by rtdm_irq_request()
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @note This service is for exceptional use only. Drivers should
+ * always prefer interrupt masking at device level (via corresponding
+ * control registers etc.)  over masking at line level. Keep in mind
+ * that the latter is incompatible with IRQ line sharing and can also
+ * be more costly as interrupt controller access requires broader
+ * synchronization.  Also, such service is solely available from
+ * secondary mode.  The caller is responsible for excluding such
+ * conflicts.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_irq_disable(rtdm_irq_t *irq_handle);
+
+/**
+ * @brief Set interrupt affinity
+ *
+ * @param[in] irq_handle IRQ handle as returned by rtdm_irq_request()
+ *
+ * @param[in] cpumask The new CPU affinity of the interrupt
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_irq_set_affinity(rtdm_irq_t *irq_handle, const cpumask_t *cpumask);
+#endif /* DOXYGEN_CPP */
+
+/** @} Interrupt Management Services */
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_nrtsignal Non-Real-Time Signalling Services
+ *
+ * These services provide a mechanism to request the execution of a specified
+ * handler in non-real-time context. The triggering can safely be performed in
+ * real-time context without suffering from unknown delays. The handler
+ * execution will be deferred until the next time the real-time subsystem
+ * releases the CPU to the non-real-time part.
+ * @{
+ */
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+
+/**
+ * @brief Register a non-real-time signal handler
+ *
+ * @param[in,out] nrt_sig Signal handle
+ * @param[in] handler Non-real-time signal handler
+ * @param[in] arg Custom argument passed to @c handler() on each invocation
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EAGAIN is returned if no free signal slot is available.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_nrtsig_init(rtdm_nrtsig_t *nrt_sig, rtdm_nrtsig_handler_t handler,
+		     void *arg);
+
+/**
+ * @brief Release a non-realtime signal handler
+ *
+ * @param[in,out] nrt_sig Signal handle
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_nrtsig_destroy(rtdm_nrtsig_t *nrt_sig);
+#endif /* DOXYGEN_CPP */
+
+void __rtdm_nrtsig_execute(struct pipeline_inband_work *inband_work)
+{
+	struct rtdm_nrtsig *nrt_sig;
+
+	nrt_sig = container_of(inband_work, typeof(*nrt_sig), inband_work);
+	nrt_sig->handler(nrt_sig, nrt_sig->arg);
+}
+EXPORT_SYMBOL_GPL(__rtdm_nrtsig_execute);
+
+/**
+ * Trigger non-real-time signal
+ *
+ * @param[in,out] nrt_sig Signal handle
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_nrtsig_pend(rtdm_nrtsig_t *nrt_sig)
+{
+	pipeline_post_inband_work(nrt_sig);
+}
+EXPORT_SYMBOL_GPL(rtdm_nrtsig_pend);
+
+static LIST_HEAD(nrt_work_list);
+DEFINE_PRIVATE_XNLOCK(nrt_work_lock);
+
+static void lostage_schedule_work(struct pipeline_inband_work *inband_work)
+{
+	struct work_struct *lostage_work;
+	spl_t s;
+
+	xnlock_get_irqsave(&nrt_work_lock, s);
+
+	while (!list_empty(&nrt_work_list)) {
+		lostage_work = list_first_entry(&nrt_work_list,
+						struct work_struct, entry);
+		list_del_init(&lostage_work->entry);
+
+		xnlock_put_irqrestore(&nrt_work_lock, s);
+
+		schedule_work(lostage_work);
+
+		xnlock_get_irqsave(&nrt_work_lock, s);
+	}
+
+	xnlock_put_irqrestore(&nrt_work_lock, s);
+}
+
+static struct lostage_trigger_work {
+	struct pipeline_inband_work inband_work; /* Must be first. */
+} nrt_work =  {
+	.inband_work = PIPELINE_INBAND_WORK_INITIALIZER(nrt_work,
+							lostage_schedule_work),
+};
+
+/**
+ * Schedule a work item on the Linux non-real-time global workqueue from primary mode.
+ *
+ * @param[in] lostage_work Work item to be queued on the regular Linux workqueue
+ */
+void rtdm_schedule_nrt_work(struct work_struct *lostage_work)
+{
+	spl_t s;
+
+	if (is_secondary_domain()) {
+		schedule_work(lostage_work);
+		return;
+	}
+
+	xnlock_get_irqsave(&nrt_work_lock, s);
+
+	list_add_tail(&lostage_work->entry, &nrt_work_list);
+	pipeline_post_inband_work(&nrt_work);
+
+	xnlock_put_irqrestore(&nrt_work_lock, s);
+}
+EXPORT_SYMBOL_GPL(rtdm_schedule_nrt_work);
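+
+/*
+ * Illustrative sketch, not part of the original patch: queuing a
+ * regular Linux work item from primary mode. my_work_handler() and
+ * my_defer() are hypothetical.
+ */
+#if 0 /* usage sketch */
+static void my_work_handler(struct work_struct *work)
+{
+	/* Plain in-band context: may sleep, allocate memory, etc. */
+}
+
+static DECLARE_WORK(my_work, my_work_handler);
+
+/* Safe from both primary and secondary mode. */
+static void my_defer(void)
+{
+	rtdm_schedule_nrt_work(&my_work);
+}
+#endif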
+
+/** @} Non-Real-Time Signalling Services */
+
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_util Utility Services
+ * @{
+ */
+
+struct mmap_tramp_data {
+	struct rtdm_fd *fd;
+	struct file_operations *fops;
+	int (*mmap_handler)(struct rtdm_fd *fd,
+			    struct vm_area_struct *vma);
+};
+
+struct mmap_helper_data {
+	void *src_vaddr;
+	phys_addr_t src_paddr;
+	struct vm_operations_struct *vm_ops;
+	void *vm_private_data;
+	struct mmap_tramp_data tramp_data;
+};
+
+static int mmap_kmem_helper(struct vm_area_struct *vma, void *va)
+{
+	unsigned long addr, len, pfn, to;
+	int ret = 0;
+
+	to = (unsigned long)va;
+	addr = vma->vm_start;
+	len = vma->vm_end - vma->vm_start;
+
+	if (to != PAGE_ALIGN(to) || (len & ~PAGE_MASK) != 0)
+		return -EINVAL;
+
+#ifndef CONFIG_MMU
+	pfn = __pa(to) >> PAGE_SHIFT;
+	ret = remap_pfn_range(vma, addr, pfn, len, PAGE_SHARED);
+#else
+	if (to < VMALLOC_START || to >= VMALLOC_END) {
+		/* logical address. */
+		pfn = __pa(to) >> PAGE_SHIFT;
+		ret = remap_pfn_range(vma, addr, pfn, len, PAGE_SHARED);
+		if (ret)
+			return ret;
+	} else {
+		/* vmalloc memory. */
+		while (len > 0) {
+			struct page *page = vmalloc_to_page((void *)to);
+			if (vm_insert_page(vma, addr, page))
+				return -EAGAIN;
+			addr += PAGE_SIZE;
+			to += PAGE_SIZE;
+			len -= PAGE_SIZE;
+		}
+	}
+
+	if (cobalt_machine.prefault)
+		cobalt_machine.prefault(vma);
+#endif
+
+	return ret;
+}
+
+static int mmap_iomem_helper(struct vm_area_struct *vma, phys_addr_t pa)
+{
+	pgprot_t prot = PAGE_SHARED;
+	unsigned long len;
+
+	len = vma->vm_end - vma->vm_start;
+#ifndef CONFIG_MMU
+	vma->vm_pgoff = pa >> PAGE_SHIFT;
+#endif /* CONFIG_MMU */
+
+#ifdef __HAVE_PHYS_MEM_ACCESS_PROT
+	if (vma->vm_file)
+		prot = phys_mem_access_prot(vma->vm_file, pa >> PAGE_SHIFT,
+					    len, prot);
+#endif
+	vma->vm_page_prot = pgprot_noncached(prot);
+
+	return remap_pfn_range(vma, vma->vm_start, pa >> PAGE_SHIFT,
+			       len, vma->vm_page_prot);
+}
+
+static int mmap_buffer_helper(struct rtdm_fd *fd, struct vm_area_struct *vma)
+{
+	struct mmap_tramp_data *tramp_data = vma->vm_private_data;
+	struct mmap_helper_data *helper_data;
+	int ret;
+
+	helper_data = container_of(tramp_data, struct mmap_helper_data, tramp_data);
+	vma->vm_ops = helper_data->vm_ops;
+	vma->vm_private_data = helper_data->vm_private_data;
+
+	if (helper_data->src_paddr)
+		ret = mmap_iomem_helper(vma, helper_data->src_paddr);
+	else
+		ret = mmap_kmem_helper(vma, helper_data->src_vaddr);
+
+	return ret;
+}
+
+static int mmap_trampoline(struct file *filp, struct vm_area_struct *vma)
+{
+	struct mmap_tramp_data *tramp_data = filp->private_data;
+	int ret;
+
+	vma->vm_private_data = tramp_data;
+
+	ret = tramp_data->mmap_handler(tramp_data->fd, vma);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+#ifndef CONFIG_MMU
+
+static unsigned long
+internal_get_unmapped_area(struct file *filp,
+			   unsigned long addr, unsigned long len,
+			   unsigned long pgoff, unsigned long flags)
+{
+	struct mmap_tramp_data *tramp_data = filp->private_data;
+	struct mmap_helper_data *helper_data;
+	unsigned long pa;
+
+	helper_data = container_of(tramp_data, struct mmap_helper_data, tramp_data);
+	pa = helper_data->src_paddr;
+	if (pa)
+		return (unsigned long)__va(pa);
+
+	return (unsigned long)helper_data->src_vaddr;
+}
+
+static int do_rtdm_mmap(struct mmap_tramp_data *tramp_data,
+			size_t len, off_t offset, int prot, int flags,
+			void **pptr)
+{
+	const struct file_operations *old_fops;
+	unsigned long u_addr;
+	struct file *filp;
+
+	filp = filp_open("/dev/mem", O_RDWR, 0);
+	if (IS_ERR(filp))
+		return PTR_ERR(filp);
+
+	old_fops = filp->f_op;
+	filp->f_op = tramp_data->fops;
+	filp->private_data = tramp_data;
+	u_addr = vm_mmap(filp, (unsigned long)*pptr, len, prot, flags, offset);
+	filp_close(filp, current->files);
+	filp->f_op = old_fops;
+
+	if (IS_ERR_VALUE(u_addr))
+		return (int)u_addr;
+
+	*pptr = (void *)u_addr;
+
+	return 0;
+}
+
+#else /* CONFIG_MMU */
+
+static int do_rtdm_mmap(struct mmap_tramp_data *tramp_data,
+			size_t len, off_t offset, int prot, int flags,
+			void **pptr)
+{
+	unsigned long u_addr;
+	struct file *filp;
+
+	filp = anon_inode_getfile("[rtdm]", tramp_data->fops, tramp_data, O_RDWR);
+	if (IS_ERR(filp))
+		return PTR_ERR(filp);
+
+	u_addr = vm_mmap(filp, (unsigned long)*pptr, len, prot, flags, offset);
+	filp_close(filp, current->files);
+
+	if (IS_ERR_VALUE(u_addr))
+		return (int)u_addr;
+
+	*pptr = (void *)u_addr;
+
+	return 0;
+}
+
+#define internal_get_unmapped_area  NULL
+
+#endif /* CONFIG_MMU */
+
+static struct file_operations internal_mmap_fops = {
+	.mmap = mmap_trampoline,
+	.get_unmapped_area = internal_get_unmapped_area
+};
+
+static unsigned long
+driver_get_unmapped_area(struct file *filp,
+			 unsigned long addr, unsigned long len,
+			 unsigned long pgoff, unsigned long flags)
+{
+	struct mmap_tramp_data *tramp_data = filp->private_data;
+	struct rtdm_fd *fd = tramp_data->fd;
+
+	if (fd->ops->get_unmapped_area)
+		return fd->ops->get_unmapped_area(fd, len, pgoff, flags);
+
+#ifdef CONFIG_MMU
+	/* Run default handler. */
+	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+#else
+	return -ENODEV;
+#endif
+}
+
+static struct file_operations driver_mmap_fops = {
+	.mmap = mmap_trampoline,
+	.get_unmapped_area = driver_get_unmapped_area
+};
+
+int __rtdm_mmap_from_fdop(struct rtdm_fd *fd, size_t len, off_t offset,
+			  int prot, int flags, void **pptr)
+{
+	struct mmap_tramp_data tramp_data = {
+		.fd = fd,
+		.fops = &driver_mmap_fops,
+		.mmap_handler = fd->ops->mmap,
+	};
+
+#ifndef CONFIG_MMU
+	/*
+	 * XXX: A .get_unmapped_area handler must be provided in the
+	 * nommu case. We use this to force the memory management code
+	 * not to share VM regions for distinct areas to map to, as it
+	 * would otherwise do since all requests currently apply to
+	 * the same file (i.e. from /dev/mem, see do_mmap_pgoff() in
+	 * the nommu case).
+	 */
+	if (fd->ops->get_unmapped_area)
+		offset = fd->ops->get_unmapped_area(fd, len, 0, flags);
+#endif
+
+	return do_rtdm_mmap(&tramp_data, len, offset, prot, flags, pptr);
+}
+
+/**
+ * Map a kernel memory range into the address space of the user.
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] src_addr Kernel virtual address to be mapped
+ * @param[in] len Length of the memory range
+ * @param[in] prot Protection flags for the user's memory range, typically
+ * either PROT_READ or PROT_READ|PROT_WRITE
+ * @param[in,out] pptr Address of a pointer containing the desired user
+ * address or NULL on entry and the finally assigned address on return
+ * @param[in] vm_ops vm_operations to be executed on the vm_area of the
+ * user memory range or NULL
+ * @param[in] vm_private_data Private data to be stored in the vm_area,
+ * primarily useful for vm_operation handlers
+ *
+ * @return 0 on success, otherwise (most common values):
+ *
+ * - -EINVAL is returned if an invalid start address, size, or destination
+ * address was passed.
+ *
+ * - -ENOMEM is returned if there is insufficient free memory or the limit of
+ * memory mapping for the user process was reached.
+ *
+ * - -EAGAIN is returned if too much memory has been already locked by the
+ * user process.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @note This service only works on memory regions allocated via kmalloc() or
+ * vmalloc(). To map physical I/O memory to user-space use
+ * rtdm_iomap_to_user() instead.
+ *
+ * @note RTDM supports two models for unmapping the memory area:
+ * - manual unmapping via rtdm_munmap(), which may be issued from a
+ * driver in response to an IOCTL call, or by a call to the regular
+ * munmap() call from the application.
+ * - automatic unmapping, triggered by the termination of the process
+ *   which owns the mapping.
+ * To track the number of references pending on the resource mapped,
+ * the driver can pass the address of a close handler for the vm_area
+ * considered, in the @a vm_ops descriptor. See the relevant Linux
+ * kernel programming documentation (e.g. Linux Device Drivers book)
+ * on virtual memory management for details.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_mmap_to_user(struct rtdm_fd *fd,
+		      void *src_addr, size_t len,
+		      int prot, void **pptr,
+		      struct vm_operations_struct *vm_ops,
+		      void *vm_private_data)
+{
+	struct mmap_helper_data helper_data = {
+		.tramp_data = {
+			.fd = fd,
+			.fops = &internal_mmap_fops,
+			.mmap_handler = mmap_buffer_helper,
+		},
+		.src_vaddr = src_addr,
+		.src_paddr = 0,
+		.vm_ops = vm_ops,
+		.vm_private_data = vm_private_data
+	};
+
+	if (!XENO_ASSERT(COBALT, xnsched_root_p()))
+		return -EPERM;
+
+	return do_rtdm_mmap(&helper_data.tramp_data, len, 0, prot, MAP_SHARED, pptr);
+}
+EXPORT_SYMBOL_GPL(rtdm_mmap_to_user);
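+
+/*
+ * Illustrative sketch, not part of the original patch: exporting a
+ * kmalloc'ed buffer to user space from an ioctl handler. my_buf,
+ * my_buf_len and the handler name are hypothetical; the ioctl_nrt
+ * prototype shown is assumed from the rtdm_fd_ops conventions.
+ */
+#if 0 /* usage sketch */
+static void *my_buf;		/* kmalloc'ed in probe(), page-aligned */
+static size_t my_buf_len;	/* multiple of PAGE_SIZE */
+
+static int my_ioctl_nrt(struct rtdm_fd *fd, unsigned int request,
+			void __user *arg)
+{
+	void *uptr = NULL;
+	int ret;
+
+	/* rtdm_mmap_to_user() is secondary-only, hence ioctl_nrt. */
+	ret = rtdm_mmap_to_user(fd, my_buf, my_buf_len,
+				PROT_READ | PROT_WRITE, &uptr, NULL, NULL);
+	if (ret)
+		return ret;
+
+	/* Hand the resulting user-space address back to the caller. */
+	return rtdm_safe_copy_to_user(fd, arg, &uptr, sizeof(uptr));
+}
+#endif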
+
+/**
+ * Map an I/O memory range into the address space of the user.
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] src_addr physical I/O address to be mapped
+ * @param[in] len Length of the memory range
+ * @param[in] prot Protection flags for the user's memory range, typically
+ * either PROT_READ or PROT_READ|PROT_WRITE
+ * @param[in,out] pptr Address of a pointer containing the desired user
+ * address or NULL on entry and the finally assigned address on return
+ * @param[in] vm_ops vm_operations to be executed on the vm_area of the
+ * user memory range or NULL
+ * @param[in] vm_private_data Private data to be stored in the vm_area,
+ * primarily useful for vm_operation handlers
+ *
+ * @return 0 on success, otherwise (most common values):
+ *
+ * - -EINVAL is returned if an invalid start address, size, or destination
+ * address was passed.
+ *
+ * - -ENOMEM is returned if there is insufficient free memory or the limit of
+ * memory mapping for the user process was reached.
+ *
+ * - -EAGAIN is returned if too much memory has been already locked by the
+ * user process.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @note RTDM supports two models for unmapping the memory area:
+ * - manual unmapping via rtdm_munmap(), which may be issued from a
+ * driver in response to an IOCTL call, or by a call to the regular
+ * munmap() call from the application.
+ * - automatic unmapping, triggered by the termination of the process
+ *   which owns the mapping.
+ * To track the number of references pending on the resource mapped,
+ * the driver can pass the address of a close handler for the vm_area
+ * considered, in the @a vm_ops descriptor. See the relevant Linux
+ * kernel programming documentation (e.g. Linux Device Drivers book)
+ * on virtual memory management for details.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_iomap_to_user(struct rtdm_fd *fd,
+		       phys_addr_t src_addr, size_t len,
+		       int prot, void **pptr,
+		       struct vm_operations_struct *vm_ops,
+		       void *vm_private_data)
+{
+	struct mmap_helper_data helper_data = {
+		.tramp_data = {
+			.fd = fd,
+			.fops = &internal_mmap_fops,
+			.mmap_handler = mmap_buffer_helper,
+		},
+		.src_vaddr = NULL,
+		.src_paddr = src_addr,
+		.vm_ops = vm_ops,
+		.vm_private_data = vm_private_data
+	};
+
+	if (!XENO_ASSERT(COBALT, xnsched_root_p()))
+		return -EPERM;
+
+	return do_rtdm_mmap(&helper_data.tramp_data, len, 0, prot, MAP_SHARED, pptr);
+}
+EXPORT_SYMBOL_GPL(rtdm_iomap_to_user);
+
+/**
+ * Map a kernel logical memory range to a virtual user area.
+ *
+ * This routine is commonly used from a ->mmap() handler of an RTDM
+ * driver, for mapping a virtual memory area with a direct physical
+ * mapping over the user address space referred to by @a vma.
+ *
+ * @param[in] vma The VMA descriptor to receive the mapping.
+ * @param[in] va The kernel logical address to be mapped.
+ *
+ * @return 0 on success, otherwise a negated error code is returned.
+ *
+ * @note This service works on memory regions allocated via
+ * kmalloc(). To map a chunk of virtual space with no direct physical
+ * mapping, or a physical I/O memory to a VMA, call rtdm_mmap_vmem()
+ * or rtdm_mmap_iomem() respectively instead.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_mmap_kmem(struct vm_area_struct *vma, void *va)
+{
+	return mmap_kmem_helper(vma, va);
+}
+EXPORT_SYMBOL_GPL(rtdm_mmap_kmem);
+
+/**
+ * Map a virtual memory range to a virtual user area.
+ *
+ * This routine is commonly used from a ->mmap() handler of an RTDM
+ * driver, for mapping a purely virtual memory area over the user
+ * address space referred to by @a vma.
+ *
+ * @param[in] vma The VMA descriptor to receive the mapping.
+ * @param[in] va The virtual address to be mapped.
+ *
+ * @return 0 on success, otherwise a negated error code is returned.
+ *
+ * @note This service works on memory regions allocated via
+ * vmalloc(). To map a chunk of logical space obtained from kmalloc(),
+ * or a physical I/O memory to a VMA, call rtdm_mmap_kmem() or
+ * rtdm_mmap_iomem() respectively instead.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_mmap_vmem(struct vm_area_struct *vma, void *va)
+{
+	/*
+	 * Our helper handles both directly mapped (i.e. physical) and
+	 * purely virtual memory ranges.
+	 */
+	return mmap_kmem_helper(vma, va);
+}
+EXPORT_SYMBOL_GPL(rtdm_mmap_vmem);
+
+/**
+ * Map an I/O memory range to a virtual user area.
+ *
+ * This routine is commonly used from a ->mmap() handler of an RTDM
+ * driver, for mapping an I/O memory area over the user address space
+ * referred to by @a vma.
+ *
+ * @param[in] vma The VMA descriptor to receive the mapping.
+ * @param[in] pa The physical I/O address to be mapped.
+ *
+ * @return 0 on success, otherwise a negated error code is returned.
+ *
+ * @note To map a chunk of logical space obtained from kmalloc(), or a
+ * purely virtual area with no direct physical mapping to a VMA, call
+ * rtdm_mmap_kmem() or rtdm_mmap_vmem() respectively instead.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_mmap_iomem(struct vm_area_struct *vma, phys_addr_t pa)
+{
+	return mmap_iomem_helper(vma, pa);
+}
+EXPORT_SYMBOL_GPL(rtdm_mmap_iomem);
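+
+/*
+ * Illustrative sketch, not part of the original patch: a ->mmap()
+ * handler exposing a device register window to user space.
+ * my_regs_phys is a hypothetical physical base address taken from
+ * the device resources.
+ */
+#if 0 /* usage sketch */
+static phys_addr_t my_regs_phys;
+
+static int my_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma)
+{
+	/* The helper sets up a non-cached I/O mapping over @vma. */
+	return rtdm_mmap_iomem(vma, my_regs_phys);
+}
+#endif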
+
+/**
+ * Unmap a user memory range.
+ *
+ * @param[in] ptr User address of the memory range
+ * @param[in] len Length of the memory range
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINVAL is returned if an invalid address or size was passed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_munmap(void *ptr, size_t len)
+{
+	if (!XENO_ASSERT(COBALT, xnsched_root_p()))
+		return -EPERM;
+
+	return vm_munmap((unsigned long)ptr, len);
+}
+EXPORT_SYMBOL_GPL(rtdm_munmap);
+
+int rtdm_get_iovec(struct rtdm_fd *fd, struct iovec **iovp,
+		   const struct user_msghdr *msg,
+		   struct iovec *iov_fast)
+{
+	size_t len = sizeof(struct iovec) * msg->msg_iovlen;
+	struct iovec *iov = iov_fast;
+
+	/*
+	 * If the I/O vector doesn't fit in the fast memory, allocate
+	 * a chunk from the system heap which is large enough to hold
+	 * it.
+	 */
+	if (msg->msg_iovlen > RTDM_IOV_FASTMAX) {
+		iov = xnmalloc(len);
+		if (iov == NULL)
+			return -ENOMEM;
+	}
+
+	*iovp = iov;
+
+	if (!rtdm_fd_is_user(fd)) {
+		memcpy(iov, msg->msg_iov, len);
+		return 0;
+	}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd))
+		return sys32_get_iovec(iov,
+			       (struct compat_iovec __user *)msg->msg_iov,
+			       msg->msg_iovlen);
+#endif
+
+	return rtdm_copy_from_user(fd, iov, msg->msg_iov, len);
+}
+EXPORT_SYMBOL_GPL(rtdm_get_iovec);
+
+int rtdm_put_iovec(struct rtdm_fd *fd, struct iovec *iov,
+		   const struct user_msghdr *msg,
+		   struct iovec *iov_fast)
+{
+	size_t len = sizeof(iov[0]) * msg->msg_iovlen;
+	int ret;
+
+	if (!rtdm_fd_is_user(fd)) {
+		memcpy(msg->msg_iov, iov, len);
+		ret = 0;
+	} else
+#ifdef CONFIG_XENO_ARCH_SYS3264
+		if (rtdm_fd_is_compat(fd))
+			ret = sys32_put_iovec((struct compat_iovec __user *)msg->msg_iov,
+					      iov, msg->msg_iovlen);
+		else
+#endif
+			ret = rtdm_copy_to_user(fd, msg->msg_iov, iov, len);
+
+	if (iov != iov_fast)
+		xnfree(iov);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_put_iovec);
+
+ssize_t rtdm_get_iov_flatlen(struct iovec *iov, int iovlen)
+{
+	ssize_t len;
+	int nvec;
+
+	/* Return the flattened vector length. */
+	for (len = 0, nvec = 0; nvec < iovlen; nvec++) {
+		ssize_t l = iov[nvec].iov_len;
+		if (l < 0 || len + l < len) /* SuS wants this. */
+			return -EINVAL;
+		len += l;
+	}
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(rtdm_get_iov_flatlen);
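+
+/*
+ * Illustrative sketch, not part of the original patch: gathering a
+ * scattered user buffer in a sendmsg-style handler with the iovec
+ * helpers. my_sendmsg_rt() and the device-specific transmit step are
+ * hypothetical; the handler prototype is assumed from the rtdm_fd_ops
+ * conventions.
+ */
+#if 0 /* usage sketch */
+static ssize_t my_sendmsg_rt(struct rtdm_fd *fd,
+			     const struct user_msghdr *msg, int flags)
+{
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+	ssize_t len;
+	int ret;
+
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	len = rtdm_get_iov_flatlen(iov, msg->msg_iovlen);
+	if (len < 0)
+		goto out;
+
+	/* ... walk iov[0..msg->msg_iovlen-1], push the data out ... */
+out:
+	/* Releases the heap copy if rtdm_get_iovec() allocated one. */
+	rtdm_put_iovec(fd, iov, msg, iov_fast);
+
+	return len;
+}
+#endif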
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+
+/**
+ * Real-time safe rate-limited message printing on kernel console
+ *
+ * @param[in] format Format string (conforming standard @c printf())
+ * @param ... Arguments referred by @a format
+ *
+ * @return On success, this service returns the number of characters printed.
+ * Otherwise, a negative error code is returned.
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_printk_ratelimited(const char *format, ...);
+
+/**
+ * Real-time safe message printing on kernel console
+ *
+ * @param[in] format Format string (conforming standard @c printf())
+ * @param ... Arguments referred by @a format
+ *
+ * @return On success, this service returns the number of characters printed.
+ * Otherwise, a negative error code is returned.
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_printk(const char *format, ...);
+
+/**
+ * Allocate memory block
+ *
+ * @param[in] size Requested size of the memory block
+ *
+ * @return The pointer to the allocated block is returned on success, NULL
+ * otherwise.
+ *
+ * @coretags{unrestricted}
+ */
+void *rtdm_malloc(size_t size);
+
+/**
+ * Release real-time memory block
+ *
+ * @param[in] ptr Pointer to memory block as returned by rtdm_malloc()
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_free(void *ptr);
+
+/**
+ * Check if read access to user-space memory block is safe
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] ptr Address of the user-provided memory block
+ * @param[in] size Size of the memory block
+ *
+ * @return Non-zero is returned when it is safe to read from the specified
+ * memory block, 0 otherwise.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_read_user_ok(struct rtdm_fd *fd, const void __user *ptr,
+		      size_t size);
+
+/**
+ * Check if read/write access to user-space memory block is safe
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] ptr Address of the user-provided memory block
+ * @param[in] size Size of the memory block
+ *
+ * @return Non-zero is returned when it is safe to read from or write to the
+ * specified memory block, 0 otherwise.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_rw_user_ok(struct rtdm_fd *fd, const void __user *ptr,
+		    size_t size);
+
+/**
+ * Copy user-space memory block to specified buffer
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Destination buffer address
+ * @param[in] src Address of the user-space memory block
+ * @param[in] size Size of the memory block
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note Before invoking this service, verify via rtdm_read_user_ok() that the
+ * provided user-space address can securely be accessed.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_copy_from_user(struct rtdm_fd *fd, void *dst,
+			const void __user *src, size_t size);
+
+/**
+ * Check if read access to a user-space memory block is safe and copy it to
+ * the specified buffer
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Destination buffer address
+ * @param[in] src Address of the user-space memory block
+ * @param[in] size Size of the memory block
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note This service is a combination of rtdm_read_user_ok and
+ * rtdm_copy_from_user.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_safe_copy_from_user(struct rtdm_fd *fd, void *dst,
+			     const void __user *src, size_t size);
+
+/**
+ * Copy specified buffer to user-space memory block
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Address of the user-space memory block
+ * @param[in] src Source buffer address
+ * @param[in] size Size of the memory block
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note Before invoking this service, verify via rtdm_rw_user_ok() that the
+ * provided user-space address can securely be accessed.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_copy_to_user(struct rtdm_fd *fd, void __user *dst,
+		      const void *src, size_t size);
+
+/**
+ * Check if read/write access to a user-space memory block is safe and copy
+ * the specified buffer to it
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Address of the user-space memory block
+ * @param[in] src Source buffer address
+ * @param[in] size Size of the memory block
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note This service is a combination of rtdm_rw_user_ok and
+ * rtdm_copy_to_user.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_safe_copy_to_user(struct rtdm_fd *fd, void __user *dst,
+			   const void *src, size_t size);
+
+/**
+ * Copy user-space string to specified buffer
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Destination buffer address
+ * @param[in] src Address of the user-space string
+ * @param[in] count Maximum number of bytes to copy, including the trailing
+ * '0'
+ *
+ * @return Length of the string on success (not including the trailing '0'),
+ * otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note This service already includes a check of the source address;
+ * calling rtdm_read_user_ok() for @a src explicitly is not required.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_strncpy_from_user(struct rtdm_fd *fd, char *dst,
+			   const char __user *src, size_t count);
+
+/**
+ * Test if running in a real-time task
+ *
+ * @return Non-zero is returned if the caller resides in real-time context, 0
+ * otherwise.
+ *
+ * @coretags{unrestricted}
+ */
+int rtdm_in_rt_context(void);
+
+/**
+ * Test if the caller is capable of running in real-time context
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ *
+ * @return Non-zero is returned if the caller is able to execute in real-time
+ * context (independent of its current execution mode), 0 otherwise.
+ *
+ * @note This function can be used by drivers that provide different
+ * implementations for the same service depending on the execution mode of
+ * the caller. If a caller requests such a service in non-real-time context
+ * but is capable of running in real-time as well, it might be appropriate
+ * for the driver to reject the request via -ENOSYS so that RTDM can switch
+ * the caller and restart the request in real-time context.
+ *
+ * @coretags{unrestricted}
+ */
+int rtdm_rt_capable(struct rtdm_fd *fd);
+
+/**
+ * Test if the real-time core is available
+ *
+ * @return True if the real-time core is available, false if it is disabled or in
+ * error state.
+ *
+ * @note Drivers should query the core state during initialization if they
+ * perform hardware setup operations or interact with RTDM services such as
+ * locks prior to calling an RTDM service that has a built-in state check of
+ * the real-time core (e.g. rtdm_dev_register() or rtdm_task_init()).
+ *
+ * @coretags{unrestricted}
+ */
+bool rtdm_available(void);
+
+#endif /* DOXYGEN_CPP */
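+
+/*
+ * Illustrative sketch, not part of the original patch: the demotion
+ * pattern described for rtdm_rt_capable() above. A driver that only
+ * implements the real-time path of a request may reject real-time
+ * capable callers from its ioctl_nrt handler with -ENOSYS, so that
+ * RTDM restarts the request in real-time context. All my_* names are
+ * hypothetical.
+ */
+#if 0 /* usage sketch */
+static int my_handle_request_inband(struct rtdm_fd *fd,
+				    unsigned int request, void __user *arg)
+{
+	return -EINVAL;		/* driver-specific in-band work goes here */
+}
+
+static int my_ioctl_nrt(struct rtdm_fd *fd, unsigned int request,
+			void __user *arg)
+{
+	/* Bounce real-time capable callers back to the RT handler. */
+	if (rtdm_rt_capable(fd))
+		return -ENOSYS;
+
+	/* Plain Linux task with no real-time shadow: serve it here. */
+	return my_handle_request_inband(fd, request, arg);
+}
+#endif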
+
+/** @} Utility Services */
+++ linux-patched/kernel/xenomai/rtdm/Makefile	2022-03-21 12:58:29.104891741 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/rtdm/device.c	1970-01-01 01:00:00.000000000 +0100
+
+obj-$(CONFIG_XENOMAI) += xenomai.o
+
+xenomai-y :=	core.o		\
+		device.o	\
+		drvlib.o	\
+		fd.o		\
+		wrappers.o
+
+ccflags-y += -I$(srctree)/$(src)/.. -I$(srctree)/kernel
+++ linux-patched/kernel/xenomai/rtdm/device.c	2022-03-21 12:58:29.101891770 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/rtdm/wrappers.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Real-Time Driver Model for Xenomai, device management
+ *
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include "rtdm/internal.h"
+#include <cobalt/kernel/init.h>
+#include <trace/events/cobalt-rtdm.h>
+
+/**
+ * @ingroup rtdm
+ * @defgroup rtdm_profiles Device Profiles
+ *
+ * Pre-defined classes of real-time devices
+ *
+ * Device profiles define which operation handlers a driver of a
+ * certain class of devices has to implement, which name or protocol
+ * it has to register, which IOCTLs it has to provide, and further
+ * details. Sub-classes can be defined in order to extend a device
+ * profile with more hardware-specific functions.
+ */
+
+/**
+ * @addtogroup rtdm_driver_interface
+ * @{
+ */
+
+#define RTDM_DEVICE_MAGIC	0x82846877
+
+static struct rb_root protocol_devices;
+
+static DEFINE_MUTEX(register_lock);
+static DECLARE_BITMAP(protocol_devices_minor_map, RTDM_MAX_MINOR);
+
+static struct class *rtdm_class;
+
+static int enosys(void)
+{
+	return -ENOSYS;
+}
+
+void __rtdm_put_device(struct rtdm_device *dev)
+{
+	secondary_mode_only();
+
+	if (atomic_dec_and_test(&dev->refcount))
+		wake_up(&dev->putwq);
+}
+
+static inline xnkey_t get_proto_id(int pf, int type)
+{
+	xnkey_t llpf = (unsigned int)pf;
+	return (llpf << 32) | (unsigned int)type;
+}
+
+struct rtdm_device *__rtdm_get_namedev(const char *path)
+{
+	struct rtdm_device *dev;
+	xnhandle_t handle;
+	int ret;
+
+	secondary_mode_only();
+
+	/* skip common /dev prefix */
+	if (strncmp(path, "/dev/", 5) == 0)
+		path += 5;
+
+	/* skip RTDM devnode root */
+	if (strncmp(path, "rtdm/", 5) == 0)
+		path += 5;
+
+	ret = xnregistry_bind(path, XN_NONBLOCK, XN_RELATIVE, &handle);
+	if (ret)
+		return NULL;
+
+	mutex_lock(&register_lock);
+
+	dev = xnregistry_lookup(handle, NULL);
+	if (dev && dev->magic == RTDM_DEVICE_MAGIC)
+		__rtdm_get_device(dev);
+	else
+		dev = NULL;
+
+	mutex_unlock(&register_lock);
+
+	return dev;
+}
+
+struct rtdm_device *__rtdm_get_protodev(int protocol_family, int socket_type)
+{
+	struct rtdm_device *dev = NULL;
+	struct xnid *xnid;
+	xnkey_t id;
+
+	secondary_mode_only();
+
+	id = get_proto_id(protocol_family, socket_type);
+
+	mutex_lock(&register_lock);
+
+	xnid = xnid_fetch(&protocol_devices, id);
+	if (xnid) {
+		dev = container_of(xnid, struct rtdm_device, proto.id);
+		__rtdm_get_device(dev);
+	}
+
+	mutex_unlock(&register_lock);
+
+	return dev;
+}
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_device_register Device Registration Services
+ * @{
+ */
+
+static char *rtdm_devnode(struct device *dev, umode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "rtdm/%s", dev_name(dev));
+}
+
+static ssize_t profile_show(struct device *kdev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct rtdm_device *dev = dev_get_drvdata(kdev);
+
+	return sprintf(buf, "%d,%d\n",
+		       dev->driver->profile_info.class_id,
+		       dev->driver->profile_info.subclass_id);
+}
+
+static ssize_t refcount_show(struct device *kdev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct rtdm_device *dev = dev_get_drvdata(kdev);
+
+	return sprintf(buf, "%d\n", atomic_read(&dev->refcount));
+}
+
+#define cat_count(__buf, __str)			\
+	({					\
+		int __ret = sizeof(__str) - 1;	\
+		strcat(__buf, __str);		\
+		__ret;				\
+	})
+
+static ssize_t flags_show(struct device *kdev,
+			  struct device_attribute *attr, char *buf)
+{
+	struct rtdm_device *dev = dev_get_drvdata(kdev);
+	struct rtdm_driver *drv = dev->driver;
+
+	return sprintf(buf, "%#x\n", drv->device_flags);
+
+}
+
+static ssize_t type_show(struct device *kdev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct rtdm_device *dev = dev_get_drvdata(kdev);
+	struct rtdm_driver *drv = dev->driver;
+	int ret;
+
+	if (drv->device_flags & RTDM_NAMED_DEVICE)
+		ret = cat_count(buf, "named\n");
+	else
+		ret = cat_count(buf, "protocol\n");
+
+	return ret;
+
+}
+
+#ifdef ATTRIBUTE_GROUPS
+
+static DEVICE_ATTR_RO(profile);
+static DEVICE_ATTR_RO(refcount);
+static DEVICE_ATTR_RO(flags);
+static DEVICE_ATTR_RO(type);
+
+static struct attribute *rtdm_attrs[] = {
+	&dev_attr_profile.attr,
+	&dev_attr_refcount.attr,
+	&dev_attr_flags.attr,
+	&dev_attr_type.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(rtdm);
+
+#else /* !ATTRIBUTE_GROUPS */
+
+/*
+ * Cope with legacy sysfs attributes. Scheduled for removal when 3.10
+ * is at EOL for us.
+ */
+static struct device_attribute rtdm_attrs[] = {
+	DEVICE_ATTR_RO(profile),
+	DEVICE_ATTR_RO(refcount),
+	DEVICE_ATTR_RO(flags),
+	DEVICE_ATTR_RO(type),
+	__ATTR_NULL
+};
+
+#define dev_groups   dev_attrs
+#define rtdm_groups  rtdm_attrs
+
+#endif /* !ATTRIBUTE_GROUPS */
+
+static int state_change_notifier(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct rtdm_driver *drv;
+	int ret;
+
+	drv = container_of(nb, struct rtdm_driver, nb_statechange);
+
+	switch (action) {
+	case COBALT_STATE_WARMUP:
+		if (drv->smops.start == NULL)
+			return NOTIFY_DONE;
+		ret = drv->smops.start(drv);
+		if (ret)
+			printk(XENO_WARNING
+			       "failed starting driver %s (%d)\n",
+			       drv->profile_info.name, ret);
+		break;
+	case COBALT_STATE_TEARDOWN:
+		if (drv->smops.stop == NULL)
+			return NOTIFY_DONE;
+		ret = drv->smops.stop(drv);
+		if (ret)
+			printk(XENO_WARNING
+			       "failed stopping driver %s (%d)\n",
+			       drv->profile_info.name, ret);
+		break;
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int register_driver(struct rtdm_driver *drv)
+{
+	dev_t rdev;
+	int ret;
+
+	if (drv->profile_info.magic == RTDM_CLASS_MAGIC) {
+		atomic_inc(&drv->refcount);
+		return 0;
+	}
+
+	if (drv->profile_info.magic != ~RTDM_CLASS_MAGIC) {
+		XENO_WARN_ON_ONCE(COBALT, 1);
+		return -EINVAL;
+	}
+
+	switch (drv->device_flags & RTDM_DEVICE_TYPE_MASK) {
+	case RTDM_NAMED_DEVICE:
+	case RTDM_PROTOCOL_DEVICE:
+		break;
+	default:
+		printk(XENO_WARNING "%s has invalid device type (%#x)\n",
+		       drv->profile_info.name,
+		       drv->device_flags & RTDM_DEVICE_TYPE_MASK);
+		return -EINVAL;
+	}
+
+	if (drv->device_count <= 0 ||
+	    drv->device_count > RTDM_MAX_MINOR) {
+		printk(XENO_WARNING "%s has invalid device count (%d)\n",
+		       drv->profile_info.name, drv->device_count);
+		return -EINVAL;
+	}
+
+	if ((drv->device_flags & RTDM_NAMED_DEVICE) == 0)
+		goto done;
+
+	if (drv->base_minor < 0 ||
+	    drv->base_minor >= RTDM_MAX_MINOR) {
+		printk(XENO_WARNING "%s has invalid base minor (%d)\n",
+		       drv->profile_info.name, drv->base_minor);
+		return -EINVAL;
+	}
+
+	ret = alloc_chrdev_region(&rdev, drv->base_minor, drv->device_count,
+				  drv->profile_info.name);
+	if (ret) {
+		printk(XENO_WARNING "cannot allocate chrdev region %s[%d..%d]\n",
+		       drv->profile_info.name, drv->base_minor,
+		       drv->base_minor + drv->device_count - 1);
+		return ret;
+	}
+
+	cdev_init(&drv->named.cdev, &rtdm_dumb_fops);
+	ret = cdev_add(&drv->named.cdev, rdev, drv->device_count);
+	if (ret) {
+		printk(XENO_WARNING "cannot create cdev series for %s\n",
+		       drv->profile_info.name);
+		goto fail_cdev;
+	}
+
+	drv->named.major = MAJOR(rdev);
+	bitmap_zero(drv->minor_map, RTDM_MAX_MINOR);
+
+done:
+	atomic_set(&drv->refcount, 1);
+	drv->nb_statechange.notifier_call = state_change_notifier;
+	drv->nb_statechange.priority = 0;
+	cobalt_add_state_chain(&drv->nb_statechange);
+	drv->profile_info.magic = RTDM_CLASS_MAGIC;
+
+	return 0;
+
+fail_cdev:
+	unregister_chrdev_region(rdev, drv->device_count);
+
+	return ret;
+}
+
+static void unregister_driver(struct rtdm_driver *drv)
+{
+	XENO_BUG_ON(COBALT, drv->profile_info.magic != RTDM_CLASS_MAGIC);
+
+	if (!atomic_dec_and_test(&drv->refcount))
+		return;
+
+	cobalt_remove_state_chain(&drv->nb_statechange);
+
+	drv->profile_info.magic = ~RTDM_CLASS_MAGIC;
+
+	if (drv->device_flags & RTDM_NAMED_DEVICE) {
+		cdev_del(&drv->named.cdev);
+		unregister_chrdev_region(MKDEV(drv->named.major, drv->base_minor),
+					 drv->device_count);
+	}
+}
+
+/**
+ * @brief Register an RTDM device
+ *
+ * Registers a device in the RTDM namespace.
+ *
+ * @param[in] dev Device descriptor.
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if the descriptor contains invalid
+ * entries. RTDM_PROFILE_INFO() must appear in the list of
+ * initializers for the driver properties.
+ *
+ * - -EEXIST is returned if the specified device name or protocol ID is
+ * already in use.
+ *
+ * - -ENOMEM is returned if a memory allocation failed in the process
+ * of registering the device.
+ *
+ * - -EAGAIN is returned if no registry slot is available (check/raise
+ * CONFIG_XENO_OPT_REGISTRY_NRSLOTS).
+ *
+ * - -ENOSYS is returned if the real-time core is disabled.
+ *
+ * - -ENXIO is returned if no valid minor could be assigned.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_dev_register(struct rtdm_device *dev)
+{
+	struct class *kdev_class = rtdm_class;
+	struct device *kdev = NULL;
+	struct rtdm_driver *drv;
+	int ret, major, minor;
+	xnkey_t id;
+	dev_t rdev;
+	const char *dev_name;
+
+	secondary_mode_only();
+
+	if (!realtime_core_enabled())
+		return -ENOSYS;
+
+	mutex_lock(&register_lock);
+
+	dev->name = NULL;
+	drv = dev->driver;
+	ret = register_driver(drv);
+	if (ret) {
+		mutex_unlock(&register_lock);
+		return ret;
+	}
+
+	dev->ops = drv->ops;
+	if (drv->device_flags & RTDM_NAMED_DEVICE)
+		dev->ops.socket = (typeof(dev->ops.socket))enosys;
+	else
+		dev->ops.open = (typeof(dev->ops.open))enosys;
+
+	INIT_LIST_HEAD(&dev->openfd_list);
+	init_waitqueue_head(&dev->putwq);
+	dev->ops.close = __rtdm_dev_close; /* Interpose on driver's handler. */
+	atomic_set(&dev->refcount, 0);
+
+	if (drv->profile_info.kdev_class)
+		kdev_class = drv->profile_info.kdev_class;
+
+	if (drv->device_flags & RTDM_NAMED_DEVICE) {
+		if (drv->device_flags & RTDM_FIXED_MINOR) {
+			minor = dev->minor;
+			if (minor < 0 ||
+			    minor >= drv->base_minor + drv->device_count) {
+				ret = -ENXIO;
+				goto fail;
+			}
+		} else {
+			minor = find_first_zero_bit(drv->minor_map, RTDM_MAX_MINOR);
+			if (minor >= RTDM_MAX_MINOR) {
+				ret = -ENXIO;
+				goto fail;
+			}
+			dev->minor = minor;
+		}
+
+		major = drv->named.major;
+		dev->name = kasformat(dev->label, minor);
+		if (dev->name == NULL) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+		if (dev->name[0] == '/') {
+			dev_name = dev->name+1;
+		} else {
+			dev_name = dev->name;
+		}
+		ret = xnregistry_enter(dev_name, dev,
+				       &dev->named.handle, NULL);
+		if (ret)
+			goto fail;
+
+		rdev = MKDEV(major, minor);
+		kdev = device_create(kdev_class, NULL, rdev,
+				     dev, kbasename(dev->label), minor);
+		if (IS_ERR(kdev)) {
+			xnregistry_remove(dev->named.handle);
+			ret = PTR_ERR(kdev);
+			goto fail2;
+		}
+		__set_bit(minor, drv->minor_map);
+	} else {
+		minor = find_first_zero_bit(protocol_devices_minor_map,
+					RTDM_MAX_MINOR);
+		if (minor >= RTDM_MAX_MINOR) {
+			ret = -ENXIO;
+			goto fail;
+		}
+		dev->minor = minor;
+
+		dev->name = kstrdup(dev->label, GFP_KERNEL);
+		if (dev->name == NULL) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		rdev = MKDEV(0, minor);
+		kdev = device_create(kdev_class, NULL, rdev,
+				     dev, dev->name);
+		if (IS_ERR(kdev)) {
+			ret = PTR_ERR(kdev);
+			goto fail2;
+		}
+
+		id = get_proto_id(drv->protocol_family, drv->socket_type);
+		ret = xnid_enter(&protocol_devices, &dev->proto.id, id);
+		if (ret < 0)
+			goto fail;
+		__set_bit(minor, protocol_devices_minor_map);
+	}
+
+	dev->rdev = rdev;
+	dev->kdev = kdev;
+	dev->magic = RTDM_DEVICE_MAGIC;
+	dev->kdev_class = kdev_class;
+
+	mutex_unlock(&register_lock);
+
+	trace_cobalt_device_register(dev);
+
+	return 0;
+fail:
+	if (kdev)
+		device_destroy(kdev_class, rdev);
+fail2:
+	unregister_driver(drv);
+
+	mutex_unlock(&register_lock);
+
+	if (dev->name)
+		kfree(dev->name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_dev_register);
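+
+/*
+ * Illustrative sketch, not part of the original patch: minimal named
+ * device registration. The my_* identifiers, the class/subclass
+ * constants and the handler bodies are placeholders; field names
+ * follow the RTDM driver descriptors referenced above.
+ */
+#if 0 /* usage sketch */
+static int my_open(struct rtdm_fd *fd, int oflags)
+{
+	return 0;
+}
+
+static void my_close(struct rtdm_fd *fd)
+{
+}
+
+static struct rtdm_driver my_driver = {
+	.profile_info	= RTDM_PROFILE_INFO(my_driver,
+					    RTDM_CLASS_EXPERIMENTAL,
+					    RTDM_SUBCLASS_GENERIC, 1),
+	.device_flags	= RTDM_NAMED_DEVICE,
+	.device_count	= 1,
+	.ops = {
+		.open	= my_open,
+		.close	= my_close,
+	},
+};
+
+static struct rtdm_device my_device = {
+	.driver = &my_driver,
+	.label = "mydev%d",	/* node shows up as /dev/rtdm/mydev0 */
+};
+
+/* module init: rtdm_dev_register(&my_device);   */
+/* module exit: rtdm_dev_unregister(&my_device); */
+#endif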
+
+/**
+ * @brief Unregister an RTDM device
+ *
+ * Removes the device from the RTDM namespace. This routine first
+ * attempts to teardown all active connections to the @a device prior
+ * to unregistering.
+ *
+ * @param[in] dev Device descriptor.
+ *
+ * @coretags{secondary-only}
+ */
+void rtdm_dev_unregister(struct rtdm_device *dev)
+{
+	struct rtdm_driver *drv = dev->driver;
+
+	secondary_mode_only();
+
+	trace_cobalt_device_unregister(dev);
+
+	/* Lock out any further connection. */
+	dev->magic = ~RTDM_DEVICE_MAGIC;
+
+	/* Flush all fds from this device. */
+	rtdm_device_flush_fds(dev);
+
+	/* Then wait for the ongoing connections to finish. */
+	wait_event(dev->putwq,
+		   atomic_read(&dev->refcount) == 0);
+
+	mutex_lock(&register_lock);
+
+	if (drv->device_flags & RTDM_NAMED_DEVICE) {
+		xnregistry_remove(dev->named.handle);
+		__clear_bit(dev->minor, drv->minor_map);
+	} else {
+		xnid_remove(&protocol_devices, &dev->proto.id);
+		__clear_bit(dev->minor, protocol_devices_minor_map);
+	}
+
+	device_destroy(dev->kdev_class, dev->rdev);
+
+	unregister_driver(drv);
+
+	mutex_unlock(&register_lock);
+
+	kfree(dev->name);
+}
+EXPORT_SYMBOL_GPL(rtdm_dev_unregister);
+
+/**
+ * @brief Set the kernel device class of an RTDM driver.
+ *
+ * Set the kernel device class assigned to the RTDM driver. By
+ * default, RTDM drivers belong to Linux's "rtdm" device class,
+ * creating a device node hierarchy rooted at /dev/rtdm, and sysfs
+ * nodes under /sys/class/rtdm.
+ *
+ * This call assigns a user-defined kernel device class to the RTDM
+ * driver, so that its devices are created into a different system
+ * hierarchy.
+ *
+ * rtdm_drv_set_sysclass() is meaningful only before the first device
+ * which is attached to @a drv is registered by a call to
+ * rtdm_dev_register().
+ *
+ * @param[in] drv Address of the RTDM driver descriptor.
+ *
+ * @param[in] cls Pointer to the kernel device class. NULL is allowed
+ * to clear a previous setting, switching back to the default "rtdm"
+ * device class.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EBUSY is returned if the kernel device class has already been
+ * set for @a drv, or some device(s) attached to @a drv are currently
+ * registered.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @attention The kernel device class set by this call is not related to
+ * the RTDM class identification as defined by the @ref rtdm_profiles
+ * "RTDM profiles" in any way. This is strictly related to the Linux
+ * kernel device hierarchy.
+ */
+int rtdm_drv_set_sysclass(struct rtdm_driver *drv, struct class *cls)
+{
+	if ((cls && drv->profile_info.kdev_class) ||
+	    atomic_read(&drv->refcount))
+		return -EBUSY;
+
+	drv->profile_info.kdev_class = cls;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rtdm_drv_set_sysclass);
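+
+/*
+ * Illustrative sketch, not part of the original patch: switching a
+ * driver's devices to a custom kernel device class before the first
+ * rtdm_dev_register() call. my_class, my_driver and the "my_rt_class"
+ * name are hypothetical.
+ */
+#if 0 /* usage sketch */
+static struct class *my_class;
+
+static int my_setup_sysclass(struct rtdm_driver *my_driver)
+{
+	my_class = class_create(THIS_MODULE, "my_rt_class");
+	if (IS_ERR(my_class))
+		return PTR_ERR(my_class);
+
+	/* Must precede the first device registration for my_driver. */
+	return rtdm_drv_set_sysclass(my_driver, my_class);
+}
+#endif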
+
+/** @} */
+
+int __init rtdm_init(void)
+{
+	xntree_init(&protocol_devices);
+
+	rtdm_class = class_create(THIS_MODULE, "rtdm");
+	if (IS_ERR(rtdm_class)) {
+		printk(XENO_ERR "cannot create RTDM sysfs class\n");
+		return PTR_ERR(rtdm_class);
+	}
+	rtdm_class->dev_groups = rtdm_groups;
+	rtdm_class->devnode = rtdm_devnode;
+
+	bitmap_zero(protocol_devices_minor_map, RTDM_MAX_MINOR);
+
+	return 0;
+}
+
+void rtdm_cleanup(void)
+{
+	class_destroy(rtdm_class);
+	/*
+	 * NOTE: no need to flush the cleanup_queue as no device is
+	 * allowed to unregister as long as there are references.
+	 */
+}
+
+/** @} */
+++ linux-patched/kernel/xenomai/rtdm/wrappers.c	2022-03-21 12:58:29.097891809 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/mqueue.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (c) 2013  Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (c) 2013  Hannes Frederic Sowa <hannes@stressinduktion.org>
+ * Copyright (c) 2014  Luis R. Rodriguez <mcgrof@do-not-panic.com>
+ *
+ * Backport functionality introduced in Linux 3.13.
+ *
+ * Copyright (c) 2014  Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Backport functionality introduced in Linux 3.14.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/hwmon.h>
+#include <asm/xenomai/wrappers.h>
+
+/*
+ * Same rules as kernel/cobalt/include/asm-generic/xenomai/wrappers.h
+ * apply to reduce #ifdefery.
+ */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
+#ifdef CONFIG_PCI_MSI
+int pci_enable_msix_range(struct pci_dev *dev,
+			struct msix_entry *entries,
+			int minvec, int maxvec)
+{
+	int nvec = maxvec;
+	int rc;
+
+	if (maxvec < minvec)
+		return -ERANGE;
+
+	do {
+		rc = pci_enable_msix(dev, entries, nvec);
+		if (rc < 0) {
+			return rc;
+		} else if (rc > 0) {
+			if (rc < minvec)
+				return -ENOSPC;
+			nvec = rc;
+		}
+	} while (rc);
+
+	return nvec;
+}
+EXPORT_SYMBOL(pci_enable_msix_range);
+#endif
+#endif /* < 3.14 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0)
+#ifdef CONFIG_HWMON
+struct device*
+hwmon_device_register_with_groups(struct device *dev, const char *name,
+				void *drvdata,
+				const struct attribute_group **groups)
+{
+	struct device *hwdev;
+
+	hwdev = hwmon_device_register(dev);
+	hwdev->groups = groups;
+	dev_set_drvdata(hwdev, drvdata);
+	return hwdev;
+}
+
+static void devm_hwmon_release(struct device *dev, void *res)
+{
+	struct device *hwdev = *(struct device **)res;
+
+	hwmon_device_unregister(hwdev);
+}
+
+struct device *
+devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
+				void *drvdata,
+				const struct attribute_group **groups)
+{
+	struct device **ptr, *hwdev;
+
+	if (!dev)
+		return ERR_PTR(-EINVAL);
+
+	ptr = devres_alloc(devm_hwmon_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	hwdev = hwmon_device_register_with_groups(dev, name, drvdata, groups);
+	if (IS_ERR(hwdev))
+		goto error;
+
+	*ptr = hwdev;
+	devres_add(dev, ptr);
+	return hwdev;
+
+error:
+	devres_free(ptr);
+	return hwdev;
+}
+EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups);
+#endif
+#endif /* < 3.13 */
+++ linux-patched/kernel/xenomai/posix/mqueue.h	2022-03-21 12:58:29.092891858 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/sched.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_POSIX_MQUEUE_H
+#define _COBALT_POSIX_MQUEUE_H
+
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <xenomai/posix/syscall.h>
+
+struct mq_attr {
+	long mq_flags;
+	long mq_maxmsg;
+	long mq_msgsize;
+	long mq_curmsgs;
+};
+
+int __cobalt_mq_open(const char __user *u_name, int oflags,
+		     mode_t mode, struct mq_attr *attr);
+
+int __cobalt_mq_getattr(mqd_t uqd, struct mq_attr *attr);
+
+int __cobalt_mq_timedsend(mqd_t uqd, const void __user *u_buf, size_t len,
+			  unsigned int prio, const void __user *u_ts,
+			  int (*fetch_timeout)(struct timespec64 *ts,
+					       const void __user *u_ts));
+
+int __cobalt_mq_timedsend64(mqd_t uqd, const void __user *u_buf, size_t len,
+			    unsigned int prio, const void __user *u_ts);
+
+int __cobalt_mq_timedreceive(mqd_t uqd, void __user *u_buf,
+			     ssize_t *lenp,
+			     unsigned int __user *u_prio,
+			     const void __user *u_ts,
+			     int (*fetch_timeout)(struct timespec64 *ts,
+						  const void __user *u_ts));
+
+int __cobalt_mq_timedreceive64(mqd_t uqd, void __user *u_buf,
+			       ssize_t __user *u_len,
+			       unsigned int __user *u_prio,
+			       const void __user *u_ts);
+
+int __cobalt_mq_notify(mqd_t fd, const struct sigevent *evp);
+
+COBALT_SYSCALL_DECL(mq_open,
+		    (const char __user *u_name, int oflags,
+		     mode_t mode, struct mq_attr __user *u_attr));
+
+COBALT_SYSCALL_DECL(mq_close, (mqd_t uqd));
+
+COBALT_SYSCALL_DECL(mq_unlink, (const char __user *u_name));
+
+COBALT_SYSCALL_DECL(mq_getattr, (mqd_t uqd, struct mq_attr __user *u_attr));
+
+COBALT_SYSCALL_DECL(mq_timedsend,
+		    (mqd_t uqd, const void __user *u_buf, size_t len,
+		     unsigned int prio, const struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(mq_timedsend64,
+		    (mqd_t uqd, const void __user *u_buf, size_t len,
+		     unsigned int prio,
+		     const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(mq_timedreceive,
+		    (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len,
+		     unsigned int __user *u_prio,
+		     const struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(mq_timedreceive64,
+		    (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len,
+		     unsigned int __user *u_prio,
+		     const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(mq_notify,
+		    (mqd_t fd, const struct sigevent *__user evp));
+
+#endif /* !_COBALT_POSIX_MQUEUE_H */
+++ linux-patched/kernel/xenomai/posix/sched.h	2022-03-21 12:58:29.088891896 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/internal.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_SCHED_H
+#define _COBALT_POSIX_SCHED_H
+
+#include <linux/list.h>
+#include <cobalt/kernel/sched.h>
+#include <xenomai/posix/syscall.h>
+
+struct cobalt_resources;
+struct cobalt_process;
+
+struct cobalt_sched_group {
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	struct xnsched_quota_group quota;
+#endif
+	struct cobalt_resources *scope;
+	int pshared;
+	struct list_head next;
+};
+
+int __cobalt_sched_weightprio(int policy,
+			      const struct sched_param_ex *param_ex);
+
+int __cobalt_sched_setconfig_np(int cpu, int policy,
+				void __user *u_config,
+				size_t len,
+				union sched_config *(*fetch_config)
+				(int policy, const void __user *u_config,
+				 size_t *len),
+				int (*ack_config)(int policy,
+						  const union sched_config *config,
+						  void __user *u_config));
+
+ssize_t __cobalt_sched_getconfig_np(int cpu, int policy,
+				    void __user *u_config,
+				    size_t len,
+				    union sched_config *(*fetch_config)
+				    (int policy, const void __user *u_config,
+				     size_t *len),
+				    ssize_t (*put_config)(int policy,
+							  void __user *u_config, size_t u_len,
+							  const union sched_config *config,
+							  size_t len));
+int cobalt_sched_setscheduler_ex(pid_t pid,
+				 int policy,
+				 const struct sched_param_ex *param_ex,
+				 __u32 __user *u_winoff,
+				 int __user *u_promoted);
+
+int cobalt_sched_getscheduler_ex(pid_t pid,
+				 int *policy_r,
+				 struct sched_param_ex *param_ex);
+
+struct xnsched_class *
+cobalt_sched_policy_param(union xnsched_policy_param *param,
+			  int u_policy, const struct sched_param_ex *param_ex,
+			  xnticks_t *tslice_r);
+
+COBALT_SYSCALL_DECL(sched_yield, (void));
+
+COBALT_SYSCALL_DECL(sched_weightprio,
+		    (int policy, const struct sched_param_ex __user *u_param));
+
+COBALT_SYSCALL_DECL(sched_minprio, (int policy));
+
+COBALT_SYSCALL_DECL(sched_maxprio, (int policy));
+
+COBALT_SYSCALL_DECL(sched_setconfig_np,
+		    (int cpu,
+		     int policy,
+		     union sched_config __user *u_config,
+		     size_t len));
+
+COBALT_SYSCALL_DECL(sched_getconfig_np,
+		    (int cpu, int policy,
+		     union sched_config __user *u_config,
+		     size_t len));
+
+COBALT_SYSCALL_DECL(sched_setscheduler_ex,
+		    (pid_t pid,
+		     int policy,
+		     const struct sched_param_ex __user *u_param,
+		     __u32 __user *u_winoff,
+		     int __user *u_promoted));
+
+COBALT_SYSCALL_DECL(sched_getscheduler_ex,
+		    (pid_t pid,
+		     int __user *u_policy,
+		     struct sched_param_ex __user *u_param));
+
+void cobalt_sched_reclaim(struct cobalt_process *process);
+
+#endif /* !_COBALT_POSIX_SCHED_H */
+++ linux-patched/kernel/xenomai/posix/internal.h	2022-03-21 12:58:29.085891926 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/mutex.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_INTERNAL_H
+#define _COBALT_POSIX_INTERNAL_H
+
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/arith.h>
+#include <asm/xenomai/syscall.h>
+#include "process.h"
+#include "extension.h"
+#include "syscall.h"
+#include "memory.h"
+
+#define COBALT_MAXNAME		64
+#define COBALT_PERMS_MASK	(O_RDONLY | O_WRONLY | O_RDWR)
+
+#define COBALT_MAGIC(n)		(0x8686##n##n)
+#define COBALT_ANY_MAGIC	COBALT_MAGIC(00)
+#define COBALT_THREAD_MAGIC	COBALT_MAGIC(01)
+#define COBALT_MQ_MAGIC		COBALT_MAGIC(0A)
+#define COBALT_MQD_MAGIC	COBALT_MAGIC(0B)
+#define COBALT_EVENT_MAGIC	COBALT_MAGIC(0F)
+#define COBALT_MONITOR_MAGIC	COBALT_MAGIC(10)
+#define COBALT_TIMERFD_MAGIC	COBALT_MAGIC(11)
+
+#define cobalt_obj_active(h,m,t)	\
+	((h) && ((t *)(h))->magic == (m))
+
+#define cobalt_mark_deleted(t) ((t)->magic = ~(t)->magic)
+
+extern struct xnptree posix_ptree;
+
+static inline xnhandle_t cobalt_get_handle_from_user(xnhandle_t *u_h)
+{
+	xnhandle_t handle;
+	return __xn_get_user(handle, u_h) ? 0 : handle;
+}
+
+int cobalt_init(void);
+
+long cobalt_restart_syscall_placeholder(struct restart_block *param);
+
+#endif /* !_COBALT_POSIX_INTERNAL_H */
+++ linux-patched/kernel/xenomai/posix/mutex.h	2022-03-21 12:58:29.081891965 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/signal.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_POSIX_MUTEX_H
+#define _COBALT_POSIX_MUTEX_H
+
+#include "thread.h"
+#include <cobalt/uapi/mutex.h>
+#include <xenomai/posix/syscall.h>
+#include <xenomai/posix/process.h>
+
+struct cobalt_process;
+
+struct cobalt_mutex {
+	unsigned int magic;
+	struct xnsynch synchbase;
+	/** cobalt_mutexq */
+	struct list_head conds;
+	struct cobalt_mutexattr attr;
+	struct cobalt_resnode resnode;
+};
+
+int __cobalt_mutex_timedlock_break(struct cobalt_mutex_shadow __user *u_mx,
+				   const void __user *u_ts,
+				   int (*fetch_timeout)(struct timespec64 *ts,
+							const void __user *u_ts));
+
+int __cobalt_mutex_timedlock64(struct cobalt_mutex_shadow __user *u_mx,
+				   const void __user *u_ts);
+
+int __cobalt_mutex_acquire_unchecked(struct xnthread *cur,
+				     struct cobalt_mutex *mutex,
+				     const struct timespec64 *ts);
+
+COBALT_SYSCALL_DECL(mutex_check_init,
+		    (struct cobalt_mutex_shadow __user *u_mx));
+
+COBALT_SYSCALL_DECL(mutex_init,
+		    (struct cobalt_mutex_shadow __user *u_mx,
+		     const struct cobalt_mutexattr __user *u_attr));
+
+COBALT_SYSCALL_DECL(mutex_destroy,
+		    (struct cobalt_mutex_shadow __user *u_mx));
+
+COBALT_SYSCALL_DECL(mutex_trylock,
+		    (struct cobalt_mutex_shadow __user *u_mx));
+
+COBALT_SYSCALL_DECL(mutex_lock,
+		    (struct cobalt_mutex_shadow __user *u_mx));
+
+COBALT_SYSCALL_DECL(mutex_timedlock,
+		    (struct cobalt_mutex_shadow __user *u_mx,
+		     const struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(mutex_timedlock64,
+		    (struct cobalt_mutex_shadow __user *u_mx,
+		     const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(mutex_unlock,
+		    (struct cobalt_mutex_shadow __user *u_mx));
+
+int cobalt_mutex_release(struct xnthread *cur,
+			 struct cobalt_mutex *mutex);
+
+void cobalt_mutex_reclaim(struct cobalt_resnode *node,
+			  spl_t s);
+
+#endif /* !_COBALT_POSIX_MUTEX_H */
+++ linux-patched/kernel/xenomai/posix/signal.h	2022-03-21 12:58:29.078891994 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/timerfd.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_SIGNAL_H
+#define _COBALT_POSIX_SIGNAL_H
+
+#include <linux/signal.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/uapi/signal.h>
+#include <xenomai/posix/syscall.h>
+
+struct cobalt_thread;
+
+struct cobalt_sigpending {
+	struct siginfo si;
+	struct list_head next;
+};
+
+static inline
+void cobalt_copy_siginfo(int code,
+			 struct siginfo *__restrict__ dst,
+			 const struct siginfo *__restrict__ src)
+{
+	dst->si_signo = src->si_signo;
+	dst->si_errno = src->si_errno;
+	dst->si_code = code;
+
+	switch (code) {
+	case SI_TIMER:
+		dst->si_tid = src->si_tid;
+		dst->si_overrun = src->si_overrun;
+		dst->si_value = src->si_value;
+		break;
+	case SI_QUEUE:
+	case SI_MESGQ:
+		dst->si_value = src->si_value;
+		fallthrough;
+	case SI_USER:
+		dst->si_pid = src->si_pid;
+		dst->si_uid = src->si_uid;
+	}
+}
+
+int __cobalt_sigwait(sigset_t *set);
+
+int __cobalt_sigtimedwait(sigset_t *set,
+			  const struct timespec64 *timeout,
+			  void __user *u_si,
+			  bool compat);
+
+int __cobalt_sigwaitinfo(sigset_t *set,
+			 void __user *u_si,
+			 bool compat);
+
+int __cobalt_sigqueue(pid_t pid, int sig, const union sigval *value);
+
+int cobalt_signal_send(struct cobalt_thread *thread,
+		       struct cobalt_sigpending *sigp,
+		       int group);
+
+int cobalt_signal_send_pid(pid_t pid,
+			   struct cobalt_sigpending *sigp);
+
+struct cobalt_sigpending *cobalt_signal_alloc(void);
+
+void cobalt_signal_free(struct cobalt_sigpending *sigp);
+
+void cobalt_signal_flush(struct cobalt_thread *thread);
+
+int cobalt_signal_wait(sigset_t *set, struct siginfo *si,
+		       xnticks_t timeout, xntmode_t tmode);
+
+int __cobalt_kill(struct cobalt_thread *thread,
+		  int sig, int group);
+
+COBALT_SYSCALL_DECL(sigwait,
+		    (const sigset_t __user *u_set, int __user *u_sig));
+
+COBALT_SYSCALL_DECL(sigtimedwait,
+		    (const sigset_t __user *u_set,
+		     struct siginfo __user *u_si,
+		     const struct __user_old_timespec __user *u_timeout));
+
+COBALT_SYSCALL_DECL(sigtimedwait64,
+		    (const sigset_t __user *u_set,
+		     struct siginfo __user *u_si,
+		     const struct __kernel_timespec __user *u_timeout));
+
+COBALT_SYSCALL_DECL(sigwaitinfo,
+		    (const sigset_t __user *u_set,
+		     struct siginfo __user *u_si));
+
+COBALT_SYSCALL_DECL(sigpending,
+		    (old_sigset_t __user *u_set));
+
+COBALT_SYSCALL_DECL(kill, (pid_t pid, int sig));
+
+COBALT_SYSCALL_DECL(sigqueue,
+		    (pid_t pid, int sig, const union sigval __user *u_value));
+
+int cobalt_signal_init(void);
+
+void cobalt_signal_cleanup(void);
+
+#endif /* !_COBALT_POSIX_SIGNAL_H */
+++ linux-patched/kernel/xenomai/posix/timerfd.h	2022-03-21 12:58:29.074892033 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/process.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef TIMERFD_H
+#define TIMERFD_H
+
+#include <linux/time.h>
+#include <xenomai/posix/syscall.h>
+
+int __cobalt_timerfd_settime(int fd, int flags,
+			     const struct itimerspec64 *new_value,
+			     struct itimerspec64 *old_value);
+
+int __cobalt_timerfd_gettime(int fd,
+			     struct itimerspec64 *value);
+
+COBALT_SYSCALL_DECL(timerfd_create,
+		    (int clockid, int flags));
+
+COBALT_SYSCALL_DECL(timerfd_settime,
+		    (int fd, int flags,
+		     const struct __user_old_itimerspec __user *new_value,
+		     struct __user_old_itimerspec __user *old_value));
+
+COBALT_SYSCALL_DECL(timerfd_gettime,
+		    (int fd, struct __user_old_itimerspec __user *curr_value));
+
+#endif /* TIMERFD_H */
+++ linux-patched/kernel/xenomai/posix/process.h	2022-03-21 12:58:29.071892062 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/sched.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_PROCESS_H
+#define _COBALT_POSIX_PROCESS_H
+
+#include <linux/list.h>
+#include <linux/bitmap.h>
+#include <pipeline/thread.h>
+#include <cobalt/kernel/ppd.h>
+
+#define NR_PERSONALITIES  4
+#if BITS_PER_LONG < NR_PERSONALITIES
+#error "NR_PERSONALITIES overflows internal bitmap"
+#endif
+
+struct mm_struct;
+struct xnthread_personality;
+struct cobalt_timer;
+
+struct cobalt_resources {
+	struct list_head condq;
+	struct list_head mutexq;
+	struct list_head semq;
+	struct list_head monitorq;
+	struct list_head eventq;
+	struct list_head schedq;
+};
+
+struct cobalt_process {
+	struct mm_struct *mm;
+	struct hlist_node hlink;
+	struct cobalt_ppd sys_ppd;
+	unsigned long permap;
+	struct rb_root usems;
+	struct list_head sigwaiters;
+	struct cobalt_resources resources;
+	struct list_head thread_list;
+	DECLARE_BITMAP(timers_map, CONFIG_XENO_OPT_NRTIMERS);
+	struct cobalt_timer *timers[CONFIG_XENO_OPT_NRTIMERS];
+	void *priv[NR_PERSONALITIES];
+	int ufeatures;
+	unsigned int debugged_threads;
+};
+
+struct cobalt_resnode {
+	struct cobalt_resources *scope;
+	struct cobalt_process *owner;
+	struct list_head next;
+	xnhandle_t handle;
+};
+
+int cobalt_register_personality(struct xnthread_personality *personality);
+
+int cobalt_unregister_personality(int xid);
+
+struct xnthread_personality *cobalt_push_personality(int xid);
+
+void cobalt_pop_personality(struct xnthread_personality *prev);
+
+int cobalt_bind_core(int ufeatures);
+
+int cobalt_bind_personality(unsigned int magic);
+
+struct cobalt_process *cobalt_search_process(struct mm_struct *mm);
+
+int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff);
+
+void *cobalt_get_context(int xid);
+
+int cobalt_yield(xnticks_t min, xnticks_t max);
+
+int cobalt_process_init(void);
+
+extern struct list_head cobalt_global_thread_list;
+
+extern struct cobalt_resources cobalt_global_resources;
+
+static inline struct cobalt_process *cobalt_current_process(void)
+{
+	return pipeline_current()->process;
+}
+
+static inline struct cobalt_process *
+cobalt_set_process(struct cobalt_process *process)
+{
+	struct cobalt_threadinfo *p = pipeline_current();
+	struct cobalt_process *old;
+
+	old = p->process;
+	p->process = process;
+
+	return old;
+}
+
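+/*
+ * Return the cobalt_ppd of the current Cobalt process, falling back
+ * to the kernel ppd when @global is non-zero or no Cobalt process is
+ * current.
+ */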
+static inline struct cobalt_ppd *cobalt_ppd_get(int global)
+{
+	struct cobalt_process *process;
+
+	if (global || (process = cobalt_current_process()) == NULL)
+		return &cobalt_kernel_ppd;
+
+	return &process->sys_ppd;
+}
+
+static inline struct cobalt_resources *cobalt_current_resources(int pshared)
+{
+	struct cobalt_process *process;
+
+	if (pshared || (process = cobalt_current_process()) == NULL)
+		return &cobalt_global_resources;
+
+	return &process->resources;
+}
+
+static inline
+void __cobalt_add_resource(struct cobalt_resnode *node, int pshared)
+{
+	node->owner = cobalt_current_process();
+	node->scope = cobalt_current_resources(pshared);
+}
+
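+/*
+ * Link a resource node into the queue named by @__type (e.g. mutexq,
+ * semq) within either the per-process or the global resource scope,
+ * as selected by @__pshared.
+ */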
+#define cobalt_add_resource(__node, __type, __pshared)			\
+	do {								\
+		__cobalt_add_resource(__node, __pshared);		\
+		list_add_tail(&(__node)->next,				\
+			      &((__node)->scope)->__type ## q);		\
+	} while (0)
+
+static inline
+void cobalt_del_resource(struct cobalt_resnode *node)
+{
+	list_del(&node->next);
+}
+
+void cobalt_remove_process(struct cobalt_process *process);
+
+void cobalt_signal_yield(void);
+
+void cobalt_stop_debugged_process(struct xnthread *thread);
+
+void cobalt_register_debugged_thread(struct xnthread *thread);
+
+void cobalt_unregister_debugged_thread(struct xnthread *thread);
+
+extern struct xnthread_personality *cobalt_personalities[];
+
+extern struct xnthread_personality cobalt_personality;
+
+int cobalt_handle_setaffinity_event(struct task_struct *task);
+
+#ifdef CONFIG_SMP
+void cobalt_adjust_affinity(struct task_struct *task);
+#else
+static inline void cobalt_adjust_affinity(struct task_struct *task) { }
+#endif
+
+int cobalt_handle_taskexit_event(struct task_struct *task);
+
+int cobalt_handle_cleanup_event(struct mm_struct *mm);
+
+int cobalt_handle_user_return(struct task_struct *task);
+
+#endif /* !_COBALT_POSIX_PROCESS_H */
+++ linux-patched/kernel/xenomai/posix/sched.c	2022-03-21 12:58:29.067892101 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/sem.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include "internal.h"
+#include "thread.h"
+#include "sched.h"
+#include "clock.h"
+#include <trace/events/cobalt-posix.h>
+
+struct xnsched_class *
+cobalt_sched_policy_param(union xnsched_policy_param *param,
+			  int u_policy, const struct sched_param_ex *param_ex,
+			  xnticks_t *tslice_r)
+{
+	struct xnsched_class *sched_class;
+	int prio, policy;
+	xnticks_t tslice;
+
+	prio = param_ex->sched_priority;
+	tslice = XN_INFINITE;
+	policy = u_policy;
+
+	/*
+	 * NOTE: The user-defined policy may differ from ours,
+	 * e.g. SCHED_FIFO,prio=-7 from userland would be interpreted
+	 * as SCHED_WEAK,prio=7 in kernel space.
+	 */
+	if (prio < 0) {
+		prio = -prio;
+		policy = SCHED_WEAK;
+	}
+	sched_class = &xnsched_class_rt;
+	param->rt.prio = prio;
+
+	switch (policy) {
+	case SCHED_NORMAL:
+		if (prio)
+			return NULL;
+		/*
+		 * When the weak scheduling class is compiled in,
+		 * SCHED_WEAK and SCHED_NORMAL threads are scheduled
+		 * by xnsched_class_weak, at their respective priority
+		 * levels. Otherwise, SCHED_NORMAL is scheduled by
+		 * xnsched_class_rt at priority level #0.
+		 */
+		fallthrough;
+	case SCHED_WEAK:
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+		if (prio < XNSCHED_WEAK_MIN_PRIO ||
+		    prio > XNSCHED_WEAK_MAX_PRIO)
+			return NULL;
+		param->weak.prio = prio;
+		sched_class = &xnsched_class_weak;
+#else
+		if (prio)
+			return NULL;
+#endif
+		break;
+	case SCHED_RR:
+		/* If unspecified, keep the current quantum. */
+		tslice = u_ts2ns(&param_ex->sched_rr_quantum);
+		if (tslice == XN_INFINITE && tslice_r)
+			tslice = *tslice_r;
+		fallthrough;
+	case SCHED_FIFO:
+		if (prio < XNSCHED_FIFO_MIN_PRIO ||
+		    prio > XNSCHED_FIFO_MAX_PRIO)
+			return NULL;
+		break;
+	case SCHED_COBALT:
+		if (prio < XNSCHED_CORE_MIN_PRIO ||
+		    prio > XNSCHED_CORE_MAX_PRIO)
+			return NULL;
+		break;
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	case SCHED_SPORADIC:
+		param->pss.normal_prio = param_ex->sched_priority;
+		param->pss.low_prio = param_ex->sched_ss_low_priority;
+		param->pss.current_prio = param->pss.normal_prio;
+		param->pss.init_budget = u_ts2ns(&param_ex->sched_ss_init_budget);
+		param->pss.repl_period = u_ts2ns(&param_ex->sched_ss_repl_period);
+		param->pss.max_repl = param_ex->sched_ss_max_repl;
+		sched_class = &xnsched_class_sporadic;
+		break;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	case SCHED_TP:
+		param->tp.prio = param_ex->sched_priority;
+		param->tp.ptid = param_ex->sched_tp_partition;
+		sched_class = &xnsched_class_tp;
+		break;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	case SCHED_QUOTA:
+		param->quota.prio = param_ex->sched_priority;
+		param->quota.tgid = param_ex->sched_quota_group;
+		sched_class = &xnsched_class_quota;
+		break;
+#endif
+	default:
+		return NULL;
+	}
+
+	if (tslice_r)
+		*tslice_r = tslice;
+
+	return sched_class;
+}
+
+COBALT_SYSCALL(sched_minprio, current, (int policy))
+{
+	int ret;
+
+	switch (policy) {
+	case SCHED_FIFO:
+	case SCHED_RR:
+	case SCHED_SPORADIC:
+	case SCHED_TP:
+	case SCHED_QUOTA:
+		ret = XNSCHED_FIFO_MIN_PRIO;
+		break;
+	case SCHED_COBALT:
+		ret = XNSCHED_CORE_MIN_PRIO;
+		break;
+	case SCHED_NORMAL:
+	case SCHED_WEAK:
+		ret = 0;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	trace_cobalt_sched_min_prio(policy, ret);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sched_maxprio, current, (int policy))
+{
+	int ret;
+
+	switch (policy) {
+	case SCHED_FIFO:
+	case SCHED_RR:
+	case SCHED_SPORADIC:
+	case SCHED_TP:
+	case SCHED_QUOTA:
+		ret = XNSCHED_FIFO_MAX_PRIO;
+		break;
+	case SCHED_COBALT:
+		ret = XNSCHED_CORE_MAX_PRIO;
+		break;
+	case SCHED_NORMAL:
+		ret = 0;
+		break;
+	case SCHED_WEAK:
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+		ret = XNSCHED_FIFO_MAX_PRIO;
+#else
+		ret = 0;
+#endif
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	trace_cobalt_sched_max_prio(policy, ret);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sched_yield, primary, (void))
+{
+	struct cobalt_thread *curr = cobalt_current_thread();
+	int ret = 0;
+
+	trace_cobalt_pthread_yield(0);
+
+	/* Maybe some extension wants to handle this. */
+	if (cobalt_call_extension(sched_yield, &curr->extref, ret) && ret)
+		return ret > 0 ? 0 : ret;
+
+	xnthread_resume(&curr->threadbase, 0);
+	if (xnsched_run())
+		return 0;
+
+	/*
+	 * If the round-robin move did not beget any context switch to
+	 * a thread running in primary mode, then wait for the next
+	 * linux context switch to happen.
+	 *
+	 * Rationale: it is most probably unexpected that
+	 * sched_yield() does not cause any context switch, since this
+	 * service is commonly used for implementing a poor man's
+	 * cooperative scheduling. By waiting for a context switch to
+	 * happen in the regular kernel, we guarantee that the CPU has
+	 * been relinquished for a while.
+	 *
+	 * Typically, this behavior allows a thread running in primary
+	 * mode to effectively yield the CPU to a thread of
+	 * same/higher priority stuck in secondary mode.
+	 *
+	 * NOTE: calling cobalt_yield() with no timeout
+	 * (i.e. XN_INFINITE) is probably never a good idea. This
+	 * means that a SCHED_FIFO non-rt thread stuck in a tight loop
+	 * would prevent the caller from waking up, since no
+	 * linux-originated schedule event would happen for unblocking
+	 * it on the current CPU. For this reason, we pass the
+	 * arbitrary TICK_NSEC value to limit the wait time to a
+	 * reasonable amount.
+	 */
+	return cobalt_yield(TICK_NSEC, TICK_NSEC);
+}
+
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+
+static inline
+int set_tp_config(int cpu, union sched_config *config, size_t len)
+{
+	xnticks_t offset, duration, next_offset;
+	struct xnsched_tp_schedule *gps, *ogps;
+	struct xnsched_tp_window *w;
+	struct sched_tp_window *p;
+	struct xnsched *sched;
+	spl_t s;
+	int n;
+
+	if (len < sizeof(config->tp))
+		return -EINVAL;
+
+	sched = xnsched_struct(cpu);
+
+	switch (config->tp.op) {
+	case sched_tp_install:
+		if (config->tp.nr_windows > 0)
+			break;
+		fallthrough;
+	case sched_tp_uninstall:
+		gps = NULL;
+		goto set_schedule;
+	case sched_tp_start:
+		xnlock_get_irqsave(&nklock, s);
+		xnsched_tp_start_schedule(sched);
+		xnlock_put_irqrestore(&nklock, s);
+		return 0;
+	case sched_tp_stop:
+		xnlock_get_irqsave(&nklock, s);
+		xnsched_tp_stop_schedule(sched);
+		xnlock_put_irqrestore(&nklock, s);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	/* Install a new TP schedule on CPU. */
+
+	gps = xnmalloc(sizeof(*gps) + config->tp.nr_windows * sizeof(*w));
+	if (gps == NULL)
+		return -ENOMEM;
+
+	for (n = 0, p = config->tp.windows, w = gps->pwins, next_offset = 0;
+	     n < config->tp.nr_windows; n++, p++, w++) {
+		/*
+		 * Time windows must be strictly contiguous. Holes may
+		 * be defined using windows assigned to the pseudo
+		 * partition #-1.
+		 */
+		offset = u_ts2ns(&p->offset);
+		if (offset != next_offset)
+			goto cleanup_and_fail;
+
+		duration = u_ts2ns(&p->duration);
+		if (duration <= 0)
+			goto cleanup_and_fail;
+
+		if (p->ptid < -1 ||
+		    p->ptid >= CONFIG_XENO_OPT_SCHED_TP_NRPART)
+			goto cleanup_and_fail;
+
+		w->w_offset = next_offset;
+		w->w_part = p->ptid;
+		next_offset += duration;
+	}
+
+	atomic_set(&gps->refcount, 1);
+	gps->pwin_nr = n;
+	gps->tf_duration = next_offset;
+set_schedule:
+	xnlock_get_irqsave(&nklock, s);
+	ogps = xnsched_tp_set_schedule(sched, gps);
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (ogps)
+		xnsched_tp_put_schedule(ogps);
+
+	return 0;
+
+cleanup_and_fail:
+	xnfree(gps);
+
+	return -EINVAL;
+}
+
+static inline
+ssize_t get_tp_config(int cpu, void __user *u_config, size_t len,
+		      union sched_config *(*fetch_config)
+		      (int policy, const void __user *u_config,
+		       size_t *len),
+		      ssize_t (*put_config)(int policy, void __user *u_config,
+					    size_t u_len,
+					    const union sched_config *config,
+					    size_t len))
+{
+	struct xnsched_tp_window *pw, *w;
+	struct xnsched_tp_schedule *gps;
+	struct sched_tp_window *pp, *p;
+	union sched_config *config;
+	struct xnsched *sched;
+	ssize_t ret, elen;
+	spl_t s;
+	int n;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sched = xnsched_struct(cpu);
+	gps = xnsched_tp_get_schedule(sched);
+	if (gps == NULL) {
+		xnlock_put_irqrestore(&nklock, s);
+		return 0;
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	elen = sched_tp_confsz(gps->pwin_nr);
+	config = xnmalloc(elen);
+	if (config == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	config->tp.op = sched_tp_install;
+	config->tp.nr_windows = gps->pwin_nr;
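+	/*
+	 * Rebuild the user-visible durations from consecutive window
+	 * offsets; the last window is closed against the total frame
+	 * duration right after the loop.
+	 */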
+	for (n = 0, pp = p = config->tp.windows, pw = w = gps->pwins;
+	     n < gps->pwin_nr; pp = p, p++, pw = w, w++, n++) {
+		u_ns2ts(&p->offset, w->w_offset);
+		u_ns2ts(&pp->duration, w->w_offset - pw->w_offset);
+		p->ptid = w->w_part;
+	}
+	u_ns2ts(&pp->duration, gps->tf_duration - pw->w_offset);
+	ret = put_config(SCHED_TP, u_config, len, config, elen);
+	xnfree(config);
+out:
+	xnsched_tp_put_schedule(gps);
+
+	return ret;
+}
+
+#else /* !CONFIG_XENO_OPT_SCHED_TP */
+
+static inline int
+set_tp_config(int cpu, union sched_config *config, size_t len)
+{
+	return -EINVAL;
+}
+
+static inline ssize_t
+get_tp_config(int cpu, union sched_config __user *u_config, size_t len,
+	      union sched_config *(*fetch_config)
+	      (int policy, const void __user *u_config,
+	       size_t *len),
+	      ssize_t (*put_config)(int policy, void __user *u_config,
+				    size_t u_len,
+				    const union sched_config *config,
+				    size_t len))
+{
+	return -EINVAL;
+}
+
+#endif /* !CONFIG_XENO_OPT_SCHED_TP */
+
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+
+static inline
+int set_quota_config(int cpu, union sched_config *config, size_t len)
+{
+	struct __sched_config_quota *p = &config->quota;
+	struct __sched_quota_info *iq = &p->info;
+	struct cobalt_sched_group *group;
+	struct xnsched_quota_group *tg;
+	struct xnsched *sched;
+	int ret, quota_sum;
+	spl_t s;
+
+	if (len < sizeof(*p))
+		return -EINVAL;
+
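+	/*
+	 * Quota groups created by sched_quota_add are linked to the
+	 * schedq of the current resource scope, so that
+	 * cobalt_sched_reclaim() can dispose of leftover groups when
+	 * the owning process exits.
+	 */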
+	switch (p->op) {
+	case sched_quota_add:
+		group = xnmalloc(sizeof(*group));
+		if (group == NULL)
+			return -ENOMEM;
+		tg = &group->quota;
+		group->pshared = p->add.pshared != 0;
+		group->scope = cobalt_current_resources(group->pshared);
+		xnlock_get_irqsave(&nklock, s);
+		sched = xnsched_struct(cpu);
+		ret = xnsched_quota_create_group(tg, sched, &quota_sum);
+		if (ret) {
+			xnlock_put_irqrestore(&nklock, s);
+			xnfree(group);
+			return ret;
+		}
+		list_add(&group->next, &group->scope->schedq);
+		xnlock_put_irqrestore(&nklock, s);
+		break;
+	case sched_quota_remove:
+	case sched_quota_force_remove:
+		xnlock_get_irqsave(&nklock, s);
+		sched = xnsched_struct(cpu);
+		tg = xnsched_quota_find_group(sched, p->remove.tgid);
+		if (tg == NULL)
+			goto bad_tgid;
+		group = container_of(tg, struct cobalt_sched_group, quota);
+		if (group->scope != cobalt_current_resources(group->pshared))
+			goto bad_tgid;
+		ret = xnsched_quota_destroy_group(tg,
+						  p->op == sched_quota_force_remove,
+						  &quota_sum);
+		if (ret) {
+			xnlock_put_irqrestore(&nklock, s);
+			return ret;
+		}
+		list_del(&group->next);
+		xnlock_put_irqrestore(&nklock, s);
+		iq->tgid = tg->tgid;
+		iq->quota = tg->quota_percent;
+		iq->quota_peak = tg->quota_peak_percent;
+		iq->quota_sum = quota_sum;
+		xnfree(group);
+		return 0;
+	case sched_quota_set:
+		xnlock_get_irqsave(&nklock, s);
+		sched = xnsched_struct(cpu);
+		tg = xnsched_quota_find_group(sched, p->set.tgid);
+		if (tg == NULL)
+			goto bad_tgid;
+		group = container_of(tg, struct cobalt_sched_group, quota);
+		if (group->scope != cobalt_current_resources(group->pshared))
+			goto bad_tgid;
+		xnsched_quota_set_limit(tg, p->set.quota, p->set.quota_peak,
+					&quota_sum);
+		xnlock_put_irqrestore(&nklock, s);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	iq->tgid = tg->tgid;
+	iq->quota = tg->quota_percent;
+	iq->quota_peak = tg->quota_peak_percent;
+	iq->quota_sum = quota_sum;
+
+	return 0;
+bad_tgid:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return -ESRCH;
+}
+
+static inline
+ssize_t get_quota_config(int cpu, void __user *u_config, size_t len,
+			 union sched_config *(*fetch_config)
+			 (int policy, const void __user *u_config,
+			  size_t *len),
+			 ssize_t (*put_config)(int policy, void __user *u_config,
+					       size_t u_len,
+					       const union sched_config *config,
+					       size_t len))
+{
+	struct cobalt_sched_group *group;
+	struct xnsched_quota_group *tg;
+	union sched_config *config;
+	struct xnsched *sched;
+	ssize_t ret;
+	spl_t s;
+
+	config = fetch_config(SCHED_QUOTA, u_config, &len);
+	if (IS_ERR(config))
+		return PTR_ERR(config);
+
+	xnlock_get_irqsave(&nklock, s);
+	sched = xnsched_struct(cpu);
+	tg = xnsched_quota_find_group(sched, config->quota.get.tgid);
+	if (tg == NULL)
+		goto bad_tgid;
+
+	group = container_of(tg, struct cobalt_sched_group, quota);
+	if (group->scope != cobalt_current_resources(group->pshared))
+		goto bad_tgid;
+
+	config->quota.info.tgid = tg->tgid;
+	config->quota.info.quota = tg->quota_percent;
+	config->quota.info.quota_peak = tg->quota_peak_percent;
+	config->quota.info.quota_sum = xnsched_quota_sum_all(sched);
+	xnlock_put_irqrestore(&nklock, s);
+
+	ret = put_config(SCHED_QUOTA, u_config, len, config, sizeof(*config));
+	xnfree(config);
+
+	return ret;
+bad_tgid:
+	xnlock_put_irqrestore(&nklock, s);
+	xnfree(config);
+
+	return -ESRCH;
+}
+
+#else /* !CONFIG_XENO_OPT_SCHED_QUOTA */
+
+static inline
+int set_quota_config(int cpu, union sched_config *config, size_t len)
+{
+	return -EINVAL;
+}
+
+static inline
+ssize_t get_quota_config(int cpu, void __user *u_config,
+			 size_t len,
+			 union sched_config *(*fetch_config)
+			 (int policy, const void __user *u_config,
+			  size_t *len),
+			 ssize_t (*put_config)(int policy, void __user *u_config,
+					       size_t u_len,
+					       const union sched_config *config,
+					       size_t len))
+{
+	return -EINVAL;
+}
+
+#endif /* !CONFIG_XENO_OPT_SCHED_QUOTA */
+
+static union sched_config *
+sched_fetch_config(int policy, const void __user *u_config, size_t *len)
+{
+	union sched_config *buf;
+	int ret;
+
+	if (u_config == NULL)
+		return ERR_PTR(-EFAULT);
+
+	if (policy == SCHED_QUOTA && *len < sizeof(buf->quota))
+		return ERR_PTR(-EINVAL);
+
+	buf = xnmalloc(*len);
+	if (buf == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = cobalt_copy_from_user(buf, u_config, *len);
+	if (ret) {
+		xnfree(buf);
+		return ERR_PTR(ret);
+	}
+
+	return buf;
+}
+
+static int sched_ack_config(int policy, const union sched_config *config,
+			    void __user *u_config)
+{
+	union sched_config __user *u_p = u_config;
+
+	if (policy != SCHED_QUOTA)
+		return 0;
+
+	return u_p == NULL ? -EFAULT :
+		cobalt_copy_to_user(&u_p->quota.info, &config->quota.info,
+				       sizeof(u_p->quota.info));
+}
+
+static ssize_t sched_put_config(int policy,
+				void __user *u_config, size_t u_len,
+				const union sched_config *config, size_t len)
+{
+	union sched_config *u_p = u_config;
+
+	if (u_config == NULL)
+		return -EFAULT;
+
+	if (policy == SCHED_QUOTA) {
+		if (u_len < sizeof(config->quota))
+			return -EINVAL;
+		return cobalt_copy_to_user(&u_p->quota.info, &config->quota.info,
+					      sizeof(u_p->quota.info)) ?:
+			sizeof(u_p->quota.info);
+	}
+
+	return cobalt_copy_to_user(u_config, config, len) ?: len;
+}
+
+int __cobalt_sched_setconfig_np(int cpu, int policy,
+				void __user *u_config,
+				size_t len,
+				union sched_config *(*fetch_config)
+				(int policy, const void __user *u_config,
+				 size_t *len),
+				int (*ack_config)(int policy,
+						  const union sched_config *config,
+						  void __user *u_config))
+{
+	union sched_config *buf;
+	int ret;
+
+	trace_cobalt_sched_setconfig(cpu, policy, len);
+
+	if (cpu < 0 || cpu >= NR_CPUS || !xnsched_threading_cpu(cpu))
+		return -EINVAL;
+
+	if (len == 0)
+		return -EINVAL;
+
+	buf = fetch_config(policy, u_config, &len);
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
+
+	switch (policy)	{
+	case SCHED_TP:
+		ret = set_tp_config(cpu, buf, len);
+		break;
+	case SCHED_QUOTA:
+		ret = set_quota_config(cpu, buf, len);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	if (ret == 0)
+		ret = ack_config(policy, buf, u_config);
+
+	xnfree(buf);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sched_setconfig_np, conforming,
+	       (int cpu, int policy,
+		union sched_config __user *u_config,
+		size_t len))
+{
+	return __cobalt_sched_setconfig_np(cpu, policy, u_config, len,
+					   sched_fetch_config, sched_ack_config);
+}
+
+ssize_t __cobalt_sched_getconfig_np(int cpu, int policy,
+				    void __user *u_config,
+				    size_t len,
+				    union sched_config *(*fetch_config)
+				    (int policy, const void __user *u_config,
+				     size_t *len),
+				    ssize_t (*put_config)(int policy,
+							  void __user *u_config,
+							  size_t u_len,
+							  const union sched_config *config,
+							  size_t len))
+{
+	ssize_t ret;
+
+	switch (policy)	{
+	case SCHED_TP:
+		ret = get_tp_config(cpu, u_config, len,
+				    fetch_config, put_config);
+		break;
+	case SCHED_QUOTA:
+		ret = get_quota_config(cpu, u_config, len,
+				       fetch_config, put_config);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	trace_cobalt_sched_get_config(cpu, policy, ret);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sched_getconfig_np, conforming,
+	       (int cpu, int policy,
+		union sched_config __user *u_config,
+		size_t len))
+{
+	return __cobalt_sched_getconfig_np(cpu, policy, u_config, len,
+					   sched_fetch_config, sched_put_config);
+}
+
+int __cobalt_sched_weightprio(int policy,
+			      const struct sched_param_ex *param_ex)
+{
+	struct xnsched_class *sched_class;
+	union xnsched_policy_param param;
+	int prio;
+
+	sched_class = cobalt_sched_policy_param(&param, policy,
+						param_ex, NULL);
+	if (sched_class == NULL)
+		return -EINVAL;
+
+	prio = param_ex->sched_priority;
+	if (prio < 0)
+		prio = -prio;
+
+	return prio + sched_class->weight;
+}
+
+COBALT_SYSCALL(sched_weightprio, current,
+	       (int policy, const struct sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+
+	if (cobalt_copy_from_user(&param_ex, u_param, sizeof(param_ex)))
+		return -EFAULT;
+
+	return __cobalt_sched_weightprio(policy, &param_ex);
+}
+
+int cobalt_sched_setscheduler_ex(pid_t pid,
+				 int policy,
+				 const struct sched_param_ex *param_ex,
+				 __u32 __user *u_winoff,
+				 int __user *u_promoted)
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	int ret, promoted = 0;
+	spl_t s;
+
+	trace_cobalt_sched_setscheduler(pid, policy, param_ex);
+
+	if (pid) {
+		xnlock_get_irqsave(&nklock, s);
+		thread = cobalt_thread_find(pid);
+		xnlock_put_irqrestore(&nklock, s);
+	} else
+		thread = cobalt_current_thread();
+
+	if (thread == NULL) {
+		if (u_winoff == NULL || pid != task_pid_vnr(current))
+			return -ESRCH;
+
+		thread = cobalt_thread_shadow(&hkey, u_winoff);
+		if (IS_ERR(thread))
+			return PTR_ERR(thread);
+
+		promoted = 1;
+	}
+
+	ret = __cobalt_thread_setschedparam_ex(thread, policy, param_ex);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_promoted, &promoted, sizeof(promoted));
+}
+
+COBALT_SYSCALL(sched_setscheduler_ex, conforming,
+	       (pid_t pid,
+		int policy,
+		const struct sched_param_ex __user *u_param,
+		__u32 __user *u_winoff,
+		int __user *u_promoted))
+{
+	struct sched_param_ex param_ex;
+
+	if (cobalt_copy_from_user(&param_ex, u_param, sizeof(param_ex)))
+		return -EFAULT;
+
+	return cobalt_sched_setscheduler_ex(pid, policy, &param_ex,
+					    u_winoff, u_promoted);
+}
+
+int cobalt_sched_getscheduler_ex(pid_t pid,
+				 int *policy_r,
+				 struct sched_param_ex *param_ex)
+{
+	struct cobalt_thread *thread;
+	spl_t s;
+
+	trace_cobalt_sched_getscheduler(pid);
+
+	if (pid) {
+		xnlock_get_irqsave(&nklock, s);
+		thread = cobalt_thread_find(pid);
+		xnlock_put_irqrestore(&nklock, s);
+	} else
+		thread = cobalt_current_thread();
+
+	if (thread == NULL)
+		return -ESRCH;
+
+	return __cobalt_thread_getschedparam_ex(thread, policy_r, param_ex);
+}
+
+COBALT_SYSCALL(sched_getscheduler_ex, current,
+	       (pid_t pid,
+		int __user *u_policy,
+		struct sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+	int ret, policy;
+
+	ret = cobalt_sched_getscheduler_ex(pid, &policy, &param_ex);
+	if (ret)
+		return ret;
+
+	if (cobalt_copy_to_user(u_param, &param_ex, sizeof(param_ex)) ||
+	    cobalt_copy_to_user(u_policy, &policy, sizeof(policy)))
+		return -EFAULT;
+
+	return 0;
+}
+
+void cobalt_sched_reclaim(struct cobalt_process *process)
+{
+	struct cobalt_resources *p = &process->resources;
+	struct cobalt_sched_group *group;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	while (!list_empty(&p->schedq)) {
+		group = list_get_entry(&p->schedq, struct cobalt_sched_group, next);
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+		xnsched_quota_destroy_group(&group->quota, 1, NULL);
+#endif
+		xnlock_put_irqrestore(&nklock, s);
+		xnfree(group);
+		xnlock_get_irqsave(&nklock, s);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+++ linux-patched/kernel/xenomai/posix/sem.h	2022-03-21 12:58:29.064892131 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/corectl.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_SEM_H
+#define _COBALT_POSIX_SEM_H
+
+#include <linux/kernel.h>
+#include <linux/fcntl.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/registry.h>
+#include <xenomai/posix/syscall.h>
+#include <xenomai/posix/process.h>
+
+struct cobalt_process;
+struct filename;
+
+struct cobalt_sem {
+	unsigned int magic;
+	struct xnsynch synchbase;
+	struct cobalt_sem_state *state;
+	int flags;
+	unsigned int refs;
+	struct filename *pathname;
+	struct cobalt_resnode resnode;
+};
+
+/* Copied from Linuxthreads semaphore.h. */
+struct _sem_fastlock
+{
+  long int __status;
+  int __spinlock;
+};
+
+typedef struct
+{
+  struct _sem_fastlock __sem_lock;
+  int __sem_value;
+  long __sem_waiting;
+} sem_t;
+
+#include <cobalt/uapi/sem.h>
+
+#define SEM_VALUE_MAX	(INT_MAX)
+#define SEM_FAILED	NULL
+#define SEM_NAMED	0x80000000
+
+struct cobalt_sem_shadow __user *
+__cobalt_sem_open(struct cobalt_sem_shadow __user *usm,
+		  const char __user *u_name,
+		  int oflags, mode_t mode, unsigned int value);
+
+int __cobalt_sem_timedwait(struct cobalt_sem_shadow __user *u_sem,
+			   const struct timespec64 *ts);
+
+int __cobalt_sem_timedwait64(struct cobalt_sem_shadow __user *u_sem,
+			     const struct __kernel_timespec __user *u_ts);
+
+int __cobalt_sem_destroy(xnhandle_t handle);
+
+void cobalt_nsem_reclaim(struct cobalt_process *process);
+
+struct cobalt_sem *
+__cobalt_sem_init(const char *name, struct cobalt_sem_shadow *sem,
+		  int flags, unsigned value);
+
+void __cobalt_sem_shadow_init(struct cobalt_sem *sem, __u32 magic,
+			      struct cobalt_sem_shadow *sm);
+
+COBALT_SYSCALL_DECL(sem_init,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     int flags, unsigned value));
+
+COBALT_SYSCALL_DECL(sem_post,
+		    (struct cobalt_sem_shadow __user *u_sem));
+
+COBALT_SYSCALL_DECL(sem_wait,
+		    (struct cobalt_sem_shadow __user *u_sem));
+
+COBALT_SYSCALL_DECL(sem_timedwait,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     const struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(sem_timedwait64,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(sem_trywait,
+		    (struct cobalt_sem_shadow __user *u_sem));
+
+COBALT_SYSCALL_DECL(sem_getvalue,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     int __user *u_sval));
+
+COBALT_SYSCALL_DECL(sem_destroy,
+		    (struct cobalt_sem_shadow __user *u_sem));
+
+COBALT_SYSCALL_DECL(sem_open,
+		    (struct cobalt_sem_shadow __user *__user *u_addrp,
+		     const char __user *u_name,
+		     int oflags, mode_t mode, unsigned int value));
+
+COBALT_SYSCALL_DECL(sem_close,
+		    (struct cobalt_sem_shadow __user *usm));
+
+COBALT_SYSCALL_DECL(sem_unlink, (const char __user *u_name));
+
+COBALT_SYSCALL_DECL(sem_broadcast_np,
+		    (struct cobalt_sem_shadow __user *u_sem));
+
+COBALT_SYSCALL_DECL(sem_inquire,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     struct cobalt_sem_info __user *u_info,
+		     pid_t __user *u_waitlist,
+		     size_t waitsz));
+
+void cobalt_sem_reclaim(struct cobalt_resnode *node,
+			spl_t s);
+
+#endif /* !_COBALT_POSIX_SEM_H */
+++ linux-patched/kernel/xenomai/posix/corectl.h	2022-03-21 12:58:29.060892170 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/mqueue.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_CORECTL_H
+#define _COBALT_POSIX_CORECTL_H
+
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include <xenomai/posix/syscall.h>
+#include <cobalt/uapi/corectl.h>
+
+struct cobalt_config_vector {
+	void __user *u_buf;
+	size_t u_bufsz;
+};
+
+COBALT_SYSCALL_DECL(corectl,
+		    (int request, void __user *u_buf, size_t u_bufsz));
+
+void cobalt_add_config_chain(struct notifier_block *nb);
+
+void cobalt_remove_config_chain(struct notifier_block *nb);
+
+#endif /* !_COBALT_POSIX_CORECTL_H */
+++ linux-patched/kernel/xenomai/posix/mqueue.c	2022-03-21 12:58:29.057892199 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/signal.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/stdarg.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <cobalt/kernel/select.h>
+#include <rtdm/fd.h>
+#include "internal.h"
+#include "thread.h"
+#include "signal.h"
+#include "timer.h"
+#include "mqueue.h"
+#include "clock.h"
+#include <trace/events/cobalt-posix.h>
+#include <cobalt/kernel/time.h>
+
+#define COBALT_MSGMAX		65536
+#define COBALT_MSGSIZEMAX	(16*1024*1024)
+#define COBALT_MSGPRIOMAX	32768
+
+struct cobalt_mq {
+	unsigned magic;
+
+	struct list_head link;
+
+	struct xnsynch receivers;
+	struct xnsynch senders;
+	size_t memsize;
+	char *mem;
+	struct list_head queued;
+	struct list_head avail;
+	int nrqueued;
+
+	/* mq_notify */
+	struct siginfo si;
+	mqd_t target_qd;
+	struct cobalt_thread *target;
+
+	struct mq_attr attr;
+
+	unsigned refs;
+	char name[COBALT_MAXNAME];
+	xnhandle_t handle;
+
+	DECLARE_XNSELECT(read_select);
+	DECLARE_XNSELECT(write_select);
+};
+
+struct cobalt_mqd {
+	struct cobalt_mq *mq;
+	struct rtdm_fd fd;
+};
+
+struct cobalt_msg {
+	struct list_head link;
+	unsigned int prio;
+	size_t len;
+	char data[0];
+};
+
+struct cobalt_mqwait_context {
+	struct xnthread_wait_context wc;
+	struct cobalt_msg *msg;
+};
+
+static struct mq_attr default_attr = {
+      .mq_maxmsg = 10,
+      .mq_msgsize = 8192,
+};
+
+static LIST_HEAD(cobalt_mqq);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static int mq_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	return 0;
+}
+
+static struct xnvfile_regular_ops mq_vfile_ops = {
+	.show = mq_vfile_show,
+};
+
+static struct xnpnode_regular __mq_pnode = {
+	.node = {
+		.dirname = "mqueue",
+		.root = &posix_ptree,
+		.ops = &xnregistry_vfreg_ops,
+	},
+	.vfile = {
+		.ops = &mq_vfile_ops,
+	},
+};
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+static struct xnpnode_link __mq_pnode = {
+	.node = {
+		.dirname = "mqueue",
+	}
+};
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+
+static inline struct cobalt_msg *mq_msg_alloc(struct cobalt_mq *mq)
+{
+	if (list_empty(&mq->avail))
+		return NULL;
+
+	return list_get_entry(&mq->avail, struct cobalt_msg, link);
+}
+
+static inline void mq_msg_free(struct cobalt_mq *mq, struct cobalt_msg * msg)
+{
+	list_add(&msg->link, &mq->avail); /* For earliest re-use of the block. */
+}
+
+static inline int mq_init(struct cobalt_mq *mq, const struct mq_attr *attr)
+{
+	unsigned i, msgsize, memsize;
+	char *mem;
+
+	if (attr == NULL)
+		attr = &default_attr;
+	else {
+		if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
+			return -EINVAL;
+		if (attr->mq_maxmsg > COBALT_MSGMAX)
+			return -EINVAL;
+		if (attr->mq_msgsize > COBALT_MSGSIZEMAX)
+			return -EINVAL;
+	}
+
+	msgsize = attr->mq_msgsize + sizeof(struct cobalt_msg);
+
+	/* Align msgsize on natural boundary. */
+	if ((msgsize % sizeof(unsigned long)))
+		msgsize +=
+		    sizeof(unsigned long) - (msgsize % sizeof(unsigned long));
+
+	memsize = msgsize * attr->mq_maxmsg;
+	memsize = PAGE_ALIGN(memsize);
+	if (get_order(memsize) > MAX_ORDER)
+		return -ENOSPC;
+
+	mem = xnheap_vmalloc(memsize);
+	if (mem == NULL)
+		return -ENOSPC;
+
+	mq->memsize = memsize;
+	INIT_LIST_HEAD(&mq->queued);
+	mq->nrqueued = 0;
+	xnsynch_init(&mq->receivers, XNSYNCH_PRIO, NULL);
+	xnsynch_init(&mq->senders, XNSYNCH_PRIO, NULL);
+	mq->mem = mem;
+
+	/* Fill the pool. */
+	INIT_LIST_HEAD(&mq->avail);
+	for (i = 0; i < attr->mq_maxmsg; i++) {
+		struct cobalt_msg *msg = (struct cobalt_msg *) (mem + i * msgsize);
+		mq_msg_free(mq, msg);
+	}
+
+	mq->attr = *attr;
+	mq->target = NULL;
+	xnselect_init(&mq->read_select);
+	xnselect_init(&mq->write_select);
+	mq->magic = COBALT_MQ_MAGIC;
+	mq->refs = 2;
+	INIT_LIST_HEAD(&mq->link);
+
+	return 0;
+}
+
+static inline void mq_destroy(struct cobalt_mq *mq)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	xnsynch_destroy(&mq->receivers);
+	xnsynch_destroy(&mq->senders);
+	list_del(&mq->link);
+	xnsched_run();
+	xnlock_put_irqrestore(&nklock, s);
+	xnselect_destroy(&mq->read_select); /* Reschedules. */
+	xnselect_destroy(&mq->write_select); /* Ditto. */
+	xnregistry_remove(mq->handle);
+	xnheap_vfree(mq->mem);
+	kfree(mq);
+}
+
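+/*
+ * Drop one reference to @mq. Called with nklock held; the lock is
+ * always released, and the queue is destroyed once the last reference
+ * is gone.
+ */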
+static int mq_unref_inner(struct cobalt_mq *mq, spl_t s)
+{
+	int destroy;
+
+	destroy = --mq->refs == 0;
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (destroy)
+		mq_destroy(mq);
+
+	return destroy;
+}
+
+static int mq_unref(struct cobalt_mq *mq)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	return mq_unref_inner(mq, s);
+}
+
+static void mqd_close(struct rtdm_fd *fd)
+{
+	struct cobalt_mqd *mqd = container_of(fd, struct cobalt_mqd, fd);
+	struct cobalt_mq *mq = mqd->mq;
+
+	kfree(mqd);
+	mq_unref(mq);
+}
+
+int
+mqd_select(struct rtdm_fd *fd, struct xnselector *selector,
+	   unsigned type, unsigned index)
+{
+	struct cobalt_mqd *mqd = container_of(fd, struct cobalt_mqd, fd);
+	struct xnselect_binding *binding;
+	struct cobalt_mq *mq;
+	int err;
+	spl_t s;
+
+	if (type == XNSELECT_READ || type == XNSELECT_WRITE) {
+		binding = xnmalloc(sizeof(*binding));
+		if (!binding)
+			return -ENOMEM;
+	} else
+		return -EBADF;
+
+	xnlock_get_irqsave(&nklock, s);
+	mq = mqd->mq;
+
+	switch(type) {
+	case XNSELECT_READ:
+		err = -EBADF;
+		if ((rtdm_fd_flags(fd) & COBALT_PERMS_MASK) == O_WRONLY)
+			goto unlock_and_error;
+
+		err = xnselect_bind(&mq->read_select, binding,
+				selector, type, index,
+				!list_empty(&mq->queued));
+		if (err)
+			goto unlock_and_error;
+		break;
+
+	case XNSELECT_WRITE:
+		err = -EBADF;
+		if ((rtdm_fd_flags(fd) & COBALT_PERMS_MASK) == O_RDONLY)
+			goto unlock_and_error;
+
+		err = xnselect_bind(&mq->write_select, binding,
+				selector, type, index,
+				!list_empty(&mq->avail));
+		if (err)
+			goto unlock_and_error;
+		break;
+	}
+	xnlock_put_irqrestore(&nklock, s);
+	return 0;
+
+      unlock_and_error:
+	xnlock_put_irqrestore(&nklock, s);
+	xnfree(binding);
+	return err;
+}
+
+static struct rtdm_fd_ops mqd_ops = {
+	.close = mqd_close,
+	.select = mqd_select,
+};
+
+static inline int mqd_create(struct cobalt_mq *mq, unsigned long flags, int ufd)
+{
+	struct cobalt_mqd *mqd;
+	int ret;
+
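+	/*
+	 * Creating a mq descriptor requires a registered Cobalt
+	 * process; cobalt_ppd_get(0) falls back to the kernel ppd
+	 * otherwise.
+	 */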
+	if (cobalt_ppd_get(0) == &cobalt_kernel_ppd)
+		return -EPERM;
+
+	mqd = kmalloc(sizeof(*mqd), GFP_KERNEL);
+	if (mqd == NULL)
+		return -ENOSPC;
+
+	mqd->fd.oflags = flags;
+	mqd->mq = mq;
+
+	ret = rtdm_fd_enter(&mqd->fd, ufd, COBALT_MQD_MAGIC, &mqd_ops);
+	if (ret < 0)
+		return ret;
+
+	return rtdm_fd_register(&mqd->fd, ufd);
+}
+
+static int mq_open(int uqd, const char *name, int oflags,
+		   int mode, struct mq_attr *attr)
+{
+	struct cobalt_mq *mq;
+	xnhandle_t handle;
+	spl_t s;
+	int err;
+
+	if (name[0] != '/' || name[1] == '\0')
+		return -EINVAL;
+
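+	/*
+	 * Handle the lookup/creation race: if the queue vanishes
+	 * between binding and referencing it, or if registration later
+	 * fails with -EEXIST because another thread created the same
+	 * name first, start over from the bind step.
+	 */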
+  retry_bind:
+	err = xnregistry_bind(&name[1], XN_NONBLOCK, XN_RELATIVE, &handle);
+	switch (err) {
+	case 0:
+		/* Found */
+		if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+			return -EEXIST;
+
+		xnlock_get_irqsave(&nklock, s);
+		mq = xnregistry_lookup(handle, NULL);
+		if (mq && mq->magic != COBALT_MQ_MAGIC) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EINVAL;
+		}
+
+		if (mq) {
+			++mq->refs;
+			xnlock_put_irqrestore(&nklock, s);
+		} else {
+			xnlock_put_irqrestore(&nklock, s);
+			goto retry_bind;
+		}
+
+		err = mqd_create(mq, oflags & (O_NONBLOCK | COBALT_PERMS_MASK),
+				uqd);
+		if (err < 0) {
+			mq_unref(mq);
+			return err;
+		}
+		break;
+
+	case -EWOULDBLOCK:
+		/* Not found */
+		if ((oflags & O_CREAT) == 0)
+			return (mqd_t)-ENOENT;
+
+		mq = kmalloc(sizeof(*mq), GFP_KERNEL);
+		if (mq == NULL)
+			return -ENOSPC;
+
+		err = mq_init(mq, attr);
+		if (err) {
+			kfree(mq);
+			return err;
+		}
+
+		snprintf(mq->name, sizeof(mq->name), "%s", &name[1]);
+
+		err = mqd_create(mq, oflags & (O_NONBLOCK | COBALT_PERMS_MASK),
+				uqd);
+		if (err < 0) {
+			mq_destroy(mq);
+			return err;
+		}
+
+		xnlock_get_irqsave(&nklock, s);
+		err = xnregistry_enter(mq->name, mq, &mq->handle,
+				       &__mq_pnode.node);
+		if (err < 0)
+			--mq->refs;
+		else
+			list_add_tail(&mq->link, &cobalt_mqq);
+		xnlock_put_irqrestore(&nklock, s);
+		if (err < 0) {
+			rtdm_fd_close(uqd, COBALT_MQD_MAGIC);
+			if (err == -EEXIST)
+				goto retry_bind;
+			return err;
+		}
+		break;
+
+	default:
+		return err;
+	}
+
+	return 0;
+}
+
+static inline int mq_close(mqd_t fd)
+{
+	int err;
+
+	err = rtdm_fd_close(fd, COBALT_MQD_MAGIC);
+	return err == -EADV ? -EBADF : err;
+}
+
+static inline int mq_unlink(const char *name)
+{
+	struct cobalt_mq *mq;
+	xnhandle_t handle;
+	spl_t s;
+	int err;
+
+	if (name[0] != '/' || name[1] == '\0')
+		return -EINVAL;
+
+	err = xnregistry_bind(&name[1], XN_NONBLOCK, XN_RELATIVE, &handle);
+	if (err == -EWOULDBLOCK)
+		return -ENOENT;
+	if (err)
+		return err;
+
+	xnlock_get_irqsave(&nklock, s);
+	mq = xnregistry_lookup(handle, NULL);
+	if (!mq) {
+		err = -ENOENT;
+		goto err_unlock;
+	}
+	if (mq->magic != COBALT_MQ_MAGIC) {
+		err = -EINVAL;
+	  err_unlock:
+		xnlock_put_irqrestore(&nklock, s);
+
+		return err;
+	}
+	if (mq_unref_inner(mq, s) == 0)
+		xnregistry_unlink(&name[1]);
+	return 0;
+}
+
+static inline struct cobalt_msg *
+mq_trysend(struct cobalt_mqd *mqd, size_t len)
+{
+	struct cobalt_msg *msg;
+	struct cobalt_mq *mq;
+	unsigned flags;
+
+	mq = mqd->mq;
+	flags = rtdm_fd_flags(&mqd->fd) & COBALT_PERMS_MASK;
+
+	if (flags != O_WRONLY && flags != O_RDWR)
+		return ERR_PTR(-EBADF);
+
+	if (len > mq->attr.mq_msgsize)
+		return ERR_PTR(-EMSGSIZE);
+
+	msg = mq_msg_alloc(mq);
+	if (msg == NULL)
+		return ERR_PTR(-EAGAIN);
+
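+	/*
+	 * We just grabbed the last free message slot: the queue is no
+	 * longer writable without blocking, so clear the write-select
+	 * condition.
+	 */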
+	if (list_empty(&mq->avail))
+		xnselect_signal(&mq->write_select, 0);
+
+	return msg;
+}
+
+static inline struct cobalt_msg *
+mq_tryrcv(struct cobalt_mqd *mqd, size_t len)
+{
+	struct cobalt_msg *msg;
+	unsigned int flags;
+	struct cobalt_mq *mq;
+
+	mq = mqd->mq;
+	flags = rtdm_fd_flags(&mqd->fd) & COBALT_PERMS_MASK;
+
+	if (flags != O_RDONLY && flags != O_RDWR)
+		return ERR_PTR(-EBADF);
+
+	if (len < mq->attr.mq_msgsize)
+		return ERR_PTR(-EMSGSIZE);
+
+	if (list_empty(&mq->queued))
+		return ERR_PTR(-EAGAIN);
+
+	msg = list_get_entry(&mq->queued, struct cobalt_msg, link);
+	mq->nrqueued--;
+
+	if (list_empty(&mq->queued))
+		xnselect_signal(&mq->read_select, 0);
+
+	return msg;
+}
+
+static struct cobalt_msg *
+mq_timedsend_inner(struct cobalt_mqd *mqd,
+		   size_t len, const void __user *u_ts,
+		   int (*fetch_timeout)(struct timespec64 *ts,
+					const void __user *u_ts))
+{
+	struct cobalt_mqwait_context mwc;
+	struct cobalt_msg *msg;
+	struct cobalt_mq *mq;
+	struct timespec64 ts;
+	xntmode_t tmode;
+	xnticks_t to;
+	spl_t s;
+	int ret;
+
+	to = XN_INFINITE;
+	tmode = XN_RELATIVE;
+redo:
+	xnlock_get_irqsave(&nklock, s);
+	msg = mq_trysend(mqd, len);
+	if (msg != ERR_PTR(-EAGAIN))
+		goto out;
+
+	if (rtdm_fd_flags(&mqd->fd) & O_NONBLOCK)
+		goto out;
+
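+	/*
+	 * First pass only: fetch the timeout from userland with nklock
+	 * released, interpret it as an absolute CLOCK_REALTIME date,
+	 * then retry the fast path once before blocking.
+	 */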
+	if (fetch_timeout) {
+		xnlock_put_irqrestore(&nklock, s);
+		ret = fetch_timeout(&ts, u_ts);
+		if (ret)
+			return ERR_PTR(ret);
+		if (!timespec64_valid(&ts))
+			return ERR_PTR(-EINVAL);
+		to = ts2ns(&ts) + 1;
+		tmode = XN_REALTIME;
+		fetch_timeout = NULL;
+		goto redo;
+	}
+
+	mq = mqd->mq;
+	xnthread_prepare_wait(&mwc.wc);
+	ret = xnsynch_sleep_on(&mq->senders, to, tmode);
+	if (ret) {
+		if (ret & XNBREAK)
+			msg = ERR_PTR(-EINTR);
+		else if (ret & XNTIMEO)
+			msg = ERR_PTR(-ETIMEDOUT);
+		else if (ret & XNRMID)
+			msg = ERR_PTR(-EBADF);
+	} else
+		msg = mwc.msg;
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return msg;
+}
+
+static void mq_release_msg(struct cobalt_mq *mq, struct cobalt_msg *msg)
+{
+	struct cobalt_mqwait_context *mwc;
+	struct xnthread_wait_context *wc;
+	struct xnthread *thread;
+
+	/*
+	 * Try passing the free message slot to a waiting sender, link
+	 * it to the free queue otherwise.
+	 */
+	if (xnsynch_pended_p(&mq->senders)) {
+		thread = xnsynch_wakeup_one_sleeper(&mq->senders);
+		wc = xnthread_get_wait_context(thread);
+		mwc = container_of(wc, struct cobalt_mqwait_context, wc);
+		mwc->msg = msg;
+		xnthread_complete_wait(wc);
+	} else {
+		mq_msg_free(mq, msg);
+		if (list_is_singular(&mq->avail))
+			xnselect_signal(&mq->write_select, 1);
+	}
+}
+
+static int
+mq_finish_send(struct cobalt_mqd *mqd, struct cobalt_msg *msg)
+{
+	struct cobalt_mqwait_context *mwc;
+	struct xnthread_wait_context *wc;
+	struct cobalt_sigpending *sigp;
+	struct xnthread *thread;
+	struct cobalt_mq *mq;
+	spl_t s;
+
+	mq = mqd->mq;
+
+	xnlock_get_irqsave(&nklock, s);
+	/* Can we do pipelined sending? */
+	if (xnsynch_pended_p(&mq->receivers)) {
+		thread = xnsynch_wakeup_one_sleeper(&mq->receivers);
+		wc = xnthread_get_wait_context(thread);
+		mwc = container_of(wc, struct cobalt_mqwait_context, wc);
+		mwc->msg = msg;
+		xnthread_complete_wait(wc);
+	} else {
+		/* Nope, have to go through the queue. */
+		list_add_priff(msg, &mq->queued, prio, link);
+		mq->nrqueued++;
+
+		/*
+		 * If first message and no pending reader, send a
+		 * signal if notification was enabled via mq_notify().
+		 */
+		if (list_is_singular(&mq->queued)) {
+			xnselect_signal(&mq->read_select, 1);
+			if (mq->target) {
+				sigp = cobalt_signal_alloc();
+				if (sigp) {
+					cobalt_copy_siginfo(SI_MESGQ, &sigp->si, &mq->si);
+					if (cobalt_signal_send(mq->target, sigp, 0) <= 0)
+						cobalt_signal_free(sigp);
+				}
+				mq->target = NULL;
+			}
+		}
+	}
+	xnsched_run();
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+static struct cobalt_msg *
+mq_timedrcv_inner(struct cobalt_mqd *mqd,
+		  size_t len,
+		  const void __user *u_ts,
+		  int (*fetch_timeout)(struct timespec64 *ts,
+				       const void __user *u_ts))
+{
+	struct cobalt_mqwait_context mwc;
+	struct cobalt_msg *msg;
+	struct cobalt_mq *mq;
+	struct timespec64 ts;
+	xntmode_t tmode;
+	xnticks_t to;
+	spl_t s;
+	int ret;
+
+	to = XN_INFINITE;
+	tmode = XN_RELATIVE;
+redo:
+	xnlock_get_irqsave(&nklock, s);
+	msg = mq_tryrcv(mqd, len);
+	if (msg != ERR_PTR(-EAGAIN))
+		goto out;
+
+	if (rtdm_fd_flags(&mqd->fd) & O_NONBLOCK)
+		goto out;
+
+	if (fetch_timeout) {
+		xnlock_put_irqrestore(&nklock, s);
+		ret = fetch_timeout(&ts, u_ts);
+		if (ret)
+			return ERR_PTR(ret);
+		if (!timespec64_valid(&ts))
+			return ERR_PTR(-EINVAL);
+		to = ts2ns(&ts) + 1;
+		tmode = XN_REALTIME;
+		fetch_timeout = NULL;
+		goto redo;
+	}
+
+	mq = mqd->mq;
+	xnthread_prepare_wait(&mwc.wc);
+	ret = xnsynch_sleep_on(&mq->receivers, to, tmode);
+	if (ret == 0)
+		msg = mwc.msg;
+	else if (ret & XNRMID)
+		msg = ERR_PTR(-EBADF);
+	else if (ret & XNTIMEO)
+		msg = ERR_PTR(-ETIMEDOUT);
+	else
+		msg = ERR_PTR(-EINTR);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return msg;
+}
+
+static int
+mq_finish_rcv(struct cobalt_mqd *mqd, struct cobalt_msg *msg)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	mq_release_msg(mqd->mq, msg);
+	xnsched_run();
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+static inline int mq_getattr(struct cobalt_mqd *mqd, struct mq_attr *attr)
+{
+	struct cobalt_mq *mq;
+	spl_t s;
+
+	mq = mqd->mq;
+	*attr = mq->attr;
+	xnlock_get_irqsave(&nklock, s);
+	attr->mq_flags = rtdm_fd_flags(&mqd->fd);
+	attr->mq_curmsgs = mq->nrqueued;
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+static inline int
+mq_notify(struct cobalt_mqd *mqd, unsigned index, const struct sigevent *evp)
+{
+	struct cobalt_thread *thread = cobalt_current_thread();
+	struct cobalt_mq *mq;
+	int err;
+	spl_t s;
+
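+	/*
+	 * Only SIGEV_SIGNAL and SIGEV_NONE notifications are
+	 * supported, and the signal number must be in the valid
+	 * 1..SIGRTMAX range.
+	 */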
+	if (evp && ((evp->sigev_notify != SIGEV_SIGNAL &&
+		     evp->sigev_notify != SIGEV_NONE) ||
+		    (unsigned int)(evp->sigev_signo - 1) > SIGRTMAX - 1))
+		return -EINVAL;
+
+	if (xnsched_interrupt_p() || thread == NULL)
+		return -EPERM;
+
+	xnlock_get_irqsave(&nklock, s);
+	mq = mqd->mq;
+	if (mq->target && mq->target != thread) {
+		err = -EBUSY;
+		goto unlock_and_error;
+	}
+
+	if (evp == NULL || evp->sigev_notify == SIGEV_NONE)
+		/* Here, mq->target == cobalt_current_thread() or NULL. */
+		mq->target = NULL;
+	else {
+		mq->target = thread;
+		mq->target_qd = index;
+		mq->si.si_signo = evp->sigev_signo;
+		mq->si.si_errno = 0;
+		mq->si.si_code = SI_MESGQ;
+		mq->si.si_value = evp->sigev_value;
+		/*
+		 * XXX: we differ from the regular kernel here, which
+		 * passes the sender's pid/uid data into the
+		 * receiver's namespaces. We pass the receiver's creds
+		 * into the init namespace instead.
+		 */
+		mq->si.si_pid = task_pid_nr(current);
+		mq->si.si_uid = get_current_uuid();
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+	return 0;
+
+      unlock_and_error:
+	xnlock_put_irqrestore(&nklock, s);
+	return err;
+}
+
+static inline struct cobalt_mqd *cobalt_mqd_get(mqd_t ufd)
+{
+	struct rtdm_fd *fd;
+
+	fd = rtdm_fd_get(ufd, COBALT_MQD_MAGIC);
+	if (IS_ERR(fd)) {
+		int err = PTR_ERR(fd);
+		if (err == -EADV)
+			err = cobalt_current_process() ? -EBADF : -EPERM;
+		return ERR_PTR(err);
+	}
+
+	return container_of(fd, struct cobalt_mqd, fd);
+}
+
+static inline void cobalt_mqd_put(struct cobalt_mqd *mqd)
+{
+	rtdm_fd_put(&mqd->fd);
+}
+
+int __cobalt_mq_notify(mqd_t fd, const struct sigevent *evp)
+{
+	struct cobalt_mqd *mqd;
+	int ret;
+
+	mqd = cobalt_mqd_get(fd);
+	if (IS_ERR(mqd))
+		ret = PTR_ERR(mqd);
+	else {
+		trace_cobalt_mq_notify(fd, evp);
+		ret = mq_notify(mqd, fd, evp);
+		cobalt_mqd_put(mqd);
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL(mq_notify, primary,
+	       (mqd_t fd, const struct sigevent *__user evp))
+{
+	struct sigevent sev;
+
+	if (evp && cobalt_copy_from_user(&sev, evp, sizeof(sev)))
+		return -EFAULT;
+
+	return __cobalt_mq_notify(fd, evp ? &sev : NULL);
+}
+
+int __cobalt_mq_open(const char __user *u_name, int oflags,
+		     mode_t mode, struct mq_attr *attr)
+{
+	char name[COBALT_MAXNAME];
+	ssize_t len;
+	mqd_t uqd;
+	int ret;
+
+	len = cobalt_strncpy_from_user(name, u_name, sizeof(name));
+	if (len < 0)
+		return -EFAULT;
+
+	if (len >= sizeof(name))
+		return -ENAMETOOLONG;
+
+	if (len == 0)
+		return -EINVAL;
+
+	trace_cobalt_mq_open(name, oflags, mode);
+
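+	/*
+	 * Reserve an anonymous RTDM descriptor first, then bind it to
+	 * the message queue; release it if mq_open() fails.
+	 */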
+	uqd = __rtdm_anon_getfd("[cobalt-mq]", oflags);
+	if (uqd < 0)
+		return uqd;
+
+	ret = mq_open(uqd, name, oflags, mode, attr);
+	if (ret < 0) {
+		__rtdm_anon_putfd(uqd);
+		return ret;
+	}
+
+	return uqd;
+}
+
+COBALT_SYSCALL(mq_open, lostage,
+	       (const char __user *u_name, int oflags,
+		mode_t mode, struct mq_attr __user *u_attr))
+{
+	struct mq_attr _attr, *attr = &_attr;
+
+	if ((oflags & O_CREAT) && u_attr) {
+		if (cobalt_copy_from_user(&_attr, u_attr, sizeof(_attr)))
+			return -EFAULT;
+	} else
+		attr = NULL;
+
+	return __cobalt_mq_open(u_name, oflags, mode, attr);
+}
+
+COBALT_SYSCALL(mq_close, lostage, (mqd_t uqd))
+{
+	trace_cobalt_mq_close(uqd);
+
+	return mq_close(uqd);
+}
+
+COBALT_SYSCALL(mq_unlink, lostage, (const char __user *u_name))
+{
+	char name[COBALT_MAXNAME];
+	ssize_t len;
+
+	len = cobalt_strncpy_from_user(name, u_name, sizeof(name));
+	if (len < 0)
+		return -EFAULT;
+	if (len >= sizeof(name))
+		return -ENAMETOOLONG;
+
+	trace_cobalt_mq_unlink(name);
+
+	return mq_unlink(name);
+}
+
+int __cobalt_mq_getattr(mqd_t uqd, struct mq_attr *attr)
+{
+	struct cobalt_mqd *mqd;
+	int ret;
+
+	mqd = cobalt_mqd_get(uqd);
+	if (IS_ERR(mqd))
+		return PTR_ERR(mqd);
+
+	ret = mq_getattr(mqd, attr);
+	cobalt_mqd_put(mqd);
+	if (ret)
+		return ret;
+
+	trace_cobalt_mq_getattr(uqd, attr);
+
+	return 0;
+}
+
+COBALT_SYSCALL(mq_getattr, current,
+	       (mqd_t uqd, struct mq_attr __user *u_attr))
+{
+	struct mq_attr attr;
+	int ret;
+
+	ret = __cobalt_mq_getattr(uqd, &attr);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_attr, &attr, sizeof(attr));
+}
+
+static inline int mq_fetch_timeout(struct timespec64 *ts,
+				   const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_u_timespec(ts, u_ts);
+}
+
+static inline int mq_fetch_timeout64(struct timespec64 *ts,
+				     const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_timespec64(ts, u_ts);
+}
+
+int __cobalt_mq_timedsend(mqd_t uqd, const void __user *u_buf, size_t len,
+			  unsigned int prio, const void __user *u_ts,
+			  int (*fetch_timeout)(struct timespec64 *ts,
+					       const void __user *u_ts))
+{
+	struct cobalt_msg *msg;
+	struct cobalt_mqd *mqd;
+	int ret;
+
+	mqd = cobalt_mqd_get(uqd);
+	if (IS_ERR(mqd))
+		return PTR_ERR(mqd);
+
+	if (prio >= COBALT_MSGPRIOMAX) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (len > 0 && !access_rok(u_buf, len)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	trace_cobalt_mq_send(uqd, u_buf, len, prio);
+	msg = mq_timedsend_inner(mqd, len, u_ts, fetch_timeout);
+	if (IS_ERR(msg)) {
+		ret = PTR_ERR(msg);
+		goto out;
+	}
+
+	ret = cobalt_copy_from_user(msg->data, u_buf, len);
+	if (ret) {
+		mq_finish_rcv(mqd, msg);
+		goto out;
+	}
+	msg->len = len;
+	msg->prio = prio;
+	ret = mq_finish_send(mqd, msg);
+out:
+	cobalt_mqd_put(mqd);
+
+	return ret;
+}
+
+int __cobalt_mq_timedsend64(mqd_t uqd, const void __user *u_buf, size_t len,
+			    unsigned int prio, const void __user *u_ts)
+{
+	return __cobalt_mq_timedsend(uqd, u_buf, len, prio, u_ts,
+				     u_ts ? mq_fetch_timeout64 : NULL);
+}
+
+COBALT_SYSCALL(mq_timedsend, primary,
+	       (mqd_t uqd, const void __user *u_buf, size_t len,
+		unsigned int prio, const struct __user_old_timespec __user *u_ts))
+{
+	return __cobalt_mq_timedsend(uqd, u_buf, len, prio,
+				     u_ts, u_ts ? mq_fetch_timeout : NULL);
+}
+
+COBALT_SYSCALL(mq_timedsend64, primary,
+	       (mqd_t uqd, const void __user *u_buf, size_t len,
+		unsigned int prio, const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_mq_timedsend64(uqd, u_buf, len, prio, u_ts);
+}
+
+int __cobalt_mq_timedreceive(mqd_t uqd, void __user *u_buf,
+			     ssize_t *lenp,
+			     unsigned int __user *u_prio,
+			     const void __user *u_ts,
+			     int (*fetch_timeout)(struct timespec64 *ts,
+						  const void __user *u_ts))
+{
+	struct cobalt_mqd *mqd;
+	struct cobalt_msg *msg;
+	unsigned int prio;
+	int ret;
+
+	mqd = cobalt_mqd_get(uqd);
+	if (IS_ERR(mqd))
+		return PTR_ERR(mqd);
+
+	if (*lenp > 0 && !access_wok(u_buf, *lenp)) {
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	msg = mq_timedrcv_inner(mqd, *lenp, u_ts, fetch_timeout);
+	if (IS_ERR(msg)) {
+		ret = PTR_ERR(msg);
+		goto fail;
+	}
+
+	ret = cobalt_copy_to_user(u_buf, msg->data, msg->len);
+	if (ret) {
+		mq_finish_rcv(mqd, msg);
+		goto fail;
+	}
+
+	*lenp = msg->len;
+	prio = msg->prio;
+	ret = mq_finish_rcv(mqd, msg);
+	if (ret)
+		goto fail;
+
+	cobalt_mqd_put(mqd);
+
+	if (u_prio && __xn_put_user(prio, u_prio))
+		return -EFAULT;
+
+	return 0;
+fail:
+	cobalt_mqd_put(mqd);
+
+	return ret;
+}
+
+int __cobalt_mq_timedreceive64(mqd_t uqd, void __user *u_buf,
+			       ssize_t __user *u_len,
+			       unsigned int __user *u_prio,
+			       const void __user *u_ts)
+{
+	ssize_t len;
+	int ret;
+
+	ret = cobalt_copy_from_user(&len, u_len, sizeof(len));
+	if (ret)
+		return ret;
+
+	ret = __cobalt_mq_timedreceive(uqd, u_buf, &len, u_prio, u_ts,
+				       u_ts ? mq_fetch_timeout64 : NULL);
+
+	return ret ?: cobalt_copy_to_user(u_len, &len, sizeof(*u_len));
+}
+
+COBALT_SYSCALL(mq_timedreceive, primary,
+	       (mqd_t uqd, void __user *u_buf,
+		ssize_t __user *u_len,
+		unsigned int __user *u_prio,
+		const struct __user_old_timespec __user *u_ts))
+{
+	ssize_t len;
+	int ret;
+
+	ret = cobalt_copy_from_user(&len, u_len, sizeof(len));
+	if (ret)
+		return ret;
+
+	ret = __cobalt_mq_timedreceive(uqd, u_buf, &len, u_prio,
+				       u_ts, u_ts ? mq_fetch_timeout : NULL);
+
+	return ret ?: cobalt_copy_to_user(u_len, &len, sizeof(*u_len));
+}
+
+COBALT_SYSCALL(mq_timedreceive64, primary,
+	       (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len,
+		unsigned int __user *u_prio,
+		const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_mq_timedreceive64(uqd, u_buf, u_len, u_prio, u_ts);
+}
+++ linux-patched/kernel/xenomai/posix/signal.c	2022-03-21 12:58:29.054892228 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/io.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/sched.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/compat.h>
+#include <cobalt/kernel/time.h>
+#include "internal.h"
+#include "signal.h"
+#include "thread.h"
+#include "timer.h"
+#include "clock.h"
+
+static void *sigpending_mem;
+
+static LIST_HEAD(sigpending_pool);
+
+/*
+ * The maximum number of signal notifications which may be pending at
+ * any given time, timers excluded. Cobalt signals are always
+ * thread-directed, and we assume that in practice, each signal
+ * number is processed by a dedicated thread. We provide for up to
+ * three real-time signal events to pile up, and a single
+ * notification pending for other signals. Timers use a fast queuing
+ * logic maintaining a count of overruns, and therefore do not
+ * consume any memory from this pool.
+ */
+#define __SIGPOOL_SIZE  (sizeof(struct cobalt_sigpending) *	\
+			 (_NSIG + (SIGRTMAX - SIGRTMIN) * 2))
+
+static int cobalt_signal_deliver(struct cobalt_thread *thread,
+				 struct cobalt_sigpending *sigp,
+				 int group)
+{				/* nklocked, IRQs off */
+	struct cobalt_sigwait_context *swc;
+	struct xnthread_wait_context *wc;
+	struct list_head *sigwaiters;
+	int sig, ret;
+
+	sig = sigp->si.si_signo;
+	XENO_BUG_ON(COBALT, sig < 1 || sig > _NSIG);
+
+	/*
+	 * Attempt to deliver the signal immediately to the initial
+	 * target that waits for it.
+	 */
+	if (xnsynch_pended_p(&thread->sigwait)) {
+		wc = xnthread_get_wait_context(&thread->threadbase);
+		swc = container_of(wc, struct cobalt_sigwait_context, wc);
+		if (sigismember(swc->set, sig))
+			goto deliver;
+	}
+
+	/*
+	 * If that does not work out and we are sending to a thread
+	 * group, try to deliver to any thread from the same process
+	 * waiting for that signal.
+	 */
+	sigwaiters = &thread->process->sigwaiters;
+	if (!group || list_empty(sigwaiters))
+		return 0;
+
+	list_for_each_entry(thread, sigwaiters, signext) {
+		wc = xnthread_get_wait_context(&thread->threadbase);
+		swc = container_of(wc, struct cobalt_sigwait_context, wc);
+		if (sigismember(swc->set, sig))
+			goto deliver;
+	}
+
+	return 0;
+deliver:
+	cobalt_copy_siginfo(sigp->si.si_code, swc->si, &sigp->si);
+	cobalt_call_extension(signal_deliver, &thread->extref,
+			      ret, swc->si, sigp);
+	xnthread_complete_wait(&swc->wc);
+	xnsynch_wakeup_one_sleeper(&thread->sigwait);
+	list_del(&thread->signext);
+
+	/*
+	 * This is an immediate delivery bypassing any queuing, so we
+	 * have to release the sigpending data right away before
+	 * leaving.
+	 */
+	cobalt_signal_free(sigp);
+
+	return 1;
+}
+
+int cobalt_signal_send(struct cobalt_thread *thread,
+		       struct cobalt_sigpending *sigp,
+		       int group)
+{				/* nklocked, IRQs off */
+	struct list_head *sigq;
+	int sig, ret;
+
+	/* Can we deliver this signal immediately? */
+	ret = cobalt_signal_deliver(thread, sigp, group);
+	if (ret)
+		return ret;	/* Yep, done. */
+
+	/*
+	 * Nope, attempt to queue it. We start by calling any Cobalt
+	 * extension for queuing the signal first.
+	 */
+	if (cobalt_call_extension(signal_queue, &thread->extref, ret, sigp)) {
+		if (ret)
+			/* Queuing done remotely or error. */
+			return ret;
+	}
+
+	sig = sigp->si.si_signo;
+	sigq = thread->sigqueues + sig - 1;
+	if (!list_empty(sigq)) {
+		/* Queue non-rt signals only once. */
+		if (sig < SIGRTMIN)
+			return 0;
+		/* Queue rt signal source only once (SI_TIMER). */
+		if (!list_empty(&sigp->next))
+			return 0;
+	}
+
+	sigaddset(&thread->sigpending, sig);
+	list_add_tail(&sigp->next, sigq);
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(cobalt_signal_send);
+
+int cobalt_signal_send_pid(pid_t pid, struct cobalt_sigpending *sigp)
+{				/* nklocked, IRQs off */
+	struct cobalt_thread *thread;
+
+	thread = cobalt_thread_find(pid);
+	if (thread)
+		return cobalt_signal_send(thread, sigp, 0);
+
+	return -ESRCH;
+}
+EXPORT_SYMBOL_GPL(cobalt_signal_send_pid);
+
+struct cobalt_sigpending *cobalt_signal_alloc(void)
+{				/* nklocked, IRQs off */
+	struct cobalt_sigpending *sigp;
+
+	if (list_empty(&sigpending_pool)) {
+		if (xnclock_ratelimit())
+			printk(XENO_WARNING "signal bucket pool underflows\n");
+		return NULL;
+	}
+
+	sigp = list_get_entry(&sigpending_pool, struct cobalt_sigpending, next);
+	INIT_LIST_HEAD(&sigp->next);
+
+	return sigp;
+}
+EXPORT_SYMBOL_GPL(cobalt_signal_alloc);
+
+void cobalt_signal_free(struct cobalt_sigpending *sigp)
+{				/* nklocked, IRQs off */
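+	/*
+	 * Only recycle blocks carved from the static pool; sigpending
+	 * data embedded in other objects (e.g. timers) is not ours to
+	 * release.
+	 */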
+	if ((void *)sigp >= sigpending_mem &&
+	    (void *)sigp < sigpending_mem + __SIGPOOL_SIZE)
+		list_add_tail(&sigp->next, &sigpending_pool);
+}
+EXPORT_SYMBOL_GPL(cobalt_signal_free);
+
+void cobalt_signal_flush(struct cobalt_thread *thread)
+{
+	struct cobalt_sigpending *sigp, *tmp;
+	struct list_head *sigq;
+	spl_t s;
+	int n;
+
+	/*
+	 * TCB is not accessible from userland anymore, no locking
+	 * required.
+	 */
+	if (sigisemptyset(&thread->sigpending))
+		return;
+
+	for (n = 0; n < _NSIG; n++) {
+		sigq = thread->sigqueues + n;
+		if (list_empty(sigq))
+			continue;
+		/*
+		 * sigpending blocks must be unlinked so that we
+		 * detect this fact when deleting their respective
+		 * owners.
+		 */
+		list_for_each_entry_safe(sigp, tmp, sigq, next) {
+			list_del_init(&sigp->next);
+			if ((void *)sigp >= sigpending_mem &&
+			    (void *)sigp < sigpending_mem + __SIGPOOL_SIZE) {
+				xnlock_get_irqsave(&nklock, s);
+				list_add_tail(&sigp->next, &sigpending_pool);
+				xnlock_put_irqrestore(&nklock, s);
+			}
+		}
+	}
+
+	sigemptyset(&thread->sigpending);
+}
+
+static int signal_put_siginfo(void __user *u_si, const struct siginfo *si,
+			      int overrun)
+{
+	struct siginfo __user *u_p = u_si;
+	int ret;
+
+	ret = __xn_put_user(si->si_signo, &u_p->si_signo);
+	ret |= __xn_put_user(si->si_errno, &u_p->si_errno);
+	ret |= __xn_put_user(si->si_code, &u_p->si_code);
+
+	/*
+	 * Copy the generic/standard siginfo bits to userland.
+	 */
+	switch (si->si_code) {
+	case SI_TIMER:
+		ret |= __xn_put_user(si->si_tid, &u_p->si_tid);
+		ret |= __xn_put_user(si->si_ptr, &u_p->si_ptr);
+		ret |= __xn_put_user(overrun, &u_p->si_overrun);
+		break;
+	case SI_QUEUE:
+	case SI_MESGQ:
+		ret |= __xn_put_user(si->si_ptr, &u_p->si_ptr);
+		fallthrough;
+	case SI_USER:
+		ret |= __xn_put_user(si->si_pid, &u_p->si_pid);
+		ret |= __xn_put_user(si->si_uid, &u_p->si_uid);
+	}
+
+	return ret;
+}
+
+static int signal_wait(sigset_t *set, xnticks_t timeout,
+		       void __user *u_si, bool compat)
+{
+	struct cobalt_sigpending *sigp = NULL;
+	struct cobalt_sigwait_context swc;
+	struct cobalt_thread *curr;
+	int ret, sig, n, overrun;
+	unsigned long *p, *t, m;
+	struct siginfo si, *sip;
+	struct list_head *sigq;
+	spl_t s;
+
+	curr = cobalt_current_thread();
+	XENO_BUG_ON(COBALT, curr == NULL);
+
+	if (u_si && !access_wok(u_si, sizeof(*u_si)))
+		return -EFAULT;
+
+	xnlock_get_irqsave(&nklock, s);
+
+check:
+	if (sigisemptyset(&curr->sigpending))
+		/* Most common/fast path. */
+		goto wait;
+
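+	/*
+	 * Scan for the lowest-numbered signal which is both pending
+	 * and part of the requested set.
+	 */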
+	p = curr->sigpending.sig; /* pending */
+	t = set->sig;		  /* tested */
+
+	for (n = 0, sig = 0; n < _NSIG_WORDS; ++n) {
+		m = *p++ & *t++;
+		if (m == 0)
+			continue;
+		sig = ffz(~m) + n * _NSIG_BPW + 1;
+		break;
+	}
+
+	if (sig) {
+		sigq = curr->sigqueues + sig - 1;
+		if (list_empty(sigq)) {
+			sigdelset(&curr->sigpending, sig);
+			goto check;
+		}
+		sigp = list_get_entry(sigq, struct cobalt_sigpending, next);
+		INIT_LIST_HEAD(&sigp->next); /* Mark sigp as unlinked. */
+		if (list_empty(sigq))
+			sigdelset(&curr->sigpending, sig);
+		sip = &sigp->si;
+		ret = 0;
+		goto done;
+	}
+
+wait:
+	if (timeout == XN_NONBLOCK) {
+		ret = -EAGAIN;
+		goto fail;
+	}
+	swc.set = set;
+	swc.si = &si;
+	xnthread_prepare_wait(&swc.wc);
+	list_add_tail(&curr->signext, &curr->process->sigwaiters);
+	ret = xnsynch_sleep_on(&curr->sigwait, timeout, XN_RELATIVE);
+	if (ret) {
+		list_del(&curr->signext);
+		ret = ret & XNBREAK ? -EINTR : -EAGAIN;
+		goto fail;
+	}
+	sig = si.si_signo;
+	sip = &si;
+done:
+	/*
+	 * si_overrun raises a tricky issue: we have to collect and
+	 * clear it atomically before we drop the lock, although we
+	 * don't know in advance whether any extension would use it
+	 * along with the additional si_codes it may provide, and we
+	 * must drop the lock before running the signal_copyinfo
+	 * handler.
+	 *
+	 * Observing that si_overrun is likely the only "unstable"
+	 * piece of signal information which might change under our
+	 * feet while we copy the bits to userland, we collect it here
+	 * from the atomic section for all unknown si_codes, then pass
+	 * its value to the signal_copyinfo handler.
+	 */
+	switch (sip->si_code) {
+	case SI_TIMER:
+		overrun = cobalt_timer_deliver(curr, sip->si_tid);
+		break;
+	case SI_USER:
+	case SI_MESGQ:
+	case SI_QUEUE:
+		overrun = 0;
+		break;
+	default:
+		overrun = sip->si_overrun;
+		if (overrun)
+			sip->si_overrun = 0;
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (u_si == NULL)
+		goto out;	/* Return signo only. */
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (compat) {
+		ret = sys32_put_siginfo(u_si, sip, overrun);
+		if (!ret)
+			/* Allow an extended target to receive more data. */
+			cobalt_call_extension(signal_copyinfo_compat,
+					      &curr->extref, ret, u_si, sip,
+					      overrun);
+	} else
+#endif
+	{
+		ret = signal_put_siginfo(u_si, sip, overrun);
+		if (!ret)
+			/* Allow an extended target to receive more data. */
+			cobalt_call_extension(signal_copyinfo, &curr->extref,
+					      ret, u_si, sip, overrun);
+	}
+
+out:
+	/*
+	 * If we pulled the signal information from a sigpending
+	 * block, release it to the free pool if applicable.
+	 */
+	if (sigp &&
+	    (void *)sigp >= sigpending_mem &&
+	    (void *)sigp < sigpending_mem + __SIGPOOL_SIZE) {
+		xnlock_get_irqsave(&nklock, s);
+		list_add_tail(&sigp->next, &sigpending_pool);
+		xnlock_put_irqrestore(&nklock, s);
+		/* no more ref. to sigp beyond this point. */
+	}
+
+	return ret ? -EFAULT : sig;
+fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+int __cobalt_sigwait(sigset_t *set)
+{
+	return signal_wait(set, XN_INFINITE, NULL, false);
+}
+
+COBALT_SYSCALL(sigwait, primary,
+	       (const sigset_t __user *u_set, int __user *u_sig))
+{
+	sigset_t set;
+	int sig;
+
+	if (cobalt_copy_from_user(&set, u_set, sizeof(set)))
+		return -EFAULT;
+
+	sig = signal_wait(&set, XN_INFINITE, NULL, false);
+	if (sig < 0)
+		return sig;
+
+	return cobalt_copy_to_user(u_sig, &sig, sizeof(*u_sig));
+}
+
+int __cobalt_sigtimedwait(sigset_t *set,
+			  const struct timespec64 *timeout,
+			  void __user *u_si,
+			  bool compat)
+{
+	xnticks_t ticks;
+
+	if (!timespec64_valid(timeout))
+		return -EINVAL;
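+	/*
+	 * A zero timeout means a non-blocking wait; otherwise round
+	 * the delay up by one tick so we never wait shorter than
+	 * requested.
+	 */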
+	ticks = ts2ns(timeout);
+	if (ticks++ == 0)
+		ticks = XN_NONBLOCK;
+
+	return signal_wait(set, ticks, u_si, compat);
+}
+
+COBALT_SYSCALL(sigtimedwait, nonrestartable,
+	       (const sigset_t __user *u_set,
+		struct siginfo __user *u_si,
+		const struct __user_old_timespec __user *u_timeout))
+{
+	struct timespec64 timeout;
+	sigset_t set;
+
+	if (cobalt_copy_from_user(&set, u_set, sizeof(set)))
+		return -EFAULT;
+
+	if (cobalt_copy_from_user(&timeout, u_timeout, sizeof(timeout)))
+		return -EFAULT;
+
+	return __cobalt_sigtimedwait(&set, &timeout, u_si, false);
+}
+
+COBALT_SYSCALL(sigtimedwait64, nonrestartable,
+	       (const sigset_t __user *u_set,
+		struct siginfo __user *u_si,
+		const struct __kernel_timespec __user *u_timeout))
+{
+	struct timespec64 timeout;
+	sigset_t set;
+
+	if (cobalt_copy_from_user(&set, u_set, sizeof(set)))
+		return -EFAULT;
+
+	if (cobalt_get_timespec64(&timeout, u_timeout))
+		return -EFAULT;
+
+	return __cobalt_sigtimedwait(&set, &timeout, u_si, false);
+}
+
+int __cobalt_sigwaitinfo(sigset_t *set,
+			 void __user *u_si,
+			 bool compat)
+{
+	return signal_wait(set, XN_INFINITE, u_si, compat);
+}
+
+COBALT_SYSCALL(sigwaitinfo, nonrestartable,
+	       (const sigset_t __user *u_set, struct siginfo __user *u_si))
+{
+	sigset_t set;
+
+	if (cobalt_copy_from_user(&set, u_set, sizeof(set)))
+		return -EFAULT;
+
+	return __cobalt_sigwaitinfo(&set, u_si, false);
+}
+
+COBALT_SYSCALL(sigpending, primary, (old_sigset_t __user *u_set))
+{
+	struct cobalt_thread *curr = cobalt_current_thread();
+
+	return cobalt_copy_to_user(u_set, &curr->sigpending, sizeof(*u_set));
+}
+
+int __cobalt_kill(struct cobalt_thread *thread, int sig, int group) /* nklocked, IRQs off */
+{
+	struct cobalt_sigpending *sigp;
+	int ret = 0;
+
+	/*
+	 * We have undocumented pseudo-signals to suspend/resume/unblock
+	 * threads, force them out of primary mode or even demote them
+	 * to the weak scheduling class/priority. Process them early,
+	 * before anyone can notice...
+	 */
+	switch(sig) {
+	case 0:
+		/* Check for existence only. */
+		break;
+	case SIGSUSP:
+		/*
+		 * All callers shall be tagged as conforming calls, so
+		 * self-directed suspension can only happen from
+		 * primary mode. Yummie.
+		 */
+		xnthread_suspend(&thread->threadbase, XNSUSP,
+				 XN_INFINITE, XN_RELATIVE, NULL);
+		if (&thread->threadbase == xnthread_current() &&
+		    xnthread_test_info(&thread->threadbase, XNBREAK))
+			ret = -EINTR;
+		break;
+	case SIGRESM:
+		xnthread_resume(&thread->threadbase, XNSUSP);
+		goto resched;
+	case SIGRELS:
+		xnthread_unblock(&thread->threadbase);
+		goto resched;
+	case SIGKICK:
+		xnthread_kick(&thread->threadbase);
+		goto resched;
+	case SIGDEMT:
+		xnthread_demote(&thread->threadbase);
+		goto resched;
+	case 1 ... _NSIG:
+		sigp = cobalt_signal_alloc();
+		if (sigp) {
+			sigp->si.si_signo = sig;
+			sigp->si.si_errno = 0;
+			sigp->si.si_code = SI_USER;
+			sigp->si.si_pid = task_pid_nr(current);
+			sigp->si.si_uid = get_current_uuid();
+			if (cobalt_signal_send(thread, sigp, group) <= 0)
+				cobalt_signal_free(sigp);
+		}
+	resched:
+		xnsched_run();
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL(kill, conforming, (pid_t pid, int sig))
+{
+	struct cobalt_thread *thread;
+	int ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	thread = cobalt_thread_find(pid);
+	if (thread == NULL)
+		ret = -ESRCH;
+	else
+		ret = __cobalt_kill(thread, sig, 1);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+int __cobalt_sigqueue(pid_t pid, int sig, const union sigval *value)
+{
+	struct cobalt_sigpending *sigp;
+	struct cobalt_thread *thread;
+	int ret = 0;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	thread = cobalt_thread_find(pid);
+	if (thread == NULL) {
+		ret = -ESRCH;
+		goto out;
+	}
+
+	switch(sig) {
+	case 0:
+		/* Check for existence only. */
+		break;
+	case 1 ... _NSIG:
+		sigp = cobalt_signal_alloc();
+		if (sigp) {
+			sigp->si.si_signo = sig;
+			sigp->si.si_errno = 0;
+			sigp->si.si_code = SI_QUEUE;
+			sigp->si.si_pid = task_pid_nr(current);
+			sigp->si.si_uid = get_current_uuid();
+			sigp->si.si_value = *value;
+			if (cobalt_signal_send(thread, sigp, 1) <= 0)
+				cobalt_signal_free(sigp);
+			else
+				xnsched_run();
+		}
+		break;
+	default:
+		/* Cobalt pseudo-signals are never process-directed. */
+		ret = __cobalt_kill(thread, sig, 0);
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__cobalt_sigqueue);
+
+COBALT_SYSCALL(sigqueue, conforming,
+	       (pid_t pid, int sig, const union sigval __user *u_value))
+{
+	union sigval val;
+	int ret;
+
+	ret = cobalt_copy_from_user(&val, u_value, sizeof(val));
+
+	return ret ?: __cobalt_sigqueue(pid, sig, &val);
+}
+
+__init int cobalt_signal_init(void)
+{
+	struct cobalt_sigpending *sigp;
+
+	sigpending_mem = xnheap_vmalloc(__SIGPOOL_SIZE);
+	if (sigpending_mem == NULL)
+		return -ENOMEM;
+
+	for (sigp = sigpending_mem;
+	     (void *)sigp < sigpending_mem + __SIGPOOL_SIZE; sigp++)
+		list_add_tail(&sigp->next, &sigpending_pool);
+
+	return 0;
+}
+
+__init void cobalt_signal_cleanup(void)
+{
+	xnheap_vfree(sigpending_mem);
+}
+++ linux-patched/kernel/xenomai/posix/io.c	2022-03-21 12:58:29.050892267 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/extension.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>.
+ * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <cobalt/kernel/compat.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/time.h>
+#include <xenomai/rtdm/internal.h>
+#include "process.h"
+#include "internal.h"
+#include "clock.h"
+#include "io.h"
+
+COBALT_SYSCALL(open, lostage,
+	       (const char __user *u_path, int oflag))
+{
+	struct filename *filename;
+	int ufd;
+
+	filename = getname(u_path);
+	if (IS_ERR(filename))
+		return PTR_ERR(filename);
+
+	ufd = __rtdm_dev_open(filename->name, oflag);
+	putname(filename);
+
+	return ufd;
+}
+
+COBALT_SYSCALL(socket, lostage,
+	       (int protocol_family, int socket_type, int protocol))
+{
+	return __rtdm_dev_socket(protocol_family, socket_type, protocol);
+}
+
+COBALT_SYSCALL(close, lostage, (int fd))
+{
+	return rtdm_fd_close(fd, 0);
+}
+
+COBALT_SYSCALL(fcntl, current, (int fd, int cmd, long arg))
+{
+	return rtdm_fd_fcntl(fd, cmd, arg);
+}
+
+COBALT_SYSCALL(ioctl, handover,
+	       (int fd, unsigned int request, void __user *arg))
+{
+	return rtdm_fd_ioctl(fd, request, arg);
+}
+
+COBALT_SYSCALL(read, handover,
+	       (int fd, void __user *buf, size_t size))
+{
+	return rtdm_fd_read(fd, buf, size);
+}
+
+COBALT_SYSCALL(write, handover,
+	       (int fd, const void __user *buf, size_t size))
+{
+	return rtdm_fd_write(fd, buf, size);
+}
+
+COBALT_SYSCALL(recvmsg, handover,
+	       (int fd, struct user_msghdr __user *umsg, int flags))
+{
+	struct user_msghdr m;
+	ssize_t ret;
+
+	ret = cobalt_copy_from_user(&m, umsg, sizeof(m));
+	if (ret)
+		return ret;
+
+	ret = rtdm_fd_recvmsg(fd, &m, flags);
+	if (ret < 0)
+		return ret;
+
+	return cobalt_copy_to_user(umsg, &m, sizeof(*umsg)) ?: ret;
+}
+
+static int get_timespec(struct timespec64 *ts,
+			const void __user *u_ts)
+{
+	return cobalt_get_u_timespec(ts, u_ts);
+}
+
+static int get_mmsg(struct mmsghdr *mmsg, void __user *u_mmsg)
+{
+	return cobalt_copy_from_user(mmsg, u_mmsg, sizeof(*mmsg));
+}
+
+static int put_mmsg(void __user **u_mmsg_p, const struct mmsghdr *mmsg)
+{
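+	/*
+	 * Store into the current slot, then advance the cursor so the
+	 * next call targets the following mmsghdr.
+	 */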
+	struct mmsghdr __user **p = (struct mmsghdr **)u_mmsg_p,
+		*q __user = (*p)++;
+
+	return cobalt_copy_to_user(q, mmsg, sizeof(*q));
+}
+
+COBALT_SYSCALL(recvmmsg, primary,
+	       (int fd, struct mmsghdr __user *u_msgvec, unsigned int vlen,
+		unsigned int flags, struct __user_old_timespec __user *u_timeout))
+{
+	return __rtdm_fd_recvmmsg(fd, u_msgvec, vlen, flags, u_timeout,
+				  get_mmsg, put_mmsg, get_timespec);
+}
+
+COBALT_SYSCALL(recvmmsg64, primary,
+	       (int fd, struct mmsghdr __user *u_msgvec, unsigned int vlen,
+		unsigned int flags, struct __kernel_timespec __user *u_timeout))
+{
+	return __rtdm_fd_recvmmsg64(fd, u_msgvec, vlen, flags, u_timeout,
+				    get_mmsg, put_mmsg);
+}
+
+COBALT_SYSCALL(sendmsg, handover,
+	       (int fd, struct user_msghdr __user *umsg, int flags))
+{
+	struct user_msghdr m;
+	int ret;
+
+	ret = cobalt_copy_from_user(&m, umsg, sizeof(m));
+
+	return ret ?: rtdm_fd_sendmsg(fd, &m, flags);
+}
+
+static int put_mmsglen(void __user **u_mmsg_p, const struct mmsghdr *mmsg)
+{
+	struct mmsghdr __user **p = (struct mmsghdr **)u_mmsg_p,
+		*q __user = (*p)++;
+
+	return __xn_put_user(mmsg->msg_len, &q->msg_len);
+}
+
+COBALT_SYSCALL(sendmmsg, primary,
+	       (int fd, struct mmsghdr __user *u_msgvec,
+		unsigned int vlen, unsigned int flags))
+{
+	return __rtdm_fd_sendmmsg(fd, u_msgvec, vlen, flags,
+				  get_mmsg, put_mmsglen);
+}
+
+COBALT_SYSCALL(mmap, lostage,
+	       (int fd, struct _rtdm_mmap_request __user *u_rma,
+	        void __user **u_addrp))
+{
+	struct _rtdm_mmap_request rma;
+	void *u_addr = NULL;
+	int ret;
+
+	ret = cobalt_copy_from_user(&rma, u_rma, sizeof(rma));
+	if (ret)
+		return ret;
+
+	ret = rtdm_fd_mmap(fd, &rma, &u_addr);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_addrp, &u_addr, sizeof(u_addr));
+}
+
+static int __cobalt_first_fd_valid_p(fd_set *fds[XNSELECT_MAX_TYPES], int nfds)
+{
+	int i, fd;
+
+	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
+		if (fds[i]
+		    && (fd = find_first_bit(fds[i]->fds_bits, nfds)) < nfds)
+			return rtdm_fd_valid_p(fd);
+
+	/* An all-empty set is valid; some applications use select() this
+	   way as a sleep mechanism. */
+	return 1;
+}
+
+static int __cobalt_select_bind_all(struct xnselector *selector,
+				    fd_set *fds[XNSELECT_MAX_TYPES], int nfds)
+{
+	bool first_fd = true;
+	unsigned fd, type;
+	int err;
+
+	for (type = 0; type < XNSELECT_MAX_TYPES; type++) {
+		fd_set *set = fds[type];
+		if (set)
+			for (fd = find_first_bit(set->fds_bits, nfds);
+			     fd < nfds;
+			     fd = find_next_bit(set->fds_bits, nfds, fd + 1)) {
+				err = rtdm_fd_select(fd, selector, type);
+				if (err) {
+					/*
+					 * Do not needlessly signal "retry
+					 * under Linux" for mixed fd sets.
+					 */
+					if (err == -EADV && !first_fd)
+						return -EBADF;
+					return err;
+				}
+				first_fd = false;
+			}
+	}
+
+	return 0;
+}
+
+int __cobalt_select(int nfds, void __user *u_rfds, void __user *u_wfds,
+		    void __user *u_xfds, void __user *u_tv, bool compat)
+{
+	void __user *ufd_sets[XNSELECT_MAX_TYPES] = {
+		[XNSELECT_READ] = u_rfds,
+		[XNSELECT_WRITE] = u_wfds,
+		[XNSELECT_EXCEPT] = u_xfds
+	};
+	fd_set *in_fds[XNSELECT_MAX_TYPES] = {NULL, NULL, NULL};
+	fd_set *out_fds[XNSELECT_MAX_TYPES] = {NULL, NULL, NULL};
+	fd_set in_fds_storage[XNSELECT_MAX_TYPES],
+		out_fds_storage[XNSELECT_MAX_TYPES];
+	xnticks_t timeout = XN_INFINITE;
+	struct restart_block *restart;
+	xntmode_t mode = XN_RELATIVE;
+	struct xnselector *selector;
+	struct xnthread *curr;
+	struct __kernel_old_timeval tv;
+	size_t fds_size;
+	int i, err;
+
+	curr = xnthread_current();
+
+	if (u_tv) {
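+		/*
+		 * When restarting after a Linux signal, reuse the
+		 * absolute deadline saved in the restart block instead
+		 * of re-reading the timeout from userland.
+		 */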
+		if (xnthread_test_localinfo(curr, XNSYSRST)) {
+			xnthread_clear_localinfo(curr, XNSYSRST);
+
+			restart = cobalt_get_restart_block(current);
+			timeout = restart->nanosleep.expires;
+
+			if (restart->fn != cobalt_restart_syscall_placeholder) {
+				err = -EINTR;
+				goto out;
+			}
+		} else {
+#ifdef CONFIG_XENO_ARCH_SYS3264
+			if (compat) {
+				if (sys32_get_timeval(&tv, u_tv))
+					return -EFAULT;
+			} else
+#endif
+			{
+				if (!access_wok(u_tv, sizeof(tv))
+				    || cobalt_copy_from_user(&tv, u_tv,
+							     sizeof(tv)))
+					return -EFAULT;
+			}
+
+			if (tv.tv_usec >= 1000000)
+				return -EINVAL;
+
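+			/*
+			 * Turn the relative timeval into an absolute
+			 * deadline on the monotonic clock, so the
+			 * remaining time can be recomputed on return
+			 * or after a restart.
+			 */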
+			timeout = clock_get_ticks(CLOCK_MONOTONIC) + tv2ns(&tv);
+		}
+
+		mode = XN_ABSOLUTE;
+	}
+
+	fds_size = __FDELT__(nfds + __NFDBITS__ - 1) * sizeof(long);
+
+	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
+		if (ufd_sets[i]) {
+			in_fds[i] = &in_fds_storage[i];
+			out_fds[i] = &out_fds_storage[i];
+#ifdef CONFIG_XENO_ARCH_SYS3264
+			if (compat) {
+				if (sys32_get_fdset(in_fds[i], ufd_sets[i],
+						    fds_size))
+					return -EFAULT;
+			} else
+#endif
+			{
+				if (!access_wok((void __user *) ufd_sets[i],
+						sizeof(fd_set))
+				    || cobalt_copy_from_user(in_fds[i],
+							     (void __user *)ufd_sets[i],
+							     fds_size))
+					return -EFAULT;
+			}
+		}
+
+	selector = curr->selector;
+	if (!selector) {
+		/* This function may be called with fd_sets containing only
+		   plain Linux descriptors; to avoid allocating an xnselector
+		   in that case, we apply a simple test: check whether the
+		   first file descriptor found in the fd_set is an RTDM or
+		   message queue descriptor. */
+		if (!__cobalt_first_fd_valid_p(in_fds, nfds))
+			return -EADV;
+
+		selector = xnmalloc(sizeof(*curr->selector));
+		if (selector == NULL)
+			return -ENOMEM;
+		xnselector_init(selector);
+		curr->selector = selector;
+
+		/* Bind the file descriptors directly; this way we do not need
+		   to go through xnselect() returning -ECHRNG first. */
+		err = __cobalt_select_bind_all(selector, in_fds, nfds);
+		if (err)
+			return err;
+	}
+
+	do {
+		err = xnselect(selector, out_fds, in_fds, nfds, timeout, mode);
+		if (err == -ECHRNG) {
+			int bind_err = __cobalt_select_bind_all(selector,
+								out_fds, nfds);
+			if (bind_err)
+				return bind_err;
+		}
+	} while (err == -ECHRNG);
+
+	if (err == -EINTR && signal_pending(current)) {
+		xnthread_set_localinfo(curr, XNSYSRST);
+
+		restart = cobalt_get_restart_block(current);
+		restart->fn = cobalt_restart_syscall_placeholder;
+		restart->nanosleep.expires = timeout;
+
+		return -ERESTARTSYS;
+	}
+
+out:
+	if (u_tv && (err > 0 || err == -EINTR)) {
+		xnsticks_t diff = timeout - clock_get_ticks(CLOCK_MONOTONIC);
+		if (diff > 0)
+			ticks2tv(&tv, diff);
+		else
+			tv.tv_sec = tv.tv_usec = 0;
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+		if (compat) {
+			if (sys32_put_timeval(u_tv, &tv))
+				return -EFAULT;
+		} else
+#endif
+		{
+			if (cobalt_copy_to_user(u_tv, &tv, sizeof(tv)))
+				return -EFAULT;
+		}
+	}
+
+	if (err >= 0)
+		for (i = 0; i < XNSELECT_MAX_TYPES; i++) {
+			if (!ufd_sets[i])
+				continue;
+#ifdef CONFIG_XENO_ARCH_SYS3264
+			if (compat) {
+				if (sys32_put_fdset(ufd_sets[i], out_fds[i],
+						    sizeof(fd_set)))
+					return -EFAULT;
+			} else
+#endif
+			{
+				if (cobalt_copy_to_user((void __user *)ufd_sets[i],
+							out_fds[i], sizeof(fd_set)))
+					return -EFAULT;
+			}
+		}
+	return err;
+}
+
+/* int select(int, fd_set *, fd_set *, fd_set *, struct __kernel_old_timeval *) */
+COBALT_SYSCALL(select, primary,
+	       (int nfds,
+		fd_set __user *u_rfds,
+		fd_set __user *u_wfds,
+		fd_set __user *u_xfds,
+		struct __kernel_old_timeval __user *u_tv))
+{
+	return __cobalt_select(nfds, u_rfds, u_wfds, u_xfds, u_tv, false);
+}
+++ linux-patched/kernel/xenomai/posix/extension.h	2022-03-21 12:58:29.047892296 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/sem.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_EXTENSION_H
+#define _COBALT_POSIX_EXTENSION_H
+
+#include <linux/time.h>
+#include <linux/list.h>
+
+#ifdef CONFIG_XENO_OPT_COBALT_EXTENSION
+
+#include <cobalt/kernel/thread.h>
+
+struct cobalt_timer;
+struct cobalt_sigpending;
+struct cobalt_extref;
+struct siginfo;
+struct xnsched_class;
+union xnsched_policy_param;
+
+struct cobalt_extension {
+	struct xnthread_personality core;
+	struct {
+		struct cobalt_thread *
+		(*timer_init)(struct cobalt_extref *reftimer, /* nklocked, IRQs off. */
+			      const struct sigevent *__restrict__ evp);
+		int (*timer_settime)(struct cobalt_extref *reftimer, /* nklocked, IRQs off. */
+				     const struct itimerspec64 *__restrict__ value,
+				     int flags);
+		int (*timer_gettime)(struct cobalt_extref *reftimer, /* nklocked, IRQs off. */
+				     struct itimerspec64 *__restrict__ value);
+		int (*timer_delete)(struct cobalt_extref *reftimer); /* nklocked, IRQs off. */
+		int (*timer_cleanup)(struct cobalt_extref *reftimer); /* nklocked, IRQs off. */
+		int (*signal_deliver)(struct cobalt_extref *refthread,
+				      struct siginfo *si,
+				      struct cobalt_sigpending *sigp);
+		int (*signal_queue)(struct cobalt_extref *refthread,
+				    struct cobalt_sigpending *sigp);
+		int (*signal_copyinfo)(struct cobalt_extref *refthread,
+				       void __user *u_si,
+				       const struct siginfo *si,
+				       int overrun);
+		int (*signal_copyinfo_compat)(struct cobalt_extref *refthread,
+					      void __user *u_si,
+					      const struct siginfo *si,
+					      int overrun);
+		int (*sched_yield)(struct cobalt_extref *curref);
+		int (*thread_setsched)(struct cobalt_extref *refthread, /* nklocked, IRQs off. */
+				       struct xnsched_class *sched_class,
+				       union xnsched_policy_param *param);
+	} ops;
+};
+
+struct cobalt_extref {
+	struct cobalt_extension *extension;
+	struct list_head next;
+	void *private;
+};
+
+static inline void cobalt_set_extref(struct cobalt_extref *ref,
+				     struct cobalt_extension *ext,
+				     void *priv)
+{
+	ref->extension = ext;
+	ref->private = priv;
+}
+
+/**
+ * All macros return non-zero if some thread-level extension code was
+ * called, leaving the output value in __ret. Otherwise, the __ret
+ * value is undefined.
+ */
+#define cobalt_initcall_extension(__extfn, __extref, __owner, __ret, __args...) \
+	({									\
+		int __val = 0;							\
+		if ((__owner) && (__owner)->extref.extension) {			\
+			(__extref)->extension = (__owner)->extref.extension;	\
+			if ((__extref)->extension->ops.__extfn) {		\
+				(__ret) = (__extref)->extension->ops.		\
+					__extfn(__extref, ##__args );		\
+				__val = 1;					\
+			}							\
+		} else								\
+			(__extref)->extension = NULL;				\
+		__val;								\
+	})
+		
+#define cobalt_call_extension(__extfn, __extref, __ret, __args...)	\
+	({								\
+		int __val = 0;						\
+		if ((__extref)->extension &&				\
+		    (__extref)->extension->ops.__extfn) {		\
+			(__ret) = (__extref)->extension->ops.		\
+				__extfn(__extref, ##__args );		\
+			__val = 1;					\
+		}							\
+		__val;							\
+	})
+
+#else /* !CONFIG_XENO_OPT_COBALT_EXTENSION */
+
+struct cobalt_extension;
+
+struct cobalt_extref {
+};
+
+static inline void cobalt_set_extref(struct cobalt_extref *ref,
+				     struct cobalt_extension *ext,
+				     void *priv)
+{
+}
+
+#define cobalt_initcall_extension(__extfn, __extref, __owner, __ret, __args...)	\
+	({ (void)(__owner); (void)(__ret); 0; })
+
+#define cobalt_call_extension(__extfn, __extref, __ret, __args...)	\
+	({ (void)(__ret); 0; })
+
+#endif /* !CONFIG_XENO_OPT_COBALT_EXTENSION */
+
+#endif /* !_COBALT_POSIX_EXTENSION_H */
+++ linux-patched/kernel/xenomai/posix/sem.c	2022-03-21 12:58:29.043892335 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/timer.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ * Copyright (C) 2014,2015 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <stddef.h>
+#include <linux/err.h>
+#include <cobalt/kernel/time.h>
+#include "internal.h"
+#include "thread.h"
+#include "clock.h"
+#include "sem.h"
+#include <trace/events/cobalt-posix.h>
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static int sem_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	return 0;
+}
+
+static struct xnvfile_regular_ops sem_vfile_ops = {
+	.show = sem_vfile_show,
+};
+
+static struct xnpnode_regular __sem_pnode = {
+	.node = {
+		.dirname = "sem",
+		.root = &posix_ptree,
+		.ops = &xnregistry_vfreg_ops,
+	},
+	.vfile = {
+		.ops = &sem_vfile_ops,
+	},
+};
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+static struct xnpnode_link __sem_pnode = {
+	.node = {
+		.dirname = "sem",
+	}
+};
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+static inline struct cobalt_resources *sem_kqueue(struct cobalt_sem *sem)
+{
+	int pshared = !!(sem->flags & SEM_PSHARED);
+	return cobalt_current_resources(pshared);
+}
+
+static inline int sem_check(struct cobalt_sem *sem)
+{
+	if (sem == NULL || sem->magic != COBALT_SEM_MAGIC)
+		return -EINVAL;
+
+	if (sem->resnode.scope && sem->resnode.scope != sem_kqueue(sem))
+		return -EPERM;
+
+	return 0;
+}
+
+int __cobalt_sem_destroy(xnhandle_t handle)
+{
+	struct cobalt_sem *sem;
+	int ret = 0;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	sem = xnregistry_lookup(handle, NULL);
+	if (!cobalt_obj_active(sem, COBALT_SEM_MAGIC, typeof(*sem))) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (--sem->refs) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	cobalt_mark_deleted(sem);
+	if (!sem->pathname)
+		cobalt_del_resource(&sem->resnode);
+	if (xnsynch_destroy(&sem->synchbase) == XNSYNCH_RESCHED) {
+		xnsched_run();
+		ret = 1;
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	xnregistry_remove(sem->resnode.handle);
+	if (sem->pathname)
+		putname(sem->pathname);
+
+	cobalt_umm_free(&cobalt_ppd_get(!!(sem->flags & SEM_PSHARED))->umm,
+			sem->state);
+
+	xnfree(sem);
+
+	return ret;
+fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+struct cobalt_sem *
+__cobalt_sem_init(const char *name, struct cobalt_sem_shadow *sm,
+		  int flags, unsigned int value)
+{
+	struct cobalt_sem_state *state;
+	struct cobalt_sem *sem, *osem;
+	struct cobalt_ppd *sys_ppd;
+	int ret, sflags, pshared;
+	struct list_head *semq;
+	spl_t s;
+
+	if ((flags & SEM_PULSE) != 0 && value > 0) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	sem = xnmalloc(sizeof(*sem));
+	if (sem == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	pshared = !!(flags & SEM_PSHARED);
+	sys_ppd = cobalt_ppd_get(pshared);
+	state = cobalt_umm_alloc(&sys_ppd->umm, sizeof(*state));
+	if (state == NULL) {
+		ret = -EAGAIN;
+		goto err_free_sem;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+
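+	/*
+	 * If the shadow still refers to a live semaphore, refuse to
+	 * re-initialize it.
+	 */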
+	semq = &cobalt_current_resources(pshared)->semq;
+	if ((sm->magic == COBALT_SEM_MAGIC && !list_empty(semq)) ||
+	    sm->magic == COBALT_NAMED_SEM_MAGIC) {
+		osem = xnregistry_lookup(sm->handle, NULL);
+		if (cobalt_obj_active(osem, COBALT_SEM_MAGIC, typeof(*osem))) {
+			ret = -EBUSY;
+			goto err_lock_put;
+		}
+	}
+
+	if (value > (unsigned)SEM_VALUE_MAX) {
+		ret = -EINVAL;
+		goto err_lock_put;
+	}
+
+	ret = xnregistry_enter(name ?: "", sem, &sem->resnode.handle,
+			       name ? &__sem_pnode.node : NULL);
+	if (ret < 0)
+		goto err_lock_put;
+
+	sem->magic = COBALT_SEM_MAGIC;
+	if (!name)
+		cobalt_add_resource(&sem->resnode, sem, pshared);
+	else
+		sem->resnode.scope = NULL;
+	sflags = flags & SEM_FIFO ? 0 : XNSYNCH_PRIO;
+	xnsynch_init(&sem->synchbase, sflags, NULL);
+
+	sem->state = state;
+	atomic_set(&state->value, value);
+	state->flags = flags;
+	sem->flags = flags;
+	sem->refs = name ? 2 : 1;
+	sem->pathname = NULL;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	__cobalt_sem_shadow_init(sem,
+			name ? COBALT_NAMED_SEM_MAGIC : COBALT_SEM_MAGIC, sm);
+
+	trace_cobalt_psem_init(name ?: "anon",
+			       sem->resnode.handle, flags, value);
+
+	return sem;
+
+err_lock_put:
+	xnlock_put_irqrestore(&nklock, s);
+	cobalt_umm_free(&sys_ppd->umm, state);
+err_free_sem:
+	xnfree(sem);
+out:
+	trace_cobalt_psem_init_failed(name ?: "anon", flags, value, ret);
+
+	return ERR_PTR(ret);
+}
+
+void __cobalt_sem_shadow_init(struct cobalt_sem *sem, __u32 magic,
+			      struct cobalt_sem_shadow *sm)
+{
+	__u32 flags = sem->state->flags;
+	struct cobalt_ppd *sys_ppd;
+
+	sys_ppd = cobalt_ppd_get(!!(flags & SEM_PSHARED));
+
+	sm->magic = magic;
+	sm->handle = sem->resnode.handle;
+	sm->state_offset = cobalt_umm_offset(&sys_ppd->umm, sem->state);
+	if (sem->state->flags & SEM_PSHARED)
+		sm->state_offset = -sm->state_offset;
+}
+
+static int sem_destroy(struct cobalt_sem_shadow *sm)
+{
+	struct cobalt_sem *sem;
+	int warn, ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (sm->magic != COBALT_SEM_MAGIC) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	sem = xnregistry_lookup(sm->handle, NULL);
+	ret = sem_check(sem);
+	if (ret)
+		goto fail;
+
+	if ((sem->flags & SEM_NOBUSYDEL) != 0 &&
+	    xnsynch_pended_p(&sem->synchbase)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	warn = sem->flags & SEM_WARNDEL;
+	cobalt_mark_deleted(sm);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	ret = __cobalt_sem_destroy(sem->resnode.handle);
+
+	return warn ? ret : 0;
+fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+static inline int do_trywait(struct cobalt_sem *sem)
+{
+	int ret;
+	
+	ret = sem_check(sem);
+	if (ret)
+		return ret;
+
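+	/* Grab one unit; a negative result means none was available. */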
+	if (atomic_sub_return(1, &sem->state->value) < 0)
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int sem_wait(xnhandle_t handle)
+{
+	struct cobalt_sem *sem;
+	int ret, info;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sem = xnregistry_lookup(handle, NULL);
+	ret = do_trywait(sem);
+	if (ret != -EAGAIN)
+		goto out;
+
+	ret = 0;
+	info = xnsynch_sleep_on(&sem->synchbase, XN_INFINITE, XN_RELATIVE);
+	if (info & XNRMID) {
+		ret = -EINVAL;
+	} else if (info & XNBREAK) {
+		atomic_inc(&sem->state->value); /* undo do_trywait() */
+		ret = -EINTR;
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+int __cobalt_sem_timedwait(struct cobalt_sem_shadow __user *u_sem,
+			   const struct timespec64 *ts)
+{
+	int ret, info;
+	bool validate_ts = true;
+	struct cobalt_sem *sem;
+	xnhandle_t handle;
+	xntmode_t tmode;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+	trace_cobalt_psem_timedwait(handle);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	for (;;) {
+		sem = xnregistry_lookup(handle, NULL);
+		ret = do_trywait(sem);
+		if (ret != -EAGAIN)
+			break;
+
+		/*
+		 * POSIX states that the validity of the timeout spec
+		 * _need_ not be checked if the semaphore can be
+		 * locked immediately. We preserve this behavior, even
+		 * though it is actually more complex, to keep some
+		 * applications ported to Linux happy.
+		 */
+		if (validate_ts) {
+			atomic_inc(&sem->state->value);
+			if (!ts) {
+				ret = -EFAULT;
+				break;
+			}
+			if (!timespec64_valid(ts)) {
+				ret = -EINVAL;
+				break;
+			}
+			validate_ts = false;
+			continue;
+		}
+
+		ret = 0;
+		tmode = sem->flags & SEM_RAWCLOCK ? XN_ABSOLUTE : XN_REALTIME;
+		info = xnsynch_sleep_on(&sem->synchbase, ts2ns(ts) + 1, tmode);
+		if (info & XNRMID)
+			ret = -EINVAL;
+		else if (info & (XNBREAK|XNTIMEO)) {
+			ret = (info & XNBREAK) ? -EINTR : -ETIMEDOUT;
+			atomic_inc(&sem->state->value);
+		}
+		break;
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+int __cobalt_sem_timedwait64(struct cobalt_sem_shadow __user *u_sem,
+			     const struct __kernel_timespec __user *u_ts)
+{
+	int ret = 1;
+	struct timespec64 ts64;
+
+	if (u_ts)
+		ret = cobalt_get_timespec64(&ts64, u_ts);
+
+	return __cobalt_sem_timedwait(u_sem, ret ? NULL : &ts64);
+}
+
+static int sem_post(xnhandle_t handle)
+{
+	struct cobalt_sem *sem;
+	int ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sem = xnregistry_lookup(handle, NULL);
+	ret = sem_check(sem);
+	if (ret)
+		goto out;
+
+	if (atomic_read(&sem->state->value) == SEM_VALUE_MAX) {
+		ret = -EINVAL;
+		goto out;
+	}
+
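+	/*
+	 * A non-positive count after increment means sleepers are
+	 * pending: wake up the first one. A pulse semaphore never
+	 * retains a positive count.
+	 */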
+	if (atomic_inc_return(&sem->state->value) <= 0) {
+		if (xnsynch_wakeup_one_sleeper(&sem->synchbase))
+			xnsched_run();
+	} else if (sem->flags & SEM_PULSE)
+		atomic_set(&sem->state->value, 0);
+out:	
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+static int sem_getvalue(xnhandle_t handle, int *value)
+{
+	struct cobalt_sem *sem;
+	int ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sem = xnregistry_lookup(handle, NULL);
+	ret = sem_check(sem);
+	if (ret) {
+		xnlock_put_irqrestore(&nklock, s);
+		return ret;
+	}
+
+	*value = atomic_read(&sem->state->value);
+	if ((sem->flags & SEM_REPORT) == 0 && *value < 0)
+		*value = 0;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+COBALT_SYSCALL(sem_init, current,
+	       (struct cobalt_sem_shadow __user *u_sem,
+		int flags, unsigned int value))
+{
+	struct cobalt_sem_shadow sm;
+	struct cobalt_sem *sem;
+
+	if (cobalt_copy_from_user(&sm, u_sem, sizeof(sm)))
+		return -EFAULT;
+
+	if (flags & ~(SEM_FIFO|SEM_PULSE|SEM_PSHARED|SEM_REPORT|\
+		      SEM_WARNDEL|SEM_RAWCLOCK|SEM_NOBUSYDEL))
+		return -EINVAL;
+
+	sem = __cobalt_sem_init(NULL, &sm, flags, value);
+	if (IS_ERR(sem))
+		return PTR_ERR(sem);
+
+	return cobalt_copy_to_user(u_sem, &sm, sizeof(*u_sem));
+}
+
+COBALT_SYSCALL(sem_post, current,
+	       (struct cobalt_sem_shadow __user *u_sem))
+{
+	xnhandle_t handle;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+	trace_cobalt_psem_post(handle);
+
+	return sem_post(handle);
+}
+
+COBALT_SYSCALL(sem_wait, primary,
+	       (struct cobalt_sem_shadow __user *u_sem))
+{
+	xnhandle_t handle;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+	trace_cobalt_psem_wait(handle);
+
+	return sem_wait(handle);
+}
+
+COBALT_SYSCALL(sem_timedwait, primary,
+	       (struct cobalt_sem_shadow __user *u_sem,
+		const struct __user_old_timespec __user *u_ts))
+{
+	int ret = 1;
+	struct timespec64 ts64;
+
+	if (u_ts)
+		ret = cobalt_get_u_timespec(&ts64, u_ts);
+
+	return __cobalt_sem_timedwait(u_sem, ret ? NULL : &ts64);
+}
+
+COBALT_SYSCALL(sem_timedwait64, primary,
+	       (struct cobalt_sem_shadow __user *u_sem,
+		const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_sem_timedwait64(u_sem, u_ts);
+}
+
+COBALT_SYSCALL(sem_trywait, primary,
+	       (struct cobalt_sem_shadow __user *u_sem))
+{
+	struct cobalt_sem *sem;
+	xnhandle_t handle;
+	int ret;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+	trace_cobalt_psem_trywait(handle);
+
+	xnlock_get_irqsave(&nklock, s);
+	sem = xnregistry_lookup(handle, NULL);
+	ret = do_trywait(sem);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sem_getvalue, current,
+	       (struct cobalt_sem_shadow __user *u_sem,
+		int __user *u_sval))
+{
+	int ret, sval = -1;
+	xnhandle_t handle;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+
+	ret = sem_getvalue(handle, &sval);
+	trace_cobalt_psem_getvalue(handle, sval);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_sval, &sval, sizeof(sval));
+}
+
+COBALT_SYSCALL(sem_destroy, current,
+	       (struct cobalt_sem_shadow __user *u_sem))
+{
+	struct cobalt_sem_shadow sm;
+	int err;
+
+	if (cobalt_copy_from_user(&sm, u_sem, sizeof(sm)))
+		return -EFAULT;
+
+	trace_cobalt_psem_destroy(sm.handle);
+
+	err = sem_destroy(&sm);
+	if (err < 0)
+		return err;
+
+	return cobalt_copy_to_user(u_sem, &sm, sizeof(*u_sem)) ?: err;
+}
+
+COBALT_SYSCALL(sem_broadcast_np, current,
+	       (struct cobalt_sem_shadow __user *u_sem))
+{
+	struct cobalt_sem *sem;
+	xnhandle_t handle;
+	spl_t s;
+	int ret;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+	trace_cobalt_psem_broadcast(handle);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sem = xnregistry_lookup(handle, NULL);
+	ret = sem_check(sem);
+	if (ret == 0 && atomic_read(&sem->state->value) < 0) {
+		atomic_set(&sem->state->value, 0);
+		xnsynch_flush(&sem->synchbase, 0);
+		xnsched_run();
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sem_inquire, current,
+	       (struct cobalt_sem_shadow __user *u_sem,
+		struct cobalt_sem_info __user *u_info,
+		pid_t __user *u_waitlist,
+		size_t waitsz))
+{
+	int val = 0, nrwait = 0, nrpids, ret = 0;
+	unsigned long pstamp, nstamp = 0;
+	struct cobalt_sem_info info;
+	pid_t *t = NULL, fbuf[16];
+	struct xnthread *thread;
+	struct cobalt_sem *sem;
+	xnhandle_t handle;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+	trace_cobalt_psem_inquire(handle);
+
+	nrpids = waitsz / sizeof(pid_t);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	for (;;) {
+		pstamp = nstamp;
+		sem = xnregistry_lookup(handle, &nstamp);
+		if (sem == NULL || sem->magic != COBALT_SEM_MAGIC) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EINVAL;
+		}
+		/*
+		 * Allocate memory to return the wait list without
+		 * holding any lock, then revalidate the handle.
+		 */
+		if (t == NULL) {
+			val = atomic_read(&sem->state->value);
+			if (val >= 0 || u_waitlist == NULL)
+				break;
+			xnlock_put_irqrestore(&nklock, s);
+			if (nrpids > -val)
+				nrpids = -val;
+			if (-val <= ARRAY_SIZE(fbuf))
+				t = fbuf; /* Use fast buffer. */
+			else {
+				t = xnmalloc(-val * sizeof(pid_t));
+				if (t == NULL)
+					return -ENOMEM;
+			}
+			xnlock_get_irqsave(&nklock, s);
+		} else if (pstamp == nstamp)
+			break;
+		else if (val != atomic_read(&sem->state->value)) {
+			xnlock_put_irqrestore(&nklock, s);
+			if (t != fbuf)
+				xnfree(t);
+			t = NULL;
+			xnlock_get_irqsave(&nklock, s);
+		}
+	}
+
+	info.flags = sem->flags;
+	info.value = (sem->flags & SEM_REPORT) || val >= 0 ? val : 0;
+	info.nrwait = val < 0 ? -val : 0;
+
+	if (xnsynch_pended_p(&sem->synchbase) && u_waitlist != NULL) {
+		xnsynch_for_each_sleeper(thread, &sem->synchbase) {
+			if (nrwait >= nrpids)
+				break;
+			t[nrwait++] = xnthread_host_pid(thread);
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	ret = cobalt_copy_to_user(u_info, &info, sizeof(info));
+	if (ret == 0 && nrwait > 0)
+		ret = cobalt_copy_to_user(u_waitlist, t, nrwait * sizeof(pid_t));
+
+	if (t && t != fbuf)
+		xnfree(t);
+
+	return ret ?: nrwait;
+}
+
+void cobalt_sem_reclaim(struct cobalt_resnode *node, spl_t s)
+{
+	struct cobalt_sem *sem;
+	xnhandle_t handle;
+	int named, ret;
+
+	sem = container_of(node, struct cobalt_sem, resnode);
+	named = (sem->flags & SEM_NAMED) != 0;
+	handle = node->handle;
+	xnlock_put_irqrestore(&nklock, s);
+	ret = __cobalt_sem_destroy(handle);
+	if (named && ret == -EBUSY)
+		xnregistry_unlink(xnregistry_key(handle));
+}
+++ linux-patched/kernel/xenomai/posix/timer.h	2022-03-21 12:58:29.040892365 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/corectl.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_TIMER_H
+#define _COBALT_POSIX_TIMER_H
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/list.h>
+#include <cobalt/kernel/timer.h>
+#include <xenomai/posix/signal.h>
+#include <xenomai/posix/syscall.h>
+
+struct cobalt_timer {
+	struct xntimer timerbase;
+	timer_t id;
+	int overruns;
+	clockid_t clockid;
+	pid_t target;
+	struct cobalt_sigpending sigp;
+	struct cobalt_extref extref;
+};
+
+int cobalt_timer_deliver(struct cobalt_thread *waiter,
+			 timer_t timerid);
+
+void cobalt_timer_reclaim(struct cobalt_process *p);
+
+static inline timer_t cobalt_timer_id(const struct cobalt_timer *timer)
+{
+	return timer->id;
+}
+
+struct cobalt_timer *
+cobalt_timer_by_id(struct cobalt_process *p, timer_t timer_id);
+
+void cobalt_timer_handler(struct xntimer *xntimer);
+
+void __cobalt_timer_getval(struct xntimer *__restrict__ timer, 
+			   struct itimerspec64 *__restrict__ value);
+
+int __cobalt_timer_setval(struct xntimer *__restrict__ timer, int clock_flag, 
+			  const struct itimerspec64 *__restrict__ value);
+
+int __cobalt_timer_create(clockid_t clock,
+			  const struct sigevent *sev,
+			  timer_t __user *u_tm);
+
+int __cobalt_timer_settime(timer_t timerid, int flags,
+			   const struct itimerspec64 *__restrict__ value,
+			   struct itimerspec64 *__restrict__ ovalue);
+
+int __cobalt_timer_gettime(timer_t timerid, struct itimerspec64 *value);
+
+COBALT_SYSCALL_DECL(timer_create,
+		    (clockid_t clock,
+		     const struct sigevent __user *u_sev,
+		     timer_t __user *u_tm));
+
+COBALT_SYSCALL_DECL(timer_delete, (timer_t tm));
+
+COBALT_SYSCALL_DECL(timer_settime,
+		    (timer_t tm, int flags,
+		     const struct __user_old_itimerspec __user *u_newval,
+		     struct __user_old_itimerspec __user *u_oldval));
+
+COBALT_SYSCALL_DECL(timer_gettime,
+		    (timer_t tm, struct __user_old_itimerspec __user *u_val));
+
+COBALT_SYSCALL_DECL(timer_getoverrun, (timer_t tm));
+
+#endif /* !_COBALT_POSIX_TIMER_H */
+++ linux-patched/kernel/xenomai/posix/corectl.c	2022-03-21 12:58:29.036892404 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/timerfd.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+#include <linux/atomic.h>
+#include <linux/printk.h>
+#include <cobalt/kernel/init.h>
+#include <cobalt/kernel/thread.h>
+#include <xenomai/version.h>
+#include <pipeline/tick.h>
+#include <asm/xenomai/syscall.h>
+#include "corectl.h"
+
+static BLOCKING_NOTIFIER_HEAD(config_notifier_list);
+
+static int do_conf_option(int option, void __user *u_buf, size_t u_bufsz)
+{
+	struct cobalt_config_vector vec;
+	int ret, val = 0;
+
+	if (option <= _CC_COBALT_GET_CORE_STATUS && u_bufsz < sizeof(val))
+		return -EINVAL;
+
+	switch (option) {
+	case _CC_COBALT_GET_VERSION:
+		val = XENO_VERSION_CODE;
+		break;
+	case _CC_COBALT_GET_NR_PIPES:
+#ifdef CONFIG_XENO_OPT_PIPE
+		val = CONFIG_XENO_OPT_PIPE_NRDEV;
+#endif
+		break;
+	case _CC_COBALT_GET_NR_TIMERS:
+		val = CONFIG_XENO_OPT_NRTIMERS;
+		break;
+	case _CC_COBALT_GET_POLICIES:
+		val = _CC_COBALT_SCHED_FIFO|_CC_COBALT_SCHED_RR;
+		if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_WEAK))
+			val |= _CC_COBALT_SCHED_WEAK;
+		if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_SPORADIC))
+			val |= _CC_COBALT_SCHED_SPORADIC;
+		if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_QUOTA))
+			val |= _CC_COBALT_SCHED_QUOTA;
+		if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_TP))
+			val |= _CC_COBALT_SCHED_TP;
+		break;
+	case _CC_COBALT_GET_DEBUG:
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_COBALT))
+			val |= _CC_COBALT_DEBUG_ASSERT;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_CONTEXT))
+			val |= _CC_COBALT_DEBUG_CONTEXT;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_LOCKING))
+			val |= _CC_COBALT_DEBUG_LOCKING;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_USER))
+			val |= _CC_COBALT_DEBUG_USER;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED))
+			val |= _CC_COBALT_DEBUG_MUTEX_RELAXED;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP))
+			val |= _CC_COBALT_DEBUG_MUTEX_SLEEP;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_LEGACY))
+			val |= _CC_COBALT_DEBUG_LEGACY;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_TRACE_RELAX))
+			val |= _CC_COBALT_DEBUG_TRACE_RELAX;
+		if (IS_ENABLED(CONFIG_XENO_DRIVERS_RTNET_CHECKED))
+			val |= _CC_COBALT_DEBUG_NET;
+		break;
+	case _CC_COBALT_GET_WATCHDOG:
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+		val = CONFIG_XENO_OPT_WATCHDOG_TIMEOUT;
+#endif
+		break;
+	case _CC_COBALT_GET_CORE_STATUS:
+		val = realtime_core_state();
+		break;
+	default:
+		if (is_primary_domain())
+			/* Switch to secondary mode first. */
+			return -ENOSYS;
+		vec.u_buf = u_buf;
+		vec.u_bufsz = u_bufsz;
+		ret = blocking_notifier_call_chain(&config_notifier_list,
+						   option, &vec);
+		if (ret == NOTIFY_DONE)
+			return -EINVAL; /* Nobody cared. */
+		return notifier_to_errno(ret);
+	}
+
+	ret = cobalt_copy_to_user(u_buf, &val, sizeof(val));
+
+	return ret ? -EFAULT : 0;
+}
+
+static int stop_services(const void __user *u_buf, size_t u_bufsz)
+{
+	const u32 final_grace_period = 3; /* seconds */
+	enum cobalt_run_states state;
+	__u32 grace_period;
+	int ret;
+
+	/*
+	 * XXX: we have no syscall for unbinding a thread from the
+	 * Cobalt core, so we prevent real-time threads from stopping
+	 * Cobalt services; i.e. _CC_COBALT_STOP_CORE must be issued
+	 * from a plain, regular Linux thread.
+	 */
+	if (xnthread_current())
+		return -EPERM;
+
+	if (u_bufsz != sizeof(__u32))
+		return -EINVAL;
+
+	ret = cobalt_copy_from_user(&grace_period,
+				    u_buf, sizeof(grace_period));
+	if (ret)
+		return ret;
+
+	state = atomic_cmpxchg(&cobalt_runstate,
+			       COBALT_STATE_RUNNING,
+			       COBALT_STATE_TEARDOWN);
+	switch (state) {
+	case COBALT_STATE_STOPPED:
+		break;
+	case COBALT_STATE_RUNNING:
+		/* Kill user threads. */
+		ret = xnthread_killall(grace_period, XNUSER);
+		if (ret) {
+			set_realtime_core_state(state);
+			return ret;
+		}
+		cobalt_call_state_chain(COBALT_STATE_TEARDOWN);
+		/* Kill lingering RTDM tasks. */
+		ret = xnthread_killall(final_grace_period, 0);
+		if (ret == -EAGAIN)
+			printk(XENO_WARNING "some RTDM tasks won't stop\n");
+		pipeline_uninstall_tick_proxy();
+		set_realtime_core_state(COBALT_STATE_STOPPED);
+		printk(XENO_INFO "services stopped\n");
+		break;
+	default:
+		ret = -EINPROGRESS;
+	}
+
+	return ret;
+}
+
+static int start_services(void)
+{
+	enum cobalt_run_states state;
+	int ret = 0;
+
+	state = atomic_cmpxchg(&cobalt_runstate,
+			       COBALT_STATE_STOPPED,
+			       COBALT_STATE_WARMUP);
+	switch (state) {
+	case COBALT_STATE_RUNNING:
+		break;
+	case COBALT_STATE_STOPPED:
+		pipeline_install_tick_proxy();
+		cobalt_call_state_chain(COBALT_STATE_WARMUP);
+		set_realtime_core_state(COBALT_STATE_RUNNING);
+		printk(XENO_INFO "services started\n");
+		break;
+	default:
+		ret = -EINPROGRESS;
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL(corectl, probing,
+	       (int request, void __user *u_buf, size_t u_bufsz))
+{
+	int ret;
+	
+	switch (request) {
+	case _CC_COBALT_STOP_CORE:
+		ret = stop_services(u_buf, u_bufsz);
+		break;
+	case _CC_COBALT_START_CORE:
+		ret = start_services();
+		break;
+	default:
+		ret = do_conf_option(request, u_buf, u_bufsz);
+	}
+	
+	return ret;
+}
+
+void cobalt_add_config_chain(struct notifier_block *nb)
+{
+	blocking_notifier_chain_register(&config_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(cobalt_add_config_chain);
+
+void cobalt_remove_config_chain(struct notifier_block *nb)
+{
+	blocking_notifier_chain_unregister(&config_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(cobalt_remove_config_chain);
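+
+/*
+ * A minimal, hypothetical sketch of a driver answering a private
+ * corectl() request through the notifier chain registered above.
+ * _CC_MYDRV_GET_FEATURES and the mydrv_* names are invented for the
+ * example; the callback signature and the cobalt_config_vector layout
+ * follow do_conf_option(). Returning NOTIFY_DONE passes the request on,
+ * NOTIFY_STOP reports success, and notifier_from_errno() reports a
+ * failure back to the caller.
+ *
+ *	static int mydrv_corectl(struct notifier_block *nb,
+ *				 unsigned long option, void *data)
+ *	{
+ *		struct cobalt_config_vector *vec = data;
+ *		int val = 1;
+ *
+ *		if (option != _CC_MYDRV_GET_FEATURES)
+ *			return NOTIFY_DONE;
+ *		if (vec->u_bufsz < sizeof(val))
+ *			return notifier_from_errno(-EINVAL);
+ *		if (cobalt_copy_to_user(vec->u_buf, &val, sizeof(val)))
+ *			return notifier_from_errno(-EFAULT);
+ *		return NOTIFY_STOP;
+ *	}
+ *
+ *	static struct notifier_block mydrv_notifier = {
+ *		.notifier_call = mydrv_corectl,
+ *	};
+ *
+ * The block would be hooked with cobalt_add_config_chain(&mydrv_notifier)
+ * at module init time and removed with cobalt_remove_config_chain() on
+ * module exit.
+ */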
+++ linux-patched/kernel/xenomai/posix/timerfd.c	2022-03-21 12:58:29.033892433 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/cond.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/timerfd.h>
+#include <linux/err.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/select.h>
+#include <rtdm/fd.h>
+#include "internal.h"
+#include "clock.h"
+#include "timer.h"
+#include "timerfd.h"
+
+struct cobalt_tfd {
+	int flags;
+	clockid_t clockid;
+	struct rtdm_fd fd;
+	struct xntimer timer;
+	DECLARE_XNSELECT(read_select);
+	struct itimerspec64 value;
+	struct xnsynch readers;
+	struct xnthread *target;
+};
+
+#define COBALT_TFD_TICKED	(1 << 2)
+
+#define COBALT_TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_WAKEUP)
+
+static ssize_t timerfd_read(struct rtdm_fd *fd, void __user *buf, size_t size)
+{
+	struct cobalt_tfd *tfd;
+	__u64 __user *u_ticks;
+	__u64 ticks = 0;
+	bool aligned;
+	spl_t s;
+	int err;
+
+	if (size < sizeof(ticks))
+		return -EINVAL;
+
+	u_ticks = buf;
+	if (!access_wok(u_ticks, sizeof(*u_ticks)))
+		return -EFAULT;
+
+	aligned = (((unsigned long)buf) & (sizeof(ticks) - 1)) == 0;
+
+	tfd = container_of(fd, struct cobalt_tfd, fd);
+
+	xnlock_get_irqsave(&nklock, s);
+	if (tfd->flags & COBALT_TFD_TICKED) {
+		err = 0;
+		goto out;
+	}
+	if (rtdm_fd_flags(fd) & O_NONBLOCK) {
+		err = -EAGAIN;
+		goto out;
+	}
+
+	do {
+		err = xnsynch_sleep_on(&tfd->readers, XN_INFINITE, XN_RELATIVE);
+	} while (err == 0 && (tfd->flags & COBALT_TFD_TICKED) == 0);
+
+	if (err & XNBREAK)
+		err = -EINTR;
+  out:
+	if (err == 0) {
+		xnticks_t now;
+
+		if (xntimer_periodic_p(&tfd->timer)) {
+			now = xnclock_read_raw(xntimer_clock(&tfd->timer));
+			ticks = 1 + xntimer_get_overruns(&tfd->timer,
+					 xnthread_current(), now);
+		} else
+			ticks = 1;
+
+		tfd->flags &= ~COBALT_TFD_TICKED;
+		xnselect_signal(&tfd->read_select, 0);
+	}
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (err == 0) {
+		err = aligned ? __xn_put_user(ticks, u_ticks) :
+			__xn_copy_to_user(buf, &ticks, sizeof(ticks));
+		if (err)
+			err = -EFAULT;
+	}
+
+	return err ?: sizeof(ticks);
+}
+
+static int
+timerfd_select(struct rtdm_fd *fd, struct xnselector *selector,
+	       unsigned type, unsigned index)
+{
+	struct cobalt_tfd *tfd = container_of(fd, struct cobalt_tfd, fd);
+	struct xnselect_binding *binding;
+	spl_t s;
+	int err;
+
+	if (type != XNSELECT_READ)
+		return -EBADF;
+
+	binding = xnmalloc(sizeof(*binding));
+	if (binding == NULL)
+		return -ENOMEM;
+
+	xnlock_get_irqsave(&nklock, s);
+	xntimer_set_affinity(&tfd->timer, xnthread_current()->sched);
+	err = xnselect_bind(&tfd->read_select, binding, selector, type,
+			index, tfd->flags & COBALT_TFD_TICKED);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+static void timerfd_close(struct rtdm_fd *fd)
+{
+	struct cobalt_tfd *tfd = container_of(fd, struct cobalt_tfd, fd);
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	xntimer_destroy(&tfd->timer);
+	xnsynch_destroy(&tfd->readers);
+	xnsched_run();
+	xnlock_put_irqrestore(&nklock, s);
+	xnselect_destroy(&tfd->read_select); /* Reschedules. */
+	xnfree(tfd);
+}
+
+static struct rtdm_fd_ops timerfd_ops = {
+	.read_rt = timerfd_read,
+	.select = timerfd_select,
+	.close = timerfd_close,
+};
+
+static void timerfd_handler(struct xntimer *xntimer)
+{
+	struct cobalt_tfd *tfd;
+
+	tfd = container_of(xntimer, struct cobalt_tfd, timer);
+	tfd->flags |= COBALT_TFD_TICKED;
+	xnselect_signal(&tfd->read_select, 1);
+	xnsynch_wakeup_one_sleeper(&tfd->readers);
+	if (tfd->target)
+		xnthread_unblock(tfd->target);
+}
+
+COBALT_SYSCALL(timerfd_create, lostage, (int clockid, int flags))
+{
+	struct cobalt_tfd *tfd;
+	struct xnthread *curr;
+	struct xnclock *clock;
+	int ret, ufd;
+
+	if (flags & ~TFD_CREATE_FLAGS)
+		return -EINVAL;
+
+	clock = cobalt_clock_find(clockid);
+	if (IS_ERR(clock))
+		return PTR_ERR(clock);
+
+	tfd = xnmalloc(sizeof(*tfd));
+	if (tfd == NULL)
+		return -ENOMEM;
+
+	ufd = __rtdm_anon_getfd("[cobalt-timerfd]",
+				O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
+	if (ufd < 0) {
+		ret = ufd;
+		goto fail_getfd;
+	}
+
+	tfd->flags = flags & ~TFD_NONBLOCK;
+	tfd->fd.oflags = (flags & TFD_NONBLOCK) ? O_NONBLOCK : 0;
+	tfd->clockid = clockid;
+	curr = xnthread_current();
+	xntimer_init(&tfd->timer, clock, timerfd_handler,
+		     curr ? curr->sched : NULL, XNTIMER_UGRAVITY);
+	xnsynch_init(&tfd->readers, XNSYNCH_PRIO, NULL);
+	xnselect_init(&tfd->read_select);
+	tfd->target = NULL;
+
+	ret = rtdm_fd_enter(&tfd->fd, ufd, COBALT_TIMERFD_MAGIC, &timerfd_ops);
+	if (ret < 0)
+		goto fail;
+
+	ret = rtdm_fd_register(&tfd->fd, ufd);
+	if (ret < 0)
+		goto fail;
+
+	return ufd;
+fail:
+	xnselect_destroy(&tfd->read_select);
+	xnsynch_destroy(&tfd->readers);
+	xntimer_destroy(&tfd->timer);
+	__rtdm_anon_putfd(ufd);
+fail_getfd:
+	xnfree(tfd);
+
+	return ret;
+}
+
+static inline struct cobalt_tfd *tfd_get(int ufd)
+{
+	struct rtdm_fd *fd;
+
+	fd = rtdm_fd_get(ufd, COBALT_TIMERFD_MAGIC);
+	if (IS_ERR(fd)) {
+		int err = PTR_ERR(fd);
+		if (err == -EBADF && cobalt_current_process() == NULL)
+			err = -EPERM;
+		return ERR_PTR(err);
+	}
+
+	return container_of(fd, struct cobalt_tfd, fd);
+}
+
+static inline void tfd_put(struct cobalt_tfd *tfd)
+{
+	rtdm_fd_put(&tfd->fd);
+}
+
+int __cobalt_timerfd_settime(int fd, int flags,
+			     const struct itimerspec64 *value,
+			     struct itimerspec64 *ovalue)
+{
+	struct cobalt_tfd *tfd;
+	int cflag, ret;
+	spl_t s;
+
+	if (flags & ~COBALT_TFD_SETTIME_FLAGS)
+		return -EINVAL;
+
+	tfd = tfd_get(fd);
+	if (IS_ERR(tfd))
+		return PTR_ERR(tfd);
+
+	cflag = (flags & TFD_TIMER_ABSTIME) ? TIMER_ABSTIME : 0;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	tfd->target = NULL;
+	if (flags & TFD_WAKEUP) {
+		tfd->target = xnthread_current();
+		if (tfd->target == NULL) {
+			ret = -EPERM;
+			goto out;
+		}
+	}
+
+	if (ovalue)
+		__cobalt_timer_getval(&tfd->timer, ovalue);
+
+	xntimer_set_affinity(&tfd->timer, xnthread_current()->sched);
+
+	ret = __cobalt_timer_setval(&tfd->timer,
+				    clock_flag(cflag, tfd->clockid), value);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	tfd_put(tfd);
+
+	return ret;
+}
+
+COBALT_SYSCALL(timerfd_settime, primary,
+	       (int fd, int flags,
+		const struct __user_old_itimerspec __user *new_value,
+		struct __user_old_itimerspec __user *old_value))
+{
+	struct itimerspec64 ovalue, value;
+	int ret;
+
+	ret = cobalt_get_u_itimerspec(&value, new_value);
+	if (ret)
+		return ret;
+
+	ret = __cobalt_timerfd_settime(fd, flags, &value, &ovalue);
+	if (ret)
+		return ret;
+
+	if (old_value) {
+		ret = cobalt_copy_to_user(old_value, &ovalue, sizeof(ovalue));
+		value.it_value.tv_sec = 0;
+		value.it_value.tv_nsec = 0;
+		__cobalt_timerfd_settime(fd, flags, &value, NULL);
+	}
+
+	return ret;
+}
+
+int __cobalt_timerfd_gettime(int fd, struct itimerspec64 *value)
+{
+	struct cobalt_tfd *tfd;
+	spl_t s;
+
+	tfd = tfd_get(fd);
+	if (IS_ERR(tfd))
+		return PTR_ERR(tfd);
+
+	xnlock_get_irqsave(&nklock, s);
+	__cobalt_timer_getval(&tfd->timer, value);
+	xnlock_put_irqrestore(&nklock, s);
+
+	tfd_put(tfd);
+
+	return 0;
+}
+
+COBALT_SYSCALL(timerfd_gettime, current,
+	       (int fd, struct __user_old_itimerspec __user *curr_value))
+{
+	struct itimerspec64 value;
+	int ret;
+
+	ret = __cobalt_timerfd_gettime(fd, &value);
+
+	return ret ?: cobalt_put_u_itimerspec(curr_value, &value);
+}
+++ linux-patched/kernel/xenomai/posix/cond.c	2022-03-21 12:58:29.029892472 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/syscall.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "internal.h"
+#include "thread.h"
+#include "mutex.h"
+#include "cond.h"
+#include "clock.h"
+#include <trace/events/cobalt-posix.h>
+
+static inline int
+pthread_cond_init(struct cobalt_cond_shadow *cnd, const struct cobalt_condattr *attr)
+{
+	int synch_flags = XNSYNCH_PRIO, ret;
+	struct cobalt_cond *cond, *old_cond;
+	struct cobalt_cond_state *state;
+	struct cobalt_ppd *sys_ppd;
+	struct list_head *condq;
+	spl_t s;
+
+	cond = xnmalloc(sizeof(*cond));
+	if (cond == NULL)
+		return -ENOMEM;
+
+	sys_ppd = cobalt_ppd_get(attr->pshared);
+	state = cobalt_umm_alloc(&sys_ppd->umm, sizeof(*state));
+	if (state == NULL) {
+		ret = -EAGAIN;
+		goto fail_umm;
+	}
+	cond->state = state;
+	state->pending_signals = 0;
+	state->mutex_state_offset = ~0U;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	condq = &cobalt_current_resources(attr->pshared)->condq;
+	if (cnd->magic == COBALT_COND_MAGIC && !list_empty(condq)) {
+		old_cond = xnregistry_lookup(cnd->handle, NULL);
+		if (cobalt_obj_active(old_cond, COBALT_COND_MAGIC,
+				      typeof(*old_cond))) {
+			ret = -EBUSY;
+			goto fail_register;
+		}
+	}
+
+	ret = xnregistry_enter_anon(cond, &cond->resnode.handle);
+	if (ret < 0)
+		goto fail_register;
+	if (attr->pshared)
+		cond->resnode.handle |= XNSYNCH_PSHARED;
+	cond->magic = COBALT_COND_MAGIC;
+	xnsynch_init(&cond->synchbase, synch_flags, NULL);
+	cond->attr = *attr;
+	cond->mutex = NULL;
+	cobalt_add_resource(&cond->resnode, cond, attr->pshared);
+
+	cnd->handle = cond->resnode.handle;
+	cnd->state_offset = cobalt_umm_offset(&sys_ppd->umm, state);
+	cnd->magic = COBALT_COND_MAGIC;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+fail_register:
+	xnlock_put_irqrestore(&nklock, s);
+	cobalt_umm_free(&sys_ppd->umm, state);
+fail_umm:
+	xnfree(cond);
+
+	return ret;
+}
+
+static inline int pthread_cond_destroy(struct cobalt_cond_shadow *cnd)
+{
+	struct cobalt_cond *cond;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	cond = xnregistry_lookup(cnd->handle, NULL);
+	if (cond == NULL) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EINVAL;
+	}
+
+	if (!cobalt_obj_active(cnd, COBALT_COND_MAGIC, struct cobalt_cond_shadow)
+	    || !cobalt_obj_active(cond, COBALT_COND_MAGIC, struct cobalt_cond)) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EINVAL;
+	}
+
+	if (cond->resnode.scope !=
+	    cobalt_current_resources(cond->attr.pshared)) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EPERM;
+	}
+
+	if (xnsynch_pended_p(&cond->synchbase) || cond->mutex) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBUSY;
+	}
+
+	cobalt_cond_reclaim(&cond->resnode, s); /* drops lock */
+
+	cobalt_mark_deleted(cnd);
+
+	return 0;
+}
+
+static inline int cobalt_cond_timedwait_prologue(struct xnthread *cur,
+						 struct cobalt_cond *cond,
+						 struct cobalt_mutex *mutex,
+						 xnticks_t abs_to)
+{
+	int err, ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/* Bail out if the cond is inactive, or already bound to another mutex. */
+	if (!cobalt_obj_active(cond, COBALT_COND_MAGIC, struct cobalt_cond)
+	    || (cond->mutex && cond->mutex != mutex)) {
+		err = -EINVAL;
+		goto unlock_and_return;
+	}
+
+	if (cond->resnode.scope !=
+	    cobalt_current_resources(cond->attr.pshared)) {
+		err = -EPERM;
+		goto unlock_and_return;
+	}
+
+	if (mutex->attr.pshared != cond->attr.pshared) {
+		err = -EINVAL;
+		goto unlock_and_return;
+	}
+
+	/* Unlock mutex. */
+	err = cobalt_mutex_release(cur, mutex);
+	if (err < 0)
+		goto unlock_and_return;
+
+	/* err == 1 means a reschedule is needed, but do not
+	   reschedule here: releasing the mutex and suspending must
+	   happen atomically in pthread_cond_*wait. */
+
+	/* Bind mutex to cond. */
+	if (cond->mutex == NULL) {
+		cond->mutex = mutex;
+		list_add_tail(&cond->mutex_link, &mutex->conds);
+	}
+
+	/* Wait for another thread to signal the condition. */
+	if (abs_to != XN_INFINITE)
+		ret = xnsynch_sleep_on(&cond->synchbase, abs_to,
+				       clock_flag(TIMER_ABSTIME, cond->attr.clock));
+	else
+		ret = xnsynch_sleep_on(&cond->synchbase, XN_INFINITE, XN_RELATIVE);
+
+	/* There are three possible wakeup conditions:
+	   - cond_signal / cond_broadcast: no status bit is set, and the
+	     function should return 0;
+	   - timeout: the XNTIMEO status bit is set, and the function
+	     should return ETIMEDOUT;
+	   - pthread_kill: the XNBREAK status bit is set but ignored, and the
+	     function simply returns EINTR (used only by the user-space
+	     interface, replaced by 0 anywhere else); the wakeup may or may
+	     not be spurious, depending on whether pthread_cond_signal was
+	     called between pthread_kill and the moment xnsynch_sleep_on
+	     returned.
+	 */
+
+	err = 0;
+
+	if (ret & XNBREAK)
+		err = -EINTR;
+	else if (ret & XNTIMEO)
+		err = -ETIMEDOUT;
+
+unlock_and_return:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+static inline int cobalt_cond_timedwait_epilogue(struct xnthread *cur,
+						 struct cobalt_cond *cond,
+						 struct cobalt_mutex *mutex)
+{
+	int err;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	err = __cobalt_mutex_acquire_unchecked(cur, mutex, NULL);
+	if (err == -EINTR)
+		goto unlock_and_return;
+
+	/*
+	 * Unbind the mutex from the cond if no other thread is
+	 * waiting and the job was not already done.
+	 */
+	if (!xnsynch_pended_p(&cond->synchbase) && cond->mutex == mutex) {
+		cond->mutex = NULL;
+		list_del(&cond->mutex_link);
+	}
+
+unlock_and_return:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+COBALT_SYSCALL(cond_init, current,
+	       (struct cobalt_cond_shadow __user *u_cnd,
+		const struct cobalt_condattr __user *u_attr))
+{
+	struct cobalt_cond_shadow cnd;
+	struct cobalt_condattr attr;
+	int err;
+
+	if (cobalt_copy_from_user(&cnd, u_cnd, sizeof(cnd)))
+		return -EFAULT;
+
+	if (cobalt_copy_from_user(&attr, u_attr, sizeof(attr)))
+		return -EFAULT;
+
+	trace_cobalt_cond_init(u_cnd, &attr);
+
+	err = pthread_cond_init(&cnd, &attr);
+	if (err < 0)
+		return err;
+
+	return cobalt_copy_to_user(u_cnd, &cnd, sizeof(*u_cnd));
+}
+
+COBALT_SYSCALL(cond_destroy, current,
+	       (struct cobalt_cond_shadow __user *u_cnd))
+{
+	struct cobalt_cond_shadow cnd;
+	int err;
+
+	if (cobalt_copy_from_user(&cnd, u_cnd, sizeof(cnd)))
+		return -EFAULT;
+
+	trace_cobalt_cond_destroy(u_cnd);
+
+	err = pthread_cond_destroy(&cnd);
+	if (err < 0)
+		return err;
+
+	return cobalt_copy_to_user(u_cnd, &cnd, sizeof(*u_cnd));
+}
+
+struct us_cond_data {
+	int err;
+};
+
+static inline int cond_fetch_timeout(struct timespec64 *ts,
+				     const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT :	cobalt_get_u_timespec(ts, u_ts);
+}
+
+int __cobalt_cond_wait_prologue(struct cobalt_cond_shadow __user *u_cnd,
+				struct cobalt_mutex_shadow __user *u_mx,
+				int *u_err,
+				void __user *u_ts,
+				int (*fetch_timeout)(struct timespec64 *ts,
+						     const void __user *u_ts))
+{
+	struct xnthread *cur = xnthread_current();
+	struct cobalt_cond *cond;
+	struct cobalt_mutex *mx;
+	struct us_cond_data d;
+	struct timespec64 ts;
+	xnhandle_t handle;
+	int err, perr = 0;
+	__u32 offset;
+
+	handle = cobalt_get_handle_from_user(&u_cnd->handle);
+	cond = xnregistry_lookup(handle, NULL);
+
+	handle = cobalt_get_handle_from_user(&u_mx->handle);
+	mx = xnregistry_lookup(handle, NULL);
+
+	if (cond->mutex == NULL) {
+		__xn_get_user(offset, &u_mx->state_offset);
+		cond->state->mutex_state_offset = offset;
+	}
+
+	if (fetch_timeout) {
+		err = fetch_timeout(&ts, u_ts);
+		if (err == 0) {
+			trace_cobalt_cond_timedwait(u_cnd, u_mx, &ts);
+			err = cobalt_cond_timedwait_prologue(cur, cond, mx,
+							     ts2ns(&ts) + 1);
+		}
+	} else {
+		trace_cobalt_cond_wait(u_cnd, u_mx);
+		err = cobalt_cond_timedwait_prologue(cur, cond, mx, XN_INFINITE);
+	}
+
+	switch(err) {
+	case 0:
+	case -ETIMEDOUT:
+		perr = d.err = err;
+		err = cobalt_cond_timedwait_epilogue(cur, cond, mx);
+		break;
+
+	case -EINTR:
+		perr = err;
+		d.err = 0;	/* epilogue should return 0. */
+		break;
+
+	default:
+		/* Keep gcc happy; this case can never happen. */
+		d.err = EINVAL;
+	}
+
+	if (cond->mutex == NULL)
+		cond->state->mutex_state_offset = ~0U;
+
+	if (err == -EINTR)
+		__xn_put_user(d.err, u_err);
+
+	return err == 0 ? perr : err;
+}
+
+/* pthread_cond_wait_prologue(cond, mutex, count_ptr, timed, timeout) */
+COBALT_SYSCALL(cond_wait_prologue, nonrestartable,
+	       (struct cobalt_cond_shadow __user *u_cnd,
+		struct cobalt_mutex_shadow __user *u_mx,
+		int *u_err,
+		unsigned int timed,
+		struct __user_old_timespec __user *u_ts))
+{
+	return __cobalt_cond_wait_prologue(u_cnd, u_mx, u_err, u_ts,
+					   timed ? cond_fetch_timeout : NULL);
+}
+
+COBALT_SYSCALL(cond_wait_epilogue, primary,
+	       (struct cobalt_cond_shadow __user *u_cnd,
+		struct cobalt_mutex_shadow __user *u_mx))
+{
+	struct xnthread *cur = xnthread_current();
+	struct cobalt_cond *cond;
+	struct cobalt_mutex *mx;
+	xnhandle_t handle;
+	int err;
+
+	handle = cobalt_get_handle_from_user(&u_cnd->handle);
+	cond = xnregistry_lookup(handle, NULL);
+
+	handle = cobalt_get_handle_from_user(&u_mx->handle);
+	mx = xnregistry_lookup(handle, NULL);
+	err = cobalt_cond_timedwait_epilogue(cur, cond, mx);
+
+	if (cond->mutex == NULL)
+		cond->state->mutex_state_offset = ~0U;
+
+	return err;
+}
+
+int cobalt_cond_deferred_signals(struct cobalt_cond *cond)
+{
+	struct cobalt_cond_state *state;
+	__u32 pending_signals;
+	int need_resched;
+
+	state = cond->state;
+	pending_signals = state->pending_signals;
+
+	switch(pending_signals) {
+	default:
+		state->pending_signals = 0;
+		need_resched = xnsynch_wakeup_many_sleepers(&cond->synchbase,
+							    pending_signals);
+		break;
+
+	case ~0U:
+		need_resched =
+			xnsynch_flush(&cond->synchbase, 0) == XNSYNCH_RESCHED;
+		state->pending_signals = 0;
+		break;
+
+	case 0:
+		need_resched = 0;
+		break;
+	}
+
+	return need_resched;
+}
+
+void cobalt_cond_reclaim(struct cobalt_resnode *node, spl_t s)
+{
+	struct cobalt_cond *cond;
+
+	cond = container_of(node, struct cobalt_cond, resnode);
+	xnregistry_remove(node->handle);
+	cobalt_del_resource(node);
+	xnsynch_destroy(&cond->synchbase);
+	cobalt_mark_deleted(cond);
+	xnlock_put_irqrestore(&nklock, s);
+
+	cobalt_umm_free(&cobalt_ppd_get(cond->attr.pshared)->umm,
+			cond->state);
+	xnfree(cond);
+}
+++ linux-patched/kernel/xenomai/posix/syscall.h	2022-03-21 12:58:29.026892501 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/process.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_SYSCALL_H
+#define _COBALT_POSIX_SYSCALL_H
+
+#include <cobalt/uapi/syscall.h>
+
+struct pt_regs;
+
+/* Regular (native) syscall handler implementation. */
+#define COBALT_SYSCALL(__name, __mode, __args)	\
+	long CoBaLt_ ## __name __args
+
+/* Regular (native) syscall handler declaration. */
+#define COBALT_SYSCALL_DECL(__name, __args)	\
+	long CoBaLt_ ## __name __args
+
+#include <asm/xenomai/syscall32.h>
+
+int handle_head_syscall(bool caller_is_relaxed,
+			struct pt_regs *regs);
+
+int handle_root_syscall(struct pt_regs *regs);
+
+#endif /* !_COBALT_POSIX_SYSCALL_H */
+++ linux-patched/kernel/xenomai/posix/process.c	2022-03-21 12:58:29.022892540 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/nsem.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2014 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2001-2014 The Xenomai project <http://www.xenomai.org>
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * SMP support Copyright (C) 2004 The HYADES project <http://www.hyades-itea.org>
+ * RTAI/fusion Copyright (C) 2004 The RTAI project <http://www.rtai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/stdarg.h>
+#include <linux/unistd.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/cred.h>
+#include <linux/file.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <pipeline/kevents.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/trace.h>
+#include <cobalt/kernel/stat.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/vdso.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/uapi/signal.h>
+#include <cobalt/uapi/syscall.h>
+#include <pipeline/sched.h>
+#include <trace/events/cobalt-core.h>
+#include <rtdm/driver.h>
+#include <asm/xenomai/features.h>
+#include <asm/xenomai/syscall.h>
+#include "../debug.h"
+#include "internal.h"
+#include "thread.h"
+#include "sched.h"
+#include "mutex.h"
+#include "cond.h"
+#include "mqueue.h"
+#include "sem.h"
+#include "signal.h"
+#include "timer.h"
+#include "monitor.h"
+#include "clock.h"
+#include "event.h"
+#include "timerfd.h"
+#include "io.h"
+
+static int gid_arg = -1;
+module_param_named(allowed_group, gid_arg, int, 0644);
+
+static DEFINE_MUTEX(personality_lock);
+
+static struct hlist_head *process_hash;
+DEFINE_PRIVATE_XNLOCK(process_hash_lock);
+#define PROCESS_HASH_SIZE 13
+
+struct xnthread_personality *cobalt_personalities[NR_PERSONALITIES];
+
+static struct xnsynch yield_sync;
+
+LIST_HEAD(cobalt_global_thread_list);
+
+DEFINE_XNPTREE(posix_ptree, "posix");
+
+struct cobalt_resources cobalt_global_resources = {
+	.condq = LIST_HEAD_INIT(cobalt_global_resources.condq),
+	.mutexq = LIST_HEAD_INIT(cobalt_global_resources.mutexq),
+	.semq = LIST_HEAD_INIT(cobalt_global_resources.semq),
+	.monitorq = LIST_HEAD_INIT(cobalt_global_resources.monitorq),
+	.eventq = LIST_HEAD_INIT(cobalt_global_resources.eventq),
+	.schedq = LIST_HEAD_INIT(cobalt_global_resources.schedq),
+};
+
+static unsigned __attribute__((pure)) process_hash_crunch(struct mm_struct *mm)
+{
+	unsigned long hash = ((unsigned long)mm - PAGE_OFFSET) / sizeof(*mm);
+	return hash % PROCESS_HASH_SIZE;
+}
+
+static struct cobalt_process *__process_hash_search(struct mm_struct *mm)
+{
+	unsigned int bucket = process_hash_crunch(mm);
+	struct cobalt_process *p;
+
+	hlist_for_each_entry(p, &process_hash[bucket], hlink)
+		if (p->mm == mm)
+			return p;
+	
+	return NULL;
+}
+
+static int process_hash_enter(struct cobalt_process *p)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned int bucket = process_hash_crunch(mm);
+	int err;
+	spl_t s;
+
+	xnlock_get_irqsave(&process_hash_lock, s);
+	if (__process_hash_search(mm)) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	p->mm = mm;
+	hlist_add_head(&p->hlink, &process_hash[bucket]);
+	err = 0;
+  out:
+	xnlock_put_irqrestore(&process_hash_lock, s);
+	return err;
+}
+
+static void process_hash_remove(struct cobalt_process *p)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&process_hash_lock, s);
+	if (p->mm)
+		hlist_del(&p->hlink);
+	xnlock_put_irqrestore(&process_hash_lock, s);
+}
+
+struct cobalt_process *cobalt_search_process(struct mm_struct *mm)
+{
+	struct cobalt_process *process;
+	spl_t s;
+	
+	xnlock_get_irqsave(&process_hash_lock, s);
+	process = __process_hash_search(mm);
+	xnlock_put_irqrestore(&process_hash_lock, s);
+	
+	return process;
+}
+
+static void *lookup_context(int xid)
+{
+	struct cobalt_process *process = cobalt_current_process();
+	void *priv = NULL;
+	spl_t s;
+
+	xnlock_get_irqsave(&process_hash_lock, s);
+	/*
+	 * First try matching the process context attached to the
+	 * (usually main) thread which issued sc_cobalt_bind. If not
+	 * found, try matching by mm context, which should point us
+	 * back to the latter. If none match, then the current process
+	 * is unbound.
+	 */
+	if (process == NULL && current->mm)
+		process = __process_hash_search(current->mm);
+	if (process)
+		priv = process->priv[xid];
+
+	xnlock_put_irqrestore(&process_hash_lock, s);
+
+	return priv;
+}
+
+void cobalt_remove_process(struct cobalt_process *process)
+{
+	struct xnthread_personality *personality;
+	void *priv;
+	int xid;
+
+	mutex_lock(&personality_lock);
+
+	for (xid = NR_PERSONALITIES - 1; xid >= 0; xid--) {
+		if (!__test_and_clear_bit(xid, &process->permap))
+			continue;
+		personality = cobalt_personalities[xid];
+		priv = process->priv[xid];
+		if (priv == NULL)
+			continue;
+		/*
+		 * CAUTION: process potentially refers to stale memory
+		 * upon return from detach_process() for the Cobalt
+		 * personality, so don't dereference it afterwards.
+		 */
+		if (xid)
+			process->priv[xid] = NULL;
+		__clear_bit(personality->xid, &process->permap);
+		personality->ops.detach_process(priv);
+		atomic_dec(&personality->refcnt);
+		XENO_WARN_ON(COBALT, atomic_read(&personality->refcnt) < 0);
+		if (personality->module)
+			module_put(personality->module);
+	}
+
+	cobalt_set_process(NULL);
+
+	mutex_unlock(&personality_lock);
+}
+
+static void post_ppd_release(struct cobalt_umm *umm)
+{
+	struct cobalt_process *process;
+
+	process = container_of(umm, struct cobalt_process, sys_ppd.umm);
+	kfree(process);
+}
+
+static inline char *get_exe_path(struct task_struct *p)
+{
+	struct file *exe_file;
+	char *pathname, *buf;
+	struct mm_struct *mm;
+	struct path path;
+
+	/*
+	 * PATH_MAX is fairly large, and in any case won't fit on the
+	 * caller's stack happily; since we are mapping a shadow,
+	 * which is a heavyweight operation anyway, let's pick the
+	 * memory from the page allocator.
+	 */
+	buf = (char *)__get_free_page(GFP_KERNEL);
+	if (buf == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	mm = get_task_mm(p);
+	if (mm == NULL) {
+		pathname = "vmlinux";
+		goto copy;	/* kernel thread */
+	}
+
+	exe_file = get_mm_exe_file(mm);
+	mmput(mm);
+	if (exe_file == NULL) {
+		pathname = ERR_PTR(-ENOENT);
+		goto out;	/* no luck. */
+	}
+
+	path = exe_file->f_path;
+	path_get(&exe_file->f_path);
+	fput(exe_file);
+	pathname = d_path(&path, buf, PATH_MAX);
+	path_put(&path);
+	if (IS_ERR(pathname))
+		goto out;	/* mmmh... */
+copy:
+	/* caution: d_path() may start writing anywhere in the buffer. */
+	pathname = kstrdup(pathname, GFP_KERNEL);
+out:
+	free_page((unsigned long)buf);
+
+	return pathname;
+}
+
+static inline int raise_cap(int cap)
+{
+	struct cred *new;
+
+	new = prepare_creds();
+	if (new == NULL)
+		return -ENOMEM;
+
+	cap_raise(new->cap_effective, cap);
+
+	return commit_creds(new);
+}
+
+static int bind_personality(struct xnthread_personality *personality)
+{
+	struct cobalt_process *process;
+	void *priv;
+
+	/*
+	 * We also check capabilities for stacking a Cobalt extension,
+	 * in case the process dropped the supervisor privileges after
+	 * a successful initial binding to the Cobalt interface.
+	 */
+	if (!capable(CAP_SYS_NICE) &&
+	    (gid_arg == -1 || !in_group_p(KGIDT_INIT(gid_arg))))
+		return -EPERM;
+	/*
+	 * Protect from the same process binding to the same interface
+	 * several times.
+	 */
+	priv = lookup_context(personality->xid);
+	if (priv)
+		return 0;
+
+	priv = personality->ops.attach_process();
+	if (IS_ERR(priv))
+		return PTR_ERR(priv);
+
+	process = cobalt_current_process();
+	/*
+	 * We are still covered by the personality_lock, so we may
+	 * safely bump the module refcount after the attach handler
+	 * has returned.
+	 */
+	if (personality->module && !try_module_get(personality->module)) {
+		personality->ops.detach_process(priv);
+		return -EAGAIN;
+	}
+
+	__set_bit(personality->xid, &process->permap);
+	atomic_inc(&personality->refcnt);
+	process->priv[personality->xid] = priv;
+
+	raise_cap(CAP_SYS_NICE);
+	raise_cap(CAP_IPC_LOCK);
+	raise_cap(CAP_SYS_RAWIO);
+
+	return 0;
+}
+
+int cobalt_bind_personality(unsigned int magic)
+{
+	struct xnthread_personality *personality;
+	int xid, ret = -ESRCH;
+
+	mutex_lock(&personality_lock);
+
+	for (xid = 1; xid < NR_PERSONALITIES; xid++) {
+		personality = cobalt_personalities[xid];
+		if (personality && personality->magic == magic) {
+			ret = bind_personality(personality);
+			break;
+		}
+	}
+
+	mutex_unlock(&personality_lock);
+
+	return ret ?: xid;
+}
+
+int cobalt_bind_core(int ufeatures)
+{
+	struct cobalt_process *process;
+	int ret;
+
+	mutex_lock(&personality_lock);
+	ret = bind_personality(&cobalt_personality);
+	mutex_unlock(&personality_lock);
+	if (ret)
+		return ret;
+
+	process = cobalt_current_process();
+	/* Feature set userland knows about. */
+	process->ufeatures = ufeatures;
+
+	return 0;
+}
+
+/**
+ * @fn int cobalt_register_personality(struct xnthread_personality *personality)
+ * @internal
+ * @brief Register a new interface personality.
+ *
+ * - personality->ops.attach_process() is called when a user-space
+ *   process binds to the personality, on behalf of one of its
+ *   threads. The attach_process() handler may return:
+ *
+ *   . an opaque pointer, representing the context of the calling
+ *   process for this personality;
+ *
+ *   . a NULL pointer, meaning that no per-process structure should be
+ *   attached to this process for this personality;
+ *
+ *   . ERR_PTR(negative value) indicating an error, the binding
+ *   process will then abort.
+ *
+ * - personality->ops.detach_process() is called on behalf of an
+ *   exiting user-space process which has previously attached to the
+ *   personality. This handler is passed a pointer to the per-process
+ *   data received earlier from the ops->attach_process() handler.
+ *
+ * @return the personality (extension) identifier.
+ *
+ * @note cobalt_get_context() is NULL when ops.detach_process() is
+ * invoked for the personality the caller detaches from.
+ *
+ * @coretags{secondary-only}
+ */
+int cobalt_register_personality(struct xnthread_personality *personality)
+{
+	int xid;
+
+	mutex_lock(&personality_lock);
+
+	for (xid = 0; xid < NR_PERSONALITIES; xid++) {
+		if (cobalt_personalities[xid] == NULL) {
+			personality->xid = xid;
+			atomic_set(&personality->refcnt, 0);
+			cobalt_personalities[xid] = personality;
+			goto out;
+		}
+	}
+
+	xid = -EAGAIN;
+out:
+	mutex_unlock(&personality_lock);
+
+	return xid;
+}
+EXPORT_SYMBOL_GPL(cobalt_register_personality);
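+
+/*
+ * A minimal, hypothetical sketch of an extension registering its
+ * personality, following the attach/detach contract documented above.
+ * The myext_* identifiers and MYEXT_MAGIC are invented for the example.
+ *
+ *	static void *myext_attach_process(void)
+ *	{
+ *		struct myext_pdata *p = kzalloc(sizeof(*p), GFP_KERNEL);
+ *
+ *		return p ?: ERR_PTR(-ENOMEM);
+ *	}
+ *
+ *	static void myext_detach_process(void *priv)
+ *	{
+ *		kfree(priv);
+ *	}
+ *
+ *	static struct xnthread_personality myext_personality = {
+ *		.name = "myext",
+ *		.magic = MYEXT_MAGIC,
+ *		.module = THIS_MODULE,
+ *		.ops = {
+ *			.attach_process = myext_attach_process,
+ *			.detach_process = myext_detach_process,
+ *		},
+ *	};
+ *
+ * The extension would call cobalt_register_personality() from its module
+ * init code and keep the returned xid around, e.g. for later calls to
+ * cobalt_push_personality() and cobalt_get_context().
+ */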
+
+/*
+ * @brief Unregister an interface personality.
+ *
+ * @coretags{secondary-only}
+ */
+int cobalt_unregister_personality(int xid)
+{
+	struct xnthread_personality *personality;
+	int ret = 0;
+
+	if (xid < 0 || xid >= NR_PERSONALITIES)
+		return -EINVAL;
+
+	mutex_lock(&personality_lock);
+
+	personality = cobalt_personalities[xid];
+	if (atomic_read(&personality->refcnt) > 0)
+		ret = -EBUSY;
+	else
+		cobalt_personalities[xid] = NULL;
+
+	mutex_unlock(&personality_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cobalt_unregister_personality);
+
+/**
+ * Stack a new personality over Cobalt for the current thread.
+ *
+ * This service registers the current thread as a member of the
+ * additional personality identified by @a xid. If the current thread
+ * is already assigned this personality, the call returns successfully
+ * with no effect.
+ *
+ * @param xid the identifier of the additional personality.
+ *
+ * @return A handle to the previous personality. The caller should
+ * save this handle for unstacking @a xid when applicable via a call
+ * to cobalt_pop_personality().
+ *
+ * @coretags{secondary-only}
+ */
+struct xnthread_personality *
+cobalt_push_personality(int xid)
+{
+	struct cobalt_threadinfo *p = pipeline_current();
+	struct xnthread_personality *prev, *next;
+	struct xnthread *thread = p->thread;
+
+	secondary_mode_only();
+
+	mutex_lock(&personality_lock);
+
+	if (xid < 0 || xid >= NR_PERSONALITIES ||
+	    p->process == NULL || !test_bit(xid, &p->process->permap)) {
+		mutex_unlock(&personality_lock);
+		return NULL;
+	}
+
+	next = cobalt_personalities[xid];
+	prev = thread->personality;
+	if (next == prev) {
+		mutex_unlock(&personality_lock);
+		return prev;
+	}
+
+	thread->personality = next;
+	mutex_unlock(&personality_lock);
+	xnthread_run_handler(thread, map_thread);
+
+	return prev;
+}
+EXPORT_SYMBOL_GPL(cobalt_push_personality);
+
+/**
+ * Pop the topmost personality from the current thread.
+ *
+ * This service pops the topmost personality off the current thread.
+ *
+ * @param prev the previous personality which was returned by the
+ * latest call to cobalt_push_personality() for the current thread.
+ *
+ * @coretags{secondary-only}
+ */
+void cobalt_pop_personality(struct xnthread_personality *prev)
+{
+	struct cobalt_threadinfo *p = pipeline_current();
+	struct xnthread *thread = p->thread;
+
+	secondary_mode_only();
+	thread->personality = prev;
+}
+EXPORT_SYMBOL_GPL(cobalt_pop_personality);
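+
+/*
+ * A typical pairing of the two services above, as it might appear in a
+ * hypothetical extension handler running in secondary mode (myext_xid
+ * standing for the value returned by cobalt_register_personality() at
+ * registration time):
+ *
+ *	struct xnthread_personality *prev;
+ *
+ *	prev = cobalt_push_personality(myext_xid);
+ *	if (prev == NULL)
+ *		return -EPERM;
+ *
+ *	... do work on behalf of the extension ...
+ *
+ *	cobalt_pop_personality(prev);
+ */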
+
+/**
+ * Return the per-process data attached to the calling user process.
+ *
+ * This service returns the per-process data attached to the calling
+ * user process for the personality whose xid is @a xid.
+ *
+ * The per-process data was obtained from the ->attach_process()
+ * handler defined for the personality @a xid refers to.
+ *
+ * See cobalt_register_personality() documentation for information on
+ * the way to attach a per-process data to a process.
+ *
+ * @param xid the personality identifier.
+ *
+ * @return the per-process data if the current context is a user-space
+ * process; @return NULL otherwise. As a special case,
+ * cobalt_get_context(0) returns the current Cobalt process
+ * descriptor, which is strictly identical to calling
+ * cobalt_current_process().
+ *
+ * @coretags{task-unrestricted}
+ */
+void *cobalt_get_context(int xid)
+{
+	return lookup_context(xid);
+}
+EXPORT_SYMBOL_GPL(cobalt_get_context);
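+
+/*
+ * For instance, a hypothetical extension could fetch the per-process
+ * data returned by its attach_process() handler this way (myext_xid and
+ * struct myext_pdata are invented names):
+ *
+ *	struct myext_pdata *p = cobalt_get_context(myext_xid);
+ *
+ *	if (p == NULL)
+ *		return -EPERM;
+ */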
+
+int cobalt_yield(xnticks_t min, xnticks_t max)
+{
+	xnticks_t start;
+	int ret;
+
+	start = xnclock_read_monotonic(&nkclock);
+	max += start;
+	min += start;
+
+	do {
+		ret = xnsynch_sleep_on(&yield_sync, max, XN_ABSOLUTE);
+		if (ret & XNBREAK)
+			return -EINTR;
+	} while (ret == 0 && xnclock_read_monotonic(&nkclock) < min);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cobalt_yield);
+
+/**
+ * @fn int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff)
+ * @internal
+ * @brief Create a shadow thread context over a user task.
+ *
+ * This call maps a Xenomai thread to the current regular Linux task
+ * running in userland.  The priority and scheduling class of the
+ * underlying Linux task are not affected; it is assumed that the
+ * interface library did set them appropriately before issuing the
+ * shadow mapping request.
+ *
+ * @param thread The descriptor address of the new shadow thread to be
+ * mapped to current. This descriptor must have been previously
+ * initialized by a call to xnthread_init().
+ *
+ * @param u_winoff will receive the offset of the per-thread
+ * "u_window" structure in the global heap associated to @a
+ * thread. This structure reflects thread state information visible
+ * from userland through a shared memory window.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -EINVAL is returned if the thread control block does not bear the
+ * XNUSER bit.
+ *
+ * - -EBUSY is returned if either the current Linux task or the
+ * associated shadow thread is already involved in a shadow mapping.
+ *
+ * @coretags{secondary-only}
+ */
+int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff)
+{
+	struct xnthread_user_window *u_window;
+	struct xnthread_start_attr attr;
+	struct cobalt_ppd *sys_ppd;
+	struct cobalt_umm *umm;
+	int ret;
+
+	if (!xnthread_test_state(thread, XNUSER))
+		return -EINVAL;
+
+	if (xnthread_current() || xnthread_test_state(thread, XNMAPPED))
+		return -EBUSY;
+
+	if (!access_wok(u_winoff, sizeof(*u_winoff)))
+		return -EFAULT;
+
+	ret = pipeline_prepare_current();
+	if (ret)
+		return ret;
+
+	umm = &cobalt_kernel_ppd.umm;
+	u_window = cobalt_umm_zalloc(umm, sizeof(*u_window));
+	if (u_window == NULL)
+		return -ENOMEM;
+
+	thread->u_window = u_window;
+	__xn_put_user(cobalt_umm_offset(umm, u_window), u_winoff);
+	xnthread_pin_initial(thread);
+
+	/*
+	 * CAUTION: we enable the pipeline notifier only when our
+	 * shadow TCB is consistent, so that we won't trigger false
+	 * positives in debug code from handle_schedule_event() and
+	 * friends.
+	 */
+	pipeline_init_shadow_tcb(thread);
+	xnthread_suspend(thread, XNRELAX, XN_INFINITE, XN_RELATIVE, NULL);
+	pipeline_attach_current(thread);
+	xnthread_set_state(thread, XNMAPPED);
+	xndebug_shadow_init(thread);
+	sys_ppd = cobalt_ppd_get(0);
+	atomic_inc(&sys_ppd->refcnt);
+	/*
+	 * ->map_thread() handler is invoked after the TCB is fully
+	 * built, and when we know for sure that current will go
+	 * through our task-exit handler, because it has a shadow
+	 * extension and I-pipe notifications will soon be enabled for
+	 * it.
+	 */
+	xnthread_run_handler(thread, map_thread);
+	pipeline_enable_kevents();
+
+	attr.mode = 0;
+	attr.entry = NULL;
+	attr.cookie = NULL;
+	ret = xnthread_start(thread, &attr);
+	if (ret)
+		return ret;
+
+	xnthread_sync_window(thread);
+
+	xntrace_pid(xnthread_host_pid(thread),
+		    xnthread_current_priority(thread));
+
+	return 0;
+}
+
+void cobalt_signal_yield(void)
+{
+	spl_t s;
+
+	if (!xnsynch_pended_p(&yield_sync))
+		return;
+
+	xnlock_get_irqsave(&nklock, s);
+	if (xnsynch_pended_p(&yield_sync)) {
+		xnsynch_flush(&yield_sync, 0);
+		xnsched_run();
+	}
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+static inline struct cobalt_process *
+process_from_thread(struct xnthread *thread)
+{
+	return container_of(thread, struct cobalt_thread, threadbase)->process;
+}
+
+void cobalt_stop_debugged_process(struct xnthread *thread)
+{
+	struct cobalt_process *process = process_from_thread(thread);
+	struct cobalt_thread *cth;
+
+	if (process->debugged_threads > 0)
+		return;
+
+	list_for_each_entry(cth, &process->thread_list, next) {
+		if (&cth->threadbase == thread)
+			continue;
+
+		xnthread_suspend(&cth->threadbase, XNDBGSTOP, XN_INFINITE,
+				 XN_RELATIVE, NULL);
+	}
+}
+
+static void cobalt_resume_debugged_process(struct cobalt_process *process)
+{
+	struct cobalt_thread *cth;
+
+	xnsched_lock();
+
+	list_for_each_entry(cth, &process->thread_list, next)
+		if (xnthread_test_state(&cth->threadbase, XNDBGSTOP))
+			xnthread_resume(&cth->threadbase, XNDBGSTOP);
+
+	xnsched_unlock();
+}
+
+/* called with nklock held */
+void cobalt_register_debugged_thread(struct xnthread *thread)
+{
+	struct cobalt_process *process = process_from_thread(thread);
+
+	xnthread_set_state(thread, XNSSTEP);
+
+	cobalt_stop_debugged_process(thread);
+	process->debugged_threads++;
+
+	if (xnthread_test_state(thread, XNRELAX))
+		xnthread_suspend(thread, XNDBGSTOP, XN_INFINITE, XN_RELATIVE,
+				 NULL);
+}
+
+/* called with nklock held */
+void cobalt_unregister_debugged_thread(struct xnthread *thread)
+{
+	struct cobalt_process *process = process_from_thread(thread);
+
+	process->debugged_threads--;
+	xnthread_clear_state(thread, XNSSTEP);
+
+	if (process->debugged_threads == 0)
+		cobalt_resume_debugged_process(process);
+}
+
+int cobalt_handle_setaffinity_event(struct task_struct *task)
+{
+#ifdef CONFIG_SMP
+	struct xnthread *thread;
+	spl_t s;
+
+	thread = xnthread_from_task(task);
+	if (thread == NULL)
+		return KEVENT_PROPAGATE;
+
+	/*
+	 * Detect a Cobalt thread sleeping in primary mode which is
+	 * required to migrate to another CPU by the host kernel.
+	 *
+	 * We may NOT fix up thread->sched immediately using the
+	 * passive migration call, because the latter always has to
+	 * take place on behalf of the target thread itself while
+	 * running in secondary mode. Therefore, that thread needs to
+	 * go through secondary mode first, then move back to primary
+	 * mode, so that affinity_ok() does the fixup work.
+	 *
+	 * We force this by sending a SIGSHADOW signal to the migrated
+	 * thread, asking it to switch back to primary mode from the
+	 * handler, at which point the interrupted syscall may be
+	 * restarted.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnthread_test_state(thread, XNTHREAD_BLOCK_BITS & ~XNRELAX))
+		__xnthread_signal(thread, SIGSHADOW, SIGSHADOW_ACTION_HARDEN);
+
+	xnlock_put_irqrestore(&nklock, s);
+#endif /* CONFIG_SMP */
+
+	return KEVENT_PROPAGATE;
+}
+
+#ifdef CONFIG_SMP
+void cobalt_adjust_affinity(struct task_struct *task) /* nklocked, IRQs off */
+{
+	struct xnthread *thread = xnthread_from_task(task);
+	struct xnsched *sched;
+	int cpu = task_cpu(task);
+
+	/*
+	 * To maintain consistency between both Cobalt and host
+	 * schedulers, reflecting a thread migration to another CPU
+	 * into the Cobalt scheduler state must happen from secondary
+	 * mode only, on behalf of the migrated thread itself once it
+	 * runs on the target CPU.
+	 *
+	 * This means that the Cobalt scheduler state regarding the
+	 * CPU information lags behind the host scheduler state until
+	 * the migrated thread switches back to primary mode
+	 * (i.e. task_cpu(p) != xnsched_cpu(xnthread_from_task(p)->sched)).
+	 * This is ok since Cobalt does not schedule such thread until then.
+	 *
+	 * check_affinity() detects when a Cobalt thread switching
+	 * back to primary mode did move to another CPU earlier while
+	 * in secondary mode. If so, do the fixups to reflect the
+	 * change.
+	 */
+	if (!xnsched_threading_cpu(cpu)) {
+		/*
+		 * The thread is about to switch to primary mode on a
+		 * non-rt CPU, which is damn wrong and hopeless.
+		 * Whine and cancel that thread.
+		 */
+		printk(XENO_WARNING "thread %s[%d] switched to non-rt CPU%d, aborted.\n",
+		       thread->name, xnthread_host_pid(thread), cpu);
+		/*
+		 * Can't call xnthread_cancel() from a migration
+		 * point, that would break. Since we are on the wakeup
+		 * path to hardening, just raise XNCANCELD to catch it
+		 * in xnthread_harden().
+		 */
+		xnthread_set_info(thread, XNCANCELD);
+		return;
+	}
+
+	sched = xnsched_struct(cpu);
+	if (sched == thread->sched)
+		return;
+
+	/*
+	 * The current thread moved to a supported real-time CPU,
+	 * which is not part of its original affinity mask
+	 * though. Assume the user wants to extend this mask.
+	 */
+	if (!cpumask_test_cpu(cpu, &thread->affinity))
+		cpumask_set_cpu(cpu, &thread->affinity);
+
+	xnthread_run_handler_stack(thread, move_thread, cpu);
+	xnthread_migrate_passive(thread, sched);
+}
+#endif /* CONFIG_SMP */
+
+static void __handle_taskexit_event(struct task_struct *p)
+{
+	struct cobalt_ppd *sys_ppd;
+	struct xnthread *thread;
+	spl_t s;
+
+	/*
+	 * We are called for both kernel and user shadows over the
+	 * root thread.
+	 */
+	secondary_mode_only();
+
+	thread = xnthread_current();
+	XENO_BUG_ON(COBALT, thread == NULL);
+	trace_cobalt_shadow_unmap(thread);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnthread_test_state(thread, XNSSTEP))
+		cobalt_unregister_debugged_thread(thread);
+
+	xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	xnthread_run_handler_stack(thread, exit_thread);
+
+	if (xnthread_test_state(thread, XNUSER)) {
+		cobalt_umm_free(&cobalt_kernel_ppd.umm, thread->u_window);
+		thread->u_window = NULL;
+		sys_ppd = cobalt_ppd_get(0);
+		if (atomic_dec_and_test(&sys_ppd->refcnt))
+			cobalt_remove_process(cobalt_current_process());
+	}
+}
+
+int cobalt_handle_user_return(struct task_struct *task)
+{
+	struct xnthread *thread;
+	spl_t s;
+	int err;
+
+	thread = xnthread_from_task(task);
+	if (thread == NULL)
+		return KEVENT_PROPAGATE;
+
+	if (xnthread_test_info(thread, XNCONTHI)) {
+		xnlock_get_irqsave(&nklock, s);
+		xnthread_clear_info(thread, XNCONTHI);
+		xnlock_put_irqrestore(&nklock, s);
+
+		err = xnthread_harden();
+
+		/*
+		 * XNCONTHI may or may not have been re-applied if
+		 * harden bailed out due to pending signals. Make sure
+		 * it is set in that case.
+		 */
+		if (err == -ERESTARTSYS) {
+			xnlock_get_irqsave(&nklock, s);
+			xnthread_set_info(thread, XNCONTHI);
+			xnlock_put_irqrestore(&nklock, s);
+		}
+	}
+
+	return KEVENT_PROPAGATE;
+}
+
+static void detach_current(void)
+{
+	struct cobalt_threadinfo *p = pipeline_current();
+
+	p->thread = NULL;
+	p->process = NULL;
+}
+
+int cobalt_handle_taskexit_event(struct task_struct *task) /* task == current */
+{
+	__handle_taskexit_event(task);
+
+	/*
+	 * __xnthread_cleanup() -> ... -> finalize_thread
+	 * handler. From that point, the TCB is dropped. Be careful
+	 * not to tread on stale memory within @thread.
+	 */
+	__xnthread_cleanup(xnthread_current());
+
+	detach_current();
+
+	return KEVENT_PROPAGATE;
+}
+
+int cobalt_handle_cleanup_event(struct mm_struct *mm)
+{
+	struct cobalt_process *old, *process;
+	struct cobalt_ppd *sys_ppd;
+	struct xnthread *curr;
+
+	/*
+	 * We are NOT called for exiting kernel shadows.
+	 * cobalt_current_process() is cleared if we get there after
+	 * handle_task_exit(), so we need to restore this context
+	 * pointer temporarily.
+	 */
+	process = cobalt_search_process(mm);
+	old = cobalt_set_process(process);
+	sys_ppd = cobalt_ppd_get(0);
+	if (sys_ppd != &cobalt_kernel_ppd) {
+		bool running_exec;
+
+		/*
+		 * Detect a userland shadow running exec(), i.e. still
+		 * attached to the current linux task (no prior
+		 * detach_current). In this case, we emulate a task
+		 * exit, since the Xenomai binding shall not survive
+		 * the exec() syscall. Since the process will keep on
+		 * running though, we have to disable the event
+		 * notifier manually for it.
+		 */
+		curr = xnthread_current();
+		running_exec = curr && (current->flags & PF_EXITING) == 0;
+		if (running_exec) {
+			__handle_taskexit_event(current);
+			pipeline_cleanup_process();
+		}
+		if (atomic_dec_and_test(&sys_ppd->refcnt))
+			cobalt_remove_process(process);
+		if (running_exec) {
+			__xnthread_cleanup(curr);
+			detach_current();
+		}
+	}
+
+	/*
+	 * CAUTION: Do not override a state change caused by
+	 * cobalt_remove_process().
+	 */
+	if (cobalt_current_process() == process)
+		cobalt_set_process(old);
+
+	return KEVENT_PROPAGATE;
+}
+
+static int attach_process(struct cobalt_process *process)
+{
+	struct cobalt_ppd *p = &process->sys_ppd;
+	char *exe_path;
+	int ret;
+
+	ret = cobalt_umm_init(&p->umm, CONFIG_XENO_OPT_PRIVATE_HEAPSZ * 1024,
+			      post_ppd_release);
+	if (ret)
+		return ret;
+
+	cobalt_umm_set_name(&p->umm, "private heap[%d]", task_pid_nr(current));
+
+	ret = pipeline_attach_process(process);
+	if (ret)
+		goto fail_pipeline;
+
+	exe_path = get_exe_path(current);
+	if (IS_ERR(exe_path)) {
+		printk(XENO_WARNING
+		       "%s[%d] can't find exe path\n",
+		       current->comm, task_pid_nr(current));
+		exe_path = NULL; /* Not lethal, but weird. */
+	}
+	p->exe_path = exe_path;
+	xntree_init(&p->fds);
+	atomic_set(&p->refcnt, 1);
+
+	ret = process_hash_enter(process);
+	if (ret)
+		goto fail_hash;
+
+	return 0;
+fail_hash:
+	pipeline_detach_process(process);
+	if (p->exe_path)
+		kfree(p->exe_path);
+fail_pipeline:
+	cobalt_umm_destroy(&p->umm);
+
+	return ret;
+}
+
+static void *cobalt_process_attach(void)
+{
+	struct cobalt_process *process;
+	int ret;
+
+	process = kzalloc(sizeof(*process), GFP_KERNEL);
+	if (process == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = attach_process(process);
+	if (ret) {
+		kfree(process);
+		return ERR_PTR(ret);
+	}
+
+	INIT_LIST_HEAD(&process->resources.condq);
+	INIT_LIST_HEAD(&process->resources.mutexq);
+	INIT_LIST_HEAD(&process->resources.semq);
+	INIT_LIST_HEAD(&process->resources.monitorq);
+	INIT_LIST_HEAD(&process->resources.eventq);
+	INIT_LIST_HEAD(&process->resources.schedq);
+	INIT_LIST_HEAD(&process->sigwaiters);
+	INIT_LIST_HEAD(&process->thread_list);
+	xntree_init(&process->usems);
+	bitmap_fill(process->timers_map, CONFIG_XENO_OPT_NRTIMERS);
+	cobalt_set_process(process);
+
+	return process;
+}
+
+static void detach_process(struct cobalt_process *process)
+{
+	struct cobalt_ppd *p = &process->sys_ppd;
+
+	if (p->exe_path)
+		kfree(p->exe_path);
+
+	rtdm_fd_cleanup(p);
+	process_hash_remove(process);
+	/*
+	 * CAUTION: the process descriptor might be immediately
+	 * released as a result of calling cobalt_umm_destroy(), so we
+	 * must do this last, not to tread on stale memory.
+	 */
+	cobalt_umm_destroy(&p->umm);
+}
+
+static void __reclaim_resource(struct cobalt_process *process,
+			       void (*reclaim)(struct cobalt_resnode *node, spl_t s),
+			       struct list_head *local,
+			       struct list_head *global)
+{
+	struct cobalt_resnode *node, *tmp;
+	LIST_HEAD(stash);
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(global))
+		goto flush_local;
+
+	list_for_each_entry_safe(node, tmp, global, next) {
+		if (node->owner == process) {
+			list_del(&node->next);
+			list_add(&node->next, &stash);
+		}
+	}
+
+	list_for_each_entry_safe(node, tmp, &stash, next) {
+		reclaim(node, s);
+		xnlock_get_irqsave(&nklock, s);
+	}
+
+	XENO_BUG_ON(COBALT, !list_empty(&stash));
+
+flush_local:
+	if (list_empty(local))
+		goto out;
+
+	list_for_each_entry_safe(node, tmp, local, next) {
+		reclaim(node, s);
+		xnlock_get_irqsave(&nklock, s);
+	}
+out:
+	xnsched_run();
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+#define cobalt_reclaim_resource(__process, __reclaim, __type)		\
+	__reclaim_resource(__process, __reclaim,			\
+			   &(__process)->resources.__type ## q,		\
+			   &cobalt_global_resources.__type ## q)
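+/*
+ * For instance, cobalt_reclaim_resource(process, cobalt_cond_reclaim, cond)
+ * expands to:
+ *
+ *   __reclaim_resource(process, cobalt_cond_reclaim,
+ *                      &(process)->resources.condq,
+ *                      &cobalt_global_resources.condq);
+ *
+ * i.e. condition variables owned by @process are flushed from both the
+ * per-process and the global resource queues.
+ */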
+
+static void cobalt_process_detach(void *arg)
+{
+	struct cobalt_process *process = arg;
+
+	cobalt_nsem_reclaim(process);
+	cobalt_timer_reclaim(process);
+	cobalt_sched_reclaim(process);
+	cobalt_reclaim_resource(process, cobalt_cond_reclaim, cond);
+	cobalt_reclaim_resource(process, cobalt_mutex_reclaim, mutex);
+	cobalt_reclaim_resource(process, cobalt_event_reclaim, event);
+	cobalt_reclaim_resource(process, cobalt_monitor_reclaim, monitor);
+	cobalt_reclaim_resource(process, cobalt_sem_reclaim, sem);
+	detach_process(process);
+	/*
+	 * The cobalt_process descriptor release may be deferred until
+	 * the last mapping on the private heap is gone. Either way, it
+	 * is potentially stale memory already, so do not dereference
+	 * it from this point on.
+	 */
+}
+
+struct xnthread_personality cobalt_personality = {
+	.name = "cobalt",
+	.magic = 0,
+	.ops = {
+		.attach_process = cobalt_process_attach,
+		.detach_process = cobalt_process_detach,
+		.map_thread = cobalt_thread_map,
+		.exit_thread = cobalt_thread_exit,
+		.finalize_thread = cobalt_thread_finalize,
+	},
+};
+EXPORT_SYMBOL_GPL(cobalt_personality);
+
+__init int cobalt_init(void)
+{
+	unsigned int i, size;
+	int ret;
+
+	size = sizeof(*process_hash) * PROCESS_HASH_SIZE;
+	process_hash = kmalloc(size, GFP_KERNEL);
+	if (process_hash == NULL) {
+		printk(XENO_ERR "cannot allocate processes hash table\n");
+		return -ENOMEM;
+	}
+
+	ret = xndebug_init();
+	if (ret)
+		goto fail_debug;
+
+	for (i = 0; i < PROCESS_HASH_SIZE; i++)
+		INIT_HLIST_HEAD(&process_hash[i]);
+
+	xnsynch_init(&yield_sync, XNSYNCH_FIFO, NULL);
+
+	ret = cobalt_memdev_init();
+	if (ret)
+		goto fail_memdev;
+
+	ret = cobalt_register_personality(&cobalt_personality);
+	if (ret)
+		goto fail_register;
+
+	ret = cobalt_signal_init();
+	if (ret)
+		goto fail_siginit;
+
+	ret = pipeline_trap_kevents();
+	if (ret)
+		goto fail_kevents;
+
+	if (gid_arg != -1)
+		printk(XENO_INFO "allowing access to group %d\n", gid_arg);
+
+	return 0;
+fail_kevents:
+	cobalt_signal_cleanup();
+fail_siginit:
+	cobalt_unregister_personality(0);
+fail_register:
+	cobalt_memdev_cleanup();
+fail_memdev:
+	xnsynch_destroy(&yield_sync);
+	xndebug_cleanup();
+fail_debug:
+	kfree(process_hash);
+
+	return ret;
+}
+++ linux-patched/kernel/xenomai/posix/nsem.c	2022-03-21 12:58:29.019892569 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/thread.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/err.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/tree.h>
+#include "internal.h"
+#include "sem.h"
+#include "thread.h"
+#include <trace/events/cobalt-posix.h>
+
+DEFINE_PRIVATE_XNLOCK(named_sem_lock);
+
+struct cobalt_named_sem {
+	struct cobalt_sem *sem;
+	struct cobalt_sem_shadow __user *usem;
+	unsigned int refs;
+	struct xnid id;
+};
+
+static struct cobalt_named_sem *
+sem_search(struct cobalt_process *process, xnhandle_t handle)
+{
+	struct xnid *i;
+
+	i = xnid_fetch(&process->usems, handle);
+	if (i == NULL)
+		return NULL;
+
+	return container_of(i, struct cobalt_named_sem, id);
+}
+
+static struct cobalt_sem_shadow __user *
+sem_open(struct cobalt_process *process,
+	 struct cobalt_sem_shadow __user *ushadow,
+	 struct filename *filename, int oflags, mode_t mode,
+	 unsigned int value)
+{
+	const char *name = filename->name;
+	struct cobalt_sem_shadow shadow;
+	struct cobalt_named_sem *u, *v;
+	struct cobalt_sem *sem;
+	xnhandle_t handle;
+	spl_t s;
+	int rc;
+
+	if (name[0] != '/' || name[1] == '\0')
+		return ERR_PTR(-EINVAL);
+
+  retry_bind:
+	rc = xnregistry_bind(&name[1], XN_NONBLOCK, XN_RELATIVE, &handle);
+	switch (rc) {
+	case 0:
+		/* Found */
+		if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+			return ERR_PTR(-EEXIST);
+
+		xnlock_get_irqsave(&named_sem_lock, s);
+		u = sem_search(process, handle);
+		if (u) {
+			++u->refs;
+			xnlock_put_irqrestore(&named_sem_lock, s);
+			return u->usem;
+		}
+		xnlock_put_irqrestore(&named_sem_lock, s);
+
+		xnlock_get_irqsave(&nklock, s);
+		sem = xnregistry_lookup(handle, NULL);
+		if (sem && sem->magic != COBALT_SEM_MAGIC) {
+			xnlock_put_irqrestore(&nklock, s);
+			return ERR_PTR(-EINVAL);
+		}
+
+		if (sem) {
+			++sem->refs;
+			xnlock_put_irqrestore(&nklock, s);
+		} else {
+			xnlock_put_irqrestore(&nklock, s);
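+			/*
+			 * The registry entry vanished between the bind
+			 * and the lookup (most likely a concurrent
+			 * unlink/destroy): restart the whole sequence.
+			 */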
+			goto retry_bind;
+		}
+
+		__cobalt_sem_shadow_init(sem, COBALT_NAMED_SEM_MAGIC, &shadow);
+		break;
+
+	case -EWOULDBLOCK:
+		/* Not found */
+		if ((oflags & O_CREAT) == 0)
+			return ERR_PTR(-ENOENT);
+
+		shadow.magic = 0;
+		sem = __cobalt_sem_init(&name[1], &shadow,
+					SEM_PSHARED | SEM_NAMED, value);
+		if (IS_ERR(sem)) {
+			rc = PTR_ERR(sem);
+			if (rc == -EEXIST)
+				goto retry_bind;
+			return ERR_PTR(rc);
+		}
+
+		sem->pathname = filename;
+		handle = shadow.handle;
+		break;
+
+	default:
+		return ERR_PTR(rc);
+	}
+
+	if (cobalt_copy_to_user(ushadow, &shadow, sizeof(shadow))) {
+		__cobalt_sem_destroy(handle);
+		return ERR_PTR(-EFAULT);
+	}
+
+	u = xnmalloc(sizeof(*u));
+	if (u == NULL) {
+		__cobalt_sem_destroy(handle);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	u->sem = sem;
+	u->usem = ushadow;
+	u->refs = 1;
+
+	xnlock_get_irqsave(&named_sem_lock, s);
+	v = sem_search(process, handle);
+	if (v) {
+		++v->refs;
+		xnlock_put_irqrestore(&named_sem_lock, s);
+		xnlock_get_irqsave(&nklock, s);
+		--sem->refs;
+		xnlock_put_irqrestore(&nklock, s);
+		putname(filename);
+		xnfree(u);
+		u = v;
+	} else {
+		xnid_enter(&process->usems, &u->id, handle);
+		xnlock_put_irqrestore(&named_sem_lock, s);
+	}
+
+	trace_cobalt_psem_open(name, handle, oflags, mode, value);
+
+	return u->usem;
+}
+
+static int sem_close(struct cobalt_process *process, xnhandle_t handle)
+{
+	struct cobalt_named_sem *u;
+	spl_t s;
+	int err;
+
+	xnlock_get_irqsave(&named_sem_lock, s);
+	u = sem_search(process, handle);
+	if (u == NULL) {
+		err = -ENOENT;
+		goto err_unlock;
+	}
+
+	if (--u->refs) {
+		err = 0;
+		goto err_unlock;
+	}
+
+	xnid_remove(&process->usems, &u->id);
+	xnlock_put_irqrestore(&named_sem_lock, s);
+
+	__cobalt_sem_destroy(handle);
+
+	xnfree(u);
+	return 1;
+
+  err_unlock:
+	xnlock_put_irqrestore(&named_sem_lock, s);
+	return err;
+}
+
+struct cobalt_sem_shadow __user *
+__cobalt_sem_open(struct cobalt_sem_shadow __user *usm,
+		  const char __user *u_name,
+		  int oflags, mode_t mode, unsigned int value)
+{
+	struct cobalt_process *process;
+	struct filename *filename;
+
+	process = cobalt_current_process();
+	if (process == NULL)
+		return ERR_PTR(-EPERM);
+
+	filename = getname(u_name);
+	if (IS_ERR(filename))
+		return ERR_CAST(filename);
+
+	usm = sem_open(process, usm, filename, oflags, mode, value);
+	if (IS_ERR(usm)) {
+		trace_cobalt_psem_open_failed(filename->name, oflags, mode,
+					      value, PTR_ERR(usm));
+		putname(filename);
+	}
+
+	return usm;
+}
+
+COBALT_SYSCALL(sem_open, lostage,
+	       (struct cobalt_sem_shadow __user *__user *u_addrp,
+		const char __user *u_name,
+		int oflags, mode_t mode, unsigned int value))
+{
+	struct cobalt_sem_shadow __user *usm;
+
+	if (__xn_get_user(usm, u_addrp))
+		return -EFAULT;
+
+	usm = __cobalt_sem_open(usm, u_name, oflags, mode, value);
+	if (IS_ERR(usm))
+		return PTR_ERR(usm);
+
+	return __xn_put_user(usm, u_addrp) ? -EFAULT : 0;
+}
+
+COBALT_SYSCALL(sem_close, lostage,
+	       (struct cobalt_sem_shadow __user *usm))
+{
+	struct cobalt_process *process;
+	xnhandle_t handle;
+
+	process = cobalt_current_process();
+	if (process == NULL)
+		return -EPERM;
+
+	handle = cobalt_get_handle_from_user(&usm->handle);
+	trace_cobalt_psem_close(handle);
+
+	return sem_close(process, handle);
+}
+
+static inline int sem_unlink(const char *name)
+{
+	xnhandle_t handle;
+	int ret;
+
+	if (name[0] != '/')
+		return -EINVAL;
+
+	ret = xnregistry_bind(name + 1, XN_NONBLOCK, XN_RELATIVE, &handle);
+	if (ret == -EWOULDBLOCK)
+		return -ENOENT;
+
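+	/*
+	 * If the semaphore is still referenced (-EBUSY), only drop its
+	 * registry name: following POSIX unlink semantics, the object
+	 * lingers until its last user closes it.
+	 */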
+	if (__cobalt_sem_destroy(handle) == -EBUSY)
+		xnregistry_unlink(xnregistry_key(handle));
+
+	return 0;
+}
+
+COBALT_SYSCALL(sem_unlink, lostage,
+	       (const char __user *u_name))
+{
+	struct filename *filename;
+	int ret;
+
+	filename = getname(u_name);
+	if (IS_ERR(filename))
+		return PTR_ERR(filename);
+
+	trace_cobalt_psem_unlink(filename->name);
+	ret = sem_unlink(filename->name);
+	putname(filename);
+
+	return ret;
+}
+
+static void reclaim_named_sem(void *arg, struct xnid *i)
+{
+	struct cobalt_process *process = arg;
+	struct cobalt_named_sem *u;
+
+	u = container_of(i, struct cobalt_named_sem, id);
+	u->refs = 1;
+	sem_close(process, xnid_key(i));
+}
+
+void cobalt_nsem_reclaim(struct cobalt_process *process)
+{
+	xntree_cleanup(&process->usems, process, reclaim_named_sem);
+}
+++ linux-patched/kernel/xenomai/posix/thread.c	2022-03-21 12:58:29.015892608 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/types.h>
+#include <linux/cred.h>
+#include <linux/jhash.h>
+#include <linux/signal.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include "internal.h"
+#include "thread.h"
+#include "sched.h"
+#include "signal.h"
+#include "timer.h"
+#include "clock.h"
+#include "sem.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/cobalt-posix.h>
+
+xnticks_t cobalt_time_slice = CONFIG_XENO_OPT_RR_QUANTUM * 1000;
+
+#define PTHREAD_HSLOTS (1 << 8)	/* Must be a power of 2 */
+
+/* Process-local index, pthread_t x mm_struct (cobalt_local_hkey). */
+struct local_thread_hash {
+	pid_t pid;
+	struct cobalt_thread *thread;
+	struct cobalt_local_hkey hkey;
+	struct local_thread_hash *next;
+};
+
+/* System-wide index on task_pid_nr(). */
+struct global_thread_hash {
+	pid_t pid;
+	struct cobalt_thread *thread;
+	struct global_thread_hash *next;
+};
+
+static struct local_thread_hash *local_index[PTHREAD_HSLOTS];
+
+static struct global_thread_hash *global_index[PTHREAD_HSLOTS];
+
+static inline struct local_thread_hash *
+thread_hash(const struct cobalt_local_hkey *hkey,
+	    struct cobalt_thread *thread, pid_t pid)
+{
+	struct global_thread_hash **ghead, *gslot;
+	struct local_thread_hash **lhead, *lslot;
+	u32 hash;
+	void *p;
+	spl_t s;
+
+	p = xnmalloc(sizeof(*lslot) + sizeof(*gslot));
+	if (p == NULL)
+		return NULL;
+
+	lslot = p;
+	lslot->hkey = *hkey;
+	lslot->thread = thread;
+	lslot->pid = pid;
+	hash = jhash2((u32 *)&lslot->hkey,
+		      sizeof(lslot->hkey) / sizeof(u32), 0);
+	lhead = &local_index[hash & (PTHREAD_HSLOTS - 1)];
+
+	gslot = p + sizeof(*lslot);
+	gslot->pid = pid;
+	gslot->thread = thread;
+	hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0);
+	ghead = &global_index[hash & (PTHREAD_HSLOTS - 1)];
+
+	xnlock_get_irqsave(&nklock, s);
+	lslot->next = *lhead;
+	*lhead = lslot;
+	gslot->next = *ghead;
+	*ghead = gslot;
+	xnlock_put_irqrestore(&nklock, s);
+
+	return lslot;
+}
+
+static inline void thread_unhash(const struct cobalt_local_hkey *hkey)
+{
+	struct global_thread_hash **gtail, *gslot;
+	struct local_thread_hash **ltail, *lslot;
+	pid_t pid;
+	u32 hash;
+	spl_t s;
+
+	hash = jhash2((u32 *) hkey, sizeof(*hkey) / sizeof(u32), 0);
+	ltail = &local_index[hash & (PTHREAD_HSLOTS - 1)];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	lslot = *ltail;
+	while (lslot &&
+	       (lslot->hkey.u_pth != hkey->u_pth ||
+		lslot->hkey.mm != hkey->mm)) {
+		ltail = &lslot->next;
+		lslot = *ltail;
+	}
+
+	if (lslot == NULL) {
+		xnlock_put_irqrestore(&nklock, s);
+		return;
+	}
+
+	*ltail = lslot->next;
+	pid = lslot->pid;
+	hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0);
+	gtail = &global_index[hash & (PTHREAD_HSLOTS - 1)];
+	gslot = *gtail;
+	while (gslot && gslot->pid != pid) {
+		gtail = &gslot->next;
+		gslot = *gtail;
+	}
+	/* gslot must be found here. */
+	XENO_BUG_ON(COBALT, !(gslot && gtail));
+	*gtail = gslot->next;
+
+	xnlock_put_irqrestore(&nklock, s);
+
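+	/*
+	 * lslot and gslot were carved out of a single xnmalloc() block
+	 * in thread_hash(), so freeing lslot releases the global slot
+	 * as well.
+	 */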
+	xnfree(lslot);
+}
+
+static struct cobalt_thread *
+thread_lookup(const struct cobalt_local_hkey *hkey)
+{
+	struct local_thread_hash *lslot;
+	struct cobalt_thread *thread;
+	u32 hash;
+	spl_t s;
+
+	hash = jhash2((u32 *)hkey, sizeof(*hkey) / sizeof(u32), 0);
+	lslot = local_index[hash & (PTHREAD_HSLOTS - 1)];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	while (lslot != NULL &&
+	       (lslot->hkey.u_pth != hkey->u_pth || lslot->hkey.mm != hkey->mm))
+		lslot = lslot->next;
+
+	thread = lslot ? lslot->thread : NULL;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return thread;
+}
+
+struct cobalt_thread *cobalt_thread_find(pid_t pid) /* nklocked, IRQs off */
+{
+	struct global_thread_hash *gslot;
+	u32 hash;
+
+	hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0);
+
+	gslot = global_index[hash & (PTHREAD_HSLOTS - 1)];
+	while (gslot && gslot->pid != pid)
+		gslot = gslot->next;
+
+	return gslot ? gslot->thread : NULL;
+}
+EXPORT_SYMBOL_GPL(cobalt_thread_find);
+
+struct cobalt_thread *cobalt_thread_find_local(pid_t pid) /* nklocked, IRQs off */
+{
+	struct cobalt_thread *thread;
+
+	thread = cobalt_thread_find(pid);
+	if (thread == NULL || thread->hkey.mm != current->mm)
+		return NULL;
+
+	return thread;
+}
+EXPORT_SYMBOL_GPL(cobalt_thread_find_local);
+
+struct cobalt_thread *cobalt_thread_lookup(unsigned long pth) /* nklocked, IRQs off */
+{
+	struct cobalt_local_hkey hkey;
+
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+	return thread_lookup(&hkey);
+}
+EXPORT_SYMBOL_GPL(cobalt_thread_lookup);
+
+void cobalt_thread_map(struct xnthread *curr)
+{
+	struct cobalt_thread *thread;
+
+	thread = container_of(curr, struct cobalt_thread, threadbase);
+	thread->process = cobalt_current_process();
+	XENO_BUG_ON(COBALT, thread->process == NULL);
+}
+
+struct xnthread_personality *cobalt_thread_exit(struct xnthread *curr)
+{
+	struct cobalt_thread *thread;
+	spl_t s;
+
+	thread = container_of(curr, struct cobalt_thread, threadbase);
+	/*
+	 * Unhash first, to prevent further access to the TCB from
+	 * userland.
+	 */
+	thread_unhash(&thread->hkey);
+	xnlock_get_irqsave(&nklock, s);
+	cobalt_mark_deleted(thread);
+	list_del(&thread->next);
+	xnlock_put_irqrestore(&nklock, s);
+	cobalt_signal_flush(thread);
+	xnsynch_destroy(&thread->monitor_synch);
+	xnsynch_destroy(&thread->sigwait);
+
+	return NULL;
+}
+
+struct xnthread_personality *cobalt_thread_finalize(struct xnthread *zombie)
+{
+	struct cobalt_thread *thread;
+
+	thread = container_of(zombie, struct cobalt_thread, threadbase);
+	xnfree(thread);
+
+	return NULL;
+}
+
+int __cobalt_thread_setschedparam_ex(struct cobalt_thread *thread, int policy,
+				     const struct sched_param_ex *param_ex)
+{
+	struct xnsched_class *sched_class;
+	union xnsched_policy_param param;
+	xnticks_t tslice;
+	int ret = 0;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (!cobalt_obj_active(thread, COBALT_THREAD_MAGIC,
+			       struct cobalt_thread)) {
+		ret = -ESRCH;
+		goto out;
+	}
+
+	tslice = thread->threadbase.rrperiod;
+	sched_class = cobalt_sched_policy_param(&param, policy,
+						param_ex, &tslice);
+	if (sched_class == NULL) {
+		ret = -EINVAL;
+		goto out;
+	}
+	xnthread_set_slice(&thread->threadbase, tslice);
+	if (cobalt_call_extension(thread_setsched, &thread->extref, ret,
+				  sched_class, &param) && ret)
+		goto out;
+	ret = xnthread_set_schedparam(&thread->threadbase,
+				      sched_class, &param);
+	xnsched_run();
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+int __cobalt_thread_getschedparam_ex(struct cobalt_thread *thread,
+				     int *policy_r,
+				     struct sched_param_ex *param_ex)
+{
+	struct xnsched_class *base_class;
+	struct xnthread *base_thread;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (!cobalt_obj_active(thread, COBALT_THREAD_MAGIC,
+			       struct cobalt_thread)) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -ESRCH;
+	}
+
+	base_thread = &thread->threadbase;
+	base_class = base_thread->base_class;
+	*policy_r = base_class->policy;
+
+	param_ex->sched_priority = xnthread_base_priority(base_thread);
+	if (param_ex->sched_priority == 0) /* SCHED_FIFO/SCHED_WEAK */
+		*policy_r = SCHED_NORMAL;
+
+	if (base_class == &xnsched_class_rt) {
+		if (xnthread_test_state(base_thread, XNRRB)) {
+			u_ns2ts(&param_ex->sched_rr_quantum, base_thread->rrperiod);
+			*policy_r = SCHED_RR;
+		}
+		goto out;
+	}
+
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+	if (base_class == &xnsched_class_weak) {
+		if (*policy_r != SCHED_WEAK)
+			param_ex->sched_priority = -param_ex->sched_priority;
+		goto out;
+	}
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	if (base_class == &xnsched_class_sporadic) {
+		param_ex->sched_ss_low_priority = base_thread->pss->param.low_prio;
+		u_ns2ts(&param_ex->sched_ss_repl_period, base_thread->pss->param.repl_period);
+		u_ns2ts(&param_ex->sched_ss_init_budget, base_thread->pss->param.init_budget);
+		param_ex->sched_ss_max_repl = base_thread->pss->param.max_repl;
+		goto out;
+	}
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	if (base_class == &xnsched_class_tp) {
+		param_ex->sched_tp_partition =
+			base_thread->tps - base_thread->sched->tp.partitions;
+		goto out;
+	}
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	if (base_class == &xnsched_class_quota) {
+		param_ex->sched_quota_group = base_thread->quota->tgid;
+		goto out;
+	}
+#endif
+
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+static int pthread_create(struct cobalt_thread **thread_p,
+			  int policy,
+			  const struct sched_param_ex *param_ex,
+			  struct task_struct *task)
+{
+	struct cobalt_process *process = cobalt_current_process();
+	struct xnsched_class *sched_class;
+	union xnsched_policy_param param;
+	struct xnthread_init_attr iattr;
+	struct cobalt_thread *thread;
+	xnticks_t tslice;
+	int ret, n;
+	spl_t s;
+
+	thread = xnmalloc(sizeof(*thread));
+	if (thread == NULL)
+		return -EAGAIN;
+
+	tslice = cobalt_time_slice;
+	sched_class = cobalt_sched_policy_param(&param, policy,
+						param_ex, &tslice);
+	if (sched_class == NULL) {
+		xnfree(thread);
+		return -EINVAL;
+	}
+
+	iattr.name = task->comm;
+	iattr.flags = XNUSER|XNFPU;
+	iattr.personality = &cobalt_personality;
+	iattr.affinity = CPU_MASK_ALL;
+	ret = xnthread_init(&thread->threadbase, &iattr, sched_class, &param);
+	if (ret) {
+		xnfree(thread);
+		return ret;
+	}
+
+	thread->magic = COBALT_THREAD_MAGIC;
+	xnsynch_init(&thread->monitor_synch, XNSYNCH_FIFO, NULL);
+
+	xnsynch_init(&thread->sigwait, XNSYNCH_FIFO, NULL);
+	sigemptyset(&thread->sigpending);
+	for (n = 0; n < _NSIG; n++)
+		INIT_LIST_HEAD(thread->sigqueues + n);
+
+	xnthread_set_slice(&thread->threadbase, tslice);
+	cobalt_set_extref(&thread->extref, NULL, NULL);
+
+	/*
+	 * We need an anonymous registry entry to obtain a handle for
+	 * fast mutex locking.
+	 */
+	ret = xnthread_register(&thread->threadbase, "");
+	if (ret) {
+		xnsynch_destroy(&thread->monitor_synch);
+		xnsynch_destroy(&thread->sigwait);
+		__xnthread_discard(&thread->threadbase);
+		xnfree(thread);
+		return ret;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+	list_add_tail(&thread->next, process ? &process->thread_list
+					     : &cobalt_global_thread_list);
+	xnlock_put_irqrestore(&nklock, s);
+
+	thread->hkey.u_pth = 0;
+	thread->hkey.mm = NULL;
+
+	*thread_p = thread;
+
+	return 0;
+}
+
+static void pthread_discard(struct cobalt_thread *thread)
+{
+	spl_t s;
+
+	xnsynch_destroy(&thread->monitor_synch);
+	xnsynch_destroy(&thread->sigwait);
+
+	xnlock_get_irqsave(&nklock, s);
+	list_del(&thread->next);
+	xnlock_put_irqrestore(&nklock, s);
+	__xnthread_discard(&thread->threadbase);
+	xnfree(thread);
+}
+
+static inline int pthread_setmode_np(int clrmask, int setmask, int *mode_r)
+{
+	const int valid_flags = XNLOCK|XNWARN|XNTRAPLB;
+	int old;
+
+	/*
+	 * The conforming mode bit is actually zero, since jumping to
+	 * this code entailed switching to primary mode already.
+	 */
+	if ((clrmask & ~valid_flags) != 0 || (setmask & ~valid_flags) != 0)
+		return -EINVAL;
+
+	old = xnthread_set_mode(clrmask, setmask);
+	if (mode_r)
+		*mode_r = old;
+
+	if ((clrmask & ~setmask) & XNLOCK)
+		/* Reschedule if the scheduler has been unlocked. */
+		xnsched_run();
+
+	return 0;
+}
+
+static struct cobalt_thread *thread_lookup_or_shadow(unsigned long pth,
+						     __u32 __user *u_winoff,
+						     int *promoted_r)
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+
+	*promoted_r = 0;
+
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+
+	thread = thread_lookup(&hkey);
+	if (thread == NULL) {
+		if (u_winoff == NULL)
+			return ERR_PTR(-ESRCH);
+
+		thread = cobalt_thread_shadow(&hkey, u_winoff);
+		if (!IS_ERR(thread))
+			*promoted_r = 1;
+	}
+
+	return thread;
+}
+
+int cobalt_thread_setschedparam_ex(unsigned long pth,
+				   int policy,
+				   const struct sched_param_ex *param_ex,
+				   __u32 __user *u_winoff,
+				   int __user *u_promoted)
+{
+	struct cobalt_thread *thread;
+	int ret, promoted;
+
+	trace_cobalt_pthread_setschedparam(pth, policy, param_ex);
+
+	thread = thread_lookup_or_shadow(pth, u_winoff, &promoted);
+	if (IS_ERR(thread))
+		return PTR_ERR(thread);
+
+	ret = __cobalt_thread_setschedparam_ex(thread, policy, param_ex);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_promoted, &promoted, sizeof(promoted));
+}
+
+COBALT_SYSCALL(thread_setschedparam_ex, conforming,
+	       (unsigned long pth,
+		int policy,
+		const struct sched_param_ex __user *u_param,
+		__u32 __user *u_winoff,
+		int __user *u_promoted))
+{
+	struct sched_param_ex param_ex;
+
+	if (cobalt_copy_from_user(&param_ex, u_param, sizeof(param_ex)))
+		return -EFAULT;
+
+	return cobalt_thread_setschedparam_ex(pth, policy, &param_ex,
+					      u_winoff, u_promoted);
+}
+
+int cobalt_thread_getschedparam_ex(unsigned long pth,
+				   int *policy_r,
+				   struct sched_param_ex *param_ex)
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	int ret;
+
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+	thread = thread_lookup(&hkey);
+	if (thread == NULL)
+		return -ESRCH;
+
+	ret = __cobalt_thread_getschedparam_ex(thread, policy_r, param_ex);
+	if (ret)
+		return ret;
+
+	trace_cobalt_pthread_getschedparam(pth, *policy_r, param_ex);
+
+	return 0;
+}
+
+COBALT_SYSCALL(thread_getschedparam_ex, current,
+	       (unsigned long pth,
+		int __user *u_policy,
+		struct sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+	int ret, policy;
+
+	ret = cobalt_thread_getschedparam_ex(pth, &policy, &param_ex);
+	if (ret)
+		return ret;
+
+	ret = cobalt_copy_to_user(u_policy, &policy, sizeof(policy));
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_param, &param_ex, sizeof(param_ex));
+}
+
+int cobalt_thread_setschedprio(unsigned long pth,
+			       int prio,
+			       __u32 __user *u_winoff,
+			       int __user *u_promoted)
+{
+	struct sched_param_ex param_ex;
+	struct cobalt_thread *thread;
+	int ret, policy, promoted;
+
+	trace_cobalt_pthread_setschedprio(pth, prio);
+
+	thread = thread_lookup_or_shadow(pth, u_winoff, &promoted);
+	if (IS_ERR(thread))
+		return PTR_ERR(thread);
+
+	ret = __cobalt_thread_getschedparam_ex(thread, &policy, &param_ex);
+	if (ret)
+		return ret;
+
+	param_ex.sched_priority = prio;
+
+	ret = __cobalt_thread_setschedparam_ex(thread, policy, &param_ex);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_promoted, &promoted, sizeof(promoted));
+}
+
+COBALT_SYSCALL(thread_setschedprio, conforming,
+	       (unsigned long pth,
+		int prio,
+		__u32 __user *u_winoff,
+		int __user *u_promoted))
+{
+	return cobalt_thread_setschedprio(pth, prio, u_winoff, u_promoted);
+}
+
+int __cobalt_thread_create(unsigned long pth, int policy,
+			   struct sched_param_ex *param_ex,
+			   int xid, __u32 __user *u_winoff)
+{
+	struct cobalt_thread *thread = NULL;
+	struct task_struct *p = current;
+	struct cobalt_local_hkey hkey;
+	int ret;
+
+	trace_cobalt_pthread_create(pth, policy, param_ex);
+
+	/*
+	 * We have been passed the pthread_t identifier the user-space
+	 * Cobalt library has assigned to our caller; we'll index our
+	 * internal pthread_t descriptor in kernel space on it.
+	 */
+	hkey.u_pth = pth;
+	hkey.mm = p->mm;
+
+	ret = pthread_create(&thread, policy, param_ex, p);
+	if (ret)
+		return ret;
+
+	ret = cobalt_map_user(&thread->threadbase, u_winoff);
+	if (ret) {
+		pthread_discard(thread);
+		return ret;
+	}
+
+	if (!thread_hash(&hkey, thread, task_pid_vnr(p))) {
+		ret = -EAGAIN;
+		goto fail;
+	}
+
+	thread->hkey = hkey;
+
+	if (xid > 0 && cobalt_push_personality(xid) == NULL) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	return xnthread_harden();
+fail:
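+	/*
+	 * cobalt_map_user() has already succeeded by the time we land
+	 * here, so dismantle the thread through the regular
+	 * cancellation path rather than pthread_discard().
+	 */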
+	xnthread_cancel(&thread->threadbase);
+
+	return ret;
+}
+
+COBALT_SYSCALL(thread_create, init,
+	       (unsigned long pth, int policy,
+		struct sched_param_ex __user *u_param,
+		int xid,
+		__u32 __user *u_winoff))
+{
+	struct sched_param_ex param_ex;
+	int ret;
+
+	ret = cobalt_copy_from_user(&param_ex, u_param, sizeof(param_ex));
+	if (ret)
+		return ret;
+
+	return __cobalt_thread_create(pth, policy, &param_ex, xid, u_winoff);
+}
+
+struct cobalt_thread *
+cobalt_thread_shadow(struct cobalt_local_hkey *hkey,
+		     __u32 __user *u_winoff)
+{
+	struct cobalt_thread *thread = NULL;
+	struct sched_param_ex param_ex;
+	int ret;
+
+	if (xnthread_current())
+		return ERR_PTR(-EBUSY);
+
+	param_ex.sched_priority = 0;
+	trace_cobalt_pthread_create(hkey->u_pth, SCHED_NORMAL, &param_ex);
+	ret = pthread_create(&thread, SCHED_NORMAL, &param_ex, current);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = cobalt_map_user(&thread->threadbase, u_winoff);
+	if (ret) {
+		pthread_discard(thread);
+		return ERR_PTR(ret);
+	}
+
+	if (!thread_hash(hkey, thread, task_pid_vnr(current))) {
+		ret = -EAGAIN;
+		goto fail;
+	}
+
+	thread->hkey = *hkey;
+
+	xnthread_harden();
+
+	return thread;
+fail:
+	xnthread_cancel(&thread->threadbase);
+
+	return ERR_PTR(ret);
+}
+
+COBALT_SYSCALL(thread_setmode, primary,
+	       (int clrmask, int setmask, int __user *u_mode_r))
+{
+	int ret, old;
+
+	trace_cobalt_pthread_setmode(clrmask, setmask);
+
+	ret = pthread_setmode_np(clrmask, setmask, &old);
+	if (ret)
+		return ret;
+
+	if (u_mode_r && cobalt_copy_to_user(u_mode_r, &old, sizeof(old)))
+		return -EFAULT;
+
+	return 0;
+}
+
+COBALT_SYSCALL(thread_setname, current,
+	       (unsigned long pth, const char __user *u_name))
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	char name[XNOBJECT_NAME_LEN];
+	struct task_struct *p;
+	spl_t s;
+
+	if (cobalt_strncpy_from_user(name, u_name,
+				     sizeof(name) - 1) < 0)
+		return -EFAULT;
+
+	name[sizeof(name) - 1] = '\0';
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+
+	trace_cobalt_pthread_setname(pth, name);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	thread = thread_lookup(&hkey);
+	if (thread == NULL) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -ESRCH;
+	}
+
+	ksformat(thread->threadbase.name,
+		 XNOBJECT_NAME_LEN - 1, "%s", name);
+	p = xnthread_host_task(&thread->threadbase);
+	get_task_struct(p);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	knamecpy(p->comm, name);
+	put_task_struct(p);
+
+	return 0;
+}
+
+COBALT_SYSCALL(thread_kill, conforming,
+	       (unsigned long pth, int sig))
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	int ret;
+	spl_t s;
+
+	trace_cobalt_pthread_kill(pth, sig);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+	thread = thread_lookup(&hkey);
+	if (thread == NULL)
+		ret = -ESRCH;
+	else
+		ret = __cobalt_kill(thread, sig, 0);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(thread_join, primary, (unsigned long pth))
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	spl_t s;
+
+	trace_cobalt_pthread_join(pth);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+	thread = thread_lookup(&hkey);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (thread == NULL)
+		return -ESRCH;
+
+	return xnthread_join(&thread->threadbase, false);
+}
+
+COBALT_SYSCALL(thread_getpid, current, (unsigned long pth))
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	pid_t pid;
+	spl_t s;
+
+	trace_cobalt_pthread_pid(pth);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+	thread = thread_lookup(&hkey);
+	if (thread == NULL)
+		pid = -ESRCH;
+	else
+		pid = xnthread_host_pid(&thread->threadbase);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return pid;
+}
+
+COBALT_SYSCALL(thread_getstat, current,
+	       (pid_t pid, struct cobalt_threadstat __user *u_stat))
+{
+	struct cobalt_threadstat stat;
+	struct cobalt_thread *p;
+	struct xnthread *thread;
+	xnticks_t xtime;
+	spl_t s;
+
+	trace_cobalt_pthread_stat(pid);
+
+	if (pid == 0) {
+		thread = xnthread_current();
+		if (thread == NULL)
+			return -EPERM;
+		xnlock_get_irqsave(&nklock, s);
+	} else {
+		xnlock_get_irqsave(&nklock, s);
+		p = cobalt_thread_find(pid);
+		if (p == NULL) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -ESRCH;
+		}
+		thread = &p->threadbase;
+	}
+
+	/* We have to hold the nklock to keep most values consistent. */
+	stat.cpu = xnsched_cpu(thread->sched);
+	stat.cprio = xnthread_current_priority(thread);
+	xtime = xnstat_exectime_get_total(&thread->stat.account);
+	if (thread->sched->curr == thread)
+		xtime += xnstat_exectime_now() -
+			xnstat_exectime_get_last_switch(thread->sched);
+	stat.xtime = xnclock_ticks_to_ns(&nkclock, xtime);
+	stat.msw = xnstat_counter_get(&thread->stat.ssw);
+	stat.csw = xnstat_counter_get(&thread->stat.csw);
+	stat.xsc = xnstat_counter_get(&thread->stat.xsc);
+	stat.pf = xnstat_counter_get(&thread->stat.pf);
+	stat.status = xnthread_get_state(thread);
+	if (thread->lock_count > 0)
+		stat.status |= XNLOCK;
+	stat.timeout = xnthread_get_timeout(thread,
+					    xnclock_read_monotonic(&nkclock));
+	strcpy(stat.name, thread->name);
+	strcpy(stat.personality, thread->personality->name);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return cobalt_copy_to_user(u_stat, &stat, sizeof(stat));
+}
+
+#ifdef CONFIG_XENO_OPT_COBALT_EXTENSION
+
+int cobalt_thread_extend(struct cobalt_extension *ext,
+			 void *priv)
+{
+	struct cobalt_thread *thread = cobalt_current_thread();
+	struct xnthread_personality *prev;
+
+	trace_cobalt_pthread_extend(thread->hkey.u_pth, ext->core.name);
+
+	prev = cobalt_push_personality(ext->core.xid);
+	if (prev == NULL)
+		return -EINVAL;
+
+	cobalt_set_extref(&thread->extref, ext, priv);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cobalt_thread_extend);
+
+void cobalt_thread_restrict(void)
+{
+	struct cobalt_thread *thread = cobalt_current_thread();
+
+	trace_cobalt_pthread_restrict(thread->hkey.u_pth,
+		      thread->threadbase.personality->name);
+	cobalt_pop_personality(&cobalt_personality);
+	cobalt_set_extref(&thread->extref, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(cobalt_thread_restrict);
+
+#endif /* CONFIG_XENO_OPT_COBALT_EXTENSION */
+
+const char *cobalt_trace_parse_sched_params(struct trace_seq *p, int policy,
+					    struct sched_param_ex *params)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+
+	switch (policy) {
+	case SCHED_QUOTA:
+		trace_seq_printf(p, "priority=%d, group=%d",
+				 params->sched_priority,
+				 params->sched_quota_group);
+		break;
+	case SCHED_TP:
+		trace_seq_printf(p, "priority=%d, partition=%d",
+				 params->sched_priority,
+				 params->sched_tp_partition);
+		break;
+	case SCHED_NORMAL:
+		break;
+	case SCHED_SPORADIC:
+		trace_seq_printf(p, "priority=%d, low_priority=%d, "
+				 "budget=(%ld.%09ld), period=(%ld.%09ld), "
+				 "maxrepl=%d",
+				 params->sched_priority,
+				 params->sched_ss_low_priority,
+				 params->sched_ss_init_budget.tv_sec,
+				 params->sched_ss_init_budget.tv_nsec,
+				 params->sched_ss_repl_period.tv_sec,
+				 params->sched_ss_repl_period.tv_nsec,
+				 params->sched_ss_max_repl);
+		break;
+	case SCHED_RR:
+	case SCHED_FIFO:
+	case SCHED_COBALT:
+	case SCHED_WEAK:
+	default:
+		trace_seq_printf(p, "priority=%d", params->sched_priority);
+		break;
+	}
+	trace_seq_putc(p, '\0');
+
+	return ret;
+}
+++ linux-patched/kernel/xenomai/posix/Makefile	2022-03-21 12:58:29.012892638 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/syscall.c	1970-01-01 01:00:00.000000000 +0100
+
+ccflags-y += -I$(srctree)/kernel
+
+obj-$(CONFIG_XENOMAI) += xenomai.o
+
+xenomai-y :=		\
+	clock.o		\
+	cond.o		\
+	corectl.o	\
+	event.o		\
+	io.o		\
+	memory.o	\
+	monitor.o	\
+	mqueue.o	\
+	mutex.o		\
+	nsem.o		\
+	process.o	\
+	sched.o		\
+	sem.o		\
+	signal.o	\
+	syscall.o	\
+	thread.o	\
+	timer.o		\
+	timerfd.o
+
+syscall_entries := $(srctree)/$(src)/gen-syscall-entries.sh
+
+quiet_cmd_syscall_entries = GEN     $@
+      cmd_syscall_entries = $(CONFIG_SHELL) '$(syscall_entries)' $(filter-out FORCE,$^) > $@
+
+$(obj)/syscall_entries.h: $(syscall_entries) $(wildcard $(srctree)/$(src)/*.c) FORCE
+	$(call if_changed,syscall_entries)
+
+target += syscall_entries.h
+
+$(obj)/syscall.o: $(obj)/syscall_entries.h
+
+xenomai-$(CONFIG_XENO_ARCH_SYS3264) += compat.o syscall32.o
+++ linux-patched/kernel/xenomai/posix/syscall.c	2022-03-21 12:58:29.008892677 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/event.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>
+ * Copyright (C) 2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/kconfig.h>
+#include <linux/unistd.h>
+#include <cobalt/uapi/corectl.h>
+#include <cobalt/kernel/tree.h>
+#include <cobalt/kernel/vdso.h>
+#include <cobalt/kernel/init.h>
+#include <pipeline/kevents.h>
+#include <pipeline/vdso_fallback.h>
+#include <asm/syscall.h>
+#include "internal.h"
+#include "thread.h"
+#include "sched.h"
+#include "mutex.h"
+#include "cond.h"
+#include "mqueue.h"
+#include "sem.h"
+#include "signal.h"
+#include "timer.h"
+#include "monitor.h"
+#include "clock.h"
+#include "event.h"
+#include "timerfd.h"
+#include "io.h"
+#include "corectl.h"
+#include "../debug.h"
+#include <trace/events/cobalt-posix.h>
+
+/* Syscall must run into the Linux domain. */
+#define __xn_exec_lostage    0x1
+/* Syscall must run into the Xenomai domain. */
+#define __xn_exec_histage    0x2
+/* Shadow syscall: caller must be mapped. */
+#define __xn_exec_shadow     0x4
+/* Switch back toggle; caller must return to its original mode. */
+#define __xn_exec_switchback 0x8
+/* Exec in current domain. */
+#define __xn_exec_current    0x10
+/* Exec in conforming domain, Xenomai or Linux. */
+#define __xn_exec_conforming 0x20
+/* Attempt syscall restart in the opposite domain upon -ENOSYS. */
+#define __xn_exec_adaptive   0x40
+/* Do not restart syscall upon signal receipt. */
+#define __xn_exec_norestart  0x80
+/* Shorthand for shadow init syscall. */
+#define __xn_exec_init       __xn_exec_lostage
+/* Shorthand for shadow syscall in Xenomai space. */
+#define __xn_exec_primary   (__xn_exec_shadow|__xn_exec_histage)
+/* Shorthand for shadow syscall in Linux space. */
+#define __xn_exec_secondary (__xn_exec_shadow|__xn_exec_lostage)
+/* Shorthand for syscall in Linux space with switchback if shadow. */
+#define __xn_exec_downup    (__xn_exec_lostage|__xn_exec_switchback)
+/* Shorthand for non-restartable primary syscall. */
+#define __xn_exec_nonrestartable (__xn_exec_primary|__xn_exec_norestart)
+/* Domain probing syscall starting in conforming mode. */
+#define __xn_exec_probing   (__xn_exec_conforming|__xn_exec_adaptive)
+/* Hand over mode selection to syscall.  */
+#define __xn_exec_handover  (__xn_exec_current|__xn_exec_adaptive)
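+/*
+ * For instance, a "probing" entry point combines __xn_exec_conforming
+ * and __xn_exec_adaptive: it first runs in the domain matching the
+ * caller (Xenomai for shadow threads, Linux otherwise), and may be
+ * restarted once in the opposite domain if the handler returns
+ * -ENOSYS.
+ */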
+
+typedef long (*cobalt_syshand)(unsigned long arg1, unsigned long arg2,
+			       unsigned long arg3, unsigned long arg4,
+			       unsigned long arg5);
+
+static void prepare_for_signal(struct task_struct *p,
+			       struct xnthread *thread,
+			       struct pt_regs *regs,
+			       int sysflags)
+{
+	int notify = 0;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnthread_test_info(thread, XNKICKED)) {
+		if (signal_pending(p)) {
+			__xn_error_return(regs,
+					  (sysflags & __xn_exec_norestart) ?
+					  -EINTR : -ERESTARTSYS);
+			notify = !xnthread_test_state(thread, XNSSTEP);
+			xnthread_clear_info(thread, XNBREAK);
+		}
+		xnthread_clear_info(thread, XNKICKED);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	xnthread_test_cancel();
+
+	xnthread_relax(notify, SIGDEBUG_MIGRATE_SIGNAL);
+}
+
+static COBALT_SYSCALL(migrate, current, (int domain))
+{
+	struct xnthread *thread = xnthread_current();
+
+	if (is_secondary_domain()) {
+		if (domain == COBALT_PRIMARY) {
+			if (thread == NULL)
+				return -EPERM;
+			/*
+			 * Paranoid: a corner case where userland
+			 * fiddles with SIGSHADOW while the target
+			 * thread is still waiting to be started.
+			 */
+			if (xnthread_test_state(thread, XNDORMANT))
+				return 0;
+
+			return xnthread_harden() ? : 1;
+		}
+		return 0;
+	}
+
+	/* We are running on the head stage, apply relax request. */
+	if (domain == COBALT_SECONDARY) {
+		xnthread_relax(0, 0);
+		return 1;
+	}
+
+	return 0;
+}
+
+static COBALT_SYSCALL(trace, current,
+		      (int op, unsigned long a1,
+		       unsigned long a2, unsigned long a3))
+{
+	int ret = -EINVAL;
+
+	switch (op) {
+	case __xntrace_op_max_begin:
+		ret = xntrace_max_begin(a1);
+		break;
+
+	case __xntrace_op_max_end:
+		ret = xntrace_max_end(a1);
+		break;
+
+	case __xntrace_op_max_reset:
+		ret = xntrace_max_reset();
+		break;
+
+	case __xntrace_op_user_start:
+		ret = xntrace_user_start();
+		break;
+
+	case __xntrace_op_user_stop:
+		ret = xntrace_user_stop(a1);
+		break;
+
+	case __xntrace_op_user_freeze:
+		ret = xntrace_user_freeze(a1, a2);
+		break;
+
+	case __xntrace_op_special:
+		ret = xntrace_special(a1 & 0xFF, a2);
+		break;
+
+	case __xntrace_op_special_u64:
+		ret = xntrace_special_u64(a1 & 0xFF,
+					  (((u64) a2) << 32) | a3);
+		break;
+
+	case __xntrace_op_latpeak_freeze:
+		xntrace_latpeak_freeze(a1);
+		ret = 0;
+		break;
+
+	}
+	return ret;
+}
+
+static COBALT_SYSCALL(ftrace_puts, current,
+		      (const char __user *str))
+{
+	char buf[256];
+	int len;
+
+	len = cobalt_strncpy_from_user(buf, str, sizeof(buf));
+	if (len < 0)
+		return -EFAULT;
+
+#ifdef CONFIG_TRACING
+	__trace_puts(_THIS_IP_, buf, len);
+#endif
+
+	return 0;
+}
+
+static COBALT_SYSCALL(archcall, current,
+		      (unsigned long a1, unsigned long a2,
+		       unsigned long a3, unsigned long a4,
+		       unsigned long a5))
+{
+	return xnarch_local_syscall(a1, a2, a3, a4, a5);
+}
+
+static COBALT_SYSCALL(get_current, current,
+		      (xnhandle_t __user *u_handle))
+{
+	struct xnthread *cur = xnthread_current();
+
+	if (cur == NULL)
+		return -EPERM;
+
+	return cobalt_copy_to_user(u_handle, &cur->handle,
+				      sizeof(*u_handle));
+}
+
+static COBALT_SYSCALL(backtrace, lostage,
+		      (int nr, unsigned long __user *u_backtrace, int reason))
+{
+	unsigned long backtrace[SIGSHADOW_BACKTRACE_DEPTH];
+	int ret;
+
+	/*
+	 * Bail out early in case backtrace() in userland is broken or
+	 * failed; we may still want to know about this in kernel
+	 * space, for future use.
+	 */
+	if (nr <= 0)
+		return 0;
+	/*
+	 * We may omit the older frames if we can't store the full
+	 * backtrace.
+	 */
+	if (nr > SIGSHADOW_BACKTRACE_DEPTH)
+		nr = SIGSHADOW_BACKTRACE_DEPTH;
+	/*
+	 * Fetch the backtrace array, filled with PC values as seen
+	 * from the relaxing thread in user-space. The copy may still
+	 * fail on a bogus user buffer, in which case the error is
+	 * propagated back to the caller.
+	 */
+	ret = cobalt_copy_from_user(backtrace, u_backtrace, nr * sizeof(long));
+	if (ret)
+		return ret;
+
+	xndebug_trace_relax(nr, backtrace, reason);
+
+	return 0;
+}
+
+static COBALT_SYSCALL(serialdbg, current,
+		      (const char __user *u_msg, int len))
+{
+	char buf[128];
+	int n;
+
+	while (len > 0) {
+		n = len;
+		if (n > sizeof(buf))
+			n = sizeof(buf);
+		if (cobalt_copy_from_user(buf, u_msg, n))
+			return -EFAULT;
+		raw_printk("%.*s", n, buf);
+		u_msg += n;
+		len -= n;
+	}
+
+	return 0;
+}
+
+static void stringify_feature_set(unsigned long fset, char *buf, int size)
+{
+	unsigned long feature;
+	int nc, nfeat;
+
+	*buf = '\0';
+
+	for (feature = 1, nc = nfeat = 0; fset != 0 && size > 0; feature <<= 1) {
+		if (fset & feature) {
+			nc = ksformat(buf, size, "%s%s",
+				      nfeat > 0 ? " " : "",
+				      get_feature_label(feature));
+			nfeat++;
+			size -= nc;
+			buf += nc;
+			fset &= ~feature;
+		}
+	}
+}
+
+static COBALT_SYSCALL(bind, lostage,
+		      (struct cobalt_bindreq __user *u_breq))
+{
+	unsigned long featreq, featmis;
+	struct cobalt_bindreq breq;
+	struct cobalt_featinfo *f;
+	int abirev;
+
+	if (cobalt_copy_from_user(&breq, u_breq, sizeof(breq)))
+		return -EFAULT;
+
+	f = &breq.feat_ret;
+	featreq = breq.feat_req;
+	if (!realtime_core_running() && (featreq & __xn_feat_control) == 0)
+		return -EAGAIN;
+
+	/*
+	 * Calculate the missing feature set:
+	 * kernel_unavailable_set & user_mandatory_set.
+	 */
+	featmis = (~XENOMAI_FEAT_DEP & (featreq & XENOMAI_FEAT_MAN));
+	abirev = breq.abi_rev;
+
+	/*
+	 * Pass back the supported feature set and the ABI revision
+	 * level to user-space.
+	 */
+	f->feat_all = XENOMAI_FEAT_DEP;
+	stringify_feature_set(XENOMAI_FEAT_DEP, f->feat_all_s,
+			      sizeof(f->feat_all_s));
+	f->feat_man = featreq & XENOMAI_FEAT_MAN;
+	stringify_feature_set(f->feat_man, f->feat_man_s,
+			      sizeof(f->feat_man_s));
+	f->feat_mis = featmis;
+	stringify_feature_set(featmis, f->feat_mis_s,
+			      sizeof(f->feat_mis_s));
+	f->feat_req = featreq;
+	stringify_feature_set(featreq, f->feat_req_s,
+			      sizeof(f->feat_req_s));
+	f->feat_abirev = XENOMAI_ABI_REV;
+	collect_arch_features(f);
+
+	pipeline_collect_features(f);
+	f->vdso_offset = cobalt_umm_offset(&cobalt_ppd_get(1)->umm, nkvdso);
+
+	if (cobalt_copy_to_user(u_breq, &breq, sizeof(breq)))
+		return -EFAULT;
+
+	/*
+	 * If some mandatory features the user-space code relies on
+	 * are missing at kernel level, we cannot go further.
+	 */
+	if (featmis)
+		return -EINVAL;
+
+	if (!check_abi_revision(abirev))
+		return -ENOEXEC;
+
+	return cobalt_bind_core(featreq);
+}
+
+static COBALT_SYSCALL(extend, lostage, (unsigned int magic))
+{
+	return cobalt_bind_personality(magic);
+}
+
+static int CoBaLt_ni(void)
+{
+	return -ENOSYS;
+}
+
+/*
+ * We have a single syscall table for all ABI models (i.e. 64bit
+ * native + 32bit emulation, or plain 32bit).
+ *
+ * The syscall table is set up in a single step, based on three
+ * subsequent sources of initializers:
+ *
+ * - first, all syscall entries are defaulted to a placeholder
+ * returning -ENOSYS (__COBALT_CALL_NI), as the table may be sparse.
+ *
+ * - then __COBALT_CALL_ENTRY() produces a native call entry
+ * (e.g. pure 64bit call handler for a 64bit architecture, 32bit
+ * handler for a 32bit architecture), optionally followed by a set of
+ * 32bit syscall entries offset by an arch-specific base index, which
+ * default to the native calls. These nitty-gritty details are defined
+ * by <asm/xenomai/syscall32.h>. 32bit architectures - or 64bit ones
+ * for which we don't support any 32bit ABI model - will simply define
+ * __COBALT_CALL32_ENTRY() as an empty macro.
+ *
+ * - finally, 32bit thunk entries are generated by including
+ * <asm/xenomai/syscall32-table.h>, overriding the default handlers
+ * installed during the previous step.
+ *
+ * For instance, with CONFIG_IA32_EMULATION support enabled in an
+ * x86_64 kernel, sc_cobalt_mq_timedreceive would appear twice in the
+ * table, as:
+ *
+ * [sc_cobalt_mq_timedreceive] = CoBaLt_mq_timedreceive,
+ * ...
+ * [sc_cobalt_mq_timedreceive + __COBALT_IA32_BASE] = CoBaLt32emu_mq_timedreceive,
+ *
+ * CoBaLt32emu_mq_timedreceive() would do the required thunking for
+ * dealing with the 32<->64bit conversion of arguments. On the other
+ * hand, sc_cobalt_sched_yield - which does not require any thunk -
+ * would also appear twice, but both entries would point at the native
+ * syscall implementation:
+ *
+ * [sc_cobalt_sched_yield] = CoBaLt_sched_yield,
+ * ...
+ * [sc_cobalt_sched_yield + __COBALT_IA32_BASE] = CoBaLt_sched_yield,
+ *
+ * Accordingly, applications targeting the ia32 model issue syscalls
+ * in the range [__COBALT_IA32_BASE..__COBALT_IA32_BASE +
+ * __NR_COBALT_SYSCALLS-1], whilst native (32/64bit) ones issue
+ * syscalls in the range [0..__NR_COBALT_SYSCALLS-1].
+ *
+ * In short, this is an incremental process where the arch-specific
+ * code can override the 32bit syscall entries, pointing at the thunk
+ * routines it may need for handing 32bit calls over to their
+ * respective 64bit implementation.
+ *
+ * By convention, there is NO pure 32bit syscall, which means that
+ * each 32bit syscall defined by a compat ABI interface MUST match a
+ * native (64bit) syscall. This is important as we share the call
+ * modes (i.e. __xn_exec_ bits) between all ABI models.
+ *
+ * --rpm
+ */
+#define __syshand__(__name)	((cobalt_syshand)(CoBaLt_ ## __name))
+
+#define __COBALT_NI	__syshand__(ni)
+
+#define __COBALT_CALL_NI				\
+	[0 ... __NR_COBALT_SYSCALLS-1] = __COBALT_NI,	\
+	__COBALT_CALL32_INITHAND(__COBALT_NI)
+
+#define __COBALT_CALL_NFLAGS				\
+	[0 ... __NR_COBALT_SYSCALLS-1] = 0,		\
+	__COBALT_CALL32_INITMODE(0)
+
+#define __COBALT_CALL_ENTRY(__name)				\
+	[sc_cobalt_ ## __name] = __syshand__(__name),		\
+	__COBALT_CALL32_ENTRY(__name, __syshand__(__name))
+
+#define __COBALT_MODE(__name, __mode)	\
+	[sc_cobalt_ ## __name] = __xn_exec_##__mode,
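+/*
+ * As an illustration, __COBALT_MODE(thread_create, init) - matching
+ * the COBALT_SYSCALL(thread_create, init, ...) declaration - expands
+ * to:
+ *
+ *   [sc_cobalt_thread_create] = __xn_exec_init,
+ */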
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+#include "syscall32.h"
+#endif
+
+#include "syscall_entries.h"
+
+static const cobalt_syshand cobalt_syscalls[] = {
+	__COBALT_CALL_NI
+	__COBALT_CALL_ENTRIES
+#ifdef CONFIG_XENO_ARCH_SYS3264
+#include <asm/xenomai/syscall32-table.h>
+#endif
+};
+
+static const int cobalt_sysmodes[] = {
+	__COBALT_CALL_NFLAGS
+	__COBALT_CALL_MODES
+};
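+/*
+ * Handlers are looked up by the full syscall code, including any
+ * compat base offset (cobalt_syscalls[code]), whereas execution modes
+ * are looked up by the native syscall number only
+ * (cobalt_sysmodes[nr]), since mode bits are shared across all ABI
+ * models - see handle_head_syscall() and handle_root_syscall().
+ */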
+
+static inline int allowed_syscall(struct cobalt_process *process,
+				  struct xnthread *thread,
+				  int sysflags, int nr)
+{
+	if (nr == sc_cobalt_bind)
+		return 1;
+
+	if (process == NULL)
+		return 0;
+
+	if (thread == NULL && (sysflags & __xn_exec_shadow))
+		return 0;
+
+	return cap_raised(current_cap(), CAP_SYS_NICE);
+}
+
+int handle_head_syscall(bool caller_is_relaxed, struct pt_regs *regs)
+{
+	struct cobalt_process *process;
+	int switched, sigs, sysflags;
+	struct xnthread *thread;
+	cobalt_syshand handler;
+	struct task_struct *p;
+	unsigned long args[6];
+	unsigned int nr, code;
+	long ret;
+
+	if (!__xn_syscall_p(regs))
+		goto linux_syscall;
+
+	thread = xnthread_current();
+	code = __xn_syscall(regs);
+	if (code >= ARRAY_SIZE(cobalt_syscalls))
+		goto bad_syscall;
+
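+	/*
+	 * cobalt_sysmodes[] is indexed by the native syscall number:
+	 * masking the code here lets compat entries (offset by an
+	 * arch-specific base) share the native execution mode bits, as
+	 * described in the syscall table comment above.
+	 */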
+	nr = code & (__NR_COBALT_SYSCALLS - 1);
+
+	trace_cobalt_head_sysentry(code);
+
+	process = cobalt_current_process();
+	if (process == NULL) {
+		process = cobalt_search_process(current->mm);
+		cobalt_set_process(process);
+	}
+
+	handler = cobalt_syscalls[code];
+	sysflags = cobalt_sysmodes[nr];
+
+	/*
+	 * Executing Cobalt services requires CAP_SYS_NICE, except for
+	 * sc_cobalt_bind which does its own checks.
+	 */
+	if (unlikely(!allowed_syscall(process, thread, sysflags, nr))) {
+		/*
+		 * Exclude get_current from reporting, it is used to probe the
+		 * execution context.
+		 */
+		if (XENO_DEBUG(COBALT) && nr != sc_cobalt_get_current)
+			printk(XENO_WARNING
+			       "syscall <%d> denied to %s[%d]\n",
+			       nr, current->comm, task_pid_nr(current));
+		__xn_error_return(regs, -EPERM);
+		goto ret_handled;
+	}
+
+	if (sysflags & __xn_exec_conforming)
+		/*
+		 * If the conforming exec bit is set, turn the exec
+		 * bitmask for the syscall into the most appropriate
+		 * setup for the caller, i.e. Xenomai domain for
+		 * shadow threads, Linux otherwise.
+		 */
+		sysflags |= (thread ? __xn_exec_histage : __xn_exec_lostage);
+
+	/*
+	 * Here we have to dispatch the syscall execution properly,
+	 * depending on:
+	 *
+	 * o Whether the syscall must be run into the Linux or Xenomai
+	 * domain, or indifferently in the current Xenomai domain.
+	 *
+	 * o Whether the caller currently runs in the Linux or Xenomai
+	 * domain.
+	 */
+restart:
+	/*
+	 * Process adaptive syscalls by restarting them in the
+	 * opposite domain upon receiving -ENOSYS from the syscall
+	 * handler.
+	 */
+	switched = 0;
+	if (sysflags & __xn_exec_lostage) {
+		/*
+		 * The syscall must run from the Linux domain.
+		 */
+		if (!caller_is_relaxed) {
+			/*
+			 * Request originates from the Xenomai domain:
+			 * relax the caller then invoke the syscall
+			 * handler right after.
+			 */
+			xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL);
+			switched = 1;
+		} else
+			/*
+			 * Request originates from the Linux domain:
+			 * propagate the event to our Linux-based
+			 * handler, so that the syscall is executed
+			 * from there.
+			 */
+			return KEVENT_PROPAGATE;
+	} else if (sysflags & (__xn_exec_histage | __xn_exec_current)) {
+		/*
+		 * Syscall must run either from the Xenomai domain, or
+		 * from the calling domain.
+		 *
+		 * If the request originates from the Linux domain,
+		 * hand it over to our secondary-mode dispatcher.
+		 * Otherwise, invoke the syscall handler immediately.
+		 */
+		if (caller_is_relaxed)
+			return KEVENT_PROPAGATE;
+	}
+
+	/*
+	 * 'thread' has to be valid from that point: all syscalls
+	 * 'thread' has to be valid from this point: all syscalls that
+	 * regular (non-shadow) threads may issue have either been
+	 * pipelined to the root handler (lostage ones) or rejected by
+	 * allowed_syscall().
+	 */
+
+	p = current;
+	pipeline_get_syscall_args(p, regs, args);
+
+	ret = handler(args[0], args[1], args[2], args[3], args[4]);
+	if (ret == -ENOSYS && (sysflags & __xn_exec_adaptive)) {
+		if (switched) {
+			ret = xnthread_harden();
+			if (ret) {
+				switched = 0;
+				goto done;
+			}
+		} else /* Mark the primary -> secondary transition. */
+			xnthread_set_localinfo(thread, XNDESCENT);
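+		/*
+		 * Toggle the stage bits and clear the adaptive bit, so
+		 * that the syscall is retried at most once in the
+		 * opposite domain.
+		 */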
+		sysflags ^=
+		    (__xn_exec_lostage | __xn_exec_histage |
+		     __xn_exec_adaptive);
+		goto restart;
+	}
+done:
+	__xn_status_return(regs, ret);
+	sigs = 0;
+	if (!xnsched_root_p()) {
+		if (signal_pending(p) ||
+		    xnthread_test_info(thread, XNKICKED)) {
+			sigs = 1;
+			prepare_for_signal(p, thread, regs, sysflags);
+		} else if (xnthread_test_state(thread, XNWEAK) &&
+			   thread->res_count == 0) {
+			if (switched)
+				switched = 0;
+			else
+				xnthread_relax(0, 0);
+		}
+	}
+	if (!sigs && (sysflags & __xn_exec_switchback) && switched)
+		/* -EPERM will be trapped later if needed. */
+		xnthread_harden();
+
+ret_handled:
+	/* Update the stats and userland-visible state. */
+	if (thread) {
+		xnthread_clear_localinfo(thread, XNDESCENT);
+		xnstat_counter_inc(&thread->stat.xsc);
+		xnthread_sync_window(thread);
+	}
+
+	trace_cobalt_head_sysexit(__xn_reg_rval(regs));
+
+	return KEVENT_STOP;
+
+linux_syscall:
+	if (xnsched_root_p())
+		/*
+		 * The call originates from the Linux domain, either
+		 * from a relaxed shadow or from a regular Linux task;
+		 * just propagate the event so that we will fall back
+		 * to handle_root_syscall().
+		 */
+		return KEVENT_PROPAGATE;
+
+	if (!__xn_rootcall_p(regs, &code))
+		goto bad_syscall;
+
+	if (pipeline_handle_vdso_fallback(code, regs))
+		return KEVENT_STOP;
+
+	/*
+	 * We know this is a Cobalt thread since it runs over the head
+	 * domain; however, the current syscall should be handled by
+	 * the host kernel instead.  Before this happens, we have to
+	 * re-enter the root domain.
+	 */
+	xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL);
+
+	return KEVENT_PROPAGATE;
+
+bad_syscall:
+	printk(XENO_WARNING "bad syscall <%#x>\n", code);
+
+	__xn_error_return(regs, -ENOSYS);
+
+	return KEVENT_STOP;
+}
+
+int handle_root_syscall(struct pt_regs *regs)
+{
+	int sysflags, switched, sigs;
+	struct xnthread *thread;
+	cobalt_syshand handler;
+	struct task_struct *p;
+	unsigned long args[6];
+	unsigned int nr, code;
+	long ret;
+
+	/*
+	 * Catch cancellation requests pending for user shadows
+	 * running mostly in secondary mode, i.e. XNWEAK. In that
+	 * case, we won't run prepare_for_signal() that frequently, so
+	 * check for cancellation here.
+	 */
+	xnthread_test_cancel();
+
+	if (!__xn_syscall_p(regs))
+		/* Fall back to Linux syscall handling. */
+		return KEVENT_PROPAGATE;
+
+	thread = xnthread_current();
+	/* code has already been checked in the head domain handler. */
+	code = __xn_syscall(regs);
+	nr = code & (__NR_COBALT_SYSCALLS - 1);
+
+	trace_cobalt_root_sysentry(code);
+
+	/* Processing a Xenomai syscall. */
+
+	handler = cobalt_syscalls[code];
+	sysflags = cobalt_sysmodes[nr];
+
+	if (thread && (sysflags & __xn_exec_conforming))
+		sysflags |= __xn_exec_histage;
+restart:
+	/*
+	 * Process adaptive syscalls by restarting them in the
+	 * opposite domain upon receiving -ENOSYS from the syscall
+	 * handler.
+	 */
+	switched = 0;
+	if (sysflags & __xn_exec_histage) {
+		/*
+		 * This request originates from the Linux domain but
+		 * should run into the Xenomai domain: harden the
+		 * caller before invoking the syscall handler.
+		 */
+		ret = xnthread_harden();
+		if (ret) {
+			__xn_error_return(regs, ret);
+			goto ret_handled;
+		}
+		switched = 1;
+	} else {
+		/*
+		 * We want to run the syscall in the current Linux
+		 * domain. This is a slow path, so proceed with any
+		 * pending schedparam update on the fly.
+		 */
+		if (thread)
+			xnthread_propagate_schedparam(thread);
+	}
+
+	p = current;
+	pipeline_get_syscall_args(p, regs, args);
+
+	ret = handler(args[0], args[1], args[2], args[3], args[4]);
+	if (ret == -ENOSYS && (sysflags & __xn_exec_adaptive)) {
+		sysflags ^= __xn_exec_histage;
+		if (switched) {
+			xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL);
+			sysflags &= ~__xn_exec_adaptive;
+			 /* Mark the primary -> secondary transition. */
+			xnthread_set_localinfo(thread, XNDESCENT);
+		}
+		goto restart;
+	}
+
+	__xn_status_return(regs, ret);
+
+	sigs = 0;
+	if (!xnsched_root_p()) {
+		/*
+		 * We may have gained a shadow TCB from the syscall we
+		 * just invoked, so make sure to fetch it.
+		 */
+		thread = xnthread_current();
+		if (signal_pending(p)) {
+			sigs = 1;
+			prepare_for_signal(p, thread, regs, sysflags);
+		} else if (xnthread_test_state(thread, XNWEAK) &&
+			   thread->res_count == 0)
+			sysflags |= __xn_exec_switchback;
+	}
+	if (!sigs && (sysflags & __xn_exec_switchback)
+	    && (switched || xnsched_primary_p()))
+		xnthread_relax(0, 0);
+
+ret_handled:
+	/* Update the stats and userland-visible state. */
+	if (thread) {
+		xnthread_clear_localinfo(thread, XNDESCENT|XNHICCUP);
+		xnstat_counter_inc(&thread->stat.xsc);
+		xnthread_sync_window(thread);
+	}
+
+	trace_cobalt_root_sysexit(__xn_reg_rval(regs));
+
+	return KEVENT_STOP;
+}
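+
+#if 0
+/*
+ * Editorial sketch, not part of the patch: what the adaptive restart
+ * implemented above means for a syscall handler. A handler whose mode
+ * word includes __xn_exec_adaptive may return -ENOSYS to ask the
+ * dispatchers to re-run the request from the opposite domain. The
+ * helper below is hypothetical.
+ */
+static long example_adaptive_handler(unsigned long arg)
+{
+	if (xnsched_root_p())
+		/* Running relaxed: request a restart in primary mode. */
+		return -ENOSYS;
+
+	/* Now running over the Xenomai domain, do the time-critical work. */
+	return do_primary_only_work(arg);	/* hypothetical helper */
+}
+#endif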
+
+long cobalt_restart_syscall_placeholder(struct restart_block *param)
+{
+	return -EINVAL;
+}
+++ linux-patched/kernel/xenomai/posix/event.c	2022-03-21 12:58:29.005892706 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/compat.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include "internal.h"
+#include "thread.h"
+#include "clock.h"
+#include "event.h"
+#include <trace/events/cobalt-posix.h>
+#include <cobalt/kernel/time.h>
+
+/*
+ * Cobalt event notification services
+ *
+ * An event flag group is a synchronization object represented by a
+ * regular native integer; every available bit in such word can be
+ * used to map a user-defined event flag.  When a flag is set, the
+ * associated event is said to have occurred.
+ *
+ * Xenomai threads and interrupt handlers can use event flags to
+ * signal the occurrence of events to other threads; those threads can
+ * either wait for the events to occur in a conjunctive manner (all
+ * awaited events must have occurred to wake up), or in a disjunctive
+ * way (at least one of the awaited events must have occurred to wake
+ * up).
+ *
+ * We expose this non-POSIX feature through the internal API, as a
+ * fast IPC mechanism available to the Copperplate interface.
+ */
+
+struct event_wait_context {
+	struct xnthread_wait_context wc;
+	unsigned int value;
+	int mode;
+};
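+
+#if 0
+/*
+ * Editorial sketch, not part of the patch: the satisfaction test the
+ * wait/sync services below apply to a sleeper. With COBALT_EVENT_ANY
+ * set, one awaited bit is enough (disjunctive wait); otherwise all
+ * awaited bits must be present (conjunctive wait).
+ */
+static inline bool example_wait_satisfied(unsigned int value,
+					  unsigned int bits, int mode)
+{
+	unsigned int waitval = value & bits;
+	unsigned int testval = (mode & COBALT_EVENT_ANY) ? waitval : bits;
+
+	return waitval && waitval == testval;
+}
+#endif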
+
+COBALT_SYSCALL(event_init, current,
+	       (struct cobalt_event_shadow __user *u_event,
+		unsigned int value, int flags))
+{
+	struct cobalt_event_shadow shadow;
+	struct cobalt_event_state *state;
+	int pshared, synflags, ret;
+	struct cobalt_event *event;
+	struct cobalt_umm *umm;
+	unsigned long stateoff;
+	spl_t s;
+
+	trace_cobalt_event_init(u_event, value, flags);
+
+	event = xnmalloc(sizeof(*event));
+	if (event == NULL)
+		return -ENOMEM;
+
+	pshared = (flags & COBALT_EVENT_SHARED) != 0;
+	umm = &cobalt_ppd_get(pshared)->umm;
+	state = cobalt_umm_alloc(umm, sizeof(*state));
+	if (state == NULL) {
+		xnfree(event);
+		return -EAGAIN;
+	}
+
+	ret = xnregistry_enter_anon(event, &event->resnode.handle);
+	if (ret) {
+		cobalt_umm_free(umm, state);
+		xnfree(event);
+		return ret;
+	}
+
+	event->state = state;
+	event->flags = flags;
+	synflags = (flags & COBALT_EVENT_PRIO) ? XNSYNCH_PRIO : XNSYNCH_FIFO;
+	xnsynch_init(&event->synch, synflags, NULL);
+	state->value = value;
+	state->flags = 0;
+	state->nwaiters = 0;
+	stateoff = cobalt_umm_offset(umm, state);
+	XENO_BUG_ON(COBALT, stateoff != (__u32)stateoff);
+
+	xnlock_get_irqsave(&nklock, s);
+	cobalt_add_resource(&event->resnode, event, pshared);
+	event->magic = COBALT_EVENT_MAGIC;
+	xnlock_put_irqrestore(&nklock, s);
+
+	shadow.flags = flags;
+	shadow.handle = event->resnode.handle;
+	shadow.state_offset = (__u32)stateoff;
+
+	return cobalt_copy_to_user(u_event, &shadow, sizeof(*u_event));
+}
+
+int __cobalt_event_wait(struct cobalt_event_shadow __user *u_event,
+			unsigned int bits,
+			unsigned int __user *u_bits_r,
+			int mode, const struct timespec64 *ts)
+{
+	unsigned int rbits = 0, testval;
+	xnticks_t timeout = XN_INFINITE;
+	struct cobalt_event_state *state;
+	xntmode_t tmode = XN_RELATIVE;
+	struct event_wait_context ewc;
+	struct cobalt_event *event;
+	xnhandle_t handle;
+	int ret = 0, info;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_event->handle);
+
+	if (ts) {
+		if (!timespec64_valid(ts))
+			return -EINVAL;
+
+		timeout = ts2ns(ts);
+		if (timeout) {
+			timeout++;
+			tmode = XN_ABSOLUTE;
+		} else
+			timeout = XN_NONBLOCK;
+		trace_cobalt_event_timedwait(u_event, bits, mode, ts);
+	} else
+		trace_cobalt_event_wait(u_event, bits, mode);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	event = xnregistry_lookup(handle, NULL);
+	if (event == NULL || event->magic != COBALT_EVENT_MAGIC) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	state = event->state;
+
+	if (bits == 0) {
+		/*
+		 * Special case: we don't wait for any event; we only
+		 * return the current flag group value.
+		 */
+		rbits = state->value;
+		goto out;
+	}
+
+	state->flags |= COBALT_EVENT_PENDED;
+	rbits = state->value & bits;
+	testval = mode & COBALT_EVENT_ANY ? rbits : bits;
+	if (rbits && rbits == testval)
+		goto done;
+
+	if (timeout == XN_NONBLOCK) {
+		ret = -EWOULDBLOCK;
+		goto done;
+	}
+
+	ewc.value = bits;
+	ewc.mode = mode;
+	xnthread_prepare_wait(&ewc.wc);
+	state->nwaiters++;
+	info = xnsynch_sleep_on(&event->synch, timeout, tmode);
+	if (info & XNRMID) {
+		ret = -EIDRM;
+		goto out;
+	}
+	if (info & (XNBREAK|XNTIMEO)) {
+		state->nwaiters--;
+		ret = (info & XNBREAK) ? -EINTR : -ETIMEDOUT;
+	} else
+		rbits = ewc.value;
+done:
+	if (!xnsynch_pended_p(&event->synch))
+		state->flags &= ~COBALT_EVENT_PENDED;
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (ret == 0 &&
+	    cobalt_copy_to_user(u_bits_r, &rbits, sizeof(rbits)))
+		return -EFAULT;
+
+	return ret;
+}
+
+int __cobalt_event_wait64(struct cobalt_event_shadow __user *u_event,
+			  unsigned int bits,
+			  unsigned int __user *u_bits_r,
+			  int mode, const struct __kernel_timespec __user *u_ts)
+{
+	struct timespec64 ts, *tsp = NULL;
+	int ret;
+
+	if (u_ts) {
+		tsp = &ts;
+		ret = cobalt_get_timespec64(&ts, u_ts);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_event_wait(u_event, bits, u_bits_r, mode, tsp);
+}
+
+COBALT_SYSCALL(event_wait, primary,
+	       (struct cobalt_event_shadow __user *u_event,
+		unsigned int bits,
+		unsigned int __user *u_bits_r,
+		int mode, const struct __user_old_timespec __user *u_ts))
+{
+	struct timespec64 ts, *tsp = NULL;
+	int ret;
+
+	if (u_ts) {
+		tsp = &ts;
+		ret = cobalt_get_u_timespec(&ts, u_ts);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_event_wait(u_event, bits, u_bits_r, mode, tsp);
+}
+
+COBALT_SYSCALL(event_wait64, primary,
+	       (struct cobalt_event_shadow __user *u_event,
+		unsigned int bits,
+		unsigned int __user *u_bits_r,
+		int mode, const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_event_wait64(u_event, bits, u_bits_r, mode, u_ts);
+}
+
+COBALT_SYSCALL(event_sync, current,
+	       (struct cobalt_event_shadow __user *u_event))
+{
+	unsigned int bits, waitval, testval;
+	struct xnthread_wait_context *wc;
+	struct cobalt_event_state *state;
+	struct event_wait_context *ewc;
+	struct cobalt_event *event;
+	struct xnthread *p, *tmp;
+	xnhandle_t handle;
+	int ret = 0;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_event->handle);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	event = xnregistry_lookup(handle, NULL);
+	if (event == NULL || event->magic != COBALT_EVENT_MAGIC) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Userland has already updated the bitmask; our job is to
+	 * wake up any thread which could be satisfied by its current
+	 * value.
+	 */
+	state = event->state;
+	bits = state->value;
+
+	xnsynch_for_each_sleeper_safe(p, tmp, &event->synch) {
+		wc = xnthread_get_wait_context(p);
+		ewc = container_of(wc, struct event_wait_context, wc);
+		waitval = ewc->value & bits;
+		testval = ewc->mode & COBALT_EVENT_ANY ? waitval : ewc->value;
+		if (waitval && waitval == testval) {
+			state->nwaiters--;
+			ewc->value = waitval;
+			xnsynch_wakeup_this_sleeper(&event->synch, p);
+		}
+	}
+
+	xnsched_run();
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
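+
+#if 0
+/*
+ * Editorial sketch, not part of the patch: the kind of user-side
+ * posting sequence event_sync assumes. Userland updates the shared
+ * flag group first, then traps into the kernel only when waiters may
+ * be pending. The atomic accessor and syscall wrapper below are
+ * hypothetical stand-ins for the libcobalt code.
+ */
+static int example_event_post(struct cobalt_event_state *state,
+			      struct cobalt_event_shadow *shadow,
+			      unsigned int bits)
+{
+	/* Publish the new bits into the shared flag group first... */
+	atomic_bits_or(&state->value, bits);		/* hypothetical */
+
+	/* ...then wake up sleepers only if the group is pended on. */
+	if (state->flags & COBALT_EVENT_PENDED)
+		return event_sync_syscall(shadow);	/* hypothetical */
+
+	return 0;
+}
+#endif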
+
+COBALT_SYSCALL(event_destroy, current,
+	       (struct cobalt_event_shadow __user *u_event))
+{
+	struct cobalt_event *event;
+	xnhandle_t handle;
+	spl_t s;
+
+	trace_cobalt_event_destroy(u_event);
+
+	handle = cobalt_get_handle_from_user(&u_event->handle);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	event = xnregistry_lookup(handle, NULL);
+	if (event == NULL || event->magic != COBALT_EVENT_MAGIC) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EINVAL;
+	}
+
+	cobalt_event_reclaim(&event->resnode, s); /* drops lock */
+
+	return 0;
+}
+
+COBALT_SYSCALL(event_inquire, current,
+	       (struct cobalt_event_shadow __user *u_event,
+		struct cobalt_event_info __user *u_info,
+		pid_t __user *u_waitlist,
+		size_t waitsz))
+{
+	int nrpend = 0, nrwait = 0, nrpids, ret = 0;
+	unsigned long pstamp, nstamp = 0;
+	struct cobalt_event_info info;
+	struct cobalt_event *event;
+	pid_t *t = NULL, fbuf[16];
+	struct xnthread *thread;
+	xnhandle_t handle;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_event->handle);
+
+	nrpids = waitsz / sizeof(pid_t);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	for (;;) {
+		pstamp = nstamp;
+		event = xnregistry_lookup(handle, &nstamp);
+		if (event == NULL || event->magic != COBALT_EVENT_MAGIC) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EINVAL;
+		}
+		/*
+		 * Allocate memory to return the wait list without
+		 * holding any lock, then revalidate the handle.
+		 */
+		if (t == NULL) {
+			nrpend = 0;
+			if (!xnsynch_pended_p(&event->synch))
+				break;
+			xnsynch_for_each_sleeper(thread, &event->synch)
+				nrpend++;
+			if (u_waitlist == NULL)
+				break;
+			xnlock_put_irqrestore(&nklock, s);
+			if (nrpids > nrpend)
+				nrpids = nrpend;
+			if (nrpend <= ARRAY_SIZE(fbuf))
+				t = fbuf; /* Use fast buffer. */
+			else {
+				t = xnmalloc(nrpend * sizeof(pid_t));
+				if (t == NULL)
+					return -ENOMEM;
+			}
+			xnlock_get_irqsave(&nklock, s);
+		} else if (pstamp == nstamp)
+			break;
+		else {
+			xnlock_put_irqrestore(&nklock, s);
+			if (t != fbuf)
+				xnfree(t);
+			t = NULL;
+			xnlock_get_irqsave(&nklock, s);
+		}
+	}
+
+	info.flags = event->flags;
+	info.value = event->value;
+	info.nrwait = nrpend;
+
+	if (xnsynch_pended_p(&event->synch) && u_waitlist != NULL) {
+		xnsynch_for_each_sleeper(thread, &event->synch) {
+			if (nrwait >= nrpids)
+				break;
+			t[nrwait++] = xnthread_host_pid(thread);
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	ret = cobalt_copy_to_user(u_info, &info, sizeof(info));
+	if (ret == 0 && nrwait > 0)
+		ret = cobalt_copy_to_user(u_waitlist, t, nrwait * sizeof(pid_t));
+
+	if (t && t != fbuf)
+		xnfree(t);
+
+	return ret ?: nrwait;
+}
+
+void cobalt_event_reclaim(struct cobalt_resnode *node, spl_t s)
+{
+	struct cobalt_event *event;
+	struct cobalt_umm *umm;
+	int pshared;
+
+	event = container_of(node, struct cobalt_event, resnode);
+	xnregistry_remove(node->handle);
+	cobalt_del_resource(node);
+	xnsynch_destroy(&event->synch);
+	pshared = (event->flags & COBALT_EVENT_SHARED) != 0;
+	xnlock_put_irqrestore(&nklock, s);
+
+	umm = &cobalt_ppd_get(pshared)->umm;
+	cobalt_umm_free(umm, event->state);
+	xnfree(event);
+}
+++ linux-patched/kernel/xenomai/posix/compat.c	2022-03-21 12:58:29.001892745 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/event.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/err.h>
+#include <linux/memory.h>
+#include <linux/module.h>
+#include <cobalt/kernel/compat.h>
+#include <asm/xenomai/syscall.h>
+#include <xenomai/posix/mqueue.h>
+
+int sys32_get_timespec(struct timespec64 *ts,
+		       const struct old_timespec32 __user *u_cts)
+{
+	struct old_timespec32 cts;
+
+	if (u_cts == NULL || !access_rok(u_cts, sizeof(*u_cts)))
+		return -EFAULT;
+
+	if (__xn_get_user(cts.tv_sec, &u_cts->tv_sec) ||
+		__xn_get_user(cts.tv_nsec, &u_cts->tv_nsec))
+		return -EFAULT;
+
+	ts->tv_sec = cts.tv_sec;
+	ts->tv_nsec = cts.tv_nsec;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_timespec);
+
+int sys32_put_timespec(struct old_timespec32 __user *u_cts,
+		       const struct timespec64 *ts)
+{
+	struct old_timespec32 cts;
+
+	if (u_cts == NULL || !access_wok(u_cts, sizeof(*u_cts)))
+		return -EFAULT;
+
+	cts.tv_sec = ts->tv_sec;
+	cts.tv_nsec = ts->tv_nsec;
+
+	if (__xn_put_user(cts.tv_sec, &u_cts->tv_sec) ||
+	    __xn_put_user(cts.tv_nsec, &u_cts->tv_nsec))
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_timespec);
+
+int sys32_get_itimerspec(struct itimerspec64 *its,
+			 const struct old_itimerspec32 __user *cits)
+{
+	int ret = sys32_get_timespec(&its->it_value, &cits->it_value);
+
+	return ret ?: sys32_get_timespec(&its->it_interval, &cits->it_interval);
+}
+EXPORT_SYMBOL_GPL(sys32_get_itimerspec);
+
+int sys32_put_itimerspec(struct old_itimerspec32 __user *cits,
+			 const struct itimerspec64 *its)
+{
+	int ret = sys32_put_timespec(&cits->it_value, &its->it_value);
+
+	return ret ?: sys32_put_timespec(&cits->it_interval, &its->it_interval);
+}
+EXPORT_SYMBOL_GPL(sys32_put_itimerspec);
+
+int sys32_get_timeval(struct __kernel_old_timeval *tv,
+		      const struct old_timeval32 __user *ctv)
+{
+	return (ctv == NULL ||
+		!access_rok(ctv, sizeof(*ctv)) ||
+		__xn_get_user(tv->tv_sec, &ctv->tv_sec) ||
+		__xn_get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_timeval);
+
+int sys32_put_timeval(struct old_timeval32 __user *ctv,
+		      const struct __kernel_old_timeval *tv)
+{
+	return (ctv == NULL ||
+		!access_wok(ctv, sizeof(*ctv)) ||
+		__xn_put_user(tv->tv_sec, &ctv->tv_sec) ||
+		__xn_put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_timeval);
+
+int sys32_get_timex(struct __kernel_timex *tx,
+		    const struct old_timex32 __user *ctx)
+{
+	struct __kernel_old_timeval time;
+	int ret;
+
+	memset(tx, 0, sizeof(*tx));
+
+	ret = sys32_get_timeval(&time, &ctx->time);
+	if (ret)
+		return ret;
+
+	tx->time.tv_sec = time.tv_sec;
+	tx->time.tv_usec = time.tv_usec;
+
+	if (!access_rok(ctx, sizeof(*ctx)) ||
+	    __xn_get_user(tx->modes, &ctx->modes) ||
+	    __xn_get_user(tx->offset, &ctx->offset) ||
+	    __xn_get_user(tx->freq, &ctx->freq) ||
+	    __xn_get_user(tx->maxerror, &ctx->maxerror) ||
+	    __xn_get_user(tx->esterror, &ctx->esterror) ||
+	    __xn_get_user(tx->status, &ctx->status) ||
+	    __xn_get_user(tx->constant, &ctx->constant) ||
+	    __xn_get_user(tx->precision, &ctx->precision) ||
+	    __xn_get_user(tx->tolerance, &ctx->tolerance) ||
+	    __xn_get_user(tx->tick, &ctx->tick) ||
+	    __xn_get_user(tx->ppsfreq, &ctx->ppsfreq) ||
+	    __xn_get_user(tx->jitter, &ctx->jitter) ||
+	    __xn_get_user(tx->shift, &ctx->shift) ||
+	    __xn_get_user(tx->stabil, &ctx->stabil) ||
+	    __xn_get_user(tx->jitcnt, &ctx->jitcnt) ||
+	    __xn_get_user(tx->calcnt, &ctx->calcnt) ||
+	    __xn_get_user(tx->errcnt, &ctx->errcnt) ||
+	    __xn_get_user(tx->stbcnt, &ctx->stbcnt))
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_timex);
+
+int sys32_put_timex(struct old_timex32 __user *ctx,
+		    const struct __kernel_timex *tx)
+{
+	struct __kernel_old_timeval time;
+	int ret;
+
+	time.tv_sec = tx->time.tv_sec;
+	time.tv_usec = tx->time.tv_usec;
+
+	ret = sys32_put_timeval(&ctx->time, &time);
+	if (ret)
+		return ret;
+
+	if (!access_wok(ctx, sizeof(*ctx)) ||
+	    __xn_put_user(tx->modes, &ctx->modes) ||
+	    __xn_put_user(tx->offset, &ctx->offset) ||
+	    __xn_put_user(tx->freq, &ctx->freq) ||
+	    __xn_put_user(tx->maxerror, &ctx->maxerror) ||
+	    __xn_put_user(tx->esterror, &ctx->esterror) ||
+	    __xn_put_user(tx->status, &ctx->status) ||
+	    __xn_put_user(tx->constant, &ctx->constant) ||
+	    __xn_put_user(tx->precision, &ctx->precision) ||
+	    __xn_put_user(tx->tolerance, &ctx->tolerance) ||
+	    __xn_put_user(tx->tick, &ctx->tick) ||
+	    __xn_put_user(tx->ppsfreq, &ctx->ppsfreq) ||
+	    __xn_put_user(tx->jitter, &ctx->jitter) ||
+	    __xn_put_user(tx->shift, &ctx->shift) ||
+	    __xn_put_user(tx->stabil, &ctx->stabil) ||
+	    __xn_put_user(tx->jitcnt, &ctx->jitcnt) ||
+	    __xn_put_user(tx->calcnt, &ctx->calcnt) ||
+	    __xn_put_user(tx->errcnt, &ctx->errcnt) ||
+	    __xn_put_user(tx->stbcnt, &ctx->stbcnt))
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_timex);
+
+int sys32_get_fdset(fd_set *fds, const compat_fd_set __user *cfds,
+		    size_t cfdsize)
+{
+	int rdpos, wrpos, rdlim = cfdsize / sizeof(compat_ulong_t);
+
+	if (cfds == NULL || !access_rok(cfds, cfdsize))
+		return -EFAULT;
+
+	for (rdpos = 0, wrpos = 0; rdpos < rdlim; rdpos++, wrpos++)
+		if (__xn_get_user(fds->fds_bits[wrpos], cfds->fds_bits + rdpos))
+			return -EFAULT;
+
+	return 0;
+}
+
+int sys32_put_fdset(compat_fd_set __user *cfds, const fd_set *fds,
+		    size_t fdsize)
+{
+	int rdpos, wrpos, wrlim = fdsize / sizeof(long);
+
+	if (cfds == NULL || !access_wok(cfds, wrlim * sizeof(compat_ulong_t)))
+		return -EFAULT;
+
+	for (rdpos = 0, wrpos = 0; wrpos < wrlim; rdpos++, wrpos++)
+		if (__xn_put_user(fds->fds_bits[rdpos], cfds->fds_bits + wrpos))
+			return -EFAULT;
+
+	return 0;
+}
+
+int sys32_get_param_ex(int policy,
+		       struct sched_param_ex *p,
+		       const struct compat_sched_param_ex __user *u_cp)
+{
+	struct compat_sched_param_ex cpex;
+
+	if (u_cp == NULL || cobalt_copy_from_user(&cpex, u_cp, sizeof(cpex)))
+		return -EFAULT;
+
+	p->sched_priority = cpex.sched_priority;
+
+	switch (policy) {
+	case SCHED_SPORADIC:
+		p->sched_ss_low_priority = cpex.sched_ss_low_priority;
+		p->sched_ss_max_repl = cpex.sched_ss_max_repl;
+		p->sched_ss_repl_period.tv_sec = cpex.sched_ss_repl_period.tv_sec;
+		p->sched_ss_repl_period.tv_nsec = cpex.sched_ss_repl_period.tv_nsec;
+		p->sched_ss_init_budget.tv_sec = cpex.sched_ss_init_budget.tv_sec;
+		p->sched_ss_init_budget.tv_nsec = cpex.sched_ss_init_budget.tv_nsec;
+		break;
+	case SCHED_RR:
+		p->sched_rr_quantum.tv_sec = cpex.sched_rr_quantum.tv_sec;
+		p->sched_rr_quantum.tv_nsec = cpex.sched_rr_quantum.tv_nsec;
+		break;
+	case SCHED_TP:
+		p->sched_tp_partition = cpex.sched_tp_partition;
+		break;
+	case SCHED_QUOTA:
+		p->sched_quota_group = cpex.sched_quota_group;
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_param_ex);
+
+int sys32_put_param_ex(int policy,
+		       struct compat_sched_param_ex __user *u_cp,
+		       const struct sched_param_ex *p)
+{
+	struct compat_sched_param_ex cpex;
+
+	if (u_cp == NULL)
+		return -EFAULT;
+
+	cpex.sched_priority = p->sched_priority;
+
+	switch (policy) {
+	case SCHED_SPORADIC:
+		cpex.sched_ss_low_priority = p->sched_ss_low_priority;
+		cpex.sched_ss_max_repl = p->sched_ss_max_repl;
+		cpex.sched_ss_repl_period.tv_sec = p->sched_ss_repl_period.tv_sec;
+		cpex.sched_ss_repl_period.tv_nsec = p->sched_ss_repl_period.tv_nsec;
+		cpex.sched_ss_init_budget.tv_sec = p->sched_ss_init_budget.tv_sec;
+		cpex.sched_ss_init_budget.tv_nsec = p->sched_ss_init_budget.tv_nsec;
+		break;
+	case SCHED_RR:
+		cpex.sched_rr_quantum.tv_sec = p->sched_rr_quantum.tv_sec;
+		cpex.sched_rr_quantum.tv_nsec = p->sched_rr_quantum.tv_nsec;
+		break;
+	case SCHED_TP:
+		cpex.sched_tp_partition = p->sched_tp_partition;
+		break;
+	case SCHED_QUOTA:
+		cpex.sched_quota_group = p->sched_quota_group;
+		break;
+	}
+
+	return cobalt_copy_to_user(u_cp, &cpex, sizeof(cpex));
+}
+EXPORT_SYMBOL_GPL(sys32_put_param_ex);
+
+int sys32_get_mqattr(struct mq_attr *ap,
+		     const struct compat_mq_attr __user *u_cap)
+{
+	struct compat_mq_attr cattr;
+
+	if (u_cap == NULL ||
+	    cobalt_copy_from_user(&cattr, u_cap, sizeof(cattr)))
+		return -EFAULT;
+
+	ap->mq_flags = cattr.mq_flags;
+	ap->mq_maxmsg = cattr.mq_maxmsg;
+	ap->mq_msgsize = cattr.mq_msgsize;
+	ap->mq_curmsgs = cattr.mq_curmsgs;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_mqattr);
+
+int sys32_put_mqattr(struct compat_mq_attr __user *u_cap,
+		     const struct mq_attr *ap)
+{
+	struct compat_mq_attr cattr;
+
+	cattr.mq_flags = ap->mq_flags;
+	cattr.mq_maxmsg = ap->mq_maxmsg;
+	cattr.mq_msgsize = ap->mq_msgsize;
+	cattr.mq_curmsgs = ap->mq_curmsgs;
+
+	return u_cap == NULL ? -EFAULT :
+		cobalt_copy_to_user(u_cap, &cattr, sizeof(cattr));
+}
+EXPORT_SYMBOL_GPL(sys32_put_mqattr);
+
+int sys32_get_sigevent(struct sigevent *ev,
+		       const struct compat_sigevent *__user u_cev)
+{
+	struct compat_sigevent cev;
+	compat_int_t *cp;
+	int ret, *p;
+
+	if (u_cev == NULL)
+		return -EFAULT;
+
+	ret = cobalt_copy_from_user(&cev, u_cev, sizeof(cev));
+	if (ret)
+		return ret;
+
+	memset(ev, 0, sizeof(*ev));
+	ev->sigev_value.sival_ptr = compat_ptr(cev.sigev_value.sival_ptr);
+	ev->sigev_signo = cev.sigev_signo;
+	ev->sigev_notify = cev.sigev_notify;
+	/*
+	 * Extensions may define extra fields we don't know about in
+	 * the padding area, so we have to load it entirely.
+	 */
+	p = ev->_sigev_un._pad;
+	cp = cev._sigev_un._pad;
+	while (p < &ev->_sigev_un._pad[ARRAY_SIZE(ev->_sigev_un._pad)] &&
+	       cp < &cev._sigev_un._pad[ARRAY_SIZE(cev._sigev_un._pad)])
+		*p++ = *cp++;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_sigevent);
+
+int sys32_get_sigset(sigset_t *set, const compat_sigset_t *u_cset)
+{
+#ifdef __BIG_ENDIAN
+	compat_sigset_t v;
+
+	if (cobalt_copy_from_user(&v, u_cset, sizeof(compat_sigset_t)))
+		return -EFAULT;
+	switch (_NSIG_WORDS) {
+	case 4: set->sig[3] = v.sig[6] | (((long)v.sig[7]) << 32 );
+	case 3: set->sig[2] = v.sig[4] | (((long)v.sig[5]) << 32 );
+	case 2: set->sig[1] = v.sig[2] | (((long)v.sig[3]) << 32 );
+	case 1: set->sig[0] = v.sig[0] | (((long)v.sig[1]) << 32 );
+	}
+#else
+	if (cobalt_copy_from_user(set, u_cset, sizeof(compat_sigset_t)))
+		return -EFAULT;
+#endif
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_sigset);
+
+int sys32_put_sigset(compat_sigset_t *u_cset, const sigset_t *set)
+{
+#ifdef __BIG_ENDIAN
+	compat_sigset_t v;
+	switch (_NSIG_WORDS) {
+	case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
+	case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
+	case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
+	case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
+	}
+	return cobalt_copy_to_user(u_cset, &v, sizeof(*u_cset)) ? -EFAULT : 0;
+#else
+	return cobalt_copy_to_user(u_cset, set, sizeof(*u_cset)) ? -EFAULT : 0;
+#endif
+}
+EXPORT_SYMBOL_GPL(sys32_put_sigset);
+
+int sys32_get_sigval(union sigval *val, const union compat_sigval *u_cval)
+{
+	union compat_sigval cval;
+	int ret;
+
+	if (u_cval == NULL)
+		return -EFAULT;
+
+	ret = cobalt_copy_from_user(&cval, u_cval, sizeof(cval));
+	if (ret)
+		return ret;
+
+	val->sival_ptr = compat_ptr(cval.sival_ptr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_sigval);
+
+int sys32_put_siginfo(void __user *u_si, const struct siginfo *si,
+		      int overrun)
+{
+	struct compat_siginfo __user *u_p = u_si;
+	int ret;
+
+	if (u_p == NULL)
+		return -EFAULT;
+
+	ret = __xn_put_user(si->si_signo, &u_p->si_signo);
+	ret |= __xn_put_user(si->si_errno, &u_p->si_errno);
+	ret |= __xn_put_user(si->si_code, &u_p->si_code);
+
+	/*
+	 * Copy the generic/standard siginfo bits to userland.
+	 */
+	switch (si->si_code) {
+	case SI_TIMER:
+		ret |= __xn_put_user(si->si_tid, &u_p->si_tid);
+		ret |= __xn_put_user(ptr_to_compat(si->si_ptr), &u_p->si_ptr);
+		ret |= __xn_put_user(overrun, &u_p->si_overrun);
+		break;
+	case SI_QUEUE:
+	case SI_MESGQ:
+		ret |= __xn_put_user(ptr_to_compat(si->si_ptr), &u_p->si_ptr);
+		fallthrough;
+	case SI_USER:
+		ret |= __xn_put_user(si->si_pid, &u_p->si_pid);
+		ret |= __xn_put_user(si->si_uid, &u_p->si_uid);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(sys32_put_siginfo);
+
+int sys32_get_msghdr(struct user_msghdr *msg,
+		     const struct compat_msghdr __user *u_cmsg)
+{
+	compat_uptr_t tmp1, tmp2, tmp3;
+
+	if (u_cmsg == NULL ||
+	    !access_rok(u_cmsg, sizeof(*u_cmsg)) ||
+	    __xn_get_user(tmp1, &u_cmsg->msg_name) ||
+	    __xn_get_user(msg->msg_namelen, &u_cmsg->msg_namelen) ||
+	    __xn_get_user(tmp2, &u_cmsg->msg_iov) ||
+	    __xn_get_user(msg->msg_iovlen, &u_cmsg->msg_iovlen) ||
+	    __xn_get_user(tmp3, &u_cmsg->msg_control) ||
+	    __xn_get_user(msg->msg_controllen, &u_cmsg->msg_controllen) ||
+	    __xn_get_user(msg->msg_flags, &u_cmsg->msg_flags))
+		return -EFAULT;
+
+	if (msg->msg_namelen > sizeof(struct sockaddr_storage))
+		msg->msg_namelen = sizeof(struct sockaddr_storage);
+
+	msg->msg_name = compat_ptr(tmp1);
+	msg->msg_iov = compat_ptr(tmp2);
+	msg->msg_control = compat_ptr(tmp3);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_msghdr);
+
+int sys32_get_mmsghdr(struct mmsghdr *mmsg,
+		      const struct compat_mmsghdr __user *u_cmmsg)
+{
+	if (u_cmmsg == NULL ||
+	    !access_rok(u_cmmsg, sizeof(*u_cmmsg)) ||
+	    __xn_get_user(mmsg->msg_len, &u_cmmsg->msg_len))
+		return -EFAULT;
+
+	return sys32_get_msghdr(&mmsg->msg_hdr, &u_cmmsg->msg_hdr);
+}
+EXPORT_SYMBOL_GPL(sys32_get_mmsghdr);
+
+int sys32_put_msghdr(struct compat_msghdr __user *u_cmsg,
+		     const struct user_msghdr *msg)
+{
+	if (u_cmsg == NULL ||
+	    !access_wok(u_cmsg, sizeof(*u_cmsg)) ||
+	    __xn_put_user(ptr_to_compat(msg->msg_name), &u_cmsg->msg_name) ||
+	    __xn_put_user(msg->msg_namelen, &u_cmsg->msg_namelen) ||
+	    __xn_put_user(ptr_to_compat(msg->msg_iov), &u_cmsg->msg_iov) ||
+	    __xn_put_user(msg->msg_iovlen, &u_cmsg->msg_iovlen) ||
+	    __xn_put_user(ptr_to_compat(msg->msg_control), &u_cmsg->msg_control) ||
+	    __xn_put_user(msg->msg_controllen, &u_cmsg->msg_controllen) ||
+	    __xn_put_user(msg->msg_flags, &u_cmsg->msg_flags))
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_msghdr);
+
+int sys32_put_mmsghdr(struct compat_mmsghdr __user *u_cmmsg,
+		     const struct mmsghdr *mmsg)
+{
+	if (u_cmmsg == NULL ||
+	    !access_wok(u_cmmsg, sizeof(*u_cmmsg)) ||
+	    __xn_put_user(mmsg->msg_len, &u_cmmsg->msg_len))
+		return -EFAULT;
+
+	return sys32_put_msghdr(&u_cmmsg->msg_hdr, &mmsg->msg_hdr);
+}
+EXPORT_SYMBOL_GPL(sys32_put_mmsghdr);
+
+int sys32_get_iovec(struct iovec *iov,
+		    const struct compat_iovec __user *u_ciov,
+		    int ciovlen)
+{
+	const struct compat_iovec __user *p;
+	struct compat_iovec ciov;
+	int ret, n;
+
+	for (n = 0, p = u_ciov; n < ciovlen; n++, p++) {
+		ret = cobalt_copy_from_user(&ciov, p, sizeof(ciov));
+		if (ret)
+			return ret;
+		iov[n].iov_base = compat_ptr(ciov.iov_base);
+		iov[n].iov_len = ciov.iov_len;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_iovec);
+
+int sys32_put_iovec(struct compat_iovec __user *u_ciov,
+		    const struct iovec *iov,
+		    int iovlen)
+{
+	struct compat_iovec __user *p;
+	struct compat_iovec ciov;
+	int ret, n;
+
+	for (n = 0, p = u_ciov; n < iovlen; n++, p++) {
+		ciov.iov_base = ptr_to_compat(iov[n].iov_base);
+		ciov.iov_len = iov[n].iov_len;
+		ret = cobalt_copy_to_user(p, &ciov, sizeof(*p));
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_iovec);
+++ linux-patched/kernel/xenomai/posix/event.h	2022-03-21 12:58:28.998892774 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/memory.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_POSIX_EVENT_H
+#define _COBALT_POSIX_EVENT_H
+
+#include <cobalt/kernel/synch.h>
+#include <cobalt/uapi/event.h>
+#include <xenomai/posix/syscall.h>
+#include <xenomai/posix/process.h>
+
+struct cobalt_resources;
+struct cobalt_process;
+
+struct cobalt_event {
+	unsigned int magic;
+	unsigned int value;
+	int flags;
+	struct xnsynch synch;
+	struct cobalt_event_state *state;
+	struct cobalt_resnode resnode;
+};
+
+int __cobalt_event_wait(struct cobalt_event_shadow __user *u_event,
+			unsigned int bits,
+			unsigned int __user *u_bits_r,
+			int mode, const struct timespec64 *ts);
+
+int __cobalt_event_wait64(struct cobalt_event_shadow __user *u_event,
+			  unsigned int bits, unsigned int __user *u_bits_r,
+			  int mode,
+			  const struct __kernel_timespec __user *u_ts);
+
+COBALT_SYSCALL_DECL(event_init,
+		    (struct cobalt_event_shadow __user *u_evtsh,
+		     unsigned int value,
+		     int flags));
+
+COBALT_SYSCALL_DECL(event_wait,
+		    (struct cobalt_event_shadow __user *u_evtsh,
+		     unsigned int bits,
+		     unsigned int __user *u_bits_r,
+		     int mode,
+		     const struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(event_wait64,
+		    (struct cobalt_event_shadow __user *u_evtsh,
+		     unsigned int bits,
+		     unsigned int __user *u_bits_r,
+		     int mode,
+		     const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(event_sync,
+		    (struct cobalt_event_shadow __user *u_evtsh));
+
+COBALT_SYSCALL_DECL(event_destroy,
+		    (struct cobalt_event_shadow __user *u_evtsh));
+
+COBALT_SYSCALL_DECL(event_inquire,
+		    (struct cobalt_event_shadow __user *u_event,
+		     struct cobalt_event_info __user *u_info,
+		     pid_t __user *u_waitlist,
+		     size_t waitsz));
+
+void cobalt_event_reclaim(struct cobalt_resnode *node,
+			  spl_t s);
+
+#endif /* !_COBALT_POSIX_EVENT_H */
+++ linux-patched/kernel/xenomai/posix/memory.c	2022-03-21 12:58:28.995892803 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/syscall32.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/vmalloc.h>
+#include <rtdm/driver.h>
+#include <cobalt/kernel/vdso.h>
+#include "process.h"
+#include "memory.h"
+
+#define UMM_PRIVATE  0	/* Per-process user-mapped memory heap */
+#define UMM_SHARED   1	/* Shared user-mapped memory heap */
+#define SYS_GLOBAL   2	/* System heap (not mmapped) */
+
+struct xnvdso *nkvdso;
+EXPORT_SYMBOL_GPL(nkvdso);
+
+static void umm_vmopen(struct vm_area_struct *vma)
+{
+	struct cobalt_umm *umm = vma->vm_private_data;
+
+	atomic_inc(&umm->refcount);
+}
+
+static void umm_vmclose(struct vm_area_struct *vma)
+{
+	struct cobalt_umm *umm = vma->vm_private_data;
+
+	cobalt_umm_destroy(umm);
+}
+
+static struct vm_operations_struct umm_vmops = {
+	.open = umm_vmopen,
+	.close = umm_vmclose,
+};
+
+static struct cobalt_umm *umm_from_fd(struct rtdm_fd *fd)
+{
+	struct cobalt_process *process;
+
+	process = cobalt_current_process();
+	if (process == NULL)
+		return NULL;
+
+	if (rtdm_fd_minor(fd) == UMM_PRIVATE)
+		return &process->sys_ppd.umm;
+
+	return &cobalt_kernel_ppd.umm;
+}
+
+static int umm_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma)
+{
+	struct cobalt_umm *umm;
+	size_t len;
+	int ret;
+
+	umm = umm_from_fd(fd);
+	if (umm == NULL)
+		return -ENODEV;
+
+	len = vma->vm_end - vma->vm_start;
+	if (len != xnheap_get_size(&umm->heap))
+		return -EINVAL;
+
+	vma->vm_private_data = umm;
+	vma->vm_ops = &umm_vmops;
+	if (xnarch_cache_aliasing())
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	ret = rtdm_mmap_vmem(vma, xnheap_get_membase(&umm->heap));
+	if (ret)
+		return ret;
+
+	atomic_inc(&umm->refcount);
+
+	return 0;
+}
+
+#ifndef CONFIG_MMU
+static unsigned long umm_get_unmapped_area(struct rtdm_fd *fd,
+					   unsigned long len,
+					   unsigned long pgoff,
+					   unsigned long flags)
+{
+	struct cobalt_umm *umm;
+
+	umm = umm_from_fd(fd);
+	if (umm == NULL)
+		return -ENODEV;
+
+	if (pgoff == 0)
+		return (unsigned long)xnheap_get_membase(&umm->heap);
+
+	return pgoff << PAGE_SHIFT;
+}
+#else
+#define umm_get_unmapped_area	NULL
+#endif
+
+static int stat_umm(struct rtdm_fd *fd,
+		    struct cobalt_umm __user *u_stat)
+{
+	struct cobalt_memdev_stat stat;
+	struct cobalt_umm *umm;
+	spl_t s;
+
+	umm = umm_from_fd(fd);
+	if (umm == NULL)
+		return -ENODEV;
+
+	xnlock_get_irqsave(&umm->heap.lock, s);
+	stat.size = xnheap_get_size(&umm->heap);
+	stat.free = xnheap_get_free(&umm->heap);
+	xnlock_put_irqrestore(&umm->heap.lock, s);
+
+	return rtdm_safe_copy_to_user(fd, u_stat, &stat, sizeof(stat));
+}
+
+static int do_umm_ioctls(struct rtdm_fd *fd,
+			 unsigned int request, void __user *arg)
+{
+	int ret;
+
+	switch (request) {
+	case MEMDEV_RTIOC_STAT:
+		ret = stat_umm(fd, arg);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int umm_ioctl_rt(struct rtdm_fd *fd,
+			unsigned int request, void __user *arg)
+{
+	return do_umm_ioctls(fd, request, arg);
+}
+
+static int umm_ioctl_nrt(struct rtdm_fd *fd,
+			 unsigned int request, void __user *arg)
+{
+	return do_umm_ioctls(fd, request, arg);
+}
+
+static int sysmem_open(struct rtdm_fd *fd, int oflags)
+{
+	if ((oflags & O_ACCMODE) != O_RDONLY)
+		return -EACCES;
+
+	return 0;
+}
+
+static int do_sysmem_ioctls(struct rtdm_fd *fd,
+			    unsigned int request, void __user *arg)
+{
+	struct cobalt_memdev_stat stat;
+	spl_t s;
+	int ret;
+
+	switch (request) {
+	case MEMDEV_RTIOC_STAT:
+		xnlock_get_irqsave(&cobalt_heap.lock, s);
+		stat.size = xnheap_get_size(&cobalt_heap);
+		stat.free = xnheap_get_free(&cobalt_heap);
+		xnlock_put_irqrestore(&cobalt_heap.lock, s);
+		ret = rtdm_safe_copy_to_user(fd, arg, &stat, sizeof(stat));
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int sysmem_ioctl_rt(struct rtdm_fd *fd,
+			   unsigned int request, void __user *arg)
+{
+	return do_sysmem_ioctls(fd, request, arg);
+}
+
+static int sysmem_ioctl_nrt(struct rtdm_fd *fd,
+			   unsigned int request, void __user *arg)
+{
+	return do_sysmem_ioctls(fd, request, arg);
+}
+
+static struct rtdm_driver umm_driver = {
+	.profile_info	=	RTDM_PROFILE_INFO(umm,
+						  RTDM_CLASS_MEMORY,
+						  RTDM_SUBCLASS_GENERIC,
+						  0),
+	.device_flags	=	RTDM_NAMED_DEVICE|RTDM_FIXED_MINOR,
+	.device_count	=	2,
+	.ops = {
+		.ioctl_rt		=	umm_ioctl_rt,
+		.ioctl_nrt		=	umm_ioctl_nrt,
+		.mmap			=	umm_mmap,
+		.get_unmapped_area	=	umm_get_unmapped_area,
+	},
+};
+
+static struct rtdm_device umm_devices[] = {
+	[ UMM_PRIVATE ] = {
+		.driver = &umm_driver,
+		.label = COBALT_MEMDEV_PRIVATE,
+		.minor = UMM_PRIVATE,
+	},
+	[ UMM_SHARED ] = {
+		.driver = &umm_driver,
+		.label = COBALT_MEMDEV_SHARED,
+		.minor = UMM_SHARED,
+	},
+};
+
+static struct rtdm_driver sysmem_driver = {
+	.profile_info	=	RTDM_PROFILE_INFO(sysmem,
+						  RTDM_CLASS_MEMORY,
+						  SYS_GLOBAL,
+						  0),
+	.device_flags	=	RTDM_NAMED_DEVICE,
+	.device_count	=	1,
+	.ops = {
+		.open		=	sysmem_open,
+		.ioctl_rt	=	sysmem_ioctl_rt,
+		.ioctl_nrt	=	sysmem_ioctl_nrt,
+	},
+};
+
+static struct rtdm_device sysmem_device = {
+	.driver = &sysmem_driver,
+	.label = COBALT_MEMDEV_SYS,
+};
+
+static inline void init_vdso(void)
+{
+	nkvdso->features = XNVDSO_FEATURES;
+	nkvdso->wallclock_offset = nkclock.wallclock_offset;
+}
+
+int cobalt_memdev_init(void)
+{
+	int ret;
+
+	ret = cobalt_umm_init(&cobalt_kernel_ppd.umm,
+			      CONFIG_XENO_OPT_SHARED_HEAPSZ * 1024, NULL);
+	if (ret)
+		return ret;
+
+	cobalt_umm_set_name(&cobalt_kernel_ppd.umm, "shared heap");
+
+	nkvdso = cobalt_umm_alloc(&cobalt_kernel_ppd.umm, sizeof(*nkvdso));
+	if (nkvdso == NULL) {
+		ret = -ENOMEM;
+		goto fail_vdso;
+	}
+
+	init_vdso();
+
+	ret = rtdm_dev_register(umm_devices + UMM_PRIVATE);
+	if (ret)
+		goto fail_private;
+
+	ret = rtdm_dev_register(umm_devices + UMM_SHARED);
+	if (ret)
+		goto fail_shared;
+
+	ret = rtdm_dev_register(&sysmem_device);
+	if (ret)
+		goto fail_sysmem;
+
+	return 0;
+
+fail_sysmem:
+	rtdm_dev_unregister(umm_devices + UMM_SHARED);
+fail_shared:
+	rtdm_dev_unregister(umm_devices + UMM_PRIVATE);
+fail_private:
+	cobalt_umm_free(&cobalt_kernel_ppd.umm, nkvdso);
+fail_vdso:
+	cobalt_umm_destroy(&cobalt_kernel_ppd.umm);
+
+	return ret;
+}
+
+void cobalt_memdev_cleanup(void)
+{
+	rtdm_dev_unregister(&sysmem_device);
+	rtdm_dev_unregister(umm_devices + UMM_SHARED);
+	rtdm_dev_unregister(umm_devices + UMM_PRIVATE);
+	cobalt_umm_free(&cobalt_kernel_ppd.umm, nkvdso);
+	cobalt_umm_destroy(&cobalt_kernel_ppd.umm);
+}
+
+int cobalt_umm_init(struct cobalt_umm *umm, u32 size,
+		    void (*release)(struct cobalt_umm *umm))
+{
+	void *basemem;
+	int ret;
+
+	secondary_mode_only();
+
+	/* We don't support CPUs with VIVT caches and the like. */
+	BUG_ON(xnarch_cache_aliasing());
+
+	size = PAGE_ALIGN(size);
+	basemem = vmalloc_kernel(size, __GFP_ZERO);
+	if (basemem == NULL)
+		return -ENOMEM;
+
+	ret = xnheap_init(&umm->heap, basemem, size);
+	if (ret) {
+		vfree(basemem);
+		return ret;
+	}
+
+	umm->release = release;
+	atomic_set(&umm->refcount, 1);
+	smp_mb();
+
+	return 0;
+}
+
+void cobalt_umm_destroy(struct cobalt_umm *umm)
+{
+	secondary_mode_only();
+
+	if (atomic_dec_and_test(&umm->refcount)) {
+		xnheap_destroy(&umm->heap);
+		vfree(xnheap_get_membase(&umm->heap));
+		if (umm->release)
+			umm->release(umm);
+	}
+}
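+
+#if 0
+/*
+ * Editorial sketch, not part of the patch: typical use of the UMM
+ * helpers above when attaching a process. The heap size and release
+ * hook are illustrative only; cobalt_umm_init() backs the heap with
+ * vmalloc'ed, zeroed memory which userland later mmaps through the
+ * memdev devices registered above.
+ */
+static void example_umm_release(struct cobalt_umm *umm)
+{
+	/* Called once the last user drops the heap refcount. */
+}
+
+static int example_process_attach(struct cobalt_ppd *ppd)
+{
+	return cobalt_umm_init(&ppd->umm, 512 * 1024, example_umm_release);
+}
+#endif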
+++ linux-patched/kernel/xenomai/posix/syscall32.c	2022-03-21 12:58:28.991892842 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/clock.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <cobalt/uapi/syscall.h>
+#include <cobalt/kernel/time.h>
+#include <xenomai/rtdm/internal.h>
+#include "internal.h"
+#include "syscall32.h"
+#include "thread.h"
+#include "mutex.h"
+#include "cond.h"
+#include "sem.h"
+#include "sched.h"
+#include "clock.h"
+#include "timer.h"
+#include "timerfd.h"
+#include "signal.h"
+#include "monitor.h"
+#include "event.h"
+#include "mqueue.h"
+#include "io.h"
+#include "../debug.h"
+
+COBALT_SYSCALL32emu(thread_create, init,
+		    (compat_ulong_t pth,
+		     int policy,
+		     const struct compat_sched_param_ex __user *u_param_ex,
+		     int xid,
+		     __u32 __user *u_winoff))
+{
+	struct sched_param_ex param_ex;
+	int ret;
+
+	ret = sys32_get_param_ex(policy, &param_ex, u_param_ex);
+	if (ret)
+		return ret;
+
+	return __cobalt_thread_create(pth, policy, &param_ex, xid, u_winoff);
+}
+
+COBALT_SYSCALL32emu(thread_setschedparam_ex, conforming,
+		    (compat_ulong_t pth,
+		     int policy,
+		     const struct compat_sched_param_ex __user *u_param_ex,
+		     __u32 __user *u_winoff,
+		     int __user *u_promoted))
+{
+	struct sched_param_ex param_ex;
+	int ret;
+
+	ret = sys32_get_param_ex(policy, &param_ex, u_param_ex);
+	if (ret)
+		return ret;
+
+	return cobalt_thread_setschedparam_ex(pth, policy, &param_ex,
+					      u_winoff, u_promoted);
+}
+
+COBALT_SYSCALL32emu(thread_getschedparam_ex, current,
+		    (compat_ulong_t pth,
+		     int __user *u_policy,
+		     struct compat_sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+	int ret, policy;
+
+	ret = cobalt_thread_getschedparam_ex(pth, &policy, &param_ex);
+	if (ret)
+		return ret;
+
+	ret = cobalt_copy_to_user(u_policy, &policy, sizeof(policy));
+
+	return ret ?: sys32_put_param_ex(policy, u_param, &param_ex);
+}
+
+COBALT_SYSCALL32emu(thread_setschedprio, conforming,
+		    (compat_ulong_t pth,
+		     int prio,
+		     __u32 __user *u_winoff,
+		     int __user *u_promoted))
+{
+	return cobalt_thread_setschedprio(pth, prio, u_winoff, u_promoted);
+}
+
+static inline int sys32_fetch_timeout(struct timespec64 *ts,
+				      const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT :
+		sys32_get_timespec(ts, u_ts);
+}
+
+COBALT_SYSCALL32emu(sem_open, lostage,
+		    (compat_uptr_t __user *u_addrp,
+		     const char __user *u_name,
+		     int oflags, mode_t mode, unsigned int value))
+{
+	struct cobalt_sem_shadow __user *usm;
+	compat_uptr_t cusm;
+
+	if (__xn_get_user(cusm, u_addrp))
+		return -EFAULT;
+
+	usm = __cobalt_sem_open(compat_ptr(cusm), u_name, oflags, mode, value);
+	if (IS_ERR(usm))
+		return PTR_ERR(usm);
+
+	return __xn_put_user(ptr_to_compat(usm), u_addrp) ? -EFAULT : 0;
+}
+
+COBALT_SYSCALL32emu(sem_timedwait, primary,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     const struct old_timespec32 __user *u_ts))
+{
+	int ret = 1;
+	struct timespec64 ts64;
+
+	if (u_ts)
+		ret = sys32_fetch_timeout(&ts64, u_ts);
+
+	return __cobalt_sem_timedwait(u_sem, ret ? NULL : &ts64);
+}
+
+COBALT_SYSCALL32emu(sem_timedwait64, primary,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_sem_timedwait64(u_sem, u_ts);
+}
+
+COBALT_SYSCALL32emu(clock_getres, current,
+		    (clockid_t clock_id,
+		     struct old_timespec32 __user *u_ts))
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = __cobalt_clock_getres(clock_id, &ts);
+	if (ret)
+		return ret;
+
+	return u_ts ? sys32_put_timespec(u_ts, &ts) : 0;
+}
+
+COBALT_SYSCALL32emu(clock_getres64, current,
+		    (clockid_t clock_id,
+		     struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_clock_getres64(clock_id, u_ts);
+}
+
+COBALT_SYSCALL32emu(clock_gettime, current,
+		    (clockid_t clock_id,
+		     struct old_timespec32 __user *u_ts))
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = __cobalt_clock_gettime(clock_id, &ts);
+	if (ret)
+		return ret;
+
+	return sys32_put_timespec(u_ts, &ts);
+}
+
+COBALT_SYSCALL32emu(clock_gettime64, current,
+		    (clockid_t clock_id,
+		     struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_clock_gettime64(clock_id, u_ts);
+}
+
+COBALT_SYSCALL32emu(clock_settime, current,
+		    (clockid_t clock_id,
+		     const struct old_timespec32 __user *u_ts))
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = sys32_get_timespec(&ts, u_ts);
+	if (ret)
+		return ret;
+
+	return __cobalt_clock_settime(clock_id, &ts);
+}
+
+COBALT_SYSCALL32emu(clock_settime64, current,
+		    (clockid_t clock_id,
+		     const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_clock_settime64(clock_id, u_ts);
+}
+
+COBALT_SYSCALL32emu(clock_adjtime, current,
+		    (clockid_t clock_id, struct old_timex32 __user *u_tx))
+{
+	struct __kernel_timex tx;
+	int ret;
+
+	ret = sys32_get_timex(&tx, u_tx);
+	if (ret)
+		return ret;
+
+	ret = __cobalt_clock_adjtime(clock_id, &tx);
+	if (ret)
+		return ret;
+
+	return sys32_put_timex(u_tx, &tx);
+}
+
+COBALT_SYSCALL32emu(clock_adjtime64, current,
+		    (clockid_t clock_id, struct __kernel_timex __user *u_tx))
+{
+	return __cobalt_clock_adjtime64(clock_id, u_tx);
+}
+
+COBALT_SYSCALL32emu(clock_nanosleep, primary,
+		    (clockid_t clock_id, int flags,
+		     const struct old_timespec32 __user *u_rqt,
+		     struct old_timespec32 __user *u_rmt))
+{
+	struct timespec64 rqt, rmt, *rmtp = NULL;
+	int ret;
+
+	if (u_rmt)
+		rmtp = &rmt;
+
+	ret = sys32_get_timespec(&rqt, u_rqt);
+	if (ret)
+		return ret;
+
+	ret = __cobalt_clock_nanosleep(clock_id, flags, &rqt, rmtp);
+	if (ret == -EINTR && flags == 0 && rmtp)
+		ret = sys32_put_timespec(u_rmt, rmtp);
+
+	return ret;
+}
+
+COBALT_SYSCALL32emu(clock_nanosleep64, nonrestartable,
+		    (clockid_t clock_id, int flags,
+		     const struct __kernel_timespec __user *u_rqt,
+		     struct __kernel_timespec __user *u_rmt))
+{
+	return __cobalt_clock_nanosleep64(clock_id, flags, u_rqt, u_rmt);
+}
+
+COBALT_SYSCALL32emu(mutex_timedlock, primary,
+		    (struct cobalt_mutex_shadow __user *u_mx,
+		     const struct old_timespec32 __user *u_ts))
+{
+	return __cobalt_mutex_timedlock_break(u_mx, u_ts, sys32_fetch_timeout);
+}
+
+COBALT_SYSCALL32emu(mutex_timedlock64, primary,
+		    (struct cobalt_mutex_shadow __user *u_mx,
+		     const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_mutex_timedlock64(u_mx, u_ts);
+}
+
+COBALT_SYSCALL32emu(cond_wait_prologue, nonrestartable,
+		    (struct cobalt_cond_shadow __user *u_cnd,
+		     struct cobalt_mutex_shadow __user *u_mx,
+		     int *u_err,
+		     unsigned int timed,
+		     struct old_timespec32 __user *u_ts))
+{
+	return __cobalt_cond_wait_prologue(u_cnd, u_mx, u_err, u_ts,
+					   timed ? sys32_fetch_timeout : NULL);
+}
+
+COBALT_SYSCALL32emu(mq_open, lostage,
+		    (const char __user *u_name, int oflags,
+		     mode_t mode, struct compat_mq_attr __user *u_attr))
+{
+	struct mq_attr _attr, *attr = &_attr;
+	int ret;
+
+	if ((oflags & O_CREAT) && u_attr) {
+		ret = sys32_get_mqattr(&_attr, u_attr);
+		if (ret)
+			return ret;
+	} else
+		attr = NULL;
+
+	return __cobalt_mq_open(u_name, oflags, mode, attr);
+}
+
+COBALT_SYSCALL32emu(mq_getattr, current,
+		    (mqd_t uqd, struct compat_mq_attr __user *u_attr))
+{
+	struct mq_attr attr;
+	int ret;
+
+	ret = __cobalt_mq_getattr(uqd, &attr);
+	if (ret)
+		return ret;
+
+	return sys32_put_mqattr(u_attr, &attr);
+}
+
+COBALT_SYSCALL32emu(mq_timedsend, primary,
+		    (mqd_t uqd, const void __user *u_buf, size_t len,
+		     unsigned int prio,
+		     const struct old_timespec32 __user *u_ts))
+{
+	return __cobalt_mq_timedsend(uqd, u_buf, len, prio,
+				     u_ts, u_ts ? sys32_fetch_timeout : NULL);
+}
+
+COBALT_SYSCALL32emu(mq_timedsend64, primary,
+		    (mqd_t uqd, const void __user *u_buf, size_t len,
+		     unsigned int prio,
+		     const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_mq_timedsend64(uqd, u_buf, len, prio, u_ts);
+}
+
+COBALT_SYSCALL32emu(mq_timedreceive, primary,
+		    (mqd_t uqd, void __user *u_buf,
+		     compat_ssize_t __user *u_len,
+		     unsigned int __user *u_prio,
+		     const struct old_timespec32 __user *u_ts))
+{
+	compat_ssize_t clen;
+	ssize_t len;
+	int ret;
+
+	ret = cobalt_copy_from_user(&clen, u_len, sizeof(*u_len));
+	if (ret)
+		return ret;
+
+	len = clen;
+	ret = __cobalt_mq_timedreceive(uqd, u_buf, &len, u_prio,
+				       u_ts, u_ts ? sys32_fetch_timeout : NULL);
+	clen = len;
+
+	return ret ?: cobalt_copy_to_user(u_len, &clen, sizeof(*u_len));
+}
+
+COBALT_SYSCALL32emu(mq_timedreceive64, primary,
+		    (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len,
+		     unsigned int __user *u_prio,
+		     const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_mq_timedreceive64(uqd, u_buf, u_len, u_prio, u_ts);
+}
+
+static inline int mq_fetch_timeout(struct timespec64 *ts,
+				   const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_u_timespec(ts, u_ts);
+}
+
+COBALT_SYSCALL32emu(mq_notify, primary,
+		    (mqd_t fd, const struct compat_sigevent *__user u_cev))
+{
+	struct sigevent sev;
+	int ret;
+
+	if (u_cev) {
+		ret = sys32_get_sigevent(&sev, u_cev);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_mq_notify(fd, u_cev ? &sev : NULL);
+}
+
+COBALT_SYSCALL32emu(sched_weightprio, current,
+		    (int policy,
+		     const struct compat_sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+	int ret;
+
+	ret = sys32_get_param_ex(policy, &param_ex, u_param);
+	if (ret)
+		return ret;
+
+	return __cobalt_sched_weightprio(policy, &param_ex);
+}
+
+static union sched_config *
+sys32_fetch_config(int policy, const void __user *u_config, size_t *len)
+{
+	union compat_sched_config *cbuf;
+	union sched_config *buf;
+	int ret, n;
+
+	if (u_config == NULL)
+		return ERR_PTR(-EFAULT);
+
+	if (policy == SCHED_QUOTA && *len < sizeof(cbuf->quota))
+		return ERR_PTR(-EINVAL);
+
+	cbuf = xnmalloc(*len);
+	if (cbuf == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = cobalt_copy_from_user(cbuf, u_config, *len);
+	if (ret) {
+		buf = ERR_PTR(ret);
+		goto out;
+	}
+
+	switch (policy) {
+	case SCHED_TP:
+		*len = sched_tp_confsz(cbuf->tp.nr_windows);
+		break;
+	case SCHED_QUOTA:
+		break;
+	default:
+		buf = ERR_PTR(-EINVAL);
+		goto out;
+	}
+
+	buf = xnmalloc(*len);
+	if (buf == NULL) {
+		buf = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	if (policy == SCHED_QUOTA)
+		memcpy(&buf->quota, &cbuf->quota, sizeof(cbuf->quota));
+	else {
+		buf->tp.op = cbuf->tp.op;
+		buf->tp.nr_windows = cbuf->tp.nr_windows;
+		for (n = 0; n < buf->tp.nr_windows; n++) {
+			buf->tp.windows[n].ptid = cbuf->tp.windows[n].ptid;
+			buf->tp.windows[n].offset.tv_sec = cbuf->tp.windows[n].offset.tv_sec;
+			buf->tp.windows[n].offset.tv_nsec = cbuf->tp.windows[n].offset.tv_nsec;
+			buf->tp.windows[n].duration.tv_sec = cbuf->tp.windows[n].duration.tv_sec;
+			buf->tp.windows[n].duration.tv_nsec = cbuf->tp.windows[n].duration.tv_nsec;
+		}
+	}
+out:
+	xnfree(cbuf);
+
+	return buf;
+}
+
+static int sys32_ack_config(int policy, const union sched_config *config,
+			    void __user *u_config)
+{
+	union compat_sched_config __user *u_p = u_config;
+
+	if (policy != SCHED_QUOTA)
+		return 0;
+
+	return u_config == NULL ? -EFAULT :
+		cobalt_copy_to_user(&u_p->quota.info, &config->quota.info,
+				       sizeof(u_p->quota.info));
+}
+
+static ssize_t sys32_put_config(int policy,
+				void __user *u_config, size_t u_len,
+				const union sched_config *config, size_t len)
+{
+	union compat_sched_config __user *u_p = u_config;
+	int n, ret;
+
+	if (u_config == NULL)
+		return -EFAULT;
+
+	if (policy == SCHED_QUOTA) {
+		if (u_len < sizeof(u_p->quota))
+			return -EINVAL;
+		return cobalt_copy_to_user(&u_p->quota.info, &config->quota.info,
+					      sizeof(u_p->quota.info)) ?:
+			sizeof(u_p->quota.info);
+	}
+
+	/* SCHED_TP */
+
+	if (u_len < compat_sched_tp_confsz(config->tp.nr_windows))
+		return -ENOSPC;
+
+	__xn_put_user(config->tp.op, &u_p->tp.op);
+	__xn_put_user(config->tp.nr_windows, &u_p->tp.nr_windows);
+
+	for (n = 0, ret = 0; n < config->tp.nr_windows; n++) {
+		ret |= __xn_put_user(config->tp.windows[n].ptid,
+				     &u_p->tp.windows[n].ptid);
+		ret |= __xn_put_user(config->tp.windows[n].offset.tv_sec,
+				     &u_p->tp.windows[n].offset.tv_sec);
+		ret |= __xn_put_user(config->tp.windows[n].offset.tv_nsec,
+				     &u_p->tp.windows[n].offset.tv_nsec);
+		ret |= __xn_put_user(config->tp.windows[n].duration.tv_sec,
+				     &u_p->tp.windows[n].duration.tv_sec);
+		ret |= __xn_put_user(config->tp.windows[n].duration.tv_nsec,
+				     &u_p->tp.windows[n].duration.tv_nsec);
+	}
+
+	return ret ?: u_len;
+}
+
+COBALT_SYSCALL32emu(sched_setconfig_np, conforming,
+		    (int cpu, int policy,
+		     union compat_sched_config __user *u_config,
+		     size_t len))
+{
+	return __cobalt_sched_setconfig_np(cpu, policy, u_config, len,
+					   sys32_fetch_config, sys32_ack_config);
+}
+
+COBALT_SYSCALL32emu(sched_getconfig_np, conforming,
+		    (int cpu, int policy,
+		     union compat_sched_config __user *u_config,
+		     size_t len))
+{
+	return __cobalt_sched_getconfig_np(cpu, policy, u_config, len,
+					   sys32_fetch_config, sys32_put_config);
+}
+
+COBALT_SYSCALL32emu(sched_setscheduler_ex, conforming,
+		    (compat_pid_t pid,
+		     int policy,
+		     const struct compat_sched_param_ex __user *u_param_ex,
+		     __u32 __user *u_winoff,
+		     int __user *u_promoted))
+{
+	struct sched_param_ex param_ex;
+	int ret;
+
+	ret = sys32_get_param_ex(policy, &param_ex, u_param_ex);
+	if (ret)
+		return ret;
+
+	return cobalt_sched_setscheduler_ex(pid, policy, &param_ex,
+					    u_winoff, u_promoted);
+}
+
+COBALT_SYSCALL32emu(sched_getscheduler_ex, current,
+		    (compat_pid_t pid,
+		     int __user *u_policy,
+		     struct compat_sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+	int ret, policy;
+
+	ret = cobalt_sched_getscheduler_ex(pid, &policy, &param_ex);
+	if (ret)
+		return ret;
+
+	ret = cobalt_copy_to_user(u_policy, &policy, sizeof(policy));
+
+	return ret ?: sys32_put_param_ex(policy, u_param, &param_ex);
+}
+
+COBALT_SYSCALL32emu(timer_create, current,
+		    (clockid_t clock,
+		     const struct compat_sigevent __user *u_sev,
+		     timer_t __user *u_tm))
+{
+	struct sigevent sev, *evp = NULL;
+	int ret;
+
+	if (u_sev) {
+		evp = &sev;
+		ret = sys32_get_sigevent(&sev, u_sev);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_timer_create(clock, evp, u_tm);
+}
+
+COBALT_SYSCALL32emu(timer_settime, primary,
+		    (timer_t tm, int flags,
+		     const struct old_itimerspec32 __user *u_newval,
+		     struct old_itimerspec32 __user *u_oldval))
+{
+	struct itimerspec64 newv, oldv, *oldvp = &oldv;
+	int ret;
+
+	if (u_oldval == NULL)
+		oldvp = NULL;
+
+	ret = sys32_get_itimerspec(&newv, u_newval);
+	if (ret)
+		return ret;
+
+	ret = __cobalt_timer_settime(tm, flags, &newv, oldvp);
+	if (ret)
+		return ret;
+
+	if (oldvp) {
+		ret = sys32_put_itimerspec(u_oldval, oldvp);
+		if (ret)
+			__cobalt_timer_settime(tm, flags, oldvp, NULL);
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL32emu(timer_gettime, current,
+		    (timer_t tm, struct old_itimerspec32 __user *u_val))
+{
+	struct itimerspec64 val;
+	int ret;
+
+	ret = __cobalt_timer_gettime(tm, &val);
+
+	return ret ?: sys32_put_itimerspec(u_val, &val);
+}
+
+COBALT_SYSCALL32emu(timerfd_settime, primary,
+		    (int fd, int flags,
+		     const struct old_itimerspec32 __user *new_value,
+		     struct old_itimerspec32 __user *old_value))
+{
+	struct itimerspec64 ovalue, value;
+	int ret;
+
+	ret = sys32_get_itimerspec(&value, new_value);
+	if (ret)
+		return ret;
+
+	ret = __cobalt_timerfd_settime(fd, flags, &value, &ovalue);
+	if (ret)
+		return ret;
+
+	if (old_value) {
+		ret = sys32_put_itimerspec(old_value, &ovalue);
+		if (ret) {
+			/* Copy-out failed: disarm the timer we just set. */
+			value.it_value.tv_sec = 0;
+			value.it_value.tv_nsec = 0;
+			__cobalt_timerfd_settime(fd, flags, &value, NULL);
+		}
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL32emu(timerfd_gettime, current,
+		    (int fd, struct old_itimerspec32 __user *curr_value))
+{
+	struct itimerspec64 value;
+	int ret;
+
+	ret = __cobalt_timerfd_gettime(fd, &value);
+
+	return ret ?: sys32_put_itimerspec(curr_value, &value);
+}
+
+COBALT_SYSCALL32emu(sigwait, primary,
+		    (const compat_sigset_t __user *u_set,
+		     int __user *u_sig))
+{
+	sigset_t set;
+	int ret, sig;
+
+	ret = sys32_get_sigset(&set, u_set);
+	if (ret)
+		return ret;
+
+	sig = __cobalt_sigwait(&set);
+	if (sig < 0)
+		return sig;
+
+	return cobalt_copy_to_user(u_sig, &sig, sizeof(*u_sig));
+}
+
+COBALT_SYSCALL32emu(sigtimedwait, nonrestartable,
+		    (const compat_sigset_t __user *u_set,
+		     struct compat_siginfo __user *u_si,
+		     const struct old_timespec32 __user *u_timeout))
+{
+	struct timespec64 timeout;
+	sigset_t set;
+	int ret;
+
+	ret = sys32_get_sigset(&set, u_set);
+	if (ret)
+		return ret;
+
+	ret = sys32_get_timespec(&timeout, u_timeout);
+	if (ret)
+		return ret;
+
+	return __cobalt_sigtimedwait(&set, &timeout, u_si, true);
+}
+
+COBALT_SYSCALL32emu(sigtimedwait64, nonrestartable,
+		    (const compat_sigset_t __user *u_set,
+		     struct compat_siginfo __user *u_si,
+		     const struct __kernel_timespec __user *u_timeout))
+{
+	struct timespec64 timeout;
+	sigset_t set;
+	int ret;
+
+	ret = sys32_get_sigset(&set, u_set);
+	if (ret)
+		return ret;
+
+	ret = cobalt_get_timespec64(&timeout, u_timeout);
+	if (ret)
+		return ret;
+
+	return __cobalt_sigtimedwait(&set, &timeout, u_si, true);
+}
+
+COBALT_SYSCALL32emu(sigwaitinfo, nonrestartable,
+		    (const compat_sigset_t __user *u_set,
+		     struct compat_siginfo __user *u_si))
+{
+	sigset_t set;
+	int ret;
+
+	ret = sys32_get_sigset(&set, u_set);
+	if (ret)
+		return ret;
+
+	return __cobalt_sigwaitinfo(&set, u_si, true);
+}
+
+COBALT_SYSCALL32emu(sigpending, primary, (compat_old_sigset_t __user *u_set))
+{
+	struct cobalt_thread *curr = cobalt_current_thread();
+
+	return sys32_put_sigset((compat_sigset_t *)u_set, &curr->sigpending);
+}
+
+COBALT_SYSCALL32emu(sigqueue, conforming,
+		    (pid_t pid, int sig,
+		     const union compat_sigval __user *u_value))
+{
+	union sigval val;
+	int ret;
+
+	ret = sys32_get_sigval(&val, u_value);
+
+	return ret ?: __cobalt_sigqueue(pid, sig, &val);
+}
+
+COBALT_SYSCALL32emu(monitor_wait, nonrestartable,
+		    (struct cobalt_monitor_shadow __user *u_mon,
+		     int event, const struct old_timespec32 __user *u_ts,
+		     int __user *u_ret))
+{
+	struct timespec64 ts, *tsp = NULL;
+	int ret;
+
+	if (u_ts) {
+		tsp = &ts;
+		ret = sys32_get_timespec(&ts, u_ts);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_monitor_wait(u_mon, event, tsp, u_ret);
+}
+
+COBALT_SYSCALL32emu(monitor_wait64, nonrestartable,
+		    (struct cobalt_monitor_shadow __user *u_mon, int event,
+		     const struct __kernel_timespec __user *u_ts,
+		     int __user *u_ret))
+{
+	return __cobalt_monitor_wait64(u_mon, event, u_ts, u_ret);
+}
+
+COBALT_SYSCALL32emu(event_wait, primary,
+		    (struct cobalt_event_shadow __user *u_event,
+		     unsigned int bits,
+		     unsigned int __user *u_bits_r,
+		     int mode, const struct old_timespec32 __user *u_ts))
+{
+	struct timespec64 ts, *tsp = NULL;
+	int ret;
+
+	if (u_ts) {
+		tsp = &ts;
+		ret = sys32_get_timespec(&ts, u_ts);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_event_wait(u_event, bits, u_bits_r, mode, tsp);
+}
+
+COBALT_SYSCALL32emu(event_wait64, primary,
+		    (struct cobalt_event_shadow __user *u_event,
+		     unsigned int bits,
+		     unsigned int __user *u_bits_r,
+		     int mode, const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_event_wait64(u_event, bits, u_bits_r, mode, u_ts);
+}
+
+COBALT_SYSCALL32emu(select, primary,
+		    (int nfds,
+		     compat_fd_set __user *u_rfds,
+		     compat_fd_set __user *u_wfds,
+		     compat_fd_set __user *u_xfds,
+		     struct old_timeval32 __user *u_tv))
+{
+	return __cobalt_select(nfds, u_rfds, u_wfds, u_xfds, u_tv, true);
+}
+
+COBALT_SYSCALL32emu(recvmsg, handover,
+		    (int fd, struct compat_msghdr __user *umsg,
+		     int flags))
+{
+	struct user_msghdr m;
+	ssize_t ret;
+
+	ret = sys32_get_msghdr(&m, umsg);
+	if (ret)
+		return ret;
+
+	ret = rtdm_fd_recvmsg(fd, &m, flags);
+	if (ret < 0)
+		return ret;
+
+	return sys32_put_msghdr(umsg, &m) ?: ret;
+}
+
+static int get_timespec32(struct timespec64 *ts,
+			  const void __user *u_ts)
+{
+	return sys32_get_timespec(ts, u_ts);
+}
+
+static int get_mmsg32(struct mmsghdr *mmsg, void __user *u_mmsg)
+{
+	return sys32_get_mmsghdr(mmsg, u_mmsg);
+}
+
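+/*
+ * Write one compat mmsghdr back to userland, then advance the
+ * caller's cursor to the next slot of the compat array, as expected
+ * by the generic __rtdm_fd_recvmmsg() loop.
+ */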
+static int put_mmsg32(void __user **u_mmsg_p, const struct mmsghdr *mmsg)
+{
+	struct compat_mmsghdr __user **p = (struct compat_mmsghdr **)u_mmsg_p,
+		*q __user = (*p)++;
+
+	return sys32_put_mmsghdr(q, mmsg);
+}
+
+COBALT_SYSCALL32emu(recvmmsg, primary,
+	       (int ufd, struct compat_mmsghdr __user *u_msgvec, unsigned int vlen,
+		unsigned int flags, struct old_timespec32 *u_timeout))
+{
+	return __rtdm_fd_recvmmsg(ufd, u_msgvec, vlen, flags, u_timeout,
+				  get_mmsg32, put_mmsg32,
+				  get_timespec32);
+}
+
+COBALT_SYSCALL32emu(recvmmsg64, primary,
+		    (int ufd, struct compat_mmsghdr __user *u_msgvec,
+		     unsigned int vlen, unsigned int flags,
+		     struct __kernel_timespec *u_timeout))
+{
+	return __rtdm_fd_recvmmsg64(ufd, u_msgvec, vlen, flags, u_timeout,
+				    get_mmsg32, put_mmsg32);
+}
+
+COBALT_SYSCALL32emu(sendmsg, handover,
+		    (int fd, struct compat_msghdr __user *umsg, int flags))
+{
+	struct user_msghdr m;
+	int ret;
+
+	ret = sys32_get_msghdr(&m, umsg);
+
+	return ret ?: rtdm_fd_sendmsg(fd, &m, flags);
+}
+
+static int put_mmsglen32(void __user **u_mmsg_p, const struct mmsghdr *mmsg)
+{
+	struct compat_mmsghdr __user **p = (struct compat_mmsghdr **)u_mmsg_p,
+		*q __user = (*p)++;
+
+	return __xn_put_user(mmsg->msg_len, &q->msg_len);
+}
+
+COBALT_SYSCALL32emu(sendmmsg, primary,
+		    (int fd, struct compat_mmsghdr __user *u_msgvec, unsigned int vlen,
+		     unsigned int flags))
+{
+	return __rtdm_fd_sendmmsg(fd, u_msgvec, vlen, flags,
+				  get_mmsg32, put_mmsglen32);
+}
+
+COBALT_SYSCALL32emu(mmap, lostage,
+		    (int fd, struct compat_rtdm_mmap_request __user *u_crma,
+		     compat_uptr_t __user *u_caddrp))
+{
+	struct _rtdm_mmap_request rma;
+	compat_uptr_t u_caddr;
+	void *u_addr = NULL;
+	int ret;
+
+	if (u_crma == NULL ||
+	    !access_rok(u_crma, sizeof(*u_crma)) ||
+	    __xn_get_user(rma.length, &u_crma->length) ||
+	    __xn_get_user(rma.offset, &u_crma->offset) ||
+	    __xn_get_user(rma.prot, &u_crma->prot) ||
+	    __xn_get_user(rma.flags, &u_crma->flags))
+		return -EFAULT;
+
+	ret = rtdm_fd_mmap(fd, &rma, &u_addr);
+	if (ret)
+		return ret;
+
+	u_caddr = ptr_to_compat(u_addr);
+
+	return cobalt_copy_to_user(u_caddrp, &u_caddr, sizeof(u_caddr));
+}
+
+COBALT_SYSCALL32emu(backtrace, current,
+		    (int nr, compat_ulong_t __user *u_backtrace,
+		     int reason))
+{
+	compat_ulong_t cbacktrace[SIGSHADOW_BACKTRACE_DEPTH];
+	unsigned long backtrace[SIGSHADOW_BACKTRACE_DEPTH];
+	int ret, n;
+
+	if (nr <= 0)
+		return 0;
+
+	if (nr > SIGSHADOW_BACKTRACE_DEPTH)
+		nr = SIGSHADOW_BACKTRACE_DEPTH;
+
+	ret = cobalt_copy_from_user(cbacktrace, u_backtrace,
+				       nr * sizeof(compat_ulong_t));
+	if (ret)
+		return ret;
+
+	for (n = 0; n < nr; n++)
+		backtrace[n] = cbacktrace[n];
+
+	xndebug_trace_relax(nr, backtrace, reason);
+
+	return 0;
+}
+++ linux-patched/kernel/xenomai/posix/clock.c	2022-03-21 12:58:28.987892881 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/monitor.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/clocksource.h>
+#include <linux/bitmap.h>
+#include <cobalt/kernel/clock.h>
+#include "internal.h"
+#include "thread.h"
+#include "clock.h"
+#include <trace/events/cobalt-posix.h>
+#include <cobalt/kernel/time.h>
+
+static struct xnclock *external_clocks[COBALT_MAX_EXTCLOCKS];
+
+DECLARE_BITMAP(cobalt_clock_extids, COBALT_MAX_EXTCLOCKS);
+
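+/*
+ * Dispatch a clock operation to an external (dynamically registered)
+ * clock. The macro evaluates to 0 or a negative error code for the
+ * lookup itself, while the result of the xnclock handler is stored
+ * into __ret.
+ */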
+#define do_ext_clock(__clock_id, __handler, __ret, __args...)	\
+({								\
+	struct xnclock *__clock;				\
+	int __val = 0, __nr;					\
+	spl_t __s;						\
+								\
+	if (!__COBALT_CLOCK_EXT_P(__clock_id))			\
+		__val = -EINVAL;				\
+	else {							\
+		__nr = __COBALT_CLOCK_EXT_INDEX(__clock_id);	\
+		xnlock_get_irqsave(&nklock, __s);		\
+		if (!test_bit(__nr, cobalt_clock_extids)) {	\
+			xnlock_put_irqrestore(&nklock, __s);	\
+			__val = -EINVAL;			\
+		} else {					\
+			__clock = external_clocks[__nr];	\
+			(__ret) = xnclock_ ## __handler(__clock, ##__args); \
+			xnlock_put_irqrestore(&nklock, __s);	\
+		}						\
+	}							\
+	__val;							\
+})
+
+int __cobalt_clock_getres(clockid_t clock_id, struct timespec64 *ts)
+{
+	xnticks_t ns;
+	int ret;
+
+	switch (clock_id) {
+	case CLOCK_REALTIME:
+	case CLOCK_MONOTONIC:
+	case CLOCK_MONOTONIC_RAW:
+		ns2ts(ts, 1);
+		break;
+	default:
+		ret = do_ext_clock(clock_id, get_resolution, ns);
+		if (ret)
+			return ret;
+		ns2ts(ts, ns);
+	}
+
+	trace_cobalt_clock_getres(clock_id, ts);
+
+	return 0;
+}
+
+COBALT_SYSCALL(clock_getres, current,
+	       (clockid_t clock_id, struct __user_old_timespec __user *u_ts))
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = __cobalt_clock_getres(clock_id, &ts);
+	if (ret)
+		return ret;
+
+	if (u_ts && cobalt_put_u_timespec(u_ts, &ts))
+		return -EFAULT;
+
+	trace_cobalt_clock_getres(clock_id, &ts);
+
+	return 0;
+}
+
+int __cobalt_clock_getres64(clockid_t clock_id,
+			struct __kernel_timespec __user *u_ts)
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = __cobalt_clock_getres(clock_id, &ts);
+	if (ret)
+		return ret;
+
+	if (cobalt_put_timespec64(&ts, u_ts))
+		return -EFAULT;
+
+	trace_cobalt_clock_getres(clock_id, &ts);
+
+	return 0;
+}
+
+COBALT_SYSCALL(clock_getres64, current,
+	       (clockid_t clock_id, struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_clock_getres64(clock_id, u_ts);
+}
+
+int __cobalt_clock_gettime(clockid_t clock_id, struct timespec64 *ts)
+{
+	xnticks_t ns;
+	int ret;
+
+	switch (clock_id) {
+	case CLOCK_REALTIME:
+		ns2ts(ts, xnclock_read_realtime(&nkclock));
+		break;
+	case CLOCK_MONOTONIC:
+	case CLOCK_MONOTONIC_RAW:
+		ns2ts(ts, xnclock_read_monotonic(&nkclock));
+		break;
+	case CLOCK_HOST_REALTIME:
+		if (pipeline_get_host_time(ts) != 0)
+			return -EINVAL;
+		break;
+	default:
+		ret = do_ext_clock(clock_id, read_monotonic, ns);
+		if (ret)
+			return ret;
+		ns2ts(ts, ns);
+	}
+
+	trace_cobalt_clock_gettime(clock_id, ts);
+
+	return 0;
+}
+
+COBALT_SYSCALL(clock_gettime, current,
+	       (clockid_t clock_id, struct __user_old_timespec __user *u_ts))
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = __cobalt_clock_gettime(clock_id, &ts);
+	if (ret)
+		return ret;
+
+	if (cobalt_put_u_timespec(u_ts, &ts))
+		return -EFAULT;
+
+	return 0;
+}
+
+int __cobalt_clock_gettime64(clockid_t clock_id,
+			struct __kernel_timespec __user *u_ts)
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = __cobalt_clock_gettime(clock_id, &ts);
+	if (ret)
+		return ret;
+
+	if (cobalt_put_timespec64(&ts, u_ts))
+		return -EFAULT;
+
+	return 0;
+}
+
+COBALT_SYSCALL(clock_gettime64, current,
+	       (clockid_t clock_id, struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_clock_gettime64(clock_id, u_ts);
+}
+
+int __cobalt_clock_settime(clockid_t clock_id, const struct timespec64 *ts)
+{
+	int _ret, ret = 0;
+
+	if ((unsigned long)ts->tv_nsec >= ONE_BILLION)
+		return -EINVAL;
+
+	switch (clock_id) {
+	case CLOCK_REALTIME:
+		ret = pipeline_set_wallclock(ts2ns(ts));
+		break;
+	default:
+		_ret = do_ext_clock(clock_id, set_time, ret, ts);
+		if (_ret || ret)
+			return _ret ?: ret;
+	}
+
+	trace_cobalt_clock_settime(clock_id, ts);
+
+	return ret;
+}
+
+int __cobalt_clock_adjtime(clockid_t clock_id, struct __kernel_timex *tx)
+{
+	int _ret, ret = 0;
+
+	switch (clock_id) {
+	case CLOCK_REALTIME:
+	case CLOCK_MONOTONIC:
+	case CLOCK_MONOTONIC_RAW:
+	case CLOCK_HOST_REALTIME:
+		return -EOPNOTSUPP;
+	default:
+		_ret = do_ext_clock(clock_id, adjust_time, ret, tx);
+		if (_ret || ret)
+			return _ret ?: ret;
+	}
+
+	trace_cobalt_clock_adjtime(clock_id, tx);
+
+	return 0;
+}
+
+COBALT_SYSCALL(clock_settime, current,
+	       (clockid_t clock_id, const struct __user_old_timespec __user *u_ts))
+{
+	struct timespec64 ts;
+
+	if (cobalt_get_u_timespec(&ts, u_ts))
+		return -EFAULT;
+
+	return __cobalt_clock_settime(clock_id, &ts);
+}
+
+int __cobalt_clock_settime64(clockid_t clock_id,
+			const struct __kernel_timespec __user *u_ts)
+{
+	struct timespec64 ts64;
+
+	if (cobalt_get_timespec64(&ts64, u_ts))
+		return -EFAULT;
+
+	return __cobalt_clock_settime(clock_id, &ts64);
+}
+
+COBALT_SYSCALL(clock_settime64, current,
+	       (clockid_t clock_id, const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_clock_settime64(clock_id, u_ts);
+}
+
+COBALT_SYSCALL(clock_adjtime, current,
+	       (clockid_t clock_id, struct __user_old_timex __user *u_tx))
+{
+	struct __kernel_timex tx;
+	int ret;
+
+	if (cobalt_copy_from_user(&tx, u_tx, sizeof(tx)))
+		return -EFAULT;
+
+	ret = __cobalt_clock_adjtime(clock_id, &tx);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_tx, &tx, sizeof(tx));
+}
+
+int __cobalt_clock_adjtime64(clockid_t clock_id,
+			struct __kernel_timex __user *u_tx)
+{
+	struct __kernel_timex tx;
+	int ret;
+
+	if (cobalt_copy_from_user(&tx, u_tx, sizeof(tx)))
+		return -EFAULT;
+
+	ret = __cobalt_clock_adjtime(clock_id, &tx);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_tx, &tx, sizeof(tx));
+}
+
+COBALT_SYSCALL(clock_adjtime64, current,
+	       (clockid_t clock_id, struct __kernel_timex __user *u_tx))
+{
+	return __cobalt_clock_adjtime64(clock_id, u_tx);
+}
+
+int __cobalt_clock_nanosleep(clockid_t clock_id, int flags,
+			     const struct timespec64 *rqt,
+			     struct timespec64 *rmt)
+{
+	struct restart_block *restart;
+	struct xnthread *cur;
+	xnsticks_t timeout, rem;
+	spl_t s;
+
+	trace_cobalt_clock_nanosleep(clock_id, flags, rqt);
+
+	if (clock_id != CLOCK_MONOTONIC &&
+	    clock_id != CLOCK_MONOTONIC_RAW &&
+	    clock_id != CLOCK_REALTIME)
+		return -EOPNOTSUPP;
+
+	if (rqt->tv_sec < 0)
+		return -EINVAL;
+
+	if ((unsigned long)rqt->tv_nsec >= ONE_BILLION)
+		return -EINVAL;
+
+	if (flags & ~TIMER_ABSTIME)
+		return -EINVAL;
+
+	cur = xnthread_current();
+
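+	/*
+	 * XNSYSRST means this sleep was interrupted by a Linux signal
+	 * and marked for restart: if our placeholder restart handler
+	 * is still installed, resume with the saved expiry, otherwise
+	 * report -EINTR along with the remaining time.
+	 */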
+	if (xnthread_test_localinfo(cur, XNSYSRST)) {
+		xnthread_clear_localinfo(cur, XNSYSRST);
+
+		restart = cobalt_get_restart_block(current);
+
+		if (restart->fn != cobalt_restart_syscall_placeholder) {
+			if (rmt) {
+				xnlock_get_irqsave(&nklock, s);
+				rem = xntimer_get_timeout_stopped(&cur->rtimer);
+				xnlock_put_irqrestore(&nklock, s);
+				ns2ts(rmt, rem > 1 ? rem : 0);
+			}
+			return -EINTR;
+		}
+
+		timeout = restart->nanosleep.expires;
+	} else
+		timeout = ts2ns(rqt);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	xnthread_suspend(cur, XNDELAY, timeout + 1,
+			 clock_flag(flags, clock_id), NULL);
+
+	if (xnthread_test_info(cur, XNBREAK)) {
+		if (signal_pending(current)) {
+			restart = cobalt_get_restart_block(current);
+			restart->nanosleep.expires =
+				(flags & TIMER_ABSTIME) ? timeout :
+				    xntimer_get_timeout_stopped(&cur->rtimer);
+			xnlock_put_irqrestore(&nklock, s);
+			restart->fn = cobalt_restart_syscall_placeholder;
+
+			xnthread_set_localinfo(cur, XNSYSRST);
+
+			return -ERESTARTSYS;
+		}
+
+		if (flags == 0 && rmt) {
+			rem = xntimer_get_timeout_stopped(&cur->rtimer);
+			xnlock_put_irqrestore(&nklock, s);
+			ns2ts(rmt, rem > 1 ? rem : 0);
+		} else
+			xnlock_put_irqrestore(&nklock, s);
+
+		return -EINTR;
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+COBALT_SYSCALL(clock_nanosleep, primary,
+	       (clockid_t clock_id, int flags,
+		const struct __user_old_timespec __user *u_rqt,
+		struct __user_old_timespec __user *u_rmt))
+{
+	struct timespec64 rqt, rmt, *rmtp = NULL;
+	int ret;
+
+	if (u_rmt)
+		rmtp = &rmt;
+
+	if (cobalt_get_u_timespec(&rqt, u_rqt))
+		return -EFAULT;
+
+	ret = __cobalt_clock_nanosleep(clock_id, flags, &rqt, rmtp);
+	if (ret == -EINTR && flags == 0 && rmtp) {
+		if (cobalt_put_u_timespec(u_rmt, rmtp))
+			return -EFAULT;
+	}
+
+	return ret;
+}
+
+int __cobalt_clock_nanosleep64(clockid_t clock_id, int flags,
+		const struct __kernel_timespec __user *u_rqt,
+		struct __kernel_timespec __user *u_rmt)
+{
+	struct timespec64 rqt, rmt, *rmtp = NULL;
+	int ret;
+
+	if (u_rmt)
+		rmtp = &rmt;
+
+	if (cobalt_get_timespec64(&rqt, u_rqt))
+		return -EFAULT;
+
+	ret = __cobalt_clock_nanosleep(clock_id, flags, &rqt, rmtp);
+	if (ret == -EINTR && flags == 0 && rmtp) {
+		if (cobalt_put_timespec64(rmtp, u_rmt))
+			return -EFAULT;
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL(clock_nanosleep64, primary,
+	       (clockid_t clock_id, int flags,
+		const struct __kernel_timespec __user *u_rqt,
+		struct __kernel_timespec __user *u_rmt))
+{
+	return __cobalt_clock_nanosleep64(clock_id, flags, u_rqt, u_rmt);
+}
+
+int cobalt_clock_register(struct xnclock *clock, const cpumask_t *affinity,
+			  clockid_t *clk_id)
+{
+	int ret, nr;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	nr = find_first_zero_bit(cobalt_clock_extids, COBALT_MAX_EXTCLOCKS);
+	if (nr >= COBALT_MAX_EXTCLOCKS) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EAGAIN;
+	}
+
+	/*
+	 * CAUTION: a bit raised in cobalt_clock_extids means that the
+	 * corresponding entry in external_clocks[] is valid. The
+	 * converse assumption is NOT true.
+	 */
+	__set_bit(nr, cobalt_clock_extids);
+	external_clocks[nr] = clock;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	ret = xnclock_register(clock, affinity);
+	if (ret)
+		return ret;
+
+	clock->id = nr;
+	*clk_id = __COBALT_CLOCK_EXT(clock->id);
+
+	trace_cobalt_clock_register(clock->name, *clk_id);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cobalt_clock_register);
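+
+/*
+ * Rough usage sketch (not part of this file, names below are only
+ * illustrative): a driver exposing its own timebase would register
+ * it once, then hand out the returned clock id, e.g.:
+ *
+ *	static struct xnclock myclock;    (fields set up elsewhere, hypothetical)
+ *	clockid_t clk_id;
+ *	int ret = cobalt_clock_register(&myclock, cpu_online_mask, &clk_id);
+ *
+ * On success, clk_id encodes the external clock index and may be
+ * passed to the clock_gettime()/clock_settime() services above.
+ */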
+
+void cobalt_clock_deregister(struct xnclock *clock)
+{
+	trace_cobalt_clock_deregister(clock->name, clock->id);
+	clear_bit(clock->id, cobalt_clock_extids);
+	smp_mb__after_atomic();
+	external_clocks[clock->id] = NULL;
+	xnclock_deregister(clock);
+}
+EXPORT_SYMBOL_GPL(cobalt_clock_deregister);
+
+struct xnclock *cobalt_clock_find(clockid_t clock_id)
+{
+	struct xnclock *clock = ERR_PTR(-EINVAL);
+	spl_t s;
+	int nr;
+
+	if (clock_id == CLOCK_MONOTONIC ||
+	    clock_id == CLOCK_MONOTONIC_RAW ||
+	    clock_id == CLOCK_REALTIME)
+		return &nkclock;
+
+	if (__COBALT_CLOCK_EXT_P(clock_id)) {
+		nr = __COBALT_CLOCK_EXT_INDEX(clock_id);
+		xnlock_get_irqsave(&nklock, s);
+		if (test_bit(nr, cobalt_clock_extids))
+			clock = external_clocks[nr];
+		xnlock_put_irqrestore(&nklock, s);
+	}
+
+	return clock;
+}
+EXPORT_SYMBOL_GPL(cobalt_clock_find);
+++ linux-patched/kernel/xenomai/posix/monitor.c	2022-03-21 12:58:28.984892911 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/syscall32.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include "internal.h"
+#include "thread.h"
+#include "clock.h"
+#include "monitor.h"
+#include <trace/events/cobalt-posix.h>
+#include <cobalt/kernel/time.h>
+
+/*
+ * The Cobalt monitor is a double-wait condition object, serializing
+ * accesses through a gate. It behaves like a mutex + two condition
+ * variables combo with extended signaling logic. Folding several
+ * conditions and the serialization support into a single object
+ * performs better on low-end hardware caches and allows for specific
+ * optimizations, compared to using separate general-purpose mutex and
+ * condvars. This object is used by the Copperplate interface
+ * internally when it runs over the Cobalt core.
+ *
+ * Threads can wait for some resource(s) to be granted (consumer
+ * side), or wait for the available resource(s) to drain (producer
+ * side).  Therefore, signals are thread-directed for the grant side,
+ * and monitor-directed for the drain side.
+ *
+ * Typically, a consumer would wait for the GRANT condition to be
+ * satisfied, signaling the DRAINED condition when more resources
+ * could be made available if the protocol implements output
+ * contention (e.g. the write side of a message queue waiting for the
+ * consumer to release message slots). Conversely, a producer would
+ * wait for the DRAINED condition to be satisfied, issuing GRANT
+ * signals once more resources have been made available to the
+ * consumer.
+ *
+ * Implementation-wise, the monitor logic is shared with the Cobalt
+ * thread object.
+ */
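+
+/*
+ * Rough call sequence, as can be read from this file (a sketch, not
+ * a formal protocol description): a waiter enters the gate
+ * (monitor_enter), blocks in monitor_wait on the GRANT or DRAINED
+ * condition, then leaves through monitor_exit. A signaler enters the
+ * gate, raises COBALT_MONITOR_GRANTED/DRAINED plus
+ * COBALT_MONITOR_SIGNALED in the shared state, then monitor_exit or
+ * monitor_sync invokes monitor_wakeup() to unblock the satisfied
+ * waiters.
+ */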
+COBALT_SYSCALL(monitor_init, current,
+	       (struct cobalt_monitor_shadow __user *u_mon,
+		clockid_t clk_id, int flags))
+{
+	struct cobalt_monitor_shadow shadow;
+	struct cobalt_monitor_state *state;
+	struct cobalt_monitor *mon;
+	int pshared, tmode, ret;
+	struct cobalt_umm *umm;
+	unsigned long stateoff;
+	spl_t s;
+
+	tmode = clock_flag(TIMER_ABSTIME, clk_id);
+	if (tmode < 0)
+		return -EINVAL;
+
+	mon = xnmalloc(sizeof(*mon));
+	if (mon == NULL)
+		return -ENOMEM;
+
+	pshared = (flags & COBALT_MONITOR_SHARED) != 0;
+	umm = &cobalt_ppd_get(pshared)->umm;
+	state = cobalt_umm_alloc(umm, sizeof(*state));
+	if (state == NULL) {
+		xnfree(mon);
+		return -EAGAIN;
+	}
+
+	ret = xnregistry_enter_anon(mon, &mon->resnode.handle);
+	if (ret) {
+		cobalt_umm_free(umm, state);
+		xnfree(mon);
+		return ret;
+	}
+
+	mon->state = state;
+	xnsynch_init(&mon->gate, XNSYNCH_PI, &state->owner);
+	xnsynch_init(&mon->drain, XNSYNCH_PRIO, NULL);
+	mon->flags = flags;
+	mon->tmode = tmode;
+	INIT_LIST_HEAD(&mon->waiters);
+
+	xnlock_get_irqsave(&nklock, s);
+	cobalt_add_resource(&mon->resnode, monitor, pshared);
+	mon->magic = COBALT_MONITOR_MAGIC;
+	xnlock_put_irqrestore(&nklock, s);
+
+	state->flags = 0;
+	stateoff = cobalt_umm_offset(umm, state);
+	XENO_BUG_ON(COBALT, stateoff != (__u32)stateoff);
+	shadow.flags = flags;
+	shadow.handle = mon->resnode.handle;
+	shadow.state_offset = (__u32)stateoff;
+
+	return cobalt_copy_to_user(u_mon, &shadow, sizeof(*u_mon));
+}
+
+/* nklock held, irqs off */
+static int monitor_enter(xnhandle_t handle, struct xnthread *curr)
+{
+	struct cobalt_monitor *mon;
+	int info;
+
+	mon = xnregistry_lookup(handle, NULL); /* (Re)validate. */
+	if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC)
+		return -EINVAL;
+
+	info = xnsynch_acquire(&mon->gate, XN_INFINITE, XN_RELATIVE);
+	if (info)
+		/* Break or error, no timeout possible. */
+		return info & XNBREAK ? -EINTR : -EINVAL;
+
+	mon->state->flags &= ~(COBALT_MONITOR_SIGNALED|COBALT_MONITOR_BROADCAST);
+
+	return 0;
+}
+
+COBALT_SYSCALL(monitor_enter, primary,
+	       (struct cobalt_monitor_shadow __user *u_mon))
+{
+	struct xnthread *curr = xnthread_current();
+	xnhandle_t handle;
+	int ret;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mon->handle);
+
+	xnlock_get_irqsave(&nklock, s);
+	ret = monitor_enter(handle, curr);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+/* nklock held, irqs off */
+static void monitor_wakeup(struct cobalt_monitor *mon)
+{
+	struct cobalt_monitor_state *state = mon->state;
+	struct cobalt_thread *thread, *tmp;
+	struct xnthread *p;
+	int bcast;
+
+	/*
+	 * Having the GRANT signal pending does not necessarily mean
+	 * that somebody is actually waiting for it, so we have to
+	 * check both conditions below.
+	 */
+	bcast = (state->flags & COBALT_MONITOR_BROADCAST) != 0;
+	if ((state->flags & COBALT_MONITOR_GRANTED) == 0 ||
+	    list_empty(&mon->waiters))
+		goto drain;
+
+	/*
+	 * Unblock waiters requesting a grant: either only those
+	 * which actually received it, or all of them if the
+	 * broadcast bit is set.
+	 *
+	 * We update the PENDED flag to inform userland about the
+	 * presence of waiters, so that it may decide not to issue any
+	 * syscall for exiting the monitor if nobody else is waiting
+	 * at the gate.
+	 */
+	list_for_each_entry_safe(thread, tmp, &mon->waiters, monitor_link) {
+		p = &thread->threadbase;
+		/*
+		 * A thread might receive a grant signal even though
+		 * it is not waiting on a monitor, or it might have
+		 * timed out before we got here, so we really have to
+		 * check that ->wchan does match our sleep queue.
+		 */
+		if (bcast ||
+		    (p->u_window->grant_value && p->wchan == &thread->monitor_synch)) {
+			xnsynch_wakeup_this_sleeper(&thread->monitor_synch, p);
+			list_del_init(&thread->monitor_link);
+		}
+	}
+drain:
+	/*
+	 * Unblock threads waiting for a drain event if that signal is
+	 * pending, either one or all, depending on the broadcast
+	 * flag.
+	 */
+	if ((state->flags & COBALT_MONITOR_DRAINED) != 0 &&
+	    xnsynch_pended_p(&mon->drain)) {
+		if (bcast)
+			xnsynch_flush(&mon->drain, 0);
+		else
+			xnsynch_wakeup_one_sleeper(&mon->drain);
+	}
+
+	if (list_empty(&mon->waiters) && !xnsynch_pended_p(&mon->drain))
+		state->flags &= ~COBALT_MONITOR_PENDED;
+}
+
+int __cobalt_monitor_wait(struct cobalt_monitor_shadow __user *u_mon,
+			  int event, const struct timespec64 *ts,
+			  int __user *u_ret)
+{
+	struct cobalt_thread *curr = cobalt_current_thread();
+	struct cobalt_monitor_state *state;
+	xnticks_t timeout = XN_INFINITE;
+	int ret = 0, opret = 0, info;
+	struct cobalt_monitor *mon;
+	struct xnsynch *synch;
+	xnhandle_t handle;
+	xntmode_t tmode;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mon->handle);
+
+	if (ts) {
+		if (!timespec64_valid(ts))
+			return -EINVAL;
+
+		timeout = ts2ns(ts) + 1;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mon = xnregistry_lookup(handle, NULL);
+	if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * The current thread might have sent signals to the monitor
+	 * it wants to sleep on: wake up satisfied waiters before
+	 * going to sleep.
+	 */
+	state = mon->state;
+	if (state->flags & COBALT_MONITOR_SIGNALED)
+		monitor_wakeup(mon);
+
+	synch = &curr->monitor_synch;
+	if (event & COBALT_MONITOR_WAITDRAIN)
+		synch = &mon->drain;
+	else {
+		curr->threadbase.u_window->grant_value = 0;
+		list_add_tail(&curr->monitor_link, &mon->waiters);
+	}
+
+	/*
+	 * Tell userland that somebody is now waiting for a signal, so
+	 * that later exiting the monitor on the producer side will
+	 * trigger a wakeup syscall.
+	 *
+	 * CAUTION: we must raise the PENDED flag while holding the
+	 * gate mutex, to prevent a signal from sneaking in from a
+	 * remote CPU without the producer issuing the corresponding
+	 * wakeup call when dropping the gate lock.
+	 */
+	state->flags |= COBALT_MONITOR_PENDED;
+
+	tmode = ts ? mon->tmode : XN_RELATIVE;
+
+	/* Release the gate prior to waiting, all atomically. */
+	xnsynch_release(&mon->gate, &curr->threadbase);
+
+	info = xnsynch_sleep_on(synch, timeout, tmode);
+	if (info) {
+		if ((event & COBALT_MONITOR_WAITDRAIN) == 0 &&
+		    !list_empty(&curr->monitor_link))
+			list_del_init(&curr->monitor_link);
+
+		if (list_empty(&mon->waiters) && !xnsynch_pended_p(&mon->drain))
+			state->flags &= ~COBALT_MONITOR_PENDED;
+
+		if (info & XNBREAK) {
+			opret = -EINTR;
+			goto out;
+		}
+		if (info & XNTIMEO)
+			opret = -ETIMEDOUT;
+	}
+
+	ret = monitor_enter(handle, &curr->threadbase);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	__xn_put_user(opret, u_ret);
+
+	return ret;
+}
+
+int __cobalt_monitor_wait64(struct cobalt_monitor_shadow __user *u_mon,
+			    int event,
+			    const struct __kernel_timespec __user *u_ts,
+			    int __user *u_ret)
+{
+	struct timespec64 ts, *tsp = NULL;
+	int ret;
+
+	if (u_ts) {
+		tsp = &ts;
+		ret = cobalt_get_timespec64(&ts, u_ts);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_monitor_wait(u_mon, event, tsp, u_ret);
+}
+
+COBALT_SYSCALL(monitor_wait, nonrestartable,
+	       (struct cobalt_monitor_shadow __user *u_mon,
+	       int event, const struct __user_old_timespec __user *u_ts,
+	       int __user *u_ret))
+{
+	struct timespec64 ts, *tsp = NULL;
+	int ret;
+
+	if (u_ts) {
+		tsp = &ts;
+		ret = cobalt_get_u_timespec(&ts, u_ts);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_monitor_wait(u_mon, event, tsp, u_ret);
+}
+
+COBALT_SYSCALL(monitor_wait64, nonrestartable,
+	       (struct cobalt_monitor_shadow __user *u_mon, int event,
+		const struct __kernel_timespec __user *u_ts, int __user *u_ret))
+{
+	return __cobalt_monitor_wait64(u_mon, event, u_ts, u_ret);
+}
+
+COBALT_SYSCALL(monitor_sync, nonrestartable,
+	       (struct cobalt_monitor_shadow __user *u_mon))
+{
+	struct cobalt_monitor *mon;
+	struct xnthread *curr;
+	xnhandle_t handle;
+	int ret = 0;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mon->handle);
+	curr = xnthread_current();
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mon = xnregistry_lookup(handle, NULL);
+	if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC)
+		ret = -EINVAL;
+	else if (mon->state->flags & COBALT_MONITOR_SIGNALED) {
+		monitor_wakeup(mon);
+		xnsynch_release(&mon->gate, curr);
+		xnsched_run();
+		ret = monitor_enter(handle, curr);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(monitor_exit, primary,
+	       (struct cobalt_monitor_shadow __user *u_mon))
+{
+	struct cobalt_monitor *mon;
+	struct xnthread *curr;
+	xnhandle_t handle;
+	int ret = 0;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mon->handle);
+	curr = xnthread_current();
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mon = xnregistry_lookup(handle, NULL);
+	if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC)
+		ret = -EINVAL;
+	else {
+		if (mon->state->flags & COBALT_MONITOR_SIGNALED)
+			monitor_wakeup(mon);
+
+		xnsynch_release(&mon->gate, curr);
+		xnsched_run();
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(monitor_destroy, primary,
+	       (struct cobalt_monitor_shadow __user *u_mon))
+{
+	struct cobalt_monitor_state *state;
+	struct cobalt_monitor *mon;
+	struct xnthread *curr;
+	xnhandle_t handle;
+	int ret = 0;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mon->handle);
+	curr = xnthread_current();
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mon = xnregistry_lookup(handle, NULL);
+	if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	state = mon->state;
+	if ((state->flags & COBALT_MONITOR_PENDED) != 0 ||
+	    xnsynch_pended_p(&mon->drain) || !list_empty(&mon->waiters)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	/*
+	 * A monitor must be destroyed by the thread currently holding
+	 * its gate lock.
+	 */
+	if (xnsynch_owner_check(&mon->gate, curr)) {
+		ret = -EPERM;
+		goto fail;
+	}
+
+	cobalt_monitor_reclaim(&mon->resnode, s); /* drops lock */
+
+	xnsched_run();
+
+	return 0;
+ fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+void cobalt_monitor_reclaim(struct cobalt_resnode *node, spl_t s)
+{
+	struct cobalt_monitor *mon;
+	struct cobalt_umm *umm;
+	int pshared;
+
+	mon = container_of(node, struct cobalt_monitor, resnode);
+	pshared = (mon->flags & COBALT_MONITOR_SHARED) != 0;
+	xnsynch_destroy(&mon->gate);
+	xnsynch_destroy(&mon->drain);
+	xnregistry_remove(node->handle);
+	cobalt_del_resource(node);
+	cobalt_mark_deleted(mon);
+	xnlock_put_irqrestore(&nklock, s);
+
+	umm = &cobalt_ppd_get(pshared)->umm;
+	cobalt_umm_free(umm, mon->state);
+	xnfree(mon);
+}
+++ linux-patched/kernel/xenomai/posix/syscall32.h	2022-03-21 12:58:28.980892950 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/gen-syscall-entries.sh	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_SYSCALL32_H
+#define _COBALT_POSIX_SYSCALL32_H
+
+#include <cobalt/kernel/compat.h>
+
+struct cobalt_mutex_shadow;
+struct cobalt_event_shadow;
+struct cobalt_cond_shadow;
+struct cobalt_sem_shadow;
+struct cobalt_monitor_shadow;
+
+COBALT_SYSCALL32emu_DECL(thread_create,
+			 (compat_ulong_t pth,
+			  int policy,
+			  const struct compat_sched_param_ex __user *u_param_ex,
+			  int xid,
+			  __u32 __user *u_winoff));
+
+COBALT_SYSCALL32emu_DECL(thread_setschedparam_ex,
+			 (compat_ulong_t pth,
+			  int policy,
+			  const struct compat_sched_param_ex __user *u_param,
+			  __u32 __user *u_winoff,
+			  int __user *u_promoted));
+
+COBALT_SYSCALL32emu_DECL(thread_getschedparam_ex,
+			 (compat_ulong_t pth,
+			  int __user *u_policy,
+			  struct compat_sched_param_ex __user *u_param));
+
+COBALT_SYSCALL32emu_DECL(thread_setschedprio,
+			 (compat_ulong_t pth,
+			  int prio,
+			  __u32 __user *u_winoff,
+			  int __user *u_promoted));
+
+COBALT_SYSCALL32emu_DECL(clock_getres,
+			 (clockid_t clock_id,
+			  struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(clock_getres64,
+			 (clockid_t clock_id,
+			  struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(clock_gettime,
+			 (clockid_t clock_id,
+			  struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(clock_gettime64,
+			 (clockid_t clock_id,
+			  struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(clock_settime,
+			 (clockid_t clock_id,
+			  const struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(clock_settime64,
+			 (clockid_t clock_id,
+			  const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(clock_adjtime,
+			 (clockid_t clock_id,
+			  struct old_timex32 __user *u_tx));
+
+COBALT_SYSCALL32emu_DECL(clock_adjtime64,
+			 (clockid_t clock_id,
+			  struct __kernel_timex __user *u_tx));
+
+COBALT_SYSCALL32emu_DECL(clock_nanosleep,
+			 (clockid_t clock_id, int flags,
+			  const struct old_timespec32 __user *u_rqt,
+			  struct old_timespec32 __user *u_rmt));
+
+COBALT_SYSCALL32emu_DECL(clock_nanosleep64,
+			 (clockid_t clock_id, int flags,
+			  const struct __kernel_timespec __user *u_rqt,
+			  struct __kernel_timespec __user *u_rmt));
+
+
+COBALT_SYSCALL32emu_DECL(mutex_timedlock,
+			 (struct cobalt_mutex_shadow __user *u_mx,
+			  const struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(mutex_timedlock64,
+			 (struct cobalt_mutex_shadow __user *u_mx,
+			  const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(cond_wait_prologue,
+			 (struct cobalt_cond_shadow __user *u_cnd,
+			  struct cobalt_mutex_shadow __user *u_mx,
+			  int *u_err,
+			  unsigned int timed,
+			  struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(mq_open,
+			 (const char __user *u_name, int oflags,
+			  mode_t mode, struct compat_mq_attr __user *u_attr));
+
+COBALT_SYSCALL32emu_DECL(mq_getattr,
+			 (mqd_t uqd, struct compat_mq_attr __user *u_attr));
+
+COBALT_SYSCALL32emu_DECL(mq_timedsend,
+			 (mqd_t uqd, const void __user *u_buf, size_t len,
+			  unsigned int prio,
+			  const struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(mq_timedsend64,
+			 (mqd_t uqd, const void __user *u_buf, size_t len,
+			  unsigned int prio,
+			  const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(mq_timedreceive,
+			 (mqd_t uqd, void __user *u_buf,
+			  compat_ssize_t __user *u_len,
+			  unsigned int __user *u_prio,
+			  const struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(mq_timedreceive64,
+			 (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len,
+			  unsigned int __user *u_prio,
+			  const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(mq_notify,
+			 (mqd_t fd, const struct compat_sigevent *__user u_cev));
+
+COBALT_SYSCALL32emu_DECL(sched_weightprio,
+			 (int policy,
+			  const struct compat_sched_param_ex __user *u_param));
+
+COBALT_SYSCALL32emu_DECL(sched_setconfig_np,
+			 (int cpu, int policy,
+			  union compat_sched_config __user *u_config,
+			  size_t len));
+
+COBALT_SYSCALL32emu_DECL(sched_getconfig_np,
+			 (int cpu, int policy,
+			  union compat_sched_config __user *u_config,
+			  size_t len));
+
+COBALT_SYSCALL32emu_DECL(sched_setscheduler_ex,
+			 (compat_pid_t pid,
+			  int policy,
+			  const struct compat_sched_param_ex __user *u_param,
+			  __u32 __user *u_winoff,
+			  int __user *u_promoted));
+
+COBALT_SYSCALL32emu_DECL(sched_getscheduler_ex,
+			 (compat_pid_t pid,
+			  int __user *u_policy,
+			  struct compat_sched_param_ex __user *u_param));
+
+COBALT_SYSCALL32emu_DECL(timer_create,
+			 (clockid_t clock,
+			  const struct compat_sigevent __user *u_sev,
+			  timer_t __user *u_tm));
+
+COBALT_SYSCALL32emu_DECL(timer_settime,
+			 (timer_t tm, int flags,
+			  const struct old_itimerspec32 __user *u_newval,
+			  struct old_itimerspec32 __user *u_oldval));
+
+COBALT_SYSCALL32emu_DECL(timer_gettime,
+			 (timer_t tm,
+			  struct old_itimerspec32 __user *u_val));
+
+COBALT_SYSCALL32emu_DECL(timerfd_settime,
+			 (int fd, int flags,
+			  const struct old_itimerspec32 __user *new_value,
+			  struct old_itimerspec32 __user *old_value));
+
+COBALT_SYSCALL32emu_DECL(timerfd_gettime,
+			 (int fd, struct old_itimerspec32 __user *value));
+
+COBALT_SYSCALL32emu_DECL(sigwait,
+			 (const compat_sigset_t __user *u_set,
+			  int __user *u_sig));
+
+COBALT_SYSCALL32emu_DECL(sigtimedwait,
+			 (const compat_sigset_t __user *u_set,
+			  struct compat_siginfo __user *u_si,
+			  const struct old_timespec32 __user *u_timeout));
+
+COBALT_SYSCALL32emu_DECL(sigtimedwait64,
+			 (const compat_sigset_t __user *u_set,
+			  struct compat_siginfo __user *u_si,
+			  const struct __kernel_timespec __user *u_timeout));
+
+COBALT_SYSCALL32emu_DECL(sigwaitinfo,
+			 (const compat_sigset_t __user *u_set,
+			  struct compat_siginfo __user *u_si));
+
+COBALT_SYSCALL32emu_DECL(sigpending,
+			 (compat_old_sigset_t __user *u_set));
+
+COBALT_SYSCALL32emu_DECL(sigqueue,
+			 (pid_t pid, int sig,
+			  const union compat_sigval __user *u_value));
+
+COBALT_SYSCALL32emu_DECL(monitor_wait,
+			 (struct cobalt_monitor_shadow __user *u_mon,
+			  int event, const struct old_timespec32 __user *u_ts,
+			  int __user *u_ret));
+
+COBALT_SYSCALL32emu_DECL(monitor_wait64,
+			 (struct cobalt_monitor_shadow __user *u_mon,
+			  int event,
+			  const struct __kernel_timespec __user *u_ts,
+			  int __user *u_ret));
+
+COBALT_SYSCALL32emu_DECL(event_wait,
+			 (struct cobalt_event_shadow __user *u_event,
+			  unsigned int bits,
+			  unsigned int __user *u_bits_r,
+			  int mode, const struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(event_wait64,
+			 (struct cobalt_event_shadow __user *u_event,
+			  unsigned int bits,
+			  unsigned int __user *u_bits_r,
+			  int mode,
+			  const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(select,
+			 (int nfds,
+			  compat_fd_set __user *u_rfds,
+			  compat_fd_set __user *u_wfds,
+			  compat_fd_set __user *u_xfds,
+			  struct old_timeval32 __user *u_tv));
+
+COBALT_SYSCALL32emu_DECL(recvmsg,
+			 (int fd, struct compat_msghdr __user *umsg,
+			  int flags));
+
+COBALT_SYSCALL32emu_DECL(recvmmsg,
+			 (int fd, struct compat_mmsghdr __user *u_msgvec,
+			  unsigned int vlen,
+			  unsigned int flags, struct old_timespec32 *u_timeout));
+
+COBALT_SYSCALL32emu_DECL(recvmmsg64,
+			 (int fd, struct compat_mmsghdr __user *u_msgvec,
+			  unsigned int vlen,
+			  unsigned int flags,
+			  struct __kernel_timespec *u_timeout));
+
+COBALT_SYSCALL32emu_DECL(sendmsg,
+			 (int fd, struct compat_msghdr __user *umsg,
+			  int flags));
+
+COBALT_SYSCALL32emu_DECL(sendmmsg,
+			 (int fd, struct compat_mmsghdr __user *u_msgvec, unsigned int vlen,
+			  unsigned int flags));
+
+COBALT_SYSCALL32emu_DECL(mmap,
+			 (int fd,
+			  struct compat_rtdm_mmap_request __user *u_rma,
+			  compat_uptr_t __user *u_addrp));
+
+COBALT_SYSCALL32emu_DECL(backtrace,
+			 (int nr, compat_ulong_t __user *u_backtrace,
+			  int reason));
+
+COBALT_SYSCALL32emu_DECL(sem_open,
+			 (compat_uptr_t __user *u_addrp,
+			  const char __user *u_name,
+			  int oflags, mode_t mode, unsigned int value));
+
+COBALT_SYSCALL32emu_DECL(sem_timedwait,
+			 (struct cobalt_sem_shadow __user *u_sem,
+			  const struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(sem_timedwait64,
+			 (struct cobalt_sem_shadow __user *u_sem,
+			  const struct __kernel_timespec __user *u_ts));
+
+#endif /* !_COBALT_POSIX_SYSCALL32_H */
+++ linux-patched/kernel/xenomai/posix/gen-syscall-entries.sh	2022-03-21 12:58:28.977892979 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/mutex.c	1970-01-01 01:00:00.000000000 +0100
+#! /bin/sh
+
+set -e
+
+shift
+
+awk '
+match($0, /COBALT_SYSCALL\([^,]*,[ \t]*[^,]*/)  {
+	str=substr($0, RSTART + 15, RLENGTH - 15)
+	match(str, /[^, \t]*/)
+	syscall=substr(str, RSTART, RLENGTH)
+
+	if (syscall == "") {
+		print "Failed to find syscall name in line " $0 > "/dev/stderr"
+		exit 1
+	}
+
+	calls = calls "	__COBALT_CALL_ENTRY(" syscall ") \\\n"
+	modes = modes "	__COBALT_MODE(" str ") \\\n"
+	next
+}
+
+/COBALT_SYSCALL\(/  {
+	print "Failed to parse line " $0 > "/dev/stderr"
+	exit 1
+}
+
+END {
+	print "#define __COBALT_CALL_ENTRIES \\\n" calls "	/* end */"
+	print "#define __COBALT_CALL_MODES \\\n" modes "	/* end */"
+}
+' $*
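+
+# Illustrative output only (example, not emitted by the build): for an
+# input line such as
+#	COBALT_SYSCALL(clock_gettime, current, ...)
+# the awk program above adds
+#	__COBALT_CALL_ENTRY(clock_gettime) \
+#	__COBALT_MODE(clock_gettime, current) \
+# to the generated __COBALT_CALL_ENTRIES and __COBALT_CALL_MODES macros.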
+++ linux-patched/kernel/xenomai/posix/mutex.c	2022-03-21 12:58:28.973893018 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/io.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "internal.h"
+#include "thread.h"
+#include "mutex.h"
+#include "cond.h"
+#include "clock.h"
+#include <cobalt/kernel/time.h>
+
+static int cobalt_mutex_init_inner(struct cobalt_mutex_shadow *shadow,
+				   struct cobalt_mutex *mutex,
+				   struct cobalt_mutex_state *state,
+				   const struct cobalt_mutexattr *attr)
+{
+	int synch_flags = XNSYNCH_PRIO | XNSYNCH_OWNER;
+	struct cobalt_umm *umm;
+	spl_t s;
+	int ret;
+
+	ret = xnregistry_enter_anon(mutex, &mutex->resnode.handle);
+	if (ret < 0)
+		return ret;
+
+	umm = &cobalt_ppd_get(attr->pshared)->umm;
+	shadow->handle = mutex->resnode.handle;
+	shadow->magic = COBALT_MUTEX_MAGIC;
+	shadow->lockcnt = 0;
+	shadow->attr = *attr;
+	shadow->state_offset = cobalt_umm_offset(umm, state);
+
+	mutex->magic = COBALT_MUTEX_MAGIC;
+
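+	/*
+	 * The ceiling value is stored with a +1 bias, presumably so
+	 * that a zero ceiling in the shared state can denote "no
+	 * priority protection" (the non-PP branch below clears it).
+	 */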
+	if (attr->protocol == PTHREAD_PRIO_PROTECT) {
+		state->ceiling = attr->ceiling + 1;
+		xnsynch_init_protect(&mutex->synchbase, synch_flags,
+				     &state->owner, &state->ceiling);
+	} else {
+		state->ceiling = 0;
+		if (attr->protocol == PTHREAD_PRIO_INHERIT)
+			synch_flags |= XNSYNCH_PI;
+		xnsynch_init(&mutex->synchbase, synch_flags, &state->owner);
+	}
+
+	state->flags = (attr->type == PTHREAD_MUTEX_ERRORCHECK
+			? COBALT_MUTEX_ERRORCHECK : 0);
+	mutex->attr = *attr;
+	INIT_LIST_HEAD(&mutex->conds);
+
+	xnlock_get_irqsave(&nklock, s);
+	cobalt_add_resource(&mutex->resnode, mutex, attr->pshared);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+/* must be called with nklock locked, interrupts off. */
+int __cobalt_mutex_acquire_unchecked(struct xnthread *cur,
+				     struct cobalt_mutex *mutex,
+				     const struct timespec64 *ts)
+{
+	int ret;
+
+	if (ts) {
+		if (!timespec64_valid(ts))
+			return -EINVAL;
+		ret = xnsynch_acquire(&mutex->synchbase, ts2ns(ts) + 1, XN_REALTIME);
+	} else
+		ret = xnsynch_acquire(&mutex->synchbase, XN_INFINITE, XN_RELATIVE);
+
+	if (ret) {
+		if (ret & XNBREAK)
+			return -EINTR;
+		if (ret & XNTIMEO)
+			return -ETIMEDOUT;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cobalt_mutex_release(struct xnthread *curr,
+			 struct cobalt_mutex *mutex)
+{	/* nklock held, irqs off */
+	struct cobalt_mutex_state *state;
+	struct cobalt_cond *cond;
+	unsigned long flags;
+	int need_resched;
+
+	if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct cobalt_mutex))
+		 return -EINVAL;
+
+	if (mutex->resnode.scope !=
+	    cobalt_current_resources(mutex->attr.pshared))
+		return -EPERM;
+
+	/*
+	 * We are about to release a mutex which is still pending PP
+	 * (i.e. we never got scheduled out while holding it). Clear
+	 * the lazy handle.
+	 */
+	if (mutex->resnode.handle == curr->u_window->pp_pending)
+		curr->u_window->pp_pending = XN_NO_HANDLE;
+
+	state = container_of(mutex->synchbase.fastlock, struct cobalt_mutex_state, owner);
+	flags = state->flags;
+	need_resched = 0;
+	if ((flags & COBALT_MUTEX_COND_SIGNAL)) {
+		state->flags = flags & ~COBALT_MUTEX_COND_SIGNAL;
+		if (!list_empty(&mutex->conds)) {
+			list_for_each_entry(cond, &mutex->conds, mutex_link)
+				need_resched |=
+				cobalt_cond_deferred_signals(cond);
+		}
+	}
+	need_resched |= xnsynch_release(&mutex->synchbase, curr);
+
+	return need_resched;
+}
+
+int __cobalt_mutex_timedlock_break(struct cobalt_mutex_shadow __user *u_mx,
+				   const void __user *u_ts,
+				   int (*fetch_timeout)(struct timespec64 *ts,
+							const void __user *u_ts))
+{
+	struct xnthread *curr = xnthread_current();
+	struct timespec64 ts, *tsp = NULL;
+	struct cobalt_mutex *mutex;
+	xnhandle_t handle;
+	spl_t s;
+	int ret;
+
+	/* We need a valid thread handle for the fast lock. */
+	if (curr->handle == XN_NO_HANDLE)
+		return -EPERM;
+
+	handle = cobalt_get_handle_from_user(&u_mx->handle);
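+	/*
+	 * Fetching the timeout may fault on user memory, which is not
+	 * allowed while holding nklock with irqs off; so on the slow
+	 * path we drop the lock, fetch it, then jump back here to
+	 * revalidate the handle.
+	 */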
+redo:
+	xnlock_get_irqsave(&nklock, s);
+
+	mutex = xnregistry_lookup(handle, NULL);
+	if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct cobalt_mutex)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (mutex->resnode.scope !=
+	    cobalt_current_resources(mutex->attr.pshared)) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	xnthread_commit_ceiling(curr);
+
+	if (xnsynch_owner_check(&mutex->synchbase, curr)) {
+		/* Check if we can take the mutex immediately */
+		ret = xnsynch_try_acquire(&mutex->synchbase);
+		if (ret != -EBUSY)
+			goto out;
+
+		if (fetch_timeout) {
+			xnlock_put_irqrestore(&nklock, s);
+			ret = fetch_timeout(&ts, u_ts);
+			if (ret)
+				return ret;
+
+			fetch_timeout = NULL;
+			tsp = &ts;
+			goto redo; /* Revalidate handle. */
+		}
+		ret = __cobalt_mutex_acquire_unchecked(curr, mutex, tsp);
+		xnlock_put_irqrestore(&nklock, s);
+		return ret;
+	}
+
+	/* We already own the mutex, something looks wrong. */
+
+	ret = -EBUSY;
+	switch(mutex->attr.type) {
+	case PTHREAD_MUTEX_NORMAL:
+		/* Attempting to relock a normal mutex, deadlock. */
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_USER))
+			printk(XENO_WARNING
+			       "thread %s deadlocks on non-recursive mutex\n",
+			       curr->name);
+		/* Make the caller hang. */
+		__cobalt_mutex_acquire_unchecked(curr, mutex, NULL);
+		break;
+
+	case PTHREAD_MUTEX_ERRORCHECK:
+	case PTHREAD_MUTEX_RECURSIVE:
+		/*
+		 * Recursive mutexes are handled in user-space, so
+		 * these cases should never happen.
+		 */
+		ret = -EINVAL;
+		break;
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(mutex_check_init, current,
+	       (struct cobalt_mutex_shadow __user *u_mx))
+{
+	struct cobalt_mutex *mutex;
+	xnhandle_t handle;
+	int err;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mx->handle);
+
+	xnlock_get_irqsave(&nklock, s);
+	mutex = xnregistry_lookup(handle, NULL);
+	if (cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, typeof(*mutex)))
+		/* mutex is already in a queue. */
+		err = -EBUSY;
+	else
+		err = 0;
+
+	xnlock_put_irqrestore(&nklock, s);
+	return err;
+}
+
+COBALT_SYSCALL(mutex_init, current,
+	       (struct cobalt_mutex_shadow __user *u_mx,
+		const struct cobalt_mutexattr __user *u_attr))
+{
+	struct cobalt_mutex_state *state;
+	struct cobalt_mutex_shadow mx;
+	struct cobalt_mutexattr attr;
+	struct cobalt_mutex *mutex;
+	int ret;
+
+	if (cobalt_copy_from_user(&mx, u_mx, sizeof(mx)))
+		return -EFAULT;
+
+	if (cobalt_copy_from_user(&attr, u_attr, sizeof(attr)))
+		return -EFAULT;
+
+	mutex = xnmalloc(sizeof(*mutex));
+	if (mutex == NULL)
+		return -ENOMEM;
+
+	state = cobalt_umm_alloc(&cobalt_ppd_get(attr.pshared)->umm,
+				 sizeof(*state));
+	if (state == NULL) {
+		xnfree(mutex);
+		return -EAGAIN;
+	}
+
+	ret = cobalt_mutex_init_inner(&mx, mutex, state, &attr);
+	if (ret) {
+		xnfree(mutex);
+		cobalt_umm_free(&cobalt_ppd_get(attr.pshared)->umm, state);
+		return ret;
+	}
+
+	return cobalt_copy_to_user(u_mx, &mx, sizeof(*u_mx));
+}
+
+COBALT_SYSCALL(mutex_destroy, current,
+	       (struct cobalt_mutex_shadow __user *u_mx))
+{
+	struct cobalt_mutex_shadow mx;
+	struct cobalt_mutex *mutex;
+	spl_t s;
+	int ret;
+
+	if (cobalt_copy_from_user(&mx, u_mx, sizeof(mx)))
+		return -EFAULT;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mutex = xnregistry_lookup(mx.handle, NULL);
+	if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, typeof(*mutex))) {
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (cobalt_current_resources(mutex->attr.pshared) !=
+	    mutex->resnode.scope) {
+		ret = -EPERM;
+		goto fail;
+	}
+	if (xnsynch_fast_owner_check(mutex->synchbase.fastlock,
+					XN_NO_HANDLE) != 0 ||
+	    !list_empty(&mutex->conds)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	cobalt_mutex_reclaim(&mutex->resnode, s); /* drops lock */
+
+	cobalt_mark_deleted(&mx);
+
+	return cobalt_copy_to_user(u_mx, &mx, sizeof(*u_mx));
+fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(mutex_trylock, primary,
+	       (struct cobalt_mutex_shadow __user *u_mx))
+{
+	struct xnthread *curr = xnthread_current();
+	struct cobalt_mutex *mutex;
+	xnhandle_t handle;
+	spl_t s;
+	int ret;
+
+	handle = cobalt_get_handle_from_user(&u_mx->handle);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mutex = xnregistry_lookup(handle, NULL);
+	if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, typeof(*mutex))) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	xnthread_commit_ceiling(curr);
+
+	ret = xnsynch_try_acquire(&mutex->synchbase);
+
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(mutex_lock, primary,
+	       (struct cobalt_mutex_shadow __user *u_mx))
+{
+	return __cobalt_mutex_timedlock_break(u_mx, NULL, NULL);
+}
+
+static inline int mutex_fetch_timeout(struct timespec64 *ts,
+				      const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_u_timespec(ts, u_ts);
+}
+
+static inline int mutex_fetch_timeout64(struct timespec64 *ts,
+					const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_timespec64(ts, u_ts);
+}
+
+int __cobalt_mutex_timedlock64(struct cobalt_mutex_shadow __user *u_mx,
+			       const void __user *u_ts)
+{
+	return __cobalt_mutex_timedlock_break(u_mx, u_ts,
+					      mutex_fetch_timeout64);
+}
+
+COBALT_SYSCALL(mutex_timedlock, primary,
+	       (struct cobalt_mutex_shadow __user *u_mx,
+		const struct __user_old_timespec __user *u_ts))
+{
+	return __cobalt_mutex_timedlock_break(u_mx, u_ts, mutex_fetch_timeout);
+}
+
+COBALT_SYSCALL(mutex_timedlock64, primary,
+	       (struct cobalt_mutex_shadow __user *u_mx,
+		const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_mutex_timedlock64(u_mx, u_ts);
+}
+
+COBALT_SYSCALL(mutex_unlock, nonrestartable,
+	       (struct cobalt_mutex_shadow __user *u_mx))
+{
+	struct cobalt_mutex *mutex;
+	struct xnthread *curr;
+	xnhandle_t handle;
+	int ret;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mx->handle);
+	curr = xnthread_current();
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mutex = xnregistry_lookup(handle, NULL);
+	ret = cobalt_mutex_release(curr, mutex);
+	if (ret > 0) {
+		xnsched_run();
+		ret = 0;
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+void cobalt_mutex_reclaim(struct cobalt_resnode *node, spl_t s)
+{
+	struct cobalt_mutex_state *state;
+	struct cobalt_mutex *mutex;
+	int pshared;
+
+	mutex = container_of(node, struct cobalt_mutex, resnode);
+	state = container_of(mutex->synchbase.fastlock, struct cobalt_mutex_state, owner);
+	pshared = mutex->attr.pshared;
+	xnregistry_remove(node->handle);
+	cobalt_del_resource(node);
+	xnsynch_destroy(&mutex->synchbase);
+	cobalt_mark_deleted(mutex);
+	xnlock_put_irqrestore(&nklock, s);
+
+	cobalt_umm_free(&cobalt_ppd_get(pshared)->umm, state);
+	xnfree(mutex);
+}
+
+struct xnsynch *lookup_lazy_pp(xnhandle_t handle)
+{				/* nklock held, irqs off */
+	struct cobalt_mutex *mutex;
+
+	/* Only mutexes may be PP-enabled. */
+
+	mutex = xnregistry_lookup(handle, NULL);
+	if (mutex == NULL ||
+	    !cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct cobalt_mutex) ||
+	    mutex->attr.protocol != PTHREAD_PRIO_PROTECT)
+		return NULL;
+
+	return &mutex->synchbase;
+}
+++ linux-patched/kernel/xenomai/posix/io.h	2022-03-21 12:58:28.969893057 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/monitor.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_IO_H
+#define _COBALT_POSIX_IO_H
+
+#include <rtdm/rtdm.h>
+#include <xenomai/posix/syscall.h>
+#include <cobalt/kernel/select.h>
+
+int __cobalt_select(int nfds, void __user *u_rfds, void __user *u_wfds,
+		    void __user *u_xfds, void __user *u_tv, bool compat);
+
+COBALT_SYSCALL_DECL(open,
+		    (const char __user *u_path, int oflag));
+
+COBALT_SYSCALL_DECL(socket,
+		    (int protocol_family,
+		     int socket_type, int protocol));
+
+COBALT_SYSCALL_DECL(close, (int fd));
+
+COBALT_SYSCALL_DECL(fcntl, (int fd, int cmd, long arg));
+
+COBALT_SYSCALL_DECL(ioctl,
+		    (int fd, unsigned int request, void __user *arg));
+
+COBALT_SYSCALL_DECL(read,
+		    (int fd, void __user *buf, size_t size));
+
+COBALT_SYSCALL_DECL(write,
+		    (int fd, const void __user *buf, size_t size));
+
+COBALT_SYSCALL_DECL(recvmsg,
+		    (int fd, struct user_msghdr __user *umsg, int flags));
+
+COBALT_SYSCALL_DECL(recvmmsg,
+		    (int fd, struct mmsghdr __user *u_msgvec, unsigned int vlen,
+		     unsigned int flags, struct __user_old_timespec __user *u_timeout));
+
+COBALT_SYSCALL_DECL(recvmmsg64,
+		    (int fd, struct mmsghdr __user *u_msgvec, unsigned int vlen,
+		     unsigned int flags,
+		     struct __kernel_timespec __user *u_timeout));
+
+COBALT_SYSCALL_DECL(sendmsg,
+		    (int fd, struct user_msghdr __user *umsg, int flags));
+
+COBALT_SYSCALL_DECL(sendmmsg,
+		    (int fd, struct mmsghdr __user *u_msgvec,
+		     unsigned int vlen, unsigned int flags));
+
+COBALT_SYSCALL_DECL(mmap,
+		    (int fd, struct _rtdm_mmap_request __user *u_rma,
+		     void __user * __user *u_addrp));
+
+COBALT_SYSCALL_DECL(select,
+		    (int nfds,
+		     fd_set __user *u_rfds,
+		     fd_set __user *u_wfds,
+		     fd_set __user *u_xfds,
+		     struct __kernel_old_timeval __user *u_tv));
+
+#endif /* !_COBALT_POSIX_IO_H */
+++ linux-patched/kernel/xenomai/posix/monitor.h	2022-03-21 12:58:28.966893086 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/cond.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_POSIX_MONITOR_H
+#define _COBALT_POSIX_MONITOR_H
+
+#include <cobalt/kernel/synch.h>
+#include <cobalt/uapi/monitor.h>
+#include <xenomai/posix/syscall.h>
+#include <xenomai/posix/process.h>
+
+struct cobalt_resources;
+struct cobalt_process;
+
+struct cobalt_monitor {
+	unsigned int magic;
+	struct xnsynch gate;
+	struct xnsynch drain;
+	struct cobalt_monitor_state *state;
+	struct list_head waiters;
+	int flags;
+	xntmode_t tmode;
+	struct cobalt_resnode resnode;
+};
+
+int __cobalt_monitor_wait(struct cobalt_monitor_shadow __user *u_mon,
+			  int event, const struct timespec64 *ts,
+			  int __user *u_ret);
+
+int __cobalt_monitor_wait64(struct cobalt_monitor_shadow __user *u_mon,
+			    int event,
+			    const struct __kernel_timespec __user *u_ts,
+			    int __user *u_ret);
+
+COBALT_SYSCALL_DECL(monitor_init,
+		    (struct cobalt_monitor_shadow __user *u_monsh,
+		     clockid_t clk_id,
+		     int flags));
+
+COBALT_SYSCALL_DECL(monitor_enter,
+		    (struct cobalt_monitor_shadow __user *u_monsh));
+
+COBALT_SYSCALL_DECL(monitor_sync,
+		    (struct cobalt_monitor_shadow __user *u_monsh));
+
+COBALT_SYSCALL_DECL(monitor_exit,
+		    (struct cobalt_monitor_shadow __user *u_monsh));
+
+COBALT_SYSCALL_DECL(monitor_wait,
+		    (struct cobalt_monitor_shadow __user *u_monsh,
+		     int event, const struct __user_old_timespec __user *u_ts,
+		     int __user *u_ret));
+
+COBALT_SYSCALL_DECL(monitor_wait64,
+		    (struct cobalt_monitor_shadow __user *u_monsh, int event,
+		     const struct __kernel_timespec __user *u_ts,
+		     int __user *u_ret));
+
+COBALT_SYSCALL_DECL(monitor_destroy,
+		    (struct cobalt_monitor_shadow __user *u_monsh));
+
+void cobalt_monitor_reclaim(struct cobalt_resnode *node,
+			    spl_t s);
+
+#endif /* !_COBALT_POSIX_MONITOR_H */
+++ linux-patched/kernel/xenomai/posix/cond.h	2022-03-21 12:58:28.962893125 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/thread.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_COND_H
+#define _COBALT_POSIX_COND_H
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/list.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/uapi/thread.h>
+#include <cobalt/uapi/cond.h>
+#include <xenomai/posix/syscall.h>
+#include <xenomai/posix/process.h>
+
+struct cobalt_mutex;
+
+struct cobalt_cond {
+	unsigned int magic;
+	struct xnsynch synchbase;
+	struct list_head mutex_link;
+	struct cobalt_cond_state *state;
+	struct cobalt_condattr attr;
+	struct cobalt_mutex *mutex;
+	struct cobalt_resnode resnode;
+};
+
+int __cobalt_cond_wait_prologue(struct cobalt_cond_shadow __user *u_cnd,
+				struct cobalt_mutex_shadow __user *u_mx,
+				int *u_err,
+				void __user *u_ts,
+				int (*fetch_timeout)(struct timespec64 *ts,
+						     const void __user *u_ts));
+COBALT_SYSCALL_DECL(cond_init,
+		    (struct cobalt_cond_shadow __user *u_cnd,
+		     const struct cobalt_condattr __user *u_attr));
+
+COBALT_SYSCALL_DECL(cond_destroy,
+		    (struct cobalt_cond_shadow __user *u_cnd));
+
+COBALT_SYSCALL_DECL(cond_wait_prologue,
+		    (struct cobalt_cond_shadow __user *u_cnd,
+		     struct cobalt_mutex_shadow __user *u_mx,
+		     int *u_err,
+		     unsigned int timed,
+		     struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(cond_wait_epilogue,
+		    (struct cobalt_cond_shadow __user *u_cnd,
+		     struct cobalt_mutex_shadow __user *u_mx));
+
+int cobalt_cond_deferred_signals(struct cobalt_cond *cond);
+
+void cobalt_cond_reclaim(struct cobalt_resnode *node,
+			 spl_t s);
+
+#endif /* !_COBALT_POSIX_COND_H */
+++ linux-patched/kernel/xenomai/posix/thread.h	2022-03-21 12:58:28.958893164 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/memory.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_THREAD_H
+#define _COBALT_POSIX_THREAD_H
+
+#include <linux/stdarg.h>
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/signal.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/uapi/thread.h>
+#include <cobalt/uapi/sched.h>
+/* CAUTION: rtdm/cobalt.h reads this header. */
+#include <xenomai/posix/syscall.h>
+#include <xenomai/posix/extension.h>
+
+#define PTHREAD_PROCESS_PRIVATE 0
+#define PTHREAD_PROCESS_SHARED  1
+
+#define PTHREAD_CREATE_JOINABLE 0
+#define PTHREAD_CREATE_DETACHED 1
+
+#define PTHREAD_INHERIT_SCHED  0
+#define PTHREAD_EXPLICIT_SCHED 1
+
+#define PTHREAD_MUTEX_NORMAL     0
+#define PTHREAD_MUTEX_RECURSIVE  1
+#define PTHREAD_MUTEX_ERRORCHECK 2
+#define PTHREAD_MUTEX_DEFAULT    0
+
+struct cobalt_thread;
+struct cobalt_threadstat;
+
+/*
+ * pthread_mutexattr_t and pthread_condattr_t fit on 32 bits, for
+ * compatibility with libc.
+ */
+
+/* The following definitions are copied from LinuxThreads' pthreadtypes.h. */
+struct _pthread_fastlock {
+	long int __status;
+	int __spinlock;
+};
+
+typedef struct {
+	struct _pthread_fastlock __c_lock;
+	long __c_waiting;
+	char __padding[48 - sizeof (struct _pthread_fastlock)
+		       - sizeof (long) - sizeof (long long)];
+	long long __align;
+} pthread_cond_t;
+
+enum {
+	PTHREAD_PRIO_NONE,
+	PTHREAD_PRIO_INHERIT,
+	PTHREAD_PRIO_PROTECT
+};
+
+typedef struct {
+	int __m_reserved;
+	int __m_count;
+	long __m_owner;
+	int __m_kind;
+	struct _pthread_fastlock __m_lock;
+} pthread_mutex_t;
+
+struct cobalt_local_hkey {
+	/** pthread_t from userland. */
+	unsigned long u_pth;
+	/** kernel mm context. */
+	struct mm_struct *mm;
+};
+
+struct cobalt_thread {
+	unsigned int magic;
+	struct xnthread threadbase;
+	struct cobalt_extref extref;
+	struct cobalt_process *process;
+	struct list_head next;	/* in global/process thread_list */
+
+	/** Signal management. */
+	sigset_t sigpending;
+	struct list_head sigqueues[_NSIG]; /* in cobalt_sigpending */
+	struct xnsynch sigwait;
+	struct list_head signext;
+
+	/** Monitor wait object and link holder. */
+	struct xnsynch monitor_synch;
+	struct list_head monitor_link;
+
+	struct cobalt_local_hkey hkey;
+};
+
+struct cobalt_sigwait_context {
+	struct xnthread_wait_context wc;
+	sigset_t *set;
+	struct siginfo *si;
+};
+
+static inline struct cobalt_thread *cobalt_current_thread(void)
+{
+	struct xnthread *curr = xnthread_current();
+	return curr ? container_of(curr, struct cobalt_thread, threadbase) : NULL;
+}
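+
+/*
+ * Usage sketch (illustrative only): Cobalt services typically begin
+ * with
+ *
+ *   struct cobalt_thread *curr = cobalt_current_thread();
+ *   if (curr == NULL)
+ *           return -EPERM;
+ *
+ * a NULL return simply means the caller is not a Cobalt shadow.
+ */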
+
+int __cobalt_thread_create(unsigned long pth, int policy,
+			   struct sched_param_ex __user *u_param,
+			   int xid, __u32 __user *u_winoff);
+
+int __cobalt_thread_setschedparam_ex(struct cobalt_thread *thread, int policy,
+				     const struct sched_param_ex *param_ex);
+
+int cobalt_thread_setschedparam_ex(unsigned long pth,
+				   int policy,
+				   const struct sched_param_ex *param_ex,
+				   __u32 __user *u_winoff,
+				   int __user *u_promoted);
+
+int cobalt_thread_getschedparam_ex(unsigned long pth,
+				   int *policy_r,
+				   struct sched_param_ex *param_ex);
+
+int __cobalt_thread_getschedparam_ex(struct cobalt_thread *thread,
+				     int *policy_r,
+				     struct sched_param_ex *param_ex);
+
+int cobalt_thread_setschedprio(unsigned long pth,
+			       int prio,
+			       __u32 __user *u_winoff,
+			       int __user *u_promoted);
+
+struct cobalt_thread *cobalt_thread_find(pid_t pid);
+
+struct cobalt_thread *cobalt_thread_find_local(pid_t pid);
+
+struct cobalt_thread *cobalt_thread_lookup(unsigned long pth);
+
+COBALT_SYSCALL_DECL(thread_create,
+		    (unsigned long pth, int policy,
+		     struct sched_param_ex __user *u_param,
+		     int xid, __u32 __user *u_winoff));
+
+struct cobalt_thread *
+cobalt_thread_shadow(struct cobalt_local_hkey *lhkey,
+		     __u32 __user *u_winoff);
+
+COBALT_SYSCALL_DECL(thread_setmode,
+		    (int clrmask, int setmask, int __user *u_mode_r));
+
+COBALT_SYSCALL_DECL(thread_setname,
+		    (unsigned long pth, const char __user *u_name));
+
+COBALT_SYSCALL_DECL(thread_kill, (unsigned long pth, int sig));
+
+COBALT_SYSCALL_DECL(thread_join, (unsigned long pth));
+
+COBALT_SYSCALL_DECL(thread_getpid, (unsigned long pth));
+
+COBALT_SYSCALL_DECL(thread_getstat,
+		    (pid_t pid, struct cobalt_threadstat __user *u_stat));
+
+COBALT_SYSCALL_DECL(thread_setschedparam_ex,
+		    (unsigned long pth,
+		     int policy,
+		     const struct sched_param_ex __user *u_param,
+		     __u32 __user *u_winoff,
+		     int __user *u_promoted));
+
+COBALT_SYSCALL_DECL(thread_getschedparam_ex,
+		    (unsigned long pth,
+		     int __user *u_policy,
+		     struct sched_param_ex __user *u_param));
+
+COBALT_SYSCALL_DECL(thread_setschedprio,
+		    (unsigned long pth,
+		     int prio,
+		     __u32 __user *u_winoff,
+		     int __user *u_promoted));
+
+void cobalt_thread_map(struct xnthread *curr);
+
+struct xnthread_personality *cobalt_thread_exit(struct xnthread *curr);
+
+struct xnthread_personality *cobalt_thread_finalize(struct xnthread *zombie);
+
+#ifdef CONFIG_XENO_OPT_COBALT_EXTENSION
+
+int cobalt_thread_extend(struct cobalt_extension *ext,
+			 void *priv);
+
+void cobalt_thread_restrict(void);
+
+static inline
+int cobalt_thread_extended_p(const struct cobalt_thread *thread,
+			     const struct cobalt_extension *ext)
+{
+	return thread->extref.extension == ext;
+}
+
+#else /* !CONFIG_XENO_OPT_COBALT_EXTENSION */
+
+static inline
+int cobalt_thread_extended_p(const struct cobalt_thread *thread,
+			     const struct cobalt_extension *ext)
+{
+	return 0;
+}
+
+#endif /* !CONFIG_XENO_OPT_COBALT_EXTENSION */
+
+extern xnticks_t cobalt_time_slice;
+
+#endif /* !_COBALT_POSIX_THREAD_H */
+++ linux-patched/kernel/xenomai/posix/memory.h	2022-03-21 12:58:28.955893193 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/clock.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_MEMORY_H
+#define _COBALT_POSIX_MEMORY_H
+
+#include <cobalt/kernel/ppd.h>
+
+#define cobalt_umm_set_name(__umm, __fmt, __args...)	\
+	xnheap_set_name(&(__umm)->heap, (__fmt), ## __args)
+
+static inline
+void *cobalt_umm_alloc(struct cobalt_umm *umm, __u32 size)
+{
+	return xnheap_alloc(&umm->heap, size);
+}
+
+static inline
+void *cobalt_umm_zalloc(struct cobalt_umm *umm, __u32 size)
+{
+	return xnheap_zalloc(&umm->heap, size);
+}
+
+static inline
+void cobalt_umm_free(struct cobalt_umm *umm, void *p)
+{
+	xnheap_free(&umm->heap, p);
+}
+
+static inline
+__u32 cobalt_umm_offset(struct cobalt_umm *umm, void *p)
+{
+	return p - xnheap_get_membase(&umm->heap);
+}
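+
+/*
+ * Note: cobalt_umm_offset() yields the position of a block relative to
+ * the heap base, which is what gets exported to user-space instead of
+ * a kernel pointer. Illustrative pairing (the shadow field name is an
+ * assumption):
+ *
+ *   state = cobalt_umm_zalloc(umm, sizeof(*state));
+ *   shadow->state_offset = cobalt_umm_offset(umm, state);
+ */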
+
+int cobalt_memdev_init(void);
+
+void cobalt_memdev_cleanup(void);
+
+int cobalt_umm_init(struct cobalt_umm *umm, u32 size,
+		    void (*release)(struct cobalt_umm *umm));
+
+void cobalt_umm_destroy(struct cobalt_umm *umm);
+
+#endif /* !_COBALT_POSIX_MEMORY_H */
+++ linux-patched/kernel/xenomai/posix/clock.h	2022-03-21 12:58:28.951893232 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/timer.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_CLOCK_H
+#define _COBALT_POSIX_CLOCK_H
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/cpumask.h>
+#include <cobalt/uapi/time.h>
+#include <xenomai/posix/syscall.h>
+
+#define ONE_BILLION             1000000000
+
+struct xnclock;
+
+static inline void ns2ts(struct timespec64 *ts, xnticks_t nsecs)
+{
+	ts->tv_sec = xnclock_divrem_billion(nsecs, &ts->tv_nsec);
+}
+
+static inline void u_ns2ts(struct __user_old_timespec *ts, xnticks_t nsecs)
+{
+	ts->tv_sec = xnclock_divrem_billion(nsecs, &ts->tv_nsec);
+}
+
+static inline xnticks_t ts2ns(const struct timespec64 *ts)
+{
+	xnticks_t nsecs = ts->tv_nsec;
+
+	if (ts->tv_sec)
+		nsecs += (xnticks_t)ts->tv_sec * ONE_BILLION;
+
+	return nsecs;
+}
+
+static inline xnticks_t u_ts2ns(const struct __user_old_timespec *ts)
+{
+	xnticks_t nsecs = ts->tv_nsec;
+
+	if (ts->tv_sec)
+		nsecs += (xnticks_t)ts->tv_sec * ONE_BILLION;
+
+	return nsecs;
+}
+
+static inline xnticks_t tv2ns(const struct __kernel_old_timeval *tv)
+{
+	xnticks_t nsecs = tv->tv_usec * 1000;
+
+	if (tv->tv_sec)
+		nsecs += (xnticks_t)tv->tv_sec * ONE_BILLION;
+
+	return nsecs;
+}
+
+static inline void ticks2tv(struct __kernel_old_timeval *tv, xnticks_t ticks)
+{
+	unsigned long nsecs;
+
+	tv->tv_sec = xnclock_divrem_billion(ticks, &nsecs);
+	tv->tv_usec = nsecs / 1000;
+}
+
+static inline xnticks_t clock_get_ticks(clockid_t clock_id)
+{
+	return clock_id == CLOCK_REALTIME ?
+		xnclock_read_realtime(&nkclock) :
+		xnclock_read_monotonic(&nkclock);
+}
+
+static inline int clock_flag(int flag, clockid_t clock_id)
+{
+	if ((flag & TIMER_ABSTIME) == 0)
+		return XN_RELATIVE;
+
+	if (clock_id == CLOCK_REALTIME)
+		return XN_REALTIME;
+
+	return XN_ABSOLUTE;
+}
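+
+/*
+ * Illustrative use of the helpers above (sketch only): arming a
+ * one-shot, relative 250 us timeout on the core clock could look like
+ *
+ *   struct timespec64 ts = { .tv_sec = 0, .tv_nsec = 250000 };
+ *   ret = xntimer_start(timer, ts2ns(&ts), XN_INFINITE,
+ *                       clock_flag(0, CLOCK_MONOTONIC));
+ *
+ * clock_flag() returns XN_RELATIVE here since TIMER_ABSTIME is unset.
+ */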
+
+int __cobalt_clock_getres(clockid_t clock_id,
+			  struct timespec64 *ts);
+
+int __cobalt_clock_getres64(clockid_t clock_id,
+			struct __kernel_timespec __user *u_ts);
+
+int __cobalt_clock_gettime(clockid_t clock_id,
+			   struct timespec64 *ts);
+
+int __cobalt_clock_gettime64(clockid_t clock_id,
+			struct __kernel_timespec __user *u_ts);
+
+int __cobalt_clock_settime(clockid_t clock_id,
+			   const struct timespec64 *ts);
+
+int __cobalt_clock_settime64(clockid_t clock_id,
+			const struct __kernel_timespec __user *u_ts);
+
+int __cobalt_clock_adjtime(clockid_t clock_id,
+			   struct __kernel_timex *tx);
+
+int __cobalt_clock_adjtime64(clockid_t clock_id,
+			struct __kernel_timex __user *u_tx);
+
+int __cobalt_clock_nanosleep(clockid_t clock_id, int flags,
+			     const struct timespec64 *rqt,
+			     struct timespec64 *rmt);
+
+int __cobalt_clock_nanosleep64(clockid_t clock_id, int flags,
+		const struct __kernel_timespec __user *u_rqt,
+		struct __kernel_timespec __user *u_rmt);
+
+COBALT_SYSCALL_DECL(clock_getres,
+		    (clockid_t clock_id, struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(clock_getres64,
+		    (clockid_t clock_id, struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(clock_gettime,
+		    (clockid_t clock_id, struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(clock_gettime64,
+		    (clockid_t clock_id, struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(clock_settime,
+		    (clockid_t clock_id, const struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(clock_settime64,
+		    (clockid_t clock_id,
+			 const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(clock_adjtime,
+		    (clockid_t clock_id, struct __user_old_timex __user *u_tx));
+
+COBALT_SYSCALL_DECL(clock_adjtime64,
+		    (clockid_t clock_id, struct __kernel_timex __user *u_tx));
+
+COBALT_SYSCALL_DECL(clock_nanosleep,
+		    (clockid_t clock_id, int flags,
+		     const struct __user_old_timespec __user *u_rqt,
+		     struct __user_old_timespec __user *u_rmt));
+
+COBALT_SYSCALL_DECL(clock_nanosleep64,
+		    (clockid_t clock_id, int flags,
+		     const struct __kernel_timespec __user *u_rqt,
+		     struct __kernel_timespec __user *u_rmt));
+
+int cobalt_clock_register(struct xnclock *clock,
+			  const cpumask_t *affinity,
+			  clockid_t *clk_id);
+
+void cobalt_clock_deregister(struct xnclock *clock);
+
+struct xnclock *cobalt_clock_find(clockid_t clock_id);
+
+extern DECLARE_BITMAP(cobalt_clock_extids, COBALT_MAX_EXTCLOCKS);
+
+#endif /* !_COBALT_POSIX_CLOCK_H */
+++ linux-patched/kernel/xenomai/posix/timer.c	2022-03-21 12:58:28.948893262 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/sched-tp.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include "internal.h"
+#include "thread.h"
+#include "timer.h"
+#include "clock.h"
+#include "signal.h"
+
+void cobalt_timer_handler(struct xntimer *xntimer)
+{
+	struct cobalt_timer *timer;
+	/*
+	 * Deliver the timer notification via a signal (unless
+	 * SIGEV_NONE was given). If we can't do this because the
+	 * target thread disappeared, then stop the timer. It will go
+	 * away when timer_delete() is called, or the owner's process
+	 * exits, whichever comes first.
+	 */
+	timer = container_of(xntimer, struct cobalt_timer, timerbase);
+	if (timer->sigp.si.si_signo &&
+	    cobalt_signal_send_pid(timer->target, &timer->sigp) == -ESRCH)
+		xntimer_stop(&timer->timerbase);
+}
+EXPORT_SYMBOL_GPL(cobalt_timer_handler);
+
+static inline struct cobalt_thread *
+timer_init(struct cobalt_timer *timer,
+	   const struct sigevent *__restrict__ evp) /* nklocked, IRQs off. */
+{
+	struct cobalt_thread *owner = cobalt_current_thread(), *target = NULL;
+	struct xnclock *clock;
+
+	/*
+	 * First, try to offload this operation to the extended
+	 * personality the current thread might originate from.
+	 */
+	if (cobalt_initcall_extension(timer_init, &timer->extref,
+				      owner, target, evp) && target)
+		return target;
+
+	/*
+	 * OK, we have no extension available, or we have one but it
+	 * does not want to override the standard behavior: handle this
+	 * timer the pure Cobalt way then.
+	 */
+	if (evp == NULL || evp->sigev_notify == SIGEV_NONE) {
+		target = owner;	/* Assume SIGEV_THREAD_ID. */
+		goto init;
+	}
+
+	if (evp->sigev_notify != SIGEV_THREAD_ID)
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Recipient thread must be a Xenomai shadow in user-space,
+	 * living in the same process as our caller.
+	 */
+	target = cobalt_thread_find_local(evp->sigev_notify_thread_id);
+	if (target == NULL)
+		return ERR_PTR(-EINVAL);
+init:
+	clock = cobalt_clock_find(timer->clockid);
+	if (IS_ERR(clock))
+		return ERR_PTR(PTR_ERR(clock));
+
+	xntimer_init(&timer->timerbase, clock, cobalt_timer_handler,
+		     target->threadbase.sched, XNTIMER_UGRAVITY);
+
+	return target;
+}
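+
+/*
+ * Illustrative caller-side setup matching the rules enforced above
+ * (sketch only, values assumed): Cobalt accepts a NULL sigevent,
+ * SIGEV_NONE, or SIGEV_THREAD_ID targeting a shadow of the caller's
+ * own process, e.g.
+ *
+ *   struct sigevent sev = {
+ *           .sigev_notify = SIGEV_THREAD_ID,
+ *           .sigev_signo = SIGALRM,
+ *           .sigev_notify_thread_id = some_shadow_tid,
+ *   };
+ */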
+
+static inline int timer_alloc_id(struct cobalt_process *cc)
+{
+	int id;
+
+	id = find_first_bit(cc->timers_map, CONFIG_XENO_OPT_NRTIMERS);
+	if (id == CONFIG_XENO_OPT_NRTIMERS)
+		return -EAGAIN;
+
+	__clear_bit(id, cc->timers_map);
+
+	return id;
+}
+
+static inline void timer_free_id(struct cobalt_process *cc, int id)
+{
+	__set_bit(id, cc->timers_map);
+}
+
+struct cobalt_timer *
+cobalt_timer_by_id(struct cobalt_process *cc, timer_t timer_id)
+{
+	if (timer_id < 0 || timer_id >= CONFIG_XENO_OPT_NRTIMERS)
+		return NULL;
+
+	if (test_bit(timer_id, cc->timers_map))
+		return NULL;
+
+	return cc->timers[timer_id];
+}
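+
+/*
+ * Note on the id map: a set bit in timers_map means "slot free".
+ * timer_alloc_id() picks the first set bit and clears it to allocate,
+ * timer_free_id() sets it back, and cobalt_timer_by_id() rejects any
+ * id whose bit is still set before dereferencing the timers[] slot.
+ */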
+
+static inline int timer_create(clockid_t clockid,
+			       const struct sigevent *__restrict__ evp,
+			       timer_t * __restrict__ timerid)
+{
+	struct cobalt_process *cc;
+	struct cobalt_thread *target;
+	struct cobalt_timer *timer;
+	int signo, ret = -EINVAL;
+	timer_t timer_id;
+	spl_t s;
+
+	cc = cobalt_current_process();
+	if (cc == NULL)
+		return -EPERM;
+
+	timer = xnmalloc(sizeof(*timer));
+	if (timer == NULL)
+		return -ENOMEM;
+
+	timer->sigp.si.si_errno = 0;
+	timer->sigp.si.si_code = SI_TIMER;
+	timer->sigp.si.si_overrun = 0;
+	INIT_LIST_HEAD(&timer->sigp.next);
+	timer->clockid = clockid;
+	timer->overruns = 0;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	ret = timer_alloc_id(cc);
+	if (ret < 0)
+		goto out;
+
+	timer_id = ret;
+
+	if (evp == NULL) {
+		timer->sigp.si.si_int = timer_id;
+		signo = SIGALRM;
+	} else {
+		if (evp->sigev_notify == SIGEV_NONE)
+			signo = 0; /* Don't notify. */
+		else {
+			signo = evp->sigev_signo;
+			if (signo < 1 || signo > _NSIG) {
+				ret = -EINVAL;
+				goto fail;
+			}
+			timer->sigp.si.si_value = evp->sigev_value;
+		}
+	}
+
+	timer->sigp.si.si_signo = signo;
+	timer->sigp.si.si_tid = timer_id;
+	timer->id = timer_id;
+
+	target = timer_init(timer, evp);
+	if (target == NULL) {
+		ret = -EPERM;
+		goto fail;
+	}
+
+	if (IS_ERR(target)) {
+		ret = PTR_ERR(target);
+		goto fail;
+	}
+
+	timer->target = xnthread_host_pid(&target->threadbase);
+	cc->timers[timer_id] = timer;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	*timerid = timer_id;
+
+	return 0;
+fail:
+	timer_free_id(cc, timer_id);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	xnfree(timer);
+
+	return ret;
+}
+
+static void timer_cleanup(struct cobalt_process *p, struct cobalt_timer *timer)
+{
+	xntimer_destroy(&timer->timerbase);
+
+	if (!list_empty(&timer->sigp.next))
+		list_del(&timer->sigp.next);
+
+	timer_free_id(p, cobalt_timer_id(timer));
+	p->timers[cobalt_timer_id(timer)] = NULL;
+}
+
+static inline int
+timer_delete(timer_t timerid)
+{
+	struct cobalt_process *cc;
+	struct cobalt_timer *timer;
+	int ret = 0;
+	spl_t s;
+
+	cc = cobalt_current_process();
+	if (cc == NULL)
+		return -EPERM;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	timer = cobalt_timer_by_id(cc, timerid);
+	if (timer == NULL) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EINVAL;
+	}
+	/*
+	 * If an extension runs and actually handles the deletion, we
+	 * should not call the timer_cleanup extension handler for
+	 * this timer, but we shall destroy the core timer. If the
+	 * handler returns an error, the whole deletion process is
+	 * aborted, leaving the timer untouched. In all other cases,
+	 * we do the core timer cleanup work, firing the timer_cleanup
+	 * extension handler if defined.
+	 */
+	if (cobalt_call_extension(timer_delete, &timer->extref, ret) && ret < 0)
+		goto out;
+
+	if (ret == 0)
+		cobalt_call_extension(timer_cleanup, &timer->extref, ret);
+	else
+		ret = 0;
+
+	timer_cleanup(cc, timer);
+	xnlock_put_irqrestore(&nklock, s);
+	xnfree(timer);
+
+	return ret;
+
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+void __cobalt_timer_getval(struct xntimer *__restrict__ timer,
+			   struct itimerspec64 *__restrict__ value)
+{
+	ns2ts(&value->it_interval, xntimer_interval(timer));
+
+	if (!xntimer_running_p(timer)) {
+		value->it_value.tv_sec = 0;
+		value->it_value.tv_nsec = 0;
+	} else {
+		ns2ts(&value->it_value, xntimer_get_timeout(timer));
+	}
+}
+
+static inline void
+timer_gettimeout(struct cobalt_timer *__restrict__ timer,
+		 struct itimerspec64 *__restrict__ value)
+{
+	int ret = 0;
+
+	if (cobalt_call_extension(timer_gettime, &timer->extref,
+				  ret, value) && ret != 0)
+		return;
+
+	__cobalt_timer_getval(&timer->timerbase, value);
+}
+
+int __cobalt_timer_setval(struct xntimer *__restrict__ timer, int clock_flag,
+			  const struct itimerspec64 *__restrict__ value)
+{
+	xnticks_t start, period;
+
+	if (value->it_value.tv_nsec == 0 && value->it_value.tv_sec == 0) {
+		xntimer_stop(timer);
+		return 0;
+	}
+
+	if ((unsigned long)value->it_value.tv_nsec >= ONE_BILLION ||
+	    ((unsigned long)value->it_interval.tv_nsec >= ONE_BILLION &&
+	     (value->it_value.tv_sec != 0 || value->it_value.tv_nsec != 0)))
+		return -EINVAL;
+
+	start = ts2ns(&value->it_value) + 1;
+	period = ts2ns(&value->it_interval);
+
+	/*
+	 * Now start the timer. If the timeout date has already
+	 * passed, the caller will handle the case.
+	 */
+	return xntimer_start(timer, start, period, clock_flag);
+}
+
+static inline int timer_set(struct cobalt_timer *timer, int flags,
+			    const struct itimerspec64 *__restrict__ value)
+{				/* nklocked, IRQs off. */
+	struct cobalt_thread *thread;
+	int ret = 0;
+
+	/* First, try offloading the work to an extension. */
+
+	if (cobalt_call_extension(timer_settime, &timer->extref,
+				  ret, value, flags) && ret != 0)
+		return ret < 0 ? ret : 0;
+
+	/*
+	 * No extension, or operation not handled. Default to plain
+	 * POSIX behavior.
+	 *
+	 * If the target thread vanished, just don't start the timer.
+	 */
+	thread = cobalt_thread_find(timer->target);
+	if (thread == NULL)
+		return 0;
+
+	/*
+	 * Make the timer affine to the CPU running the thread to be
+	 * signaled if possible.
+	 */
+	xntimer_set_affinity(&timer->timerbase, thread->threadbase.sched);
+
+	return __cobalt_timer_setval(&timer->timerbase,
+				     clock_flag(flags, timer->clockid), value);
+}
+
+static inline void
+timer_deliver_late(struct cobalt_process *cc, timer_t timerid)
+{
+	struct cobalt_timer *timer;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	/*
+	 * We dropped the lock briefly; revalidate the timer handle in
+	 * case a deletion slipped in.
+	 */
+	timer = cobalt_timer_by_id(cc, timerid);
+	if (timer)
+		cobalt_timer_handler(&timer->timerbase);
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+int __cobalt_timer_settime(timer_t timerid, int flags,
+			   const struct itimerspec64 *__restrict__ value,
+			   struct itimerspec64 *__restrict__ ovalue)
+{
+	struct cobalt_timer *timer;
+	struct cobalt_process *cc;
+	int ret;
+	spl_t s;
+
+	cc = cobalt_current_process();
+	XENO_BUG_ON(COBALT, cc == NULL);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	timer = cobalt_timer_by_id(cc, timerid);
+	if (timer == NULL) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (ovalue)
+		timer_gettimeout(timer, ovalue);
+
+	ret = timer_set(timer, flags, value);
+	if (ret == -ETIMEDOUT) {
+		/*
+		 * Time has already passed, deliver a notification
+		 * immediately. Since we are about to dive into the
+		 * signal machinery for this, let's drop the nklock to
+		 * break the atomic section temporarily.
+		 */
+		xnlock_put_irqrestore(&nklock, s);
+		timer_deliver_late(cc, timerid);
+		return 0;
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+int __cobalt_timer_gettime(timer_t timerid, struct itimerspec64 *value)
+{
+	struct cobalt_timer *timer;
+	struct cobalt_process *cc;
+	spl_t s;
+
+	cc = cobalt_current_process();
+	if (cc == NULL)
+		return -EPERM;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	timer = cobalt_timer_by_id(cc, timerid);
+	if (timer == NULL)
+		goto fail;
+
+	timer_gettimeout(timer, value);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return -EINVAL;
+}
+
+COBALT_SYSCALL(timer_delete, current, (timer_t timerid))
+{
+	return timer_delete(timerid);
+}
+
+int __cobalt_timer_create(clockid_t clock,
+			  const struct sigevent *sev,
+			  timer_t __user *u_tm)
+{
+	timer_t timerid = 0;
+	int ret;
+
+	ret = timer_create(clock, sev, &timerid);
+	if (ret)
+		return ret;
+
+	if (cobalt_copy_to_user(u_tm, &timerid, sizeof(timerid))) {
+		timer_delete(timerid);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+COBALT_SYSCALL(timer_create, current,
+	       (clockid_t clock,
+		const struct sigevent __user *u_sev,
+		timer_t __user *u_tm))
+{
+	struct sigevent sev, *evp = NULL;
+
+	if (u_sev) {
+		evp = &sev;
+		if (cobalt_copy_from_user(&sev, u_sev, sizeof(sev)))
+			return -EFAULT;
+	}
+
+	return __cobalt_timer_create(clock, evp, u_tm);
+}
+
+COBALT_SYSCALL(timer_settime, primary,
+	       (timer_t tm, int flags,
+		const struct __user_old_itimerspec __user *u_newval,
+		struct __user_old_itimerspec __user *u_oldval))
+{
+	struct itimerspec64 newv, oldv, *oldvp = &oldv;
+	int ret;
+
+	if (u_oldval == NULL)
+		oldvp = NULL;
+
+	if (cobalt_get_u_itimerspec(&newv, u_newval))
+		return -EFAULT;
+
+	ret = __cobalt_timer_settime(tm, flags, &newv, oldvp);
+	if (ret)
+		return ret;
+
+	if (oldvp && cobalt_put_u_itimerspec(u_oldval, oldvp)) {
+		__cobalt_timer_settime(tm, flags, oldvp, NULL);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+COBALT_SYSCALL(timer_gettime, current,
+	       (timer_t tm, struct __user_old_itimerspec __user *u_val))
+{
+	struct itimerspec64 val;
+	int ret;
+
+	ret = __cobalt_timer_gettime(tm, &val);
+	if (ret)
+		return ret;
+
+	return cobalt_put_u_itimerspec(u_val, &val);
+}
+
+COBALT_SYSCALL(timer_getoverrun, current, (timer_t timerid))
+{
+	struct cobalt_timer *timer;
+	struct cobalt_process *cc;
+	int overruns;
+	spl_t s;
+
+	cc = cobalt_current_process();
+	if (cc == NULL)
+		return -EPERM;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	timer = cobalt_timer_by_id(cc, timerid);
+	if (timer == NULL)
+		goto fail;
+
+	overruns = timer->overruns;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return overruns;
+fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return -EINVAL;
+}
+
+int cobalt_timer_deliver(struct cobalt_thread *waiter, timer_t timerid) /* nklocked, IRQs off. */
+{
+	struct cobalt_timer *timer;
+	xnticks_t now;
+
+	timer = cobalt_timer_by_id(cobalt_current_process(), timerid);
+	if (timer == NULL)
+		/* Killed before ultimate delivery, who cares then? */
+		return 0;
+
+	if (!xntimer_periodic_p(&timer->timerbase))
+		timer->overruns = 0;
+	else {
+		now = xnclock_read_raw(xntimer_clock(&timer->timerbase));
+		timer->overruns = xntimer_get_overruns(&timer->timerbase,
+					       &waiter->threadbase, now);
+		if ((unsigned int)timer->overruns > COBALT_DELAYMAX)
+			timer->overruns = COBALT_DELAYMAX;
+	}
+
+	return timer->overruns;
+}
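+
+/*
+ * Note: overruns are only meaningful for periodic timers, hence the
+ * reset to zero in the one-shot case above; the count is clamped to
+ * COBALT_DELAYMAX so that timer_getoverrun() never reports more than
+ * the ABI can carry.
+ */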
+
+void cobalt_timer_reclaim(struct cobalt_process *p)
+{
+	struct cobalt_timer *timer;
+	unsigned id;
+	spl_t s;
+	int ret;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (find_first_zero_bit(p->timers_map, CONFIG_XENO_OPT_NRTIMERS) ==
+		CONFIG_XENO_OPT_NRTIMERS)
+		goto out;
+
+	for (id = 0; id < ARRAY_SIZE(p->timers); id++) {
+		timer = cobalt_timer_by_id(p, id);
+		if (timer == NULL)
+			continue;
+
+		cobalt_call_extension(timer_cleanup, &timer->extref, ret);
+		timer_cleanup(p, timer);
+		xnlock_put_irqrestore(&nklock, s);
+		xnfree(timer);
+		xnlock_get_irqsave(&nklock, s);
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+}
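+
+/*
+ * Note on the reclaim loop above: the lock is dropped around each
+ * xnfree() and re-acquired before the next iteration, mirroring the
+ * deletion path, presumably to keep the freeing work out of the
+ * nklock-protected section; cobalt_timer_by_id() tolerates slots
+ * vanishing in between.
+ */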
+++ linux-patched/kernel/xenomai/sched-tp.c	2022-03-21 12:58:28.890893827 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/sched-weak.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/uapi/sched.h>
+
+static void tp_schedule_next(struct xnsched_tp *tp)
+{
+	struct xnsched_tp_window *w;
+	struct xnsched *sched;
+	int p_next, ret;
+	xnticks_t t;
+
+	for (;;) {
+		/*
+		 * Switch to the next partition. Time holes in a
+		 * global time frame are defined as partition windows
+		 * assigned to part# -1, in which case the (always
+		 * empty) idle queue will be polled for runnable
+		 * threads.  Therefore, we may assume that a window
+		 * begins immediately after the previous one ends,
+		 * which simplifies the implementation a lot.
+		 */
+		w = &tp->gps->pwins[tp->wnext];
+		p_next = w->w_part;
+		tp->tps = p_next < 0 ? &tp->idle : &tp->partitions[p_next];
+
+		/* Schedule tick to advance to the next window. */
+		tp->wnext = (tp->wnext + 1) % tp->gps->pwin_nr;
+		w = &tp->gps->pwins[tp->wnext];
+		t = tp->tf_start + w->w_offset;
+
+		ret = xntimer_start(&tp->tf_timer, t, XN_INFINITE, XN_ABSOLUTE);
+		if (ret != -ETIMEDOUT)
+			break;
+		/*
+		 * We are late, make sure to remain within the bounds
+		 * of a valid time frame before advancing to the next
+		 * window. Otherwise, fix up by advancing to the next
+		 * time frame immediately.
+		 */
+		for (;;) {
+			t = tp->tf_start + tp->gps->tf_duration;
+			if (xnclock_read_monotonic(&nkclock) > t) {
+				tp->tf_start = t;
+				tp->wnext = 0;
+			} else
+				break;
+		}
+	}
+
+	sched = container_of(tp, struct xnsched, tp);
+	xnsched_set_resched(sched);
+}
+
+static void tp_tick_handler(struct xntimer *timer)
+{
+	struct xnsched_tp *tp = container_of(timer, struct xnsched_tp, tf_timer);
+	/*
+	 * Advance beginning date of time frame by a full period if we
+	 * are processing the last window.
+	 */
+	if (tp->wnext + 1 == tp->gps->pwin_nr)
+		tp->tf_start += tp->gps->tf_duration;
+
+	tp_schedule_next(tp);
+}
+
+static void xnsched_tp_init(struct xnsched *sched)
+{
+	struct xnsched_tp *tp = &sched->tp;
+	char timer_name[XNOBJECT_NAME_LEN];
+	int n;
+
+	for (n = 0; n < CONFIG_XENO_OPT_SCHED_TP_NRPART; n++)
+		xnsched_initq(&tp->partitions[n].runnable);
+
+	xnsched_initq(&tp->idle.runnable);
+
+#ifdef CONFIG_SMP
+	ksformat(timer_name, sizeof(timer_name), "[tp-tick/%u]", sched->cpu);
+#else
+	strcpy(timer_name, "[tp-tick]");
+#endif
+	tp->tps = NULL;
+	tp->gps = NULL;
+	INIT_LIST_HEAD(&tp->threads);
+	xntimer_init(&tp->tf_timer, &nkclock, tp_tick_handler,
+		     sched, XNTIMER_IGRAVITY);
+	xntimer_set_name(&tp->tf_timer, timer_name);
+}
+
+static bool xnsched_tp_setparam(struct xnthread *thread,
+				const union xnsched_policy_param *p)
+{
+	struct xnsched *sched = thread->sched;
+
+	thread->tps = &sched->tp.partitions[p->tp.ptid];
+	xnthread_clear_state(thread, XNWEAK);
+
+	return xnsched_set_effective_priority(thread, p->tp.prio);
+}
+
+static void xnsched_tp_getparam(struct xnthread *thread,
+				union xnsched_policy_param *p)
+{
+	p->tp.prio = thread->cprio;
+	p->tp.ptid = thread->tps - thread->sched->tp.partitions;
+}
+
+static void xnsched_tp_trackprio(struct xnthread *thread,
+				 const union xnsched_policy_param *p)
+{
+	/*
+	 * The assigned partition never changes internally due to PI
+	 * (see xnsched_track_policy), since this would be pretty
+	 * wrong with respect to TP scheduling: i.e. we may not allow
+	 * a thread from another partition to consume CPU time from
+	 * the current one, even though this would help enforce PI (see
+	 * note). In any case, introducing resource contention between
+	 * threads that belong to different partitions is utterly
+	 * wrong in the first place.  Only an explicit call to
+	 * xnsched_set_policy() may change the partition assigned to a
+	 * thread. For that reason, a policy reset action only boils
+	 * down to reinstating the base priority.
+	 *
+	 * NOTE: we do allow threads from lower scheduling classes to
+	 * consume CPU time from the current window as a result of a
+	 * PI boost, since this is aimed at speeding up the release of
+	 * a synchronization object a TP thread needs.
+	 */
+	if (p) {
+		/* We should never cross partition boundaries. */
+		XENO_WARN_ON(COBALT,
+			   thread->base_class == &xnsched_class_tp &&
+			   thread->tps - thread->sched->tp.partitions != p->tp.ptid);
+		thread->cprio = p->tp.prio;
+	} else
+		thread->cprio = thread->bprio;
+}
+
+static void xnsched_tp_protectprio(struct xnthread *thread, int prio)
+{
+	if (prio > XNSCHED_TP_MAX_PRIO)
+		prio = XNSCHED_TP_MAX_PRIO;
+
+	thread->cprio = prio;
+}
+
+static int xnsched_tp_chkparam(struct xnthread *thread,
+			       const union xnsched_policy_param *p)
+{
+	struct xnsched_tp *tp = &thread->sched->tp;
+
+	if (p->tp.ptid < 0 ||
+		p->tp.ptid >= CONFIG_XENO_OPT_SCHED_TP_NRPART)
+		return -EINVAL;
+
+	if (tp->gps == NULL ||
+	    p->tp.prio < XNSCHED_TP_MIN_PRIO ||
+	    p->tp.prio > XNSCHED_TP_MAX_PRIO)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int xnsched_tp_declare(struct xnthread *thread,
+			      const union xnsched_policy_param *p)
+{
+	struct xnsched *sched = thread->sched;
+
+	list_add_tail(&thread->tp_link, &sched->tp.threads);
+
+	return 0;
+}
+
+static void xnsched_tp_forget(struct xnthread *thread)
+{
+	list_del(&thread->tp_link);
+	thread->tps = NULL;
+}
+
+static void xnsched_tp_enqueue(struct xnthread *thread)
+{
+	xnsched_addq_tail(&thread->tps->runnable, thread);
+}
+
+static void xnsched_tp_dequeue(struct xnthread *thread)
+{
+	xnsched_delq(&thread->tps->runnable, thread);
+}
+
+static void xnsched_tp_requeue(struct xnthread *thread)
+{
+	xnsched_addq(&thread->tps->runnable, thread);
+}
+
+static struct xnthread *xnsched_tp_pick(struct xnsched *sched)
+{
+	/* Never pick a thread if we don't schedule partitions. */
+	if (!xntimer_running_p(&sched->tp.tf_timer))
+		return NULL;
+
+	return xnsched_getq(&sched->tp.tps->runnable);
+}
+
+static void xnsched_tp_migrate(struct xnthread *thread, struct xnsched *sched)
+{
+	union xnsched_policy_param param;
+	/*
+	 * Since our partition schedule is a per-scheduler property,
+	 * it cannot apply to a thread that moves to another CPU
+	 * anymore. So we upgrade that thread to the RT class when a
+	 * CPU migration occurs. A subsequent call to
+	 * __xnthread_set_schedparam() may move it back to TP
+	 * scheduling, with a partition assignment that fits the
+	 * remote CPU's partition schedule.
+	 */
+	param.rt.prio = thread->cprio;
+	__xnthread_set_schedparam(thread, &xnsched_class_rt, &param);
+}
+
+void xnsched_tp_start_schedule(struct xnsched *sched)
+{
+	struct xnsched_tp *tp = &sched->tp;
+
+	if (tp->gps == NULL)
+		return;
+
+	tp->wnext = 0;
+	tp->tf_start = xnclock_read_monotonic(&nkclock);
+	tp_schedule_next(tp);
+}
+EXPORT_SYMBOL_GPL(xnsched_tp_start_schedule);
+
+void xnsched_tp_stop_schedule(struct xnsched *sched)
+{
+	struct xnsched_tp *tp = &sched->tp;
+
+	if (tp->gps)
+		xntimer_stop(&tp->tf_timer);
+}
+EXPORT_SYMBOL_GPL(xnsched_tp_stop_schedule);
+
+struct xnsched_tp_schedule *
+xnsched_tp_set_schedule(struct xnsched *sched,
+			struct xnsched_tp_schedule *gps)
+{
+	struct xnsched_tp_schedule *old_gps;
+	struct xnsched_tp *tp = &sched->tp;
+	union xnsched_policy_param param;
+	struct xnthread *thread, *tmp;
+
+	XENO_BUG_ON(COBALT, gps != NULL &&
+		   (gps->pwin_nr <= 0 || gps->pwins[0].w_offset != 0));
+
+	xnsched_tp_stop_schedule(sched);
+
+	/*
+	 * Move all TP threads on this scheduler to the RT class,
+	 * until we call __xnthread_set_schedparam() for them again.
+	 */
+	if (list_empty(&tp->threads))
+		goto done;
+
+	list_for_each_entry_safe(thread, tmp, &tp->threads, tp_link) {
+		param.rt.prio = thread->cprio;
+		__xnthread_set_schedparam(thread, &xnsched_class_rt, &param);
+	}
+done:
+	old_gps = tp->gps;
+	tp->gps = gps;
+
+	return old_gps;
+}
+EXPORT_SYMBOL_GPL(xnsched_tp_set_schedule);
+
+struct xnsched_tp_schedule *
+xnsched_tp_get_schedule(struct xnsched *sched)
+{
+	struct xnsched_tp_schedule *gps;
+
+	gps = sched->tp.gps;
+	if (gps == NULL)
+		return NULL;
+
+	atomic_inc(&gps->refcount);
+
+	return gps;
+}
+EXPORT_SYMBOL_GPL(xnsched_tp_get_schedule);
+
+void xnsched_tp_put_schedule(struct xnsched_tp_schedule *gps)
+{
+	if (atomic_dec_and_test(&gps->refcount))
+		xnfree(gps);
+}
+EXPORT_SYMBOL_GPL(xnsched_tp_put_schedule);
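+
+/*
+ * Typical install sequence (illustrative sketch; building the
+ * xnsched_tp_schedule with its windows is left out): the first window
+ * must start at offset 0, then
+ *
+ *   old = xnsched_tp_set_schedule(sched, gps);
+ *   if (old)
+ *           xnsched_tp_put_schedule(old);
+ *   xnsched_tp_start_schedule(sched);
+ *
+ * Threads demoted to the RT class by the swap are moved back to
+ * SCHED_TP afterwards via __xnthread_set_schedparam().
+ */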
+
+int xnsched_tp_get_partition(struct xnsched *sched)
+{
+	struct xnsched_tp *tp = &sched->tp;
+
+	if (tp->tps == NULL || tp->tps == &tp->idle)
+		return -1;
+
+	return tp->tps - tp->partitions;
+}
+EXPORT_SYMBOL_GPL(xnsched_tp_get_partition);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+struct xnvfile_directory sched_tp_vfroot;
+
+struct vfile_sched_tp_priv {
+	struct xnthread *curr;
+};
+
+struct vfile_sched_tp_data {
+	int cpu;
+	pid_t pid;
+	char name[XNOBJECT_NAME_LEN];
+	int prio;
+	int ptid;
+};
+
+static struct xnvfile_snapshot_ops vfile_sched_tp_ops;
+
+static struct xnvfile_snapshot vfile_sched_tp = {
+	.privsz = sizeof(struct vfile_sched_tp_priv),
+	.datasz = sizeof(struct vfile_sched_tp_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_sched_tp_ops,
+};
+
+static int vfile_sched_tp_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_sched_tp_priv *priv = xnvfile_iterator_priv(it);
+	int nrthreads = xnsched_class_tp.nthreads;
+
+	if (nrthreads == 0)
+		return -ESRCH;
+
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+
+	return nrthreads;
+}
+
+static int vfile_sched_tp_next(struct xnvfile_snapshot_iterator *it,
+			       void *data)
+{
+	struct vfile_sched_tp_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_sched_tp_data *p = data;
+	struct xnthread *thread;
+
+	if (priv->curr == NULL)
+		return 0;	/* All done. */
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	if (thread->base_class != &xnsched_class_tp)
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = xnsched_cpu(thread->sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->ptid = thread->tps - thread->sched->tp.partitions;
+	p->prio = thread->cprio;
+
+	return 1;
+}
+
+static int vfile_sched_tp_show(struct xnvfile_snapshot_iterator *it,
+			       void *data)
+{
+	struct vfile_sched_tp_data *p = data;
+
+	if (p == NULL)
+		xnvfile_printf(it, "%-3s  %-6s %-4s %-4s  %s\n",
+			       "CPU", "PID", "PTID", "PRI", "NAME");
+	else
+		xnvfile_printf(it, "%3u  %-6d %-4d %-4d  %s\n",
+			       p->cpu,
+			       p->pid,
+			       p->ptid,
+			       p->prio,
+			       p->name);
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_sched_tp_ops = {
+	.rewind = vfile_sched_tp_rewind,
+	.next = vfile_sched_tp_next,
+	.show = vfile_sched_tp_show,
+};
+
+static int xnsched_tp_init_vfile(struct xnsched_class *schedclass,
+				 struct xnvfile_directory *vfroot)
+{
+	int ret;
+
+	ret = xnvfile_init_dir(schedclass->name, &sched_tp_vfroot, vfroot);
+	if (ret)
+		return ret;
+
+	return xnvfile_init_snapshot("threads", &vfile_sched_tp,
+				     &sched_tp_vfroot);
+}
+
+static void xnsched_tp_cleanup_vfile(struct xnsched_class *schedclass)
+{
+	xnvfile_destroy_snapshot(&vfile_sched_tp);
+	xnvfile_destroy_dir(&sched_tp_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct xnsched_class xnsched_class_tp = {
+	.sched_init		=	xnsched_tp_init,
+	.sched_enqueue		=	xnsched_tp_enqueue,
+	.sched_dequeue		=	xnsched_tp_dequeue,
+	.sched_requeue		=	xnsched_tp_requeue,
+	.sched_pick		=	xnsched_tp_pick,
+	.sched_tick		=	NULL,
+	.sched_rotate		=	NULL,
+	.sched_migrate		=	xnsched_tp_migrate,
+	.sched_chkparam		=	xnsched_tp_chkparam,
+	.sched_setparam		=	xnsched_tp_setparam,
+	.sched_getparam		=	xnsched_tp_getparam,
+	.sched_trackprio	=	xnsched_tp_trackprio,
+	.sched_protectprio	=	xnsched_tp_protectprio,
+	.sched_declare		=	xnsched_tp_declare,
+	.sched_forget		=	xnsched_tp_forget,
+	.sched_kick		=	NULL,
+#ifdef CONFIG_XENO_OPT_VFILE
+	.sched_init_vfile	=	xnsched_tp_init_vfile,
+	.sched_cleanup_vfile	=	xnsched_tp_cleanup_vfile,
+#endif
+	.weight			=	XNSCHED_CLASS_WEIGHT(2),
+	.policy			=	SCHED_TP,
+	.name			=	"tp"
+};
+EXPORT_SYMBOL_GPL(xnsched_class_tp);
+++ linux-patched/kernel/xenomai/sched-weak.c	2022-03-21 12:58:28.887893857 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/procfs.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/sched.h>
+#include <cobalt/uapi/sched.h>
+
+static void xnsched_weak_init(struct xnsched *sched)
+{
+	xnsched_initq(&sched->weak.runnable);
+}
+
+static void xnsched_weak_requeue(struct xnthread *thread)
+{
+	xnsched_addq(&thread->sched->weak.runnable, thread);
+}
+
+static void xnsched_weak_enqueue(struct xnthread *thread)
+{
+	xnsched_addq_tail(&thread->sched->weak.runnable, thread);
+}
+
+static void xnsched_weak_dequeue(struct xnthread *thread)
+{
+	xnsched_delq(&thread->sched->weak.runnable, thread);
+}
+
+static struct xnthread *xnsched_weak_pick(struct xnsched *sched)
+{
+	return xnsched_getq(&sched->weak.runnable);
+}
+
+static bool xnsched_weak_setparam(struct xnthread *thread,
+				  const union xnsched_policy_param *p)
+{
+	if (!xnthread_test_state(thread, XNBOOST))
+		xnthread_set_state(thread, XNWEAK);
+
+	return xnsched_set_effective_priority(thread, p->weak.prio);
+}
+
+static void xnsched_weak_getparam(struct xnthread *thread,
+				  union xnsched_policy_param *p)
+{
+	p->weak.prio = thread->cprio;
+}
+
+static void xnsched_weak_trackprio(struct xnthread *thread,
+				   const union xnsched_policy_param *p)
+{
+	if (p)
+		thread->cprio = p->weak.prio;
+	else
+		thread->cprio = thread->bprio;
+}
+
+static void xnsched_weak_protectprio(struct xnthread *thread, int prio)
+{
+	if (prio > XNSCHED_WEAK_MAX_PRIO)
+		prio = XNSCHED_WEAK_MAX_PRIO;
+
+	thread->cprio = prio;
+}
+
+static int xnsched_weak_chkparam(struct xnthread *thread,
+				 const union xnsched_policy_param *p)
+{
+	if (p->weak.prio < XNSCHED_WEAK_MIN_PRIO ||
+	    p->weak.prio > XNSCHED_WEAK_MAX_PRIO)
+		return -EINVAL;
+
+	return 0;
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+struct xnvfile_directory sched_weak_vfroot;
+
+struct vfile_sched_weak_priv {
+	struct xnthread *curr;
+};
+
+struct vfile_sched_weak_data {
+	int cpu;
+	pid_t pid;
+	char name[XNOBJECT_NAME_LEN];
+	int cprio;
+};
+
+static struct xnvfile_snapshot_ops vfile_sched_weak_ops;
+
+static struct xnvfile_snapshot vfile_sched_weak = {
+	.privsz = sizeof(struct vfile_sched_weak_priv),
+	.datasz = sizeof(struct vfile_sched_weak_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_sched_weak_ops,
+};
+
+static int vfile_sched_weak_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_sched_weak_priv *priv = xnvfile_iterator_priv(it);
+	int nrthreads = xnsched_class_weak.nthreads;
+
+	if (nrthreads == 0)
+		return -ESRCH;
+
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+
+	return nrthreads;
+}
+
+static int vfile_sched_weak_next(struct xnvfile_snapshot_iterator *it,
+				 void *data)
+{
+	struct vfile_sched_weak_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_sched_weak_data *p = data;
+	struct xnthread *thread;
+
+	if (priv->curr == NULL)
+		return 0;	/* All done. */
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	if (thread->base_class != &xnsched_class_weak)
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = xnsched_cpu(thread->sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->cprio = thread->cprio;
+
+	return 1;
+}
+
+static int vfile_sched_weak_show(struct xnvfile_snapshot_iterator *it,
+				 void *data)
+{
+	struct vfile_sched_weak_data *p = data;
+	char pribuf[16];
+
+	if (p == NULL)
+		xnvfile_printf(it, "%-3s  %-6s %-4s %s\n",
+			       "CPU", "PID", "PRI", "NAME");
+	else {
+		ksformat(pribuf, sizeof(pribuf), "%3d", p->cprio);
+		xnvfile_printf(it, "%3u  %-6d %-4s %s\n",
+			       p->cpu,
+			       p->pid,
+			       pribuf,
+			       p->name);
+	}
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_sched_weak_ops = {
+	.rewind = vfile_sched_weak_rewind,
+	.next = vfile_sched_weak_next,
+	.show = vfile_sched_weak_show,
+};
+
+static int xnsched_weak_init_vfile(struct xnsched_class *schedclass,
+				   struct xnvfile_directory *vfroot)
+{
+	int ret;
+
+	ret = xnvfile_init_dir(schedclass->name, &sched_weak_vfroot, vfroot);
+	if (ret)
+		return ret;
+
+	return xnvfile_init_snapshot("threads", &vfile_sched_weak,
+				     &sched_weak_vfroot);
+}
+
+static void xnsched_weak_cleanup_vfile(struct xnsched_class *schedclass)
+{
+	xnvfile_destroy_snapshot(&vfile_sched_weak);
+	xnvfile_destroy_dir(&sched_weak_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct xnsched_class xnsched_class_weak = {
+	.sched_init		=	xnsched_weak_init,
+	.sched_enqueue		=	xnsched_weak_enqueue,
+	.sched_dequeue		=	xnsched_weak_dequeue,
+	.sched_requeue		=	xnsched_weak_requeue,
+	.sched_pick		=	xnsched_weak_pick,
+	.sched_tick		=	NULL,
+	.sched_rotate		=	NULL,
+	.sched_forget		=	NULL,
+	.sched_kick		=	NULL,
+	.sched_chkparam		=	xnsched_weak_chkparam,
+	.sched_setparam		=	xnsched_weak_setparam,
+	.sched_trackprio	=	xnsched_weak_trackprio,
+	.sched_protectprio	=	xnsched_weak_protectprio,
+	.sched_getparam		=	xnsched_weak_getparam,
+#ifdef CONFIG_XENO_OPT_VFILE
+	.sched_init_vfile	=	xnsched_weak_init_vfile,
+	.sched_cleanup_vfile	=	xnsched_weak_cleanup_vfile,
+#endif
+	.weight			=	XNSCHED_CLASS_WEIGHT(1),
+	.policy			=	SCHED_WEAK,
+	.name			=	"weak"
+};
+EXPORT_SYMBOL_GPL(xnsched_class_weak);
+++ linux-patched/kernel/xenomai/procfs.c	2022-03-21 12:58:28.883893896 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/sched-sporadic.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/vfile.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/sched.h>
+#include <xenomai/version.h>
+#include "debug.h"
+
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+
+static int lock_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	struct xnlockinfo lockinfo;
+	spl_t s;
+	int cpu;
+
+	for_each_realtime_cpu(cpu) {
+		xnlock_get_irqsave(&nklock, s);
+		lockinfo = per_cpu(xnlock_stats, cpu);
+		xnlock_put_irqrestore(&nklock, s);
+
+		if (cpu > 0)
+			xnvfile_printf(it, "\n");
+
+		xnvfile_printf(it, "CPU%d:\n", cpu);
+
+		xnvfile_printf(it,
+			       "  longest locked section: %llu ns\n"
+			       "  spinning time: %llu ns\n"
+			       "  section entry: %s:%d (%s)\n",
+			       xnclock_ticks_to_ns(&nkclock, lockinfo.lock_time),
+			       xnclock_ticks_to_ns(&nkclock, lockinfo.spin_time),
+			       lockinfo.file, lockinfo.line, lockinfo.function);
+	}
+
+	return 0;
+}
+
+static ssize_t lock_vfile_store(struct xnvfile_input *input)
+{
+	ssize_t ret;
+	spl_t s;
+	int cpu;
+
+	long val;
+
+	ret = xnvfile_get_integer(input, &val);
+	if (ret < 0)
+		return ret;
+
+	if (val != 0)
+		return -EINVAL;
+
+	for_each_realtime_cpu(cpu) {
+		xnlock_get_irqsave(&nklock, s);
+		memset(&per_cpu(xnlock_stats, cpu), '\0', sizeof(struct xnlockinfo));
+		xnlock_put_irqrestore(&nklock, s);
+	}
+
+	return ret;
+}
+
+static struct xnvfile_regular_ops lock_vfile_ops = {
+	.show = lock_vfile_show,
+	.store = lock_vfile_store,
+};
+
+static struct xnvfile_regular lock_vfile = {
+	.ops = &lock_vfile_ops,
+};
+
+#endif /* CONFIG_XENO_OPT_DEBUG_LOCKING */
+
+static int latency_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	xnvfile_printf(it, "%Lu\n",
+		       xnclock_ticks_to_ns(&nkclock, nkclock.gravity.user));
+
+	return 0;
+}
+
+static ssize_t latency_vfile_store(struct xnvfile_input *input)
+{
+	ssize_t ret;
+	long val;
+
+	ret = xnvfile_get_integer(input, &val);
+	if (ret < 0)
+		return ret;
+
+	nkclock.gravity.user = xnclock_ns_to_ticks(&nkclock, val);
+
+	return ret;
+}
+
+static struct xnvfile_regular_ops latency_vfile_ops = {
+	.show = latency_vfile_show,
+	.store = latency_vfile_store,
+};
+
+static struct xnvfile_regular latency_vfile = {
+	.ops = &latency_vfile_ops,
+};
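+
+/*
+ * Usage note (illustrative): /proc/xenomai/latency exposes the user
+ * gravity of the core clock in nanoseconds. Reading it returns the
+ * current anticipation value; writing a plain decimal number of
+ * nanoseconds updates it through latency_vfile_store() above.
+ */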
+
+static int version_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	xnvfile_printf(it, "%s\n", XENO_VERSION_STRING);
+
+	return 0;
+}
+
+static struct xnvfile_regular_ops version_vfile_ops = {
+	.show = version_vfile_show,
+};
+
+static struct xnvfile_regular version_vfile = {
+	.ops = &version_vfile_ops,
+};
+
+static int faults_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	int cpu, trap;
+
+	xnvfile_puts(it, "TRAP ");
+
+	for_each_realtime_cpu(cpu)
+		xnvfile_printf(it, "        CPU%d", cpu);
+
+	for (trap = 0; cobalt_machine.fault_labels[trap]; trap++) {
+		if (*cobalt_machine.fault_labels[trap] == '\0')
+			continue;
+
+		xnvfile_printf(it, "\n%3d: ", trap);
+
+		for_each_realtime_cpu(cpu)
+			xnvfile_printf(it, "%12u",
+				       per_cpu(cobalt_machine_cpudata, cpu).faults[trap]);
+
+		xnvfile_printf(it, "    (%s)",
+			       cobalt_machine.fault_labels[trap]);
+	}
+
+	xnvfile_putc(it, '\n');
+
+	return 0;
+}
+
+static struct xnvfile_regular_ops faults_vfile_ops = {
+	.show = faults_vfile_show,
+};
+
+static struct xnvfile_regular faults_vfile = {
+	.ops = &faults_vfile_ops,
+};
+
+void xnprocfs_cleanup_tree(void)
+{
+#ifdef CONFIG_XENO_OPT_DEBUG
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+	xnvfile_destroy_regular(&lock_vfile);
+#endif
+	xnvfile_destroy_dir(&cobalt_debug_vfroot);
+#endif /* XENO_OPT_DEBUG */
+	xnvfile_destroy_regular(&faults_vfile);
+	xnvfile_destroy_regular(&version_vfile);
+	xnvfile_destroy_regular(&latency_vfile);
+	xnintr_cleanup_proc();
+	xnheap_cleanup_proc();
+	xnclock_cleanup_proc();
+	xnsched_cleanup_proc();
+	xnvfile_destroy_root();
+}
+
+int __init xnprocfs_init_tree(void)
+{
+	int ret;
+
+	ret = xnvfile_init_root();
+	if (ret)
+		return ret;
+
+	ret = xnsched_init_proc();
+	if (ret)
+		return ret;
+
+	xnclock_init_proc();
+	xnheap_init_proc();
+	xnintr_init_proc();
+	xnvfile_init_regular("latency", &latency_vfile, &cobalt_vfroot);
+	xnvfile_init_regular("version", &version_vfile, &cobalt_vfroot);
+	xnvfile_init_regular("faults", &faults_vfile, &cobalt_vfroot);
+#ifdef CONFIG_XENO_OPT_DEBUG
+	xnvfile_init_dir("debug", &cobalt_debug_vfroot, &cobalt_vfroot);
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+	xnvfile_init_regular("lock", &lock_vfile, &cobalt_debug_vfroot);
+#endif
+#endif
+
+	return 0;
+}
+++ linux-patched/kernel/xenomai/sched-sporadic.c	2022-03-21 12:58:28.879893935 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/vfile.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/uapi/sched.h>
+
+#define MAX_REPLENISH CONFIG_XENO_OPT_SCHED_SPORADIC_MAXREPL
+
+static void sporadic_post_recharge(struct xnthread *thread, xnticks_t budget);
+
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+
+static inline void sporadic_note_late_drop(struct xnsched *sched)
+{
+	/*
+	 * This code should pull the brake when a misconfigured
+	 * sporadic thread misses its drop date more than a hundred
+	 * times in a row. This normally reveals a time budget which
+	 * is too tight.
+	 */
+	XENO_BUG_ON(COBALT, ++sched->pss.drop_retries > 100);
+}
+
+static inline void sporadic_note_valid_drop(struct xnsched *sched)
+{
+	sched->pss.drop_retries = 0;
+}
+
+#else /* !CONFIG_XENO_OPT_DEBUG_COBALT */
+
+static inline void sporadic_note_late_drop(struct xnsched *sched)
+{
+}
+
+static inline void sporadic_note_valid_drop(struct xnsched *sched)
+{
+}
+
+#endif /* !CONFIG_XENO_OPT_DEBUG_COBALT */
+
+static inline xnticks_t sporadic_diff_time(xnticks_t start, xnticks_t end)
+{
+	xnsticks_t d = (xnsticks_t)(end - start);
+	return unlikely(d < 0) ? -d : d;
+}
+
+static void sporadic_drop_handler(struct xntimer *timer)
+{
+	struct xnsched_sporadic_data *pss;
+	union xnsched_policy_param p;
+	struct xnthread *thread;
+
+	/*
+	 * XXX: this code will work properly regardless of
+	 * primary/secondary mode issues.
+	 */
+	pss = container_of(timer, struct xnsched_sporadic_data, drop_timer);
+	thread = pss->thread;
+
+	sporadic_post_recharge(thread, pss->budget);
+
+	if (pss->budget == 0 && thread->cprio > pss->param.low_prio) {
+		if (pss->param.low_prio < 0)
+			/*
+			 * Special case: low_prio == -1, we want the
+			 * thread to suspend until a replenishment
+			 * happens.
+			 */
+			xnthread_suspend(thread, XNHELD,
+					 XN_INFINITE, XN_RELATIVE, NULL);
+		else {
+			p.pss.init_budget = 0;
+			p.pss.current_prio = pss->param.low_prio;
+			/* Move sporadic thread to the background. */
+			__xnthread_set_schedparam(thread, &xnsched_class_sporadic, &p);
+		}
+	}
+}
+
+static void sporadic_schedule_drop(struct xnthread *thread)
+{
+	xnticks_t now = xnclock_read_monotonic(&nkclock);
+	struct xnsched_sporadic_data *pss = thread->pss;
+	int ret;
+
+	pss->resume_date = now;
+	/*
+	 * Since this timer should not fire often unless the monitored
+	 * thread behaves badly, we don't pin it to the CPU the thread
+	 * is running on, trading cycles at firing time for cycles
+	 * saved when arming the timer.
+	 */
+	xntimer_set_affinity(&pss->drop_timer, thread->sched);
+	ret = xntimer_start(&pss->drop_timer, now + pss->budget,
+			    XN_INFINITE, XN_ABSOLUTE);
+	if (ret == -ETIMEDOUT) {
+		sporadic_note_late_drop(thread->sched);
+		sporadic_drop_handler(&pss->drop_timer);
+	} else
+		sporadic_note_valid_drop(thread->sched);
+}
+
+static void sporadic_replenish_handler(struct xntimer *timer)
+{
+	struct xnsched_sporadic_data *pss;
+	union xnsched_policy_param p;
+	struct xnthread *thread;
+	xnticks_t now;
+	int r, ret;
+
+	pss = container_of(timer, struct xnsched_sporadic_data, repl_timer);
+	thread = pss->thread;
+	XENO_BUG_ON(COBALT, pss->repl_pending <= 0);
+
+retry:
+	now = xnclock_read_monotonic(&nkclock);
+
+	do {
+		r = pss->repl_out;
+		if ((xnsticks_t)(now - pss->repl_data[r].date) <= 0)
+			break;
+		pss->budget += pss->repl_data[r].amount;
+		if (pss->budget > pss->param.init_budget)
+			pss->budget = pss->param.init_budget;
+		pss->repl_out = (r + 1) % MAX_REPLENISH;
+	} while (--pss->repl_pending > 0);
+
+	if (pss->repl_pending > 0) {
+		xntimer_set_affinity(&pss->repl_timer, thread->sched);
+		ret = xntimer_start(&pss->repl_timer, pss->repl_data[r].date,
+				    XN_INFINITE, XN_ABSOLUTE);
+		if (ret == -ETIMEDOUT)
+			goto retry; /* This plugs a tiny race. */
+	}
+
+	if (pss->budget == 0)
+		return;
+
+	if (xnthread_test_state(thread, XNHELD))
+		xnthread_resume(thread, XNHELD);
+	else if (thread->cprio < pss->param.normal_prio) {
+		p.pss.init_budget = 0;
+		p.pss.current_prio = pss->param.normal_prio;
+		/* Move sporadic thread to the foreground. */
+		__xnthread_set_schedparam(thread, &xnsched_class_sporadic, &p);
+	}
+
+	/*
+	 * XXX: we have to reset the drop timer in case we preempted
+	 * the thread which just got a budget increase.
+	 */
+	if (thread->sched->curr == thread)
+		sporadic_schedule_drop(thread);
+}
+
+static void sporadic_post_recharge(struct xnthread *thread, xnticks_t budget)
+{
+	struct xnsched_sporadic_data *pss = thread->pss;
+	int r, ret;
+
+	if (pss->repl_pending >= pss->param.max_repl)
+		return;
+
+	if (budget > pss->budget) {
+		budget = pss->budget;
+		pss->budget = 0;
+	} else
+		pss->budget -= budget;
+
+	r = pss->repl_in;
+	pss->repl_data[r].date = pss->resume_date + pss->param.repl_period;
+	pss->repl_data[r].amount = budget;
+	pss->repl_in = (r + 1) % MAX_REPLENISH;
+
+	if (pss->repl_pending++ == 0) {
+		xntimer_set_affinity(&pss->repl_timer, thread->sched);
+		ret = xntimer_start(&pss->repl_timer, pss->repl_data[r].date,
+				    XN_INFINITE, XN_ABSOLUTE);
+		/*
+		 * The following case should not happen unless the
+		 * initial budget value is inappropriate, but let's
+		 * handle it anyway.
+		 */
+		if (ret == -ETIMEDOUT)
+			sporadic_replenish_handler(&pss->repl_timer);
+	}
+}
+
+static void sporadic_suspend_activity(struct xnthread *thread)
+{
+	struct xnsched_sporadic_data *pss = thread->pss;
+	xnticks_t budget, now;
+
+	if (pss->budget > 0) {
+		xntimer_stop(&pss->drop_timer);
+		now = xnclock_read_monotonic(&nkclock);
+		budget = sporadic_diff_time(now, pss->resume_date);
+		sporadic_post_recharge(thread, budget);
+	}
+}
+
+static inline void sporadic_resume_activity(struct xnthread *thread)
+{
+	if (thread->pss->budget > 0)
+		sporadic_schedule_drop(thread);
+}
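+
+/*
+ * Lifecycle sketch (illustrative figures): with a 10 ms budget and a
+ * 100 ms replenishment period, a thread running at normal_prio
+ * exhausts its budget, the drop timer fires and sporadic_drop_handler()
+ * demotes it to low_prio (or holds it when low_prio == -1), while the
+ * consumed amount is queued by sporadic_post_recharge(). Once the
+ * replenishment date (resume_date + repl_period) elapses,
+ * sporadic_replenish_handler() restores the budget and moves the
+ * thread back to normal_prio.
+ */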
+
+static void xnsched_sporadic_init(struct xnsched *sched)
+{
+	/*
+	 * We literally stack the sporadic scheduler on top of the RT
+	 * one, reusing its run queue directly. This way, RT and
+	 * sporadic threads are merged into the same run queue and
+	 * thus share the same priority scale, with the addition of
+	 * budget management for the sporadic ones.
+	 */
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	sched->pss.drop_retries = 0;
+#endif
+}
+
+static bool xnsched_sporadic_setparam(struct xnthread *thread,
+				      const union xnsched_policy_param *p)
+{
+	struct xnsched_sporadic_data *pss = thread->pss;
+	bool effective;
+
+	xnthread_clear_state(thread, XNWEAK);
+	effective = xnsched_set_effective_priority(thread, p->pss.current_prio);
+
+	/*
+	 * We use the budget information to determine whether we got
+	 * here from one of our internal calls to
+	 * xnthread_set_schedparam(), in which case we don't want to
+	 * update the scheduling parameters, but only set the
+	 * effective priority.
+	 */
+	if (p->pss.init_budget > 0) {
+		pss->param = p->pss;
+		pss->budget = p->pss.init_budget;
+		pss->repl_in = 0;
+		pss->repl_out = 0;
+		pss->repl_pending = 0;
+		if (effective && thread == thread->sched->curr) {
+			xntimer_stop(&pss->drop_timer);
+			sporadic_schedule_drop(thread);
+		}
+	}
+
+	return effective;
+}
+
+static void xnsched_sporadic_getparam(struct xnthread *thread,
+				      union xnsched_policy_param *p)
+{
+	p->pss = thread->pss->param;
+	p->pss.current_prio = thread->cprio;
+}
+
+static void xnsched_sporadic_trackprio(struct xnthread *thread,
+				       const union xnsched_policy_param *p)
+{
+	if (p)
+		thread->cprio = p->pss.current_prio;
+	else
+		thread->cprio = thread->bprio;
+}
+
+static void xnsched_sporadic_protectprio(struct xnthread *thread, int prio)
+{
+	if (prio > XNSCHED_SPORADIC_MAX_PRIO)
+		prio = XNSCHED_SPORADIC_MAX_PRIO;
+
+	thread->cprio = prio;
+}
+
+static int xnsched_sporadic_chkparam(struct xnthread *thread,
+				     const union xnsched_policy_param *p)
+{
+	if (p->pss.low_prio != -1 &&
+	    (p->pss.low_prio < XNSCHED_SPORADIC_MIN_PRIO ||
+	     p->pss.low_prio > XNSCHED_SPORADIC_MAX_PRIO))
+		return -EINVAL;
+
+	if (p->pss.normal_prio < XNSCHED_SPORADIC_MIN_PRIO ||
+	    p->pss.normal_prio > XNSCHED_SPORADIC_MAX_PRIO)
+		return -EINVAL;
+
+	if (p->pss.init_budget == 0)
+		return -EINVAL;
+
+	if (p->pss.current_prio != p->pss.normal_prio)
+		return -EINVAL;
+
+	if (p->pss.repl_period < p->pss.init_budget)
+		return -EINVAL;
+
+	if (p->pss.normal_prio <= p->pss.low_prio)
+		return -EINVAL;
+
+	if (p->pss.max_repl < 1 || p->pss.max_repl > MAX_REPLENISH)
+		return -EINVAL;
+
+	return 0;
+}
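+
+/*
+ * Illustrative parameter set (not part of the original code) which
+ * would pass the checks above, assuming nanosecond-based clock values
+ * and priorities within the sporadic class range:
+ *
+ *	union xnsched_policy_param p = {
+ *		.pss = {
+ *			.current_prio = 50,
+ *			.normal_prio = 50,
+ *			.low_prio = 10,
+ *			.init_budget = 10000000,	(10 ms)
+ *			.repl_period = 100000000,	(100 ms)
+ *			.max_repl = 4,
+ *		},
+ *	};
+ */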
+
+static int xnsched_sporadic_declare(struct xnthread *thread,
+				    const union xnsched_policy_param *p)
+{
+	struct xnsched_sporadic_data *pss;
+
+	pss = xnmalloc(sizeof(*pss));
+	if (pss == NULL)
+		return -ENOMEM;
+
+	xntimer_init(&pss->repl_timer, &nkclock, sporadic_replenish_handler,
+		     thread->sched, XNTIMER_IGRAVITY);
+	xntimer_set_name(&pss->repl_timer, "pss-replenish");
+	xntimer_init(&pss->drop_timer, &nkclock, sporadic_drop_handler,
+		     thread->sched, XNTIMER_IGRAVITY);
+	xntimer_set_name(&pss->drop_timer, "pss-drop");
+
+	thread->pss = pss;
+	pss->thread = thread;
+
+	return 0;
+}
+
+static void xnsched_sporadic_forget(struct xnthread *thread)
+{
+	struct xnsched_sporadic_data *pss = thread->pss;
+
+	xntimer_destroy(&pss->repl_timer);
+	xntimer_destroy(&pss->drop_timer);
+	xnfree(pss);
+	thread->pss = NULL;
+}
+
+static void xnsched_sporadic_enqueue(struct xnthread *thread)
+{
+	__xnsched_rt_enqueue(thread);
+}
+
+static void xnsched_sporadic_dequeue(struct xnthread *thread)
+{
+	__xnsched_rt_dequeue(thread);
+}
+
+static void xnsched_sporadic_requeue(struct xnthread *thread)
+{
+	__xnsched_rt_requeue(thread);
+}
+
+static struct xnthread *xnsched_sporadic_pick(struct xnsched *sched)
+{
+	struct xnthread *curr = sched->curr, *next;
+
+	next = xnsched_getq(&sched->rt.runnable);
+	if (next == NULL)
+		goto swap;
+
+	if (curr == next)
+		return next;
+
+	/* Arm the drop timer for an incoming sporadic thread. */
+	if (next->pss)
+		sporadic_resume_activity(next);
+swap:
+	/*
+	 * A non-sporadic outgoing thread is undergoing a priority
+	 * inheritance boost, so it runs on an infinite time budget as
+	 * we want it to release the claimed resource asap. Otherwise,
+	 * clear the drop timer, then schedule a replenishment
+	 * operation.
+	 */
+	if (curr->pss)
+		sporadic_suspend_activity(curr);
+
+	return next;
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+struct xnvfile_directory sched_sporadic_vfroot;
+
+struct vfile_sched_sporadic_priv {
+	int nrthreads;
+	struct xnthread *curr;
+};
+
+struct vfile_sched_sporadic_data {
+	int cpu;
+	pid_t pid;
+	char name[XNOBJECT_NAME_LEN];
+	int current_prio;
+	int low_prio;
+	int normal_prio;
+	xnticks_t period;
+	xnticks_t timeout;
+	xnticks_t budget;
+};
+
+static struct xnvfile_snapshot_ops vfile_sched_sporadic_ops;
+
+static struct xnvfile_snapshot vfile_sched_sporadic = {
+	.privsz = sizeof(struct vfile_sched_sporadic_priv),
+	.datasz = sizeof(struct vfile_sched_sporadic_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_sched_sporadic_ops,
+};
+
+static int vfile_sched_sporadic_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_sched_sporadic_priv *priv = xnvfile_iterator_priv(it);
+	int nrthreads = xnsched_class_sporadic.nthreads;
+
+	if (nrthreads == 0)
+		return -ESRCH;
+
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+
+	return nrthreads;
+}
+
+static int vfile_sched_sporadic_next(struct xnvfile_snapshot_iterator *it,
+				     void *data)
+{
+	struct vfile_sched_sporadic_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_sched_sporadic_data *p = data;
+	struct xnthread *thread;
+
+	if (priv->curr == NULL)
+		return 0;	/* All done. */
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	if (thread->base_class != &xnsched_class_sporadic)
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = xnsched_cpu(thread->sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->current_prio = thread->cprio;
+	p->low_prio = thread->pss->param.low_prio;
+	p->normal_prio = thread->pss->param.normal_prio;
+	p->period = xnthread_get_period(thread);
+	p->budget = thread->pss->param.init_budget;
+
+	return 1;
+}
+
+static int vfile_sched_sporadic_show(struct xnvfile_snapshot_iterator *it,
+				     void *data)
+{
+	char lpbuf[16], npbuf[16], ptbuf[16], btbuf[16];
+	struct vfile_sched_sporadic_data *p = data;
+
+	if (p == NULL)
+		xnvfile_printf(it,
+			       "%-3s  %-6s %-4s %-4s  %-10s %-10s %s\n",
+			       "CPU", "PID", "LPRI", "NPRI", "BUDGET",
+			       "PERIOD", "NAME");
+	else {
+		ksformat(lpbuf, sizeof(lpbuf), "%3d%c",
+			 p->low_prio, p->current_prio == p->low_prio ? '*' : ' ');
+
+		ksformat(npbuf, sizeof(npbuf), "%3d%c",
+			 p->normal_prio, p->current_prio == p->normal_prio ? '*' : ' ');
+
+		xntimer_format_time(p->period, ptbuf, sizeof(ptbuf));
+		xntimer_format_time(p->budget, btbuf, sizeof(btbuf));
+
+		xnvfile_printf(it,
+			       "%3u  %-6d %-4s %-4s  %-10s %-10s %s\n",
+			       p->cpu,
+			       p->pid,
+			       lpbuf,
+			       npbuf,
+			       btbuf,
+			       ptbuf,
+			       p->name);
+	}
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_sched_sporadic_ops = {
+	.rewind = vfile_sched_sporadic_rewind,
+	.next = vfile_sched_sporadic_next,
+	.show = vfile_sched_sporadic_show,
+};
+
+static int xnsched_sporadic_init_vfile(struct xnsched_class *schedclass,
+				       struct xnvfile_directory *vfroot)
+{
+	int ret;
+
+	ret = xnvfile_init_dir(schedclass->name,
+			       &sched_sporadic_vfroot, vfroot);
+	if (ret)
+		return ret;
+
+	return xnvfile_init_snapshot("threads", &vfile_sched_sporadic,
+				     &sched_sporadic_vfroot);
+}
+
+static void xnsched_sporadic_cleanup_vfile(struct xnsched_class *schedclass)
+{
+	xnvfile_destroy_snapshot(&vfile_sched_sporadic);
+	xnvfile_destroy_dir(&sched_sporadic_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct xnsched_class xnsched_class_sporadic = {
+	.sched_init		=	xnsched_sporadic_init,
+	.sched_enqueue		=	xnsched_sporadic_enqueue,
+	.sched_dequeue		=	xnsched_sporadic_dequeue,
+	.sched_requeue		=	xnsched_sporadic_requeue,
+	.sched_pick		=	xnsched_sporadic_pick,
+	.sched_tick		=	NULL,
+	.sched_rotate		=	NULL,
+	.sched_migrate		=	NULL,
+	.sched_chkparam		=	xnsched_sporadic_chkparam,
+	.sched_setparam		=	xnsched_sporadic_setparam,
+	.sched_getparam		=	xnsched_sporadic_getparam,
+	.sched_trackprio	=	xnsched_sporadic_trackprio,
+	.sched_protectprio	=	xnsched_sporadic_protectprio,
+	.sched_declare		=	xnsched_sporadic_declare,
+	.sched_forget		=	xnsched_sporadic_forget,
+	.sched_kick		=	NULL,
+#ifdef CONFIG_XENO_OPT_VFILE
+	.sched_init_vfile	=	xnsched_sporadic_init_vfile,
+	.sched_cleanup_vfile	=	xnsched_sporadic_cleanup_vfile,
+#endif
+	.weight			=	XNSCHED_CLASS_WEIGHT(3),
+	.policy			=	SCHED_SPORADIC,
+	.name			=	"pss"
+};
+EXPORT_SYMBOL_GPL(xnsched_class_sporadic);
+++ linux-patched/kernel/xenomai/vfile.c	2022-03-21 12:58:28.875893974 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/init.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/stdarg.h>
+#include <linux/ctype.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/vfile.h>
+#include <asm/xenomai/wrappers.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_vfile Virtual file services
+ *
+ * Virtual files provide a means to export Xenomai object states to
+ * user-space, based on common kernel interfaces.  This encapsulation
+ * is aimed at:
+ *
+ * - supporting consistent collection of very large record-based
+ * output, without incurring latency peaks for ongoing real-time
+ * activities.
+ *
+ * - in the future, hiding discrepancies between Linux kernel
+ * releases, regarding the proper way to export kernel object states
+ * to userland, either via the /proc interface or by any other means.
+ *
+ * This virtual file implementation offers record-based read support
+ * based on seq_files, single-buffer write support, directory and link
+ * handling, all visible from the /proc namespace.
+ *
+ * The vfile support exposes four filesystem object types:
+ *
+ * - snapshot-driven file (struct xnvfile_snapshot). This is commonly
+ * used to export real-time object states via the /proc filesystem. To
+ * minimize the latency involved in protecting the vfile routines from
+ * changes applied by real-time code on such objects, a snapshot of
+ * the data to output is first taken under proper locking, before the
+ * collected data is formatted and sent out in a lockless manner.
+ *
+ * Because a large number of records may have to be output, the data
+ * collection phase is not strictly atomic as a whole, but only
+ * protected at record level. The vfile implementation can be notified
+ * of updates to the underlying data set, and restart the collection
+ * from scratch until the snapshot is fully consistent.
+ *
+ * - regular sequential file (struct xnvfile_regular). This is
+ * basically an encapsulated sequential file object as available from
+ * the host kernel (i.e. seq_file), with a few additional features
+ * making it handier in a Xenomai environment, such as implicit
+ * locking support and a shortened declaration form for the simplest,
+ * single-record output.
+ *
+ * - virtual link (struct xnvfile_link). This is a symbolic link
+ * feature integrated with the vfile semantics. The link target is
+ * computed dynamically at creation time from a user-given helper
+ * routine.
+ *
+ * - virtual directory (struct xnvfile_directory). A directory object,
+ * which can be used to create a hierarchy for ordering a set of vfile
+ * objects.
+ *
+ *@{*/
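+
+/*
+ * Minimal usage sketch (illustrative, hypothetical "foo" names): a
+ * single-record regular vfile only needs a show() handler, and is
+ * then registered under the /proc/xenomai root directory:
+ *
+ *	static int foo_vfile_show(struct xnvfile_regular_iterator *it,
+ *				  void *data)
+ *	{
+ *		xnvfile_printf(it, "%d\n", 42);
+ *		return 0;
+ *	}
+ *
+ *	static struct xnvfile_regular_ops foo_vfile_ops = {
+ *		.show = foo_vfile_show,
+ *	};
+ *
+ *	static struct xnvfile_regular foo_vfile = {
+ *		.ops = &foo_vfile_ops,
+ *	};
+ *
+ *	xnvfile_init_regular("foo", &foo_vfile, &cobalt_vfroot);
+ */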
+
+/**
+ * @var struct xnvfile_directory cobalt_vfroot
+ * @brief Xenomai vfile root directory
+ *
+ * This vdir maps the /proc/xenomai directory. It can be used to
+ * create a hierarchy of Xenomai-related vfiles under this root.
+ */
+struct xnvfile_directory cobalt_vfroot;
+EXPORT_SYMBOL_GPL(cobalt_vfroot);
+
+static struct xnvfile_directory sysroot;
+
+static void *vfile_snapshot_start(struct seq_file *seq, loff_t *offp)
+{
+	struct xnvfile_snapshot_iterator *it = seq->private;
+	loff_t pos = *offp;
+
+	if (pos > it->nrdata)
+		return NULL;
+
+	if (pos == 0)
+		return SEQ_START_TOKEN;
+
+	return it->databuf + (pos - 1) * it->vfile->datasz;
+}
+
+static void *vfile_snapshot_next(struct seq_file *seq, void *v, loff_t *offp)
+{
+	struct xnvfile_snapshot_iterator *it = seq->private;
+	loff_t pos = *offp;
+
+	++*offp;
+
+	if (pos >= it->nrdata)
+		return NULL;
+
+	return it->databuf + pos * it->vfile->datasz;
+}
+
+static void vfile_snapshot_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int vfile_snapshot_show(struct seq_file *seq, void *v)
+{
+	struct xnvfile_snapshot_iterator *it = seq->private;
+	void *data = v == SEQ_START_TOKEN ? NULL : v;
+	int ret;
+
+	ret = it->vfile->ops->show(it, data);
+
+	return ret == VFILE_SEQ_SKIP ? SEQ_SKIP : ret;
+}
+
+static struct seq_operations vfile_snapshot_ops = {
+	.start = vfile_snapshot_start,
+	.next = vfile_snapshot_next,
+	.stop = vfile_snapshot_stop,
+	.show = vfile_snapshot_show
+};
+
+static void vfile_snapshot_free(struct xnvfile_snapshot_iterator *it, void *buf)
+{
+	kfree(buf);
+}
+
+static int vfile_snapshot_open(struct inode *inode, struct file *file)
+{
+	struct xnvfile_snapshot *vfile = PDE_DATA(inode);
+	struct xnvfile_snapshot_ops *ops = vfile->ops;
+	struct xnvfile_snapshot_iterator *it;
+	int revtag, ret, nrdata;
+	struct seq_file *seq;
+	caddr_t data;
+
+	WARN_ON_ONCE(file->private_data != NULL);
+
+	if ((file->f_mode & FMODE_WRITE) != 0 && ops->store == NULL)
+		return -EACCES;
+
+	/*
+	 * Make sure to create the seq_file backend only when reading
+	 * from the v-file is possible.
+	 */
+	if ((file->f_mode & FMODE_READ) == 0) {
+		file->private_data = NULL;
+		return 0;
+	}
+
+	if ((file->f_flags & O_EXCL) != 0 && xnvfile_nref(vfile) > 0)
+		return -EBUSY;
+
+	it = kzalloc(sizeof(*it) + vfile->privsz, GFP_KERNEL);
+	if (it == NULL)
+		return -ENOMEM;
+
+	it->vfile = vfile;
+	xnvfile_file(vfile) = file;
+
+	ret = vfile->entry.lockops->get(&vfile->entry);
+	if (ret)
+		goto fail;
+redo:
+	/*
+	 * The ->rewind() method is optional; there may be cases where
+	 * we don't have to take an atomic snapshot of the v-file
+	 * contents before proceeding. In case ->rewind() detects a
+	 * stale backend object, it can force us to bail out.
+	 *
+	 * If present, ->rewind() may return a strictly positive
+	 * value, indicating how many records at most may be returned
+	 * by ->next(). We use this hint to allocate the snapshot
+	 * buffer, in case ->begin() is not provided. The size of this
+	 * buffer would then be vfile->datasz * hint value.
+	 *
+	 * If ->begin() is given, we always expect the latter to do the
+	 * allocation for us regardless of the hint value. Otherwise,
+	 * a zero return from ->rewind() tells us that the vfile won't
+	 * output any snapshot data via ->show().
+	 */
+	nrdata = 0;
+	if (ops->rewind) {
+		nrdata = ops->rewind(it);
+		if (nrdata < 0) {
+			ret = nrdata;
+			vfile->entry.lockops->put(&vfile->entry);
+			goto fail;
+		}
+	}
+	revtag = vfile->tag->rev;
+
+	vfile->entry.lockops->put(&vfile->entry);
+
+	/* Release the data buffer, in case we had to restart. */
+	if (it->databuf) {
+		it->endfn(it, it->databuf);
+		it->databuf = NULL;
+	}
+
+	/*
+	 * Having no record to output is fine, in which case ->begin()
+	 * shall return VFILE_SEQ_EMPTY if present. ->begin() may be
+	 * absent, meaning that no allocation is even required to
+	 * collect the records to output. NULL is kept for allocation
+	 * errors in all other cases.
+	 */
+	if (ops->begin) {
+		XENO_BUG_ON(COBALT, ops->end == NULL);
+		data = ops->begin(it);
+		if (data == NULL) {
+			kfree(it);
+			return -ENOMEM;
+		}
+		if (data != VFILE_SEQ_EMPTY) {
+			it->databuf = data;
+			it->endfn = ops->end;
+		}
+	} else if (nrdata > 0 && vfile->datasz > 0) {
+		/* We have a hint for auto-allocation. */
+		data = kmalloc(vfile->datasz * nrdata, GFP_KERNEL);
+		if (data == NULL) {
+			kfree(it);
+			return -ENOMEM;
+		}
+		it->databuf = data;
+		it->endfn = vfile_snapshot_free;
+	}
+
+	it->nrdata = 0;
+	data = it->databuf;
+	if (data == NULL)
+		goto done;
+
+	/*
+	 * Take a snapshot of the vfile contents, redo if the revision
+	 * tag of the scanned data set changed concurrently.
+	 */
+	for (;;) {
+		ret = vfile->entry.lockops->get(&vfile->entry);
+		if (ret)
+			goto fail;
+		if (vfile->tag->rev != revtag)
+			goto redo;
+		ret = ops->next(it, data);
+		vfile->entry.lockops->put(&vfile->entry);
+		if (ret < 0)
+			goto fail;
+		if (ret == 0)
+			break;
+		if (ret != VFILE_SEQ_SKIP) {
+			data += vfile->datasz;
+			it->nrdata++;
+		}
+	}
+
+done:
+	ret = seq_open(file, &vfile_snapshot_ops);
+	if (ret)
+		goto fail;
+
+	seq = file->private_data;
+	it->seq = seq;
+	seq->private = it;
+	xnvfile_nref(vfile)++;
+
+	return 0;
+
+fail:
+	if (it->databuf)
+		it->endfn(it, it->databuf);
+	kfree(it);
+
+	return ret;
+}
+
+static int vfile_snapshot_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = file->private_data;
+	struct xnvfile_snapshot_iterator *it;
+
+	if (seq) {
+		it = seq->private;
+		if (it) {
+			--xnvfile_nref(it->vfile);
+			XENO_BUG_ON(COBALT, it->vfile->entry.refcnt < 0);
+			if (it->databuf)
+				it->endfn(it, it->databuf);
+			kfree(it);
+		}
+
+		return seq_release(inode, file);
+	}
+
+	return 0;
+}
+
+ssize_t vfile_snapshot_write(struct file *file, const char __user *buf,
+			     size_t size, loff_t *ppos)
+{
+	struct xnvfile_snapshot *vfile =
+		PDE_DATA(file->f_path.dentry->d_inode);
+	struct xnvfile_input input;
+	ssize_t ret;
+
+	if (vfile->entry.lockops) {
+		ret = vfile->entry.lockops->get(&vfile->entry);
+		if (ret)
+			return ret;
+	}
+
+	input.u_buf = buf;
+	input.size = size;
+	input.vfile = &vfile->entry;
+
+	ret = vfile->ops->store(&input);
+
+	if (vfile->entry.lockops)
+		vfile->entry.lockops->put(&vfile->entry);
+
+	return ret;
+}
+
+static const DEFINE_PROC_OPS(vfile_snapshot_fops,
+			vfile_snapshot_open,
+			vfile_snapshot_release,
+			seq_read,
+			vfile_snapshot_write);
+
+/**
+ * @fn int xnvfile_init_snapshot(const char *name, struct xnvfile_snapshot *vfile, struct xnvfile_directory *parent)
+ * @brief Initialize a snapshot-driven vfile.
+ *
+ * @param name The name which should appear in the pseudo-filesystem,
+ * identifying the vfile entry.
+ *
+ * @param vfile A pointer to a vfile descriptor to initialize
+ * from. The following fields in this structure should be filled in
+ * prior to calling this routine:
+ *
+ * - .privsz is the size (in bytes) of the private data area to be
+ * reserved in the @ref snapshot_iterator "vfile iterator". A zero
+ * value indicates that no private area should be reserved.
+ *
+ * - .datasz is the size (in bytes) of a single record to be collected
+ * by the @ref snapshot_next "next() handler" from the @ref
+ * snapshot_ops "operation descriptor".
+ *
+ * - .tag is a pointer to a mandatory vfile revision tag structure
+ * (struct xnvfile_rev_tag). This tag will be monitored for changes by
+ * the vfile core while collecting data to output, so that any update
+ * detected will cause the current snapshot data to be dropped, and
+ * the collection to restart from the beginning. To this end, any
+ * change to the data which may be part of the collected records,
+ * should also invoke xnvfile_touch() on the associated tag.
+ *
+ * - entry.lockops is a pointer to a @ref vfile_lockops "lock descriptor",
+ * defining the lock and unlock operations for the vfile. This pointer
+ * may be left NULL, in which case the operations on the nucleus
+ * lock (i.e. nklock) will be used internally around calls to data
+ * collection handlers (see @ref snapshot_ops "operation descriptor").
+ *
+ * - .ops is a pointer to an @ref snapshot_ops "operation descriptor".
+ *
+ * @param parent A pointer to a virtual directory descriptor; the
+ * vfile entry will be created into this directory. If NULL, the /proc
+ * root directory will be used. /proc/xenomai is mapped on the
+ * globally available @a cobalt_vfroot vdir.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -ENOMEM is returned if the virtual file entry cannot be created
+ * in the /proc hierarchy.
+ *
+ * @coretags{secondary-only}
+ */
+int xnvfile_init_snapshot(const char *name,
+			  struct xnvfile_snapshot *vfile,
+			  struct xnvfile_directory *parent)
+{
+	struct proc_dir_entry *ppde, *pde;
+	int mode;
+
+	XENO_BUG_ON(COBALT, vfile->tag == NULL);
+
+	if (vfile->entry.lockops == NULL)
+		/* Defaults to nucleus lock */
+		vfile->entry.lockops = &xnvfile_nucleus_lock.ops;
+
+	if (parent == NULL)
+		parent = &sysroot;
+
+	mode = vfile->ops->store ? 0644 : 0444;
+	ppde = parent->entry.pde;
+	pde = proc_create_data(name, mode, ppde, &vfile_snapshot_fops, vfile);
+	if (pde == NULL)
+		return -ENOMEM;
+
+	vfile->entry.pde = pde;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnvfile_init_snapshot);
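+
+/*
+ * Usage sketch (illustrative): code updating a data set exported by a
+ * snapshot vfile is expected to bump the revision tag declared in
+ * .tag so that any collection in progress restarts from scratch;
+ * assuming the xnvfile_touch_tag() helper from the vfile header and
+ * the thread list protected by nklock:
+ *
+ *	xnlock_get_irqsave(&nklock, s);
+ *	list_add_tail(&thread->glink, &nkthreadq);
+ *	xnvfile_touch_tag(&nkthreadlist_tag);
+ *	xnlock_put_irqrestore(&nklock, s);
+ */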
+
+static void *vfile_regular_start(struct seq_file *seq, loff_t *offp)
+{
+	struct xnvfile_regular_iterator *it = seq->private;
+	struct xnvfile_regular *vfile = it->vfile;
+	int ret;
+
+	it->pos = *offp;
+
+	if (vfile->entry.lockops) {
+		ret = vfile->entry.lockops->get(&vfile->entry);
+		if (ret)
+			return ERR_PTR(ret);
+	}
+
+	/*
+	 * If we have no begin() op, then we allow a single call only
+	 * to ->show(), by returning the start token once. Otherwise,
+	 * we are done.
+	 */
+	if (vfile->ops->begin == NULL)
+		return it->pos > 0 ? NULL : SEQ_START_TOKEN;
+
+	return vfile->ops->begin(it);
+}
+
+static void *vfile_regular_next(struct seq_file *seq, void *v, loff_t *offp)
+{
+	struct xnvfile_regular_iterator *it = seq->private;
+	struct xnvfile_regular *vfile = it->vfile;
+	void *data;
+
+	it->pos = ++(*offp);
+
+	if (vfile->ops->next == NULL)
+		return NULL;
+
+	data = vfile->ops->next(it);
+	if (data == NULL)
+		return NULL;
+
+	return data;
+}
+
+static void vfile_regular_stop(struct seq_file *seq, void *v)
+{
+	struct xnvfile_regular_iterator *it = seq->private;
+	struct xnvfile_regular *vfile = it->vfile;
+
+	if (vfile->entry.lockops)
+		vfile->entry.lockops->put(&vfile->entry);
+
+	if (vfile->ops->end)
+		vfile->ops->end(it);
+}
+
+static int vfile_regular_show(struct seq_file *seq, void *v)
+{
+	struct xnvfile_regular_iterator *it = seq->private;
+	struct xnvfile_regular *vfile = it->vfile;
+	void *data = v == SEQ_START_TOKEN ? NULL : v;
+	int ret;
+
+	ret = vfile->ops->show(it, data);
+
+	return ret == VFILE_SEQ_SKIP ? SEQ_SKIP : ret;
+}
+
+static struct seq_operations vfile_regular_ops = {
+	.start = vfile_regular_start,
+	.next = vfile_regular_next,
+	.stop = vfile_regular_stop,
+	.show = vfile_regular_show
+};
+
+static int vfile_regular_open(struct inode *inode, struct file *file)
+{
+	struct xnvfile_regular *vfile = PDE_DATA(inode);
+	struct xnvfile_regular_ops *ops = vfile->ops;
+	struct xnvfile_regular_iterator *it;
+	struct seq_file *seq;
+	int ret;
+
+	if ((file->f_flags & O_EXCL) != 0 && xnvfile_nref(vfile) > 0)
+		return -EBUSY;
+
+	if ((file->f_mode & FMODE_WRITE) != 0 && ops->store == NULL)
+		return -EACCES;
+
+	if ((file->f_mode & FMODE_READ) == 0) {
+		file->private_data = NULL;
+		return 0;
+	}
+
+	it = kzalloc(sizeof(*it) + vfile->privsz, GFP_KERNEL);
+	if (it == NULL)
+		return -ENOMEM;
+
+	it->vfile = vfile;
+	it->pos = -1;
+	xnvfile_file(vfile) = file;
+
+	if (ops->rewind) {
+		ret = ops->rewind(it);
+		if (ret) {
+		fail:
+			kfree(it);
+			return ret;
+		}
+	}
+
+	ret = seq_open(file, &vfile_regular_ops);
+	if (ret)
+		goto fail;
+
+	seq = file->private_data;
+	it->seq = seq;
+	seq->private = it;
+	xnvfile_nref(vfile)++;
+
+	return 0;
+}
+
+static int vfile_regular_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = file->private_data;
+	struct xnvfile_regular_iterator *it;
+
+	if (seq) {
+		it = seq->private;
+		if (it) {
+			--xnvfile_nref(it->vfile);
+			XENO_BUG_ON(COBALT, xnvfile_nref(it->vfile) < 0);
+			kfree(it);
+		}
+
+		return seq_release(inode, file);
+	}
+
+	return 0;
+}
+
+ssize_t vfile_regular_write(struct file *file, const char __user *buf,
+			    size_t size, loff_t *ppos)
+{
+	struct xnvfile_regular *vfile =
+		PDE_DATA(file->f_path.dentry->d_inode);
+	struct xnvfile_input input;
+	ssize_t ret;
+
+	if (vfile->entry.lockops) {
+		ret = vfile->entry.lockops->get(&vfile->entry);
+		if (ret)
+			return ret;
+	}
+
+	input.u_buf = buf;
+	input.size = size;
+	input.vfile = &vfile->entry;
+
+	ret = vfile->ops->store(&input);
+
+	if (vfile->entry.lockops)
+		vfile->entry.lockops->put(&vfile->entry);
+
+	return ret;
+}
+
+static const DEFINE_PROC_OPS(vfile_regular_fops,
+			vfile_regular_open,
+			vfile_regular_release,
+			seq_read,
+			vfile_regular_write);
+
+/**
+ * @fn int xnvfile_init_regular(const char *name, struct xnvfile_regular *vfile, struct xnvfile_directory *parent)
+ * @brief Initialize a regular vfile.
+ *
+ * @param name The name which should appear in the pseudo-filesystem,
+ * identifying the vfile entry.
+ *
+ * @param vfile A pointer to a vfile descriptor to initialize
+ * from. The following fields in this structure should be filled in
+ * prior to calling this routine:
+ *
+ * - .privsz is the size (in bytes) of the private data area to be
+ * reserved in the @ref regular_iterator "vfile iterator". A zero
+ * value indicates that no private area should be reserved.
+ *
+ * - entry.lockops is a pointer to a @ref vfile_lockops "locking
+ * descriptor", defining the lock and unlock operations for the
+ * vfile. This pointer may be left NULL, in which case no
+ * locking will be applied.
+ *
+ * - .ops is a pointer to an @ref regular_ops "operation descriptor".
+ *
+ * @param parent A pointer to a virtual directory descriptor; the
+ * vfile entry will be created into this directory. If NULL, the /proc
+ * root directory will be used. /proc/xenomai is mapped on the
+ * globally available @a cobalt_vfroot vdir.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -ENOMEM is returned if the virtual file entry cannot be created
+ * in the /proc hierarchy.
+ *
+ * @coretags{secondary-only}
+ */
+int xnvfile_init_regular(const char *name,
+			 struct xnvfile_regular *vfile,
+			 struct xnvfile_directory *parent)
+{
+	struct proc_dir_entry *ppde, *pde;
+	int mode;
+
+	if (parent == NULL)
+		parent = &sysroot;
+
+	mode = vfile->ops->store ? 0644 : 0444;
+	ppde = parent->entry.pde;
+	pde = proc_create_data(name, mode, ppde, &vfile_regular_fops, vfile);
+	if (pde == NULL)
+		return -ENOMEM;
+
+	vfile->entry.pde = pde;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnvfile_init_regular);
+
+/**
+ * @fn int xnvfile_init_dir(const char *name, struct xnvfile_directory *vdir, struct xnvfile_directory *parent)
+ * @brief Initialize a virtual directory entry.
+ *
+ * @param name The name which should appear in the pseudo-filesystem,
+ * identifying the vdir entry.
+ *
+ * @param vdir A pointer to the virtual directory descriptor to
+ * initialize.
+ *
+ * @param parent A pointer to a virtual directory descriptor standing
+ * for the parent directory of the new vdir.  If NULL, the /proc root
+ * directory will be used. /proc/xenomai is mapped on the globally
+ * available @a cobalt_vfroot vdir.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -ENOMEM is returned if the virtual directory entry cannot be
+ * created in the /proc hierarchy.
+ *
+ * @coretags{secondary-only}
+ */
+int xnvfile_init_dir(const char *name,
+		     struct xnvfile_directory *vdir,
+		     struct xnvfile_directory *parent)
+{
+	struct proc_dir_entry *ppde, *pde;
+
+	if (parent == NULL)
+		parent = &sysroot;
+
+	ppde = parent->entry.pde;
+	pde = proc_mkdir(name, ppde);
+	if (pde == NULL)
+		return -ENOMEM;
+
+	vdir->entry.pde = pde;
+	vdir->entry.lockops = NULL;
+	vdir->entry.private = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnvfile_init_dir);
+
+/**
+ * @fn int xnvfile_init_link(const char *from, const char *to, struct xnvfile_link *vlink, struct xnvfile_directory *parent)
+ * @brief Initialize a virtual link entry.
+ *
+ * @param from The name which should appear in the pseudo-filesystem,
+ * identifying the vlink entry.
+ *
+ * @param to The target file name which should be referred to
+ * symbolically by @a name.
+ *
+ * @param vlink A pointer to the virtual link descriptor to
+ * initialize.
+ *
+ * @param parent A pointer to a virtual directory descriptor standing
+ * for the parent directory of the new vlink. If NULL, the /proc root
+ * directory will be used. /proc/xenomai is mapped on the globally
+ * available @a cobalt_vfroot vdir.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -ENOMEM is returned if the virtual link entry cannot be created
+ * in the /proc hierarchy.
+ *
+ * @coretags{secondary-only}
+ */
+int xnvfile_init_link(const char *from,
+		      const char *to,
+		      struct xnvfile_link *vlink,
+		      struct xnvfile_directory *parent)
+{
+	struct proc_dir_entry *ppde, *pde;
+
+	if (parent == NULL)
+		parent = &sysroot;
+
+	ppde = parent->entry.pde;
+	pde = proc_symlink(from, ppde, to);
+	if (pde == NULL)
+		return -ENOMEM;
+
+	vlink->entry.pde = pde;
+	vlink->entry.lockops = NULL;
+	vlink->entry.private = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnvfile_init_link);
+
+/**
+ * @fn void xnvfile_destroy(struct xnvfile *vfile)
+ * @brief Remove a virtual file entry.
+ *
+ * @param vfile A pointer to the virtual file descriptor to
+ * remove.
+ *
+ * @coretags{secondary-only}
+ */
+void xnvfile_destroy(struct xnvfile *vfile)
+{
+	proc_remove(vfile->pde);
+}
+EXPORT_SYMBOL_GPL(xnvfile_destroy);
+
+/**
+ * @fn ssize_t xnvfile_get_blob(struct xnvfile_input *input, void *data, size_t size)
+ * @brief Read in a chunk of data written to the vfile.
+ *
+ * When writing to a vfile, the associated store() handler from the
+ * @ref snapshot_store "snapshot-driven vfile" or @ref regular_store
+ * "regular vfile" is called, with a single argument describing the
+ * input data. xnvfile_get_blob() retrieves this data as an untyped
+ * binary blob, and copies it back to the caller's buffer.
+ *
+ * @param input A pointer to the input descriptor passed to the
+ * store() handler.
+ *
+ * @param data The address of the destination buffer to copy the input
+ * data to.
+ *
+ * @param size The maximum number of bytes to copy to the destination
+ * buffer. If the actual data size is larger than @a size, the input
+ * is truncated to @a size.
+ *
+ * @return The number of bytes read and copied to the destination
+ * buffer upon success. Otherwise, a negative error code is returned:
+ *
+ * - -EFAULT indicates an invalid source buffer address.
+ *
+ * @coretags{secondary-only}
+ */
+ssize_t xnvfile_get_blob(struct xnvfile_input *input,
+			 void *data, size_t size)
+{
+	ssize_t nbytes = input->size;
+
+	if (nbytes > size)
+		nbytes = size;
+
+	if (nbytes > 0 && copy_from_user(data, input->u_buf, nbytes))
+		return -EFAULT;
+
+	return nbytes;
+}
+EXPORT_SYMBOL_GPL(xnvfile_get_blob);
+
+/**
+ * @fn ssize_t xnvfile_get_string(struct xnvfile_input *input, char *s, size_t maxlen)
+ * @brief Read in a C-string written to the vfile.
+ *
+ * When writing to a vfile, the associated store() handler from the
+ * @ref snapshot_store "snapshot-driven vfile" or @ref regular_store
+ * "regular vfile" is called, with a single argument describing the
+ * input data. xnvfile_get_string() retrieves this data as a
+ * null-terminated character string, and copies it back to the
+ * caller's buffer.
+ *
+ * @param input A pointer to the input descriptor passed to the
+ * store() handler.
+ *
+ * @param s The address of the destination string buffer to copy the
+ * input data to.
+ *
+ * @param maxlen The maximum number of bytes to copy to the
+ * destination buffer, including the terminating null character. If
+ * the actual string length is larger than @a maxlen, the input is
+ * truncated to @a maxlen.
+ *
+ * @return The number of characters read upon success. Otherwise, a
+ * negative error code is returned:
+ *
+ * - -EFAULT indicates an invalid source buffer address.
+ *
+ * @coretags{secondary-only}
+ */
+ssize_t xnvfile_get_string(struct xnvfile_input *input,
+			   char *s, size_t maxlen)
+{
+	ssize_t nbytes, eol;
+
+	if (maxlen < 1)
+		return -EINVAL;
+
+	nbytes = xnvfile_get_blob(input, s, maxlen - 1);
+	if (nbytes < 0)
+		return nbytes;
+
+	eol = nbytes;
+	if (eol > 0 && s[eol - 1] == '\n')
+		eol--;
+
+	s[eol] = '\0';
+
+	return nbytes;
+}
+EXPORT_SYMBOL_GPL(xnvfile_get_string);
+
+/**
+ * @fn ssize_t xnvfile_get_integer(struct xnvfile_input *input, long *valp)
+ * @brief Evaluate the string written to the vfile as a long integer.
+ *
+ * When writing to a vfile, the associated store() handler from the
+ * @ref snapshot_store "snapshot-driven vfile" or @ref regular_store
+ * "regular vfile" is called, with a single argument describing the
+ * input data. xnvfile_get_integer() retrieves and interprets this
+ * data as a long integer, and copies the resulting value back to @a
+ * valp.
+ *
+ * The long integer can be expressed in decimal, octal or hexadecimal
+ * bases depending on the prefix found.
+ *
+ * @param input A pointer to the input descriptor passed to the
+ * store() handler.
+ *
+ * @param valp The address of a long integer variable to receive the
+ * value.
+ *
+ * @return The number of characters read while evaluating the input as
+ * a long integer upon success. Otherwise, a negative error code is
+ * returned:
+ *
+ * - -EINVAL indicates a parse error on the input stream; the written
+ * text cannot be evaluated as a long integer.
+ *
+ * - -EFAULT indicates an invalid source buffer address.
+ *
+ * @coretags{secondary-only}
+ */
+ssize_t xnvfile_get_integer(struct xnvfile_input *input, long *valp)
+{
+	char *end, buf[32];
+	ssize_t nbytes;
+	long val;
+
+	nbytes = xnvfile_get_blob(input, buf, sizeof(buf) - 1);
+	if (nbytes < 0)
+		return nbytes;
+
+	if (nbytes == 0)
+		return -EINVAL;
+
+	buf[nbytes] = '\0';
+	val = simple_strtol(buf, &end, 0);
+
+	if (*end != '\0' && !isspace(*end))
+		return -EINVAL;
+
+	*valp = val;
+
+	return nbytes;
+}
+EXPORT_SYMBOL_GPL(xnvfile_get_integer);
+
+int __vfile_hostlock_get(struct xnvfile *vfile)
+{
+	struct xnvfile_hostlock_class *lc;
+
+	lc = container_of(vfile->lockops, struct xnvfile_hostlock_class, ops);
+	mutex_lock(&lc->mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__vfile_hostlock_get);
+
+void __vfile_hostlock_put(struct xnvfile *vfile)
+{
+	struct xnvfile_hostlock_class *lc;
+
+	lc = container_of(vfile->lockops, struct xnvfile_hostlock_class, ops);
+	mutex_unlock(&lc->mutex);
+}
+EXPORT_SYMBOL_GPL(__vfile_hostlock_put);
+
+static int __vfile_nklock_get(struct xnvfile *vfile)
+{
+	struct xnvfile_nklock_class *lc;
+
+	lc = container_of(vfile->lockops, struct xnvfile_nklock_class, ops);
+	xnlock_get_irqsave(&nklock, lc->s);
+
+	return 0;
+}
+
+static void __vfile_nklock_put(struct xnvfile *vfile)
+{
+	struct xnvfile_nklock_class *lc;
+
+	lc = container_of(vfile->lockops, struct xnvfile_nklock_class, ops);
+	xnlock_put_irqrestore(&nklock, lc->s);
+}
+
+struct xnvfile_nklock_class xnvfile_nucleus_lock = {
+	.ops = {
+		.get = __vfile_nklock_get,
+		.put = __vfile_nklock_put,
+	},
+};
+
+int __init xnvfile_init_root(void)
+{
+	struct xnvfile_directory *vdir = &cobalt_vfroot;
+	struct proc_dir_entry *pde;
+
+	pde = proc_mkdir("xenomai", NULL);
+	if (pde == NULL)
+		return -ENOMEM;
+
+	vdir->entry.pde = pde;
+	vdir->entry.lockops = NULL;
+	vdir->entry.private = NULL;
+
+	return 0;
+}
+
+void xnvfile_destroy_root(void)
+{
+	cobalt_vfroot.entry.pde = NULL;
+	remove_proc_entry("xenomai", NULL);
+}
+
+/** @} */
+++ linux-patched/kernel/xenomai/init.c	2022-03-21 12:58:28.872894003 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/sched.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <xenomai/version.h>
+#include <pipeline/machine.h>
+#include <pipeline/tick.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/pipe.h>
+#include <cobalt/kernel/select.h>
+#include <cobalt/kernel/vdso.h>
+#include <rtdm/fd.h>
+#include "rtdm/internal.h"
+#include "posix/internal.h"
+#include "procfs.h"
+
+/**
+ * @defgroup cobalt Cobalt
+ *
+ * Cobalt supplements the native Linux kernel in dual kernel
+ * configurations. It deals with all time-critical activities, such as
+ * handling interrupts and scheduling real-time threads. The Cobalt
+ * kernel takes priority over all native kernel activities.
+ *
+ * Cobalt provides an implementation of the POSIX and RTDM interfaces
+ * based on a set of generic RTOS building blocks.
+ */
+
+#ifdef CONFIG_SMP
+static unsigned long supported_cpus_arg = -1;
+module_param_named(supported_cpus, supported_cpus_arg, ulong, 0444);
+#endif /* CONFIG_SMP */
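+
+/*
+ * Note (illustrative): supported_cpus is a bitmask of the CPUs
+ * allowed to run real-time threads; e.g. booting with
+ * "xenomai.supported_cpus=3" restricts Cobalt to CPU0 and CPU1. It
+ * defaults to all online CPUs.
+ */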
+
+static unsigned long sysheap_size_arg;
+module_param_named(sysheap_size, sysheap_size_arg, ulong, 0444);
+
+static char init_state_arg[16] = "enabled";
+module_param_string(state, init_state_arg, sizeof(init_state_arg), 0444);
+
+static BLOCKING_NOTIFIER_HEAD(state_notifier_list);
+
+struct cobalt_pipeline cobalt_pipeline;
+EXPORT_SYMBOL_GPL(cobalt_pipeline);
+
+DEFINE_PER_CPU(struct cobalt_machine_cpudata, cobalt_machine_cpudata);
+EXPORT_PER_CPU_SYMBOL_GPL(cobalt_machine_cpudata);
+
+atomic_t cobalt_runstate = ATOMIC_INIT(COBALT_STATE_WARMUP);
+EXPORT_SYMBOL_GPL(cobalt_runstate);
+
+struct cobalt_ppd cobalt_kernel_ppd = {
+	.exe_path = "vmlinux",
+};
+EXPORT_SYMBOL_GPL(cobalt_kernel_ppd);
+
+#ifdef CONFIG_XENO_OPT_DEBUG
+#define boot_debug_notice "[DEBUG]"
+#else
+#define boot_debug_notice ""
+#endif
+
+#ifdef CONFIG_ENABLE_DEFAULT_TRACERS
+#define boot_evt_trace_notice "[ETRACE]"
+#else
+#define boot_evt_trace_notice ""
+#endif
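+
+/*
+ * boot_lat_trace_notice is referenced by the boot banner below, but
+ * its tracer-dependent definition is not visible in this excerpt;
+ * assume an empty notice whenever it is not provided elsewhere.
+ */
+#ifndef boot_lat_trace_notice
+#define boot_lat_trace_notice ""
+#endif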
+
+#define boot_state_notice						\
+	({								\
+		realtime_core_state() == COBALT_STATE_STOPPED ?		\
+			"[STOPPED]" : "";				\
+	})
+
+void cobalt_add_state_chain(struct notifier_block *nb)
+{
+	blocking_notifier_chain_register(&state_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(cobalt_add_state_chain);
+
+void cobalt_remove_state_chain(struct notifier_block *nb)
+{
+	blocking_notifier_chain_unregister(&state_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(cobalt_remove_state_chain);
+
+void cobalt_call_state_chain(enum cobalt_run_states newstate)
+{
+	blocking_notifier_call_chain(&state_notifier_list, newstate, NULL);
+}
+EXPORT_SYMBOL_GPL(cobalt_call_state_chain);
+
+static void sys_shutdown(void)
+{
+	void *membase;
+
+	pipeline_uninstall_tick_proxy();
+	xnsched_destroy_all();
+	xnregistry_cleanup();
+	membase = xnheap_get_membase(&cobalt_heap);
+	xnheap_destroy(&cobalt_heap);
+	xnheap_vfree(membase);
+}
+
+static struct {
+	const char *label;
+	enum cobalt_run_states state;
+} init_states[] __initdata = {
+	{ "disabled", COBALT_STATE_DISABLED },
+	{ "stopped", COBALT_STATE_STOPPED },
+	{ "enabled", COBALT_STATE_WARMUP },
+};
+
+static void __init setup_init_state(void)
+{
+	static char warn_bad_state[] __initdata =
+		XENO_WARNING "invalid init state '%s'\n";
+	int n;
+
+	for (n = 0; n < ARRAY_SIZE(init_states); n++)
+		if (strcmp(init_states[n].label, init_state_arg) == 0) {
+			set_realtime_core_state(init_states[n].state);
+			return;
+		}
+
+	printk(warn_bad_state, init_state_arg);
+}
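+
+/*
+ * Note (illustrative): the boot state is selected with the "state"
+ * module parameter; e.g. booting with "xenomai.state=stopped"
+ * performs all initializations but does not enable the core timer.
+ */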
+
+static __init int sys_init(void)
+{
+	void *heapaddr;
+	int ret;
+
+	if (sysheap_size_arg == 0)
+		sysheap_size_arg = CONFIG_XENO_OPT_SYS_HEAPSZ;
+
+	heapaddr = xnheap_vmalloc(sysheap_size_arg * 1024);
+	if (heapaddr == NULL ||
+	    xnheap_init(&cobalt_heap, heapaddr, sysheap_size_arg * 1024)) {
+		return -ENOMEM;
+	}
+	xnheap_set_name(&cobalt_heap, "system heap");
+
+	xnsched_init_all();
+
+	xnregistry_init();
+
+	/*
+	 * If starting in stopped mode, do all initializations, but do
+	 * not enable the core timer.
+	 */
+	if (realtime_core_state() == COBALT_STATE_WARMUP) {
+		ret = pipeline_install_tick_proxy();
+		if (ret) {
+			sys_shutdown();
+			return ret;
+		}
+		set_realtime_core_state(COBALT_STATE_RUNNING);
+	}
+
+	return 0;
+}
+
+static int __init xenomai_init(void)
+{
+	int ret, __maybe_unused cpu;
+
+	setup_init_state();
+
+	if (!realtime_core_enabled()) {
+		printk(XENO_WARNING "disabled on kernel command line\n");
+		return 0;
+	}
+
+#ifdef CONFIG_SMP
+	cpumask_clear(&xnsched_realtime_cpus);
+	for_each_online_cpu(cpu) {
+		if (supported_cpus_arg & (1UL << cpu))
+			cpumask_set_cpu(cpu, &xnsched_realtime_cpus);
+	}
+	if (cpumask_empty(&xnsched_realtime_cpus)) {
+		printk(XENO_WARNING "disabled via empty real-time CPU mask\n");
+		set_realtime_core_state(COBALT_STATE_DISABLED);
+		return 0;
+	}
+	if (!cpumask_test_cpu(0, &xnsched_realtime_cpus)) {
+		printk(XENO_ERR "CPU 0 is missing in real-time CPU mask\n");
+		set_realtime_core_state(COBALT_STATE_DISABLED);
+		return -EINVAL;
+	}
+	cobalt_cpu_affinity = xnsched_realtime_cpus;
+#endif /* CONFIG_SMP */
+
+	xnsched_register_classes();
+
+	ret = xnprocfs_init_tree();
+	if (ret)
+		goto fail;
+
+	ret = pipeline_init();
+	if (ret)
+		goto cleanup_proc;
+
+	xnintr_mount();
+
+	ret = xnpipe_mount();
+	if (ret)
+		goto cleanup_mach;
+
+	ret = xnselect_mount();
+	if (ret)
+		goto cleanup_pipe;
+
+	ret = sys_init();
+	if (ret)
+		goto cleanup_select;
+
+	ret = pipeline_late_init();
+	if (ret)
+		goto cleanup_sys;
+
+	ret = rtdm_init();
+	if (ret)
+		goto cleanup_sys;
+
+	ret = cobalt_init();
+	if (ret)
+		goto cleanup_rtdm;
+
+	rtdm_fd_init();
+
+	printk(XENO_INFO "Cobalt v%s %s%s%s%s\n",
+	       XENO_VERSION_STRING,
+	       boot_debug_notice,
+	       boot_lat_trace_notice,
+	       boot_evt_trace_notice,
+	       boot_state_notice);
+
+	return 0;
+
+cleanup_rtdm:
+	rtdm_cleanup();
+cleanup_sys:
+	sys_shutdown();
+cleanup_select:
+	xnselect_umount();
+cleanup_pipe:
+	xnpipe_umount();
+cleanup_mach:
+	pipeline_cleanup();
+cleanup_proc:
+	xnprocfs_cleanup_tree();
+fail:
+	set_realtime_core_state(COBALT_STATE_DISABLED);
+	printk(XENO_ERR "init failed, code %d\n", ret);
+
+	return ret;
+}
+device_initcall(xenomai_init);
+
+/**
+ * @ingroup cobalt
+ * @defgroup cobalt_core Cobalt kernel
+ *
+ * The Cobalt core is a co-kernel which supplements the Linux kernel
+ * for delivering real-time services with very low latency. It
+ * implements a set of generic RTOS building blocks, which the
+ * Cobalt/POSIX and Cobalt/RTDM APIs are based on.  Cobalt activities
+ * take priority over the regular Linux kernel activities.
+ *
+ * @{
+ *
+ * @page cobalt-core-tags Dual kernel service tags
+ *
+ * The Cobalt kernel services may be restricted to particular calling
+ * contexts, or entail specific side-effects. To describe this
+ * information, each service documented by this section bears a set of
+ * tags when applicable.
+ *
+ * The table below matches the tags used throughout the documentation
+ * with the description of their meaning for the caller.
+ *
+ * @par
+ * <b>Context tags</b>
+ * <TABLE>
+ * <TR><TH>Tag</TH> <TH>Context on entry</TH></TR>
+ * <TR><TD>primary-only</TD>	<TD>Must be called from a Cobalt task in primary mode</TD></TR>
+ * <TR><TD>primary-timed</TD>	<TD>Requires a Cobalt task in primary mode if timed</TD></TR>
+ * <TR><TD>coreirq-only</TD>	<TD>Must be called from a Cobalt IRQ handler</TD></TR>
+ * <TR><TD>secondary-only</TD>	<TD>Must be called from a Cobalt task in secondary mode or regular Linux task</TD></TR>
+ * <TR><TD>rtdm-task</TD>	<TD>Must be called from a RTDM driver task</TD></TR>
+ * <TR><TD>mode-unrestricted</TD>	<TD>May be called from a Cobalt task in either primary or secondary mode</TD></TR>
+ * <TR><TD>task-unrestricted</TD>	<TD>May be called from a Cobalt or regular Linux task indifferently</TD></TR>
+ * <TR><TD>unrestricted</TD>	<TD>May be called from any context previously described</TD></TR>
+ * <TR><TD>atomic-entry</TD>	<TD>Caller must currently hold the big Cobalt kernel lock (nklock)</TD></TR>
+ * </TABLE>
+ *
+ * @par
+ * <b>Possible side-effects</b>
+ * <TABLE>
+ * <TR><TH>Tag</TH> <TH>Description</TH></TR>
+ * <TR><TD>might-switch</TD>	<TD>The Cobalt kernel may switch context</TD></TR>
+ * </TABLE>
+ *
+ * @}
+ */
+++ linux-patched/kernel/xenomai/sched.c	2022-03-21 12:58:28.868894042 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/bufd.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/arith.h>
+#include <cobalt/uapi/signal.h>
+#include <pipeline/sched.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/cobalt-core.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_sched Thread scheduling control
+ * @{
+ */
+
+DEFINE_PER_CPU(struct xnsched, nksched);
+EXPORT_PER_CPU_SYMBOL_GPL(nksched);
+
+cpumask_t cobalt_cpu_affinity = CPU_MASK_ALL;
+EXPORT_SYMBOL_GPL(cobalt_cpu_affinity);
+
+LIST_HEAD(nkthreadq);
+
+int cobalt_nrthreads;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+struct xnvfile_rev_tag nkthreadlist_tag;
+#endif
+
+static struct xnsched_class *xnsched_class_highest;
+
+#define for_each_xnsched_class(p) \
+   for (p = xnsched_class_highest; p; p = p->next)
+
+static void xnsched_register_class(struct xnsched_class *sched_class)
+{
+	sched_class->next = xnsched_class_highest;
+	xnsched_class_highest = sched_class;
+
+	/*
+	 * Classes shall be registered by increasing priority order,
+	 * idle first and up.
+	 */
+	XENO_BUG_ON(COBALT, sched_class->next &&
+		   sched_class->next->weight > sched_class->weight);
+
+	printk(XENO_INFO "scheduling class %s registered.\n", sched_class->name);
+}
+
+void xnsched_register_classes(void)
+{
+	xnsched_register_class(&xnsched_class_idle);
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+	xnsched_register_class(&xnsched_class_weak);
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	xnsched_register_class(&xnsched_class_tp);
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	xnsched_register_class(&xnsched_class_sporadic);
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	xnsched_register_class(&xnsched_class_quota);
+#endif
+	xnsched_register_class(&xnsched_class_rt);
+}
+
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+
+static unsigned long wd_timeout_arg = CONFIG_XENO_OPT_WATCHDOG_TIMEOUT;
+module_param_named(watchdog_timeout, wd_timeout_arg, ulong, 0644);
+
+static inline xnticks_t get_watchdog_timeout(void)
+{
+	return wd_timeout_arg * 1000000000ULL;
+}
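+
+/*
+ * For instance (illustrative, assuming the usual Kconfig default of 4
+ * seconds): get_watchdog_timeout() would return 4000000000 ns. The
+ * build-time value may be overridden at boot via the watchdog_timeout
+ * module parameter declared above.
+ */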
+
+/**
+ * @internal
+ * @fn void watchdog_handler(struct xntimer *timer)
+ * @brief Process watchdog ticks.
+ *
+ * This internal routine handles incoming watchdog triggers to detect
+ * software lockups. It forces the offending thread to stop
+ * monopolizing the CPU, either by kicking it out of primary mode if
+ * running in user space, or cancelling it if kernel-based.
+ *
+ * @coretags{coreirq-only, atomic-entry}
+ */
+static void watchdog_handler(struct xntimer *timer)
+{
+	struct xnsched *sched = xnsched_current();
+	struct xnthread *curr = sched->curr;
+
+	/*
+	 * CAUTION: The watchdog tick might have been delayed while we
+	 * were busy switching the CPU to secondary mode, in which case
+	 * it may fire late, once the root thread is resuming. Make
+	 * sure that we are not about to kick the incoming root thread.
+	 */
+	if (xnthread_test_state(curr, XNROOT))
+		return;
+
+	trace_cobalt_watchdog_signal(curr);
+
+	if (xnthread_test_state(curr, XNUSER)) {
+		printk(XENO_WARNING "watchdog triggered on CPU #%d -- runaway thread "
+		       "'%s' signaled\n", xnsched_cpu(sched), curr->name);
+		xnthread_call_mayday(curr, SIGDEBUG_WATCHDOG);
+	} else {
+		printk(XENO_WARNING "watchdog triggered on CPU #%d -- runaway thread "
+		       "'%s' canceled\n", xnsched_cpu(sched), curr->name);
+		/*
+		 * On behalf of an IRQ handler, xnthread_cancel()
+		 * would only go halfway through cancelling the preempted
+		 * thread. Therefore we manually raise XNKICKED to
+		 * cause the next call to xnthread_suspend() to return
+		 * early in XNBREAK condition, and XNCANCELD so that
+		 * @thread exits next time it invokes
+		 * xnthread_test_cancel().
+		 */
+		xnthread_set_info(curr, XNKICKED|XNCANCELD);
+	}
+}
+
+#endif /* CONFIG_XENO_OPT_WATCHDOG */
+
+static void roundrobin_handler(struct xntimer *timer)
+{
+	struct xnsched *sched = container_of(timer, struct xnsched, rrbtimer);
+	xnsched_tick(sched);
+}
+
+static void xnsched_init(struct xnsched *sched, int cpu)
+{
+	char rrbtimer_name[XNOBJECT_NAME_LEN];
+	char htimer_name[XNOBJECT_NAME_LEN];
+	char root_name[XNOBJECT_NAME_LEN];
+	union xnsched_policy_param param;
+	struct xnthread_init_attr attr;
+	struct xnsched_class *p;
+
+#ifdef CONFIG_SMP
+	sched->cpu = cpu;
+	ksformat(htimer_name, sizeof(htimer_name), "[host-timer/%u]", cpu);
+	ksformat(rrbtimer_name, sizeof(rrbtimer_name), "[rrb-timer/%u]", cpu);
+	ksformat(root_name, sizeof(root_name), "ROOT/%u", cpu);
+	cpumask_clear(&sched->resched);
+#else
+	strcpy(htimer_name, "[host-timer]");
+	strcpy(rrbtimer_name, "[rrb-timer]");
+	strcpy(root_name, "ROOT");
+#endif
+	for_each_xnsched_class(p) {
+		if (p->sched_init)
+			p->sched_init(sched);
+	}
+
+	sched->status = 0;
+	sched->lflags = XNIDLE;
+	sched->inesting = 0;
+	sched->curr = &sched->rootcb;
+
+	attr.flags = XNROOT | XNFPU;
+	attr.name = root_name;
+	attr.personality = &xenomai_personality;
+	attr.affinity = *cpumask_of(cpu);
+	param.idle.prio = XNSCHED_IDLE_PRIO;
+
+	__xnthread_init(&sched->rootcb, &attr,
+			sched, &xnsched_class_idle, &param);
+
+	/*
+	 * No direct handler here since the host timer processing is
+	 * postponed to xnintr_irq_handler(), as part of the interrupt
+	 * exit code.
+	 */
+	xntimer_init(&sched->htimer, &nkclock, NULL,
+		     sched, XNTIMER_IGRAVITY);
+	xntimer_set_priority(&sched->htimer, XNTIMER_LOPRIO);
+	xntimer_set_name(&sched->htimer, htimer_name);
+	xntimer_init(&sched->rrbtimer, &nkclock, roundrobin_handler,
+		     sched, XNTIMER_IGRAVITY);
+	xntimer_set_name(&sched->rrbtimer, rrbtimer_name);
+	xntimer_set_priority(&sched->rrbtimer, XNTIMER_LOPRIO);
+
+	xnstat_exectime_set_current(sched, &sched->rootcb.stat.account);
+#ifdef CONFIG_XENO_ARCH_FPU
+	sched->fpuholder = &sched->rootcb;
+#endif /* CONFIG_XENO_ARCH_FPU */
+
+	pipeline_init_root_tcb(&sched->rootcb);
+	list_add_tail(&sched->rootcb.glink, &nkthreadq);
+	cobalt_nrthreads++;
+
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	xntimer_init(&sched->wdtimer, &nkclock, watchdog_handler,
+		     sched, XNTIMER_IGRAVITY);
+	xntimer_set_name(&sched->wdtimer, "[watchdog]");
+	xntimer_set_priority(&sched->wdtimer, XNTIMER_LOPRIO);
+#endif /* CONFIG_XENO_OPT_WATCHDOG */
+}
+
+void xnsched_init_all(void)
+{
+	struct xnsched *sched;
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		sched = &per_cpu(nksched, cpu);
+		xnsched_init(sched, cpu);
+	}
+
+	pipeline_request_resched_ipi(__xnsched_run_handler);
+}
+
+static void xnsched_destroy(struct xnsched *sched)
+{
+	xntimer_destroy(&sched->htimer);
+	xntimer_destroy(&sched->rrbtimer);
+	xntimer_destroy(&sched->rootcb.ptimer);
+	xntimer_destroy(&sched->rootcb.rtimer);
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	xntimer_destroy(&sched->wdtimer);
+#endif /* CONFIG_XENO_OPT_WATCHDOG */
+}
+
+void xnsched_destroy_all(void)
+{
+	struct xnthread *thread, *tmp;
+	struct xnsched *sched;
+	int cpu;
+	spl_t s;
+
+	pipeline_free_resched_ipi();
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/* NOTE: &nkthreadq can't be empty (root thread(s)). */
+	list_for_each_entry_safe(thread, tmp, &nkthreadq, glink) {
+		if (!xnthread_test_state(thread, XNROOT))
+			xnthread_cancel(thread);
+	}
+
+	xnsched_run();
+
+	for_each_online_cpu(cpu) {
+		sched = xnsched_struct(cpu);
+		xnsched_destroy(sched);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+static inline void set_thread_running(struct xnsched *sched,
+				      struct xnthread *thread)
+{
+	xnthread_clear_state(thread, XNREADY);
+	if (xnthread_test_state(thread, XNRRB))
+		xntimer_start(&sched->rrbtimer,
+			      thread->rrperiod, XN_INFINITE, XN_RELATIVE);
+	else
+		xntimer_stop(&sched->rrbtimer);
+}
+
+/* Must be called with nklock locked, interrupts off. */
+struct xnthread *xnsched_pick_next(struct xnsched *sched)
+{
+	struct xnsched_class *p __maybe_unused;
+	struct xnthread *curr = sched->curr;
+	struct xnthread *thread;
+
+	if (!xnthread_test_state(curr, XNTHREAD_BLOCK_BITS | XNZOMBIE)) {
+		/*
+		 * Do not preempt the current thread if it holds the
+		 * scheduler lock.
+		 */
+		if (curr->lock_count > 0) {
+			xnsched_set_self_resched(sched);
+			return curr;
+		}
+		/*
+		 * Push the current thread back to the run queue of
+		 * the scheduling class it belongs to, if not yet
+		 * linked to it (XNREADY tells us if it is).
+		 */
+		if (!xnthread_test_state(curr, XNREADY)) {
+			xnsched_requeue(curr);
+			xnthread_set_state(curr, XNREADY);
+		}
+	}
+
+	/*
+	 * Find the runnable thread having the highest priority among
+	 * all scheduling classes, scanned by decreasing priority.
+	 */
+#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
+	for_each_xnsched_class(p) {
+		thread = p->sched_pick(sched);
+		if (thread) {
+			set_thread_running(sched, thread);
+			return thread;
+		}
+	}
+
+	return NULL; /* Never executed because of the idle class. */
+#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */
+	thread = xnsched_rt_pick(sched);
+	if (unlikely(thread == NULL))
+		thread = &sched->rootcb;
+
+	set_thread_running(sched, thread);
+
+	return thread;
+#endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
+}
+
+void xnsched_lock(void)
+{
+	struct xnsched *sched = xnsched_current();
+	/* See comments in xnsched_run(), ___xnsched_run(). */
+	struct xnthread *curr = READ_ONCE(sched->curr);
+
+	if (sched->lflags & XNINIRQ)
+		return;
+
+	/*
+	 * CAUTION: The fast xnthread_current() accessor carries the
+	 * relevant lock nesting count only if current runs in primary
+	 * mode. Otherwise, if the caller is unknown or relaxed
+	 * Xenomai-wise, then we fall back to the root thread on the
+	 * current scheduler, which must be done with IRQs off.
+	 * Either way, we don't need to grab the super lock.
+	 */
+	XENO_WARN_ON_ONCE(COBALT, (curr->state & XNROOT) &&
+			  !hard_irqs_disabled());
+
+	curr->lock_count++;
+}
+EXPORT_SYMBOL_GPL(xnsched_lock);
+
+void xnsched_unlock(void)
+{
+	struct xnsched *sched = xnsched_current();
+	struct xnthread *curr = READ_ONCE(sched->curr);
+
+	XENO_WARN_ON_ONCE(COBALT, (curr->state & XNROOT) &&
+			  !hard_irqs_disabled());
+
+	if (sched->lflags & XNINIRQ)
+		return;
+
+	if (!XENO_ASSERT(COBALT, curr->lock_count > 0))
+		return;
+
+	if (--curr->lock_count == 0) {
+		xnthread_clear_localinfo(curr, XNLBALERT);
+		xnsched_run();
+	}
+}
+EXPORT_SYMBOL_GPL(xnsched_unlock);
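+
+/*
+ * Illustrative sketch (not part of the original code): the scheduler
+ * lock nests, and the final xnsched_unlock() performs any rescheduling
+ * deferred while the lock was held. update_shared_counters() is a
+ * hypothetical helper shown only to illustrate the calling pattern.
+ *
+ *	xnsched_lock();
+ *	update_shared_counters();     <- no Cobalt preemption in between
+ *	xnsched_unlock();             <- deferred rescheduling may occur here
+ */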
+
+/* nklock locked, interrupts off. */
+void xnsched_putback(struct xnthread *thread)
+{
+	if (xnthread_test_state(thread, XNREADY))
+		xnsched_dequeue(thread);
+	else
+		xnthread_set_state(thread, XNREADY);
+
+	xnsched_enqueue(thread);
+	xnsched_set_resched(thread->sched);
+}
+
+/* nklock locked, interrupts off. */
+int xnsched_set_policy(struct xnthread *thread,
+		       struct xnsched_class *sched_class,
+		       const union xnsched_policy_param *p)
+{
+	struct xnsched_class *orig_effective_class __maybe_unused;
+	bool effective;
+	int ret;
+
+	ret = xnsched_chkparam(sched_class, thread, p);
+	if (ret)
+		return ret;
+
+	/*
+	 * Declaring a thread to a new scheduling class may fail, so
+	 * we do that early, while the thread is still a member of the
+	 * previous class. However, this also means that the
+	 * declaration callback shall not do anything that might
+	 * affect the previous class (such as touching thread->rlink
+	 * for instance).
+	 */
+	if (sched_class != thread->base_class) {
+		ret = xnsched_declare(sched_class, thread, p);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * As a special case, we may be called from __xnthread_init()
+	 * with no previous scheduling class at all.
+	 */
+	if (likely(thread->base_class != NULL)) {
+		if (xnthread_test_state(thread, XNREADY))
+			xnsched_dequeue(thread);
+
+		if (sched_class != thread->base_class)
+			xnsched_forget(thread);
+	}
+
+	/*
+	 * Set the base and effective scheduling parameters. However,
+	 * xnsched_setparam() will deny lowering the effective
+	 * priority if a boost is undergoing, only recording the
+	 * change into the base priority field in such situation.
+	 */
+	thread->base_class = sched_class;
+	/*
+	 * Referring to the effective class from a setparam() handler
+	 * is wrong: make sure to break if so.
+	 */
+	if (XENO_DEBUG(COBALT)) {
+		orig_effective_class = thread->sched_class;
+		thread->sched_class = NULL;
+	}
+
+	/*
+	 * This is the ONLY place where calling xnsched_setparam() is
+	 * legit, sane and safe.
+	 */
+	effective = xnsched_setparam(thread, p);
+	if (effective) {
+		thread->sched_class = sched_class;
+		thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
+	} else if (XENO_DEBUG(COBALT))
+		thread->sched_class = orig_effective_class;
+
+	if (xnthread_test_state(thread, XNREADY))
+		xnsched_enqueue(thread);
+
+	/*
+	 * Make sure not to raise XNSCHED when setting up the root
+	 * thread, so that we can't start rescheduling on interrupt
+	 * exit before all CPUs have their runqueue fully
+	 * built. Filtering on XNROOT here is correct because the root
+	 * thread enters the idle class once as part of the runqueue
+	 * setup process and never leaves it afterwards.
+	 */
+	if (!xnthread_test_state(thread, XNDORMANT|XNROOT))
+		xnsched_set_resched(thread->sched);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnsched_set_policy);
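+
+/*
+ * Illustrative sketch (assumption: the rt member of union
+ * xnsched_policy_param carries a prio field, by analogy with the idle
+ * member used in xnsched_init() above): switching a thread to the core
+ * RT class at priority 42 with nklock held, then committing the change
+ * with xnsched_run() before dropping the lock, as xnsched_destroy_all()
+ * does.
+ *
+ *	union xnsched_policy_param param;
+ *	spl_t s;
+ *	int ret;
+ *
+ *	param.rt.prio = 42;
+ *	xnlock_get_irqsave(&nklock, s);
+ *	ret = xnsched_set_policy(thread, &xnsched_class_rt, &param);
+ *	if (ret == 0)
+ *		xnsched_run();
+ *	xnlock_put_irqrestore(&nklock, s);
+ */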
+
+/* nklock locked, interrupts off. */
+bool xnsched_set_effective_priority(struct xnthread *thread, int prio)
+{
+	int wprio = xnsched_calc_wprio(thread->base_class, prio);
+
+	thread->bprio = prio;
+	if (wprio == thread->wprio)
+		return true;
+
+	/*
+	 * We may not lower the effective/current priority of a
+	 * boosted thread when changing the base scheduling
+	 * parameters. Only xnsched_track_policy() and
+	 * xnsched_protect_priority() may do so when dealing with PI
+	 * and PP synchs resp.
+	 */
+	if (wprio < thread->wprio && xnthread_test_state(thread, XNBOOST))
+		return false;
+
+	thread->cprio = prio;
+
+	trace_cobalt_thread_set_current_prio(thread);
+
+	return true;
+}
+
+/* nklock locked, interrupts off. */
+void xnsched_track_policy(struct xnthread *thread,
+			  struct xnthread *target)
+{
+	union xnsched_policy_param param;
+
+	/*
+	 * Inherit (or reset) the effective scheduling class and
+	 * priority of a thread. Unlike xnsched_set_policy(), this
+	 * routine is allowed to lower the weighted priority with no
+	 * restriction, even if a boost is undergoing.
+	 */
+	if (xnthread_test_state(thread, XNREADY))
+		xnsched_dequeue(thread);
+	/*
+	 * Self-targeting means to reset the scheduling policy and
+	 * parameters to the base settings. Otherwise, make thread
+	 * inherit the scheduling parameters from target.
+	 */
+	if (target == thread) {
+		thread->sched_class = thread->base_class;
+		xnsched_trackprio(thread, NULL);
+		/*
+		 * Per SuSv2, resetting the base scheduling parameters
+		 * should not move the thread to the tail of its
+		 * priority group.
+		 */
+		if (xnthread_test_state(thread, XNREADY))
+			xnsched_requeue(thread);
+
+	} else {
+		xnsched_getparam(target, &param);
+		thread->sched_class = target->sched_class;
+		xnsched_trackprio(thread, &param);
+		if (xnthread_test_state(thread, XNREADY))
+			xnsched_enqueue(thread);
+	}
+
+	trace_cobalt_thread_set_current_prio(thread);
+
+	xnsched_set_resched(thread->sched);
+}
+
+/* nklock locked, interrupts off. */
+void xnsched_protect_priority(struct xnthread *thread, int prio)
+{
+	/*
+	 * Apply a PP boost by changing the effective priority of a
+	 * thread, forcing it to the RT class. Like
+	 * xnsched_track_policy(), this routine is allowed to lower
+	 * the weighted priority with no restriction, even if a boost
+	 * is undergoing.
+	 *
+	 * This routine only deals with active boosts, resetting the
+	 * base priority when leaving a PP boost is obtained by a call
+	 * to xnsched_track_policy().
+	 */
+	if (xnthread_test_state(thread, XNREADY))
+		xnsched_dequeue(thread);
+
+	thread->sched_class = &xnsched_class_rt;
+	xnsched_protectprio(thread, prio);
+
+	if (xnthread_test_state(thread, XNREADY))
+		xnsched_enqueue(thread);
+
+	trace_cobalt_thread_set_current_prio(thread);
+
+	xnsched_set_resched(thread->sched);
+}
+
+static void migrate_thread(struct xnthread *thread, struct xnsched *sched)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (xnthread_test_state(thread, XNREADY)) {
+		xnsched_dequeue(thread);
+		xnthread_clear_state(thread, XNREADY);
+	}
+
+	if (sched_class->sched_migrate)
+		sched_class->sched_migrate(thread, sched);
+	/*
+	 * WARNING: the scheduling class may have just changed as a
+	 * result of calling the per-class migration hook.
+	 */
+	thread->sched = sched;
+}
+
+/*
+ * nklock locked, interrupts off. thread must be runnable.
+ */
+void xnsched_migrate(struct xnthread *thread, struct xnsched *sched)
+{
+	xnsched_set_resched(thread->sched);
+	migrate_thread(thread, sched);
+	/* Move thread to the remote run queue. */
+	xnsched_putback(thread);
+}
+
+/*
+ * nklock locked, interrupts off. Thread may be blocked.
+ */
+void xnsched_migrate_passive(struct xnthread *thread, struct xnsched *sched)
+{
+	struct xnsched *last_sched = thread->sched;
+
+	migrate_thread(thread, sched);
+
+	if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS)) {
+		xnsched_requeue(thread);
+		xnthread_set_state(thread, XNREADY);
+		xnsched_set_resched(last_sched);
+	}
+}
+
+#ifdef CONFIG_XENO_OPT_SCALABLE_SCHED
+
+void xnsched_initq(struct xnsched_mlq *q)
+{
+	int prio;
+
+	q->elems = 0;
+	bitmap_zero(q->prio_map, XNSCHED_MLQ_LEVELS);
+
+	for (prio = 0; prio < XNSCHED_MLQ_LEVELS; prio++)
+		INIT_LIST_HEAD(q->heads + prio);
+}
+
+static inline int get_qindex(struct xnsched_mlq *q, int prio)
+{
+	XENO_BUG_ON(COBALT, prio < 0 || prio >= XNSCHED_MLQ_LEVELS);
+	/*
+	 * BIG FAT WARNING: We need to rescale the priority level to a
+	 * 0-based range. We use find_first_bit() to scan the bitmap
+	 * which is a bit scan forward operation. Therefore, the lower
+	 * the index value, the higher the priority (since least
+	 * significant bits will be found first when scanning the
+	 * bitmap).
+	 */
+	return XNSCHED_MLQ_LEVELS - prio - 1;
+}
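+
+/*
+ * For instance (illustrative): with XNSCHED_MLQ_LEVELS priority
+ * levels, prio 0 maps to index XNSCHED_MLQ_LEVELS - 1 while the
+ * highest prio maps to index 0, so the forward bit scan performed by
+ * xnsched_weightq() locates the highest populated priority level
+ * first.
+ */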
+
+static struct list_head *add_q(struct xnsched_mlq *q, int prio)
+{
+	struct list_head *head;
+	int idx;
+
+	idx = get_qindex(q, prio);
+	head = q->heads + idx;
+	q->elems++;
+
+	/* New item is not linked yet. */
+	if (list_empty(head))
+		__set_bit(idx, q->prio_map);
+
+	return head;
+}
+
+void xnsched_addq(struct xnsched_mlq *q, struct xnthread *thread)
+{
+	struct list_head *head = add_q(q, thread->cprio);
+	list_add(&thread->rlink, head);
+}
+
+void xnsched_addq_tail(struct xnsched_mlq *q, struct xnthread *thread)
+{
+	struct list_head *head = add_q(q, thread->cprio);
+	list_add_tail(&thread->rlink, head);
+}
+
+static void del_q(struct xnsched_mlq *q,
+		  struct list_head *entry, int idx)
+{
+	struct list_head *head = q->heads + idx;
+
+	list_del(entry);
+	q->elems--;
+
+	if (list_empty(head))
+		__clear_bit(idx, q->prio_map);
+}
+
+void xnsched_delq(struct xnsched_mlq *q, struct xnthread *thread)
+{
+	del_q(q, &thread->rlink, get_qindex(q, thread->cprio));
+}
+
+struct xnthread *xnsched_getq(struct xnsched_mlq *q)
+{
+	struct xnthread *thread;
+	struct list_head *head;
+	int idx;
+
+	if (q->elems == 0)
+		return NULL;
+
+	idx = xnsched_weightq(q);
+	head = q->heads + idx;
+	XENO_BUG_ON(COBALT, list_empty(head));
+	thread = list_first_entry(head, struct xnthread, rlink);
+	del_q(q, &thread->rlink, idx);
+
+	return thread;
+}
+
+struct xnthread *xnsched_findq(struct xnsched_mlq *q, int prio)
+{
+	struct list_head *head;
+	int idx;
+
+	idx = get_qindex(q, prio);
+	head = q->heads + idx;
+	if (list_empty(head))
+		return NULL;
+
+	return list_first_entry(head, struct xnthread, rlink);
+}
+
+#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
+
+struct xnthread *xnsched_rt_pick(struct xnsched *sched)
+{
+	struct xnsched_mlq *q = &sched->rt.runnable;
+	struct xnthread *thread;
+	struct list_head *head;
+	int idx;
+
+	if (q->elems == 0)
+		return NULL;
+
+	/*
+	 * Some scheduling policies may be implemented as variants of
+	 * the core SCHED_FIFO class, sharing its runqueue
+	 * (e.g. SCHED_SPORADIC, SCHED_QUOTA). This means that we have
+	 * to do some cascading to call the right pick handler
+	 * eventually.
+	 */
+	idx = xnsched_weightq(q);
+	head = q->heads + idx;
+	XENO_BUG_ON(COBALT, list_empty(head));
+
+	/*
+	 * The active class (i.e. ->sched_class) is the one currently
+	 * queuing the thread, reflecting any priority boost due to
+	 * PI.
+	 */
+	thread = list_first_entry(head, struct xnthread, rlink);
+	if (unlikely(thread->sched_class != &xnsched_class_rt))
+		return thread->sched_class->sched_pick(sched);
+
+	del_q(q, &thread->rlink, idx);
+
+	return thread;
+}
+
+#endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
+
+#else /* !CONFIG_XENO_OPT_SCALABLE_SCHED */
+
+struct xnthread *xnsched_findq(struct list_head *q, int prio)
+{
+	struct xnthread *thread;
+
+	if (list_empty(q))
+		return NULL;
+
+	/* Find thread leading a priority group. */
+	list_for_each_entry(thread, q, rlink) {
+		if (prio == thread->cprio)
+			return thread;
+	}
+
+	return NULL;
+}
+
+#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
+
+struct xnthread *xnsched_rt_pick(struct xnsched *sched)
+{
+	struct list_head *q = &sched->rt.runnable;
+	struct xnthread *thread;
+
+	if (list_empty(q))
+		return NULL;
+
+	thread = list_first_entry(q, struct xnthread, rlink);
+	if (unlikely(thread->sched_class != &xnsched_class_rt))
+		return thread->sched_class->sched_pick(sched);
+
+	list_del(&thread->rlink);
+
+	return thread;
+}
+
+#endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
+
+#endif /* !CONFIG_XENO_OPT_SCALABLE_SCHED */
+
+/**
+ * @fn int xnsched_run(void)
+ * @brief The rescheduling procedure.
+ *
+ * This is the central rescheduling routine which should be called to
+ * validate and apply changes which have previously been made to the
+ * nucleus scheduling state, such as suspending, resuming or changing
+ * the priority of threads.  This call performs context switches as
+ * needed. xnsched_run() schedules out the current thread if:
+ *
+ * - the current thread is about to block.
+ * - a runnable thread from a higher priority scheduling class is
+ * waiting for the CPU.
+ * - the current thread does not lead the runnable threads from its
+ * own scheduling class (i.e. round-robin).
+ *
+ * The Cobalt core implements a lazy rescheduling scheme so that most
+ * of the services affecting the threads state MUST be followed by a
+ * call to the rescheduling procedure for the new scheduling state to
+ * be applied.
+ *
+ * In other words, multiple changes on the scheduler state can be done
+ * in a row, waking threads up, blocking others, without being
+ * immediately translated into the corresponding context switches.
+ * When all changes have been applied, xnsched_run() should be called
+ * to commit them, possibly switching context as a result.
+ *
+ * As a notable exception to the previous principle however, every
+ * action which ends up suspending the current thread begets an
+ * implicit call to the rescheduling procedure on behalf of the
+ * blocking service.
+ *
+ * Typically, self-suspension or sleeping on a synchronization object
+ * automatically leads to a call to the rescheduling procedure,
+ * therefore the caller does not need to explicitly issue
+ * xnsched_run() after such operations.
+ *
+ * The rescheduling procedure always leads to a null-effect if it is
+ * called on behalf of an interrupt service routine. Any outstanding
+ * scheduler lock held by the outgoing thread will be restored when
+ * the thread is scheduled back in.
+ *
+ * Calling this procedure with no applicable context switch pending is
+ * harmless and simply leads to a null-effect.
+ *
+ * @return Non-zero is returned if a context switch actually happened,
+ * otherwise zero if the current thread was left running.
+ *
+ * @coretags{unrestricted}
+ */
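+
+/*
+ * Illustrative sketch (not part of the original code): several
+ * scheduler state changes may be batched under nklock, then committed
+ * by a single call to the rescheduling procedure. wake_two() is a
+ * hypothetical helper given only to show the calling convention; the
+ * state changes are left unswitched until xnsched_run() runs.
+ *
+ *	static void wake_two(struct xnthread *t1, struct xnthread *t2)
+ *	{
+ *		spl_t s;
+ *
+ *		xnlock_get_irqsave(&nklock, s);
+ *		xnthread_resume(t1, XNPEND);
+ *		xnthread_resume(t2, XNPEND);
+ *		xnlock_put_irqrestore(&nklock, s);
+ *
+ *		xnsched_run();
+ *	}
+ */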
+static inline int test_resched(struct xnsched *sched)
+{
+	int resched = xnsched_resched_p(sched);
+#ifdef CONFIG_SMP
+	/* Send resched IPI to remote CPU(s). */
+	if (unlikely(!cpumask_empty(&sched->resched))) {
+		smp_mb();
+		pipeline_send_resched_ipi(&sched->resched);
+		cpumask_clear(&sched->resched);
+	}
+#endif
+	sched->status &= ~XNRESCHED;
+
+	return resched;
+}
+
+static inline void enter_root(struct xnthread *root)
+{
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	xntimer_stop(&root->sched->wdtimer);
+#endif
+}
+
+static inline void leave_root(struct xnthread *root)
+{
+	pipeline_prep_switch_oob(root);
+
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	xntimer_start(&root->sched->wdtimer, get_watchdog_timeout(),
+		      XN_INFINITE, XN_RELATIVE);
+#endif
+}
+
+void __xnsched_run_handler(void) /* hw interrupts off. */
+{
+	trace_cobalt_schedule_remote(xnsched_current());
+	xnsched_run();
+}
+
+static inline void do_lazy_user_work(struct xnthread *curr)
+{
+	xnthread_commit_ceiling(curr);
+}
+
+int ___xnsched_run(struct xnsched *sched)
+{
+	bool switched = false, leaving_inband;
+	struct xnthread *prev, *next, *curr;
+	spl_t s;
+
+	XENO_WARN_ON_ONCE(COBALT, is_secondary_domain());
+
+	trace_cobalt_schedule(sched);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	curr = sched->curr;
+	/*
+	 * CAUTION: xnthread_host_task(curr) may be unsynced and even
+	 * stale if curr = &rootcb, since the task logged by
+	 * leave_root() may not still be the current one. Use
+	 * "current" for disambiguating.
+	 */
+	xntrace_pid(task_pid_nr(current), xnthread_current_priority(curr));
+
+	if (xnthread_test_state(curr, XNUSER))
+		do_lazy_user_work(curr);
+
+	if (!test_resched(sched))
+		goto out;
+
+	next = xnsched_pick_next(sched);
+	if (next == curr) {
+		if (unlikely(xnthread_test_state(next, XNROOT))) {
+			if (sched->lflags & XNHTICK)
+				xnintr_host_tick(sched);
+			if (sched->lflags & XNHDEFER)
+				xnclock_program_shot(&nkclock, sched);
+		}
+		goto out;
+	}
+
+	prev = curr;
+
+	trace_cobalt_switch_context(prev, next);
+
+	/*
+	 * sched->curr is shared locklessly with xnsched_run() and
+	 * xnsched_lock(). WRITE_ONCE() makes sure sched->curr is
+	 * written atomically so that these routines always observe
+	 * consistent values by preventing the compiler from using
+	 * store tearing.
+	 */
+	WRITE_ONCE(sched->curr, next);
+	leaving_inband = false;
+
+	if (xnthread_test_state(prev, XNROOT)) {
+		leave_root(prev);
+		leaving_inband = true;
+	} else if (xnthread_test_state(next, XNROOT)) {
+		if (sched->lflags & XNHTICK)
+			xnintr_host_tick(sched);
+		if (sched->lflags & XNHDEFER)
+			xnclock_program_shot(&nkclock, sched);
+		enter_root(next);
+	}
+
+	xnstat_exectime_switch(sched, &next->stat.account);
+	xnstat_counter_inc(&next->stat.csw);
+
+	if (pipeline_switch_to(prev, next, leaving_inband))
+		/* oob -> in-band transition detected. */
+		return true;
+
+	/*
+	 * Re-read sched->curr for tracing: the current thread may
+	 * have switched from in-band to oob context.
+	 */
+	xntrace_pid(task_pid_nr(current),
+		xnthread_current_priority(xnsched_current()->curr));
+
+	switched = true;
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return !!switched;
+}
+EXPORT_SYMBOL_GPL(___xnsched_run);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static struct xnvfile_directory sched_vfroot;
+
+struct vfile_schedlist_priv {
+	struct xnthread *curr;
+	xnticks_t start_time;
+};
+
+struct vfile_schedlist_data {
+	int cpu;
+	pid_t pid;
+	char name[XNOBJECT_NAME_LEN];
+	char sched_class[XNOBJECT_NAME_LEN];
+	char personality[XNOBJECT_NAME_LEN];
+	int cprio;
+	xnticks_t timeout;
+	int state;
+};
+
+static struct xnvfile_snapshot_ops vfile_schedlist_ops;
+
+static struct xnvfile_snapshot schedlist_vfile = {
+	.privsz = sizeof(struct vfile_schedlist_priv),
+	.datasz = sizeof(struct vfile_schedlist_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_schedlist_ops,
+};
+
+static int vfile_schedlist_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_schedlist_priv *priv = xnvfile_iterator_priv(it);
+
+	/* &nkthreadq cannot be empty (root thread(s)). */
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+	priv->start_time = xnclock_read_monotonic(&nkclock);
+
+	return cobalt_nrthreads;
+}
+
+static int vfile_schedlist_next(struct xnvfile_snapshot_iterator *it,
+				void *data)
+{
+	struct vfile_schedlist_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_schedlist_data *p = data;
+	xnticks_t timeout, period;
+	struct xnthread *thread;
+	xnticks_t base_time;
+
+	if (priv->curr == NULL)
+		return 0;	/* All done. */
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	p->cpu = xnsched_cpu(thread->sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->cprio = thread->cprio;
+	p->state = xnthread_get_state(thread);
+	if (thread->lock_count > 0)
+		p->state |= XNLOCK;
+	knamecpy(p->sched_class, thread->sched_class->name);
+	knamecpy(p->personality, thread->personality->name);
+	period = xnthread_get_period(thread);
+	base_time = priv->start_time;
+	if (xntimer_clock(&thread->ptimer) != &nkclock)
+		base_time = xnclock_read_monotonic(xntimer_clock(&thread->ptimer));
+	timeout = xnthread_get_timeout(thread, base_time);
+	/*
+	 * Here we cheat: thread is periodic and the sampling rate may
+	 * be high, so it is indeed possible that the next tick date
+	 * from the ptimer progresses fast enough while we are busy
+	 * collecting output data in this loop, so that next_date -
+	 * start_time > period. In such a case, we simply ceil the
+	 * value to period to keep the result meaningful, even if not
+	 * necessarily accurate. But what does accuracy mean when the
+	 * sampling frequency is high, and the way to read it has to
+	 * go through the vfile interface anyway?
+	 */
+	if (period > 0 && period < timeout &&
+	    !xntimer_running_p(&thread->rtimer))
+		timeout = period;
+
+	p->timeout = timeout;
+
+	return 1;
+}
+
+static int vfile_schedlist_show(struct xnvfile_snapshot_iterator *it,
+				void *data)
+{
+	struct vfile_schedlist_data *p = data;
+	char sbuf[64], pbuf[16], tbuf[16];
+
+	if (p == NULL)
+		xnvfile_printf(it,
+			       "%-3s  %-6s %-5s  %-8s  %-5s %-12s  %-10s %s\n",
+			       "CPU", "PID", "CLASS", "TYPE", "PRI", "TIMEOUT",
+			       "STAT", "NAME");
+	else {
+		ksformat(pbuf, sizeof(pbuf), "%3d", p->cprio);
+		xntimer_format_time(p->timeout, tbuf, sizeof(tbuf));
+		xnthread_format_status(p->state, sbuf, sizeof(sbuf));
+
+		xnvfile_printf(it,
+			       "%3u  %-6d %-5s  %-8s  %-5s %-12s  %-10s %s%s%s\n",
+			       p->cpu,
+			       p->pid,
+			       p->sched_class,
+			       p->personality,
+			       pbuf,
+			       tbuf,
+			       sbuf,
+			       (p->state & XNUSER) ? "" : "[",
+			       p->name,
+			       (p->state & XNUSER) ? "" : "]");
+	}
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_schedlist_ops = {
+	.rewind = vfile_schedlist_rewind,
+	.next = vfile_schedlist_next,
+	.show = vfile_schedlist_show,
+};
+
+#ifdef CONFIG_XENO_OPT_STATS
+
+static spl_t vfile_schedstat_lock_s;
+
+static int vfile_schedstat_get_lock(struct xnvfile *vfile)
+{
+	int ret;
+
+	ret = xnintr_get_query_lock();
+	if (ret < 0)
+		return ret;
+	xnlock_get_irqsave(&nklock, vfile_schedstat_lock_s);
+	return 0;
+}
+
+static void vfile_schedstat_put_lock(struct xnvfile *vfile)
+{
+	xnlock_put_irqrestore(&nklock, vfile_schedstat_lock_s);
+	xnintr_put_query_lock();
+}
+
+static struct xnvfile_lock_ops vfile_schedstat_lockops = {
+	.get = vfile_schedstat_get_lock,
+	.put = vfile_schedstat_put_lock,
+};
+
+struct vfile_schedstat_priv {
+	int irq;
+	struct xnthread *curr;
+	struct xnintr_iterator intr_it;
+};
+
+struct vfile_schedstat_data {
+	int cpu;
+	pid_t pid;
+	int state;
+	char name[XNOBJECT_NAME_LEN];
+	unsigned long ssw;
+	unsigned long csw;
+	unsigned long xsc;
+	unsigned long pf;
+	xnticks_t exectime_period;
+	xnticks_t account_period;
+	xnticks_t exectime_total;
+	struct xnsched_class *sched_class;
+	xnticks_t period;
+	int cprio;
+};
+
+static struct xnvfile_snapshot_ops vfile_schedstat_ops;
+
+static struct xnvfile_snapshot schedstat_vfile = {
+	.privsz = sizeof(struct vfile_schedstat_priv),
+	.datasz = sizeof(struct vfile_schedstat_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_schedstat_ops,
+	.entry = { .lockops = &vfile_schedstat_lockops },
+};
+
+static int vfile_schedstat_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_schedstat_priv *priv = xnvfile_iterator_priv(it);
+	int irqnr;
+
+	/*
+	 * The activity numbers on each valid interrupt descriptor are
+	 * grouped under a pseudo-thread.
+	 */
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+	priv->irq = 0;
+	irqnr = xnintr_query_init(&priv->intr_it) * num_online_cpus();
+
+	return irqnr + cobalt_nrthreads;
+}
+
+static int vfile_schedstat_next(struct xnvfile_snapshot_iterator *it,
+				void *data)
+{
+	struct vfile_schedstat_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_schedstat_data *p = data;
+	struct xnthread *thread;
+	struct xnsched *sched;
+	xnticks_t period;
+	int __maybe_unused ret;
+
+	if (priv->curr == NULL)
+		/*
+		 * We are done with actual threads, scan interrupt
+		 * descriptors.
+		 */
+		goto scan_irqs;
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	sched = thread->sched;
+	p->cpu = xnsched_cpu(sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->state = xnthread_get_state(thread);
+	if (thread->lock_count > 0)
+		p->state |= XNLOCK;
+	p->ssw = xnstat_counter_get(&thread->stat.ssw);
+	p->csw = xnstat_counter_get(&thread->stat.csw);
+	p->xsc = xnstat_counter_get(&thread->stat.xsc);
+	p->pf = xnstat_counter_get(&thread->stat.pf);
+	p->sched_class = thread->sched_class;
+	p->cprio = thread->cprio;
+	p->period = xnthread_get_period(thread);
+
+	period = sched->last_account_switch - thread->stat.lastperiod.start;
+	if (period == 0 && thread == sched->curr) {
+		p->exectime_period = 1;
+		p->account_period = 1;
+	} else {
+		p->exectime_period = thread->stat.account.total -
+			thread->stat.lastperiod.total;
+		p->account_period = period;
+	}
+	p->exectime_total = thread->stat.account.total;
+	thread->stat.lastperiod.total = thread->stat.account.total;
+	thread->stat.lastperiod.start = sched->last_account_switch;
+
+	return 1;
+
+scan_irqs:
+#ifdef CONFIG_XENO_OPT_STATS_IRQS
+	if (priv->irq >= PIPELINE_NR_IRQS)
+		return 0;	/* All done. */
+
+	ret = xnintr_query_next(priv->irq, &priv->intr_it, p->name);
+	if (ret) {
+		if (ret == -EAGAIN)
+			xnvfile_touch(it->vfile); /* force rewind. */
+		priv->irq++;
+		return VFILE_SEQ_SKIP;
+	}
+
+	if (!xnsched_supported_cpu(priv->intr_it.cpu))
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = priv->intr_it.cpu;
+	p->csw = priv->intr_it.hits;
+	p->exectime_period = priv->intr_it.exectime_period;
+	p->account_period = priv->intr_it.account_period;
+	p->exectime_total = priv->intr_it.exectime_total;
+	p->pid = 0;
+	p->state = 0;
+	p->ssw = 0;
+	p->xsc = 0;
+	p->pf = 0;
+	p->sched_class = &xnsched_class_idle;
+	p->cprio = 0;
+	p->period = 0;
+
+	return 1;
+#else /* !CONFIG_XENO_OPT_STATS_IRQS */
+	return 0;
+#endif /* !CONFIG_XENO_OPT_STATS_IRQS */
+}
+
+static int vfile_schedstat_show(struct xnvfile_snapshot_iterator *it,
+				void *data)
+{
+	struct vfile_schedstat_data *p = data;
+	int usage = 0;
+
+	if (p == NULL)
+		xnvfile_printf(it,
+			       "%-3s  %-6s %-10s %-10s %-10s %-4s  %-8s  %5s"
+			       "  %s\n",
+			       "CPU", "PID", "MSW", "CSW", "XSC", "PF", "STAT", "%CPU",
+			       "NAME");
+	else {
+		if (p->account_period) {
+			while (p->account_period > 0xffffffffUL) {
+				p->exectime_period >>= 16;
+				p->account_period >>= 16;
+			}
+			usage = xnarch_ulldiv(p->exectime_period * 1000LL +
+					      (p->account_period >> 1),
+					      p->account_period, NULL);
+		}
+		xnvfile_printf(it,
+			       "%3u  %-6d %-10lu %-10lu %-10lu %-4lu  %.8x  %3u.%u"
+			       "  %s%s%s\n",
+			       p->cpu, p->pid, p->ssw, p->csw, p->xsc, p->pf, p->state,
+			       usage / 10, usage % 10,
+			       (p->state & XNUSER) ? "" : "[",
+			       p->name,
+			       (p->state & XNUSER) ? "" : "]");
+	}
+
+	return 0;
+}
+
+static int vfile_schedacct_show(struct xnvfile_snapshot_iterator *it,
+				void *data)
+{
+	struct vfile_schedstat_data *p = data;
+
+	if (p == NULL)
+		return 0;
+
+	xnvfile_printf(it, "%u %d %lu %lu %lu %lu %.8x %Lu %Lu %Lu %s %s %d %Lu\n",
+		       p->cpu, p->pid, p->ssw, p->csw, p->xsc, p->pf, p->state,
+		       xnclock_ticks_to_ns(&nkclock, p->account_period),
+		       xnclock_ticks_to_ns(&nkclock, p->exectime_period),
+		       xnclock_ticks_to_ns(&nkclock, p->exectime_total),
+		       p->name,
+		       p->sched_class->name,
+		       p->cprio,
+		       p->period);
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_schedstat_ops = {
+	.rewind = vfile_schedstat_rewind,
+	.next = vfile_schedstat_next,
+	.show = vfile_schedstat_show,
+};
+
+/*
+ * An accounting vfile is a thread statistics vfile in disguise with a
+ * different output format, which is parser-friendly.
+ */
+static struct xnvfile_snapshot_ops vfile_schedacct_ops;
+
+static struct xnvfile_snapshot schedacct_vfile = {
+	.privsz = sizeof(struct vfile_schedstat_priv),
+	.datasz = sizeof(struct vfile_schedstat_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_schedacct_ops,
+};
+
+static struct xnvfile_snapshot_ops vfile_schedacct_ops = {
+	.rewind = vfile_schedstat_rewind,
+	.next = vfile_schedstat_next,
+	.show = vfile_schedacct_show,
+};
+
+#endif /* CONFIG_XENO_OPT_STATS */
+
+#ifdef CONFIG_SMP
+
+static int affinity_vfile_show(struct xnvfile_regular_iterator *it,
+			       void *data)
+{
+	unsigned long val = 0;
+	int cpu;
+
+	for (cpu = 0; cpu < BITS_PER_LONG; cpu++)
+		if (cpumask_test_cpu(cpu, &cobalt_cpu_affinity))
+			val |= (1UL << cpu);
+
+	xnvfile_printf(it, "%08lx\n", val);
+
+	return 0;
+}
+
+static ssize_t affinity_vfile_store(struct xnvfile_input *input)
+{
+	cpumask_t affinity;
+	ssize_t ret;
+	long val;
+	int cpu;
+	spl_t s;
+
+	ret = xnvfile_get_integer(input, &val);
+	if (ret < 0)
+		return ret;
+
+	if (val == 0)
+		affinity = xnsched_realtime_cpus; /* Reset to default. */
+	else {
+		cpumask_clear(&affinity);
+		for (cpu = 0; cpu < BITS_PER_LONG; cpu++, val >>= 1) {
+			if (val & 1) {
+				/*
+				 * The new dynamic affinity must be a strict
+				 * subset of the static set of supported CPUs.
+				 */
+				if (!cpumask_test_cpu(cpu,
+						      &xnsched_realtime_cpus))
+					return -EINVAL;
+				cpumask_set_cpu(cpu, &affinity);
+			}
+		}
+	}
+
+	cpumask_and(&affinity, &affinity, cpu_online_mask);
+	if (cpumask_empty(&affinity))
+		return -EINVAL;
+
+	xnlock_get_irqsave(&nklock, s);
+	cobalt_cpu_affinity = affinity;
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
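+
+/*
+ * Illustrative usage (path assumed): writing a hexadecimal CPU mask to
+ * the "affinity" vfile - typically exposed as /proc/xenomai/affinity -
+ * restricts the default CPU affinity of Cobalt threads. E.g. writing 3
+ * keeps them on CPU0-1 (provided both belong to xnsched_realtime_cpus),
+ * while writing 0 resets the affinity to xnsched_realtime_cpus.
+ */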
+
+static struct xnvfile_regular_ops affinity_vfile_ops = {
+	.show = affinity_vfile_show,
+	.store = affinity_vfile_store,
+};
+
+static struct xnvfile_regular affinity_vfile = {
+	.ops = &affinity_vfile_ops,
+};
+
+#endif /* CONFIG_SMP */
+
+int xnsched_init_proc(void)
+{
+	struct xnsched_class *p;
+	int ret;
+
+	ret = xnvfile_init_dir("sched", &sched_vfroot, &cobalt_vfroot);
+	if (ret)
+		return ret;
+
+	ret = xnvfile_init_snapshot("threads", &schedlist_vfile, &sched_vfroot);
+	if (ret)
+		return ret;
+
+	for_each_xnsched_class(p) {
+		if (p->sched_init_vfile) {
+			ret = p->sched_init_vfile(p, &sched_vfroot);
+			if (ret)
+				return ret;
+		}
+	}
+
+#ifdef CONFIG_XENO_OPT_STATS
+	ret = xnvfile_init_snapshot("stat", &schedstat_vfile, &sched_vfroot);
+	if (ret)
+		return ret;
+	ret = xnvfile_init_snapshot("acct", &schedacct_vfile, &sched_vfroot);
+	if (ret)
+		return ret;
+#endif /* CONFIG_XENO_OPT_STATS */
+
+#ifdef CONFIG_SMP
+	xnvfile_init_regular("affinity", &affinity_vfile, &cobalt_vfroot);
+#endif /* CONFIG_SMP */
+
+	return 0;
+}
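+
+/*
+ * Assuming the Cobalt vfile root is mounted at /proc/xenomai (an
+ * assumption, not stated here), the hierarchy built above typically
+ * shows up as:
+ *
+ *	/proc/xenomai/sched/threads
+ *	/proc/xenomai/sched/stat	(CONFIG_XENO_OPT_STATS)
+ *	/proc/xenomai/sched/acct	(CONFIG_XENO_OPT_STATS)
+ *	/proc/xenomai/affinity		(CONFIG_SMP)
+ */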
+
+void xnsched_cleanup_proc(void)
+{
+	struct xnsched_class *p;
+
+	for_each_xnsched_class(p) {
+		if (p->sched_cleanup_vfile)
+			p->sched_cleanup_vfile(p);
+	}
+
+#ifdef CONFIG_SMP
+	xnvfile_destroy_regular(&affinity_vfile);
+#endif /* CONFIG_SMP */
+#ifdef CONFIG_XENO_OPT_STATS
+	xnvfile_destroy_snapshot(&schedacct_vfile);
+	xnvfile_destroy_snapshot(&schedstat_vfile);
+#endif /* CONFIG_XENO_OPT_STATS */
+	xnvfile_destroy_snapshot(&schedlist_vfile);
+	xnvfile_destroy_dir(&sched_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+/** @} */
+++ linux-patched/kernel/xenomai/bufd.c	2022-03-21 12:58:28.865894071 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/synch.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/bufd.h>
+#include <cobalt/kernel/assert.h>
+#include <asm/xenomai/syscall.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_bufd Buffer descriptor
+ *
+ * Abstraction for copying data to/from different address spaces
+ *
+ * A buffer descriptor is a simple abstraction dealing with copy
+ * operations to/from memory buffers which may belong to different
+ * address spaces.
+ *
+ * To this end, the buffer descriptor library provides a small set of
+ * copy routines which are aware of address space restrictions when
+ * moving data, and a generic container type which can hold a
+ * reference to - or cover - a particular memory area, either present
+ * in kernel space, or in any of the existing user memory contexts.
+ *
+ * The goal of the buffer descriptor abstraction is to hide address
+ * space specifics from Xenomai services dealing with memory areas,
+ * allowing them to operate on multiple address spaces seamlessly.
+ *
+ * The common usage patterns are as follows:
+ *
+ * - Implementing a Xenomai syscall returning a bulk of data to the
+ *   caller, which may have to be copied back to either kernel or user
+ *   space:
+ *
+ *   @code
+ *   [Syscall implementation]
+ *   ssize_t rt_bulk_read_inner(struct xnbufd *bufd)
+ *   {
+ *       ssize_t ret;
+ *       size_t len;
+ *       void *bulk;
+ *
+ *       bulk = get_next_readable_bulk(&len);
+ *       ret = xnbufd_copy_from_kmem(bufd, bulk, min(bufd->b_len, len));
+ *       free_bulk(bulk);
+ *
+ *       ret = this_may_fail();
+ *       if (ret)
+ *	       xnbufd_invalidate(bufd);
+ *
+ *       return ret;
+ *   }
+ *
+ *   [Kernel wrapper for in-kernel calls]
+ *   int rt_bulk_read(void *ptr, size_t len)
+ *   {
+ *       struct xnbufd bufd;
+ *       ssize_t ret;
+ *
+ *       xnbufd_map_kwrite(&bufd, ptr, len);
+ *       ret = rt_bulk_read_inner(&bufd);
+ *       xnbufd_unmap_kwrite(&bufd);
+ *
+ *       return ret;
+ *   }
+ *
+ *   [Userland trampoline for user syscalls]
+ *   int __rt_bulk_read(struct pt_regs *regs)
+ *   {
+ *       struct xnbufd bufd;
+ *       void __user *ptr;
+ *       ssize_t ret;
+ *       size_t len;
+ *
+ *       ptr = (void __user *)__xn_reg_arg1(regs);
+ *       len = __xn_reg_arg2(regs);
+ *
+ *       xnbufd_map_uwrite(&bufd, ptr, len);
+ *       ret = rt_bulk_read_inner(&bufd);
+ *       xnbufd_unmap_uwrite(&bufd);
+ *
+ *       return ret;
+ *   }
+ *   @endcode
+ *
+ * - Implementing a Xenomai syscall receiving a bulk of data from the
+ *   caller, which may have to be read from either kernel or user
+ *   space:
+ *
+ *   @code
+ *   [Syscall implementation]
+ *   ssize_t rt_bulk_write_inner(struct xnbufd *bufd)
+ *   {
+ *       void *bulk = get_free_bulk(bufd->b_len);
+ *       return xnbufd_copy_to_kmem(bulk, bufd, bufd->b_len);
+ *   }
+ *
+ *   [Kernel wrapper for in-kernel calls]
+ *   int rt_bulk_write(const void *ptr, size_t len)
+ *   {
+ *       struct xnbufd bufd;
+ *       ssize_t ret;
+ *
+ *       xnbufd_map_kread(&bufd, ptr, len);
+ *       ret = rt_bulk_write_inner(&bufd);
+ *       xnbufd_unmap_kread(&bufd);
+ *
+ *       return ret;
+ *   }
+ *
+ *   [Userland trampoline for user syscalls]
+ *   int __rt_bulk_write(struct pt_regs *regs)
+ *   {
+ *       struct xnbufd bufd;
+ *       void __user *ptr;
+ *       ssize_t ret;
+ *       size_t len;
+ *
+ *       ptr = (void __user *)__xn_reg_arg1(regs);
+ *       len = __xn_reg_arg2(regs);
+ *
+ *       xnbufd_map_uread(&bufd, ptr, len);
+ *       ret = rt_bulk_write_inner(&bufd);
+ *       xnbufd_unmap_uread(&bufd);
+ *
+ *       return ret;
+ *   }
+ *   @endcode
+ *
+ *@{*/
+
+/**
+ * @fn void xnbufd_map_kread(struct xnbufd *bufd, const void *ptr, size_t len)
+ * @brief Initialize a buffer descriptor for reading from kernel memory.
+ *
+ * The new buffer descriptor may be used to copy data from kernel
+ * memory. This routine should be used in pair with
+ * xnbufd_unmap_kread().
+ *
+ * @param bufd The address of the buffer descriptor which will map a
+ * @a len bytes kernel memory area, starting from @a ptr.
+ *
+ * @param ptr The start of the kernel buffer to map.
+ *
+ * @param len The length of the kernel buffer starting at @a ptr.
+ *
+ * @coretags{unrestricted}
+ */
+
+/**
+ * @fn void xnbufd_map_kwrite(struct xnbufd *bufd, void *ptr, size_t len)
+ * @brief Initialize a buffer descriptor for writing to kernel memory.
+ *
+ * The new buffer descriptor may be used to copy data to kernel
+ * memory. This routine should be used in pair with
+ * xnbufd_unmap_kwrite().
+ *
+ * @param bufd The address of the buffer descriptor which will map a
+ * @a len bytes kernel memory area, starting from @a ptr.
+ *
+ * @param ptr The start of the kernel buffer to map.
+ *
+ * @param len The length of the kernel buffer starting at @a ptr.
+ *
+ * @coretags{unrestricted}
+ */
+void xnbufd_map_kmem(struct xnbufd *bufd, void *ptr, size_t len)
+{
+	bufd->b_ptr = ptr;
+	bufd->b_len = len;
+	bufd->b_mm = NULL;
+	bufd->b_off = 0;
+	bufd->b_carry = NULL;
+}
+EXPORT_SYMBOL_GPL(xnbufd_map_kmem);
+
+/**
+ * @fn void xnbufd_map_uread(struct xnbufd *bufd, const void __user *ptr, size_t len)
+ * @brief Initialize a buffer descriptor for reading from user memory.
+ *
+ * The new buffer descriptor may be used to copy data from user
+ * memory. This routine should be used in pair with
+ * xnbufd_unmap_uread().
+ *
+ * @param bufd The address of the buffer descriptor which will map a
+ * @a len bytes user memory area, starting from @a ptr. @a ptr is
+ * never dereferenced directly, since it may refer to a buffer that
+ * lives in another address space.
+ *
+ * @param ptr The start of the user buffer to map.
+ *
+ * @param len The length of the user buffer starting at @a ptr.
+ *
+ * @coretags{task-unrestricted}
+ */
+
+/**
+ * @fn void xnbufd_map_uwrite(struct xnbufd *bufd, void __user *ptr, size_t len)
+ * @brief Initialize a buffer descriptor for writing to user memory.
+ *
+ * The new buffer descriptor may be used to copy data to user
+ * memory. This routine should be used in pair with
+ * xnbufd_unmap_uwrite().
+ *
+ * @param bufd The address of the buffer descriptor which will map a
+ * @a len bytes user memory area, starting from @a ptr. @a ptr is
+ * never dereferenced directly, since it may refer to a buffer that
+ * lives in another address space.
+ *
+ * @param ptr The start of the user buffer to map.
+ *
+ * @param len The length of the user buffer starting at @a ptr.
+ *
+ * @coretags{task-unrestricted}
+ */
+
+void xnbufd_map_umem(struct xnbufd *bufd, void __user *ptr, size_t len)
+{
+	bufd->b_ptr = ptr;
+	bufd->b_len = len;
+	bufd->b_mm = current->mm;
+	bufd->b_off = 0;
+	bufd->b_carry = NULL;
+}
+EXPORT_SYMBOL_GPL(xnbufd_map_umem);
+
+/**
+ * @fn ssize_t xnbufd_copy_to_kmem(void *to, struct xnbufd *bufd, size_t len)
+ * @brief Copy memory covered by a buffer descriptor to kernel memory.
+ *
+ * This routine copies @a len bytes from the area referred to by the
+ * buffer descriptor @a bufd to the kernel memory area @a to.
+ * xnbufd_copy_to_kmem() tracks the read offset within the source
+ * memory internally, so that it may be called several times in a
+ * loop, until the entire memory area is loaded.
+ *
+ * The source address space is dealt with, according to the following
+ * rules:
+ *
+ * - if @a bufd refers to a readable kernel area (i.e. see
+ *   xnbufd_map_kread()), the copy is immediately and fully performed
+ *   with no restriction.
+ *
+ * - if @a bufd refers to a readable user area (i.e. see
+ *   xnbufd_map_uread()), the copy is performed only if that area
+ *   lives in the currently active address space, and only if the
+ *   caller may sleep Linux-wise to process any potential page fault
+ *   which may arise while reading from that memory.
+ *
+ * - any attempt to read from @a bufd from a non-suitable context is
+ *   considered as a bug, and will raise a panic assertion when the
+ *   nucleus is compiled in debug mode.
+ *
+ * @param to The start address of the kernel memory to copy to.
+ *
+ * @param bufd The address of the buffer descriptor covering the user
+ * memory to copy data from.
+ *
+ * @param len The length of the user memory to copy from @a bufd.
+ *
+ * @return The number of bytes read so far from the memory area
+ * covered by @a bufd. Otherwise:
+ *
+ * - -EINVAL is returned upon attempt to read from the user area from
+ *   an invalid context. This error is only returned when the debug
+ *   mode is disabled; otherwise a panic assertion is raised.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note Calling this routine while holding the nklock and/or running
+ * with interrupts disabled is invalid, and doing so will trigger a
+ * debug assertion.
+ *
+ * This routine may switch the caller to secondary mode if a page
+ * fault occurs while reading from the user area. For that reason,
+ * xnbufd_copy_to_kmem() may only be called from a preemptible section
+ * (Linux-wise).
+ */
+ssize_t xnbufd_copy_to_kmem(void *to, struct xnbufd *bufd, size_t len)
+{
+	caddr_t from;
+
+	thread_only();
+
+	if (len == 0)
+		goto out;
+
+	from = bufd->b_ptr + bufd->b_off;
+
+	/*
+	 * If the descriptor covers a source buffer living in the
+	 * kernel address space, we may read from it directly.
+	 */
+	if (bufd->b_mm == NULL) {
+		memcpy(to, from, len);
+		goto advance_offset;
+	}
+
+	/*
+	 * We want to read data from user-space, check whether:
+	 * 1) the source buffer lies in the current address space,
+	 * 2) we may fault while reading from the buffer directly.
+	 *
+	 * If we can't reach the buffer, or the current context may
+	 * not fault while reading data from it, copy_from_user() is
+	 * not an option and we have a bug somewhere, since there is
+	 * no way we could fetch the data to kernel space immediately.
+	 *
+	 * Note that we don't check for non-preemptible Linux context
+	 * here, since the source buffer would live in kernel space in
+	 * such a case.
+	 */
+	if (current->mm == bufd->b_mm) {
+		preemptible_only();
+		if (cobalt_copy_from_user(to, (void __user *)from, len))
+			return -EFAULT;
+		goto advance_offset;
+	}
+
+	XENO_BUG(COBALT);
+
+	return -EINVAL;
+
+advance_offset:
+	bufd->b_off += len;
+out:
+	return (ssize_t)bufd->b_off;
+}
+EXPORT_SYMBOL_GPL(xnbufd_copy_to_kmem);
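+
+/*
+ * Illustrative sketch (hypothetical helper, not from the original
+ * code): since the read offset is tracked in bufd->b_off, the source
+ * area may be drained in fixed-size chunks from a loop.
+ *
+ *	static ssize_t drain_in_chunks(void *dst, struct xnbufd *bufd,
+ *				       size_t chunk)
+ *	{
+ *		size_t done = 0;
+ *		ssize_t ret = 0;
+ *
+ *		while (done < bufd->b_len) {
+ *			size_t n = min(chunk, bufd->b_len - done);
+ *			ret = xnbufd_copy_to_kmem((char *)dst + done, bufd, n);
+ *			if (ret < 0)
+ *				return ret;
+ *			done += n;
+ *		}
+ *
+ *		return ret;
+ *	}
+ */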
+
+/**
+ * @fn ssize_t xnbufd_copy_from_kmem(struct xnbufd *bufd, void *from, size_t len)
+ * @brief Copy kernel memory to the area covered by a buffer descriptor.
+ *
+ * This routine copies @a len bytes from the kernel memory starting at
+ * @a from to the area referred to by the buffer descriptor @a
+ * bufd. xnbufd_copy_from_kmem() tracks the write offset within the
+ * destination memory internally, so that it may be called several
+ * times in a loop, until the entire memory area is stored.
+ *
+ * The destination address space is dealt with, according to the
+ * following rules:
+ *
+ * - if @a bufd refers to a writable kernel area (i.e. see
+ *   xnbufd_map_kwrite()), the copy is immediately and fully performed
+ *   with no restriction.
+ *
+ * - if @a bufd refers to a writable user area (i.e. see
+ *   xnbufd_map_uwrite()), the copy is performed only if that area
+ *   lives in the currently active address space, and only if the
+ *   caller may sleep Linux-wise to process any potential page fault
+ *   which may arise while writing to that memory.
+ *
+ * - if @a bufd refers to a user area which may not be immediately
+ *   written to from the current context, the copy is postponed until
+ *   xnbufd_unmap_uwrite() is invoked for @a bufd, at which point the
+ *   copy will take place. In such a case, the source memory is
+ *   transferred to a carry over buffer allocated internally; this
+ *   operation may require allocating dynamic memory from the nucleus
+ *   heap if @a len is greater than 64 bytes.
+ *
+ * @param bufd The address of the buffer descriptor covering the user
+ * memory to copy data to.
+ *
+ * @param from The start address of the kernel memory to copy from.
+ *
+ * @param len The length of the kernel memory to copy to @a bufd.
+ *
+ * @return The number of bytes written so far to the memory area
+ * covered by @a bufd. Otherwise,
+ *
+ * - -ENOMEM is returned when no memory is available from the nucleus
+ *    heap to allocate the carry over buffer.
+ *
+ * @coretags{unrestricted}
+ *
+ * @note Calling this routine while holding the nklock and/or running
+ * with interrupts disabled is invalid, and doing so will trigger a
+ * debug assertion.
+ *
+ * This routine may switch the caller to secondary mode if a page
+ * fault occurs while writing to the user area. For that reason,
+ * xnbufd_copy_from_kmem() may only be called from a preemptible section
+ * (Linux-wise).
+ */
+ssize_t xnbufd_copy_from_kmem(struct xnbufd *bufd, void *from, size_t len)
+{
+	caddr_t to;
+
+	thread_only();
+
+	if (len == 0)
+		goto out;
+
+	to = bufd->b_ptr + bufd->b_off;
+
+	/*
+	 * If the descriptor covers a destination buffer living in the
+	 * kernel address space, we may copy to it directly.
+	 */
+	if (bufd->b_mm == NULL)
+		goto direct_copy;
+
+	/*
+	 * We want to pass data to user-space, check whether:
+	 * 1) the destination buffer lies in the current address space,
+	 * 2) we may fault while writing to the buffer directly.
+	 *
+	 * If we can't reach the buffer, or the current context may
+	 * not fault while copying data to it, copy_to_user() is not
+	 * an option and we have to convey the data from kernel memory
+	 * through the carry over buffer.
+	 *
+	 * Note that we don't check for non-preemptible Linux context
+	 * here: feeding a RT activity with data from a non-RT context
+	 * is wrong in the first place, so never mind.
+	 */
+	if (current->mm == bufd->b_mm) {
+		preemptible_only();
+		if (cobalt_copy_to_user((void __user *)to, from, len))
+			return -EFAULT;
+		goto advance_offset;
+	}
+
+	/*
+	 * We need a carry over buffer to convey the data to
+	 * user-space. xnbufd_unmap_uwrite() should be called on the
+	 * way back to user-space to update the destination buffer
+	 * from the carry over area.
+	 */
+	if (bufd->b_carry == NULL) {
+		/*
+		 * Try to use the fast carry over area available
+		 * directly from the descriptor for short messages, to
+		 * save a dynamic allocation request.
+		 */
+		if (bufd->b_len <= sizeof(bufd->b_buf))
+			bufd->b_carry = bufd->b_buf;
+		else {
+			bufd->b_carry = xnmalloc(bufd->b_len);
+			if (bufd->b_carry == NULL)
+				return -ENOMEM;
+		}
+		to = bufd->b_carry;
+	} else
+		to = bufd->b_carry + bufd->b_off;
+
+direct_copy:
+	memcpy(to, from, len);
+
+advance_offset:
+	bufd->b_off += len;
+out:
+	return (ssize_t)bufd->b_off;
+}
+EXPORT_SYMBOL_GPL(xnbufd_copy_from_kmem);
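+
+/*
+ * Illustrative sketch (not part of the original code): returning a
+ * kernel payload through a write buffer descriptor, relying on the
+ * copy/postpone rules documented above. The bufd descriptor and msg
+ * payload are hypothetical.
+ *
+ *	static const char msg[] = "done";
+ *	ssize_t ret;
+ *
+ *	ret = xnbufd_copy_from_kmem(bufd, (void *)msg, sizeof(msg));
+ *	if (ret < 0)
+ *		return ret;	// -ENOMEM or -EFAULT
+ */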
+
+/**
+ * @fn void xnbufd_unmap_uread(struct xnbufd *bufd)
+ * @brief Finalize a buffer descriptor obtained from xnbufd_map_uread().
+ *
+ * This routine finalizes a buffer descriptor previously initialized
+ * by a call to xnbufd_map_uread(), to read data from a user area.
+ *
+ * @param bufd The address of the buffer descriptor to finalize.
+ *
+ * @return The number of bytes read so far from the memory area
+ * covered by @a bufd.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note Calling this routine while holding the nklock and/or running
+ * with interrupts disabled is invalid, and doing so will trigger a
+ * debug assertion.
+ */
+ssize_t xnbufd_unmap_uread(struct xnbufd *bufd)
+{
+	preemptible_only();
+
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	bufd->b_ptr = (caddr_t)-1;
+#endif
+	return bufd->b_off;
+}
+EXPORT_SYMBOL_GPL(xnbufd_unmap_uread);
+
+/**
+ * @fn void xnbufd_unmap_uwrite(struct xnbufd *bufd)
+ * @brief Finalize a buffer descriptor obtained from xnbufd_map_uwrite().
+ *
+ * This routine finalizes a buffer descriptor previously initialized
+ * by a call to xnbufd_map_uwrite(), to write data to a user area.
+ *
+ * The main action taken is to write the contents of the kernel memory
+ * area passed to xnbufd_copy_from_kmem() whenever the copy operation
+ * was postponed at that time; the carry over buffer is eventually
+ * released as needed. If xnbufd_copy_from_kmem() was allowed to copy
+ * to the destination user memory at once, then xnbufd_unmap_uwrite()
+ * is a no-op.
+ *
+ * @param bufd The address of the buffer descriptor to finalize.
+ *
+ * @return The number of bytes written so far to the memory area
+ * covered by @a bufd.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note Calling this routine while holding the nklock and/or running
+ * with interrupts disabled is invalid, and doing so will trigger a
+ * debug assertion.
+ */
+ssize_t xnbufd_unmap_uwrite(struct xnbufd *bufd)
+{
+	ssize_t ret = 0;
+	void __user *to;
+	void *from;
+	size_t len;
+
+	preemptible_only();
+
+	len = bufd->b_off;
+
+	if (bufd->b_carry == NULL)
+		/* Copy took place directly. Fine. */
+		goto done;
+
+	/*
+	 * Something was written to the carry over area, copy the
+	 * contents to user-space, then release the area if needed.
+	 */
+	to = (void __user *)bufd->b_ptr;
+	from = bufd->b_carry;
+	ret = cobalt_copy_to_user(to, from, len);
+
+	if (bufd->b_len > sizeof(bufd->b_buf))
+		xnfree(bufd->b_carry);
+done:
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	bufd->b_ptr = (caddr_t)-1;
+#endif
+	return ret ?: (ssize_t)len;
+}
+EXPORT_SYMBOL_GPL(xnbufd_unmap_uwrite);
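+
+/*
+ * Illustrative sketch (not part of the original code): the typical
+ * life cycle of a write buffer descriptor on a path returning data to
+ * user memory, pairing xnbufd_map_uwrite() with the finalization call
+ * above. It assumes xnbufd_map_uwrite() takes the destination pointer
+ * and length; the ubuf, size and payload variables are hypothetical.
+ *
+ *	struct xnbufd bufd;
+ *	ssize_t ret;
+ *
+ *	xnbufd_map_uwrite(&bufd, ubuf, size);
+ *	ret = xnbufd_copy_from_kmem(&bufd, payload, size);
+ *	if (ret < 0)
+ *		xnbufd_invalidate(&bufd);	// drop carry over data on error
+ *	ret = xnbufd_unmap_uwrite(&bufd);	// commits any carry over data
+ */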
+
+/**
+ * @fn void xnbufd_reset(struct xnbufd *bufd)
+ * @brief Reset a buffer descriptor.
+ *
+ * The buffer descriptor is reset, so that all data already copied is
+ * forgotten. Any carry over buffer allocated is kept, though.
+ *
+ * @param bufd The address of the buffer descriptor to reset.
+ *
+ * @coretags{unrestricted}
+ */
+
+/**
+ * @fn void xnbufd_invalidate(struct xnbufd *bufd)
+ * @brief Invalidate a buffer descriptor.
+ *
+ * The buffer descriptor is invalidated, making it unusable for
+ * further copy operations. If an outstanding carry over buffer was
+ * allocated by a previous call to xnbufd_copy_from_kmem(), it is
+ * immediately freed so that no data transfer will happen when the
+ * descriptor is finalized.
+ *
+ * The only action that may subsequently be performed on an
+ * invalidated descriptor is calling the relevant unmapping routine
+ * for it. For that reason, xnbufd_invalidate() should be invoked on
+ * the error path when data may have been transferred to the carry
+ * over buffer.
+ *
+ * @param bufd The address of the buffer descriptor to invalidate.
+ *
+ * @coretags{unrestricted}
+ */
+void xnbufd_invalidate(struct xnbufd *bufd)
+{
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	bufd->b_ptr = (caddr_t)-1;
+#endif
+	if (bufd->b_carry) {
+		if (bufd->b_len > sizeof(bufd->b_buf))
+			xnfree(bufd->b_carry);
+		bufd->b_carry = NULL;
+	}
+	bufd->b_off = 0;
+}
+EXPORT_SYMBOL_GPL(xnbufd_invalidate);
+
+/**
+ * @fn void xnbufd_unmap_kread(struct xnbufd *bufd)
+ * @brief Finalize a buffer descriptor obtained from xnbufd_map_kread().
+ *
+ * This routine finalizes a buffer descriptor previously initialized
+ * by a call to xnbufd_map_kread(), to read data from a kernel area.
+ *
+ * @param bufd The address of the buffer descriptor to finalize.
+ *
+ * @return The number of bytes read so far from the memory area
+ * covered by @a bufd.
+ *
+ * @coretags{task-unrestricted}
+ */
+ssize_t xnbufd_unmap_kread(struct xnbufd *bufd)
+{
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	bufd->b_ptr = (caddr_t)-1;
+#endif
+	return bufd->b_off;
+}
+EXPORT_SYMBOL_GPL(xnbufd_unmap_kread);
+
+/**
+ * @fn void xnbufd_unmap_kwrite(struct xnbufd *bufd)
+ * @brief Finalize a buffer descriptor obtained from xnbufd_map_kwrite().
+ *
+ * This routine finalizes a buffer descriptor previously initialized
+ * by a call to xnbufd_map_kwrite(), to write data to a kernel area.
+ *
+ * @param bufd The address of the buffer descriptor to finalize.
+ *
+ * @return The number of bytes written so far to the memory area
+ * covered by @a bufd.
+ *
+ * @coretags{task-unrestricted}
+ */
+ssize_t xnbufd_unmap_kwrite(struct xnbufd *bufd)
+{
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	bufd->b_ptr = (caddr_t)-1;
+#endif
+	return bufd->b_off;
+}
+EXPORT_SYMBOL_GPL(xnbufd_unmap_kwrite);
+
+/** @} */
+++ linux-patched/kernel/xenomai/synch.c	2022-03-21 12:58:28.861894110 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/select.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/stdarg.h>
+#include <linux/signal.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/uapi/signal.h>
+#include <trace/events/cobalt-core.h>
+
+#define PP_CEILING_MASK 0xff
+
+static inline int get_ceiling_value(struct xnsynch *synch)
+{
+	/*
+	 * The ceiling priority value is stored in user-writable
+	 * memory, make sure to constrain it within valid bounds for
+	 * xnsched_class_rt before using it.
+	 */
+	return *synch->ceiling_ref & PP_CEILING_MASK ?: 1;
+}
+
+struct xnsynch *lookup_lazy_pp(xnhandle_t handle);
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_synch Thread synchronization services
+ * @{
+ */
+
+/**
+ * @brief Initialize a synchronization object.
+ *
+ * Initializes a synchronization object. Xenomai threads can wait on
+ * and signal such objects for serializing access to resources.
+ * This object has built-in support for priority inheritance.
+ *
+ * @param synch The address of a synchronization object descriptor
+ * Cobalt will use to store the object-specific data.  This descriptor
+ * must always be valid while the object is active; therefore, it must
+ * be allocated in permanent memory.
+ *
+ * @param flags A set of creation flags affecting the operation. The
+ * valid flags are:
+ *
+ * - XNSYNCH_PRIO causes the threads waiting for the resource to pend
+ * in priority order. Otherwise, FIFO ordering is used (XNSYNCH_FIFO).
+ *
+ * - XNSYNCH_OWNER indicates that the synchronization object shall
+ * track the resource ownership, allowing a single owner at most at
+ * any point in time. Note that setting this flag implies the use of
+ * xnsynch_acquire() and xnsynch_release() instead of
+ * xnsynch_sleep_on() and xnsynch_wakeup_*().
+ *
+ * - XNSYNCH_PI enables priority inheritance when a priority inversion
+ * is detected among threads using this object.  XNSYNCH_PI implies
+ * XNSYNCH_OWNER and XNSYNCH_PRIO.
+ *
+ * - XNSYNCH_PP enables priority protect to prevent priority inversion.
+ * XNSYNCH_PP implies XNSYNCH_OWNER and XNSYNCH_PRIO.
+ *
+ * - XNSYNCH_DREORD (Disable REORDering) tells Cobalt not to reorder
+ * the wait list upon priority change of a waiter. Reordering is the
+ * default. Only applies when XNSYNCH_PRIO is present.
+ *
+ * @param fastlock Address of the fast lock word to be associated with
+ * a synchronization object with ownership tracking. Therefore, a
+ * valid fast-lock address is required if XNSYNCH_OWNER is set in @a
+ * flags.
+ *
+ * @coretags{task-unrestricted}
+ */
+void xnsynch_init(struct xnsynch *synch, int flags, atomic_t *fastlock)
+{
+	if (flags & (XNSYNCH_PI|XNSYNCH_PP))
+		flags |= XNSYNCH_PRIO | XNSYNCH_OWNER;	/* Obviously... */
+
+	synch->status = flags & ~XNSYNCH_CLAIMED;
+	synch->owner = NULL;
+	synch->cleanup = NULL;	/* for PI/PP only. */
+	synch->wprio = -1;
+	synch->ceiling_ref = NULL;
+	INIT_LIST_HEAD(&synch->pendq);
+
+	if (flags & XNSYNCH_OWNER) {
+		BUG_ON(fastlock == NULL);
+		synch->fastlock = fastlock;
+		atomic_set(fastlock, XN_NO_HANDLE);
+	} else
+		synch->fastlock = NULL;
+}
+EXPORT_SYMBOL_GPL(xnsynch_init);
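+
+/*
+ * Illustrative sketch (not part of the original code): initializing a
+ * PI-enabled, ownership-tracking object backed by a fast lock word.
+ * The static storage shown is hypothetical.
+ *
+ *	static struct xnsynch mutex_synch;
+ *	static atomic_t mutex_fastlock;
+ *
+ *	xnsynch_init(&mutex_synch, XNSYNCH_PI, &mutex_fastlock);
+ */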
+
+/**
+ * @brief Initialize a synchronization object enforcing PP.
+ *
+ * This call is a variant of xnsynch_init() for initializing
+ * synchronization objects enabling the priority protect protocol.
+ *
+ * @param synch The address of a synchronization object descriptor
+ * Cobalt will use to store the object-specific data.  See
+ * xnsynch_init().
+ *
+ * @param flags A set of creation flags affecting the operation. See
+ * xnsynch_init(). XNSYNCH_PI is mutually exclusive with XNSYNCH_PP,
+ * and won't be considered.
+ *
+ * @param fastlock Address of the fast lock word to be associated with
+ * a synchronization object with ownership tracking. See xnsynch_init().
+ *
+ * @param ceiling_ref The address of the variable holding the current
+ * priority ceiling value for this object.
+ *
+ * @coretags{task-unrestricted}
+ */
+void xnsynch_init_protect(struct xnsynch *synch, int flags,
+			  atomic_t *fastlock, u32 *ceiling_ref)
+{
+	xnsynch_init(synch, (flags & ~XNSYNCH_PI) | XNSYNCH_PP, fastlock);
+	synch->ceiling_ref = ceiling_ref;
+}
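+
+/*
+ * Illustrative sketch (not part of the original code): the PP-enabled
+ * variant, with the current ceiling value read from the word referred
+ * to by @a ceiling_ref at acquisition time. The storage shown is
+ * hypothetical.
+ *
+ *	static struct xnsynch pp_synch;
+ *	static atomic_t pp_fastlock;
+ *	static u32 pp_ceiling = 42;
+ *
+ *	xnsynch_init_protect(&pp_synch, XNSYNCH_PRIO, &pp_fastlock,
+ *			     &pp_ceiling);
+ */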
+
+/**
+ * @fn void xnsynch_destroy(struct xnsynch *synch)
+ * @brief Destroy a synchronization object.
+ *
+ * Destroys the synchronization object @a synch, unblocking all
+ * waiters with the XNRMID status.
+ *
+ * @return XNSYNCH_RESCHED is returned if at least one thread is
+ * unblocked, which means the caller should invoke xnsched_run() for
+ * applying the new scheduling state. Otherwise, XNSYNCH_DONE is
+ * returned.
+ *
+ * @sideeffect Same as xnsynch_flush().
+ *
+ * @coretags{task-unrestricted}
+ */
+int xnsynch_destroy(struct xnsynch *synch)
+{
+	int ret;
+	
+	ret = xnsynch_flush(synch, XNRMID);
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_CLAIMED);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnsynch_destroy);
+
+/**
+ * @fn int xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout, xntmode_t timeout_mode);
+ * @brief Sleep on an ownerless synchronization object.
+ *
+ * Makes the calling thread sleep on the specified synchronization
+ * object, waiting for it to be signaled.
+ *
+ * This service should be called by upper interfaces wanting the
+ * current thread to pend on the given resource. It must not be used
+ * with synchronization objects that are supposed to track ownership
+ * (XNSYNCH_OWNER).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to sleep on.
+ *
+ * @param timeout The timeout which may be used to limit the time the
+ * thread pends on the resource. This value is a wait time given as a
+ * count of nanoseconds. It can either be relative, absolute
+ * monotonic, or absolute adjustable depending on @a
+ * timeout_mode. Passing XN_INFINITE @b and setting @a timeout_mode to
+ * XN_RELATIVE specifies an unbounded wait. All other values are used
+ * to initialize a watchdog timer.
+ *
+ * @param timeout_mode The mode of the @a timeout parameter. It can
+ * either be set to XN_RELATIVE, XN_ABSOLUTE, or XN_REALTIME (see also
+ * xntimer_start()).
+ *
+ * @return A bitmask which may include zero or one information bit
+ * among XNRMID, XNTIMEO and XNBREAK, which should be tested by the
+ * caller, for detecting respectively: object deletion, timeout or
+ * signal/unblock conditions which might have happened while waiting.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout,
+		     xntmode_t timeout_mode)
+{
+	struct xnthread *thread;
+	spl_t s;
+
+	primary_mode_only();
+
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_OWNER);
+
+	thread = xnthread_current();
+
+	if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP) &&
+	    thread->res_count > 0 &&
+	    xnthread_test_state(thread, XNWARN))
+		xnthread_signal(thread, SIGDEBUG, SIGDEBUG_MUTEX_SLEEP);
+	
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_synch_sleepon(synch);
+
+	if ((synch->status & XNSYNCH_PRIO) == 0) /* i.e. FIFO */
+		list_add_tail(&thread->plink, &synch->pendq);
+	else /* i.e. priority-sorted */
+		list_add_priff(thread, &synch->pendq, wprio, plink);
+
+	xnthread_suspend(thread, XNPEND, timeout, timeout_mode, synch);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return xnthread_test_info(thread, XNRMID|XNTIMEO|XNBREAK);
+}
+EXPORT_SYMBOL_GPL(xnsynch_sleep_on);
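+
+/*
+ * Illustrative sketch (not part of the original code): pending on a
+ * (hypothetical) ownerless object and decoding the returned
+ * information bits; the error codes picked here are illustrative.
+ *
+ *	int info;
+ *
+ *	info = xnsynch_sleep_on(&event_synch, XN_INFINITE, XN_RELATIVE);
+ *	if (info & XNRMID)
+ *		return -EIDRM;	// object deleted while sleeping
+ *	if (info & XNBREAK)
+ *		return -EINTR;	// wait forcibly interrupted
+ */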
+
+/**
+ * @fn struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch);
+ * @brief Unblock the heading thread from wait.
+ *
+ * This service wakes up the thread which is currently leading the
+ * synchronization object's pending list. The sleeping thread is
+ * unblocked from its pending state, but no reschedule is performed.
+ *
+ * This service should be called by upper interfaces wanting to signal
+ * the given resource so that a single waiter is resumed. It must only
+ * be used with synchronization objects that do not track ownership
+ * (XNSYNCH_OWNER not set).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to signal.
+ *
+ * @return The descriptor address of the unblocked thread.
+ *
+ * @coretags{unrestricted}
+ */
+struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch)
+{
+	struct xnthread *thread;
+	spl_t s;
+
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_OWNER);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(&synch->pendq)) {
+		thread = NULL;
+		goto out;
+	}
+
+	trace_cobalt_synch_wakeup(synch);
+	thread = list_first_entry(&synch->pendq, struct xnthread, plink);
+	list_del(&thread->plink);
+	thread->wchan = NULL;
+	xnthread_resume(thread, XNPEND);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return thread;
+}
+EXPORT_SYMBOL_GPL(xnsynch_wakeup_one_sleeper);
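+
+/*
+ * Illustrative sketch (not part of the original code): since the
+ * wakeup call performs no reschedule by itself, a typical caller
+ * signals the (hypothetical) object, then applies the new scheduling
+ * state.
+ *
+ *	if (xnsynch_wakeup_one_sleeper(&event_synch))
+ *		xnsched_run();
+ */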
+
+int xnsynch_wakeup_many_sleepers(struct xnsynch *synch, int nr)
+{
+	struct xnthread *thread, *tmp;
+	int nwakeups = 0;
+	spl_t s;
+
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_OWNER);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(&synch->pendq))
+		goto out;
+
+	trace_cobalt_synch_wakeup_many(synch);
+
+	list_for_each_entry_safe(thread, tmp, &synch->pendq, plink) {
+		if (nwakeups++ >= nr)
+			break;
+		list_del(&thread->plink);
+		thread->wchan = NULL;
+		xnthread_resume(thread, XNPEND);
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return nwakeups;
+}
+EXPORT_SYMBOL_GPL(xnsynch_wakeup_many_sleepers);
+
+/**
+ * @fn void xnsynch_wakeup_this_sleeper(struct xnsynch *synch, struct xnthread *sleeper);
+ * @brief Unblock a particular thread from wait.
+ *
+ * This service wakes up a specific thread which is currently pending on
+ * the given synchronization object. The sleeping thread is unblocked
+ * from its pending state, but no reschedule is performed.
+ *
+ * This service should be called by upper interfaces wanting to signal
+ * the given resource so that a specific waiter is resumed. It must only
+ * be used with synchronization objects that do not track ownership
+ * (XNSYNCH_OWNER not set).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to signal.
+ *
+ * @param sleeper The thread to unblock which MUST be currently linked
+ * to the synchronization object's pending queue (i.e. synch->pendq).
+ *
+ * @coretags{unrestricted}
+ */
+void xnsynch_wakeup_this_sleeper(struct xnsynch *synch, struct xnthread *sleeper)
+{
+	spl_t s;
+
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_OWNER);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_synch_wakeup(synch);
+	list_del(&sleeper->plink);
+	sleeper->wchan = NULL;
+	xnthread_resume(sleeper, XNPEND);
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnsynch_wakeup_this_sleeper);
+
+static inline void raise_boost_flag(struct xnthread *owner)
+{
+	/* Backup the base priority at first boost only. */
+	if (!xnthread_test_state(owner, XNBOOST)) {
+		owner->bprio = owner->cprio;
+		xnthread_set_state(owner, XNBOOST);
+	}
+}
+
+static void inherit_thread_priority(struct xnthread *owner,
+				    struct xnthread *target)
+{
+	if (xnthread_test_state(owner, XNZOMBIE))
+		return;
+	
+	/* Apply the scheduling policy of "target" to "owner". */
+	xnsched_track_policy(owner, target);
+
+	/*
+	 * Owner may be sleeping, propagate priority update through
+	 * the PI chain if needed.
+	 */
+	if (owner->wchan)
+		xnsynch_requeue_sleeper(owner);
+}
+
+static void __ceil_owner_priority(struct xnthread *owner, int prio)
+{
+	if (xnthread_test_state(owner, XNZOMBIE))
+		return;
+	/*
+	 * Raise owner priority to the ceiling value, this implicitly
+	 * selects SCHED_FIFO for the owner.
+	 */
+	xnsched_protect_priority(owner, prio);
+
+	if (owner->wchan)
+		xnsynch_requeue_sleeper(owner);
+}
+
+static void adjust_boost(struct xnthread *owner, struct xnthread *target)
+{
+	struct xnsynch *synch;
+
+	/*
+	 * CAUTION: we may have PI and PP-enabled objects among the
+	 * boosters, so considering only the leader of synch->pendq is
+	 * NOT enough for determining the next boost
+	 * priority, since PP is tracked on acquisition, not on
+	 * contention. Check the head of the booster list instead.
+	 */
+	synch = list_first_entry(&owner->boosters, struct xnsynch, next);
+	if (synch->wprio == owner->wprio)
+		return;
+	
+	if (synch->status & XNSYNCH_PP)
+		__ceil_owner_priority(owner, get_ceiling_value(synch));
+	else {
+		XENO_BUG_ON(COBALT, list_empty(&synch->pendq));
+		if (target == NULL)
+			target = list_first_entry(&synch->pendq,
+						  struct xnthread, plink);
+		inherit_thread_priority(owner, target);
+	}
+}
+
+static void ceil_owner_priority(struct xnsynch *synch)
+{
+	struct xnthread *owner = synch->owner;
+	int wprio;
+
+	/* PP ceiling values are implicitly based on the RT class. */
+	wprio = xnsched_calc_wprio(&xnsched_class_rt,
+				   get_ceiling_value(synch));
+	synch->wprio = wprio;
+	list_add_priff(synch, &owner->boosters, wprio, next);
+	raise_boost_flag(owner);
+	synch->status |= XNSYNCH_CEILING;
+
+	/*
+	 * If the ceiling value is lower than the current effective
+	 * priority, we must not adjust the latter.  BEWARE: not only
+	 * this restriction is required to keep the PP logic right,
+	 * but this is also a basic assumption made by all
+	 * xnthread_commit_ceiling() callers which won't check for any
+	 * rescheduling opportunity upon return.
+	 *
+	 * However we do want the object to be linked to the booster
+	 * list, and XNBOOST must appear in the current thread status.
+	 *
+	 * This way, setparam() won't be allowed to decrease the
+	 * current weighted priority below the ceiling value, until we
+	 * eventually release this object.
+	 */
+	if (wprio > owner->wprio)
+		adjust_boost(owner, NULL);
+}
+
+static inline
+void track_owner(struct xnsynch *synch, struct xnthread *owner)
+{
+	synch->owner = owner;
+}
+
+static inline  /* nklock held, irqs off */
+void set_current_owner_locked(struct xnsynch *synch, struct xnthread *owner)
+{
+	/*
+	 * Update the owner information, and apply priority protection
+	 * for PP objects. We may only get there if owner is current,
+	 * or blocked.
+	 */
+	track_owner(synch, owner);
+	if (synch->status & XNSYNCH_PP)
+		ceil_owner_priority(synch);
+}
+
+static inline
+void set_current_owner(struct xnsynch *synch, struct xnthread *owner)
+{
+	spl_t s;
+
+	track_owner(synch, owner);
+	if (synch->status & XNSYNCH_PP) {
+		xnlock_get_irqsave(&nklock, s);
+		ceil_owner_priority(synch);
+		xnlock_put_irqrestore(&nklock, s);
+	}
+}
+
+static inline
+xnhandle_t get_owner_handle(xnhandle_t ownerh, struct xnsynch *synch)
+{
+	/*
+	 * On acquisition from kernel space, the fast lock handle
+	 * should bear the FLCEIL bit for PP objects, so that userland
+	 * takes the slow path on release, jumping to the kernel for
+	 * dropping the ceiling priority boost.
+	 */
+	if (synch->status & XNSYNCH_PP)
+		ownerh = xnsynch_fast_ceiling(ownerh);
+
+	return ownerh;
+}
+
+static void commit_ceiling(struct xnsynch *synch, struct xnthread *curr)
+{
+	xnhandle_t oldh, h;
+	atomic_t *lockp;
+
+	track_owner(synch, curr);
+	ceil_owner_priority(synch);
+	/*
+	 * Raise FLCEIL, which indicates a kernel entry will be
+	 * required for releasing this resource.
+	 */
+	lockp = xnsynch_fastlock(synch);
+	do {
+		h = atomic_read(lockp);
+		oldh = atomic_cmpxchg(lockp, h, xnsynch_fast_ceiling(h));
+	} while (oldh != h);
+}
+
+void xnsynch_commit_ceiling(struct xnthread *curr)  /* nklock held, irqs off */
+{
+	struct xnsynch *synch;
+	atomic_t *lockp;
+
+	/* curr->u_window has to be valid, curr bears XNUSER. */
+	synch = lookup_lazy_pp(curr->u_window->pp_pending);
+	if (synch == NULL) {
+		/*
+		 * If pp_pending is a bad handle, don't panic but
+		 * rather ignore: we don't want a misbehaving userland
+		 * to crash the kernel.
+		 */
+		XENO_WARN_ON_ONCE(USER, 1);
+		goto out;
+	}
+
+	/*
+	 * For PP locks, userland does, in that order:
+	 *
+	 * -- LOCK
+	 * 1. curr->u_window->pp_pending = lock_handle
+	 *    barrier();
+	 * 2. atomic_cmpxchg(lockp, XN_NO_HANDLE, curr->handle);
+	 *
+	 * -- UNLOCK
+	 * 1. atomic_cmpxchg(lockp, curr->handle, XN_NO_HANDLE); [unclaimed]
+	 *    barrier();
+	 * 2. curr->u_window->pp_pending = XN_NO_HANDLE
+	 *
+	 * Make sure we have not been caught in a rescheduling in
+	 * between those steps. If we did, then we won't be holding
+	 * the lock as we schedule away, therefore no priority update
+	 * must take place.
+	 */
+	lockp = xnsynch_fastlock(synch);
+	if (xnsynch_fast_owner_check(lockp, curr->handle))
+		return;
+
+	/*
+	 * In rare cases, we could be called multiple times for
+	 * committing a lazy ceiling for the same object, e.g. if
+	 * userland is preempted in the middle of a recursive locking
+	 * sequence.
+	 *
+	 * This stems from the fact that userland has to update
+	 * ->pp_pending prior to trying to grab the lock atomically,
+	 * at which point it can figure out whether a recursive
+	 * locking happened. We get out of this trap by testing the
+	 * XNSYNCH_CEILING flag.
+	 */
+	if ((synch->status & XNSYNCH_CEILING) == 0)
+		commit_ceiling(synch, curr);
+out:
+	curr->u_window->pp_pending = XN_NO_HANDLE;
+}
+
+/**
+ * @fn int xnsynch_try_acquire(struct xnsynch *synch);
+ * @brief Try acquiring the ownership of a synchronization object.
+ *
+ * This service should be called by upper interfaces wanting the
+ * current thread to acquire the ownership of the given resource. If
+ * the resource is already assigned to another thread, the call
+ * returns with an error code.
+ *
+ * This service must be used only with synchronization objects that
+ * track ownership (XNSYNCH_OWNER set).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to acquire.
+ *
+ * @return Zero is returned if @a synch has been successfully
+ * acquired. Otherwise:
+ *
+ * - -EDEADLK is returned if @a synch is currently held by the calling
+ * thread.
+ *
+ * - -EBUSY is returned if @a synch is currently held by another
+ * thread.
+ *
+ * @coretags{primary-only}
+ */
+int xnsynch_try_acquire(struct xnsynch *synch)
+{
+	struct xnthread *curr;
+	atomic_t *lockp;
+	xnhandle_t h;
+
+	primary_mode_only();
+
+	XENO_BUG_ON(COBALT, (synch->status & XNSYNCH_OWNER) == 0);
+
+	curr = xnthread_current();
+	lockp = xnsynch_fastlock(synch);
+	trace_cobalt_synch_try_acquire(synch);
+
+	h = atomic_cmpxchg(lockp, XN_NO_HANDLE,
+			   get_owner_handle(curr->handle, synch));
+	if (h != XN_NO_HANDLE)
+		return xnhandle_get_id(h) == curr->handle ?
+			-EDEADLK : -EBUSY;
+
+	set_current_owner(synch, curr);
+	xnthread_get_resource(curr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnsynch_try_acquire);
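+
+/*
+ * Illustrative sketch (not part of the original code): non-blocking
+ * acquisition of a (hypothetical) ownership-tracking object; the
+ * error code mapping is illustrative.
+ *
+ *	int ret = xnsynch_try_acquire(&mutex_synch);
+ *	if (ret == -EBUSY)
+ *		return -EWOULDBLOCK;	// owned by another thread
+ *	if (ret == -EDEADLK)
+ *		return ret;		// recursive locking attempt
+ */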
+
+/**
+ * @fn int xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout, xntmode_t timeout_mode);
+ * @brief Acquire the ownership of a synchronization object.
+ *
+ * This service should be called by upper interfaces wanting the
+ * current thread to acquire the ownership of the given resource. If
+ * the resource is already assigned to another thread, the caller is
+ * suspended.
+ *
+ * This service must be used only with synchronization objects that
+ * track ownership (XNSYNCH_OWNER set).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to acquire.
+ *
+ * @param timeout The timeout which may be used to limit the time the
+ * thread pends on the resource. This value is a wait time given as a
+ * count of nanoseconds. It can either be relative, absolute
+ * monotonic, or absolute adjustable depending on @a
+ * timeout_mode. Passing XN_INFINITE @b and setting @a timeout_mode to
+ * XN_RELATIVE specifies an unbounded wait. All other values are used
+ * to initialize a watchdog timer.
+ *
+ * @param timeout_mode The mode of the @a timeout parameter. It can
+ * either be set to XN_RELATIVE, XN_ABSOLUTE, or XN_REALTIME (see also
+ * xntimer_start()).
+ *
+ * @return A bitmask which may include zero or one information bit
+ * among XNRMID, XNTIMEO and XNBREAK, which should be tested by the
+ * caller, for detecting respectively: object deletion, timeout or
+ * signal/unblock conditions which might have happened while waiting.
+ *
+ * @coretags{primary-only, might-switch}
+ *
+ * @note Unlike xnsynch_try_acquire(), this call does NOT check for
+ * an invalid recursive locking request, which means that such a request
+ * will always cause a deadlock for the caller.
+ */
+int xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
+		    xntmode_t timeout_mode)
+{
+	struct xnthread *curr, *owner;
+	xnhandle_t currh, h, oldh;
+	atomic_t *lockp;
+	spl_t s;
+
+	primary_mode_only();
+
+	XENO_BUG_ON(COBALT, (synch->status & XNSYNCH_OWNER) == 0);
+
+	curr = xnthread_current();
+	currh = curr->handle;
+	lockp = xnsynch_fastlock(synch);
+	trace_cobalt_synch_acquire(synch);
+redo:
+	/* Basic form of xnsynch_try_acquire(). */
+	h = atomic_cmpxchg(lockp, XN_NO_HANDLE,
+			   get_owner_handle(currh, synch));
+	if (likely(h == XN_NO_HANDLE)) {
+		set_current_owner(synch, curr);
+		xnthread_get_resource(curr);
+		return 0;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/*
+	 * Set claimed bit.  In case it appears to be set already,
+	 * re-read its state under nklock so that we don't miss any
+	 * change between the lock-less read and here. But also try to
+	 * avoid cmpxchg where possible. Only if it appears not to be
+	 * set, start with cmpxchg directly.
+	 */
+	if (xnsynch_fast_is_claimed(h)) {
+		oldh = atomic_read(lockp);
+		goto test_no_owner;
+	}
+
+	do {
+		oldh = atomic_cmpxchg(lockp, h, xnsynch_fast_claimed(h));
+		if (likely(oldh == h))
+			break;
+	test_no_owner:
+		if (oldh == XN_NO_HANDLE) {
+			/* Mutex released from another cpu. */
+			xnlock_put_irqrestore(&nklock, s);
+			goto redo;
+		}
+		h = oldh;
+	} while (!xnsynch_fast_is_claimed(h));
+
+	owner = xnthread_lookup(h);
+	if (owner == NULL) {
+		/*
+		 * The handle is broken, therefore pretend that the
+		 * synch object was deleted to signal an error.
+		 */
+		xnthread_set_info(curr, XNRMID);
+		goto out;
+	}
+
+	/*
+	 * This is the contended path. We just detected an earlier
+	 * syscall-less fast locking from userland, fix up the
+	 * in-kernel state information accordingly.
+	 *
+	 * The consistency of the state information is guaranteed,
+	 * because we just raised the claim bit atomically for this
+	 * contended lock, therefore userland will have to jump to the
+	 * kernel when releasing it, instead of doing a fast
+	 * unlock. Since we currently own the superlock, consistency
+	 * wrt transfer_ownership() is guaranteed through
+	 * serialization.
+	 *
+	 * CAUTION: in this particular case, the only assumptions we
+	 * can safely make is that *owner is valid but not current on
+	 * this CPU.
+	 */
+	track_owner(synch, owner);
+	xnsynch_detect_relaxed_owner(synch, curr);
+
+	if ((synch->status & XNSYNCH_PRIO) == 0) { /* i.e. FIFO */
+		list_add_tail(&curr->plink, &synch->pendq);
+		goto block;
+	}
+
+	if (curr->wprio > owner->wprio) {
+		if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) {
+			/* Ownership is still pending, steal the resource. */
+			set_current_owner_locked(synch, curr);
+			xnthread_clear_info(curr, XNRMID | XNTIMEO | XNBREAK);
+			xnthread_set_info(owner, XNROBBED);
+			goto grab;
+		}
+
+		list_add_priff(curr, &synch->pendq, wprio, plink);
+
+		if (synch->status & XNSYNCH_PI) {
+			raise_boost_flag(owner);
+
+			if (synch->status & XNSYNCH_CLAIMED)
+				list_del(&synch->next); /* owner->boosters */
+			else
+				synch->status |= XNSYNCH_CLAIMED;
+
+			synch->wprio = curr->wprio;
+			list_add_priff(synch, &owner->boosters, wprio, next);
+			/*
+			 * curr->wprio > owner->wprio implies that
+			 * synch must be leading the booster list
+			 * after insertion, so we may call
+			 * inherit_thread_priority() for tracking
+			 * current's priority directly without going
+			 * through adjust_boost().
+			 */
+			inherit_thread_priority(owner, curr);
+		}
+	} else
+		list_add_priff(curr, &synch->pendq, wprio, plink);
+block:
+	xnthread_suspend(curr, XNPEND, timeout, timeout_mode, synch);
+	curr->wwake = NULL;
+	xnthread_clear_info(curr, XNWAKEN);
+
+	if (xnthread_test_info(curr, XNRMID | XNTIMEO | XNBREAK))
+		goto out;
+
+	if (xnthread_test_info(curr, XNROBBED)) {
+		/*
+		 * Somebody stole the ownership from us while we were ready
+		 * to run, waiting for the CPU: we need to wait again
+		 * for the resource.
+		 */
+		if (timeout_mode != XN_RELATIVE || timeout == XN_INFINITE) {
+			xnlock_put_irqrestore(&nklock, s);
+			goto redo;
+		}
+		timeout = xntimer_get_timeout_stopped(&curr->rtimer);
+		if (timeout > 1) { /* Otherwise, it's too late. */
+			xnlock_put_irqrestore(&nklock, s);
+			goto redo;
+		}
+		xnthread_set_info(curr, XNTIMEO);
+		goto out;
+	}
+grab:
+	xnthread_get_resource(curr);
+
+	if (xnsynch_pended_p(synch))
+		currh = xnsynch_fast_claimed(currh);
+
+	/* Set new ownership for this object. */
+	atomic_set(lockp, get_owner_handle(currh, synch));
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return xnthread_test_info(curr, XNRMID|XNTIMEO|XNBREAK);
+}
+EXPORT_SYMBOL_GPL(xnsynch_acquire);
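+
+/*
+ * Illustrative sketch (not part of the original code): blocking
+ * acquisition of a (hypothetical) object with a 1 ms relative
+ * timeout, decoding the returned information bits; the error codes
+ * picked here are illustrative.
+ *
+ *	int info;
+ *
+ *	info = xnsynch_acquire(&mutex_synch, 1000000, XN_RELATIVE);
+ *	if (info & XNRMID)
+ *		return -EIDRM;
+ *	if (info & XNTIMEO)
+ *		return -ETIMEDOUT;
+ *	if (info & XNBREAK)
+ *		return -EINTR;
+ */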
+
+static void drop_booster(struct xnsynch *synch, struct xnthread *owner)
+{
+	list_del(&synch->next);	/* owner->boosters */
+
+	if (list_empty(&owner->boosters)) {
+		xnthread_clear_state(owner, XNBOOST);
+		inherit_thread_priority(owner, owner);
+	} else
+		adjust_boost(owner, NULL);
+}
+
+static inline void clear_pi_boost(struct xnsynch *synch,
+				  struct xnthread *owner)
+{	/* nklock held, irqs off */
+	synch->status &= ~XNSYNCH_CLAIMED;
+	drop_booster(synch, owner);
+}
+
+static inline void clear_pp_boost(struct xnsynch *synch,
+				  struct xnthread *owner)
+{	/* nklock held, irqs off */
+	synch->status &= ~XNSYNCH_CEILING;
+	drop_booster(synch, owner);
+}
+
+static bool transfer_ownership(struct xnsynch *synch,
+			       struct xnthread *lastowner)
+{				/* nklock held, irqs off */
+	struct xnthread *nextowner;
+	xnhandle_t nextownerh;
+	atomic_t *lockp;
+
+	lockp = xnsynch_fastlock(synch);
+
+	/*
+	 * Our caller checked for contention locklessly, so we do have
+	 * to check again under lock in a different way.
+	 */
+	if (list_empty(&synch->pendq)) {
+		synch->owner = NULL;
+		atomic_set(lockp, XN_NO_HANDLE);
+		return false;
+	}
+
+	nextowner = list_first_entry(&synch->pendq, struct xnthread, plink);
+	list_del(&nextowner->plink);
+	nextowner->wchan = NULL;
+	nextowner->wwake = synch;
+	set_current_owner_locked(synch, nextowner);
+	xnthread_set_info(nextowner, XNWAKEN);
+	xnthread_resume(nextowner, XNPEND);
+
+	if (synch->status & XNSYNCH_CLAIMED)
+		clear_pi_boost(synch, lastowner);
+
+	nextownerh = get_owner_handle(nextowner->handle, synch);
+	if (xnsynch_pended_p(synch))
+		nextownerh = xnsynch_fast_claimed(nextownerh);
+
+	atomic_set(lockp, nextownerh);
+
+	return true;
+}
+
+/**
+ * @fn bool xnsynch_release(struct xnsynch *synch, struct xnthread *curr)
+ * @brief Release a resource and pass it to the next waiting thread.
+ *
+ * This service releases the ownership of the given synchronization
+ * object. The thread which is currently leading the object's pending
+ * list, if any, is unblocked from its pending state. However, no
+ * reschedule is performed.
+ *
+ * This service must be used only with synchronization objects that
+ * track ownership (XNSYNCH_OWNER set).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * whose ownership is changed.
+ *
+ * @param curr The descriptor address of the current thread, which
+ * must own the object at the time of calling.
+ *
+ * @return True if a reschedule is required.
+ *
+ * @sideeffect
+ *
+ * - The effective priority of the previous resource owner might be
+ * lowered to its base priority value as a consequence of the priority
+ * boost being cleared.
+ *
+ * - The synchronization object ownership is transferred to the
+ * unblocked thread.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+bool xnsynch_release(struct xnsynch *synch, struct xnthread *curr)
+{
+	bool need_resched = false;
+	xnhandle_t currh, h;
+	atomic_t *lockp;
+	spl_t s;
+
+	XENO_BUG_ON(COBALT, (synch->status & XNSYNCH_OWNER) == 0);
+
+	trace_cobalt_synch_release(synch);
+
+	if (xnthread_put_resource(curr))
+		return false;
+
+	lockp = xnsynch_fastlock(synch);
+	currh = curr->handle;
+	/*
+	 * FLCEIL may only be raised by the owner, or when the owner
+	 * is blocked waiting for the synch (ownership transfer). In
+	 * addition, only the current owner of a synch may release it,
+	 * therefore we can't race while testing FLCEIL locklessly.
+	 * All updates to FLCLAIM are covered by the superlock.
+	 *
+	 * Therefore, clearing the fastlock racelessly in this routine
+	 * without leaking FLCEIL/FLCLAIM updates can be achieved by
+	 * holding the superlock.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+
+	if (synch->status & XNSYNCH_CEILING) {
+		clear_pp_boost(synch, curr);
+		need_resched = true;
+	}
+
+	h = atomic_cmpxchg(lockp, currh, XN_NO_HANDLE);
+	if ((h & ~XNSYNCH_FLCEIL) != currh)
+		/* FLCLAIM set, synch is contended. */
+		need_resched = transfer_ownership(synch, curr);
+	else if (h != currh)	/* FLCEIL set, FLCLAIM clear. */
+		atomic_set(lockp, XN_NO_HANDLE);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return need_resched;
+}
+EXPORT_SYMBOL_GPL(xnsynch_release);
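+
+/*
+ * Illustrative sketch (not part of the original code): releasing an
+ * owned (hypothetical) object, then applying the new scheduling state
+ * only when required.
+ *
+ *	if (xnsynch_release(&mutex_synch, xnthread_current()))
+ *		xnsched_run();
+ */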
+
+void xnsynch_requeue_sleeper(struct xnthread *thread)
+{				/* nklock held, irqs off */
+	struct xnsynch *synch = thread->wchan;
+	struct xnthread *owner;
+
+	XENO_BUG_ON(COBALT, !(synch->status & XNSYNCH_PRIO));
+
+	/*
+	 * Update the position in the pend queue of a thread waiting
+	 * for a lock. This routine propagates the change throughout
+	 * the PI chain if required.
+	 */
+	list_del(&thread->plink);
+	list_add_priff(thread, &synch->pendq, wprio, plink);
+	owner = synch->owner;
+
+	/* Only PI-enabled objects are of interest here. */
+	if ((synch->status & XNSYNCH_PI) == 0)
+		return;
+
+	synch->wprio = thread->wprio;
+	if (synch->status & XNSYNCH_CLAIMED)
+		list_del(&synch->next);
+	else {
+		synch->status |= XNSYNCH_CLAIMED;
+		raise_boost_flag(owner);
+	}
+
+	list_add_priff(synch, &owner->boosters, wprio, next);
+	adjust_boost(owner, thread);
+}
+EXPORT_SYMBOL_GPL(xnsynch_requeue_sleeper);
+
+/**
+ * @fn struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch);
+ * @brief Access the thread leading a synch object wait queue.
+ *
+ * This service returns the descriptor address of the thread leading a
+ * synchronization object's wait queue.
+ *
+ * @param synch The descriptor address of the target synchronization object.
+ *
+ * @return The descriptor address of the leading thread, or NULL if no
+ * thread is currently waiting.
+ *
+ * @coretags{unrestricted}
+ */
+struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch)
+{
+	struct xnthread *thread = NULL;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (!list_empty(&synch->pendq))
+		thread = list_first_entry(&synch->pendq,
+					  struct xnthread, plink);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return thread;
+}
+EXPORT_SYMBOL_GPL(xnsynch_peek_pendq);
+
+/**
+ * @fn int xnsynch_flush(struct xnsynch *synch, int reason);
+ * @brief Unblock all waiters pending on a resource.
+ *
+ * This service atomically releases all threads which currently sleep
+ * on a given resource. This service should be called by upper
+ * interfaces under circumstances requiring that the pending queue of
+ * a given resource is cleared, such as before the resource is
+ * deleted.
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to be flushed.
+ *
+ * @param reason Some flags to set in the information mask of every
+ * unblocked thread. Zero is an acceptable value. The following bits
+ * are pre-defined by Cobalt:
+ *
+ * - XNRMID should be set to indicate that the synchronization object
+ * is about to be destroyed (see xnthread_resume()).
+ *
+ * - XNBREAK should be set to indicate that the wait has been forcibly
+ * interrupted (see xnthread_unblock()).
+ *
+ * @return XNSYNCH_RESCHED is returned if at least one thread is
+ * unblocked, which means the caller should invoke xnsched_run() for
+ * applying the new scheduling state. Otherwise, XNSYNCH_DONE is
+ * returned.
+ *
+ * @sideeffect
+ *
+ * - The effective priority of the current resource owner might be
+ * lowered to its base priority value as a consequence of the priority
+ * inheritance boost being cleared.
+ *
+ * @coretags{unrestricted}
+ */
+int xnsynch_flush(struct xnsynch *synch, int reason)
+{
+	struct xnthread *sleeper, *tmp;
+	int ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_synch_flush(synch);
+
+	if (list_empty(&synch->pendq)) {
+		XENO_BUG_ON(COBALT, synch->status & XNSYNCH_CLAIMED);
+		ret = XNSYNCH_DONE;
+	} else {
+		ret = XNSYNCH_RESCHED;
+		list_for_each_entry_safe(sleeper, tmp, &synch->pendq, plink) {
+			list_del(&sleeper->plink);
+			xnthread_set_info(sleeper, reason);
+			sleeper->wchan = NULL;
+			xnthread_resume(sleeper, XNPEND);
+		}
+		if (synch->status & XNSYNCH_CLAIMED)
+			clear_pi_boost(synch, synch->owner);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnsynch_flush);
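+
+/*
+ * Illustrative sketch (not part of the original code): forcibly
+ * interrupting all waiters of a (hypothetical) object, e.g. before
+ * tearing it down.
+ *
+ *	if (xnsynch_flush(&event_synch, XNBREAK) == XNSYNCH_RESCHED)
+ *		xnsched_run();
+ */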
+
+void xnsynch_forget_sleeper(struct xnthread *thread)
+{				/* nklock held, irqs off */
+	struct xnsynch *synch = thread->wchan;
+	struct xnthread *owner, *target;
+
+	/*
+	 * Do all the necessary housekeeping chores to stop a thread
+	 * from waiting on a given synchronization object. Doing so
+	 * may require to update a PI chain.
+	 */
+	trace_cobalt_synch_forget(synch);
+
+	xnthread_clear_state(thread, XNPEND);
+	thread->wchan = NULL;
+	list_del(&thread->plink); /* synch->pendq */
+
+	/*
+	 * Only a sleeper leaving a PI chain triggers an update.
+	 * NOTE: PP objects never bear the CLAIMED bit.
+	 */
+	if ((synch->status & XNSYNCH_CLAIMED) == 0)
+		return;
+
+	owner = synch->owner;
+
+	if (list_empty(&synch->pendq)) {
+		/* No more sleepers: clear the PI boost. */
+		clear_pi_boost(synch, owner);
+		return;
+	}
+
+	/*
+	 * Reorder the booster queue of the current owner after we
+	 * left the wait list, then set its priority to the new
+	 * minimum required to prevent priority inversion.
+	 */
+	target = list_first_entry(&synch->pendq, struct xnthread, plink);
+	synch->wprio = target->wprio;
+	list_del(&synch->next);	/* owner->boosters */
+	list_add_priff(synch, &owner->boosters, wprio, next);
+	adjust_boost(owner, target);
+}
+EXPORT_SYMBOL_GPL(xnsynch_forget_sleeper);
+
+#ifdef CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED
+
+/*
+ * Detect when a thread is about to sleep on a synchronization
+ * object currently owned by someone running in secondary mode.
+ */
+void xnsynch_detect_relaxed_owner(struct xnsynch *synch,
+				  struct xnthread *sleeper)
+{
+	if (xnthread_test_state(sleeper, XNWARN) &&
+	    !xnthread_test_info(sleeper, XNPIALERT) &&
+	    xnthread_test_state(synch->owner, XNRELAX)) {
+		xnthread_set_info(sleeper, XNPIALERT);
+		__xnthread_signal(sleeper, SIGDEBUG,
+				  SIGDEBUG_MIGRATE_PRIOINV);
+	} else
+		xnthread_clear_info(sleeper,  XNPIALERT);
+}
+
+/*
+ * Detect when a thread is about to relax while holding booster(s)
+ * (claimed PI or active PP object), which denotes a potential for
+ * priority inversion. In such an event, any sleeper bearing the
+ * XNWARN bit will receive a SIGDEBUG notification.
+ */
+void xnsynch_detect_boosted_relax(struct xnthread *owner)
+{
+	struct xnthread *sleeper;
+	struct xnsynch *synch;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	xnthread_for_each_booster(synch, owner) {
+		xnsynch_for_each_sleeper(sleeper, synch) {
+			if (xnthread_test_state(sleeper, XNWARN)) {
+				xnthread_set_info(sleeper, XNPIALERT);
+				__xnthread_signal(sleeper, SIGDEBUG,
+						  SIGDEBUG_MIGRATE_PRIOINV);
+			}
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+#endif /* CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED */
+
+/** @} */
+++ linux-patched/kernel/xenomai/select.c	2022-03-21 12:58:28.857894149 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/debug.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ * Copyright (C) 2008 Efixo
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include <linux/bitops.h>	/* For hweight_long */
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/select.h>
+#include <pipeline/sirq.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_select Synchronous I/O multiplexing
+ *
+ * This module implements the services needed for implementing the
+ * POSIX select() service, or any other event multiplexing services.
+ *
+ * Following the semantics of the POSIX select() service, this module defines
+ * three types of events:
+ * - \a XNSELECT_READ meaning that a file descriptor is ready for reading;
+ * - \a XNSELECT_WRITE meaning that a file descriptor is ready for writing;
+ * - \a XNSELECT_EXCEPT meaning that a file descriptor received an exceptional
+ *   event.
+ *
+ * It works by defining two structures:
+ * - a @a struct @a xnselect structure, which should be added to every file
+ * descriptor for every event type (read, write, or except);
+ * - a @a struct @a xnselector structure, the selection structure, passed by
+ * the thread calling the xnselect service, where this service does all its
+ * housekeeping.
+ * @{
+ */
+
+static LIST_HEAD(selector_list);
+static int deletion_virq;
+
+/**
+ * Initialize a @a struct @a xnselect structure.
+ *
+ * This service must be called to initialize a @a struct @a xnselect structure
+ * before it is bound to a selector by the means of xnselect_bind().
+ *
+ * @param select_block pointer to the xnselect structure to be initialized
+ *
+ * @coretags{task-unrestricted}
+ */
+void xnselect_init(struct xnselect *select_block)
+{
+	INIT_LIST_HEAD(&select_block->bindings);
+}
+EXPORT_SYMBOL_GPL(xnselect_init);
+
+static inline int xnselect_wakeup(struct xnselector *selector)
+{
+	return xnsynch_flush(&selector->synchbase, 0) == XNSYNCH_RESCHED;
+}
+
+/**
+ * Bind a file descriptor (represented by its @a xnselect structure) to a
+ * selector block.
+ *
+ * @param select_block pointer to the @a struct @a xnselect to be bound;
+ *
+ * @param binding pointer to a newly allocated (using xnmalloc) @a struct
+ * @a xnselect_binding;
+ *
+ * @param selector pointer to the selector structure;
+ *
+ * @param type type of events (@a XNSELECT_READ, @a XNSELECT_WRITE, or @a
+ * XNSELECT_EXCEPT);
+ *
+ * @param index index of the file descriptor (represented by @a
+ * select_block) in the bit fields used by the @a selector structure;
+ *
+ * @param state current state of the file descriptor.
+ *
+ * @a select_block must have been initialized with xnselect_init(),
+ * the @a xnselector structure must have been initialized with
+ * xnselector_init(), @a binding may be uninitialized.
+ *
+ * This service must be called with nklock locked, irqs off. For this reason,
+ * the @a binding parameter must have been allocated by the caller outside the
+ * locking section.
+ *
+ * @retval -EINVAL if @a type or @a index is invalid;
+ * @retval 0 otherwise.
+ *
+ * @coretags{task-unrestricted, might-switch, atomic-entry}
+ */
+int xnselect_bind(struct xnselect *select_block,
+		  struct xnselect_binding *binding,
+		  struct xnselector *selector,
+		  unsigned type,
+		  unsigned index,
+		  unsigned state)
+{
+	atomic_only();
+
+	if (type >= XNSELECT_MAX_TYPES || index > __FD_SETSIZE)
+		return -EINVAL;
+
+	binding->selector = selector;
+	binding->fd = select_block;
+	binding->type = type;
+	binding->bit_index = index;
+
+	list_add_tail(&binding->slink, &selector->bindings);
+	list_add_tail(&binding->link, &select_block->bindings);
+	__FD_SET__(index, &selector->fds[type].expected);
+	if (state) {
+		__FD_SET__(index, &selector->fds[type].pending);
+		if (xnselect_wakeup(selector))
+			xnsched_run();
+	} else
+		__FD_CLR__(index, &selector->fds[type].pending);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnselect_bind);
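+
+/*
+ * Illustrative sketch (not part of the original code): binding a
+ * (hypothetical) per-descriptor state block to a selector for read
+ * events, with the binding allocated outside the locked section as
+ * required above. The fd_block, selector, fd_index and fd_ready
+ * variables are hypothetical.
+ *
+ *	struct xnselect_binding *binding;
+ *	spl_t s;
+ *	int ret;
+ *
+ *	binding = xnmalloc(sizeof(*binding));
+ *	if (binding == NULL)
+ *		return -ENOMEM;
+ *
+ *	xnlock_get_irqsave(&nklock, s);
+ *	ret = xnselect_bind(&fd_block, binding, selector,
+ *			    XNSELECT_READ, fd_index, fd_ready);
+ *	xnlock_put_irqrestore(&nklock, s);
+ */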
+
+/* Must be called with nklock locked irqs off */
+int __xnselect_signal(struct xnselect *select_block, unsigned state)
+{
+	struct xnselect_binding *binding;
+	struct xnselector *selector;
+	int resched = 0;
+
+	list_for_each_entry(binding, &select_block->bindings, link) {
+		selector = binding->selector;
+		if (state) {
+			if (!__FD_ISSET__(binding->bit_index,
+					&selector->fds[binding->type].pending)) {
+				__FD_SET__(binding->bit_index,
+					 &selector->fds[binding->type].pending);
+				if (xnselect_wakeup(selector))
+					resched = 1;
+			}
+		} else
+			__FD_CLR__(binding->bit_index,
+				 &selector->fds[binding->type].pending);
+	}
+
+	return resched;
+}
+EXPORT_SYMBOL_GPL(__xnselect_signal);
+
+/**
+ * Destroy the @a xnselect structure associated with a file descriptor.
+ *
+ * Any binding with a @a xnselector block is destroyed.
+ *
+ * @param select_block pointer to the @a xnselect structure associated
+ * with a file descriptor
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+void xnselect_destroy(struct xnselect *select_block)
+{
+	struct xnselect_binding *binding, *tmp;
+	struct xnselector *selector;
+	int resched = 0;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(&select_block->bindings))
+		goto out;
+
+	list_for_each_entry_safe(binding, tmp, &select_block->bindings, link) {
+		list_del(&binding->link);
+		selector = binding->selector;
+		__FD_CLR__(binding->bit_index,
+			 &selector->fds[binding->type].expected);
+		if (!__FD_ISSET__(binding->bit_index,
+				&selector->fds[binding->type].pending)) {
+			__FD_SET__(binding->bit_index,
+				 &selector->fds[binding->type].pending);
+			if (xnselect_wakeup(selector))
+				resched = 1;
+		}
+		list_del(&binding->slink);
+		xnlock_put_irqrestore(&nklock, s);
+		xnfree(binding);
+		xnlock_get_irqsave(&nklock, s);
+	}
+	if (resched)
+		xnsched_run();
+out:
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnselect_destroy);
+
+static unsigned
+fd_set_andnot(fd_set *result, fd_set *first, fd_set *second, unsigned n)
+{
+	unsigned i, not_empty = 0;
+
+	for (i = 0; i < __FDELT__(n); i++)
+		if((result->fds_bits[i] =
+		    first->fds_bits[i] & ~(second->fds_bits[i])))
+			not_empty = 1;
+
+	if (i < __FDSET_LONGS__
+	    && (result->fds_bits[i] =
+		first->fds_bits[i] & ~(second->fds_bits[i]) & (__FDMASK__(n) - 1)))
+		not_empty = 1;
+
+	return not_empty;
+}
+
+static unsigned
+fd_set_and(fd_set *result, fd_set *first, fd_set *second, unsigned n)
+{
+	unsigned i, not_empty = 0;
+
+	for (i = 0; i < __FDELT__(n); i++)
+		if((result->fds_bits[i] =
+		    first->fds_bits[i] & second->fds_bits[i]))
+			not_empty = 1;
+
+	if (i < __FDSET_LONGS__
+	    && (result->fds_bits[i] =
+		first->fds_bits[i] & second->fds_bits[i] & (__FDMASK__(n) - 1)))
+		not_empty = 1;
+
+	return not_empty;
+}
+
+static void fd_set_zeropad(fd_set *set, unsigned n)
+{
+	unsigned i;
+
+	i = __FDELT__(n);
+
+	if (i < __FDSET_LONGS__)
+		set->fds_bits[i] &= (__FDMASK__(n) - 1);
+
+	for(i++; i < __FDSET_LONGS__; i++)
+		set->fds_bits[i] = 0;
+}
+
+static unsigned fd_set_popcount(fd_set *set, unsigned n)
+{
+	unsigned count = 0, i;
+
+	for (i = 0; i < __FDELT__(n); i++)
+		if (set->fds_bits[i])
+			count += hweight_long(set->fds_bits[i]);
+
+	if (i < __FDSET_LONGS__ && (set->fds_bits[i] & (__FDMASK__(n) - 1)))
+		count += hweight_long(set->fds_bits[i] & (__FDMASK__(n) - 1));
+
+	return count;
+}
+
+/**
+ * Initialize a selector structure.
+ *
+ * @param selector The selector structure to be initialized.
+ *
+ * @retval 0
+ *
+ * @coretags{task-unrestricted}
+ */
+int xnselector_init(struct xnselector *selector)
+{
+	unsigned int i;
+
+	xnsynch_init(&selector->synchbase, XNSYNCH_FIFO, NULL);
+	for (i = 0; i < XNSELECT_MAX_TYPES; i++) {
+		__FD_ZERO__(&selector->fds[i].expected);
+		__FD_ZERO__(&selector->fds[i].pending);
+	}
+	INIT_LIST_HEAD(&selector->bindings);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnselector_init);
+
+/**
+ * Check the state of a number of file descriptors, wait for a state change if
+ * no descriptor is ready.
+ *
+ * @param selector structure to check for pending events
+ * @param out_fds The set of descriptors with pending events if a strictly
+ * positive number is returned, or the set of descriptors not yet bound if
+ * -ECHRNG is returned;
+ * @param in_fds the set of descriptors which events should be checked
+ * @param nfds the highest-numbered descriptor in any of the @a in_fds sets, plus 1;
+ * @param timeout the timeout, whose meaning depends on @a timeout_mode; note
+ * that xnselect() passes @a timeout and @a timeout_mode unchanged to
+ * xnsynch_sleep_on(), so passing a relative value different from XN_INFINITE
+ * as a timeout with @a timeout_mode set to XN_RELATIVE will cause a longer
+ * sleep than expected if the sleep is interrupted.
+ * @param timeout_mode the mode of @a timeout.
+ *
+ * @retval -EINVAL if @a nfds is negative;
+ * @retval -ECHRNG if some of the descriptors passed in @a in_fds have not yet
+ * been registered with xnselect_bind(), @a out_fds contains the set of such
+ * descriptors;
+ * @retval -EINTR if @a xnselect was interrupted while waiting;
+ * @retval 0 in case of timeout.
+ * @retval the number of file descriptors having received an event.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int xnselect(struct xnselector *selector,
+	     fd_set *out_fds[XNSELECT_MAX_TYPES],
+	     fd_set *in_fds[XNSELECT_MAX_TYPES],
+	     int nfds,
+	     xnticks_t timeout, xntmode_t timeout_mode)
+{
+	unsigned int i, not_empty = 0, count;
+	int info = 0;
+	spl_t s;
+
+	if ((unsigned) nfds > __FD_SETSIZE)
+		return -EINVAL;
+
+	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
+		if (out_fds[i])
+			fd_set_zeropad(out_fds[i], nfds);
+
+	xnlock_get_irqsave(&nklock, s);
+	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
+		if (out_fds[i]
+		    && fd_set_andnot(out_fds[i], in_fds[i],
+				     &selector->fds[i].expected, nfds))
+			not_empty = 1;
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (not_empty)
+		return -ECHRNG;
+
+	xnlock_get_irqsave(&nklock, s);
+	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
+		if (out_fds[i]
+		    && fd_set_and(out_fds[i], in_fds[i],
+				  &selector->fds[i].pending, nfds))
+			not_empty = 1;
+
+	while (!not_empty) {
+		info = xnsynch_sleep_on(&selector->synchbase,
+					timeout, timeout_mode);
+
+		for (i = 0; i < XNSELECT_MAX_TYPES; i++)
+			if (out_fds[i]
+			    && fd_set_and(out_fds[i], in_fds[i],
+					  &selector->fds[i].pending, nfds))
+				not_empty = 1;
+
+		if (info & (XNBREAK | XNTIMEO))
+			break;
+	}
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (not_empty) {
+		for (count = 0, i = 0; i < XNSELECT_MAX_TYPES; i++)
+			if (out_fds[i])
+				count += fd_set_popcount(out_fds[i], nfds);
+
+		return count;
+	}
+
+	if (info & XNBREAK)
+		return -EINTR;
+
+	return 0; /* Timeout */
+}
+EXPORT_SYMBOL_GPL(xnselect);
+
+/**
+ * Destroy a selector block.
+ *
+ * All bindings with file descriptor are destroyed.
+ *
+ * @param selector the selector block to be destroyed
+ *
+ * @coretags{task-unrestricted}
+ */
+void xnselector_destroy(struct xnselector *selector)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	list_add_tail(&selector->destroy_link, &selector_list);
+	pipeline_post_sirq(deletion_virq);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnselector_destroy);
+
+static irqreturn_t xnselector_destroy_loop(int virq, void *dev_id)
+{
+	struct xnselect_binding *binding, *tmpb;
+	struct xnselector *selector, *tmps;
+	struct xnselect *fd;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(&selector_list))
+		goto out;
+
+	list_for_each_entry_safe(selector, tmps, &selector_list, destroy_link) {
+		list_del(&selector->destroy_link);
+		if (list_empty(&selector->bindings))
+			goto release;
+		list_for_each_entry_safe(binding, tmpb, &selector->bindings, slink) {
+			list_del(&binding->slink);
+			fd = binding->fd;
+			list_del(&binding->link);
+			xnlock_put_irqrestore(&nklock, s);
+			xnfree(binding);
+			xnlock_get_irqsave(&nklock, s);
+		}
+	release:
+		xnsynch_destroy(&selector->synchbase);
+		xnsched_run();
+		xnlock_put_irqrestore(&nklock, s);
+
+		xnfree(selector);
+
+		xnlock_get_irqsave(&nklock, s);
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return IRQ_HANDLED;
+}
+
+int xnselect_mount(void)
+{
+	deletion_virq = pipeline_create_inband_sirq(xnselector_destroy_loop);
+	if (deletion_virq < 0)
+		return deletion_virq;
+
+	return 0;
+}
+
+int xnselect_umount(void)
+{
+	pipeline_delete_inband_sirq(deletion_virq);
+	return 0;
+}
+
+/** @} */
+++ linux-patched/kernel/xenomai/debug.h	2022-03-21 12:58:28.854894178 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/procfs.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#ifndef _KERNEL_COBALT_DEBUG_H
+#define _KERNEL_COBALT_DEBUG_H
+
+#include <cobalt/kernel/assert.h>
+
+struct xnthread;
+
+#ifdef CONFIG_XENO_OPT_DEBUG
+
+int xndebug_init(void);
+
+void xndebug_cleanup(void);
+
+void xndebug_shadow_init(struct xnthread *thread);
+
+extern struct xnvfile_directory cobalt_debug_vfroot;
+
+#else  /* !XENO_OPT_DEBUG */
+
+static inline int xndebug_init(void)
+{
+	return 0;
+}
+
+static inline void xndebug_cleanup(void)
+{
+}
+
+static inline void xndebug_shadow_init(struct xnthread *thread)
+{
+}
+
+#endif  /* !XENO_OPT_DEBUG */
+
+#ifdef CONFIG_XENO_OPT_DEBUG_TRACE_RELAX
+void xndebug_notify_relax(struct xnthread *thread,
+			  int reason);
+void xndebug_trace_relax(int nr, unsigned long *backtrace,
+			 int reason);
+#else
+static inline
+void xndebug_notify_relax(struct xnthread *thread, int reason)
+{
+}
+static inline
+void xndebug_trace_relax(int nr, unsigned long *backtrace,
+			 int reason)
+{
+	/* Simply ignore. */
+}
+#endif
+
+#endif /* !_KERNEL_COBALT_DEBUG_H */
+++ linux-patched/kernel/xenomai/procfs.h	2022-03-21 12:58:28.850894217 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _KERNEL_COBALT_PROCFS_H
+#define _KERNEL_COBALT_PROCFS_H
+
+#ifdef CONFIG_XENO_OPT_VFILE
+int xnprocfs_init_tree(void);
+void xnprocfs_cleanup_tree(void);
+#else
+static inline int xnprocfs_init_tree(void) { return 0; }
+static inline void xnprocfs_cleanup_tree(void) { }
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+#endif /* !_KERNEL_COBALT_PROCFS_H */
+++ linux-patched/kernel/xenomai/Kconfig	2022-03-21 12:58:28.847894247 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/heap.c	1970-01-01 01:00:00.000000000 +0100
+menu "Core features"
+
+config XENO_OPT_SCHED_CLASSES
+	bool "Extra scheduling classes"
+	default n
+	help
+	The Cobalt kernel implements a set of scheduling classes.
+	Each scheduling class defines its own set of rules for
+	determining when and how to select a new thread to run.
+
+	Cobalt has a built-in real-time class, which supports both
+	preemptive fixed-priority FIFO, and round-robin scheduling.
+
+	Enabling CONFIG_XENO_OPT_SCHED_CLASSES lets you enable
+	additional scheduling classes in the Cobalt kernel.
+
+	If in doubt, say N.
+
+config XENO_OPT_SCHED_WEAK
+	bool "Weak scheduling class"
+	default n
+	depends on XENO_OPT_SCHED_CLASSES
+	help
+	This option creates a Cobalt scheduling class for mapping
+	members of the regular POSIX SCHED_FIFO/RR policies to a low
+	priority class of the Cobalt kernel, providing no real-time
+	guarantee. Up to a hundred non-real-time priority levels are
+	then available from the SCHED_WEAK policy.
+
+	When CONFIG_XENO_OPT_SCHED_WEAK is disabled, Cobalt still
+	supports a single non-real-time priority level (i.e. zero
+	priority), assigned to members of the SCHED_OTHER class.
+
+	SCHED_WEAK/SCHED_OTHER threads can access Cobalt resources,
+	wait on Cobalt synchronization objects, but cannot compete for
+	the CPU with members of the real-time Cobalt classes.
+
+	Since Cobalt assumes no real-time requirement for
+	SCHED_WEAK/SCHED_OTHER threads, they are automatically moved
+	back to secondary mode upon return from any Cobalt syscall if
+	necessary, unless they hold a Cobalt mutex, which would defer
+	the transition until such mutex is released.
+
+	If in doubt, say N.
+
+config XENO_OPT_SCHED_TP
+	bool "Temporal partitioning"
+	default n
+	depends on XENO_OPT_SCHED_CLASSES
+	help
+	This option enables support for temporal partitioning.
+
+	If in doubt, say N.
+
+config XENO_OPT_SCHED_TP_NRPART
+	int "Number of partitions"
+	default 4
+	range 1 1024
+	depends on XENO_OPT_SCHED_TP
+	help
+	Define here the maximum number of temporal partitions the TP
+	scheduler may have to handle.
+
+config XENO_OPT_SCHED_SPORADIC
+	bool "Sporadic scheduling"
+	default n
+	depends on XENO_OPT_SCHED_CLASSES
+	help
+	This option enables support for the sporadic scheduling policy
+	in the Cobalt kernel (SCHED_SPORADIC), also known as POSIX
+	sporadic server.
+
+	It can be used to enforce a capped limit on the execution time
+	of a thread within a given period of time.
+
+	If in doubt, say N.
+
+config XENO_OPT_SCHED_SPORADIC_MAXREPL
+	int "Maximum number of pending replenishments"
+	default 8
+	range 4 16
+	depends on XENO_OPT_SCHED_SPORADIC
+	help
+	For performance reasons, the budget replenishment information
+	is statically stored on a per-thread basis. This parameter
+	defines the maximum number of replenishment requests that can
+	be pending concurrently for any given thread that undergoes
+	sporadic scheduling (system minimum is 4).
+
+config XENO_OPT_SCHED_QUOTA
+	bool "Thread groups with runtime quota"
+	default n
+	depends on XENO_OPT_SCHED_CLASSES
+	help
+	This option enables the SCHED_QUOTA scheduling policy in the
+	Cobalt kernel.
+
+	This policy enforces a limitation on the CPU consumption of
+	threads over a globally defined period, known as the quota
+	interval. This is done by pooling threads with common
+	requirements in groups, and giving each group a share of the
+	global period (see CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD).
+
+	When threads have entirely consumed the quota allotted to the
+	group they belong to, the latter is suspended as a whole,
+	until the next quota interval starts. At this point, a new
+	runtime budget is given to each group, in accordance with its
+	share.
+
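+	For instance, with the default 10000 us quota interval, a group
+	granted a 20% share gets a runtime budget of 2000 us per
+	interval, replenished when the next interval starts.
+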
+	If in doubt, say N.
+
+config XENO_OPT_SCHED_QUOTA_PERIOD
+	int "Quota interval (us)"
+	default 10000
+	range 100 1000000000
+	depends on XENO_OPT_SCHED_QUOTA
+	help
+	The global period thread groups can get a share of.
+
+config XENO_OPT_SCHED_QUOTA_NR_GROUPS
+	int "Number of thread groups"
+	default 32
+	range 1 1024
+	depends on XENO_OPT_SCHED_QUOTA
+	help
+	The overall number of thread groups which may be defined
+	across all CPUs.
+
+config XENO_OPT_STATS
+	bool "Runtime statistics"
+	depends on XENO_OPT_VFILE
+	default y
+	help
+	This option causes the Cobalt kernel to collect various
+	per-thread runtime statistics, which are accessible through
+	the /proc/xenomai/sched/stat interface.
+
+config XENO_OPT_STATS_IRQS
+	bool "Account IRQ handlers separately"
+	depends on XENO_OPT_STATS && IPIPE
+	default y
+	help
+	When enabled, the runtime of interrupt handlers is accounted
+	separately from the threads they interrupt. Also, the
+	occurrence of shared interrupts is accounted on a per-handler
+	basis.
+
+	This option is available to legacy I-pipe builds only.
+
+config XENO_OPT_SHIRQ
+	bool "Shared interrupts"
+	help
+	Enables support for both level- and edge-triggered shared
+	interrupts, so that multiple real-time interrupt handlers
+	are allowed to control dedicated hardware devices which are
+	configured to share the same interrupt line.
+
+config XENO_OPT_RR_QUANTUM
+	int "Round-robin quantum (us)"
+	default 1000
+	help
+	This parameter defines the duration of the default round-robin
+	time quantum expressed as a count of microseconds. This value
+	may be overridden internally by Cobalt services which provide
+	their own round-robin interval.
+
+config XENO_OPT_AUTOTUNE
+        tristate "Auto-tuning"
+        default y
+	select XENO_DRIVERS_AUTOTUNE
+        help
+	Enable auto-tuning capabilities. Auto-tuning is used for
+	adjusting the core timing services to the intrinsic latency of
+	the platform.
+
+config XENO_OPT_SCALABLE_SCHED
+	bool "O(1) scheduler"
+	help
+	This option causes a multi-level priority queue to be used in
+	the real-time scheduler, so that it operates in constant-time
+	regardless of the number of _concurrently runnable_ threads
+	(which might be much lower than the total number of active
+	threads).
+
+	Its use is recommended for large multi-threaded systems
+	involving more than 10 such threads; otherwise, the default
+	linear method usually performs better with lower memory
+	footprints.
+
+choice
+	prompt "Timer indexing method"
+	default XENO_OPT_TIMER_LIST if !X86_64
+	default XENO_OPT_TIMER_RBTREE if X86_64
+	help
+	This option allows you to select the underlying data structure
+	which is going to be used for ordering the outstanding
+	software timers managed by the Cobalt kernel.
+
+config XENO_OPT_TIMER_LIST
+	bool "Linear"
+	help
+	Use a linked list. Albeit O(N), this simple data structure is
+	particularly efficient when only a few timers (< 10) may be
+	concurrently outstanding at any point in time.
+
+config XENO_OPT_TIMER_RBTREE
+	bool "Tree"
+	help
+	Use a red-black tree. This data structure is efficient when a
+	high number of software timers may be concurrently
+	outstanding at any point in time.
+
+endchoice
+
+config XENO_OPT_PIPE
+	bool
+
+config XENO_OPT_MAP
+	bool
+
+config XENO_OPT_EXTCLOCK
+       bool
+
+config XENO_OPT_COBALT_EXTENSION
+       bool
+
+config XENO_OPT_VFILE
+       bool
+       depends on PROC_FS
+       default y
+
+endmenu
+
+menu "Sizes and static limits"
+
+config XENO_OPT_PIPE_NRDEV
+	int "Number of pipe devices"
+	depends on XENO_OPT_PIPE
+	default 32
+	help
+	Message pipes are bi-directional FIFO communication channels
+	allowing data exchange between Cobalt threads and regular
+	POSIX threads. Pipes natively preserve message boundaries, but
+	can also be used in byte streaming mode from kernel to
+	user-space.
+
+	This option sets the maximum number of pipe devices supported
+	in the system. Pipe devices are named /dev/rtpN where N is a
+	device minor number ranging from 0 to XENO_OPT_PIPE_NRDEV - 1.
+
+config XENO_OPT_REGISTRY_NRSLOTS
+	int "Number of registry slots"
+	default 512
+	help
+	The registry is used by the Cobalt kernel to export named
+	resources to user-space programs via the /proc interface.
+	Each named resource occupies a registry slot. This option sets
+	the maximum number of resources the registry can handle.
+
+config XENO_OPT_SYS_HEAPSZ
+	int "Size of system heap (Kb)"
+	default 4096
+	help
+	The system heap is used for various internal allocations by
+	the Cobalt kernel. The size is expressed in Kilobytes.
+
+config XENO_OPT_PRIVATE_HEAPSZ
+	int "Size of private heap (Kb)"
+	default 256
+	help
+	The Cobalt kernel implements fast IPC mechanisms within the
+	scope of a process which require a private kernel memory heap
+	to be mapped in the address space of each Xenomai application
+	process. This option can be used to set the size of this
+	per-process heap.
+
+	64k is considered a large enough size for common use cases.
+
+config XENO_OPT_SHARED_HEAPSZ
+	int "Size of shared heap (Kb)"
+	default 256
+	help
+	The Cobalt kernel implements fast IPC mechanisms between
+	processes which require a shared kernel memory heap to be
+	mapped in the address space of all Xenomai application
+	processes. This option can be used to set the size of this
+	system-wide heap.
+
+	64k is considered a large enough size for common use cases.
+
+config XENO_OPT_NRTIMERS
+       int "Maximum number of POSIX timers per process"
+       default 256
+       help
+       This tunable controls how many POSIX timers can exist at any
+       given time for each Cobalt process (a timer is created by a
+       call to the timer_create() service of the Cobalt/POSIX API).
+
+config XENO_OPT_DEBUG_TRACE_LOGSZ
+       int "Trace log size"
+       depends on XENO_OPT_DEBUG_TRACE_RELAX
+       default 16
+       help
+       The size (kilobytes) of the trace log of relax requests. Once
+       this limit is reached, subsequent traces will be silently
+       discarded.
+
+       Writing to /proc/xenomai/debug/relax empties the trace log.
+
+endmenu
+
+menu "Latency settings"
+
+config XENO_OPT_TIMING_SCHEDLAT
+	int "User scheduling latency (ns)"
+	default 0
+	help
+	The user scheduling latency is the time between the
+	termination of an interrupt handler and the execution of the
+	first instruction of the real-time application thread this
+	handler resumes. A default value of 0 (recommended) will cause
+	a pre-calibrated value to be used.
+
+	If the auto-tuner is enabled, this value will be used as the
+	factory default when running "autotune --reset".
+
+config XENO_OPT_TIMING_KSCHEDLAT
+	int "Intra-kernel scheduling latency (ns)"
+	default 0
+	help
+	The intra-kernel scheduling latency is the time between the
+	termination of an interrupt handler and the execution of the
+	first instruction of the RTDM kernel thread this handler
+	resumes. A default value of 0 (recommended) will cause a
+	pre-calibrated value to be used.
+
+	Intra-kernel latency is usually significantly lower than user
+	scheduling latency on MMU-enabled platforms, due to CPU cache
+	latency.
+
+	If the auto-tuner is enabled, this value will be used as the
+	factory default when running "autotune --reset".
+
+config XENO_OPT_TIMING_IRQLAT
+	int "Interrupt latency (ns)"
+	default 0
+	help
+	The interrupt latency is the time between the occurrence of an
+	IRQ and the first instruction of the interrupt handler which
+	will service it. A default value of 0 (recommended) will cause
+	a pre-calibrated value to be used.
+
+	If the auto-tuner is enabled, this value will be used as the
+	factory default when running "autotune --reset".
+
+endmenu
+
+menuconfig XENO_OPT_DEBUG
+	depends on XENO_OPT_VFILE
+	bool "Debug support"
+	help
+	  When enabled, various debugging features can be switched
+	  on. They can help to find problems in applications, drivers,
+	  and the Cobalt kernel. XENO_OPT_DEBUG by itself does not have
+	  any impact on the generated code.
+
+if XENO_OPT_DEBUG
+
+config XENO_OPT_DEBUG_COBALT
+	bool "Cobalt runtime assertions"
+	help
+	  This option activates various assertions inside the Cobalt
+	  kernel. This option has limited overhead.
+
+config XENO_OPT_DEBUG_MEMORY
+	bool "Cobalt memory checks"
+	help
+	  This option enables memory debug checks inside the Cobalt
+	  kernel. This option may induce significant overhead with large
+	  heaps.
+
+config XENO_OPT_DEBUG_CONTEXT
+       bool "Check for calling context"
+       help
+         This option enables checks for the calling context in the
+         Cobalt kernel, aimed at detecting when regular Linux routines
+         are entered from a real-time context, and conversely.
+
+config XENO_OPT_DEBUG_LOCKING
+	bool "Spinlock debugging support"
+	default y if SMP
+	help
+	  This option activates runtime assertions and measurements of
+	  spinlock spinning time and duration in the Cobalt kernel. It
+	  helps to find latency spots due to interrupt-masked
+	  sections. Statistics about the longest masked section
+	  can be found in /proc/xenomai/debug/lock.
+
+	  This option may induce a measurable overhead on low end
+	  machines.
+
+config XENO_OPT_DEBUG_USER
+	bool "User consistency checks"
+	help
+	  This option enables a set of consistency checks for
+	  detecting wrong runtime behavior in user applications.
+
+	  With some of the debug categories, threads can ask for
+	  notification when a problem is detected, by turning on the
+	  PTHREAD_WARNSW mode bit with pthread_setmode_np().  Cobalt
+	  sends the Linux-originated SIGDEBUG signal for notifying
+	  threads, along with a reason code passed into the associated
+	  siginfo data (see pthread_setmode_np()).
+	
+	  Some of these runtime checks may induce overhead; enable
+	  them for debugging purposes only.
+
+if XENO_OPT_DEBUG_USER
+
+config XENO_OPT_DEBUG_MUTEX_RELAXED
+       bool "Detect relaxed mutex owner"
+       default y
+       help
+         A thread which attempts to acquire a mutex currently owned by
+         another thread running in secondary/relaxed mode will suffer
+         unwanted latencies, due to a priority inversion. If debug
+         notifications are enabled for such a thread, it receives a
+         SIGDEBUG signal.
+
+	 This option has some overhead in real-time mode over
+	 contended mutexes.
+ 
+config XENO_OPT_DEBUG_MUTEX_SLEEP
+       bool "Detect sleeping with mutex"
+       default y
+       help
+         A thread which goes sleeping while holding a mutex is likely
+         to cause unwanted latencies to other threads serialized by
+         the same lock. If debug notifications are enabled for such a
+         thread, it receives a SIGDEBUG signal right before it enters
+         sleep.
+
+	 This option has noticeable overhead in real-time mode as it
+	 disables the normal fast mutex operations from user-space,
+	 causing a system call for each mutex acquisition/release.
+
+config XENO_OPT_DEBUG_LEGACY
+        bool "Detect usage of legacy constructs/features"
+	default n
+	help
+	    Turns on detection of legacy API usage.
+
+endif # XENO_OPT_DEBUG_USER
+
+config XENO_OPT_DEBUG_TRACE_RELAX
+	bool "Trace relax requests"
+	default n
+	help
+	  This option enables recording of unwanted relax requests from
+	  user-space applications leaving the real-time domain, logging
+	  the thread information and code location involved. All records
+	  are readable from /proc/xenomai/debug/relax, and can be
+	  decoded using the "slackspot" utility.
+
+config XENO_OPT_WATCHDOG
+	bool "Watchdog support"
+	default y
+	help
+	  This option activates a watchdog aimed at detecting runaway
+	  Cobalt threads. If enabled, the watchdog triggers after a
+	  given period of uninterrupted real-time activity has elapsed
+	  without Linux interaction in the meantime.
+
+	  In such an event, the current thread is moved out of the
+	  real-time domain, receiving a SIGDEBUG signal from the Linux
+	  kernel immediately after.
+
+	  The timeout value of the watchdog can be set using the
+	  XENO_OPT_WATCHDOG_TIMEOUT parameter.
+
+config XENO_OPT_WATCHDOG_TIMEOUT
+	depends on XENO_OPT_WATCHDOG
+	int "Watchdog timeout"
+	default 4
+	range 1 60
+	help
+	  Watchdog timeout value (in seconds).
+
+endif # XENO_OPT_DEBUG
+
+config XENO_TODO
+	bool "Reveal TODO places"
+	help
+	  This option causes a build time assertion to trigger
+	  when the TODO() marker is found in the compiled code.
+++ linux-patched/kernel/xenomai/heap.c	2022-03-21 12:58:28.843894286 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/sched-idle.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/stdarg.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/vfile.h>
+#include <cobalt/kernel/ancillaries.h>
+#include <asm/xenomai/wrappers.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_heap Dynamic memory allocation services
+ *
+ * This code implements a variant of the allocator described in
+ * "Design of a General Purpose Memory Allocator for the 4.3BSD Unix
+ * Kernel" by Marshall K. McKusick and Michael J. Karels (USENIX
+ * 1988), see http://docs.FreeBSD.org/44doc/papers/kernmalloc.pdf.
+ * The free page list is maintained in rbtrees for fast lookups of
+ * multi-page memory ranges, and pages holding bucketed memory have a
+ * fast allocation bitmap to manage their blocks internally.
+ *@{
+ */
+struct xnheap cobalt_heap;		/* System heap */
+EXPORT_SYMBOL_GPL(cobalt_heap);
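+
+/*
+ * As a rough illustration of the scheme described above, the block
+ * size actually served for a request can be derived as follows
+ * (sketch only, hypothetical helper mirroring the rounding performed
+ * in xnheap_alloc() further down): sizes smaller than the page size
+ * are rounded up to the next power of two (and served from bucketed
+ * pages while the result remains below XNHEAP_PAGE_SIZE), larger
+ * requests are page-aligned and served as contiguous page ranges.
+ *
+ *	static size_t example_rounded_size(size_t size)
+ *	{
+ *		int log2size;
+ *
+ *		if (size < XNHEAP_MIN_ALIGN)
+ *			return XNHEAP_MIN_ALIGN;
+ *
+ *		log2size = ilog2(size);
+ *		if (log2size < XNHEAP_PAGE_SHIFT) {
+ *			if (size & (size - 1))
+ *				log2size++;
+ *			return (size_t)1 << log2size;
+ *		}
+ *
+ *		return ALIGN(size, XNHEAP_PAGE_SIZE);
+ *	}
+ */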
+
+static LIST_HEAD(heapq);	/* Heap list for v-file dump */
+
+static int nrheaps;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static struct xnvfile_rev_tag vfile_tag;
+
+static struct xnvfile_snapshot_ops vfile_ops;
+
+struct vfile_priv {
+	struct xnheap *curr;
+};
+
+struct vfile_data {
+	size_t all_mem;
+	size_t free_mem;
+	char name[XNOBJECT_NAME_LEN];
+};
+
+static struct xnvfile_snapshot vfile = {
+	.privsz = sizeof(struct vfile_priv),
+	.datasz = sizeof(struct vfile_data),
+	.tag = &vfile_tag,
+	.ops = &vfile_ops,
+};
+
+static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_priv *priv = xnvfile_iterator_priv(it);
+
+	if (list_empty(&heapq)) {
+		priv->curr = NULL;
+		return 0;
+	}
+
+	priv->curr = list_first_entry(&heapq, struct xnheap, next);
+
+	return nrheaps;
+}
+
+static int vfile_next(struct xnvfile_snapshot_iterator *it, void *data)
+{
+	struct vfile_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_data *p = data;
+	struct xnheap *heap;
+
+	if (priv->curr == NULL)
+		return 0;	/* We are done. */
+
+	heap = priv->curr;
+	if (list_is_last(&heap->next, &heapq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_entry(heap->next.next,
+					struct xnheap, next);
+
+	p->all_mem = xnheap_get_size(heap);
+	p->free_mem = xnheap_get_free(heap);
+	knamecpy(p->name, heap->name);
+
+	return 1;
+}
+
+static int vfile_show(struct xnvfile_snapshot_iterator *it, void *data)
+{
+	struct vfile_data *p = data;
+
+	if (p == NULL)
+		xnvfile_printf(it, "%9s %9s  %s\n",
+			       "TOTAL", "FREE", "NAME");
+	else
+		xnvfile_printf(it, "%9zu %9zu  %s\n",
+			       p->all_mem,
+			       p->free_mem,
+			       p->name);
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_ops = {
+	.rewind = vfile_rewind,
+	.next = vfile_next,
+	.show = vfile_show,
+};
+
+void xnheap_init_proc(void)
+{
+	xnvfile_init_snapshot("heap", &vfile, &cobalt_vfroot);
+}
+
+void xnheap_cleanup_proc(void)
+{
+	xnvfile_destroy_snapshot(&vfile);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+enum xnheap_pgtype {
+	page_free = 0,
+	page_cont = 1,
+	page_list = 2
+};
+
+static inline u32 __always_inline
+gen_block_mask(int log2size)
+{
+	return -1U >> (32 - (XNHEAP_PAGE_SIZE >> log2size));
+}
+
+static inline  __always_inline
+int addr_to_pagenr(struct xnheap *heap, void *p)
+{
+	return ((void *)p - heap->membase) >> XNHEAP_PAGE_SHIFT;
+}
+
+static inline  __always_inline
+void *pagenr_to_addr(struct xnheap *heap, int pg)
+{
+	return heap->membase + (pg << XNHEAP_PAGE_SHIFT);
+}
+
+#ifdef CONFIG_XENO_OPT_DEBUG_MEMORY
+/*
+ * Setting page_cont/page_free in the page map is only required for
+ * enabling full checking of the block address in free requests, which
+ * may be extremely time-consuming when deallocating huge blocks
+ * spanning thousands of pages. We only do such marking when running
+ * in memory debug mode.
+ */
+static inline bool
+page_is_valid(struct xnheap *heap, int pg)
+{
+	switch (heap->pagemap[pg].type) {
+	case page_free:
+	case page_cont:
+		return false;
+	case page_list:
+	default:
+		return true;
+	}
+}
+
+static void mark_pages(struct xnheap *heap,
+		       int pg, int nrpages,
+		       enum xnheap_pgtype type)
+{
+	while (nrpages-- > 0)
+		heap->pagemap[pg].type = type;
+}
+
+#else
+
+static inline bool
+page_is_valid(struct xnheap *heap, int pg)
+{
+	return true;
+}
+
+static void mark_pages(struct xnheap *heap,
+		       int pg, int nrpages,
+		       enum xnheap_pgtype type)
+{ }
+
+#endif
+
+static struct xnheap_range *
+search_size_ge(struct rb_root *t, size_t size)
+{
+	struct rb_node *rb, *deepest = NULL;
+	struct xnheap_range *r;
+	
+	/*
+	 * We first try to find an exact match. If that fails, we walk
+	 * the tree in logical order by increasing size value from the
+	 * deepest node traversed until we find the first successor to
+	 * that node, or nothing beyond it, whichever comes first.
+	 */
+	rb = t->rb_node;
+	while (rb) {
+		deepest = rb;
+		r = rb_entry(rb, struct xnheap_range, size_node);
+		if (size < r->size) {
+			rb = rb->rb_left;
+			continue;
+		}
+		if (size > r->size) {
+			rb = rb->rb_right;
+			continue;
+		}
+		return r;
+	}
+
+	rb = deepest;
+	while (rb) {
+		r = rb_entry(rb, struct xnheap_range, size_node);
+		if (size <= r->size)
+			return r;
+		rb = rb_next(rb);
+	}
+
+	return NULL;
+}
+
+static struct xnheap_range *
+search_left_mergeable(struct xnheap *heap, struct xnheap_range *r)
+{
+  	struct rb_node *node = heap->addr_tree.rb_node;
+	struct xnheap_range *p;
+
+  	while (node) {
+		p = rb_entry(node, struct xnheap_range, addr_node);
+		if ((void *)p + p->size == (void *)r)
+			return p;
+		if (&r->addr_node < node)
+  			node = node->rb_left;
+		else
+  			node = node->rb_right;
+	}
+
+	return NULL;
+}
+
+static struct xnheap_range *
+search_right_mergeable(struct xnheap *heap, struct xnheap_range *r)
+{
+  	struct rb_node *node = heap->addr_tree.rb_node;
+	struct xnheap_range *p;
+
+  	while (node) {
+		p = rb_entry(node, struct xnheap_range, addr_node);
+		if ((void *)r + r->size == (void *)p)
+			return p;
+		if (&r->addr_node < node)
+  			node = node->rb_left;
+		else
+  			node = node->rb_right;
+	}
+
+	return NULL;
+}
+
+static void insert_range_bysize(struct xnheap *heap, struct xnheap_range *r)
+{
+  	struct rb_node **new = &heap->size_tree.rb_node, *parent = NULL;
+	struct xnheap_range *p;
+
+  	while (*new) {
+  		p = container_of(*new, struct xnheap_range, size_node);
+		parent = *new;
+  		if (r->size <= p->size)
+  			new = &((*new)->rb_left);
+  		else
+  			new = &((*new)->rb_right);
+  	}
+
+  	rb_link_node(&r->size_node, parent, new);
+  	rb_insert_color(&r->size_node, &heap->size_tree);
+}
+
+static void insert_range_byaddr(struct xnheap *heap, struct xnheap_range *r)
+{
+  	struct rb_node **new = &heap->addr_tree.rb_node, *parent = NULL;
+	struct xnheap_range *p;
+
+  	while (*new) {
+  		p = container_of(*new, struct xnheap_range, addr_node);
+		parent = *new;
+  		if (r < p)
+  			new = &((*new)->rb_left);
+  		else
+  			new = &((*new)->rb_right);
+  	}
+
+  	rb_link_node(&r->addr_node, parent, new);
+  	rb_insert_color(&r->addr_node, &heap->addr_tree);
+}
+
+static int reserve_page_range(struct xnheap *heap, size_t size)
+{
+	struct xnheap_range *new, *splitr;
+
+	/* Find a suitable range of pages covering 'size'. */
+	new = search_size_ge(&heap->size_tree, size);
+	if (new == NULL)
+		return -1;
+
+	rb_erase(&new->size_node, &heap->size_tree);
+	if (new->size == size) {
+		rb_erase(&new->addr_node, &heap->addr_tree);
+		return addr_to_pagenr(heap, new);
+	}
+
+	/*
+	 * The free range fetched is larger than what we need: split
+	 * it in two, the upper part is returned to the caller, the
+	 * lower part is sent back to the free list, which makes
+	 * reindexing by address pointless.
+	 */
+	splitr = new;
+	splitr->size -= size;
+	new = (struct xnheap_range *)((void *)new + splitr->size);
+	insert_range_bysize(heap, splitr);
+
+	return addr_to_pagenr(heap, new);
+}
+
+static void release_page_range(struct xnheap *heap,
+			       void *page, size_t size)
+{
+	struct xnheap_range *freed = page, *left, *right;
+	bool addr_linked = false;
+
+	freed->size = size;
+
+	left = search_left_mergeable(heap, freed);
+	if (left) {
+		rb_erase(&left->size_node, &heap->size_tree);
+		left->size += freed->size;
+		freed = left;
+		addr_linked = true;
+	}
+
+	right = search_right_mergeable(heap, freed);
+	if (right) {
+		rb_erase(&right->size_node, &heap->size_tree);
+		freed->size += right->size;
+		if (addr_linked)
+			rb_erase(&right->addr_node, &heap->addr_tree);
+		else
+			rb_replace_node(&right->addr_node, &freed->addr_node,
+					&heap->addr_tree);
+	} else if (!addr_linked)
+		insert_range_byaddr(heap, freed);
+
+	insert_range_bysize(heap, freed);
+	mark_pages(heap, addr_to_pagenr(heap, page),
+		   size >> XNHEAP_PAGE_SHIFT, page_free);
+}
+
+static void add_page_front(struct xnheap *heap,
+			   int pg, int log2size)
+{
+	struct xnheap_pgentry *new, *head, *next;
+	int ilog;
+
+	/* Insert page at front of the per-bucket page list. */
+	
+	ilog = log2size - XNHEAP_MIN_LOG2;
+	new = &heap->pagemap[pg];
+	if (heap->buckets[ilog] == -1U) {
+		heap->buckets[ilog] = pg;
+		new->prev = new->next = pg;
+	} else {
+		head = &heap->pagemap[heap->buckets[ilog]];
+		new->prev = heap->buckets[ilog];
+		new->next = head->next;
+		next = &heap->pagemap[new->next];
+		next->prev = pg;
+		head->next = pg;
+		heap->buckets[ilog] = pg;
+	}
+}
+
+static void remove_page(struct xnheap *heap,
+			int pg, int log2size)
+{
+	struct xnheap_pgentry *old, *prev, *next;
+	int ilog = log2size - XNHEAP_MIN_LOG2;
+
+	/* Remove page from the per-bucket page list. */
+
+	old = &heap->pagemap[pg];
+	if (pg == old->next)
+		heap->buckets[ilog] = -1U;
+	else {
+		if (pg == heap->buckets[ilog])
+			heap->buckets[ilog] = old->next;
+		prev = &heap->pagemap[old->prev];
+		prev->next = old->next;
+		next = &heap->pagemap[old->next];
+		next->prev = old->prev;
+	}
+}
+
+static void move_page_front(struct xnheap *heap,
+			    int pg, int log2size)
+{
+	int ilog = log2size - XNHEAP_MIN_LOG2;
+
+	/* Move page to the front of the per-bucket page list. */
+	
+	if (heap->buckets[ilog] == pg)
+		return;	 /* Already at front, no move. */
+		
+	remove_page(heap, pg, log2size);
+	add_page_front(heap, pg, log2size);
+}
+
+static void move_page_back(struct xnheap *heap,
+			   int pg, int log2size)
+{
+	struct xnheap_pgentry *old, *last, *head, *next;
+	int ilog;
+
+	/* Move page to the end of the per-bucket page list. */
+	
+	old = &heap->pagemap[pg];
+	if (pg == old->next) /* Singleton, no move. */
+		return;
+		
+	remove_page(heap, pg, log2size);
+
+	ilog = log2size - XNHEAP_MIN_LOG2;
+	head = &heap->pagemap[heap->buckets[ilog]];
+	last = &heap->pagemap[head->prev];
+	old->prev = head->prev;
+	old->next = last->next;
+	next = &heap->pagemap[old->next];
+	next->prev = pg;
+	last->next = pg;
+}
+
+static void *add_free_range(struct xnheap *heap,
+			    size_t bsize, int log2size)
+{
+	int pg;
+
+	pg = reserve_page_range(heap, ALIGN(bsize, XNHEAP_PAGE_SIZE));
+	if (pg < 0)
+		return NULL;
+	
+	/*
+	 * Update the page entry.  If @log2size is non-zero
+	 * (i.e. bsize < XNHEAP_PAGE_SIZE), bsize is (1 << log2size)
+	 * between 2^XNHEAP_MIN_LOG2 and 2^(XNHEAP_PAGE_SHIFT - 1).
+	 * Save the log2 power into entry.type, then update the
+	 * per-page allocation bitmap to reserve the first block.
+	 *
+	 * Otherwise, we have a larger block which may span multiple
+	 * pages: set entry.type to page_list, indicating the start of
+	 * the page range, and entry.bsize to the overall block size.
+	 */
+	if (log2size) {
+		heap->pagemap[pg].type = log2size;
+		/*
+		 * Mark the first object slot (#0) as busy, along with
+		 * the leftmost bits we won't use for this log2 size.
+		 */
+		heap->pagemap[pg].map = ~gen_block_mask(log2size) | 1;
+		/*
+		 * Insert the new page at front of the per-bucket page
+		 * list, enforcing the assumption that pages with free
+		 * space live close to the head of this list.
+		 */
+		add_page_front(heap, pg, log2size);
+	} else {
+		heap->pagemap[pg].type = page_list;
+		heap->pagemap[pg].bsize = (u32)bsize;
+		mark_pages(heap, pg + 1,
+			   (bsize >> XNHEAP_PAGE_SHIFT) - 1, page_cont);
+	}
+
+	heap->used_size += bsize;
+
+	return pagenr_to_addr(heap, pg);
+}
+
+/**
+ * @fn void *xnheap_alloc(struct xnheap *heap, size_t size)
+ * @brief Allocate a memory block from a memory heap.
+ *
+ * Allocates a contiguous region of memory from an active memory heap.
+ * Such allocation is guaranteed to be time-bounded.
+ *
+ * @param heap The descriptor address of the heap to get memory from.
+ *
+ * @param size The size in bytes of the requested block.
+ *
+ * @return The address of the allocated region upon success, or NULL
+ * if no memory is available from the specified heap.
+ *
+ * @coretags{unrestricted}
+ */
+void *xnheap_alloc(struct xnheap *heap, size_t size)
+{
+	int log2size, ilog, pg, b = -1;
+	size_t bsize;
+	void *block;
+	spl_t s;
+
+	if (size == 0)
+		return NULL;
+
+	if (size < XNHEAP_MIN_ALIGN) {
+		bsize = size = XNHEAP_MIN_ALIGN;
+		log2size = XNHEAP_MIN_LOG2;
+	} else {
+		log2size = ilog2(size);
+		if (log2size < XNHEAP_PAGE_SHIFT) {
+			if (size & (size - 1))
+				log2size++;
+			bsize = 1 << log2size;
+		} else
+			bsize = ALIGN(size, XNHEAP_PAGE_SIZE);
+	}
+	
+	/*
+	 * Allocate entire pages directly from the pool whenever the
+	 * block is larger or equal to XNHEAP_PAGE_SIZE.  Otherwise,
+	 * use bucketed memory.
+	 *
+	 * NOTE: Fully busy pages from bucketed memory are moved back
+	 * at the end of the per-bucket page list, so that we may
+	 * always assume that either the heading page has some room
+	 * available, or no room is available from any page linked to
+	 * this list, in which case we should immediately add a fresh
+	 * page.
+	 */
+	xnlock_get_irqsave(&heap->lock, s);
+
+	if (bsize >= XNHEAP_PAGE_SIZE)
+		/* Add a range of contiguous free pages. */
+		block = add_free_range(heap, bsize, 0);
+	else {
+		ilog = log2size - XNHEAP_MIN_LOG2;
+		XENO_WARN_ON(MEMORY, ilog < 0 || ilog >= XNHEAP_MAX_BUCKETS);
+		pg = heap->buckets[ilog];
+		/*
+		 * Find a block in the heading page if any. If there
+		 * is none, there won't be any down the list: add a
+		 * new page right away.
+		 */
+		if (pg < 0 || heap->pagemap[pg].map == -1U)
+			block = add_free_range(heap, bsize, log2size);
+		else {
+			b = ffs(~heap->pagemap[pg].map) - 1;
+			/*
+			 * Got one block from the heading per-bucket
+			 * page, tag it as busy in the per-page
+			 * allocation map.
+			 */
+			heap->pagemap[pg].map |= (1U << b);
+			heap->used_size += bsize;
+			block = heap->membase +
+				(pg << XNHEAP_PAGE_SHIFT) +
+				(b << log2size);
+			if (heap->pagemap[pg].map == -1U)
+				move_page_back(heap, pg, log2size);
+		}
+	}
+
+	xnlock_put_irqrestore(&heap->lock, s);
+
+	return block;
+}
+EXPORT_SYMBOL_GPL(xnheap_alloc);
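+
+/*
+ * A minimal usage sketch (illustrative only; struct foo is a
+ * placeholder type): allocating a block from the system heap and
+ * releasing it with xnheap_free(). Callers must be prepared to
+ * handle a NULL return when the heap is exhausted.
+ *
+ *	struct foo *p = xnheap_alloc(&cobalt_heap, sizeof(*p));
+ *	if (p == NULL)
+ *		return -ENOMEM;
+ *	...
+ *	xnheap_free(&cobalt_heap, p);
+ */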
+
+/**
+ * @fn void xnheap_free(struct xnheap *heap, void *block)
+ * @brief Release a block to a memory heap.
+ *
+ * Releases a memory block to a heap.
+ *
+ * @param heap The heap descriptor.
+ *
+ * @param block The block to be returned to the heap.
+ *
+ * @coretags{unrestricted}
+ */
+void xnheap_free(struct xnheap *heap, void *block)
+{
+	unsigned long pgoff, boff;
+	int log2size, pg, n;
+	size_t bsize;
+	u32 oldmap;
+	spl_t s;
+
+	xnlock_get_irqsave(&heap->lock, s);
+
+	/* Compute the heading page number in the page map. */
+	pgoff = block - heap->membase;
+	pg = pgoff >> XNHEAP_PAGE_SHIFT;
+
+	if (!page_is_valid(heap, pg))
+		goto bad;
+	
+	switch (heap->pagemap[pg].type) {
+	case page_list:
+		bsize = heap->pagemap[pg].bsize;
+		XENO_WARN_ON(MEMORY, (bsize & (XNHEAP_PAGE_SIZE - 1)) != 0);
+		release_page_range(heap, pagenr_to_addr(heap, pg), bsize);
+		break;
+
+	default:
+		log2size = heap->pagemap[pg].type;
+		bsize = (1 << log2size);
+		XENO_WARN_ON(MEMORY, bsize >= XNHEAP_PAGE_SIZE);
+		boff = pgoff & ~XNHEAP_PAGE_MASK;
+		if ((boff & (bsize - 1)) != 0) /* Not at block start? */
+			goto bad;
+
+		n = boff >> log2size; /* Block position in page. */
+		oldmap = heap->pagemap[pg].map;
+		heap->pagemap[pg].map &= ~(1U << n);
+
+		/*
+		 * If the page the block was sitting on is fully idle,
+		 * return it to the pool. Otherwise, check whether
+		 * that page is transitioning from fully busy to
+		 * partially busy state, in which case it should move
+		 * toward the front of the per-bucket page list.
+		 */
+		if (heap->pagemap[pg].map == ~gen_block_mask(log2size)) {
+			remove_page(heap, pg, log2size);
+			release_page_range(heap, pagenr_to_addr(heap, pg),
+					   XNHEAP_PAGE_SIZE);
+		} else if (oldmap == -1U)
+			move_page_front(heap, pg, log2size);
+	}
+
+	heap->used_size -= bsize;
+
+	xnlock_put_irqrestore(&heap->lock, s);
+
+	return;
+bad:
+	xnlock_put_irqrestore(&heap->lock, s);
+
+	XENO_WARN(MEMORY, 1, "invalid block %p in heap %s",
+		  block, heap->name);
+}
+EXPORT_SYMBOL_GPL(xnheap_free);
+
+ssize_t xnheap_check_block(struct xnheap *heap, void *block)
+{
+	unsigned long pg, pgoff, boff;
+	ssize_t ret = -EINVAL;
+	size_t bsize;
+	spl_t s;
+
+	xnlock_get_irqsave(&heap->lock, s);
+
+	/* Calculate the page number from the block address. */
+	pgoff = block - heap->membase;
+	pg = pgoff >> XNHEAP_PAGE_SHIFT;
+	if (page_is_valid(heap, pg)) {
+		if (heap->pagemap[pg].type == page_list)
+			bsize = heap->pagemap[pg].bsize;
+		else {
+			bsize = (1 << heap->pagemap[pg].type);
+			boff = pgoff & ~XNHEAP_PAGE_MASK;
+			if ((boff & (bsize - 1)) != 0) /* Not at block start? */
+				goto out;
+		}
+		ret = (ssize_t)bsize;
+	}
+out:
+	xnlock_put_irqrestore(&heap->lock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnheap_check_block);
+
+/**
+ * @fn xnheap_init(struct xnheap *heap, void *membase, u32 size)
+ * @brief Initialize a memory heap.
+ *
+ * Initializes a memory heap suitable for time-bounded allocation
+ * requests of dynamic memory.
+ *
+ * @param heap The address of a heap descriptor to initialize.
+ *
+ * @param membase The address of the storage area.
+ *
+ * @param size The size in bytes of the storage area.  @a size must be
+ * a multiple of XNHEAP_PAGE_SIZE and smaller than (4Gb - PAGE_SIZE)
+ * in the current implementation.
+ *
+ * @return 0 is returned upon success, or:
+ *
+ * - -EINVAL is returned if @a size is either greater than
+ *   XNHEAP_MAX_HEAPSZ, or not aligned on PAGE_SIZE.
+ *
+ * - -ENOMEM is returned upon failure of allocating the meta-data area
+ * used internally to maintain the heap.
+ *
+ * @coretags{secondary-only}
+ */
+int xnheap_init(struct xnheap *heap, void *membase, size_t size)
+{
+	int n, nrpages;
+	spl_t s;
+
+	secondary_mode_only();
+
+ 	if (size > XNHEAP_MAX_HEAPSZ || !PAGE_ALIGNED(size))
+		return -EINVAL;
+
+	/* Reset bucket page lists, all empty. */
+	for (n = 0; n < XNHEAP_MAX_BUCKETS; n++)
+		heap->buckets[n] = -1U;
+
+	xnlock_init(&heap->lock);
+
+	nrpages = size >> XNHEAP_PAGE_SHIFT;
+	heap->pagemap = vzalloc(sizeof(struct xnheap_pgentry) * nrpages);
+	if (heap->pagemap == NULL)
+		return -ENOMEM;
+
+	heap->membase = membase;
+	heap->usable_size = size;
+	heap->used_size = 0;
+		      
+	/*
+	 * The free page pool is maintained as a set of ranges of
+	 * contiguous pages indexed by address and size in rbtrees.
+	 * Initially, we have a single range in those trees covering
+	 * the whole memory we have been given for the heap. Over
+	 * time, that range will be split then possibly re-merged back
+	 * as allocations and deallocations take place.
+	 */
+	heap->size_tree = RB_ROOT;
+	heap->addr_tree = RB_ROOT;
+	release_page_range(heap, membase, size);
+
+	/* Default name, override with xnheap_set_name() */
+	ksformat(heap->name, sizeof(heap->name), "(%p)", heap);
+
+	xnlock_get_irqsave(&nklock, s);
+	list_add_tail(&heap->next, &heapq);
+	nrheaps++;
+	xnvfile_touch_tag(&vfile_tag);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnheap_init);
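+
+/*
+ * A typical setup sequence (sketch only; myheap, heapsize and ret
+ * are placeholders): the backing storage is obtained from
+ * xnheap_vmalloc(), sized according to the constraints documented
+ * above, and released only after the heap has been destroyed.
+ *
+ *	void *membase = xnheap_vmalloc(heapsize);
+ *	if (membase == NULL)
+ *		return -ENOMEM;
+ *	ret = xnheap_init(&myheap, membase, heapsize);
+ *	if (ret) {
+ *		xnheap_vfree(membase);
+ *		return ret;
+ *	}
+ *	xnheap_set_name(&myheap, "my-heap");
+ *	...
+ *	xnheap_destroy(&myheap);
+ *	xnheap_vfree(membase);
+ */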
+
+/**
+ * @fn void xnheap_destroy(struct xnheap *heap)
+ * @brief Destroys a memory heap.
+ *
+ * Destroys a memory heap.
+ *
+ * @param heap The heap descriptor.
+ *
+ * @coretags{secondary-only}
+ */
+void xnheap_destroy(struct xnheap *heap)
+{
+	spl_t s;
+
+	secondary_mode_only();
+
+	xnlock_get_irqsave(&nklock, s);
+	list_del(&heap->next);
+	nrheaps--;
+	xnvfile_touch_tag(&vfile_tag);
+	xnlock_put_irqrestore(&nklock, s);
+	vfree(heap->pagemap);
+}
+EXPORT_SYMBOL_GPL(xnheap_destroy);
+
+/**
+ * @fn xnheap_set_name(struct xnheap *heap,const char *name,...)
+ * @brief Set the heap's name string.
+ *
+ * Set the heap name that will be used in statistic outputs.
+ *
+ * @param heap The address of a heap descriptor.
+ *
+ * @param name Name displayed in statistic outputs. This parameter can
+ * be a printk()-like format argument list.
+ *
+ * @coretags{task-unrestricted}
+ */
+void xnheap_set_name(struct xnheap *heap, const char *name, ...)
+{
+	va_list args;
+
+	va_start(args, name);
+	kvsformat(heap->name, sizeof(heap->name), name, args);
+	va_end(args);
+}
+EXPORT_SYMBOL_GPL(xnheap_set_name);
+
+void *xnheap_vmalloc(size_t size)
+{
+	/*
+	 * We want memory used in real-time context to be pulled from
+	 * ZONE_NORMAL, however we don't need it to be physically
+	 * contiguous.
+	 *
+	 * 32bit systems which would need HIGHMEM for running a Cobalt
+	 * configuration would also be required to support PTE
+	 * pinning, which not all architectures provide.  Moreover,
+	 * pinning PTEs eagerly for a potentially (very) large amount
+	 * of memory may quickly degrade performance.
+	 *
+	 * If using a different kernel/user memory split is not an
+	 * option for such configurations, then basing this kind of
+	 * software on a 32bit system was most likely the wrong choice
+	 * in the first place.
+	 */
+	return vmalloc_kernel(size, 0);
+}
+EXPORT_SYMBOL_GPL(xnheap_vmalloc);
+
+void xnheap_vfree(void *p)
+{
+	vfree(p);
+}
+EXPORT_SYMBOL_GPL(xnheap_vfree);
+
+/** @} */
+++ linux-patched/kernel/xenomai/sched-idle.c	2022-03-21 12:58:28.840894315 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/map.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/sched.h>
+
+static struct xnthread *xnsched_idle_pick(struct xnsched *sched)
+{
+	return &sched->rootcb;
+}
+
+static bool xnsched_idle_setparam(struct xnthread *thread,
+				  const union xnsched_policy_param *p)
+{
+	return __xnsched_idle_setparam(thread, p);
+}
+
+static void xnsched_idle_getparam(struct xnthread *thread,
+				  union xnsched_policy_param *p)
+{
+	__xnsched_idle_getparam(thread, p);
+}
+
+static void xnsched_idle_trackprio(struct xnthread *thread,
+				   const union xnsched_policy_param *p)
+{
+	__xnsched_idle_trackprio(thread, p);
+}
+
+static void xnsched_idle_protectprio(struct xnthread *thread, int prio)
+{
+	__xnsched_idle_protectprio(thread, prio);
+}
+
+struct xnsched_class xnsched_class_idle = {
+	.sched_init		=	NULL,
+	.sched_enqueue		=	NULL,
+	.sched_dequeue		=	NULL,
+	.sched_requeue		=	NULL,
+	.sched_tick		=	NULL,
+	.sched_rotate		=	NULL,
+	.sched_forget		=	NULL,
+	.sched_kick		=	NULL,
+	.sched_declare		=	NULL,
+	.sched_pick		=	xnsched_idle_pick,
+	.sched_setparam		=	xnsched_idle_setparam,
+	.sched_getparam		=	xnsched_idle_getparam,
+	.sched_trackprio	=	xnsched_idle_trackprio,
+	.sched_protectprio	=	xnsched_idle_protectprio,
+	.weight			=	XNSCHED_CLASS_WEIGHT(0),
+	.policy			=	SCHED_IDLE,
+	.name			=	"idle"
+};
+++ linux-patched/kernel/xenomai/map.c	2022-03-21 12:58:28.836894354 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/thread.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2007 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/map.h>
+#include <asm/xenomai/machine.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_map Lightweight key-to-object mapping service
+ *
+ * A map is a simple indexing structure which associates unique
+ * integer keys with pointers to objects.  The current implementation
+ * supports reservation, for naming/indexing objects, either on a
+ * fixed, user-provided integer (i.e. a reserved key value), or by
+ * drawing the next available key internally if the caller did not
+ * specify any fixed key. For instance, in some given map, the key
+ * space ranging from 0 to 255 could be reserved for fixed keys,
+ * whilst the range from 256 to 511 could be available for drawing
+ * free keys dynamically.
+ *
+ * A maximum of 1024 unique keys per map is supported on 32bit
+ * machines.
+ *
+ * (This implementation should not be confused with C++ STL maps,
+ * which are dynamically expandable and allow arbitrary key types;
+ * Xenomai maps don't).
+ *
+ * @{
+ */
+
+/**
+ * @fn void xnmap_create(int nkeys, int reserve, int offset)
+ * @brief Create a map.
+ *
+ * Allocates a new map with the specified addressing capabilities. The
+ * memory is obtained from the Xenomai system heap.
+ *
+ * @param nkeys The maximum number of unique keys the map will be able
+ * to hold. This value cannot exceed the static limit represented by
+ * XNMAP_MAX_KEYS, and must be a power of two.
+ *
+ * @param reserve The number of keys which should be kept for
+ * reservation within the index space. Reserving a key means to
+ * specify a valid key to the xnmap_enter() service, which will then
+ * attempt to register this exact key, instead of drawing the next
+ * available key from the unreserved index space. When reservation is
+ * in effect, the unreserved index space will hold key values greater
+ * than @a reserve, keeping the low key values for the reserved space.
+ * For instance, passing @a reserve = 32 would cause the index range [
+ * 0 .. 31 ] to be kept for reserved keys.  When non-zero, @a reserve
+ * is rounded to the next multiple of BITS_PER_LONG. If @a reserve is
+ * zero no reservation will be available from the map.
+ *
+ * @param offset The lowest key value xnmap_enter() will return to the
+ * caller. Key values will be in the range [ 0 + offset .. @a nkeys +
+ * offset - 1 ]. Negative offsets are valid.
+ *
+ * @return the address of the new map is returned on success;
+ * otherwise, NULL is returned if @a nkeys is invalid.
+ *
+ * @coretags{task-unrestricted}
+ */
+struct xnmap *xnmap_create(int nkeys, int reserve, int offset)
+{
+	struct xnmap *map;
+	int mapsize;
+
+	if (nkeys <= 0 || (nkeys & (nkeys - 1)) != 0)
+		return NULL;
+
+	mapsize = sizeof(*map) + (nkeys - 1) * sizeof(map->objarray[0]);
+	map = xnmalloc(mapsize);
+
+	if (!map)
+		return NULL;
+
+	map->ukeys = 0;
+	map->nkeys = nkeys;
+	map->offset = offset;
+	map->himask = (1 << ((reserve + BITS_PER_LONG - 1) / BITS_PER_LONG)) - 1;
+	map->himap = ~0;
+	memset(map->lomap, ~0, sizeof(map->lomap));
+	memset(map->objarray, 0, sizeof(map->objarray[0]) * nkeys);
+
+	return map;
+}
+EXPORT_SYMBOL_GPL(xnmap_create);
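+
+/*
+ * For instance, the following call (sketch only) creates a map of
+ * 256 keys, keeping the 32 lowest key values for fixed/reserved use,
+ * with keys handed out starting from 0. NULL is returned when @a
+ * nkeys is invalid or no memory is available.
+ *
+ *	struct xnmap *map = xnmap_create(256, 32, 0);
+ *	if (map == NULL)
+ *		return -ENOMEM;
+ */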
+
+/**
+ * @fn void xnmap_delete(struct xnmap *map)
+ * @brief Delete a map.
+ *
+ * Deletes a map, freeing any associated memory back to the Xenomai
+ * system heap.
+ *
+ * @param map The address of the map to delete.
+ *
+ * @coretags{task-unrestricted}
+ */
+void xnmap_delete(struct xnmap *map)
+{
+	xnfree(map);
+}
+EXPORT_SYMBOL_GPL(xnmap_delete);
+
+/**
+ * @fn void xnmap_enter(struct xnmap *map, int key, void *objaddr)
+ * @brief Index an object into a map.
+ *
+ * Insert a new object into the given map.
+ *
+ * @param map The address of the map to insert into.
+ *
+ * @param key The key to index the object on. If this key is within
+ * the valid index range [ 0 + offset .. @a nkeys + offset - 1 ], then
+ * an attempt to reserve this exact key is made. If @a key has an
+ * out-of-range value (e.g. lower than @a offset), then an attempt is
+ * made to draw a free key from the unreserved index space.
+ *
+ * @param objaddr The address of the object to index on the key. This
+ * value will be returned by a successful call to xnmap_fetch() with
+ * the same key.
+ *
+ * @return a valid key is returned on success, either @a key if
+ * reserved, or the next free key. Otherwise:
+ *
+ * - -EEXIST is returned upon attempt to reserve a busy key.
+ *
+ * - -ENOSPC when no more free key is available.
+ *
+ * @coretags{unrestricted}
+ */
+int xnmap_enter(struct xnmap *map, int key, void *objaddr)
+{
+	int hi, lo, ofkey = key - map->offset;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (ofkey >= 0 && ofkey < map->nkeys) {
+		if (map->objarray[ofkey] != NULL) {
+			key = -EEXIST;
+			goto unlock_and_exit;
+		}
+	} else if (map->ukeys >= map->nkeys) {
+		key = -ENOSPC;
+		goto unlock_and_exit;
+	} else {
+		/* The himask implements a namespace reservation of
+		   half of the bitmap space which cannot be used to
+		   draw keys. */
+
+		hi = ffnz(map->himap & ~map->himask);
+		lo = ffnz(map->lomap[hi]);
+		ofkey = hi * BITS_PER_LONG + lo;
+		++map->ukeys;
+
+		map->lomap[hi] &= ~(1UL << lo);
+		if (map->lomap[hi] == 0)
+			map->himap &= ~(1UL << hi);
+	}
+
+	map->objarray[ofkey] = objaddr;
+
+      unlock_and_exit:
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ofkey + map->offset;
+}
+EXPORT_SYMBOL_GPL(xnmap_enter);
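+
+/*
+ * Usage sketch (illustrative only; map, obj, p and key are
+ * placeholders): a fixed key from the reserved range is requested by
+ * passing its exact value, e.g. xnmap_enter(map, 7, obj), which may
+ * fail with -EEXIST if that slot is busy. Passing an out-of-range
+ * value such as -1 (with a zero offset) lets the map draw the next
+ * free key instead:
+ *
+ *	key = xnmap_enter(map, -1, obj);
+ *	if (key < 0)
+ *		return key;
+ *	p = xnmap_fetch(map, key);
+ *	xnmap_remove(map, key);
+ *
+ * where p then equals obj, until xnmap_remove() releases the key.
+ */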
+
+/**
+ * @fn void xnmap_remove(struct xnmap *map, int key)
+ * @brief Remove an object reference from a map.
+ *
+ * Removes an object reference from the given map, releasing the
+ * associated key.
+ *
+ * @param map The address of the map to remove from.
+ *
+ * @param key The key the object reference to be removed is indexed
+ * on.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -ESRCH is returned if @a key is invalid.
+ *
+ * @coretags{unrestricted}
+ */
+int xnmap_remove(struct xnmap *map, int key)
+{
+	int ofkey = key - map->offset, hi, lo;
+	spl_t s;
+
+	if (ofkey < 0 || ofkey >= map->nkeys)
+		return -ESRCH;
+
+	hi = ofkey / BITS_PER_LONG;
+	lo = ofkey % BITS_PER_LONG;
+	xnlock_get_irqsave(&nklock, s);
+	map->objarray[ofkey] = NULL;
+	map->himap |= (1UL << hi);
+	map->lomap[hi] |= (1UL << lo);
+	--map->ukeys;
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnmap_remove);
+
+/**
+ * @fn void xnmap_fetch(struct xnmap *map, int key)
+ * @brief Search for an object in a map.
+ *
+ * Retrieve an object reference from the given map by its index key.
+ *
+ * @param map The address of the map to retrieve from.
+ *
+ * @param key The key to be searched for in the map index.
+ *
+ * @return The indexed object address is returned on success,
+ * otherwise NULL is returned when @a key is invalid or no object is
+ * currently indexed on it.
+ *
+ * @coretags{unrestricted}
+ */
+
+/**
+ * @fn void xnmap_fetch_nocheck(struct xnmap *map, int key)
+ * @brief Search for an object in a map - unchecked form.
+ *
+ * Retrieve an object reference from the given map by its index key,
+ * without performing any sanity check on the provided key.
+ *
+ * @param map The address of the map to retrieve from.
+ *
+ * @param key The key to be searched for in the map index.
+ *
+ * @return The indexed object address is returned on success,
+ * otherwise NULL is returned when no object is currently indexed on
+ * @a key.
+ *
+ * @coretags{unrestricted}
+ */
+
+/** @} */
+++ linux-patched/kernel/xenomai/thread.c	2022-03-21 12:58:28.832894393 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2006-2010 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ * Copyright (C) 2001-2013 The Xenomai project <http://www.xenomai.org>
+ *
+ * SMP support Copyright (C) 2004 The HYADES project <http://www.hyades-itea.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/signal.h>
+#include <linux/pid.h>
+#include <linux/sched.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/registry.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/stat.h>
+#include <cobalt/kernel/trace.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/select.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/thread.h>
+#include <pipeline/kevents.h>
+#include <pipeline/inband_work.h>
+#include <pipeline/sched.h>
+#include <trace/events/cobalt-core.h>
+#include "debug.h"
+
+static DECLARE_WAIT_QUEUE_HEAD(join_all);
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_thread Thread services
+ * @{
+ */
+
+static void timeout_handler(struct xntimer *timer)
+{
+	struct xnthread *thread = container_of(timer, struct xnthread, rtimer);
+
+	xnthread_set_info(thread, XNTIMEO);	/* Interrupts are off. */
+	xnthread_resume(thread, XNDELAY);
+}
+
+static void periodic_handler(struct xntimer *timer)
+{
+	struct xnthread *thread = container_of(timer, struct xnthread, ptimer);
+	/*
+	 * Prevent unwanted round-robin, and do not wake up threads
+	 * blocked on a resource.
+	 */
+	if (xnthread_test_state(thread, XNDELAY|XNPEND) == XNDELAY)
+		xnthread_resume(thread, XNDELAY);
+
+	/*
+	 * The periodic thread might have migrated to another CPU
+	 * while passive, fix the timer affinity if need be.
+	 */
+	xntimer_set_affinity(&thread->ptimer, thread->sched);
+}
+
+static inline void enlist_new_thread(struct xnthread *thread)
+{				/* nklock held, irqs off */
+	list_add_tail(&thread->glink, &nkthreadq);
+	cobalt_nrthreads++;
+	xnvfile_touch_tag(&nkthreadlist_tag);
+}
+
+struct kthread_arg {
+	struct pipeline_inband_work inband_work; /* Must be first. */
+	struct xnthread *thread;
+	struct completion *done;
+};
+
+static void do_parent_wakeup(struct pipeline_inband_work *inband_work)
+{
+	struct kthread_arg *ka;
+
+	ka = container_of(inband_work, struct kthread_arg, inband_work);
+	complete(ka->done);
+}
+
+static inline void init_kthread_info(struct xnthread *thread)
+{
+	struct cobalt_threadinfo *p;
+
+	p = pipeline_current();
+	p->thread = thread;
+	p->process = NULL;
+}
+
+static int map_kthread(struct xnthread *thread, struct kthread_arg *ka)
+{
+	int ret;
+	spl_t s;
+
+	if (xnthread_test_state(thread, XNUSER))
+		return -EINVAL;
+
+	if (xnthread_current() || xnthread_test_state(thread, XNMAPPED))
+		return -EBUSY;
+
+	thread->u_window = NULL;
+	xnthread_pin_initial(thread);
+
+	pipeline_init_shadow_tcb(thread);
+	xnthread_suspend(thread, XNRELAX, XN_INFINITE, XN_RELATIVE, NULL);
+	init_kthread_info(thread);
+	xnthread_set_state(thread, XNMAPPED);
+	xndebug_shadow_init(thread);
+	xnthread_run_handler(thread, map_thread);
+	pipeline_enable_kevents();
+
+	/*
+	 * CAUTION: Soon after xnthread_init() has returned,
+	 * xnthread_start() is commonly invoked from the root domain,
+	 * therefore the call site may expect the started kernel
+	 * shadow to preempt immediately. As a result of such
+	 * assumption, start attributes (struct xnthread_start_attr)
+	 * are often laid on the caller's stack.
+	 *
+	 * For this reason, we raise the completion signal to wake up
+	 * the xnthread_init() caller only once the emerging thread is
+	 * hardened, and __never__ before that point. Since we run
+	 * over the Xenomai domain upon return from xnthread_harden(),
+	 * we schedule a virtual interrupt handler in the root domain
+	 * to signal the completion object.
+	 */
+	xnthread_resume(thread, XNDORMANT);
+	ret = xnthread_harden();
+
+	trace_cobalt_lostage_request("wakeup", current);
+
+	ka->inband_work = (struct pipeline_inband_work)
+		PIPELINE_INBAND_WORK_INITIALIZER(*ka, do_parent_wakeup);
+	pipeline_post_inband_work(ka);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	enlist_new_thread(thread);
+	/*
+	 * Make sure xnthread_start() did not slip in from another CPU
+	 * while we were back from do_parent_wakeup().
+	 */
+	if (thread->entry == NULL)
+		xnthread_suspend(thread, XNDORMANT,
+				 XN_INFINITE, XN_RELATIVE, NULL);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	xnthread_test_cancel();
+
+	xntrace_pid(xnthread_host_pid(thread),
+		    xnthread_current_priority(thread));
+
+	return ret;
+}
+
+static int kthread_trampoline(void *arg)
+{
+	struct kthread_arg *ka = arg;
+	struct xnthread *thread = ka->thread;
+	struct sched_param param;
+	int ret, policy, prio;
+
+	/*
+	 * It only makes sense to create Xenomai kthreads with the
+	 * SCHED_FIFO, SCHED_NORMAL or SCHED_WEAK policies. So
+	 * anything that is not from Xenomai's RT class is assumed to
+	 * belong to SCHED_NORMAL linux-wise.
+	 */
+	if (thread->sched_class != &xnsched_class_rt) {
+		policy = SCHED_NORMAL;
+		prio = 0;
+	} else {
+		policy = SCHED_FIFO;
+		prio = normalize_priority(thread->cprio);
+	}
+
+	param.sched_priority = prio;
+	sched_setscheduler(current, policy, &param);
+
+	ret = map_kthread(thread, ka);
+	if (ret) {
+		printk(XENO_WARNING "failed to create kernel shadow %s\n",
+		       thread->name);
+		return ret;
+	}
+
+	trace_cobalt_shadow_entry(thread);
+
+	thread->entry(thread->cookie);
+
+	xnthread_cancel(thread);
+
+	return 0;
+}
+
+static inline int spawn_kthread(struct xnthread *thread)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	struct kthread_arg ka = {
+		.thread = thread,
+		.done = &done
+	};
+	struct task_struct *p;
+
+	p = kthread_run(kthread_trampoline, &ka, "%s", thread->name);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
+
+	wait_for_completion(&done);
+
+	return 0;
+}
+
+int __xnthread_init(struct xnthread *thread,
+		    const struct xnthread_init_attr *attr,
+		    struct xnsched *sched,
+		    struct xnsched_class *sched_class,
+		    const union xnsched_policy_param *sched_param)
+{
+	int flags = attr->flags, ret, gravity;
+
+	flags &= ~(XNSUSP|XNBOOST);
+#ifndef CONFIG_XENO_ARCH_FPU
+	flags &= ~XNFPU;
+#endif
+	if ((flags & XNROOT) == 0)
+		flags |= XNDORMANT;
+
+	if (attr->name)
+		ksformat(thread->name,
+			 sizeof(thread->name), "%s", attr->name);
+	else
+		ksformat(thread->name,
+			 sizeof(thread->name), "@%p", thread);
+
+	/*
+	 * We mirror the global user debug state into the per-thread
+	 * state, to speed up branch taking in lib/cobalt wherever
+	 * this needs to be tested.
+	 */
+	if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP))
+		flags |= XNDEBUG;
+
+	thread->personality = attr->personality;
+	cpumask_and(&thread->affinity, &attr->affinity, &cobalt_cpu_affinity);
+	thread->sched = sched;
+	thread->state = flags;
+	thread->info = 0;
+	thread->local_info = 0;
+	thread->wprio = XNSCHED_IDLE_PRIO;
+	thread->cprio = XNSCHED_IDLE_PRIO;
+	thread->bprio = XNSCHED_IDLE_PRIO;
+	thread->lock_count = 0;
+	thread->rrperiod = XN_INFINITE;
+	thread->wchan = NULL;
+	thread->wwake = NULL;
+	thread->wcontext = NULL;
+	thread->res_count = 0;
+	thread->handle = XN_NO_HANDLE;
+	memset(&thread->stat, 0, sizeof(thread->stat));
+	thread->selector = NULL;
+	INIT_LIST_HEAD(&thread->glink);
+	INIT_LIST_HEAD(&thread->boosters);
+	/* These will be filled by xnthread_start() */
+	thread->entry = NULL;
+	thread->cookie = NULL;
+	init_completion(&thread->exited);
+	memset(xnthread_archtcb(thread), 0, sizeof(struct xnarchtcb));
+	memset(thread->sigarray, 0, sizeof(thread->sigarray));
+
+	gravity = flags & XNUSER ? XNTIMER_UGRAVITY : XNTIMER_KGRAVITY;
+	xntimer_init(&thread->rtimer, &nkclock, timeout_handler,
+		     sched, gravity);
+	xntimer_set_name(&thread->rtimer, thread->name);
+	xntimer_set_priority(&thread->rtimer, XNTIMER_HIPRIO);
+	xntimer_init(&thread->ptimer, &nkclock, periodic_handler,
+		     sched, gravity);
+	xntimer_set_name(&thread->ptimer, thread->name);
+	xntimer_set_priority(&thread->ptimer, XNTIMER_HIPRIO);
+
+	thread->base_class = NULL; /* xnsched_set_policy() will set it. */
+	ret = xnsched_init_thread(thread);
+	if (ret)
+		goto err_out;
+
+	ret = xnsched_set_policy(thread, sched_class, sched_param);
+	if (ret)
+		goto err_out;
+
+	if ((flags & (XNUSER|XNROOT)) == 0) {
+		ret = spawn_kthread(thread);
+		if (ret)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	xntimer_destroy(&thread->rtimer);
+	xntimer_destroy(&thread->ptimer);
+
+	return ret;
+}
+
+void xnthread_deregister(struct xnthread *thread)
+{
+	if (thread->handle != XN_NO_HANDLE)
+		xnregistry_remove(thread->handle);
+
+	thread->handle = XN_NO_HANDLE;
+}
+
+char *xnthread_format_status(unsigned long status, char *buf, int size)
+{
+	static const char labels[] = XNTHREAD_STATE_LABELS;
+	int pos, c, mask;
+	char *wp;
+
+	for (mask = (int)status, pos = 0, wp = buf;
+	     mask != 0 && wp - buf < size - 2;	/* 1-letter label + \0 */
+	     mask >>= 1, pos++) {
+		if ((mask & 1) == 0)
+			continue;
+
+		c = labels[pos];
+
+		switch (1 << pos) {
+		case XNROOT:
+			c = 'R'; /* Always mark root as runnable. */
+			break;
+		case XNREADY:
+			if (status & XNROOT)
+				continue; /* Already reported on XNROOT. */
+			break;
+		case XNDELAY:
+			/*
+			 * Only report genuine delays here, not timed
+			 * waits for resources.
+			 */
+			if (status & XNPEND)
+				continue;
+			break;
+		case XNPEND:
+			/* Report timed waits with lowercase symbol. */
+			if (status & XNDELAY)
+				c |= 0x20;
+			break;
+		default:
+			if (c == '.')
+				continue;
+		}
+		*wp++ = c;
+	}
+
+	*wp = '\0';
+
+	return buf;
+}
+
+pid_t xnthread_host_pid(struct xnthread *thread)
+{
+	if (xnthread_test_state(thread, XNROOT))
+		return 0;
+	if (!xnthread_host_task(thread))
+		return -1;
+
+	return task_pid_nr(xnthread_host_task(thread));
+}
+
+int xnthread_set_clock(struct xnthread *thread, struct xnclock *newclock)
+{
+	spl_t s;
+
+	if (thread == NULL) {
+		thread = xnthread_current();
+		if (thread == NULL)
+			return -EPERM;
+	}
+
+	/* Change the clock the thread's periodic timer is paced by. */
+	xnlock_get_irqsave(&nklock, s);
+	xntimer_set_clock(&thread->ptimer, newclock);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnthread_set_clock);
+
+xnticks_t xnthread_get_timeout(struct xnthread *thread, xnticks_t ns)
+{
+	struct xntimer *timer;
+	xnticks_t timeout;
+
+	if (!xnthread_test_state(thread,XNDELAY))
+		return 0LL;
+
+	if (xntimer_running_p(&thread->rtimer))
+		timer = &thread->rtimer;
+	else if (xntimer_running_p(&thread->ptimer))
+		timer = &thread->ptimer;
+	else
+		return 0LL;
+
+	timeout = xntimer_get_date(timer);
+	if (timeout <= ns)
+		return 1;
+
+	return timeout - ns;
+}
+EXPORT_SYMBOL_GPL(xnthread_get_timeout);
+
+xnticks_t xnthread_get_period(struct xnthread *thread)
+{
+	xnticks_t period = 0;
+	/*
+	 * The current thread period might be:
+	 * - the value of the timer interval for periodic threads (ns/ticks)
+	 * - or, the value of the allotted round-robin quantum (ticks)
+	 * - or zero, meaning "no periodic activity".
+	 */
+	if (xntimer_running_p(&thread->ptimer))
+		period = xntimer_interval(&thread->ptimer);
+	else if (xnthread_test_state(thread,XNRRB))
+		period = thread->rrperiod;
+
+	return period;
+}
+EXPORT_SYMBOL_GPL(xnthread_get_period);
+
+void xnthread_prepare_wait(struct xnthread_wait_context *wc)
+{
+	struct xnthread *curr = xnthread_current();
+
+	wc->posted = 0;
+	curr->wcontext = wc;
+}
+EXPORT_SYMBOL_GPL(xnthread_prepare_wait);
+
+static inline void release_all_ownerships(struct xnthread *curr)
+{
+	struct xnsynch *synch, *tmp;
+
+	/*
+	 * Release all the ownerships obtained by a thread on
+	 * synchronization objects. This routine must be entered with
+	 * interrupts off.
+	 */
+	xnthread_for_each_booster_safe(synch, tmp, curr) {
+		xnsynch_release(synch, curr);
+		if (synch->cleanup)
+			synch->cleanup(synch);
+	}
+}
+
+static inline void cleanup_tcb(struct xnthread *curr) /* nklock held, irqs off */
+{
+	list_del(&curr->glink);
+	cobalt_nrthreads--;
+	xnvfile_touch_tag(&nkthreadlist_tag);
+
+	if (xnthread_test_state(curr, XNREADY)) {
+		XENO_BUG_ON(COBALT, xnthread_test_state(curr, XNTHREAD_BLOCK_BITS));
+		xnsched_dequeue(curr);
+		xnthread_clear_state(curr, XNREADY);
+	}
+
+	if (xnthread_test_state(curr, XNPEND))
+		xnsynch_forget_sleeper(curr);
+
+	xnthread_set_state(curr, XNZOMBIE);
+	/*
+	 * NOTE: we must be running over the root thread, or @curr
+	 * is dormant, which means that we don't risk sched->curr
+	 * disappearing due to voluntary rescheduling while holding
+	 * the nklock, even though @curr bears the zombie bit.
+	 */
+	release_all_ownerships(curr);
+
+	pipeline_finalize_thread(curr);
+	xnsched_forget(curr);
+	xnthread_deregister(curr);
+}
+
+void __xnthread_cleanup(struct xnthread *curr)
+{
+	spl_t s;
+
+	secondary_mode_only();
+
+	xntimer_destroy(&curr->rtimer);
+	xntimer_destroy(&curr->ptimer);
+
+	if (curr->selector) {
+		xnselector_destroy(curr->selector);
+		curr->selector = NULL;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+	cleanup_tcb(curr);
+	xnlock_put_irqrestore(&nklock, s);
+
+	/* Wake up the joiner if any (we can't have more than one). */
+	complete(&curr->exited);
+
+	/* Notify our exit to xnthread_killall() if need be. */
+	if (waitqueue_active(&join_all))
+		wake_up(&join_all);
+
+	/* Finalize last since this incurs releasing the TCB. */
+	xnthread_run_handler_stack(curr, finalize_thread);
+}
+
+/*
+ * Unwinds xnthread_init() ops for an unmapped thread.  Since the
+ * latter must be dormant, it can't be part of any runqueue.
+ */
+void __xnthread_discard(struct xnthread *thread)
+{
+	spl_t s;
+
+	secondary_mode_only();
+
+	xntimer_destroy(&thread->rtimer);
+	xntimer_destroy(&thread->ptimer);
+
+	xnlock_get_irqsave(&nklock, s);
+	if (!list_empty(&thread->glink)) {
+		list_del(&thread->glink);
+		cobalt_nrthreads--;
+		xnvfile_touch_tag(&nkthreadlist_tag);
+	}
+	xnthread_deregister(thread);
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+/**
+ * @fn void xnthread_init(struct xnthread *thread,const struct xnthread_init_attr *attr,struct xnsched_class *sched_class,const union xnsched_policy_param *sched_param)
+ * @brief Initialize a new thread.
+ *
+ * Initializes a new thread. The thread is left dormant until it is
+ * actually started by xnthread_start().
+ *
+ * @param thread The address of a thread descriptor Cobalt will use to
+ * store the thread-specific data.  This descriptor must always be
+ * valid while the thread is active; it must therefore be allocated in
+ * permanent memory. @warning Some architectures may require the
+ * descriptor to be properly aligned in memory; this is an additional
+ * reason for descriptors not to be laid in the program stack where
+ * alignment constraints might not always be satisfied.
+ *
+ * @param attr A pointer to an attribute block describing the initial
+ * properties of the new thread. Members of this structure are defined
+ * as follows:
+ *
+ * - name: An ASCII string standing for the symbolic name of the
+ * thread. This name is copied to a safe place into the thread
+ * descriptor. This name might be used in various situations by Cobalt
+ * for issuing human-readable diagnostic messages, so it is usually a
+ * good idea to provide a sensible value here.  NULL is fine though
+ * and means "anonymous".
+ *
+ * - flags: A set of creation flags affecting the operation. The
+ * following flags can be part of this bitmask:
+ *
+ *   - XNSUSP creates the thread in a suspended state. In such a case,
+ * the thread shall be explicitly resumed using the xnthread_resume()
+ * service for its execution to actually begin, in addition to
+ * issuing xnthread_start() for it. This flag can also be specified
+ * when invoking xnthread_start() as a starting mode.
+ *
+ * - XNUSER shall be set if @a thread will be mapped over an existing
+ * user-space task. Otherwise, a new kernel host task is created, then
+ * paired with the new Xenomai thread.
+ *
+ * - XNFPU (enable FPU) tells Cobalt that the new thread may use the
+ * floating-point unit. XNFPU is implicitly assumed for user-space
+ * threads even if not set in @a flags.
+ *
+ * - affinity: The processor affinity of this thread. Passing
+ * CPU_MASK_ALL means "any cpu" from the allowed core affinity mask
+ * (cobalt_cpu_affinity). Passing an empty set is invalid.
+ *
+ * @param sched_class The initial scheduling class the new thread
+ * should be assigned to.
+ *
+ * @param sched_param The initial scheduling parameters to set for the
+ * new thread; @a sched_param must be valid within the context of @a
+ * sched_class.
+ *
+ * @return 0 is returned on success. Otherwise, the following error
+ * code indicates the cause of the failure:
+ *
+ * - -EINVAL is returned if @a attr->flags has invalid bits set, or @a
+ *   attr->affinity is invalid (e.g. empty).
+ *
+ * @coretags{secondary-only}
+ */
+int xnthread_init(struct xnthread *thread,
+		  const struct xnthread_init_attr *attr,
+		  struct xnsched_class *sched_class,
+		  const union xnsched_policy_param *sched_param)
+{
+	struct xnsched *sched;
+	cpumask_t affinity;
+	int ret;
+
+	if (attr->flags & ~(XNFPU | XNUSER | XNSUSP))
+		return -EINVAL;
+
+	/*
+	 * Pick an initial CPU for the new thread which is part of its
+	 * affinity mask, and therefore also part of the supported
+	 * CPUs. This CPU may change in pin_to_initial_cpu().
+	 */
+	cpumask_and(&affinity, &attr->affinity, &cobalt_cpu_affinity);
+	if (cpumask_empty(&affinity))
+		return -EINVAL;
+
+	sched = xnsched_struct(cpumask_first(&affinity));
+
+	ret = __xnthread_init(thread, attr, sched, sched_class, sched_param);
+	if (ret)
+		return ret;
+
+	trace_cobalt_thread_init(thread, attr, sched_class);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnthread_init);
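+
+/*
+ * Editor's sketch, not part of the original code: a minimal
+ * xnthread_init() call for an in-kernel (non-XNUSER) thread. The
+ * .rt.prio spelling of the policy parameter and the use of
+ * &xenomai_personality are assumptions made for illustration only.
+ *
+ *   static struct xnthread demo_thread;
+ *
+ *   int demo_setup(void)
+ *   {
+ *           struct xnthread_init_attr iattr = {
+ *                   .name = "demo",
+ *                   .flags = 0,                  // kernel shadow, not XNUSER
+ *                   .affinity = CPU_MASK_ALL,    // any CPU from cobalt_cpu_affinity
+ *                   .personality = &xenomai_personality, // assumed descriptor
+ *           };
+ *           union xnsched_policy_param param = { .rt = { .prio = 50 } };
+ *
+ *           return xnthread_init(&demo_thread, &iattr,
+ *                                &xnsched_class_rt, &param);
+ *   }
+ */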
+
+/**
+ * @fn int xnthread_start(struct xnthread *thread,const struct xnthread_start_attr *attr)
+ * @brief Start a newly created thread.
+ *
+ * Starts a (newly) created thread, scheduling it for the first
+ * time. This call releases the target thread from the XNDORMANT
+ * state. This service also sets the initial mode for the new thread.
+ *
+ * @param thread The descriptor address of the started thread which
+ * must have been previously initialized by a call to xnthread_init().
+ *
+ * @param attr A pointer to an attribute block describing the
+ * execution properties of the new thread. Members of this structure
+ * are defined as follows:
+ *
+ * - mode: The initial thread mode. The following flags can be part of
+ * this bitmask:
+ *
+ *   - XNLOCK causes the thread to lock the scheduler when it starts.
+ * The target thread will have to call the xnsched_unlock()
+ * service to unlock the scheduler. A non-preemptible thread may still
+ * block, in which case, the lock is reasserted when the thread is
+ * scheduled back in.
+ *
+ *   - XNSUSP makes the thread start in a suspended state. In such a
+ * case, the thread will have to be explicitly resumed using the
+ * xnthread_resume() service for its execution to actually begin.
+ *
+ * - entry: The address of the thread's body routine. In other words,
+ * it is the thread entry point.
+ *
+ * - cookie: A user-defined opaque cookie Cobalt will pass to the
+ * emerging thread as the sole argument of its entry point.
+ *
+ * @retval 0 if @a thread could be started;
+ *
+ * @retval -EBUSY if @a thread was not dormant or stopped;
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int xnthread_start(struct xnthread *thread,
+		   const struct xnthread_start_attr *attr)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (!xnthread_test_state(thread, XNDORMANT)) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBUSY;
+	}
+
+	xnthread_set_state(thread, attr->mode & (XNTHREAD_MODE_BITS | XNSUSP));
+	thread->entry = attr->entry;
+	thread->cookie = attr->cookie;
+	if (attr->mode & XNLOCK)
+		thread->lock_count = 1;
+
+	/*
+	 * A user-space thread starts immediately Cobalt-wise since we
+	 * already have an underlying Linux context for it, so we can
+	 * enlist it now to make it visible from the /proc interface.
+	 */
+	if (xnthread_test_state(thread, XNUSER))
+		enlist_new_thread(thread);
+
+	trace_cobalt_thread_start(thread);
+
+	xnthread_resume(thread, XNDORMANT);
+	xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnthread_start);
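+
+/*
+ * Editor's sketch, not part of the original code: starting the thread
+ * initialized in the previous sketch. demo_entry() is a hypothetical
+ * body; its signature matches the thread->entry(thread->cookie) call
+ * performed from kthread_trampoline().
+ *
+ *   static void demo_entry(void *cookie)   // hypothetical thread body
+ *   {
+ *           // see the cancellation sketch after xnthread_cancel()
+ *   }
+ *
+ *   struct xnthread_start_attr sattr = {
+ *           .mode = 0,
+ *           .entry = demo_entry,
+ *           .cookie = NULL,
+ *   };
+ *
+ *   ret = xnthread_start(&demo_thread, &sattr);
+ */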
+
+/**
+ * @fn void xnthread_set_mode(int clrmask,int setmask)
+ * @brief Change control mode of the current thread.
+ *
+ * Change the control mode of the current thread. The control mode
+ * affects several behaviours of the Cobalt core regarding this
+ * thread.
+ *
+ * @param clrmask Clears the corresponding bits from the control mode
+ * before setmask is applied. The scheduler lock held by the current
+ * thread can be forcibly released by passing the XNLOCK bit in this
+ * mask. In this case, the lock nesting count is also reset to zero.
+ *
+ * @param setmask The new thread mode. The following flags may be set
+ * in this bitmask:
+ *
+ * - XNLOCK makes the current thread non-preemptible by other threads.
+ * Unless XNTRAPLB is also set for the thread, the latter may still
+ * block, dropping the lock temporarily, in which case, the lock will
+ * be reacquired automatically when the thread resumes execution.
+ *
+ * - XNWARN enables debugging notifications for the current thread.  A
+ * SIGDEBUG (Linux-originated) signal is sent when the following
+ * atypical or abnormal behavior is detected:
+ *
+ *    - the current thread switches to secondary mode. Such notification
+ *      comes in handy for detecting spurious relaxes.
+ *
+ *    - CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED is enabled in the kernel
+ *      configuration, and the current thread is sleeping on a Cobalt
+ *      mutex currently owned by a thread running in secondary mode,
+ *      which reveals a priority inversion.
+ *
+ *    - the current thread is about to sleep while holding a Cobalt
+ *      mutex, and CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP is enabled in the
+ *      kernel configuration. Blocking for acquiring a mutex does not
+ *      trigger such a signal though.
+ *
+ *    - the current thread has both XNTRAPLB and XNLOCK set, and
+ *      attempts to block on a Cobalt service, which would cause a
+ *      lock break.
+ *
+ * - XNTRAPLB disallows breaking the scheduler lock. In the default
+ * case, a thread which holds the scheduler lock is allowed to drop it
+ * temporarily for sleeping. If this mode bit is set, such thread
+ * would return immediately with XNBREAK set from
+ * xnthread_suspend(). If XNWARN is set for the current thread,
+ * SIGDEBUG is sent in addition to raising the break condition.
+ *
+ * @coretags{primary-only, might-switch}
+ *
+ * @note Setting @a clrmask and @a setmask to zero leads to a nop,
+ * in which case xnthread_set_mode() returns the current mode.
+ */
+int xnthread_set_mode(int clrmask, int setmask)
+{
+	int oldmode, lock_count;
+	struct xnthread *curr;
+	spl_t s;
+
+	primary_mode_only();
+
+	xnlock_get_irqsave(&nklock, s);
+	curr = xnsched_current_thread();
+	oldmode = xnthread_get_state(curr) & XNTHREAD_MODE_BITS;
+	lock_count = curr->lock_count;
+	xnthread_clear_state(curr, clrmask & XNTHREAD_MODE_BITS);
+	xnthread_set_state(curr, setmask & XNTHREAD_MODE_BITS);
+	trace_cobalt_thread_set_mode(curr);
+
+	if (setmask & XNLOCK) {
+		if (lock_count == 0)
+			xnsched_lock();
+	} else if (clrmask & XNLOCK) {
+		if (lock_count > 0) {
+			curr->lock_count = 0;
+			xnthread_clear_localinfo(curr, XNLBALERT);
+			xnsched_run();
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (lock_count > 0)
+		oldmode |= XNLOCK;
+
+	return oldmode;
+}
+EXPORT_SYMBOL_GPL(xnthread_set_mode);
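+
+/*
+ * Editor's sketch, not part of the original code: bracketing a
+ * non-preemptible section from a primary-mode thread with XNLOCK,
+ * restoring the previous mode afterwards.
+ *
+ *   int oldmode = xnthread_set_mode(0, XNLOCK);  // lock the scheduler
+ *   // ... work which must not be preempted by other Cobalt threads ...
+ *   if (!(oldmode & XNLOCK))
+ *           xnthread_set_mode(XNLOCK, 0);        // re-enable preemption
+ */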
+
+/**
+ * @fn void xnthread_suspend(struct xnthread *thread, int mask,xnticks_t timeout, xntmode_t timeout_mode,struct xnsynch *wchan)
+ * @brief Suspend a thread.
+ *
+ * Suspends the execution of a thread according to a given suspensive
+ * condition. This thread will not be eligible for scheduling until it
+ * all the pending suspensive conditions set by this service are
+ * removed by one or more calls to xnthread_resume().
+ *
+ * @param thread The descriptor address of the suspended thread.
+ *
+ * @param mask The suspension mask specifying the suspensive condition
+ * to add to the thread's wait mask. Possible values usable by the
+ * caller are:
+ *
+ * - XNSUSP. This flag forcibly suspends a thread, regardless of any
+ * resource to wait for. A reverse call to xnthread_resume()
+ * specifying the XNSUSP bit must be issued to remove this condition,
+ * which is cumulative with other suspension bits. @a wchan should be
+ * NULL when using this suspending mode.
+ *
+ * - XNDELAY. This flag denotes a counted delay wait (in ticks) whose
+ * duration is defined by the value of the @a timeout parameter.
+ *
+ * - XNPEND. This flag denotes a wait for a synchronization object to
+ * be signaled. The wchan argument must point to this object. A
+ * timeout value can be passed to bound the wait. This suspending mode
+ * should not be used directly by the client interface, but rather
+ * through the xnsynch_sleep_on() call.
+ *
+ * @param timeout The timeout which may be used to limit the time the
+ * thread pends on a resource. This value is a wait time given in
+ * nanoseconds. It can either be relative, absolute monotonic, or
+ * absolute adjustable depending on @a timeout_mode.
+ *
+ * Passing XN_INFINITE @b and setting @a timeout_mode to XN_RELATIVE
+ * specifies an unbounded wait. All other values are used to
+ * initialize a watchdog timer. If the current operation mode of the
+ * system timer is oneshot and @a timeout elapses before
+ * xnthread_suspend() has completed, then the target thread will not
+ * be suspended, and this routine has no effect.
+ *
+ * @param timeout_mode The mode of the @a timeout parameter. It can
+ * either be set to XN_RELATIVE, XN_ABSOLUTE, or XN_REALTIME (see also
+ * xntimer_start()).
+ *
+ * @param wchan The address of a pended resource. This parameter is
+ * used internally by the synchronization object implementation code
+ * to specify on which object the suspended thread pends. NULL is a
+ * legitimate value when this parameter does not apply to the current
+ * suspending mode (e.g. XNSUSP).
+ *
+ * @note If the target thread has received a Linux-originated signal,
+ * then this service immediately exits without suspending the thread,
+ * but raises the XNBREAK condition in its information mask.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void xnthread_suspend(struct xnthread *thread, int mask,
+		      xnticks_t timeout, xntmode_t timeout_mode,
+		      struct xnsynch *wchan)
+{
+	unsigned long oldstate;
+	struct xnsched *sched;
+	spl_t s;
+
+	/* No, you certainly do not want to suspend the root thread. */
+	XENO_BUG_ON(COBALT, xnthread_test_state(thread, XNROOT));
+	/* No built-in support for conjunctive wait. */
+	XENO_BUG_ON(COBALT, wchan && thread->wchan);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_thread_suspend(thread, mask, timeout, timeout_mode, wchan);
+
+	sched = thread->sched;
+	oldstate = thread->state;
+
+	/*
+	 * If attempting to suspend a runnable thread which is pending
+	 * a forced switch to secondary mode (XNKICKED), just raise
+	 * the XNBREAK status and return immediately, except if we
+	 * are precisely doing such switch by applying XNRELAX.
+	 *
+	 * In the latter case, we also make sure to clear XNKICKED,
+	 * since we won't go through prepare_for_signal() once
+	 * relaxed.
+	 */
+	if (likely((oldstate & XNTHREAD_BLOCK_BITS) == 0)) {
+		if (likely((mask & XNRELAX) == 0)) {
+			if (xnthread_test_info(thread, XNKICKED))
+				goto abort;
+			if (thread == sched->curr &&
+			    thread->lock_count > 0 &&
+			    (oldstate & XNTRAPLB) != 0)
+				goto lock_break;
+		}
+		/*
+		 * Do not destroy the info left behind by yet unprocessed
+		 * wakeups when suspending a remote thread.
+		 */
+		if (thread == sched->curr)
+			xnthread_clear_info(thread, XNRMID|XNTIMEO|XNBREAK|
+						    XNWAKEN|XNROBBED|XNKICKED);
+	}
+
+	/*
+	 * Don't start the timer for a thread delayed indefinitely.
+	 */
+	if (timeout != XN_INFINITE || timeout_mode != XN_RELATIVE) {
+		xntimer_set_affinity(&thread->rtimer, thread->sched);
+		if (xntimer_start(&thread->rtimer, timeout, XN_INFINITE,
+				  timeout_mode)) {
+			/* (absolute) timeout value in the past, bail out. */
+			if (wchan) {
+				thread->wchan = wchan;
+				xnsynch_forget_sleeper(thread);
+			}
+			xnthread_set_info(thread, XNTIMEO);
+			goto out;
+		}
+		xnthread_set_state(thread, XNDELAY);
+	}
+
+	if (oldstate & XNREADY) {
+		xnsched_dequeue(thread);
+		xnthread_clear_state(thread, XNREADY);
+	}
+
+	xnthread_set_state(thread, mask);
+
+	/*
+	 * We must make sure that we don't clear the wait channel if a
+	 * thread is first blocked (wchan != NULL) then forcibly
+	 * suspended (wchan == NULL), since these are conjunctive
+	 * conditions.
+	 */
+	if (wchan)
+		thread->wchan = wchan;
+
+	if (likely(thread == sched->curr)) {
+		xnsched_set_resched(sched);
+		/*
+		 * Transition to secondary mode (XNRELAX) is a
+		 * separate path which is only available to
+		 * xnthread_relax(). Using __xnsched_run() there for
+		 * rescheduling allows us to break the scheduler lock
+		 * temporarily.
+		 */
+		if (unlikely(mask & XNRELAX)) {
+			pipeline_leave_oob_unlock();
+			__xnsched_run(sched);
+			return;
+		}
+		/*
+		 * If the thread is running on a remote CPU,
+		 * xnsched_run() will trigger the IPI as required.  In
+		 * this case, sched refers to a remote runqueue, so
+		 * make sure to always kick the rescheduling procedure
+		 * for the local one.
+		 */
+		__xnsched_run(xnsched_current());
+		goto out;
+	}
+
+	/*
+	 * Ok, this one is an interesting corner case, which requires
+	 * a bit of background first. Here, we handle the case of
+	 * suspending a _relaxed_ user shadow which is _not_ the
+	 * current thread.
+	 *
+	 *  The net effect is that we are attempting to stop the
+	 * shadow thread for Cobalt, whilst this thread is actually
+	 * running some code under the control of the Linux scheduler
+	 * (i.e. it's relaxed).
+	 *
+	 *  To make this possible, we force the target Linux task to
+	 * migrate back to the Xenomai domain by sending it a
+	 * SIGSHADOW signal the interface libraries trap for this
+	 * specific internal purpose, whose handler is expected to
+	 * call back Cobalt's migration service.
+	 *
+	 * By forcing this migration, we make sure that Cobalt
+	 * controls, hence properly stops, the target thread according
+	 * to the requested suspension condition. Otherwise, the
+	 * shadow thread in secondary mode would just keep running
+	 * into the Linux domain, thus breaking the most common
+	 * assumptions regarding suspended threads.
+	 *
+	 * We only care for threads that are not current, and for
+	 * XNSUSP, XNDELAY, XNDORMANT and XNHELD conditions, because:
+	 *
+	 * - There is no point in dealing with a relaxed thread which
+	 * is current, since personalities have to ask for primary
+	 * mode switch when processing any syscall which may block the
+	 * caller (i.e. __xn_exec_primary).
+	 *
+	 * - among all blocking bits (XNTHREAD_BLOCK_BITS), only
+	 * XNSUSP, XNDELAY, XNHELD and XNDBGSTOP may be applied by the
+	 * current thread to a non-current thread. XNPEND is always
+	 * added by the caller to its own state, XNMIGRATE, XNRELAX
+	 * and XNDBGSTOP have special semantics escaping this issue.
+	 *
+	 * We don't signal threads which are already in a dormant
+	 * state, since they are suspended by definition.
+	 */
+	if (((oldstate & (XNTHREAD_BLOCK_BITS|XNUSER)) == (XNRELAX|XNUSER)) &&
+	    (mask & (XNDELAY | XNSUSP | XNHELD)) != 0)
+		__xnthread_signal(thread, SIGSHADOW, SIGSHADOW_ACTION_HARDEN);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+	return;
+
+lock_break:
+	/* NOTE: thread is current */
+	if (xnthread_test_state(thread, XNWARN) &&
+	    !xnthread_test_localinfo(thread, XNLBALERT)) {
+		xnthread_set_info(thread, XNKICKED);
+		xnthread_set_localinfo(thread, XNLBALERT);
+		__xnthread_signal(thread, SIGDEBUG, SIGDEBUG_LOCK_BREAK);
+	}
+abort:
+	if (wchan) {
+		thread->wchan = wchan;
+		xnsynch_forget_sleeper(thread);
+	}
+	xnthread_clear_info(thread, XNRMID | XNTIMEO);
+	xnthread_set_info(thread, XNBREAK);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnthread_suspend);
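+
+/*
+ * Editor's sketch, not part of the original code: forcibly holding a
+ * thread with XNSUSP, then releasing it (t denotes the target thread
+ * descriptor). The XNSUSP condition set here is removed by a matching
+ * xnthread_resume() passing the same bit.
+ *
+ *   xnthread_suspend(t, XNSUSP, XN_INFINITE, XN_RELATIVE, NULL);
+ *   // ... t stays out of the runnable set until resumed ...
+ *   xnthread_resume(t, XNSUSP);
+ *   xnsched_run();   // commit the rescheduling
+ */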
+
+/**
+ * @fn void xnthread_resume(struct xnthread *thread,int mask)
+ * @brief Resume a thread.
+ *
+ * Resumes the execution of a thread previously suspended by one or
+ * more calls to xnthread_suspend(). This call removes a suspensive
+ * condition affecting the target thread. When all suspensive
+ * conditions are gone, the thread is left in a READY state at which
+ * point it becomes eligible anew for scheduling.
+ *
+ * @param thread The descriptor address of the resumed thread.
+ *
+ * @param mask The suspension mask specifying the suspensive condition
+ * to remove from the thread's wait mask. Possible values usable by
+ * the caller are:
+ *
+ * - XNSUSP. This flag removes the explicit suspension condition. This
+ * condition might be additive to the XNPEND condition.
+ *
+ * - XNDELAY. This flag removes the counted delay wait condition.
+ *
+ * - XNPEND. This flag removes the resource wait condition. If a
+ * watchdog is armed, it is automatically disarmed by this
+ * call. Unlike the two previous conditions, only the current thread
+ * can set this condition for itself, i.e. no thread can force another
+ * one to pend on a resource.
+ *
+ * When the thread is eventually resumed by one or more calls to
+ * xnthread_resume(), the caller of xnthread_suspend() in the awakened
+ * thread that suspended itself should check for the following bits in
+ * its own information mask to determine what caused its wake up:
+ *
+ * - XNRMID means that the caller must assume that the pended
+ * synchronization object has been destroyed (see xnsynch_flush()).
+ *
+ * - XNTIMEO means that the delay elapsed, or the watchdog went off
+ * before the corresponding synchronization object was signaled.
+ *
+ * - XNBREAK means that the wait has been forcibly broken by a call to
+ * xnthread_unblock().
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void xnthread_resume(struct xnthread *thread, int mask)
+{
+	unsigned long oldstate;
+	struct xnsched *sched;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_thread_resume(thread, mask);
+
+	xntrace_pid(xnthread_host_pid(thread), xnthread_current_priority(thread));
+
+	sched = thread->sched;
+	oldstate = thread->state;
+
+	if ((oldstate & XNTHREAD_BLOCK_BITS) == 0) {
+		if (oldstate & XNREADY)
+			xnsched_dequeue(thread);
+		goto enqueue;
+	}
+
+	/* Clear the specified block bit(s) */
+	xnthread_clear_state(thread, mask);
+
+	/*
+	 * If XNDELAY was set in the clear mask, xnthread_unblock()
+	 * was called for the thread, or a timeout has elapsed. In the
+	 * latter case, stopping the timer is a no-op.
+	 */
+	if (mask & XNDELAY)
+		xntimer_stop(&thread->rtimer);
+
+	if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS))
+		goto clear_wchan;
+
+	if (mask & XNDELAY) {
+		mask = xnthread_test_state(thread, XNPEND);
+		if (mask == 0)
+			goto unlock_and_exit;
+		if (thread->wchan)
+			xnsynch_forget_sleeper(thread);
+		goto recheck_state;
+	}
+
+	if (xnthread_test_state(thread, XNDELAY)) {
+		if (mask & XNPEND) {
+			/*
+			 * A resource became available to the thread.
+			 * Cancel the watchdog timer.
+			 */
+			xntimer_stop(&thread->rtimer);
+			xnthread_clear_state(thread, XNDELAY);
+		}
+		goto recheck_state;
+	}
+
+	/*
+	 * The thread is still suspended, but is no more pending on a
+	 * resource.
+	 */
+	if ((mask & XNPEND) != 0 && thread->wchan)
+		xnsynch_forget_sleeper(thread);
+
+	goto unlock_and_exit;
+
+recheck_state:
+	if (xnthread_test_state(thread, XNTHREAD_BLOCK_BITS))
+		goto unlock_and_exit;
+
+clear_wchan:
+	if ((mask & ~XNDELAY) != 0 && thread->wchan != NULL)
+		/*
+		 * If the thread was actually suspended, clear the
+		 * wait channel.  -- this allows requests like
+		 * xnthread_suspend(thread,XNDELAY,...)  not to run
+		 * the following code when the suspended thread is
+		 * woken up while undergoing a simple delay.
+		 */
+		xnsynch_forget_sleeper(thread);
+
+	if (unlikely((oldstate & mask) & XNHELD)) {
+		xnsched_requeue(thread);
+		goto ready;
+	}
+enqueue:
+	xnsched_enqueue(thread);
+ready:
+	xnthread_set_state(thread, XNREADY);
+	xnsched_set_resched(sched);
+unlock_and_exit:
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnthread_resume);
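+
+/*
+ * Editor's sketch, not part of the original code: how a thread which
+ * suspended itself typically decodes the wakeup cause documented
+ * above once it resumes (curr denotes the just-woken thread); the
+ * error codes are illustrative only.
+ *
+ *   if (xnthread_test_info(curr, XNRMID))
+ *           ret = -EIDRM;        // wait object was destroyed
+ *   else if (xnthread_test_info(curr, XNTIMEO))
+ *           ret = -ETIMEDOUT;    // watchdog fired first
+ *   else if (xnthread_test_info(curr, XNBREAK))
+ *           ret = -EINTR;        // wait forcibly broken
+ */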
+
+/**
+ * @fn int xnthread_unblock(struct xnthread *thread)
+ * @brief Unblock a thread.
+ *
+ * Breaks the thread out of any wait it is currently in.  This call
+ * removes the XNDELAY and XNPEND suspensive conditions previously put
+ * by xnthread_suspend() on the target thread. If all suspensive
+ * conditions are gone, the thread is left in a READY state at which
+ * point it becomes eligible anew for scheduling.
+ *
+ * @param thread The descriptor address of the unblocked thread.
+ *
+ * This call does not release the thread from the XNSUSP, XNRELAX,
+ * XNDORMANT or XNHELD suspensive conditions.
+ *
+ * When the thread resumes execution, the XNBREAK bit is set in the
+ * unblocked thread's information mask. Unblocking a non-blocked
+ * thread is perfectly harmless.
+ *
+ * @return non-zero is returned if the thread was actually unblocked
+ * from a pending wait state, 0 otherwise.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+int xnthread_unblock(struct xnthread *thread)
+{
+	int ret = 1;
+	spl_t s;
+
+	/*
+	 * Attempt to abort an undergoing wait for the given thread.
+	 * If this state is due to an alarm that has been armed to
+	 * limit the sleeping thread's waiting time while it pends for
+	 * a resource, the corresponding XNPEND state will be cleared
+	 * by xnthread_resume() in the same move. Otherwise, this call
+	 * may abort an undergoing infinite wait for a resource (if
+	 * any).
+	 */
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_thread_unblock(thread);
+
+	if (xnthread_test_state(thread, XNDELAY))
+		xnthread_resume(thread, XNDELAY);
+	else if (xnthread_test_state(thread, XNPEND))
+		xnthread_resume(thread, XNPEND);
+	else
+		ret = 0;
+
+	/*
+	 * We should not clear a previous break state if this service
+	 * is called more than once before the target thread actually
+	 * resumes, so we only set the bit here and never clear
+	 * it. However, we must not raise the XNBREAK bit if the
+	 * target thread was already awake at the time of this call,
+	 * so that downstream code does not get confused by some
+	 * "successful but interrupted syscall" condition. IOW, a
+	 * break state raised here must always trigger an error code
+	 * downstream, and an already successful syscall cannot be
+	 * marked as interrupted.
+	 */
+	if (ret)
+		xnthread_set_info(thread, XNBREAK);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnthread_unblock);
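+
+/*
+ * Editor's sketch, not part of the original code: kicking a sleeper
+ * out of its wait (t denotes the target thread descriptor); the
+ * return value tells whether it was actually blocked on a delay or a
+ * resource.
+ *
+ *   if (xnthread_unblock(t))
+ *           xnsched_run();   // give the readied thread a chance to run
+ */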
+
+/**
+ * @fn int xnthread_set_periodic(struct xnthread *thread,xnticks_t idate, xntmode_t timeout_mode, xnticks_t period)
+ * @brief Make a thread periodic.
+ *
+ * Make a thread periodic by programming its first release point and
+ * its period in the processor time line.  Subsequent calls to
+ * xnthread_wait_period() will delay the thread until the next
+ * periodic release point in the processor timeline is reached.
+ *
+ * @param thread The core thread to make periodic. If NULL, the
+ * current thread is assumed.
+ *
+ * @param idate The initial (absolute) date of the first release
+ * point, expressed in nanoseconds. The affected thread will be
+ * delayed by the first call to xnthread_wait_period() until this
+ * point is reached. If @a idate is equal to XN_INFINITE, the first
+ * release point is set to @a period nanoseconds after the current
+ * date. In the latter case, @a timeout_mode is not considered and can
+ * have any valid value.
+ *
+ * @param timeout_mode The mode of the @a idate parameter. It can
+ * either be set to XN_ABSOLUTE or XN_REALTIME with @a idate different
+ * from XN_INFINITE (see also xntimer_start()).
+ *
+ * @param period The period of the thread, expressed in nanoseconds.
+ * As a side-effect, passing XN_INFINITE attempts to stop the thread's
+ * periodic timer; in the latter case, the routine always exits
+ * successfully, regardless of the previous state of this timer.
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -ETIMEDOUT is returned if @a idate is different from XN_INFINITE and
+ * represents a date in the past.
+ *
+ * - -EINVAL is returned if @a period is different from XN_INFINITE
+ * but shorter than the scheduling latency value for the target
+ * system, as available from /proc/xenomai/latency. -EINVAL is also
+ * returned if @a timeout_mode is not compatible with @a idate, such
+ * as XN_RELATIVE with @a idate different from XN_INFINITE.
+ *
+ * - -EPERM is returned if @a thread is NULL, but the caller is not a
+ * Xenomai thread.
+ *
+ * @coretags{task-unrestricted}
+ */
+int xnthread_set_periodic(struct xnthread *thread, xnticks_t idate,
+			  xntmode_t timeout_mode, xnticks_t period)
+{
+	int ret = 0;
+	spl_t s;
+
+	if (thread == NULL) {
+		thread = xnthread_current();
+		if (thread == NULL)
+			return -EPERM;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (period == XN_INFINITE) {
+		if (xntimer_running_p(&thread->ptimer))
+			xntimer_stop(&thread->ptimer);
+
+		goto unlock_and_exit;
+	}
+
+	/*
+	 * LART: detect periods which are shorter than the core clock
+	 * gravity for kernel thread timers. This can't work, caller
+	 * must have messed up arguments.
+	 */
+	if (period < xnclock_ticks_to_ns(&nkclock,
+			 xnclock_get_gravity(&nkclock, kernel))) {
+		ret = -EINVAL;
+		goto unlock_and_exit;
+	}
+
+	xntimer_set_affinity(&thread->ptimer, thread->sched);
+
+	if (idate == XN_INFINITE)
+		xntimer_start(&thread->ptimer, period, period, XN_RELATIVE);
+	else {
+		if (timeout_mode == XN_REALTIME)
+			idate -= xnclock_get_offset(xntimer_clock(&thread->ptimer));
+		else if (timeout_mode != XN_ABSOLUTE) {
+			ret = -EINVAL;
+			goto unlock_and_exit;
+		}
+		ret = xntimer_start(&thread->ptimer, idate, period,
+				    XN_ABSOLUTE);
+	}
+
+unlock_and_exit:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnthread_set_periodic);
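+
+/*
+ * Editor's sketch, not part of the original code: programming a 1 ms
+ * period for the current thread, with the first release point one
+ * period from now (idate == XN_INFINITE).
+ *
+ *   ret = xnthread_set_periodic(NULL, XN_INFINITE, XN_RELATIVE,
+ *                               1000000);   // period in nanoseconds
+ */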
+
+/**
+ * @fn int xnthread_wait_period(unsigned long *overruns_r)
+ * @brief Wait for the next periodic release point.
+ *
+ * Make the current thread wait for the next periodic release point in
+ * the processor time line.
+ *
+ * @param overruns_r If non-NULL, @a overruns_r must be a pointer to a
+ * memory location which will be written with the count of pending
+ * overruns. This value is copied only when xnthread_wait_period()
+ * returns -ETIMEDOUT or success; the memory location remains
+ * unmodified otherwise. If NULL, this count will never be copied
+ * back.
+ *
+ * @return 0 is returned upon success; if @a overruns_r is valid, zero
+ * is copied to the pointed memory location. Otherwise:
+ *
+ * - -EWOULDBLOCK is returned if xnthread_set_periodic() has not
+ * previously been called for the calling thread.
+ *
+ * - -EINTR is returned if xnthread_unblock() has been called for the
+ * waiting thread before the next periodic release point has been
+ * reached. In this case, the overrun counter is reset too.
+ *
+ * - -ETIMEDOUT is returned if the timer has overrun, which indicates
+ * that one or more previous release points have been missed by the
+ * calling thread. If @a overruns_r is valid, the count of pending
+ * overruns is copied to the pointed memory location.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int xnthread_wait_period(unsigned long *overruns_r)
+{
+	unsigned long overruns = 0;
+	struct xnthread *thread;
+	struct xnclock *clock;
+	xnticks_t now;
+	int ret = 0;
+	spl_t s;
+
+	thread = xnthread_current();
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (unlikely(!xntimer_running_p(&thread->ptimer))) {
+		ret = -EWOULDBLOCK;
+		goto out;
+	}
+
+	trace_cobalt_thread_wait_period(thread);
+
+	clock = xntimer_clock(&thread->ptimer);
+	now = xnclock_read_raw(clock);
+	if (likely((xnsticks_t)(now - xntimer_pexpect(&thread->ptimer)) < 0)) {
+		xnthread_suspend(thread, XNDELAY, XN_INFINITE, XN_RELATIVE, NULL);
+		if (unlikely(xnthread_test_info(thread, XNBREAK))) {
+			ret = -EINTR;
+			goto out;
+		}
+
+		now = xnclock_read_raw(clock);
+	}
+
+	overruns = xntimer_get_overruns(&thread->ptimer, thread, now);
+	if (overruns) {
+		ret = -ETIMEDOUT;
+		trace_cobalt_thread_missed_period(thread);
+	}
+
+	if (likely(overruns_r != NULL))
+		*overruns_r = overruns;
+ out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnthread_wait_period);
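+
+/*
+ * Editor's sketch, not part of the original code: the canonical
+ * periodic loop built on the two services above; demo_do_work() and
+ * demo_log_overruns() are hypothetical helpers.
+ *
+ *   unsigned long overruns;
+ *   int ret;
+ *
+ *   for (;;) {
+ *           ret = xnthread_wait_period(&overruns);
+ *           if (ret == -EINTR)
+ *                   break;                        // unblocked, e.g. on cancellation
+ *           if (ret == -ETIMEDOUT)
+ *                   demo_log_overruns(overruns);  // some release points were missed
+ *           demo_do_work();
+ *   }
+ */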
+
+/**
+ * @fn int xnthread_set_slice(struct xnthread *thread, xnticks_t quantum)
+ * @brief Set thread time-slicing information.
+ *
+ * Update the time-slicing information for a given thread. This
+ * service enables or disables round-robin scheduling for the thread,
+ * depending on the value of @a quantum. By default, time-slicing is
+ * disabled for a new thread initialized by a call to xnthread_init().
+ *
+ * @param thread The descriptor address of the affected thread.
+ *
+ * @param quantum The time quantum assigned to the thread expressed in
+ * nanoseconds. If @a quantum is different from XN_INFINITE, the
+ * time-slice for the thread is set to that value and its current time
+ * credit is refilled (i.e. the thread is given a full time-slice to
+ * run next). Otherwise, if @a quantum equals XN_INFINITE,
+ * time-slicing is stopped for that thread.
+ *
+ * @return 0 is returned upon success. Otherwise, -EINVAL is returned
+ * if @a quantum is not XN_INFINITE and:
+ *
+ *   - the base scheduling class of the target thread does not support
+ *   time-slicing,
+ *
+ *   - @a quantum is smaller than the master clock gravity for a user
+ * thread, which denotes a spurious value.
+ *
+ * @coretags{task-unrestricted}
+ */
+int xnthread_set_slice(struct xnthread *thread, xnticks_t quantum)
+{
+	struct xnsched *sched;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sched = thread->sched;
+	thread->rrperiod = quantum;
+
+	if (quantum != XN_INFINITE) {
+		if (quantum <= xnclock_get_gravity(&nkclock, user) ||
+		    thread->base_class->sched_tick == NULL) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EINVAL;
+		}
+		xnthread_set_state(thread, XNRRB);
+		if (sched->curr == thread)
+			xntimer_start(&sched->rrbtimer,
+				      quantum, XN_INFINITE, XN_RELATIVE);
+	} else {
+		xnthread_clear_state(thread, XNRRB);
+		if (sched->curr == thread)
+			xntimer_stop(&sched->rrbtimer);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnthread_set_slice);
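+
+/*
+ * Editor's sketch, not part of the original code: enabling a 2 ms
+ * round-robin quantum for a thread (t denotes its descriptor), then
+ * turning time-slicing off again.
+ *
+ *   ret = xnthread_set_slice(t, 2000000);   // quantum in nanoseconds
+ *   // ... later, when the thread should no longer be time-sliced ...
+ *   xnthread_set_slice(t, XN_INFINITE);     // stop time-slicing
+ */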
+
+/**
+ * @fn void xnthread_cancel(struct xnthread *thread)
+ * @brief Cancel a thread.
+ *
+ * Request cancellation of a thread. This service forces @a thread to
+ * exit from any blocking call, then to switch to secondary mode.
+ * @a thread will terminate as soon as it reaches a cancellation
+ * point. Cancellation points are defined for the following
+ * situations:
+ *
+ * - @a thread self-cancels by a call to xnthread_cancel().
+ * - @a thread invokes a Linux syscall (user-space shadow only).
+ * - @a thread receives a Linux signal (user-space shadow only).
+ * - @a thread unblocks from a Xenomai syscall (user-space shadow only).
+ * - @a thread attempts to block on a Xenomai syscall (user-space shadow only).
+ * - @a thread explicitly calls xnthread_test_cancel().
+ *
+ * @param thread The descriptor address of the thread to terminate.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note In addition to the common actions taken upon cancellation, a
+ * thread which belongs to the SCHED_WEAK class is sent a regular
+ * SIGTERM signal.
+ */
+void xnthread_cancel(struct xnthread *thread)
+{
+	spl_t s;
+
+	/* Right, so you want to kill the kernel?! */
+	XENO_BUG_ON(COBALT, xnthread_test_state(thread, XNROOT));
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnthread_test_info(thread, XNCANCELD))
+		goto check_self_cancel;
+
+	trace_cobalt_thread_cancel(thread);
+
+	xnthread_set_info(thread, XNCANCELD);
+
+	/*
+	 * If @thread is not started yet, fake a start request,
+	 * raising the kicked condition bit to make sure it will reach
+	 * xnthread_test_cancel() on its wakeup path.
+	 */
+	if (xnthread_test_state(thread, XNDORMANT)) {
+		xnthread_set_info(thread, XNKICKED);
+		xnthread_resume(thread, XNDORMANT);
+		goto out;
+	}
+
+check_self_cancel:
+	if (xnthread_current() == thread) {
+		xnlock_put_irqrestore(&nklock, s);
+		xnthread_test_cancel();
+		/*
+		 * May return if called on behalf of an IRQ handler
+		 * which has preempted @thread.
+		 * preempted @thread.
+		 */
+		return;
+	}
+
+	/*
+	 * Force the non-current thread to exit:
+	 *
+	 * - unblock a user thread, switch it to weak scheduling,
+	 * then send it SIGTERM.
+	 *
+	 * - just unblock a kernel thread, it is expected to reach a
+	 * cancellation point soon after
+	 * (i.e. xnthread_test_cancel()).
+	 */
+	if (xnthread_test_state(thread, XNUSER)) {
+		__xnthread_demote(thread);
+		__xnthread_signal(thread, SIGTERM, 0);
+	} else
+		__xnthread_kick(thread);
+out:
+	xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnthread_cancel);
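+
+/*
+ * Editor's sketch, not part of the original code: how a Cobalt kernel
+ * thread body typically honors cancellation, by polling a
+ * cancellation point in its main loop (demo_do_work() is a
+ * hypothetical helper).
+ *
+ *   static void demo_entry(void *cookie)
+ *   {
+ *           for (;;) {
+ *                   xnthread_test_cancel();   // exits here once cancelled
+ *                   demo_do_work();
+ *           }
+ *   }
+ */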
+
+struct wait_grace_struct {
+	struct completion done;
+	struct rcu_head rcu;
+};
+
+static void grace_elapsed(struct rcu_head *head)
+{
+	struct wait_grace_struct *wgs;
+
+	wgs = container_of(head, struct wait_grace_struct, rcu);
+	complete(&wgs->done);
+}
+
+static void wait_for_rcu_grace_period(struct pid *pid)
+{
+	struct wait_grace_struct wait = {
+		.done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
+	};
+	struct task_struct *p;
+
+	init_rcu_head_on_stack(&wait.rcu);
+
+	for (;;) {
+		call_rcu(&wait.rcu, grace_elapsed);
+		wait_for_completion(&wait.done);
+		if (pid == NULL)
+			break;
+		rcu_read_lock();
+		p = pid_task(pid, PIDTYPE_PID);
+		rcu_read_unlock();
+		if (p == NULL)
+			break;
+		reinit_completion(&wait.done);
+	}
+}
+
+/**
+ * @fn void xnthread_join(struct xnthread *thread, bool uninterruptible)
+ * @brief Join with a terminated thread.
+ *
+ * This service waits for @a thread to terminate after a call to
+ * xnthread_cancel().  If that thread has already terminated or is
+ * dormant at the time of the call, then xnthread_join() returns
+ * immediately.
+ *
+ * xnthread_join() adapts to the calling context (primary or
+ * secondary), switching to secondary mode if needed for the duration
+ * of the wait. Upon return, the original runtime mode is restored,
+ * unless a Linux signal is pending.
+ *
+ * @param thread The descriptor address of the thread to join with.
+ *
+ * @param uninterruptible Boolean telling whether the service should
+ * wait for completion uninterruptibly.
+ *
+ * @return 0 is returned on success. Otherwise, the following error
+ * codes indicate the cause of the failure:
+ *
+ * - -EDEADLK is returned if the current thread attempts to join
+ * itself.
+ *
+ * - -EINTR is returned if the current thread was unblocked while
+ *   waiting for @a thread to terminate.
+ *
+ * - -EBUSY indicates that another thread is already waiting for @a
+ *   thread to terminate.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int xnthread_join(struct xnthread *thread, bool uninterruptible)
+{
+	struct xnthread *curr = xnthread_current();
+	int ret = 0, switched = 0;
+	struct pid *pid;
+	pid_t tpid;
+	spl_t s;
+
+	XENO_BUG_ON(COBALT, xnthread_test_state(thread, XNROOT));
+
+	if (thread == curr)
+		return -EDEADLK;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnthread_test_state(thread, XNJOINED)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (xnthread_test_info(thread, XNDORMANT))
+		goto out;
+
+	trace_cobalt_thread_join(thread);
+
+	xnthread_set_state(thread, XNJOINED);
+	tpid = xnthread_host_pid(thread);
+
+	if (curr && !xnthread_test_state(curr, XNRELAX)) {
+		xnlock_put_irqrestore(&nklock, s);
+		xnthread_relax(0, 0);
+		switched = 1;
+	} else
+		xnlock_put_irqrestore(&nklock, s);
+
+	/*
+	 * Since in theory, we might be sleeping there for a long
+	 * time, we get a reference on the pid struct holding our
+	 * target, then we check for its existence upon wake up.
+	 */
+	pid = find_get_pid(tpid);
+	if (pid == NULL)
+		goto done;
+
+	/*
+	 * We have a tricky issue to deal with, which involves code
+	 * relying on the assumption that a destroyed thread will have
+	 * scheduled away from do_exit() before xnthread_join()
+	 * returns. A typical example is illustrated by the following
+	 * sequence, with a RTDM kernel task implemented in a
+	 * dynamically loaded module:
+	 *
+	 * CPU0:  rtdm_task_destroy(ktask)
+	 *           xnthread_cancel(ktask)
+	 *           xnthread_join(ktask)
+	 *        ...<back to user>..
+	 *        rmmod(module)
+	 *
+	 * CPU1:  in ktask()
+	 *        ...
+	 *        ...
+	 *          __xnthread_test_cancel()
+	 *             do_exit()
+	 *                schedule()
+	 *
+	 * In such a sequence, the code on CPU0 would expect the RTDM
+	 * task to have scheduled away upon return from
+	 * rtdm_task_destroy(), so that unmapping the destroyed task
+	 * code and data memory when unloading the module is always
+	 * safe.
+	 *
+	 * To address this, the joiner first waits for the joinee to
+	 * signal completion from the Cobalt thread cleanup handler
+	 * (__xnthread_cleanup), then waits for a full RCU grace
+	 * period to have elapsed. Since the completion signal is sent
+	 * on behalf of do_exit(), we may assume that the joinee has
+	 * scheduled away before the RCU grace period ends.
+	 */
+	if (uninterruptible)
+		wait_for_completion(&thread->exited);
+	else {
+		ret = wait_for_completion_interruptible(&thread->exited);
+		if (ret < 0) {
+			put_pid(pid);
+			return -EINTR;
+		}
+	}
+
+	/* Make sure the joinee has scheduled away ultimately. */
+	wait_for_rcu_grace_period(pid);
+
+	put_pid(pid);
+done:
+	ret = 0;
+	if (switched)
+		ret = xnthread_harden();
+
+	return ret;
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnthread_join);
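+
+/*
+ * Editor's sketch, not part of the original code: the teardown
+ * sequence referred to above, as seen from the destroyer, e.g. before
+ * unloading the module providing the thread body.
+ *
+ *   xnthread_cancel(&demo_thread);            // force it through a cancellation point
+ *   ret = xnthread_join(&demo_thread, true);  // uninterruptible wait for it to unwind
+ */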
+
+#ifdef CONFIG_SMP
+
+void xnthread_migrate_passive(struct xnthread *thread, struct xnsched *sched)
+{				/* nklocked, IRQs off */
+	if (thread->sched == sched)
+		return;
+
+	trace_cobalt_thread_migrate_passive(thread, xnsched_cpu(sched));
+	/*
+	 * Timer migration is postponed until the next timeout happens
+	 * for the periodic and rrb timers. The resource timer will be
+	 * moved to the right CPU next time it is armed in
+	 * xnthread_suspend().
+	 */
+	xnsched_migrate_passive(thread, sched);
+
+	xnstat_exectime_reset_stats(&thread->stat.lastperiod);
+}
+
+#endif	/* CONFIG_SMP */
+
+/**
+ * @fn int xnthread_set_schedparam(struct xnthread *thread,struct xnsched_class *sched_class,const union xnsched_policy_param *sched_param)
+ * @brief Change the base scheduling parameters of a thread.
+ *
+ * Changes the base scheduling policy and parameters of a thread. If
+ * the thread is currently blocked, waiting in priority-pending mode
+ * (XNSYNCH_PRIO) for a synchronization object to be signaled, Cobalt
+ * will attempt to reorder the object's wait queue so that it reflects
+ * the new sleeper's priority, unless the XNSYNCH_DREORD flag has been
+ * set for the pended object.
+ *
+ * @param thread The descriptor address of the affected thread. See
+ * note.
+ *
+ * @param sched_class The new scheduling class the thread should be
+ * assigned to.
+ *
+ * @param sched_param The scheduling parameters to set for the thread;
+ * @a sched_param must be valid within the context of @a sched_class.
+ *
+ * It is absolutely required to use this service to change a thread
+ * priority, in order to have all the needed housekeeping chores
+ * correctly performed, i.e. do *not* call xnsched_set_policy()
+ * directly or, worse, change the thread.cprio field by hand in any
+ * case.
+ *
+ * @return 0 is returned on success. Otherwise, a negative error code
+ * indicates the cause of a failure that happened in the scheduling
+ * class implementation for @a sched_class. Invalid parameters passed
+ * into @a sched_param are common causes of error.
+ *
+ * @sideeffect
+ *
+ * - This service does not call the rescheduling procedure but may
+ * affect the state of the run queue for the previous and new
+ * scheduling classes.
+ *
+ * - Assigning the same scheduling class and parameters to a running
+ * or ready thread moves it to the end of the run queue, thus causing
+ * a manual round-robin, unless a priority boost is in progress.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note The changes only apply to the Xenomai scheduling parameters
+ * for @a thread. There is no propagation/translation of such changes
+ * to the Linux scheduler for the task mated to the Xenomai target
+ * thread.
+ */
+int xnthread_set_schedparam(struct xnthread *thread,
+			    struct xnsched_class *sched_class,
+			    const union xnsched_policy_param *sched_param)
+{
+	spl_t s;
+	int ret;
+
+	xnlock_get_irqsave(&nklock, s);
+	ret = __xnthread_set_schedparam(thread, sched_class, sched_param);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnthread_set_schedparam);
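+
+/*
+ * Editor's sketch, not part of the original code: re-prioritizing a
+ * thread within the RT class, then triggering the rescheduling the
+ * service itself does not perform. The .rt.prio spelling of the
+ * policy parameter is an assumption, as in the xnthread_init() sketch.
+ *
+ *   union xnsched_policy_param param = { .rt = { .prio = 80 } };
+ *
+ *   ret = xnthread_set_schedparam(t, &xnsched_class_rt, &param);
+ *   if (ret == 0)
+ *           xnsched_run();
+ */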
+
+int __xnthread_set_schedparam(struct xnthread *thread,
+			      struct xnsched_class *sched_class,
+			      const union xnsched_policy_param *sched_param)
+{
+	int old_wprio, new_wprio, ret;
+
+	old_wprio = thread->wprio;
+
+	ret = xnsched_set_policy(thread, sched_class, sched_param);
+	if (ret)
+		return ret;
+
+	new_wprio = thread->wprio;
+
+	/*
+	 * If the thread is waiting on a synchronization object,
+	 * update its position in the corresponding wait queue, unless
+	 * 1) reordering is explicitly disabled, or 2) the (weighted)
+	 * priority has not changed (to prevent spurious round-robin
+	 * effects).
+	 */
+	if (old_wprio != new_wprio && thread->wchan &&
+	    (thread->wchan->status & (XNSYNCH_DREORD|XNSYNCH_PRIO))
+	    == XNSYNCH_PRIO)
+		xnsynch_requeue_sleeper(thread);
+	/*
+	 * We should not move the thread at the end of its priority
+	 * group, if any of these conditions is true:
+	 *
+	 * - thread is not runnable;
+	 * - thread bears the ready bit which means that xnsched_set_policy()
+	 * already reordered the run queue;
+	 * - thread currently holds the scheduler lock, so we don't want
+	 * any round-robin effect to take place;
+	 * - a priority boost is in progress for this thread.
+	 */
+	if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS|XNREADY|XNBOOST) &&
+	    thread->lock_count == 0)
+		xnsched_putback(thread);
+
+	xnthread_set_info(thread, XNSCHEDP);
+	/* Ask the target thread to call back if relaxed. */
+	if (xnthread_test_state(thread, XNRELAX))
+		__xnthread_signal(thread, SIGSHADOW, SIGSHADOW_ACTION_HOME);
+
+	return ret;
+}
+
+void __xnthread_test_cancel(struct xnthread *curr)
+{
+	/*
+	 * Just in case xnthread_test_cancel() is called from an IRQ
+	 * handler, in which case we may not take the exit path.
+	 *
+	 * NOTE: curr->sched is stable from our POV and can't change
+	 * under our feet.
+	 */
+	if (curr->sched->lflags & XNINIRQ)
+		return;
+
+	if (!xnthread_test_state(curr, XNRELAX))
+		xnthread_relax(0, 0);
+
+	do_exit(0);
+	/* ... won't return ... */
+	XENO_BUG(COBALT);
+}
+EXPORT_SYMBOL_GPL(__xnthread_test_cancel);
+
+/**
+ * @internal
+ * @fn int xnthread_harden(void);
+ * @brief Migrate a Linux task to the Xenomai domain.
+ *
+ * This service causes the transition of "current" from the Linux
+ * domain to Xenomai. The shadow will resume in the Xenomai domain
+ * upon return from schedule().
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int xnthread_harden(void)
+{
+	struct task_struct *p = current;
+	struct xnthread *thread;
+	int ret;
+
+	secondary_mode_only();
+
+	thread = xnthread_current();
+	if (thread == NULL)
+		return -EPERM;
+
+	if (signal_pending(p))
+		return -ERESTARTSYS;
+
+	trace_cobalt_shadow_gohard(thread);
+
+	xnthread_clear_sync_window(thread, XNRELAX);
+
+	ret = pipeline_leave_inband();
+	if (ret) {
+		xnthread_test_cancel();
+		xnthread_set_sync_window(thread, XNRELAX);
+		return ret;
+	}
+
+	/* "current" is now running on the out-of-band stage. */
+
+	xnlock_clear_irqon(&nklock);
+	xnthread_test_cancel();
+
+	trace_cobalt_shadow_hardened(thread);
+
+	/*
+	 * Recheck pending signals once again. As we block task
+	 * wakeups during the migration and handle_sigwake_event()
+	 * ignores signals until XNRELAX is cleared, any signal raised
+	 * between entering TASK_HARDENING and starting the migration
+	 * remains silently pending until we reach this point.
+	 */
+	if (signal_pending(p)) {
+		xnthread_relax(!xnthread_test_state(thread, XNSSTEP),
+			       SIGDEBUG_MIGRATE_SIGNAL);
+		return -ERESTARTSYS;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnthread_harden);
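+
+/*
+ * Illustrative sketch (hypothetical caller, not part of this patch):
+ * code running in secondary mode which needs to proceed in primary
+ * mode would typically do:
+ *
+ *	ret = xnthread_harden();
+ *	if (ret)
+ *		return ret;
+ *
+ * where ret may be -EPERM (caller is not a Cobalt shadow) or
+ * -ERESTARTSYS (a signal is pending). Past this point, "current" runs
+ * on the out-of-band stage until it relaxes again.
+ */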
+
+struct lostage_wakeup {
+	struct pipeline_inband_work inband_work; /* Must be first. */
+	struct task_struct *task;
+};
+
+static void lostage_task_wakeup(struct pipeline_inband_work *inband_work)
+{
+	struct lostage_wakeup *rq;
+	struct task_struct *p;
+
+	rq = container_of(inband_work, struct lostage_wakeup, inband_work);
+	p = rq->task;
+
+	trace_cobalt_lostage_wakeup(p);
+
+	wake_up_process(p);
+}
+
+void __xnthread_propagate_schedparam(struct xnthread *curr)
+{
+	int kpolicy = SCHED_FIFO, kprio = curr->bprio, ret;
+	struct task_struct *p = current;
+	struct sched_param param;
+	spl_t s;
+
+	/*
+	 * Test-set race for XNSCHEDP is ok, the propagation is meant
+	 * to be done asap but not guaranteed to be carried out
+	 * immediately, and the request will remain pending until it
+	 * is eventually handled. We just have to protect against a
+	 * set-clear race.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+	xnthread_clear_info(curr, XNSCHEDP);
+	xnlock_put_irqrestore(&nklock, s);
+
+	/*
+	 * Map our policies/priorities to the regular kernel's
+	 * (approximated).
+	 */
+	if (xnthread_test_state(curr, XNWEAK) && kprio == 0)
+		kpolicy = SCHED_NORMAL;
+	else if (kprio >= MAX_RT_PRIO)
+		kprio = MAX_RT_PRIO - 1;
+
+	if (p->policy != kpolicy || (kprio > 0 && p->rt_priority != kprio)) {
+		param.sched_priority = kprio;
+		ret = sched_setscheduler_nocheck(p, kpolicy, &param);
+		XENO_WARN_ON(COBALT, ret != 0);
+	}
+}
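+
+/*
+ * Worked example of the mapping above (hypothetical values): a weak
+ * Cobalt thread with bprio == 0 and XNWEAK set is propagated as
+ * SCHED_NORMAL, while a Cobalt thread with bprio == 250 is clamped to
+ * SCHED_FIFO priority MAX_RT_PRIO - 1 (99 with the usual
+ * MAX_RT_PRIO == 100).
+ */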
+
+/**
+ * @internal
+ * @fn void xnthread_relax(int notify, int reason);
+ * @brief Switch a shadow thread back to the Linux domain.
+ *
+ * This service yields control of the running shadow back to
+ * Linux. This is done by suspending the shadow and scheduling a
+ * wakeup call for the mated user task inside the Linux domain. The
+ * Linux task will resume on return from xnthread_suspend() on behalf
+ * of the root thread.
+ *
+ * @param notify A boolean flag indicating whether threads monitored
+ * for secondary mode switches should be sent a SIGDEBUG signal. For
+ * instance, some internal operations like task exit should not
+ * trigger such a signal.
+ *
+ * @param reason The reason to report along with the SIGDEBUG signal.
+ *
+ * @coretags{primary-only, might-switch}
+ *
+ * @note "current" is valid here since the shadow runs with the
+ * properties of the Linux task.
+ */
+void xnthread_relax(int notify, int reason)
+{
+	struct task_struct *p = current;
+	struct lostage_wakeup wakework = {
+		.inband_work = PIPELINE_INBAND_WORK_INITIALIZER(wakework,
+					lostage_task_wakeup),
+		.task = p,
+	};
+	struct xnthread *thread = xnthread_current();
+	int cpu __maybe_unused, suspension;
+	kernel_siginfo_t si;
+
+	primary_mode_only();
+
+	/*
+	 * Enqueue the request to move the running shadow from the Xenomai
+	 * domain to the Linux domain.  This will cause the Linux task
+	 * to resume using the register state of the shadow thread.
+	 */
+	trace_cobalt_shadow_gorelax(reason);
+
+	/*
+	 * If you intend to change the following interrupt-free
+	 * sequence, /first/ make sure to check the special handling
+	 * of XNRELAX in xnthread_suspend() when switching out the
+	 * current thread, not to break basic assumptions we make
+	 * there.
+	 *
+	 * We disable interrupts during the migration sequence, but
+	 * xnthread_suspend() has an interrupts-on section built in.
+	 */
+	trace_cobalt_lostage_request("wakeup", p);
+	pipeline_post_inband_work(&wakework);
+	/*
+	 * Grab the nklock to synchronize the Linux task state
+	 * manipulation with handle_sigwake_event. This lock will be
+	 * dropped by xnthread_suspend().
+	 */
+	splmax();
+	xnlock_get(&nklock);
+	xnthread_run_handler_stack(thread, relax_thread);
+	suspension = pipeline_leave_oob_prepare();
+	xnthread_suspend(thread, suspension, XN_INFINITE, XN_RELATIVE, NULL);
+	splnone();
+
+	/*
+	 * Basic sanity check after an expected transition to secondary
+	 * mode.
+	 */
+	XENO_WARN(COBALT, is_primary_domain(),
+		  "xnthread_relax() failed for thread %s[%d]",
+		  thread->name, xnthread_host_pid(thread));
+
+	pipeline_leave_oob_finish();
+
+	/* Account for secondary mode switch. */
+	xnstat_counter_inc(&thread->stat.ssw);
+
+	/*
+	 * When relaxing, we check whether new Cobalt scheduling
+	 * parameters which might have been set for us while we were
+	 * running in primary mode should be propagated to the regular
+	 * kernel.
+	 *
+	 * CAUTION: This obviously won't update the schedparams cached
+	 * by the glibc for the caller in user-space, but this is the
+	 * deal: we don't relax threads which issue
+	 * pthread_setschedparam[_ex]() from primary mode, but then
+	 * only the kernel side (Cobalt and the host kernel) will be
+	 * aware of the change, and glibc might cache obsolete
+	 * information.
+	 */
+	xnthread_propagate_schedparam(thread);
+
+	if (xnthread_test_state(thread, XNUSER) && notify) {
+		if (xnthread_test_state(thread, XNWARN)) {
+			/* Help debugging spurious relaxes. */
+			xndebug_notify_relax(thread, reason);
+			memset(&si, 0, sizeof(si));
+			si.si_signo = SIGDEBUG;
+			si.si_code = SI_QUEUE;
+			si.si_int = reason | sigdebug_marker;
+			send_sig_info(SIGDEBUG, &si, p);
+		}
+		xnsynch_detect_boosted_relax(thread);
+	}
+
+	/*
+	 * "current" is now running into the Linux domain on behalf of
+	 * the root thread.
+	 */
+	xnthread_sync_window(thread);
+
+#ifdef CONFIG_SMP
+	if (xnthread_test_localinfo(thread, XNMOVED)) {
+		xnthread_clear_localinfo(thread, XNMOVED);
+		cpu = xnsched_cpu(thread->sched);
+		set_cpus_allowed_ptr(p, cpumask_of(cpu));
+	}
+#endif
+	/*
+	 * After migration there will be no syscall restart (rather a signal
+	 * delivery).
+	 */
+	xnthread_clear_localinfo(thread, XNSYSRST);
+
+	pipeline_clear_mayday();
+
+	trace_cobalt_shadow_relaxed(thread);
+}
+EXPORT_SYMBOL_GPL(xnthread_relax);
+
+static void lostage_task_signal(struct pipeline_inband_work *inband_work)
+{
+	struct lostage_signal *rq;
+	struct task_struct *p;
+	kernel_siginfo_t si;
+	int signo, sigval;
+	spl_t s;
+
+	rq = container_of(inband_work, struct lostage_signal, inband_work);
+	/*
+	 * Revisit: I-pipe requirement. It passes a copy of the original work
+	 * struct, so retrieve the original one first in order to update it.
+	 */
+	rq = rq->self;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	p = rq->task;
+	signo = rq->signo;
+	sigval = rq->sigval;
+	rq->task = NULL;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	trace_cobalt_lostage_signal(p, signo);
+
+	if (signo == SIGSHADOW || signo == SIGDEBUG) {
+		memset(&si, '\0', sizeof(si));
+		si.si_signo = signo;
+		si.si_code = SI_QUEUE;
+		si.si_int = sigval;
+		send_sig_info(signo, &si, p);
+	} else {
+		send_sig(signo, p, 1);
+	}
+}
+
+static int force_wakeup(struct xnthread *thread) /* nklock locked, irqs off */
+{
+	int ret = 0;
+
+	if (xnthread_test_info(thread, XNKICKED))
+		return 1;
+
+	if (xnthread_unblock(thread)) {
+		xnthread_set_info(thread, XNKICKED);
+		ret = 1;
+	}
+
+	/*
+	 * CAUTION: we must NOT raise XNBREAK when clearing a forcible
+	 * block state, such as XNSUSP, XNHELD. The caller of
+	 * xnthread_suspend() we unblock shall proceed as for a normal
+	 * return, until it traverses a cancellation point if
+	 * XNCANCELD was raised earlier, or calls xnthread_suspend()
+	 * which will detect XNKICKED and act accordingly.
+	 *
+	 * Rationale: callers of xnthread_suspend() may assume that
+	 * receiving XNBREAK means that the process that motivated the
+	 * blocking did not go to completion. E.g. the wait context
+	 * (see xnthread_prepare_wait()) was NOT posted before
+	 * xnsynch_sleep_on() returned, leaving no useful data there.
+	 * Therefore, in case only XNSUSP remains set for the thread
+	 * on entry to force_wakeup(), after XNPEND was lifted earlier
+	 * when the wait went to successful completion (i.e. no
+	 * timeout), then we want the kicked thread to know that it
+	 * did receive the requested resource, not finding XNBREAK in
+	 * its state word.
+	 *
+	 * Callers of xnthread_suspend() may inquire for XNKICKED to
+	 * detect forcible unblocks from XNSUSP, XNHELD, if they
+	 * should act upon this case specifically.
+	 */
+	if (xnthread_test_state(thread, XNSUSP|XNHELD)) {
+		xnthread_resume(thread, XNSUSP|XNHELD);
+		xnthread_set_info(thread, XNKICKED);
+	}
+
+	/*
+	 * Tricky cases:
+	 *
+	 * - a thread which was ready on entry wasn't actually
+	 * running, but nevertheless waits for the CPU in primary
+	 * mode, so we have to make sure that it will be notified of
+	 * the pending break condition as soon as it enters
+	 * xnthread_suspend() from a blocking Xenomai syscall.
+	 *
+	 * - a ready/readied thread on exit may be prevented from
+	 * running by the scheduling policy module it belongs
+	 * to. Typically, policies enforcing a runtime budget do not
+	 * block threads with no budget, but rather keep them out of
+	 * their run queue, so that ->sched_pick() won't elect
+	 * them. We tell the policy handler that we do want such a
+	 * thread to run until it relaxes, whatever this means
+	 * internally for the implementation.
+	 */
+	if (xnthread_test_state(thread, XNREADY))
+		xnsched_kick(thread);
+
+	return ret;
+}
+
+void __xnthread_kick(struct xnthread *thread) /* nklock locked, irqs off */
+{
+	struct task_struct *p = xnthread_host_task(thread);
+
+	/* Thread is already relaxed -- nop. */
+	if (xnthread_test_state(thread, XNRELAX))
+		return;
+
+	/*
+	 * First, try to kick the thread out of any blocking syscall
+	 * Xenomai-wise. If that succeeds, then the thread will relax
+	 * on its return path to user-space.
+	 */
+	if (force_wakeup(thread))
+		return;
+
+	/*
+	 * If that did not work out because the thread was not blocked
+	 * (i.e. XNPEND/XNDELAY) in a syscall, then force a mayday
+	 * trap. Note that we don't want to send that thread any linux
+	 * signal, we only want to force it to switch to secondary
+	 * mode asap.
+	 *
+	 * It could happen that a thread is relaxed on a syscall
+	 * return path after it was resumed from self-suspension
+	 * (e.g. XNSUSP) then also forced to run a mayday trap right
+	 * after: this is still correct, at worst we would get a
+	 * useless mayday syscall leading to a no-op, no big deal.
+	 */
+	xnthread_set_info(thread, XNKICKED);
+
+	/*
+	 * We may send mayday signals to userland threads only.
+	 * However, no need to run a mayday trap if the current thread
+	 * kicks itself out of primary mode: it will relax on its way
+	 * back to userland via the current syscall
+	 * epilogue. Otherwise, we want that thread to enter the
+	 * mayday trap asap, to call us back for relaxing.
+	 */
+	if (thread != xnsched_current_thread() &&
+	    xnthread_test_state(thread, XNUSER))
+		pipeline_raise_mayday(p);
+}
+
+void xnthread_kick(struct xnthread *thread)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	__xnthread_kick(thread);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnthread_kick);
+
+void __xnthread_demote(struct xnthread *thread) /* nklock locked, irqs off */
+{
+	struct xnsched_class *sched_class;
+	union xnsched_policy_param param;
+
+	/*
+	 * First we kick the thread out of primary mode, and have it
+	 * resume execution immediately over the regular Linux
+	 * context.
+	 */
+	__xnthread_kick(thread);
+
+	/*
+	 * Then we demote it, turning that thread into a non real-time
+	 * Xenomai shadow, which still has access to Xenomai
+	 * resources, but won't compete for real-time scheduling
+	 * anymore. In effect, moving the thread to a weak scheduling
+	 * class/priority will prevent it from sticking back to
+	 * primary mode.
+	 */
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+	param.weak.prio = 0;
+	sched_class = &xnsched_class_weak;
+#else
+	param.rt.prio = 0;
+	sched_class = &xnsched_class_rt;
+#endif
+	__xnthread_set_schedparam(thread, sched_class, &param);
+}
+
+void xnthread_demote(struct xnthread *thread)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	__xnthread_demote(thread);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnthread_demote);
+
+static int get_slot_index_from_sig(int sig, int arg)
+{
+	int action;
+
+	switch (sig) {
+	case SIGDEBUG:
+		return XNTHREAD_SIGDEBUG;
+	case SIGSHADOW:
+		action = sigshadow_action(arg);
+		switch (action) {
+		case SIGSHADOW_ACTION_HARDEN:
+			return XNTHREAD_SIGSHADOW_HARDEN;
+		case SIGSHADOW_ACTION_BACKTRACE:
+			return XNTHREAD_SIGSHADOW_BACKTRACE;
+		case SIGSHADOW_ACTION_HOME:
+			return XNTHREAD_SIGSHADOW_HOME;
+		}
+		break;
+	case SIGTERM:
+		return XNTHREAD_SIGTERM;
+	}
+
+	return -1;
+}
+
+/* nklock locked, irqs off */
+void __xnthread_signal(struct xnthread *thread, int sig, int arg)
+{
+	struct lostage_signal *sigwork;
+	int slot;
+
+	if (XENO_WARN_ON(COBALT, !xnthread_test_state(thread, XNUSER)))
+		return;
+
+	slot = get_slot_index_from_sig(sig, arg);
+	if (WARN_ON_ONCE(slot < 0))
+		return;
+
+	sigwork = &thread->sigarray[slot];
+	if (sigwork->task)
+		return;
+
+	sigwork->inband_work = (struct pipeline_inband_work)
+			PIPELINE_INBAND_WORK_INITIALIZER(*sigwork,
+							 lostage_task_signal);
+	sigwork->task = xnthread_host_task(thread);
+	sigwork->signo = sig;
+	sigwork->sigval = sig == SIGDEBUG ? arg | sigdebug_marker : arg;
+	sigwork->self = sigwork; /* Revisit: I-pipe requirement */
+
+	trace_cobalt_lostage_request("signal", sigwork->task);
+
+	pipeline_post_inband_work(sigwork);
+}
+
+void xnthread_signal(struct xnthread *thread, int sig, int arg)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	__xnthread_signal(thread, sig, arg);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnthread_signal);
+
+void xnthread_pin_initial(struct xnthread *thread)
+{
+	struct task_struct *p = current;
+	struct xnsched *sched;
+	int cpu;
+	spl_t s;
+
+	/*
+	 * @thread is the Xenomai extension of the current kernel
+	 * task. If the current CPU is part of the affinity mask of
+	 * this thread, pin the latter on this CPU. Otherwise pin it
+	 * to the first CPU of that mask.
+	 */
+	cpu = task_cpu(p);
+	if (!cpumask_test_cpu(cpu, &thread->affinity))
+		cpu = cpumask_first(&thread->affinity);
+
+	set_cpus_allowed_ptr(p, cpumask_of(cpu));
+	/*
+	 * @thread is still unstarted Xenomai-wise, we are precisely
+	 * in the process of mapping the current kernel task to
+	 * it. Therefore xnthread_migrate_passive() is the right way
+	 * to pin it on a real-time CPU.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+	sched = xnsched_struct(cpu);
+	xnthread_migrate_passive(thread, sched);
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+/* nklock locked, irqs off */
+void xnthread_call_mayday(struct xnthread *thread, int reason)
+{
+	struct task_struct *p = xnthread_host_task(thread);
+
+	/* Mayday traps are available to userland threads only. */
+	XENO_BUG_ON(COBALT, !xnthread_test_state(thread, XNUSER));
+	xnthread_set_info(thread, XNKICKED);
+	__xnthread_signal(thread, SIGDEBUG, reason);
+	pipeline_raise_mayday(p);
+}
+EXPORT_SYMBOL_GPL(xnthread_call_mayday);
+
+int xnthread_killall(int grace, int mask)
+{
+	struct xnthread *t, *curr = xnthread_current();
+	int nrkilled = 0, nrthreads, count;
+	long ret;
+	spl_t s;
+
+	secondary_mode_only();
+
+	/*
+	 * We may hold the core lock across calls to xnthread_cancel()
+	 * provided that we won't self-cancel.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+
+	nrthreads = cobalt_nrthreads;
+
+	xnsched_for_each_thread(t) {
+		if (xnthread_test_state(t, XNROOT) ||
+		    xnthread_test_state(t, mask) != mask ||
+		    t == curr)
+			continue;
+
+		if (XENO_DEBUG(COBALT))
+			printk(XENO_INFO "terminating %s[%d]\n",
+			       t->name, xnthread_host_pid(t));
+		nrkilled++;
+		xnthread_cancel(t);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	/*
+	 * Cancel then join all existing threads during the grace
+	 * period. It is the caller's responsibility to prevent more
+	 * threads from binding to the system if required; we make no
+	 * provision for this here.
+	 */
+	count = nrthreads - nrkilled;
+	if (XENO_DEBUG(COBALT))
+		printk(XENO_INFO "waiting for %d threads to exit\n",
+		       nrkilled);
+
+	if (grace > 0) {
+		ret = wait_event_interruptible_timeout(join_all,
+						       cobalt_nrthreads == count,
+						       grace * HZ);
+		if (ret == 0)
+			return -EAGAIN;
+	} else
+		ret = wait_event_interruptible(join_all,
+					       cobalt_nrthreads == count);
+
+	/* Wait for a full RCU grace period to expire. */
+	wait_for_rcu_grace_period(NULL);
+
+	if (XENO_DEBUG(COBALT))
+		printk(XENO_INFO "joined %d threads\n",
+		       count + nrkilled - cobalt_nrthreads);
+
+	return ret < 0 ? -EINTR : 0;
+}
+EXPORT_SYMBOL_GPL(xnthread_killall);
+
+/* Xenomai's generic personality. */
+struct xnthread_personality xenomai_personality = {
+	.name = "core",
+	.magic = -1
+};
+EXPORT_SYMBOL_GPL(xenomai_personality);
+
+/** @} */
+++ linux-patched/kernel/xenomai/Makefile	2022-03-21 12:58:28.829894422 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/arith.c	1970-01-01 01:00:00.000000000 +0100
+obj-$(CONFIG_XENOMAI) += pipeline/ xenomai.o rtdm/ posix/
+
+xenomai-y :=	arith.o 	\
+		bufd.o		\
+		clock.o		\
+		heap.o		\
+		init.o		\
+		lock.o		\
+		registry.o	\
+		sched-idle.o	\
+		sched-rt.o	\
+		sched.o		\
+		select.o	\
+		synch.o		\
+		thread.o	\
+		time.o		\
+		timer.o		\
+		tree.o
+
+xenomai-$(CONFIG_XENO_OPT_SCHED_QUOTA) += sched-quota.o
+xenomai-$(CONFIG_XENO_OPT_SCHED_WEAK) += sched-weak.o
+xenomai-$(CONFIG_XENO_OPT_SCHED_SPORADIC) += sched-sporadic.o
+xenomai-$(CONFIG_XENO_OPT_SCHED_TP) += sched-tp.o
+xenomai-$(CONFIG_XENO_OPT_DEBUG) += debug.o
+xenomai-$(CONFIG_XENO_OPT_PIPE) += pipe.o
+xenomai-$(CONFIG_XENO_OPT_MAP) += map.o
+xenomai-$(CONFIG_PROC_FS) += vfile.o procfs.o
+++ linux-patched/kernel/xenomai/arith.c	2022-03-21 12:58:28.825894461 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/tree.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Gilles Chanteperdrix.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/module.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_arith In-kernel arithmetics
+ *
+ * A collection of helpers performing arithmetic operations not
+ * implicitly available from kernel context via GCC helpers. Many of
+ * these routines enable 64-bit arithmetic on 32-bit systems. Xenomai
+ * architecture ports normally implement the performance critical ones
+ * in hand-crafted assembly code (see
+ * kernel/cobalt/arch/\<arch\>/include/asm/xenomai/uapi/arith.h).
+ * @{
+ */
+
+/**
+ * Architecture-independent div64 operation with remainder.
+ *
+ * @param a dividend
+ *
+ * @param b divisor
+ *
+ * @param rem if non-NULL, a pointer to a 64-bit variable for
+ * collecting the remainder from the division.
+ */
+unsigned long long xnarch_generic_full_divmod64(unsigned long long a,
+						unsigned long long b,
+						unsigned long long *rem)
+{
+	unsigned long long q = 0, r = a;
+	int i;
+
+	for (i = fls(a >> 32) - fls(b >> 32), b <<= i; i >= 0; i--, b >>= 1) {
+		q <<= 1;
+		if (b <= r) {
+			r -= b;
+			q++;
+		}
+	}
+
+	if (rem)
+		*rem = r;
+	return q;
+}
+EXPORT_SYMBOL_GPL(xnarch_generic_full_divmod64);
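+
+/*
+ * Illustrative use (hypothetical values): splitting 10^10 nanoseconds
+ * into seconds plus a remainder on a 32-bit system lacking a native
+ * 64-bit divide:
+ *
+ *	unsigned long long secs, rem;
+ *
+ *	secs = xnarch_generic_full_divmod64(10000000000ULL,
+ *					    1000000000ULL, &rem);
+ *	// secs == 10, rem == 0
+ */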
+
+/** @} */
+++ linux-patched/kernel/xenomai/tree.c	2022-03-21 12:58:28.820894510 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/sched-quota.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <cobalt/kernel/tree.h>
+
+void xntree_cleanup(struct rb_root *t, void *cookie,
+		void (*destroy)(void *cookie, struct xnid *id))
+{
+	struct rb_node *node, *next;
+
+	node = rb_first(t);
+	while (node) {
+		next = rb_next(node);
+
+		/* destroy is expected to remove the node from the rbtree */
+		destroy(cookie, container_of(node, struct xnid, link));
+
+		node = next;
+	}
+}
+
+int xnid_enter(struct rb_root *t, struct xnid *xnid, xnkey_t key)
+{
+	struct rb_node **new = &t->rb_node, *parent = NULL;
+
+	while (*new) {
+		struct xnid *i = container_of(*new, struct xnid, link);
+
+		parent = *new;
+		if (key < i->key)
+			new = &((*new)->rb_left);
+		else if (key > i->key)
+			new = &((*new)->rb_right);
+		else
+			return -EEXIST;
+	}
+
+	xnid->key = key;
+	rb_link_node(&xnid->link, parent, new);
+	rb_insert_color(&xnid->link, t);
+
+	return 0;
+}
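+
+/*
+ * Illustrative sketch (hypothetical caller, not part of this patch):
+ * my_object and my_destroy() are made-up names; destroy handlers are
+ * expected to unlink the node themselves, as noted in
+ * xntree_cleanup().
+ *
+ *	struct my_object { struct xnid id; };
+ *	struct rb_root tree = RB_ROOT;
+ *
+ *	ret = xnid_enter(&tree, &obj->id, key);	// -EEXIST if key is in use
+ *	...
+ *	xntree_cleanup(&tree, cookie, my_destroy);
+ */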
+++ linux-patched/kernel/xenomai/sched-quota.c	2022-03-21 12:58:28.816894549 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/sched-rt.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/bitmap.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/arith.h>
+#include <cobalt/uapi/sched.h>
+#include <trace/events/cobalt-core.h>
+
+/*
+ * With this policy, each per-CPU scheduler slot maintains a list of
+ * active thread groups, picking from the sched_rt runqueue.
+ *
+ * Each time a thread is picked from the runqueue, we check whether we
+ * still have budget for running it, looking at the group it belongs
+ * to. If so, a timer is armed to fire when that group would run out
+ * of budget, assuming the incoming thread keeps running unpreempted
+ * until then (i.e. xnsched_quota->limit_timer).
+ *
+ * Otherwise, if no budget remains in the group for running the
+ * candidate thread, we move the latter to a local expiry queue
+ * maintained by the group. This process is done on the fly as we pull
+ * from the runqueue.
+ *
+ * Updating the remaining budget is done each time the Cobalt core
+ * asks for replacing the current thread with the next runnable one,
+ * i.e. xnsched_quota_pick(). There we charge the elapsed run time of
+ * the outgoing thread to the relevant group, and conversely, we check
+ * whether the incoming thread has budget.
+ *
+ * Finally, a per-CPU timer (xnsched_quota->refill_timer) periodically
+ * ticks in the background, in accordance with the defined quota
+ * interval. Thread group budgets are replenished by its handler in
+ * accordance with their respective shares, pushing all expired threads
+ * back to the run queue in the same move.
+ *
+ * NOTE: since the core logic enforcing the budget entirely happens in
+ * xnsched_quota_pick(), applying a budget change can be done as
+ * simply as forcing the rescheduling procedure to be invoked asap. As
+ * a result of this, the Cobalt core will ask for the next thread to
+ * run, which means calling xnsched_quota_pick() eventually.
+ *
+ * CAUTION: xnsched_quota_group->nr_active does count both the threads
+ * from that group linked to the sched_rt runqueue, _and_ the threads
+ * moved to the local expiry queue. As a matter of fact, the expired
+ * threads - those for which we consumed all the per-group budget -
+ * are still seen as runnable (i.e. not blocked/suspended) by the
+ * Cobalt core. This only means that the SCHED_QUOTA policy won't pick
+ * them until the corresponding budget is replenished.
+ */
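+
+/*
+ * Worked example (hypothetical figures): with a 10ms quota interval,
+ * a group configured with quota=30% and quota_peak=50% is refilled
+ * with a 3ms budget every 10ms, and may carry unused budget over up
+ * to a 5ms cap per interval through the credit mechanism implemented
+ * in replenish_budget() below.
+ */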
+static DECLARE_BITMAP(group_map, CONFIG_XENO_OPT_SCHED_QUOTA_NR_GROUPS);
+
+static inline int group_is_active(struct xnsched_quota_group *tg)
+{
+	struct xnthread *curr = tg->sched->curr;
+
+	if (tg->nr_active)
+		return 1;
+
+	/*
+	 * Check whether the current thread belongs to the group, and
+	 * is still in running state (XNREADY denotes a thread linked
+	 * to the runqueue, in which case tg->nr_active already
+	 * accounts for it).
+	 */
+	if (curr->quota == tg &&
+	    xnthread_test_state(curr, XNREADY|XNTHREAD_BLOCK_BITS) == 0)
+		return 1;
+
+	return 0;
+}
+
+static inline void replenish_budget(struct xnsched_quota *qs,
+				    struct xnsched_quota_group *tg)
+{
+	xnticks_t budget_ns, credit_ns;
+
+	if (tg->quota_ns == tg->quota_peak_ns) {
+		/*
+		 * Fast path: we don't accumulate runtime credit.
+		 * This includes groups with no runtime limit
+		 * (i.e. quota off: quota >= period && quota == peak).
+		 */
+		tg->run_budget_ns = tg->quota_ns;
+		return;
+	}
+
+	/*
+	 * We have to deal with runtime credit accumulation, as the
+	 * group may consume more than its base quota during a single
+	 * interval, though only up to a peak duration (so as not to
+	 * monopolize the CPU).
+	 *
+	 * - In the simplest case, a group is allotted a new full
+	 * budget plus the unconsumed portion of the previous budget,
+	 * provided the sum does not exceed the peak quota.
+	 *
+	 * - When there is too much budget for a single interval
+	 * (i.e. above peak quota), we spread the extra time over
+	 * multiple intervals through a credit accumulation mechanism.
+	 *
+	 * - The accumulated credit is dropped whenever a group has no
+	 * runnable threads.
+	 */
+	if (!group_is_active(tg)) {
+		/* Drop accumulated credit. */
+		tg->run_credit_ns = 0;
+		tg->run_budget_ns = tg->quota_ns;
+		return;
+	}
+
+	budget_ns = tg->run_budget_ns + tg->quota_ns;
+	if (budget_ns > tg->quota_peak_ns) {
+		/* Too much budget, spread it over intervals. */
+		tg->run_credit_ns += budget_ns - tg->quota_peak_ns;
+		tg->run_budget_ns = tg->quota_peak_ns;
+	} else if (tg->run_credit_ns) {
+		credit_ns = tg->quota_peak_ns - budget_ns;
+		/* Consume the accumulated credit. */
+		if (tg->run_credit_ns >= credit_ns)
+			tg->run_credit_ns -= credit_ns;
+		else {
+			credit_ns = tg->run_credit_ns;
+			tg->run_credit_ns = 0;
+		}
+		/* Allot extended budget, limited to peak quota. */
+		tg->run_budget_ns = budget_ns + credit_ns;
+	} else
+		/* No credit, budget was below peak quota. */
+		tg->run_budget_ns = budget_ns;
+}
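+
+/*
+ * Worked example of the credit logic above (hypothetical figures):
+ * assume quota_ns = 3ms and quota_peak_ns = 5ms. If a group still
+ * holds 4ms of unconsumed budget at refill time, budget_ns = 4 + 3 =
+ * 7ms exceeds the peak, so 2ms is stored as run_credit_ns and
+ * run_budget_ns is capped at 5ms. At the next refill with 1ms left,
+ * budget_ns = 1 + 3 = 4ms, and the 1ms headroom to the peak is paid
+ * back from the stored credit, yielding a 5ms budget and 1ms of
+ * remaining credit.
+ */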
+
+static void quota_refill_handler(struct xntimer *timer)
+{
+	struct xnsched_quota_group *tg;
+	struct xnthread *thread, *tmp;
+	struct xnsched_quota *qs;
+	struct xnsched *sched;
+
+	qs = container_of(timer, struct xnsched_quota, refill_timer);
+	XENO_BUG_ON(COBALT, list_empty(&qs->groups));
+	sched = container_of(qs, struct xnsched, quota);
+
+	trace_cobalt_schedquota_refill(0);
+
+	list_for_each_entry(tg, &qs->groups, next) {
+		/* Allot a new runtime budget for the group. */
+		replenish_budget(qs, tg);
+
+		if (tg->run_budget_ns == 0 || list_empty(&tg->expired))
+			continue;
+		/*
+		 * For each group living on this CPU, move all expired
+		 * threads back to the runqueue. Since those threads
+		 * were moved out of the runqueue as we were
+		 * considering them for execution, we push them back
+		 * in LIFO order to their respective priority group.
+		 * The expiry queue is FIFO to keep ordering right
+		 * among expired threads.
+		 */
+		list_for_each_entry_safe_reverse(thread, tmp, &tg->expired, quota_expired) {
+			list_del_init(&thread->quota_expired);
+			xnsched_addq(&sched->rt.runnable, thread);
+		}
+	}
+
+	xnsched_set_self_resched(timer->sched);
+}
+
+static void quota_limit_handler(struct xntimer *timer)
+{
+	struct xnsched *sched;
+
+	sched = container_of(timer, struct xnsched, quota.limit_timer);
+	/*
+	 * Force a rescheduling on the return path of the current
+	 * interrupt, so that the budget is re-evaluated for the
+	 * current group in xnsched_quota_pick().
+	 */
+	xnsched_set_self_resched(sched);
+}
+
+static int quota_sum_all(struct xnsched_quota *qs)
+{
+	struct xnsched_quota_group *tg;
+	int sum;
+
+	if (list_empty(&qs->groups))
+		return 0;
+
+	sum = 0;
+	list_for_each_entry(tg, &qs->groups, next)
+		sum += tg->quota_percent;
+
+	return sum;
+}
+
+static void xnsched_quota_init(struct xnsched *sched)
+{
+	char limiter_name[XNOBJECT_NAME_LEN], refiller_name[XNOBJECT_NAME_LEN];
+	struct xnsched_quota *qs = &sched->quota;
+
+	qs->period_ns = CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD * 1000ULL;
+	INIT_LIST_HEAD(&qs->groups);
+
+#ifdef CONFIG_SMP
+	ksformat(refiller_name, sizeof(refiller_name),
+		 "[quota-refill/%u]", sched->cpu);
+	ksformat(limiter_name, sizeof(limiter_name),
+		 "[quota-limit/%u]", sched->cpu);
+#else
+	strcpy(refiller_name, "[quota-refill]");
+	strcpy(limiter_name, "[quota-limit]");
+#endif
+	xntimer_init(&qs->refill_timer,
+		     &nkclock, quota_refill_handler, sched,
+		     XNTIMER_IGRAVITY);
+	xntimer_set_name(&qs->refill_timer, refiller_name);
+
+	xntimer_init(&qs->limit_timer,
+		     &nkclock, quota_limit_handler, sched,
+		     XNTIMER_IGRAVITY);
+	xntimer_set_name(&qs->limit_timer, limiter_name);
+}
+
+static bool xnsched_quota_setparam(struct xnthread *thread,
+				   const union xnsched_policy_param *p)
+{
+	struct xnsched_quota_group *tg;
+	struct xnsched_quota *qs;
+	bool effective;
+
+	xnthread_clear_state(thread, XNWEAK);
+	effective = xnsched_set_effective_priority(thread, p->quota.prio);
+
+	qs = &thread->sched->quota;
+	list_for_each_entry(tg, &qs->groups, next) {
+		if (tg->tgid != p->quota.tgid)
+			continue;
+		if (thread->quota) {
+			/* Dequeued earlier by our caller. */
+			list_del(&thread->quota_next);
+			thread->quota->nr_threads--;
+		}
+
+		trace_cobalt_schedquota_add_thread(tg, thread);
+
+		thread->quota = tg;
+		list_add(&thread->quota_next, &tg->members);
+		tg->nr_threads++;
+		return effective;
+	}
+
+	XENO_BUG(COBALT);
+
+	return false;
+}
+
+static void xnsched_quota_getparam(struct xnthread *thread,
+				   union xnsched_policy_param *p)
+{
+	p->quota.prio = thread->cprio;
+	p->quota.tgid = thread->quota->tgid;
+}
+
+static void xnsched_quota_trackprio(struct xnthread *thread,
+				    const union xnsched_policy_param *p)
+{
+	if (p) {
+		/* We should not cross groups during PI boost. */
+		XENO_WARN_ON(COBALT,
+			     thread->base_class == &xnsched_class_quota &&
+			     thread->quota->tgid != p->quota.tgid);
+		thread->cprio = p->quota.prio;
+	} else
+		thread->cprio = thread->bprio;
+}
+
+static void xnsched_quota_protectprio(struct xnthread *thread, int prio)
+{
+	if (prio > XNSCHED_QUOTA_MAX_PRIO)
+		prio = XNSCHED_QUOTA_MAX_PRIO;
+
+	thread->cprio = prio;
+}
+
+static int xnsched_quota_chkparam(struct xnthread *thread,
+				  const union xnsched_policy_param *p)
+{
+	struct xnsched_quota_group *tg;
+	struct xnsched_quota *qs;
+	int tgid;
+
+	if (p->quota.prio < XNSCHED_QUOTA_MIN_PRIO ||
+	    p->quota.prio > XNSCHED_QUOTA_MAX_PRIO)
+		return -EINVAL;
+
+	tgid = p->quota.tgid;
+	if (tgid < 0 || tgid >= CONFIG_XENO_OPT_SCHED_QUOTA_NR_GROUPS)
+		return -EINVAL;
+
+	/*
+	 * The group must be managed on the same CPU the thread
+	 * currently runs on.
+	 */
+	qs = &thread->sched->quota;
+	list_for_each_entry(tg, &qs->groups, next) {
+		if (tg->tgid == tgid)
+			return 0;
+	}
+
+	/*
+	 * If that group exists nevertheless, we give userland a
+	 * specific error code.
+	 */
+	if (test_bit(tgid, group_map))
+		return -EPERM;
+
+	return -EINVAL;
+}
+
+static void xnsched_quota_forget(struct xnthread *thread)
+{
+	trace_cobalt_schedquota_remove_thread(thread->quota, thread);
+
+	thread->quota->nr_threads--;
+	XENO_BUG_ON(COBALT, thread->quota->nr_threads < 0);
+	list_del(&thread->quota_next);
+	thread->quota = NULL;
+}
+
+static void xnsched_quota_kick(struct xnthread *thread)
+{
+	struct xnsched_quota_group *tg = thread->quota;
+	struct xnsched *sched = thread->sched;
+
+	/*
+	 * Allow a kicked thread to be elected for running until it
+	 * relaxes, even if the group it belongs to lacks runtime
+	 * budget.
+	 */
+	if (tg->run_budget_ns == 0 && !list_empty(&thread->quota_expired)) {
+		list_del_init(&thread->quota_expired);
+		xnsched_addq_tail(&sched->rt.runnable, thread);
+	}
+}
+
+static inline int thread_is_runnable(struct xnthread *thread)
+{
+	return thread->quota->run_budget_ns > 0 ||
+		xnthread_test_info(thread, XNKICKED);
+}
+
+static void xnsched_quota_enqueue(struct xnthread *thread)
+{
+	struct xnsched_quota_group *tg = thread->quota;
+	struct xnsched *sched = thread->sched;
+
+	if (!thread_is_runnable(thread))
+		list_add_tail(&thread->quota_expired, &tg->expired);
+	else
+		xnsched_addq_tail(&sched->rt.runnable, thread);
+
+	tg->nr_active++;
+}
+
+static void xnsched_quota_dequeue(struct xnthread *thread)
+{
+	struct xnsched_quota_group *tg = thread->quota;
+	struct xnsched *sched = thread->sched;
+
+	if (!list_empty(&thread->quota_expired))
+		list_del_init(&thread->quota_expired);
+	else
+		xnsched_delq(&sched->rt.runnable, thread);
+
+	tg->nr_active--;
+}
+
+static void xnsched_quota_requeue(struct xnthread *thread)
+{
+	struct xnsched_quota_group *tg = thread->quota;
+	struct xnsched *sched = thread->sched;
+
+	if (!thread_is_runnable(thread))
+		list_add(&thread->quota_expired, &tg->expired);
+	else
+		xnsched_addq(&sched->rt.runnable, thread);
+
+	tg->nr_active++;
+}
+
+static struct xnthread *xnsched_quota_pick(struct xnsched *sched)
+{
+	struct xnthread *next, *curr = sched->curr;
+	struct xnsched_quota *qs = &sched->quota;
+	struct xnsched_quota_group *otg, *tg;
+	xnticks_t now, elapsed;
+	int ret;
+
+	now = xnclock_read_monotonic(&nkclock);
+	otg = curr->quota;
+	if (otg == NULL)
+		goto pick;
+	/*
+	 * Charge the time consumed by the outgoing thread to the
+	 * group it belongs to.
+	 */
+	elapsed = now - otg->run_start_ns;
+	if (elapsed < otg->run_budget_ns)
+		otg->run_budget_ns -= elapsed;
+	else
+		otg->run_budget_ns = 0;
+pick:
+	next = xnsched_getq(&sched->rt.runnable);
+	if (next == NULL) {
+		xntimer_stop(&qs->limit_timer);
+		return NULL;
+	}
+
+	/*
+	 * As we basically piggyback on the SCHED_FIFO runqueue, make
+	 * sure to detect non-quota threads.
+	 */
+	tg = next->quota;
+	if (tg == NULL)
+		return next;
+
+	tg->run_start_ns = now;
+
+	/*
+	 * Don't consider the budget if kicked: we have to allow this
+	 * thread to run until it eventually relaxes.
+	 */
+	if (xnthread_test_info(next, XNKICKED)) {
+		xntimer_stop(&qs->limit_timer);
+		goto out;
+	}
+
+	if (tg->run_budget_ns == 0) {
+		/* Flush expired group members as we go. */
+		list_add_tail(&next->quota_expired, &tg->expired);
+		goto pick;
+	}
+
+	if (otg == tg && xntimer_running_p(&qs->limit_timer))
+		/* Same group, leave the running timer untouched. */
+		goto out;
+
+	/* Arm limit timer for the new running group. */
+	ret = xntimer_start(&qs->limit_timer, now + tg->run_budget_ns,
+			    XN_INFINITE, XN_ABSOLUTE);
+	if (ret) {
+		/* Budget exhausted: deactivate this group. */
+		tg->run_budget_ns = 0;
+		list_add_tail(&next->quota_expired, &tg->expired);
+		goto pick;
+	}
+out:
+	tg->nr_active--;
+
+	return next;
+}
+
+static void xnsched_quota_migrate(struct xnthread *thread, struct xnsched *sched)
+{
+	union xnsched_policy_param param;
+	/*
+	 * Runtime quota groups are defined per-CPU, so leaving the
+	 * current CPU means exiting the group. We do this by moving
+	 * the target thread to the plain RT class.
+	 */
+	param.rt.prio = thread->cprio;
+	__xnthread_set_schedparam(thread, &xnsched_class_rt, &param);
+}
+
+/**
+ * @ingroup cobalt_core_sched
+ * @defgroup sched_quota SCHED_QUOTA scheduling policy
+ *
+ * The SCHED_QUOTA policy enforces a limitation on the CPU consumption
+ * of threads over a globally defined period, known as the quota
+ * interval. This is done by pooling threads with common requirements
+ * in groups, and giving each group a share of the global period
+ * (CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD).
+ *
+ * When threads have entirely consumed the quota allotted to the group
+ * they belong to, the latter is suspended as a whole, until the next
+ * quota interval starts. At this point, a new runtime budget is
+ * given to each group, in accordance with its share.
+ *
+ * @{
+ */
+int xnsched_quota_create_group(struct xnsched_quota_group *tg,
+			       struct xnsched *sched,
+			       int *quota_sum_r)
+{
+	int tgid, nr_groups = CONFIG_XENO_OPT_SCHED_QUOTA_NR_GROUPS;
+	struct xnsched_quota *qs = &sched->quota;
+
+	atomic_only();
+
+	tgid = find_first_zero_bit(group_map, nr_groups);
+	if (tgid >= nr_groups)
+		return -ENOSPC;
+
+	__set_bit(tgid, group_map);
+	tg->tgid = tgid;
+	tg->sched = sched;
+	tg->run_budget_ns = qs->period_ns;
+	tg->run_credit_ns = 0;
+	tg->quota_percent = 100;
+	tg->quota_peak_percent = 100;
+	tg->quota_ns = qs->period_ns;
+	tg->quota_peak_ns = qs->period_ns;
+	tg->nr_active = 0;
+	tg->nr_threads = 0;
+	INIT_LIST_HEAD(&tg->members);
+	INIT_LIST_HEAD(&tg->expired);
+
+	trace_cobalt_schedquota_create_group(tg);
+
+	if (list_empty(&qs->groups))
+		xntimer_start(&qs->refill_timer,
+			      qs->period_ns, qs->period_ns, XN_RELATIVE);
+
+	list_add(&tg->next, &qs->groups);
+	*quota_sum_r = quota_sum_all(qs);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnsched_quota_create_group);
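+
+/*
+ * Illustrative sketch (hypothetical caller, not part of this patch):
+ * creating a group on a given scheduler slot "sched" and capping it
+ * at 20% of the quota interval with a 30% peak. Both calls are
+ * atomic_only(), so the caller is assumed to hold the core lock with
+ * interrupts off.
+ *
+ *	struct xnsched_quota_group *tg = ...;
+ *	int quota_sum, ret;
+ *
+ *	ret = xnsched_quota_create_group(tg, sched, &quota_sum);
+ *	if (ret == 0)
+ *		xnsched_quota_set_limit(tg, 20, 30, &quota_sum);
+ */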
+
+int xnsched_quota_destroy_group(struct xnsched_quota_group *tg,
+				int force, int *quota_sum_r)
+{
+	struct xnsched_quota *qs = &tg->sched->quota;
+	union xnsched_policy_param param;
+	struct xnthread *thread, *tmp;
+
+	atomic_only();
+
+	if (!list_empty(&tg->members)) {
+		if (!force)
+			return -EBUSY;
+		/* Move group members to the rt class. */
+		list_for_each_entry_safe(thread, tmp, &tg->members, quota_next) {
+			param.rt.prio = thread->cprio;
+			__xnthread_set_schedparam(thread, &xnsched_class_rt, &param);
+		}
+	}
+
+	trace_cobalt_schedquota_destroy_group(tg);
+
+	list_del(&tg->next);
+	__clear_bit(tg->tgid, group_map);
+
+	if (list_empty(&qs->groups))
+		xntimer_stop(&qs->refill_timer);
+
+	if (quota_sum_r)
+		*quota_sum_r = quota_sum_all(qs);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnsched_quota_destroy_group);
+
+void xnsched_quota_set_limit(struct xnsched_quota_group *tg,
+			     int quota_percent, int quota_peak_percent,
+			     int *quota_sum_r)
+{
+	struct xnsched *sched = tg->sched;
+	struct xnsched_quota *qs = &sched->quota;
+	xnticks_t old_quota_ns = tg->quota_ns;
+	struct xnthread *thread, *tmp, *curr;
+	xnticks_t now, elapsed, consumed;
+
+	atomic_only();
+
+	trace_cobalt_schedquota_set_limit(tg, quota_percent,
+					  quota_peak_percent);
+
+	if (quota_percent < 0 || quota_percent > 100) { /* Quota off. */
+		quota_percent = 100;
+		tg->quota_ns = qs->period_ns;
+	} else
+		tg->quota_ns = xnarch_div64(qs->period_ns * quota_percent, 100);
+
+	if (quota_peak_percent < quota_percent)
+		quota_peak_percent = quota_percent;
+
+	if (quota_peak_percent < 0 || quota_peak_percent > 100) {
+		quota_peak_percent = 100;
+		tg->quota_peak_ns = qs->period_ns;
+	} else
+		tg->quota_peak_ns = xnarch_div64(qs->period_ns * quota_peak_percent, 100);
+
+	tg->quota_percent = quota_percent;
+	tg->quota_peak_percent = quota_peak_percent;
+
+	curr = sched->curr;
+	if (curr->quota == tg &&
+	    xnthread_test_state(curr, XNREADY|XNTHREAD_BLOCK_BITS) == 0) {
+		now = xnclock_read_monotonic(&nkclock);
+
+		elapsed = now - tg->run_start_ns;
+		if (elapsed < tg->run_budget_ns)
+			tg->run_budget_ns -= elapsed;
+		else
+			tg->run_budget_ns = 0;
+
+		tg->run_start_ns = now;
+
+		xntimer_stop(&qs->limit_timer);
+	}
+
+	if (tg->run_budget_ns <= old_quota_ns)
+		consumed = old_quota_ns - tg->run_budget_ns;
+	else
+		consumed = 0;
+	if (tg->quota_ns >= consumed)
+		tg->run_budget_ns = tg->quota_ns - consumed;
+	else
+		tg->run_budget_ns = 0;
+
+	tg->run_credit_ns = 0;	/* Drop accumulated credit. */
+
+	*quota_sum_r = quota_sum_all(qs);
+
+	if (tg->run_budget_ns > 0) {
+		list_for_each_entry_safe_reverse(thread, tmp, &tg->expired,
+						 quota_expired) {
+			list_del_init(&thread->quota_expired);
+			xnsched_addq(&sched->rt.runnable, thread);
+		}
+	}
+
+	/*
+	 * Apply the new budget immediately, in case a member of this
+	 * group is currently running.
+	 */
+	xnsched_set_resched(sched);
+	xnsched_run();
+}
+EXPORT_SYMBOL_GPL(xnsched_quota_set_limit);
+
+struct xnsched_quota_group *
+xnsched_quota_find_group(struct xnsched *sched, int tgid)
+{
+	struct xnsched_quota_group *tg;
+
+	atomic_only();
+
+	if (list_empty(&sched->quota.groups))
+		return NULL;
+
+	list_for_each_entry(tg, &sched->quota.groups, next) {
+		if (tg->tgid == tgid)
+			return tg;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(xnsched_quota_find_group);
+
+int xnsched_quota_sum_all(struct xnsched *sched)
+{
+	struct xnsched_quota *qs = &sched->quota;
+
+	atomic_only();
+
+	return quota_sum_all(qs);
+}
+EXPORT_SYMBOL_GPL(xnsched_quota_sum_all);
+
+/** @} */
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+struct xnvfile_directory sched_quota_vfroot;
+
+struct vfile_sched_quota_priv {
+	struct xnthread *curr;
+};
+
+struct vfile_sched_quota_data {
+	int cpu;
+	pid_t pid;
+	int prio;
+	int tgid;
+	xnticks_t budget;
+	char name[XNOBJECT_NAME_LEN];
+};
+
+static struct xnvfile_snapshot_ops vfile_sched_quota_ops;
+
+static struct xnvfile_snapshot vfile_sched_quota = {
+	.privsz = sizeof(struct vfile_sched_quota_priv),
+	.datasz = sizeof(struct vfile_sched_quota_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_sched_quota_ops,
+};
+
+static int vfile_sched_quota_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_sched_quota_priv *priv = xnvfile_iterator_priv(it);
+	int nrthreads = xnsched_class_quota.nthreads;
+
+	if (nrthreads == 0)
+		return -ESRCH;
+
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+
+	return nrthreads;
+}
+
+static int vfile_sched_quota_next(struct xnvfile_snapshot_iterator *it,
+				  void *data)
+{
+	struct vfile_sched_quota_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_sched_quota_data *p = data;
+	struct xnthread *thread;
+
+	if (priv->curr == NULL)
+		return 0;	/* All done. */
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	if (thread->base_class != &xnsched_class_quota)
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = xnsched_cpu(thread->sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->tgid = thread->quota->tgid;
+	p->prio = thread->cprio;
+	p->budget = thread->quota->run_budget_ns;
+
+	return 1;
+}
+
+static int vfile_sched_quota_show(struct xnvfile_snapshot_iterator *it,
+				  void *data)
+{
+	struct vfile_sched_quota_data *p = data;
+	char buf[16];
+
+	if (p == NULL)
+		xnvfile_printf(it, "%-3s  %-6s %-4s %-4s %-10s %s\n",
+			       "CPU", "PID", "TGID", "PRI", "BUDGET", "NAME");
+	else {
+		xntimer_format_time(p->budget, buf, sizeof(buf));
+		xnvfile_printf(it, "%3u  %-6d %-4d %-4d %-10s %s\n",
+			       p->cpu,
+			       p->pid,
+			       p->tgid,
+			       p->prio,
+			       buf,
+			       p->name);
+	}
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_sched_quota_ops = {
+	.rewind = vfile_sched_quota_rewind,
+	.next = vfile_sched_quota_next,
+	.show = vfile_sched_quota_show,
+};
+
+static int xnsched_quota_init_vfile(struct xnsched_class *schedclass,
+				    struct xnvfile_directory *vfroot)
+{
+	int ret;
+
+	ret = xnvfile_init_dir(schedclass->name, &sched_quota_vfroot, vfroot);
+	if (ret)
+		return ret;
+
+	return xnvfile_init_snapshot("threads", &vfile_sched_quota,
+				     &sched_quota_vfroot);
+}
+
+static void xnsched_quota_cleanup_vfile(struct xnsched_class *schedclass)
+{
+	xnvfile_destroy_snapshot(&vfile_sched_quota);
+	xnvfile_destroy_dir(&sched_quota_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct xnsched_class xnsched_class_quota = {
+	.sched_init		=	xnsched_quota_init,
+	.sched_enqueue		=	xnsched_quota_enqueue,
+	.sched_dequeue		=	xnsched_quota_dequeue,
+	.sched_requeue		=	xnsched_quota_requeue,
+	.sched_pick		=	xnsched_quota_pick,
+	.sched_tick		=	NULL,
+	.sched_rotate		=	NULL,
+	.sched_migrate		=	xnsched_quota_migrate,
+	.sched_chkparam		=	xnsched_quota_chkparam,
+	.sched_setparam		=	xnsched_quota_setparam,
+	.sched_getparam		=	xnsched_quota_getparam,
+	.sched_trackprio	=	xnsched_quota_trackprio,
+	.sched_protectprio	=	xnsched_quota_protectprio,
+	.sched_forget		=	xnsched_quota_forget,
+	.sched_kick		=	xnsched_quota_kick,
+#ifdef CONFIG_XENO_OPT_VFILE
+	.sched_init_vfile	=	xnsched_quota_init_vfile,
+	.sched_cleanup_vfile	=	xnsched_quota_cleanup_vfile,
+#endif
+	.weight			=	XNSCHED_CLASS_WEIGHT(3),
+	.policy			=	SCHED_QUOTA,
+	.name			=	"quota"
+};
+EXPORT_SYMBOL_GPL(xnsched_class_quota);
+++ linux-patched/kernel/xenomai/sched-rt.c	2022-03-21 12:58:28.810894607 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/time.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/sched.h>
+
+static void xnsched_rt_init(struct xnsched *sched)
+{
+	xnsched_initq(&sched->rt.runnable);
+}
+
+static void xnsched_rt_requeue(struct xnthread *thread)
+{
+	/*
+	 * Put back at same place: i.e. requeue to head of current
+	 * priority group (i.e. LIFO, used for preemption handling).
+	 */
+	__xnsched_rt_requeue(thread);
+}
+
+static void xnsched_rt_enqueue(struct xnthread *thread)
+{
+	/*
+	 * Enqueue for next pick: i.e. move to end of current priority
+	 * group (i.e. FIFO).
+	 */
+	__xnsched_rt_enqueue(thread);
+}
+
+static void xnsched_rt_dequeue(struct xnthread *thread)
+{
+	/*
+	 * Pull from the runnable thread queue.
+	 */
+	__xnsched_rt_dequeue(thread);
+}
+
+static void xnsched_rt_rotate(struct xnsched *sched,
+			      const union xnsched_policy_param *p)
+{
+	struct xnthread *thread, *curr;
+
+	if (xnsched_emptyq_p(&sched->rt.runnable))
+		return;	/* No runnable thread in this class. */
+
+	curr = sched->curr;
+
+	if (p->rt.prio == XNSCHED_RUNPRIO)
+		thread = curr;
+	else {
+		thread = xnsched_findq(&sched->rt.runnable, p->rt.prio);
+		if (thread == NULL)
+			return;
+	}
+
+	/*
+	 * In case we picked the current thread, we have to make sure
+	 * not to move it back to the run queue if it was blocked
+	 * before we were called. The same goes if the current thread
+	 * holds the scheduler lock.
+	 */
+	if (thread != curr ||
+	    (!xnthread_test_state(curr, XNTHREAD_BLOCK_BITS) &&
+	     curr->lock_count == 0))
+		xnsched_putback(thread);
+}
+
+void xnsched_rt_tick(struct xnsched *sched)
+{
+	/*
+	 * The round-robin time credit is only consumed by a running
+	 * thread that neither holds the scheduler lock nor was
+	 * blocked before entering this callback. As the time slice is
+	 * exhausted for the running thread, move it back to the
+	 * run queue at the end of its priority group.
+	 */
+	xnsched_putback(sched->curr);
+}
+
+static bool xnsched_rt_setparam(struct xnthread *thread,
+				const union xnsched_policy_param *p)
+{
+	return __xnsched_rt_setparam(thread, p);
+}
+
+static void xnsched_rt_getparam(struct xnthread *thread,
+				union xnsched_policy_param *p)
+{
+	__xnsched_rt_getparam(thread, p);
+}
+
+static void xnsched_rt_trackprio(struct xnthread *thread,
+				 const union xnsched_policy_param *p)
+{
+	__xnsched_rt_trackprio(thread, p);
+}
+
+static void xnsched_rt_protectprio(struct xnthread *thread, int prio)
+{
+	__xnsched_rt_protectprio(thread, prio);
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+struct xnvfile_directory sched_rt_vfroot;
+
+struct vfile_sched_rt_priv {
+	struct xnthread *curr;
+};
+
+struct vfile_sched_rt_data {
+	int cpu;
+	pid_t pid;
+	char name[XNOBJECT_NAME_LEN];
+	xnticks_t period;
+	int cprio;
+};
+
+static struct xnvfile_snapshot_ops vfile_sched_rt_ops;
+
+static struct xnvfile_snapshot vfile_sched_rt = {
+	.privsz = sizeof(struct vfile_sched_rt_priv),
+	.datasz = sizeof(struct vfile_sched_rt_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_sched_rt_ops,
+};
+
+static int vfile_sched_rt_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_sched_rt_priv *priv = xnvfile_iterator_priv(it);
+	int nrthreads = xnsched_class_rt.nthreads;
+
+	if (nrthreads == 0)
+		return -ESRCH;
+
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+
+	return nrthreads;
+}
+
+static int vfile_sched_rt_next(struct xnvfile_snapshot_iterator *it,
+			       void *data)
+{
+	struct vfile_sched_rt_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_sched_rt_data *p = data;
+	struct xnthread *thread;
+
+	if (priv->curr == NULL)
+		return 0;	/* All done. */
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	if (thread->base_class != &xnsched_class_rt ||
+	    xnthread_test_state(thread, XNWEAK))
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = xnsched_cpu(thread->sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->cprio = thread->cprio;
+	p->period = xnthread_get_period(thread);
+
+	return 1;
+}
+
+static int vfile_sched_rt_show(struct xnvfile_snapshot_iterator *it,
+			       void *data)
+{
+	struct vfile_sched_rt_data *p = data;
+	char pribuf[16], ptbuf[16];
+
+	if (p == NULL)
+		xnvfile_printf(it, "%-3s  %-6s %-8s %-10s %s\n",
+			       "CPU", "PID", "PRI", "PERIOD", "NAME");
+	else {
+		ksformat(pribuf, sizeof(pribuf), "%3d", p->cprio);
+		xntimer_format_time(p->period, ptbuf, sizeof(ptbuf));
+		xnvfile_printf(it, "%3u  %-6d %-8s %-10s %s\n",
+			       p->cpu,
+			       p->pid,
+			       pribuf,
+			       ptbuf,
+			       p->name);
+	}
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_sched_rt_ops = {
+	.rewind = vfile_sched_rt_rewind,
+	.next = vfile_sched_rt_next,
+	.show = vfile_sched_rt_show,
+};
+
+static int xnsched_rt_init_vfile(struct xnsched_class *schedclass,
+				 struct xnvfile_directory *vfroot)
+{
+	int ret;
+
+	ret = xnvfile_init_dir(schedclass->name, &sched_rt_vfroot, vfroot);
+	if (ret)
+		return ret;
+
+	return xnvfile_init_snapshot("threads", &vfile_sched_rt,
+				     &sched_rt_vfroot);
+}
+
+static void xnsched_rt_cleanup_vfile(struct xnsched_class *schedclass)
+{
+	xnvfile_destroy_snapshot(&vfile_sched_rt);
+	xnvfile_destroy_dir(&sched_rt_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct xnsched_class xnsched_class_rt = {
+	.sched_init		=	xnsched_rt_init,
+	.sched_enqueue		=	xnsched_rt_enqueue,
+	.sched_dequeue		=	xnsched_rt_dequeue,
+	.sched_requeue		=	xnsched_rt_requeue,
+	.sched_pick		=	xnsched_rt_pick,
+	.sched_tick		=	xnsched_rt_tick,
+	.sched_rotate		=	xnsched_rt_rotate,
+	.sched_forget		=	NULL,
+	.sched_kick		=	NULL,
+	.sched_declare		=	NULL,
+	.sched_setparam		=	xnsched_rt_setparam,
+	.sched_trackprio	=	xnsched_rt_trackprio,
+	.sched_protectprio	=	xnsched_rt_protectprio,
+	.sched_getparam		=	xnsched_rt_getparam,
+#ifdef CONFIG_XENO_OPT_VFILE
+	.sched_init_vfile	=	xnsched_rt_init_vfile,
+	.sched_cleanup_vfile	=	xnsched_rt_cleanup_vfile,
+#endif
+	.weight			=	XNSCHED_CLASS_WEIGHT(4),
+	.policy			=	SCHED_FIFO,
+	.name			=	"rt"
+};
+EXPORT_SYMBOL_GPL(xnsched_class_rt);
+++ linux-patched/kernel/xenomai/time.c	2022-03-21 12:58:28.803894676 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/clock.c	1970-01-01 01:00:00.000000000 +0100
+// SPDX-License-Identifier: GPL-2.0
+
+#include <asm-generic/xenomai/syscall.h>
+#include <cobalt/kernel/time.h>
+#include <linux/compat.h>
+
+int cobalt_get_timespec64(struct timespec64 *ts,
+			  const struct __kernel_timespec __user *uts)
+{
+	struct __kernel_timespec kts;
+	int ret;
+
+	ret = cobalt_copy_from_user(&kts, uts, sizeof(kts));
+	if (ret)
+		return -EFAULT;
+
+	ts->tv_sec = kts.tv_sec;
+
+	/* Zero out the padding in compat mode */
+	if (in_compat_syscall())
+		kts.tv_nsec &= 0xFFFFFFFFUL;
+
+	/* In 32-bit mode, this drops the padding */
+	ts->tv_nsec = kts.tv_nsec;
+
+	return 0;
+}
+
+int cobalt_put_timespec64(const struct timespec64 *ts,
+		   struct __kernel_timespec __user *uts)
+{
+	struct __kernel_timespec kts = {
+		.tv_sec = ts->tv_sec,
+		.tv_nsec = ts->tv_nsec
+	};
+
+	return cobalt_copy_to_user(uts, &kts, sizeof(kts)) ? -EFAULT : 0;
+}
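+
+/*
+ * Illustrative sketch (hypothetical handler, not part of this patch):
+ * a syscall receiving a user-supplied __kernel_timespec pointer u_ts
+ * would convert it with the helper above before using it internally:
+ *
+ *	struct timespec64 ts;
+ *
+ *	if (cobalt_get_timespec64(&ts, u_ts))
+ *		return -EFAULT;
+ *	// ... use ts.tv_sec / ts.tv_nsec ...
+ */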
+++ linux-patched/kernel/xenomai/clock.c	2022-03-21 12:58:28.792894783 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/registry.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006-2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/percpu.h>
+#include <linux/errno.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/arith.h>
+#include <cobalt/kernel/vdso.h>
+#include <cobalt/uapi/time.h>
+#include <asm/xenomai/calibration.h>
+#include <trace/events/cobalt-core.h>
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_clock Clock services
+ *
+ * @{
+ */
+
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+
+static struct xnarch_u32frac bln_frac;
+
+unsigned long long xnclock_divrem_billion(unsigned long long value,
+					  unsigned long *rem)
+{
+	unsigned long long q;
+	unsigned r;
+
+	q = xnarch_nodiv_ullimd(value, bln_frac.frac, bln_frac.integ);
+	r = value - q * 1000000000;
+	if (r >= 1000000000) {
+		++q;
+		r -= 1000000000;
+	}
+	*rem = r;
+	return q;
+}
+
+#else
+
+unsigned long long xnclock_divrem_billion(unsigned long long value,
+					  unsigned long *rem)
+{
+	return xnarch_ulldiv(value, 1000000000, rem);
+
+}
+
+#endif /* !XNARCH_HAVE_NODIV_LLIMD */
+
+EXPORT_SYMBOL_GPL(xnclock_divrem_billion);
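+
+/*
+ * Illustrative use (hypothetical values): splitting a nanosecond count
+ * into seconds and nanoseconds without a native 64-bit division:
+ *
+ *	unsigned long rem;
+ *	unsigned long long secs;
+ *
+ *	secs = xnclock_divrem_billion(2500000000ULL, &rem);
+ *	// secs == 2, rem == 500000000
+ */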
+
+DEFINE_PRIVATE_XNLOCK(ratelimit_lock);
+
+int __xnclock_ratelimit(struct xnclock_ratelimit_state *rs, const char *func)
+{
+	spl_t s;
+	int ret;
+
+	if (!rs->interval)
+		return 1;
+
+	xnlock_get_irqsave(&ratelimit_lock, s);
+
+	if (!rs->begin)
+		rs->begin = xnclock_read_realtime(&nkclock);
+	if (xnclock_read_realtime(&nkclock) >= rs->begin + rs->interval) {
+		if (rs->missed)
+			printk(KERN_WARNING "%s: %d callbacks suppressed\n",
+			       func, rs->missed);
+		rs->begin   = 0;
+		rs->printed = 0;
+		rs->missed  = 0;
+	}
+	if (rs->burst && rs->burst > rs->printed) {
+		rs->printed++;
+		ret = 1;
+	} else {
+		rs->missed++;
+		ret = 0;
+	}
+	xnlock_put_irqrestore(&ratelimit_lock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__xnclock_ratelimit);
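+
+/*
+ * Usage sketch for a caller-defined limiter (assumption: the caller
+ * embeds its own state; .interval is compared against
+ * xnclock_read_realtime(&nkclock), i.e. expressed in nanoseconds):
+ *
+ *	static struct xnclock_ratelimit_state rs = {
+ *		.interval = 5000000000LL,
+ *		.burst = 10,
+ *	};
+ *
+ *	if (__xnclock_ratelimit(&rs, __func__))
+ *		printk(KERN_WARNING "noisy condition detected\n");
+ *
+ * At most 10 messages are printed per 5s window, the rest being
+ * accounted as suppressed callbacks.
+ */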
+
+void xnclock_core_local_shot(struct xnsched *sched)
+{
+	struct xntimerdata *tmd;
+	struct xntimer *timer;
+	xnsticks_t delay;
+	xntimerh_t *h;
+
+	/*
+	 * Do not reprogram locally when inside the tick handler -
+	 * will be done on exit anyway. Also exit if there is no
+	 * pending timer.
+	 */
+	if (sched->status & XNINTCK)
+		return;
+
+	/*
+	 * Assume the core clock device always has percpu semantics in
+	 * SMP.
+	 */
+	tmd = xnclock_this_timerdata(&nkclock);
+	h = xntimerq_head(&tmd->q);
+	if (h == NULL) {
+		sched->lflags |= XNIDLE;
+		return;
+	}
+
+	/*
+	 * Here we try to defer the host tick heading the timer queue,
+	 * so that it does not preempt a real-time activity uselessly,
+	 * in two cases:
+	 *
+	 * 1) a rescheduling is pending for the current CPU. We may
+	 * assume that a real-time thread is about to resume, so we
+	 * want to move the host tick out of the way until the host
+	 * kernel resumes, unless no other timer is outstanding.
+	 *
+	 * 2) the current thread is running in primary mode, in which
+	 * case we may also defer the host tick until the host kernel
+	 * resumes.
+	 *
+	 * The host tick deferral is cleared whenever Xenomai is about
+	 * to yield control to the host kernel (see ___xnsched_run()),
+	 * or a timer with an earlier timeout date is scheduled,
+	 * whichever comes first.
+	 */
+	sched->lflags &= ~(XNHDEFER|XNIDLE|XNTSTOP);
+	timer = container_of(h, struct xntimer, aplink);
+	if (unlikely(timer == &sched->htimer)) {
+		if (xnsched_resched_p(sched) ||
+		    !xnthread_test_state(sched->curr, XNROOT)) {
+			h = xntimerq_second(&tmd->q, h);
+			if (h) {
+				sched->lflags |= XNHDEFER;
+				timer = container_of(h, struct xntimer, aplink);
+			}
+		}
+	}
+
+	delay = xntimerh_date(&timer->aplink) - xnclock_core_read_raw();
+	if (delay < 0)
+		delay = 0;
+	else if (delay > ULONG_MAX)
+		delay = ULONG_MAX;
+
+	xntrace_tick((unsigned)delay);
+
+	pipeline_set_timer_shot(delay);
+}
+
+#ifdef CONFIG_SMP
+void xnclock_core_remote_shot(struct xnsched *sched)
+{
+	pipeline_send_timer_ipi(cpumask_of(xnsched_cpu(sched)));
+}
+#endif
+
+static void adjust_timer(struct xntimer *timer, xntimerq_t *q,
+			 xnsticks_t delta)
+{
+	struct xnclock *clock = xntimer_clock(timer);
+	xnticks_t period, div;
+	xnsticks_t diff;
+
+	xntimerh_date(&timer->aplink) -= delta;
+
+	if (xntimer_periodic_p(timer) == 0)
+		goto enqueue;
+
+	timer->start_date -= delta;
+	period = xntimer_interval(timer);
+	diff = xnclock_ticks_to_ns(clock,
+		xnclock_read_raw(clock) - xntimer_expiry(timer));
+
+	if ((xnsticks_t)(diff - period) >= 0) {
+		/*
+		 * The timer should have ticked several times before
+		 * now. Instead of calling timer->handler repeatedly,
+		 * we change the timer date without changing its
+		 * pexpect, so that the timer ticks only once and the
+		 * lost ticks are counted as overruns.
+		 */
+		div = xnarch_div64(diff, period);
+		timer->periodic_ticks += div;
+		xntimer_update_date(timer);
+	} else if (delta < 0
+		   && (timer->status & XNTIMER_FIRED)
+		   && (xnsticks_t) (diff + period) <= 0) {
+		/*
+		 * The timer is periodic and NOT waiting for its first
+		 * shot, so we make it tick sooner than its original
+		 * date. Otherwise, after adjusting the clock to an
+		 * earlier date, periodic timers would not tick until
+		 * the original date had passed.
+		 */
+		div = xnarch_div64(-diff, period);
+		timer->periodic_ticks -= div;
+		timer->pexpect_ticks -= div;
+		xntimer_update_date(timer);
+	}
+
+enqueue:
+	xntimer_enqueue(timer, q);
+}
+
+void xnclock_apply_offset(struct xnclock *clock, xnsticks_t delta_ns)
+{
+	struct xntimer *timer, *tmp;
+	struct list_head adjq;
+	struct xnsched *sched;
+	xnsticks_t delta;
+	xntimerq_it_t it;
+	unsigned int cpu;
+	xntimerh_t *h;
+	xntimerq_t *q;
+
+	atomic_only();
+
+	/*
+	 * The (real-time) epoch just changed for the clock. Since
+	 * timeout dates of timers are expressed as monotonic ticks
+	 * internally, we need to apply the new offset to the
+	 * monotonic clock to all outstanding timers based on the
+	 * affected clock.
+	 */
+	INIT_LIST_HEAD(&adjq);
+	delta = xnclock_ns_to_ticks(clock, delta_ns);
+
+	for_each_online_cpu(cpu) {
+		sched = xnsched_struct(cpu);
+		q = &xnclock_percpu_timerdata(clock, cpu)->q;
+
+		for (h = xntimerq_it_begin(q, &it); h;
+		     h = xntimerq_it_next(q, &it, h)) {
+			timer = container_of(h, struct xntimer, aplink);
+			if (timer->status & XNTIMER_REALTIME)
+				list_add_tail(&timer->adjlink, &adjq);
+		}
+
+		if (list_empty(&adjq))
+			continue;
+
+		list_for_each_entry_safe(timer, tmp, &adjq, adjlink) {
+			list_del(&timer->adjlink);
+			xntimer_dequeue(timer, q);
+			adjust_timer(timer, q, delta);
+		}
+
+		if (sched != xnsched_current())
+			xnclock_remote_shot(clock, sched);
+		else
+			xnclock_program_shot(clock, sched);
+	}
+}
+EXPORT_SYMBOL_GPL(xnclock_apply_offset);
+
+void xnclock_set_wallclock(xnticks_t epoch_ns)
+{
+	xnsticks_t old_offset_ns, offset_ns;
+	spl_t s;
+
+	/*
+	 * The epoch of CLOCK_REALTIME just changed. Since timeouts
+	 * are expressed as monotonic ticks, we need to apply the
+	 * wallclock-to-monotonic offset to all outstanding timers
+	 * based on this clock.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+	old_offset_ns = nkclock.wallclock_offset;
+	offset_ns = (xnsticks_t)(epoch_ns - xnclock_core_read_monotonic());
+	nkclock.wallclock_offset = offset_ns;
+	nkvdso->wallclock_offset = offset_ns;
+	xnclock_apply_offset(&nkclock, offset_ns - old_offset_ns);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnclock_set_wallclock);
+
+xnticks_t xnclock_core_read_monotonic(void)
+{
+	return xnclock_core_ticks_to_ns(xnclock_core_read_raw());
+}
+EXPORT_SYMBOL_GPL(xnclock_core_read_monotonic);
+
+#ifdef CONFIG_XENO_OPT_STATS
+
+static struct xnvfile_directory timerlist_vfroot;
+
+static struct xnvfile_snapshot_ops timerlist_ops;
+
+struct vfile_clock_priv {
+	struct xntimer *curr;
+};
+
+struct vfile_clock_data {
+	int cpu;
+	unsigned int scheduled;
+	unsigned int fired;
+	xnticks_t timeout;
+	xnticks_t interval;
+	unsigned long status;
+	char name[XNOBJECT_NAME_LEN];
+};
+
+static int timerlist_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_clock_priv *priv = xnvfile_iterator_priv(it);
+	struct xnclock *clock = xnvfile_priv(it->vfile);
+
+	if (list_empty(&clock->timerq))
+		return -ESRCH;
+
+	priv->curr = list_first_entry(&clock->timerq, struct xntimer, next_stat);
+
+	return clock->nrtimers;
+}
+
+static int timerlist_next(struct xnvfile_snapshot_iterator *it, void *data)
+{
+	struct vfile_clock_priv *priv = xnvfile_iterator_priv(it);
+	struct xnclock *clock = xnvfile_priv(it->vfile);
+	struct vfile_clock_data *p = data;
+	struct xntimer *timer;
+
+	if (priv->curr == NULL)
+		return 0;
+
+	timer = priv->curr;
+	if (list_is_last(&timer->next_stat, &clock->timerq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_entry(timer->next_stat.next,
+					struct xntimer, next_stat);
+
+	if (clock == &nkclock && xnstat_counter_get(&timer->scheduled) == 0)
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = xnsched_cpu(xntimer_sched(timer));
+	p->scheduled = xnstat_counter_get(&timer->scheduled);
+	p->fired = xnstat_counter_get(&timer->fired);
+	p->timeout = xntimer_get_timeout(timer);
+	p->interval = xntimer_interval(timer);
+	p->status = timer->status;
+	knamecpy(p->name, timer->name);
+
+	return 1;
+}
+
+static int timerlist_show(struct xnvfile_snapshot_iterator *it, void *data)
+{
+	struct vfile_clock_data *p = data;
+	char timeout_buf[]  = "-         ";
+	char interval_buf[] = "-         ";
+	char hit_buf[32];
+
+	if (p == NULL)
+		xnvfile_printf(it,
+			       "%-3s  %-20s  %-10s  %-10s  %s\n",
+			       "CPU", "SCHED/SHOT", "TIMEOUT",
+			       "INTERVAL", "NAME");
+	else {
+		if (p->status & XNTIMER_RUNNING)
+			xntimer_format_time(p->timeout, timeout_buf,
+					    sizeof(timeout_buf));
+		if (p->status & XNTIMER_PERIODIC)
+			xntimer_format_time(p->interval, interval_buf,
+					    sizeof(interval_buf));
+		ksformat(hit_buf, sizeof(hit_buf), "%u/%u",
+			 p->scheduled, p->fired);
+		xnvfile_printf(it,
+			       "%-3u  %-20s  %-10s  %-10s  %s\n",
+			       p->cpu, hit_buf, timeout_buf,
+			       interval_buf, p->name);
+	}
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops timerlist_ops = {
+	.rewind = timerlist_rewind,
+	.next = timerlist_next,
+	.show = timerlist_show,
+};
+
+static void init_timerlist_proc(struct xnclock *clock)
+{
+	memset(&clock->timer_vfile, 0, sizeof(clock->timer_vfile));
+	clock->timer_vfile.privsz = sizeof(struct vfile_clock_priv);
+	clock->timer_vfile.datasz = sizeof(struct vfile_clock_data);
+	clock->timer_vfile.tag = &clock->timer_revtag;
+	clock->timer_vfile.ops = &timerlist_ops;
+
+	xnvfile_init_snapshot(clock->name, &clock->timer_vfile, &timerlist_vfroot);
+	xnvfile_priv(&clock->timer_vfile) = clock;
+}
+
+static void cleanup_timerlist_proc(struct xnclock *clock)
+{
+	xnvfile_destroy_snapshot(&clock->timer_vfile);
+}
+
+void init_timerlist_root(void)
+{
+	xnvfile_init_dir("timer", &timerlist_vfroot, &cobalt_vfroot);
+}
+
+void cleanup_timerlist_root(void)
+{
+	xnvfile_destroy_dir(&timerlist_vfroot);
+}
+
+#else  /* !CONFIG_XENO_OPT_STATS */
+
+static inline void init_timerlist_root(void) { }
+
+static inline void cleanup_timerlist_root(void) { }
+
+static inline void init_timerlist_proc(struct xnclock *clock) { }
+
+static inline void cleanup_timerlist_proc(struct xnclock *clock) { }
+
+#endif	/* !CONFIG_XENO_OPT_STATS */
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static struct xnvfile_directory clock_vfroot;
+
+void print_core_clock_status(struct xnclock *clock,
+			     struct xnvfile_regular_iterator *it)
+{
+	const char *wd_status = "off";
+
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	wd_status = "on";
+#endif /* CONFIG_XENO_OPT_WATCHDOG */
+
+	xnvfile_printf(it, "%8s: timer=%s, clock=%s\n",
+		       "devices", pipeline_timer_name(), pipeline_clock_name());
+	xnvfile_printf(it, "%8s: %s\n", "watchdog", wd_status);
+}
+
+static int clock_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	struct xnclock *clock = xnvfile_priv(it->vfile);
+	xnticks_t now = xnclock_read_raw(clock);
+
+	if (clock->id >= 0)	/* External clock, print id. */
+		xnvfile_printf(it, "%7s: %d\n", "id", __COBALT_CLOCK_EXT(clock->id));
+
+	xnvfile_printf(it, "%7s: irq=%Ld kernel=%Ld user=%Ld\n", "gravity",
+		       xnclock_ticks_to_ns(clock, xnclock_get_gravity(clock, irq)),
+		       xnclock_ticks_to_ns(clock, xnclock_get_gravity(clock, kernel)),
+		       xnclock_ticks_to_ns(clock, xnclock_get_gravity(clock, user)));
+
+	xnclock_print_status(clock, it);
+
+	xnvfile_printf(it, "%7s: %Lu (%.4Lx %.4x)\n", "ticks",
+		       now, now >> 32, (u32)(now & -1U));
+
+	return 0;
+}
+
+static ssize_t clock_store(struct xnvfile_input *input)
+{
+	char buf[128], *args = buf, *p;
+	struct xnclock_gravity gravity;
+	struct xnvfile_regular *vfile;
+	unsigned long ns, ticks;
+	struct xnclock *clock;
+	ssize_t nbytes;
+	int ret;
+
+	nbytes = xnvfile_get_string(input, buf, sizeof(buf));
+	if (nbytes < 0)
+		return nbytes;
+
+	vfile = container_of(input->vfile, struct xnvfile_regular, entry);
+	clock = xnvfile_priv(vfile);
+	gravity = clock->gravity;
+
+	while ((p = strsep(&args, " \t:/,")) != NULL) {
+		if (*p == '\0')
+			continue;
+		ns = simple_strtol(p, &p, 10);
+		ticks = xnclock_ns_to_ticks(clock, ns);
+		switch (*p) {
+		case 'i':
+			gravity.irq = ticks;
+			break;
+		case 'k':
+			gravity.kernel = ticks;
+			break;
+		case 'u':
+		case '\0':
+			gravity.user = ticks;
+			break;
+		default:
+			return -EINVAL;
+		}
+		ret = xnclock_set_gravity(clock, &gravity);
+		if (ret)
+			return ret;
+	}
+
+	return nbytes;
+}
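+
+/*
+ * The store handler above accepts a list of "<ns><suffix>" tokens,
+ * where the suffix is 'i' (irq), 'k' (kernel) or 'u'/none (user),
+ * e.g. assuming the Cobalt vfile root is mounted at /proc/xenomai:
+ *
+ *	echo "3000i 2000k 1000u" > /proc/xenomai/clock/coreclk
+ *
+ * would set the irq, kernel and user gravity values of the core
+ * clock to 3, 2 and 1 microsecond(s) respectively.
+ */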
+
+static struct xnvfile_regular_ops clock_ops = {
+	.show = clock_show,
+	.store = clock_store,
+};
+
+static void init_clock_proc(struct xnclock *clock)
+{
+	memset(&clock->vfile, 0, sizeof(clock->vfile));
+	clock->vfile.ops = &clock_ops;
+	xnvfile_init_regular(clock->name, &clock->vfile, &clock_vfroot);
+	xnvfile_priv(&clock->vfile) = clock;
+	init_timerlist_proc(clock);
+}
+
+static void cleanup_clock_proc(struct xnclock *clock)
+{
+	cleanup_timerlist_proc(clock);
+	xnvfile_destroy_regular(&clock->vfile);
+}
+
+void xnclock_init_proc(void)
+{
+	xnvfile_init_dir("clock", &clock_vfroot, &cobalt_vfroot);
+	init_timerlist_root();
+}
+
+void xnclock_cleanup_proc(void)
+{
+	xnvfile_destroy_dir(&clock_vfroot);
+	cleanup_timerlist_root();
+}
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+static inline void init_clock_proc(struct xnclock *clock) { }
+
+static inline void cleanup_clock_proc(struct xnclock *clock) { }
+
+#endif	/* !CONFIG_XENO_OPT_VFILE */
+
+/**
+ * @brief Register a Xenomai clock.
+ *
+ * This service installs a new clock which may be used to drive
+ * Xenomai timers.
+ *
+ * @param clock The new clock to register.
+ *
+ * @param affinity The set of CPUs we may expect the backing clock
+ * device to tick on. As a special case, passing a NULL affinity mask
+ * means that timer IRQs cannot be seen as percpu events, in which
+ * case all outstanding timers will be maintained in a single global
+ * queue instead of percpu timer queues.
+ *
+ * @coretags{secondary-only}
+ */
+int xnclock_register(struct xnclock *clock, const cpumask_t *affinity)
+{
+	struct xntimerdata *tmd;
+	int cpu;
+
+	secondary_mode_only();
+
+#ifdef CONFIG_SMP
+	/*
+	 * A CPU affinity set may be defined for each clock,
+	 * enumerating the CPUs which can receive ticks from the
+	 * backing clock device.  When given, this set must be a
+	 * subset of the real-time CPU set.
+	 */
+	if (affinity) {
+		cpumask_and(&clock->affinity, affinity, &xnsched_realtime_cpus);
+		if (cpumask_empty(&clock->affinity))
+			return -EINVAL;
+	} else	/* Device is global without particular IRQ affinity. */
+		cpumask_clear(&clock->affinity);
+#endif
+
+	/* Allocate the percpu timer queue slot. */
+	clock->timerdata = alloc_percpu(struct xntimerdata);
+	if (clock->timerdata == NULL)
+		return -ENOMEM;
+
+	/*
+	 * POLA: init all timer slots for the new clock, although some
+	 * of them might remain unused depending on the CPU affinity
+	 * of the event source(s). If the clock device is global
+	 * without any particular IRQ affinity, all timers will be
+	 * queued to CPU0.
+	 */
+	for_each_online_cpu(cpu) {
+		tmd = xnclock_percpu_timerdata(clock, cpu);
+		xntimerq_init(&tmd->q);
+	}
+
+#ifdef CONFIG_XENO_OPT_STATS
+	INIT_LIST_HEAD(&clock->timerq);
+#endif /* CONFIG_XENO_OPT_STATS */
+
+	init_clock_proc(clock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnclock_register);
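+
+/*
+ * Registration sketch, matching what xnclock_init() does below for
+ * the core clock:
+ *
+ *	ret = xnclock_register(&nkclock, &xnsched_realtime_cpus);
+ *
+ * An external clock device with no definite IRQ affinity would pass
+ * a NULL affinity mask instead, causing all of its timers to be
+ * queued to a single global queue (CPU0).
+ */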
+
+/**
+ * @fn void xnclock_deregister(struct xnclock *clock)
+ * @brief Deregister a Xenomai clock.
+ *
+ * This service uninstalls a Xenomai clock previously registered with
+ * xnclock_register().
+ *
+ * This service may only be called once all timers driven by @a clock have
+ * been stopped.
+ *
+ * @param clock The clock to deregister.
+ *
+ * @coretags{secondary-only}
+ */
+void xnclock_deregister(struct xnclock *clock)
+{
+	struct xntimerdata *tmd;
+	int cpu;
+
+	secondary_mode_only();
+
+	cleanup_clock_proc(clock);
+
+	for_each_online_cpu(cpu) {
+		tmd = xnclock_percpu_timerdata(clock, cpu);
+		XENO_BUG_ON(COBALT, !xntimerq_empty(&tmd->q));
+		xntimerq_destroy(&tmd->q);
+	}
+
+	free_percpu(clock->timerdata);
+}
+EXPORT_SYMBOL_GPL(xnclock_deregister);
+
+/**
+ * @fn void xnclock_tick(struct xnclock *clock)
+ * @brief Process a clock tick.
+ *
+ * This routine processes an incoming @a clock event, firing elapsed
+ * timers as appropriate.
+ *
+ * @param clock The clock for which a new event was received.
+ *
+ * @coretags{coreirq-only, atomic-entry}
+ *
+ * @note The current CPU must be part of the real-time affinity set
+ * unless the clock device has no particular IRQ affinity, otherwise
+ * weird things may happen.
+ */
+void xnclock_tick(struct xnclock *clock)
+{
+	struct xnsched *sched = xnsched_current();
+	struct xntimer *timer;
+	xnsticks_t delta;
+	xntimerq_t *tmq;
+	xnticks_t now;
+	xntimerh_t *h;
+
+	atomic_only();
+
+#ifdef CONFIG_SMP
+	/*
+	 * Some external clock devices may be global without any
+	 * particular IRQ affinity, in which case the associated
+	 * timers will be queued to CPU0.
+	 */
+	if (IS_ENABLED(CONFIG_XENO_OPT_EXTCLOCK) &&
+	    clock != &nkclock &&
+	    !cpumask_test_cpu(xnsched_cpu(sched), &clock->affinity))
+		tmq = &xnclock_percpu_timerdata(clock, 0)->q;
+	else
+#endif
+		tmq = &xnclock_this_timerdata(clock)->q;
+
+	/*
+	 * Optimisation: any local timer reprogramming triggered by
+	 * invoked timer handlers can wait until we leave the tick
+	 * handler. Use this status flag as hint to xntimer_start().
+	 */
+	sched->status |= XNINTCK;
+
+	now = xnclock_read_raw(clock);
+	while ((h = xntimerq_head(tmq)) != NULL) {
+		timer = container_of(h, struct xntimer, aplink);
+		delta = (xnsticks_t)(xntimerh_date(&timer->aplink) - now);
+		if (delta > 0)
+			break;
+
+		trace_cobalt_timer_expire(timer);
+
+		xntimer_dequeue(timer, tmq);
+		xntimer_account_fired(timer);
+
+		/*
+		 * By postponing the propagation of the low-priority
+		 * host tick to the interrupt epilogue (see
+		 * xnintr_irq_handler()), we save some I-cache, which
+		 * translates into precious microsecs on low-end hw.
+		 */
+		if (unlikely(timer == &sched->htimer)) {
+			sched->lflags |= XNHTICK;
+			sched->lflags &= ~XNHDEFER;
+			if (timer->status & XNTIMER_PERIODIC)
+				goto advance;
+			continue;
+		}
+
+		timer->handler(timer);
+		now = xnclock_read_raw(clock);
+		timer->status |= XNTIMER_FIRED;
+		/*
+		 * Only requeue periodic timers which have not been
+		 * requeued, stopped or killed.
+		 */
+		if ((timer->status &
+		     (XNTIMER_PERIODIC|XNTIMER_DEQUEUED|XNTIMER_KILLED|XNTIMER_RUNNING)) !=
+		    (XNTIMER_PERIODIC|XNTIMER_DEQUEUED|XNTIMER_RUNNING))
+			continue;
+	advance:
+		do {
+			timer->periodic_ticks++;
+			xntimer_update_date(timer);
+		} while (xntimerh_date(&timer->aplink) < now);
+
+#ifdef CONFIG_SMP
+		/*
+		 * If the timer was migrated over its timeout handler,
+		 * xntimer_migrate() re-queued it already.
+		 */
+		if (unlikely(timer->sched != sched))
+			continue;
+#endif
+		xntimer_enqueue(timer, tmq);
+	}
+
+	sched->status &= ~XNINTCK;
+
+	xnclock_program_shot(clock, sched);
+}
+EXPORT_SYMBOL_GPL(xnclock_tick);
+
+static int set_core_clock_gravity(struct xnclock *clock,
+				  const struct xnclock_gravity *p)
+{
+	nkclock.gravity = *p;
+
+	return 0;
+}
+
+static void reset_core_clock_gravity(struct xnclock *clock)
+{
+	struct xnclock_gravity gravity;
+
+	xnarch_get_latencies(&gravity);
+	if (gravity.kernel == 0)
+		gravity.kernel = gravity.user;
+	set_core_clock_gravity(clock, &gravity);
+}
+
+struct xnclock nkclock = {
+	.name = "coreclk",
+	.resolution = 1,	/* nanosecond. */
+	.ops = {
+		.set_gravity = set_core_clock_gravity,
+		.reset_gravity = reset_core_clock_gravity,
+#ifdef CONFIG_XENO_OPT_VFILE
+		.print_status = print_core_clock_status,
+#endif
+	},
+	.id = -1,
+};
+EXPORT_SYMBOL_GPL(nkclock);
+
+void xnclock_cleanup(void)
+{
+	xnclock_deregister(&nkclock);
+}
+
+int __init xnclock_init(void)
+{
+	spl_t s;
+
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+	xnarch_init_u32frac(&bln_frac, 1, 1000000000);
+#endif
+	pipeline_init_clock();
+	xnclock_reset_gravity(&nkclock);
+	xnlock_get_irqsave(&nklock, s);
+	nkclock.wallclock_offset = pipeline_read_wallclock() -
+		xnclock_core_read_monotonic();
+	xnlock_put_irqrestore(&nklock, s);
+	xnclock_register(&nkclock, &xnsched_realtime_cpus);
+
+	return 0;
+}
+
+/** @} */
+++ linux-patched/kernel/xenomai/registry.c	2022-03-21 12:58:28.776894939 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/lock.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2004 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/slab.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/registry.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/assert.h>
+#include <pipeline/sirq.h>
+#include <trace/events/cobalt-core.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_registry Registry services
+ *
+ * The registry provides a means to index object descriptors on unique
+ * alphanumeric keys. When labeled this way, an object is globally
+ * exported; it can be searched for, and its descriptor returned to
+ * the caller for further use; the latter operation is called a
+ * "binding". When no object has been registered under the given name
+ * yet, the registry can be asked to set up a rendez-vous, blocking
+ * the caller until the object is eventually registered.
+ *
+ *@{
+ */
+
+struct xnobject *registry_obj_slots;
+EXPORT_SYMBOL_GPL(registry_obj_slots);
+
+static LIST_HEAD(free_object_list); /* Free objects. */
+
+static LIST_HEAD(busy_object_list); /* Active and exported objects. */
+
+static unsigned int nr_active_objects;
+
+static unsigned long next_object_stamp;
+
+static struct hlist_head *object_index;
+
+static int nr_object_entries;
+
+static struct xnsynch register_synch;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+#include <linux/workqueue.h>
+
+static void proc_callback(struct work_struct *work);
+
+static irqreturn_t registry_proc_schedule(int virq, void *dev_id);
+
+static LIST_HEAD(proc_object_list);	/* Objects waiting for /proc handling. */
+
+static DECLARE_WORK(registry_proc_work, proc_callback);
+
+static int proc_virq;
+
+static struct xnvfile_directory registry_vfroot;
+
+static int usage_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	xnvfile_printf(it, "%u/%u\n",
+		       nr_active_objects,
+		       CONFIG_XENO_OPT_REGISTRY_NRSLOTS);
+	return 0;
+}
+
+static struct xnvfile_regular_ops usage_vfile_ops = {
+	.show = usage_vfile_show,
+};
+
+static struct xnvfile_regular usage_vfile = {
+	.ops = &usage_vfile_ops,
+};
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+unsigned xnregistry_hash_size(void)
+{
+	static const int primes[] = {
+		101, 211, 307, 401, 503, 601,
+		701, 809, 907, 1009, 1103
+	};
+
+#define obj_hash_max(n)			 \
+((n) < sizeof(primes) / sizeof(int) ? \
+ (n) : sizeof(primes) / sizeof(int) - 1)
+
+	return primes[obj_hash_max(CONFIG_XENO_OPT_REGISTRY_NRSLOTS / 100)];
+}
+
+int xnregistry_init(void)
+{
+	int n, ret __maybe_unused;
+
+	registry_obj_slots = kmalloc(CONFIG_XENO_OPT_REGISTRY_NRSLOTS *
+				     sizeof(struct xnobject), GFP_KERNEL);
+	if (registry_obj_slots == NULL)
+		return -ENOMEM;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	ret = xnvfile_init_dir("registry", &registry_vfroot, &cobalt_vfroot);
+	if (ret)
+		return ret;
+
+	ret = xnvfile_init_regular("usage", &usage_vfile, &registry_vfroot);
+	if (ret) {
+		xnvfile_destroy_dir(&registry_vfroot);
+		return ret;
+	}
+
+	proc_virq = pipeline_create_inband_sirq(registry_proc_schedule);
+	if (proc_virq < 0) {
+		xnvfile_destroy_regular(&usage_vfile);
+		xnvfile_destroy_dir(&registry_vfroot);
+		return proc_virq;
+	}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	next_object_stamp = 0;
+
+	for (n = 0; n < CONFIG_XENO_OPT_REGISTRY_NRSLOTS; n++) {
+		registry_obj_slots[n].objaddr = NULL;
+		list_add_tail(&registry_obj_slots[n].link, &free_object_list);
+	}
+
+	/* Slot #0 is reserved/invalid. */
+	list_get_entry(&free_object_list, struct xnobject, link);
+	nr_active_objects = 1;
+
+	nr_object_entries = xnregistry_hash_size();
+	object_index = kmalloc(sizeof(*object_index) *
+				      nr_object_entries, GFP_KERNEL);
+
+	if (object_index == NULL) {
+#ifdef CONFIG_XENO_OPT_VFILE
+		xnvfile_destroy_regular(&usage_vfile);
+		xnvfile_destroy_dir(&registry_vfroot);
+		pipeline_delete_inband_sirq(proc_virq);
+#endif /* CONFIG_XENO_OPT_VFILE */
+		return -ENOMEM;
+	}
+
+	for (n = 0; n < nr_object_entries; n++)
+		INIT_HLIST_HEAD(&object_index[n]);
+
+	xnsynch_init(&register_synch, XNSYNCH_FIFO, NULL);
+
+	return 0;
+}
+
+void xnregistry_cleanup(void)
+{
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct hlist_node *enext;
+	struct xnobject *ecurr;
+	struct xnpnode *pnode;
+	int n;
+
+	flush_scheduled_work();
+
+	for (n = 0; n < nr_object_entries; n++)
+		hlist_for_each_entry_safe(ecurr, enext,
+					&object_index[n], hlink) {
+			pnode = ecurr->pnode;
+			if (pnode == NULL)
+				continue;
+
+			pnode->ops->unexport(ecurr, pnode);
+
+			if (--pnode->entries > 0)
+				continue;
+
+			xnvfile_destroy_dir(&pnode->vdir);
+
+			if (--pnode->root->entries == 0)
+				xnvfile_destroy_dir(&pnode->root->vdir);
+		}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	kfree(object_index);
+	xnsynch_destroy(&register_synch);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	pipeline_delete_inband_sirq(proc_virq);
+	flush_scheduled_work();
+	xnvfile_destroy_regular(&usage_vfile);
+	xnvfile_destroy_dir(&registry_vfroot);
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	kfree(registry_obj_slots);
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static DEFINE_SEMAPHORE(export_mutex);
+
+/*
+ * The following stuff implements the mechanism for delegating
+ * export/unexport requests to/from the /proc interface from the
+ * Xenomai domain to the Linux kernel (i.e. the "lower stage"). This
+ * ends up being a bit complex because such requests may lag long
+ * enough before the Linux kernel processes them that subsequent
+ * requests contradict former ones before the latter have even been
+ * applied (e.g. export -> unexport in the Xenomai domain for
+ * short-lived objects). Such situations are handled by carefully
+ * synchronizing operations across domains.
+ */
+static void proc_callback(struct work_struct *work)
+{
+	struct xnvfile_directory *rdir, *dir;
+	const char *rname, *type;
+	struct xnobject *object;
+	struct xnpnode *pnode;
+	int ret;
+	spl_t s;
+
+	down(&export_mutex);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	while (!list_empty(&proc_object_list)) {
+		object = list_get_entry(&proc_object_list,
+					struct xnobject, link);
+		pnode = object->pnode;
+		type = pnode->dirname;
+		dir = &pnode->vdir;
+		rdir = &pnode->root->vdir;
+		rname = pnode->root->dirname;
+
+		if (object->vfilp != XNOBJECT_EXPORT_SCHEDULED)
+			goto unexport;
+
+		object->vfilp = XNOBJECT_EXPORT_INPROGRESS;
+		list_add_tail(&object->link, &busy_object_list);
+
+		xnlock_put_irqrestore(&nklock, s);
+
+		if (pnode->entries++ == 0) {
+			if (pnode->root->entries++ == 0) {
+				/* Create the root directory on the fly. */
+				ret = xnvfile_init_dir(rname, rdir, &registry_vfroot);
+				if (ret) {
+					xnlock_get_irqsave(&nklock, s);
+					object->pnode = NULL;
+					pnode->root->entries = 0;
+					pnode->entries = 0;
+					continue;
+				}
+			}
+			/* Create the class directory on the fly. */
+			ret = xnvfile_init_dir(type, dir, rdir);
+			if (ret) {
+				if (pnode->root->entries == 1) {
+					pnode->root->entries = 0;
+					xnvfile_destroy_dir(rdir);
+				}
+				xnlock_get_irqsave(&nklock, s);
+				object->pnode = NULL;
+				pnode->entries = 0;
+				continue;
+			}
+		}
+
+		ret = pnode->ops->export(object, pnode);
+		if (ret && --pnode->entries == 0) {
+			xnvfile_destroy_dir(dir);
+			if (--pnode->root->entries == 0)
+				xnvfile_destroy_dir(rdir);
+			xnlock_get_irqsave(&nklock, s);
+			object->pnode = NULL;
+		} else
+			xnlock_get_irqsave(&nklock, s);
+
+		continue;
+
+	unexport:
+		if (object->vfilp == XNOBJECT_EXPORT_ABORTED)
+			object->objaddr = NULL;
+
+		object->vfilp = NULL;
+		object->pnode = NULL;
+
+		if (object->objaddr)
+			list_add_tail(&object->link, &busy_object_list);
+		else {
+			/*
+			 * Trap the case where we are unexporting an
+			 * already unregistered object.
+			 */
+			list_add_tail(&object->link, &free_object_list);
+			nr_active_objects--;
+		}
+
+		xnlock_put_irqrestore(&nklock, s);
+
+		pnode->ops->unexport(object, pnode);
+
+		if (--pnode->entries == 0) {
+			xnvfile_destroy_dir(dir);
+			if (--pnode->root->entries == 0)
+				xnvfile_destroy_dir(rdir);
+		}
+
+		xnlock_get_irqsave(&nklock, s);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	up(&export_mutex);
+}
+
+static irqreturn_t registry_proc_schedule(int virq, void *dev_id)
+{
+	/*
+	 * schedule_work() will check for us if the work has already
+	 * been scheduled, so just be lazy and submit blindly.
+	 */
+	schedule_work(&registry_proc_work);
+
+	return IRQ_HANDLED;
+}
+
+static int registry_export_vfsnap(struct xnobject *object,
+				  struct xnpnode *pnode)
+{
+	struct xnpnode_snapshot *p;
+	int ret;
+
+	/*
+	 * Make sure to initialize _all_ mandatory vfile fields; most
+	 * of the time we are using sane NULL defaults based on static
+	 * storage for the vfile struct, but here we are building up a
+	 * vfile object explicitly.
+	 */
+	p = container_of(pnode, struct xnpnode_snapshot, node);
+	object->vfile_u.vfsnap.file.datasz = p->vfile.datasz;
+	object->vfile_u.vfsnap.file.privsz = p->vfile.privsz;
+	/*
+	 * Make the vfile refer to the provided tag struct if any,
+	 * otherwise use our default tag space. In the latter case,
+	 * each object family has its own private revision tag.
+	 */
+	object->vfile_u.vfsnap.file.tag = p->vfile.tag ?:
+		&object->vfile_u.vfsnap.tag;
+	object->vfile_u.vfsnap.file.ops = p->vfile.ops;
+	object->vfile_u.vfsnap.file.entry.lockops = p->vfile.lockops;
+
+	ret = xnvfile_init_snapshot(object->key, &object->vfile_u.vfsnap.file,
+				    &pnode->vdir);
+	if (ret)
+		return ret;
+
+	object->vfilp = &object->vfile_u.vfsnap.file.entry;
+	object->vfilp->private = object->objaddr;
+
+	return 0;
+}
+
+static void registry_unexport_vfsnap(struct xnobject *object,
+				    struct xnpnode *pnode)
+{
+	xnvfile_destroy_snapshot(&object->vfile_u.vfsnap.file);
+}
+
+static void registry_touch_vfsnap(struct xnobject *object)
+{
+	xnvfile_touch(&object->vfile_u.vfsnap.file);
+}
+
+struct xnpnode_ops xnregistry_vfsnap_ops = {
+	.export = registry_export_vfsnap,
+	.unexport = registry_unexport_vfsnap,
+	.touch = registry_touch_vfsnap,
+};
+EXPORT_SYMBOL_GPL(xnregistry_vfsnap_ops);
+
+static int registry_export_vfreg(struct xnobject *object,
+				 struct xnpnode *pnode)
+{
+	struct xnpnode_regular *p;
+	int ret;
+
+	/* See registry_export_vfsnap() for hints. */
+	p = container_of(pnode, struct xnpnode_regular, node);
+	object->vfile_u.vfreg.privsz = p->vfile.privsz;
+	object->vfile_u.vfreg.ops = p->vfile.ops;
+	object->vfile_u.vfreg.entry.lockops = p->vfile.lockops;
+	object->vfile_u.vfreg.entry.refcnt = 0;
+
+	ret = xnvfile_init_regular(object->key, &object->vfile_u.vfreg,
+				   &pnode->vdir);
+	if (ret)
+		return ret;
+
+	object->vfilp = &object->vfile_u.vfreg.entry;
+	object->vfilp->private = object->objaddr;
+
+	return 0;
+}
+
+static void registry_unexport_vfreg(struct xnobject *object,
+				    struct xnpnode *pnode)
+{
+	xnvfile_destroy_regular(&object->vfile_u.vfreg);
+}
+
+struct xnpnode_ops xnregistry_vfreg_ops = {
+	.export = registry_export_vfreg,
+	.unexport = registry_unexport_vfreg,
+};
+EXPORT_SYMBOL_GPL(xnregistry_vfreg_ops);
+
+static int registry_export_vlink(struct xnobject *object,
+				 struct xnpnode *pnode)
+{
+	struct xnpnode_link *link_desc;
+	char *link_target;
+	int ret;
+
+	link_desc = container_of(pnode, struct xnpnode_link, node);
+	link_target = link_desc->target(object->objaddr);
+	if (link_target == NULL)
+		return -ENOMEM;
+
+	ret = xnvfile_init_link(object->key, link_target,
+				&object->vfile_u.link, &pnode->vdir);
+	kfree(link_target);
+	if (ret)
+		return ret;
+
+	object->vfilp = &object->vfile_u.link.entry;
+	object->vfilp->private = object->objaddr;
+
+	return 0;
+}
+
+static void registry_unexport_vlink(struct xnobject *object,
+				    struct xnpnode *pnode)
+{
+	xnvfile_destroy_link(&object->vfile_u.link);
+}
+
+struct xnpnode_ops xnregistry_vlink_ops = {
+	.export = registry_export_vlink,
+	.unexport = registry_unexport_vlink,
+};
+EXPORT_SYMBOL_GPL(xnregistry_vlink_ops);
+
+static inline void registry_export_pnode(struct xnobject *object,
+					 struct xnpnode *pnode)
+{
+	object->vfilp = XNOBJECT_EXPORT_SCHEDULED;
+	object->pnode = pnode;
+	list_del(&object->link);
+	list_add_tail(&object->link, &proc_object_list);
+	pipeline_post_sirq(proc_virq);
+}
+
+static inline void registry_unexport_pnode(struct xnobject *object)
+{
+	if (object->vfilp != XNOBJECT_EXPORT_SCHEDULED) {
+		/*
+		 * We might have preempted a v-file read op, so bump
+		 * the object's revtag to make sure the data
+		 * collection is aborted next, if we end up deleting
+		 * the object being read.
+		 */
+		if (object->pnode->ops->touch)
+			object->pnode->ops->touch(object);
+		list_del(&object->link);
+		list_add_tail(&object->link, &proc_object_list);
+		pipeline_post_sirq(proc_virq);
+	} else {
+		/*
+		 * Unexporting before the lower stage has had a chance
+		 * to export. Move the object back to the busyq just
+		 * as if no export had been requested.
+		 */
+		list_del(&object->link);
+		list_add_tail(&object->link, &busy_object_list);
+		object->pnode = NULL;
+		object->vfilp = NULL;
+	}
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
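+/* Classic PJW/ELF-style string hash, folded onto the index table size. */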
+static unsigned registry_hash_crunch(const char *key)
+{
+	unsigned int h = 0, g;
+
+#define HQON    24		/* Higher byte position */
+#define HBYTE   0xf0000000	/* Higher nibble on */
+
+	while (*key) {
+		h = (h << 4) + *key++;
+		if ((g = (h & HBYTE)) != 0)
+			h = (h ^ (g >> HQON)) ^ g;
+	}
+
+	return h % nr_object_entries;
+}
+
+static inline int registry_hash_enter(const char *key, struct xnobject *object)
+{
+	struct xnobject *ecurr;
+	unsigned s;
+
+	object->key = key;
+	s = registry_hash_crunch(key);
+
+	hlist_for_each_entry(ecurr, &object_index[s], hlink)
+		if (ecurr == object || strcmp(key, ecurr->key) == 0)
+			return -EEXIST;
+
+	hlist_add_head(&object->hlink, &object_index[s]);
+
+	return 0;
+}
+
+static inline int registry_hash_remove(struct xnobject *object)
+{
+	unsigned int s = registry_hash_crunch(object->key);
+	struct xnobject *ecurr;
+
+	hlist_for_each_entry(ecurr, &object_index[s], hlink)
+		if (ecurr == object) {
+			hlist_del(&ecurr->hlink);
+			return 0;
+		}
+
+	return -ESRCH;
+}
+
+static struct xnobject *registry_hash_find(const char *key)
+{
+	struct xnobject *ecurr;
+
+	hlist_for_each_entry(ecurr,
+			&object_index[registry_hash_crunch(key)], hlink)
+		if (strcmp(key, ecurr->key) == 0)
+			return ecurr;
+
+	return NULL;
+}
+
+struct registry_wait_context {
+	struct xnthread_wait_context wc;
+	const char *key;
+};
+
+static inline int registry_wakeup_sleepers(const char *key)
+{
+	struct registry_wait_context *rwc;
+	struct xnthread_wait_context *wc;
+	struct xnthread *sleeper, *tmp;
+	int cnt = 0;
+
+	xnsynch_for_each_sleeper_safe(sleeper, tmp, &register_synch) {
+		wc = xnthread_get_wait_context(sleeper);
+		rwc = container_of(wc, struct registry_wait_context, wc);
+		if (*key == *rwc->key && strcmp(key, rwc->key) == 0) {
+			xnsynch_wakeup_this_sleeper(&register_synch, sleeper);
+			++cnt;
+		}
+	}
+
+	return cnt;
+}
+
+/**
+ * @fn int xnregistry_enter(const char *key,void *objaddr,xnhandle_t *phandle,struct xnpnode *pnode)
+ * @brief Register a real-time object.
+ *
+ * This service allocates a new registry slot for an associated
+ * object, and indexes it by an alphanumeric key for later retrieval.
+ *
+ * @param key A valid NULL-terminated string by which the object will
+ * be indexed and later retrieved in the registry. Since the key is
+ * assumed to be stored within the registered object, it will *not*
+ * be copied but only kept by reference in the registry. Pass an empty
+ * or NULL string if the object shall only occupy a registry slot for
+ * handle-based lookups. The slash character is not accepted in @a key
+ * if @a pnode is non-NULL.
+ *
+ * @param objaddr An opaque pointer to the object to index by @a
+ * key.
+ *
+ * @param phandle A pointer to a generic handle defined by the
+ * registry which will uniquely identify the indexed object, until the
+ * latter is unregistered using the xnregistry_remove() service.
+ *
+ * @param pnode A pointer to an optional /proc node class
+ * descriptor. This structure provides the information needed to
+ * export all objects from the given class through the /proc
+ * filesystem, under the /proc/xenomai/registry entry. Passing NULL
+ * indicates that no /proc support is available for the newly
+ * registered object.
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a objaddr is NULL.
+ *
+ * - -EINVAL if @a pnode is non-NULL, and @a key points to a valid
+ * string containing a '/' character.
+ *
+ * - -EAGAIN is returned if no free slot is available in the registry
+ * to index the object.
+ *
+ * - -EEXIST is returned if the @a key is already in use.
+ *
+ * @coretags{unrestricted, might-switch, atomic-entry}
+ */
+int xnregistry_enter(const char *key, void *objaddr,
+		     xnhandle_t *phandle, struct xnpnode *pnode)
+{
+	struct xnobject *object;
+	spl_t s;
+	int ret;
+
+	if (objaddr == NULL ||
+	    (pnode != NULL && key != NULL && strchr(key, '/')))
+		return -EINVAL;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(&free_object_list)) {
+		ret = -EAGAIN;
+		goto unlock_and_exit;
+	}
+
+	object = list_get_entry(&free_object_list, struct xnobject, link);
+	nr_active_objects++;
+	object->objaddr = objaddr;
+	object->cstamp = ++next_object_stamp;
+	trace_cobalt_registry_enter(key, objaddr);
+#ifdef CONFIG_XENO_OPT_VFILE
+	object->pnode = NULL;
+#endif
+	if (key == NULL || *key == '\0') {
+		object->key = NULL;
+		*phandle = object - registry_obj_slots;
+		ret = 0;
+		goto unlock_and_exit;
+	}
+
+	ret = registry_hash_enter(key, object);
+	if (ret) {
+		nr_active_objects--;
+		list_add_tail(&object->link, &free_object_list);
+		goto unlock_and_exit;
+	}
+
+	list_add_tail(&object->link, &busy_object_list);
+
+	/*
+	 * <!> Make sure the handle is written back before the
+	 * rescheduling takes place.
+	 */
+	*phandle = object - registry_obj_slots;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	if (pnode)
+		registry_export_pnode(object, pnode);
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	if (registry_wakeup_sleepers(key))
+		xnsched_run();
+
+unlock_and_exit:
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnregistry_enter);
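+
+/*
+ * Registration sketch (hypothetical object and key, no /proc export,
+ * error handling abbreviated):
+ *
+ *	xnhandle_t handle;
+ *	int ret;
+ *
+ *	ret = xnregistry_enter(obj->name, obj, &handle, NULL);
+ *	if (ret)
+ *		return ret;
+ *
+ * The object may then be retrieved by handle, or bound to by key via
+ * xnregistry_bind().
+ */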
+
+/**
+ * @fn int xnregistry_bind(const char *key,xnticks_t timeout,int timeout_mode,xnhandle_t *phandle)
+ * @brief Bind to a real-time object.
+ *
+ * This service retrieves the registry handle of a given object
+ * identified by its key. Unless otherwise specified, this service
+ * will block the caller if the object is not registered yet, waiting
+ * for such registration to occur.
+ *
+ * @param key A valid NULL-terminated string which identifies the
+ * object to bind to.
+ *
+ * @param timeout The timeout which may be used to limit the time the
+ * thread wait for the object to be registered. This value is a wait
+ * time given as a count of nanoseconds. It can either be relative,
+ * absolute monotonic (XN_ABSOLUTE), or absolute adjustable
+ * (XN_REALTIME) depending on @a timeout_mode. Passing XN_INFINITE @b
+ * and setting @a timeout_mode to XN_RELATIVE specifies an unbounded
+ * wait. Passing XN_NONBLOCK causes the service to return immediately
+ * without waiting if the object is not registered on entry. All other
+ * values are used as a wait limit.
+ *
+ * @param timeout_mode The mode of the @a timeout parameter. It can
+ * either be set to XN_RELATIVE, XN_ABSOLUTE, or XN_REALTIME (see also
+ * xntimer_start()).
+ *
+ * @param phandle A pointer to a memory location which will be written
+ * upon success with the generic handle defined by the registry for
+ * the retrieved object. Contents of this memory is undefined upon
+ * failure.
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a key is NULL.
+ *
+ * - -EINTR is returned if xnthread_unblock() has been called for the
+ * waiting thread before the retrieval has completed.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is equal to XN_NONBLOCK
+ * and the searched object is not registered on entry. As a special
+ * exception, this error is also returned if this service should
+ * block, but was called from a context which cannot sleep
+ * (e.g. interrupt, non-realtime or scheduler locked).
+ *
+ * - -ETIMEDOUT is returned if the object cannot be retrieved within
+ * the specified amount of time.
+ *
+ * @coretags{primary-only, might-switch}
+ *
+ * @note xnregistry_bind() only returns the index portion of a handle,
+ * which might include other fixed bits to be complete
+ * (e.g. XNSYNCH_PSHARED). The caller is responsible for completing
+ * the handle returned with those bits if applicable, depending on the
+ * context.
+ */
+int xnregistry_bind(const char *key, xnticks_t timeout, int timeout_mode,
+		    xnhandle_t *phandle)
+{
+	struct registry_wait_context rwc;
+	struct xnobject *object;
+	int ret = 0, info;
+	spl_t s;
+
+	if (key == NULL)
+		return -EINVAL;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (timeout_mode == XN_RELATIVE &&
+	    timeout != XN_INFINITE && timeout != XN_NONBLOCK) {
+		timeout_mode = XN_ABSOLUTE;
+		timeout += xnclock_read_monotonic(&nkclock);
+	}
+
+	for (;;) {
+		object = registry_hash_find(key);
+		if (object) {
+			*phandle = object - registry_obj_slots;
+			goto unlock_and_exit;
+		}
+
+		if ((timeout_mode == XN_RELATIVE && timeout == XN_NONBLOCK) ||
+		    xnsched_unblockable_p()) {
+			ret = -EWOULDBLOCK;
+			goto unlock_and_exit;
+		}
+
+		rwc.key = key;
+		xnthread_prepare_wait(&rwc.wc);
+		info = xnsynch_sleep_on(&register_synch, timeout, timeout_mode);
+		if (info & XNTIMEO) {
+			ret = -ETIMEDOUT;
+			goto unlock_and_exit;
+		}
+		if (info & XNBREAK) {
+			ret = -EINTR;
+			goto unlock_and_exit;
+		}
+	}
+
+unlock_and_exit:
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnregistry_bind);
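+
+/*
+ * Binding sketch (primary mode, hypothetical key; waits at most one
+ * second for the object to be registered):
+ *
+ *	xnhandle_t handle;
+ *	int ret;
+ *
+ *	ret = xnregistry_bind("my-object", 1000000000ULL, XN_RELATIVE,
+ *			      &handle);
+ *	if (ret)
+ *		return ret;
+ */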
+
+/**
+ * @fn int xnregistry_remove(xnhandle_t handle)
+ * @brief Forcibly unregister a real-time object.
+ *
+ * This service forcibly removes an object from the registry. The
+ * removal is performed regardless of the current object's locking
+ * status.
+ *
+ * @param handle The generic handle of the object to remove.
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -ESRCH is returned if @a handle does not reference a registered
+ * object.
+ *
+ * @coretags{unrestricted}
+ */
+int xnregistry_remove(xnhandle_t handle)
+{
+	struct xnobject *object;
+	void *objaddr;
+	int ret = 0;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	object = xnregistry_validate(handle);
+	if (object == NULL) {
+		ret = -ESRCH;
+		goto unlock_and_exit;
+	}
+
+	trace_cobalt_registry_remove(object->key, object->objaddr);
+
+	objaddr = object->objaddr;
+	object->objaddr = NULL;
+	object->cstamp = 0;
+
+	if (object->key) {
+		registry_hash_remove(object);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+		if (object->pnode) {
+			if (object->vfilp == XNOBJECT_EXPORT_INPROGRESS) {
+				object->vfilp = XNOBJECT_EXPORT_ABORTED;
+				object->objaddr = objaddr;
+			}
+
+			registry_unexport_pnode(object);
+			/*
+			 * Leave the update of the object queues to
+			 * the work callback if it has been kicked.
+			 */
+			if (object->pnode) {
+				xnlock_put_irqrestore(&nklock, s);
+				if (is_secondary_domain())
+					flush_work(&registry_proc_work);
+				return 0;
+			}
+		}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+		list_del(&object->link);
+	}
+
+	if (!IS_ENABLED(CONFIG_XENO_OPT_VFILE) || !object->objaddr) {
+		list_add_tail(&object->link, &free_object_list);
+		nr_active_objects--;
+	}
+
+unlock_and_exit:
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnregistry_remove);
+
+/**
+ * Turn a named object into an anonymous object
+ *
+ * @coretags{unrestricted}
+ */
+int xnregistry_unlink(const char *key)
+{
+	struct xnobject *object;
+	int ret = 0;
+	spl_t s;
+
+	if (key == NULL)
+		return -EINVAL;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	object = registry_hash_find(key);
+	if (object == NULL) {
+		ret = -ESRCH;
+		goto unlock_and_exit;
+	}
+
+	trace_cobalt_registry_unlink(object->key, object->objaddr);
+
+	ret = registry_hash_remove(object);
+	if (ret < 0)
+		goto unlock_and_exit;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	if (object->pnode) {
+		registry_unexport_pnode(object);
+		/*
+		 * Leave the update of the object queues to
+		 * the work callback if it has been kicked.
+		 */
+		if (object->pnode)
+			goto unlock_and_exit;
+	}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	list_del(&object->link);
+
+	object->key = NULL;
+
+unlock_and_exit:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+/**
+ * @fn void *xnregistry_lookup(xnhandle_t handle, unsigned long *cstamp_r)
+ * @brief Find a real-time object in the registry.
+ *
+ * This service retrieves an object from its handle into the registry
+ * and returns the memory address of its descriptor. Optionally, it
+ * also copies back the object's creation stamp which is unique across
+ * object registration calls.
+ *
+ * @param handle The generic handle of the object to fetch.
+ *
+ * @param cstamp_r If not-NULL, the object's creation stamp will be
+ * copied to this memory area.
+ *
+ * @return The memory address of the object's descriptor is returned
+ * on success. Otherwise, NULL is returned if @a handle does not
+ * reference a registered object.
+ *
+ * @coretags{unrestricted}
+ */
+
+/** @} */
+++ linux-patched/kernel/xenomai/lock.c	2022-03-21 12:58:28.761895085 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/pipe.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2012 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004,2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <cobalt/kernel/lock.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_lock Locking services
+ *
+ * The Xenomai core deals with concurrent activities from two distinct
+ * kernels running side-by-side. When interrupts are involved, the
+ * services from this section control the @b hard interrupt state
+ * exclusively, for protecting against processor-local or SMP
+ * concurrency.
+ *
+ * @note In a dual kernel configuration, <i>hard interrupts</i> are
+ * gated by the CPU. When enabled, hard interrupts are immediately
+ * delivered to the Xenomai core if they belong to a real-time source,
+ * or deferred until enabled by a second-stage virtual interrupt mask,
+ * if they belong to regular Linux devices/sources.
+ *
+ * @{
+ */
+DEFINE_XNLOCK(nklock);
+#if defined(CONFIG_SMP) || defined(CONFIG_XENO_OPT_DEBUG_LOCKING)
+EXPORT_SYMBOL_GPL(nklock);
+
+#ifdef CONFIG_XENO_ARCH_OUTOFLINE_XNLOCK
+int ___xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	return ____xnlock_get(lock /* , */ XNLOCK_DBG_PASS_CONTEXT);
+}
+EXPORT_SYMBOL_GPL(___xnlock_get);
+
+void ___xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	____xnlock_put(lock /* , */ XNLOCK_DBG_PASS_CONTEXT);
+}
+EXPORT_SYMBOL_GPL(___xnlock_put);
+#endif /* out of line xnlock */
+#endif /* CONFIG_SMP || XENO_DEBUG(LOCKING) */
+
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+DEFINE_PER_CPU(struct xnlockinfo, xnlock_stats);
+EXPORT_PER_CPU_SYMBOL_GPL(xnlock_stats);
+#endif
+
+/** @} */
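+
+/*
+ * Typical critical section sketch, as used throughout the Cobalt
+ * core:
+ *
+ *	spl_t s;
+ *
+ *	xnlock_get_irqsave(&nklock, s);
+ *	... protected section, hard irqs off ...
+ *	xnlock_put_irqrestore(&nklock, s);
+ */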
+++ linux-patched/kernel/xenomai/pipe.c	2022-03-21 12:58:28.746895231 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/debug.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2005 Dmitry Adamushko <dmitry.adamushko@gmail.com>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA
+ * 02139, USA; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/fcntl.h>
+#include <linux/poll.h>
+#include <linux/termios.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+#include <asm/xenomai/syscall.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/pipe.h>
+#include <pipeline/sirq.h>
+
+static int xnpipe_asyncsig = SIGIO;
+
+struct xnpipe_state xnpipe_states[XNPIPE_NDEVS];
+EXPORT_SYMBOL_GPL(xnpipe_states);
+
+#define XNPIPE_BITMAP_SIZE	((XNPIPE_NDEVS + BITS_PER_LONG - 1) / BITS_PER_LONG)
+
+static unsigned long xnpipe_bitmap[XNPIPE_BITMAP_SIZE];
+
+static LIST_HEAD(xnpipe_sleepq);
+
+static LIST_HEAD(xnpipe_asyncq);
+
+static int xnpipe_wakeup_virq;
+
+static struct class *xnpipe_class;
+
+/* Allocation of minor values */
+
+static inline int xnpipe_minor_alloc(int minor)
+{
+	spl_t s;
+
+	if ((minor < 0 && minor != XNPIPE_MINOR_AUTO) || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (minor == XNPIPE_MINOR_AUTO)
+		minor = find_first_zero_bit(xnpipe_bitmap, XNPIPE_NDEVS);
+
+	if (minor == XNPIPE_NDEVS ||
+	    (xnpipe_bitmap[minor / BITS_PER_LONG] &
+	     (1UL << (minor % BITS_PER_LONG))))
+		minor = -EBUSY;
+	else
+		xnpipe_bitmap[minor / BITS_PER_LONG] |=
+			(1UL << (minor % BITS_PER_LONG));
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return minor;
+}
+
+static inline void xnpipe_minor_free(int minor)
+{
+	xnpipe_bitmap[minor / BITS_PER_LONG] &=
+		~(1UL << (minor % BITS_PER_LONG));
+}
+
+static inline void xnpipe_enqueue_wait(struct xnpipe_state *state, int mask)
+{
+	if (state->wcount != 0x7fffffff && state->wcount++ == 0)
+		list_add_tail(&state->slink, &xnpipe_sleepq);
+
+	state->status |= mask;
+}
+
+static inline void xnpipe_dequeue_wait(struct xnpipe_state *state, int mask)
+{
+	if (state->status & mask)
+		if (--state->wcount == 0) {
+			list_del(&state->slink);
+			state->status &= ~mask;
+		}
+}
+
+static inline void xnpipe_dequeue_all(struct xnpipe_state *state, int mask)
+{
+	if (state->status & mask) {
+		if (state->wcount) {
+			state->wcount = 0;
+			list_del(&state->slink);
+			state->status &= ~mask;
+		}
+	}
+}
+
+/* Must be entered with nklock held, interrupts off. */
+#define xnpipe_wait(__state, __mask, __s, __cond)			\
+({									\
+	wait_queue_head_t *__waitq;					\
+	DEFINE_WAIT(__wait);						\
+	int __sigpending;						\
+									\
+	if ((__mask) & XNPIPE_USER_WREAD)				\
+		__waitq = &(__state)->readq;				\
+	else								\
+		__waitq = &(__state)->syncq;				\
+									\
+	xnpipe_enqueue_wait(__state, __mask);				\
+	xnlock_put_irqrestore(&nklock, __s);				\
+									\
+	for (;;) {							\
+		__sigpending = signal_pending(current);			\
+		if (__sigpending)					\
+			break;						\
+		prepare_to_wait_exclusive(__waitq, &__wait, TASK_INTERRUPTIBLE); \
+		if (__cond || (__state)->status & XNPIPE_KERN_LCLOSE)	\
+			break;						\
+		schedule();						\
+	}								\
+									\
+	finish_wait(__waitq, &__wait);					\
+									\
+	/* Restore the interrupt state initially set by the caller. */	\
+	xnlock_get_irqsave(&nklock, __s);				\
+	xnpipe_dequeue_wait(__state, __mask);				\
+									\
+	__sigpending;							\
+})
+
+static irqreturn_t xnpipe_wakeup_proc(int sirq, void *dev_id)
+{
+	struct xnpipe_state *state;
+	unsigned long rbits;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/*
+	 * NOTE: sleepers might enter/leave the queue while we don't
+	 * hold the nklock in these wakeup loops. So we iterate over
+	 * each sleeper list until we find no more candidates for
+	 * wakeup after an entire scan, redoing the scan from the list
+	 * head otherwise.
+	 */
+	for (;;) {
+		if (list_empty(&xnpipe_sleepq))
+			goto check_async;
+
+		state = list_first_entry(&xnpipe_sleepq, struct xnpipe_state, slink);
+
+		for (;;) {
+			rbits = state->status & XNPIPE_USER_ALL_READY;
+			if (rbits)
+				break;
+			if (list_is_last(&state->slink, &xnpipe_sleepq))
+				goto check_async;
+			state = list_next_entry(state, slink);
+		}
+
+		state->status &= ~rbits;
+
+		if ((rbits & XNPIPE_USER_WREAD_READY) != 0) {
+			if (waitqueue_active(&state->readq)) {
+				xnlock_put_irqrestore(&nklock, s);
+				wake_up_interruptible(&state->readq);
+				xnlock_get_irqsave(&nklock, s);
+			}
+		}
+		if ((rbits & XNPIPE_USER_WSYNC_READY) != 0) {
+			if (waitqueue_active(&state->syncq)) {
+				xnlock_put_irqrestore(&nklock, s);
+				wake_up_interruptible(&state->syncq);
+				xnlock_get_irqsave(&nklock, s);
+			}
+		}
+	}
+
+check_async:
+	/*
+	 * Scan the async queue, sending the proper signal to
+	 * subscribers.
+	 */
+	for (;;) {
+		if (list_empty(&xnpipe_asyncq))
+			goto out;
+
+		state = list_first_entry(&xnpipe_asyncq, struct xnpipe_state, alink);
+
+		for (;;) {
+			if (state->status & XNPIPE_USER_SIGIO)
+				break;
+			if (list_is_last(&state->alink, &xnpipe_asyncq))
+				goto out;
+			state = list_next_entry(state, alink);
+		}
+
+		state->status &= ~XNPIPE_USER_SIGIO;
+		xnlock_put_irqrestore(&nklock, s);
+		kill_fasync(&state->asyncq, xnpipe_asyncsig, POLL_IN);
+		xnlock_get_irqsave(&nklock, s);
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return IRQ_HANDLED;
+}
+
+static inline void xnpipe_schedule_request(void) /* hw IRQs off */
+{
+	pipeline_post_sirq(xnpipe_wakeup_virq);
+}
+
+static inline ssize_t xnpipe_flush_bufq(void (*fn)(void *buf, void *xstate),
+					struct list_head *q,
+					void *xstate)
+{
+	struct xnpipe_mh *mh, *tmp;
+	ssize_t n = 0;
+
+	if (list_empty(q))
+		return 0;
+
+	/* Queue is private, no locking is required. */
+	list_for_each_entry_safe(mh, tmp, q, link) {
+		list_del(&mh->link);
+		n += xnpipe_m_size(mh);
+		fn(mh, xstate);
+	}
+
+	/* Return the overall count of bytes flushed. */
+	return n;
+}
+
+/*
+ * Move the specified queue contents to a private queue, then call the
+ * flush handler to purge it. The latter runs without locking.
+ * Returns the number of bytes flushed. Must be entered with nklock
+ * held, interrupts off.
+ */
+#define xnpipe_flushq(__state, __q, __f, __s)				\
+({									\
+	LIST_HEAD(__privq);						\
+	ssize_t __n;							\
+									\
+	list_splice_init(&(__state)->__q, &__privq);			\
+	(__state)->nr ## __q = 0;					\
+	xnlock_put_irqrestore(&nklock, (__s));				\
+	__n = xnpipe_flush_bufq((__state)->ops.__f, &__privq, (__state)->xstate);	\
+	xnlock_get_irqsave(&nklock, (__s));				\
+									\
+	__n;								\
+})
+
+static void *xnpipe_default_alloc_ibuf(size_t size, void *xstate)
+{
+	void *buf;
+
+	buf = xnmalloc(size);
+	if (likely(buf != NULL))
+		return buf;
+
+	if (size > xnheap_get_size(&cobalt_heap))
+		/* Request will never succeed. */
+		return (struct xnpipe_mh *)-1;
+
+	return NULL;
+}
+
+static void xnpipe_default_free_ibuf(void *buf, void *xstate)
+{
+	xnfree(buf);
+}
+
+static void xnpipe_default_release(void *xstate)
+{
+}
+
+static inline int xnpipe_set_ops(struct xnpipe_state *state,
+				 struct xnpipe_operations *ops)
+{
+	state->ops = *ops;
+
+	if (ops->free_obuf == NULL)
+		/*
+		 * Caller must provide a way to free unread outgoing
+		 * buffers.
+		 */
+		return -EINVAL;
+
+	/* Set some default handlers for common usage. */
+	if (ops->alloc_ibuf == NULL)
+		state->ops.alloc_ibuf = xnpipe_default_alloc_ibuf;
+	if (ops->free_ibuf == NULL)
+		state->ops.free_ibuf = xnpipe_default_free_ibuf;
+	if (ops->release == NULL)
+		state->ops.release = xnpipe_default_release;
+
+	return 0;
+}
+
+int xnpipe_connect(int minor, struct xnpipe_operations *ops, void *xstate)
+{
+	struct xnpipe_state *state;
+	int need_sched = 0, ret;
+	spl_t s;
+
+	minor = xnpipe_minor_alloc(minor);
+	if (minor < 0)
+		return minor;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	ret = xnpipe_set_ops(state, ops);
+	if (ret) {
+		xnlock_put_irqrestore(&nklock, s);
+		return ret;
+	}
+
+	state->status |= XNPIPE_KERN_CONN;
+	xnsynch_init(&state->synchbase, XNSYNCH_FIFO, NULL);
+	state->xstate = xstate;
+	state->ionrd = 0;
+
+	if (state->status & XNPIPE_USER_CONN) {
+		if (state->status & XNPIPE_USER_WREAD) {
+			/*
+			 * Wake up the regular Linux task waiting for
+			 * the kernel side to connect (xnpipe_open).
+			 */
+			state->status |= XNPIPE_USER_WREAD_READY;
+			need_sched = 1;
+		}
+
+		if (state->asyncq) {	/* Schedule asynch sig. */
+			state->status |= XNPIPE_USER_SIGIO;
+			need_sched = 1;
+		}
+	}
+
+	if (need_sched)
+		xnpipe_schedule_request();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return minor;
+}
+EXPORT_SYMBOL_GPL(xnpipe_connect);
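+
+/*
+ * Illustrative sketch (not part of the actual code): a kernel-side
+ * client would typically connect a minor as shown below, where
+ * "mypipe_output" and "mypipe_free" are hypothetical callbacks owned
+ * by the caller and "minor" is the requested device minor.
+ *
+ *	static struct xnpipe_operations mypipe_ops = {
+ *		.output = mypipe_output,
+ *		.free_obuf = mypipe_free,	(mandatory, see xnpipe_set_ops())
+ *	};
+ *
+ *	minor = xnpipe_connect(minor, &mypipe_ops, private_data);
+ *	if (minor < 0)
+ *		return minor;	(allocation or setup error)
+ */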
+
+int xnpipe_disconnect(int minor)
+{
+	struct xnpipe_state *state;
+	int need_sched = 0;
+	spl_t s;
+
+	if (minor < 0 || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBADF;
+	}
+
+	state->status &= ~XNPIPE_KERN_CONN;
+
+	state->ionrd -= xnpipe_flushq(state, outq, free_obuf, s);
+
+	if ((state->status & XNPIPE_USER_CONN) == 0)
+		goto cleanup;
+
+	xnpipe_flushq(state, inq, free_ibuf, s);
+
+	if (xnsynch_destroy(&state->synchbase) == XNSYNCH_RESCHED)
+		xnsched_run();
+
+	if (state->status & XNPIPE_USER_WREAD) {
+		/*
+		 * Wake up the regular Linux task waiting for some
+		 * operation from the Xenomai side (read/write or
+		 * poll).
+		 */
+		state->status |= XNPIPE_USER_WREAD_READY;
+		need_sched = 1;
+	}
+
+	if (state->asyncq) {	/* Schedule asynch sig. */
+		state->status |= XNPIPE_USER_SIGIO;
+		need_sched = 1;
+	}
+
+cleanup:
+	/*
+	 * If xnpipe_release() has not fully run, enter lingering
+	 * close. This will prevent the extra state from being wiped
+	 * out until then.
+	 */
+	if (state->status & XNPIPE_USER_CONN)
+		state->status |= XNPIPE_KERN_LCLOSE;
+	else {
+		xnlock_put_irqrestore(&nklock, s);
+		state->ops.release(state->xstate);
+		xnlock_get_irqsave(&nklock, s);
+		xnpipe_minor_free(minor);
+	}
+
+	if (need_sched)
+		xnpipe_schedule_request();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnpipe_disconnect);
+
+ssize_t xnpipe_send(int minor, struct xnpipe_mh *mh, size_t size, int flags)
+{
+	struct xnpipe_state *state;
+	int need_sched = 0;
+	spl_t s;
+
+	if (minor < 0 || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	if (size <= sizeof(*mh))
+		return -EINVAL;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBADF;
+	}
+
+	xnpipe_m_size(mh) = size - sizeof(*mh);
+	xnpipe_m_rdoff(mh) = 0;
+	state->ionrd += xnpipe_m_size(mh);
+
+	if (flags & XNPIPE_URGENT)
+		list_add(&mh->link, &state->outq);
+	else
+		list_add_tail(&mh->link, &state->outq);
+
+	state->nroutq++;
+
+	if ((state->status & XNPIPE_USER_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return (ssize_t) size;
+	}
+
+	if (state->status & XNPIPE_USER_WREAD) {
+		/*
+		 * Wake up the regular Linux task waiting for input
+		 * from the Xenomai side.
+		 */
+		state->status |= XNPIPE_USER_WREAD_READY;
+		need_sched = 1;
+	}
+
+	if (state->asyncq) {	/* Schedule asynch sig. */
+		state->status |= XNPIPE_USER_SIGIO;
+		need_sched = 1;
+	}
+
+	if (need_sched)
+		xnpipe_schedule_request();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return (ssize_t) size;
+}
+EXPORT_SYMBOL_GPL(xnpipe_send);
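+
+/*
+ * Illustrative sketch (hypothetical message layout): the buffer handed
+ * to xnpipe_send() starts with the message header, and @size includes
+ * that header, so readers see a payload of size - sizeof(struct
+ * xnpipe_mh) bytes.
+ *
+ *	struct my_msg {
+ *		struct xnpipe_mh mh;	(header must come first)
+ *		char text[16];
+ *	} *msg = xnmalloc(sizeof(*msg));
+ *
+ *	if (msg) {
+ *		strcpy(msg->text, "hello");
+ *		xnpipe_send(minor, &msg->mh, sizeof(*msg), 0);
+ *	}
+ *
+ * The buffer is later released by the free_obuf handler once the
+ * user-space side has read it (see xnpipe_read()).
+ */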
+
+ssize_t xnpipe_mfixup(int minor, struct xnpipe_mh *mh, ssize_t size)
+{
+	struct xnpipe_state *state;
+	spl_t s;
+
+	if (minor < 0 || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	if (size < 0)
+		return -EINVAL;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBADF;
+	}
+
+	xnpipe_m_size(mh) += size;
+	state->ionrd += size;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return (ssize_t) size;
+}
+EXPORT_SYMBOL_GPL(xnpipe_mfixup);
+
+ssize_t xnpipe_recv(int minor, struct xnpipe_mh **pmh, xnticks_t timeout)
+{
+	struct xnpipe_state *state;
+	struct xnpipe_mh *mh;
+	xntmode_t mode;
+	ssize_t ret;
+	int info;
+	spl_t s;
+
+	if (minor < 0 || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	if (xnsched_interrupt_p())
+		return -EPERM;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		ret = -EBADF;
+		goto unlock_and_exit;
+	}
+
+	/*
+	 * If we received a relative timespec, rescale it to an
+	 * absolute time value based on the monotonic clock.
+	 */
+	mode = XN_RELATIVE;
+	if (timeout != XN_NONBLOCK && timeout != XN_INFINITE) {
+		mode = XN_ABSOLUTE;
+		timeout += xnclock_read_monotonic(&nkclock);
+	}
+
+	for (;;) {
+		if (!list_empty(&state->inq))
+			break;
+
+		if (timeout == XN_NONBLOCK) {
+			ret = -EWOULDBLOCK;
+			goto unlock_and_exit;
+		}
+
+		info = xnsynch_sleep_on(&state->synchbase, timeout, mode);
+		if (info & XNTIMEO) {
+			ret = -ETIMEDOUT;
+			goto unlock_and_exit;
+		}
+		if (info & XNBREAK) {
+			ret = -EINTR;
+			goto unlock_and_exit;
+		}
+		if (info & XNRMID) {
+			ret = -EIDRM;
+			goto unlock_and_exit;
+		}
+	}
+
+	mh = list_get_entry(&state->inq, struct xnpipe_mh, link);
+	*pmh = mh;
+	state->nrinq--;
+	ret = (ssize_t)xnpipe_m_size(mh);
+
+	if (state->status & XNPIPE_USER_WSYNC) {
+		state->status |= XNPIPE_USER_WSYNC_READY;
+		xnpipe_schedule_request();
+	}
+
+unlock_and_exit:
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnpipe_recv);
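+
+/*
+ * Illustrative sketch: a Xenomai-side reader blocking for at most one
+ * millisecond (relative timeout). Assuming the connection uses the
+ * default alloc_ibuf/free_ibuf handlers, the received buffer came
+ * from xnmalloc() and is released with xnfree() once consumed.
+ *
+ *	struct xnpipe_mh *mh;
+ *	ssize_t n = xnpipe_recv(minor, &mh, 1000000);
+ *
+ *	if (n >= 0) {
+ *		consume(xnpipe_m_data(mh), n);	(hypothetical consumer)
+ *		xnfree(mh);
+ *	}
+ */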
+
+int xnpipe_flush(int minor, int mode)
+{
+	struct xnpipe_state *state;
+	int msgcount;
+	spl_t s;
+
+	if (minor < 0 || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBADF;
+	}
+
+	msgcount = state->nroutq + state->nrinq;
+
+	if (mode & XNPIPE_OFLUSH)
+		state->ionrd -= xnpipe_flushq(state, outq, free_obuf, s);
+
+	if (mode & XNPIPE_IFLUSH)
+		xnpipe_flushq(state, inq, free_ibuf, s);
+
+	if ((state->status & XNPIPE_USER_WSYNC) &&
+	    msgcount > state->nroutq + state->nrinq) {
+		state->status |= XNPIPE_USER_WSYNC_READY;
+		xnpipe_schedule_request();
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnpipe_flush);
+
+int xnpipe_pollstate(int minor, unsigned int *mask_r)
+{
+	struct xnpipe_state *state;
+	int ret = 0;
+	spl_t s;
+
+	if (minor < 0 || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (state->status & XNPIPE_KERN_CONN) {
+		*mask_r = POLLOUT;
+		if (!list_empty(&state->inq))
+			*mask_r |= POLLIN;
+	} else
+		ret = -EIO;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnpipe_pollstate);
+
+/* Must be entered with nklock held, interrupts off. */
+#define xnpipe_cleanup_user_conn(__state, __s)				\
+	do {								\
+		xnpipe_flushq((__state), outq, free_obuf, (__s));	\
+		xnpipe_flushq((__state), inq, free_ibuf, (__s));	\
+		(__state)->status &= ~XNPIPE_USER_CONN;			\
+		if ((__state)->status & XNPIPE_KERN_LCLOSE) {		\
+			(__state)->status &= ~XNPIPE_KERN_LCLOSE;	\
+			xnlock_put_irqrestore(&nklock, (__s));		\
+			(__state)->ops.release((__state)->xstate);	\
+			xnlock_get_irqsave(&nklock, (__s));		\
+			xnpipe_minor_free(xnminor_from_state(__state));	\
+		}							\
+	} while(0)
+
+/*
+ * Open the pipe from user-space.
+ */
+
+static int xnpipe_open(struct inode *inode, struct file *file)
+{
+	int minor, err = 0, sigpending;
+	struct xnpipe_state *state;
+	spl_t s;
+
+	minor = MINOR(inode->i_rdev);
+
+	if (minor >= XNPIPE_NDEVS)
+		return -ENXIO;	/* TssTss... stop playing with mknod() ;o) */
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/* Enforce exclusive open for the message queues. */
+	if (state->status & (XNPIPE_USER_CONN | XNPIPE_USER_LCONN)) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBUSY;
+	}
+
+	state->status |= XNPIPE_USER_LCONN;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	file->private_data = state;
+	init_waitqueue_head(&state->readq);
+	init_waitqueue_head(&state->syncq);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	state->status |= XNPIPE_USER_CONN;
+	state->status &= ~XNPIPE_USER_LCONN;
+	state->wcount = 0;
+
+	state->status &=
+		~(XNPIPE_USER_ALL_WAIT | XNPIPE_USER_ALL_READY |
+		  XNPIPE_USER_SIGIO);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		if (file->f_flags & O_NONBLOCK) {
+			xnpipe_cleanup_user_conn(state, s);
+			xnlock_put_irqrestore(&nklock, s);
+			return -EWOULDBLOCK;
+		}
+
+		sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s,
+					 state->status & XNPIPE_KERN_CONN);
+		if (sigpending) {
+			xnpipe_cleanup_user_conn(state, s);
+			xnlock_put_irqrestore(&nklock, s);
+			return -ERESTARTSYS;
+		}
+	}
+
+	if (err)
+		xnpipe_cleanup_user_conn(state, s);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+static int xnpipe_release(struct inode *inode, struct file *file)
+{
+	struct xnpipe_state *state = file->private_data;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	xnpipe_dequeue_all(state, XNPIPE_USER_WREAD);
+	xnpipe_dequeue_all(state, XNPIPE_USER_WSYNC);
+
+	if (state->status & XNPIPE_KERN_CONN) {
+		/* Unblock waiters. */
+		if (xnsynch_pended_p(&state->synchbase)) {
+			xnsynch_flush(&state->synchbase, XNRMID);
+			xnsched_run();
+		}
+	}
+
+	if (state->ops.input)
+		state->ops.input(NULL, -EPIPE, state->xstate);
+
+	if (state->asyncq) {	/* Clear the async queue */
+		list_del(&state->alink);
+		state->status &= ~XNPIPE_USER_SIGIO;
+		xnlock_put_irqrestore(&nklock, s);
+		fasync_helper(-1, file, 0, &state->asyncq);
+		xnlock_get_irqsave(&nklock, s);
+	}
+
+	xnpipe_cleanup_user_conn(state, s);
+	/*
+	 * The extra state may not be available from now on, if
+	 * xnpipe_disconnect() entered lingering close before we got
+	 * there; so calling xnpipe_cleanup_user_conn() should be the
+	 * last thing we do.
+	 */
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+static ssize_t xnpipe_read(struct file *file,
+			   char *buf, size_t count, loff_t *ppos)
+{
+	struct xnpipe_state *state = file->private_data;
+	int sigpending, err = 0;
+	size_t nbytes, inbytes;
+	struct xnpipe_mh *mh;
+	ssize_t ret;
+	spl_t s;
+
+	if (!access_wok(buf, count))
+		return -EFAULT;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EPIPE;
+	}
+	/*
+	 * Probing the queue and enqueuing the process must appear
+	 * atomic, including from the Xenomai side.
+	 */
+	if (list_empty(&state->outq)) {
+		if (file->f_flags & O_NONBLOCK) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EWOULDBLOCK;
+		}
+
+		sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s,
+					 !list_empty(&state->outq));
+
+		if (list_empty(&state->outq)) {
+			xnlock_put_irqrestore(&nklock, s);
+			return sigpending ? -ERESTARTSYS : 0;
+		}
+	}
+
+	mh = list_get_entry(&state->outq, struct xnpipe_mh, link);
+	state->nroutq--;
+
+	/*
+	 * More data may be appended to the current message bucket
+	 * while its contents are being copied to the user buffer;
+	 * therefore we need to loop until either 1) all the data has
+	 * been copied, or 2) the user buffer space is exhausted.
+	 */
+
+	inbytes = 0;
+
+	for (;;) {
+		nbytes = xnpipe_m_size(mh) - xnpipe_m_rdoff(mh);
+
+		if (nbytes + inbytes > count)
+			nbytes = count - inbytes;
+
+		if (nbytes == 0)
+			break;
+
+		xnlock_put_irqrestore(&nklock, s);
+
+		/* More data could be appended while doing this: */
+		err = __copy_to_user(buf + inbytes,
+				     xnpipe_m_data(mh) + xnpipe_m_rdoff(mh),
+				     nbytes);
+
+		xnlock_get_irqsave(&nklock, s);
+
+		if (err) {
+			err = -EFAULT;
+			break;
+		}
+
+		inbytes += nbytes;
+		xnpipe_m_rdoff(mh) += nbytes;
+	}
+
+	state->ionrd -= inbytes;
+	ret = inbytes;
+
+	if (xnpipe_m_size(mh) > xnpipe_m_rdoff(mh)) {
+		list_add(&mh->link, &state->outq);
+		state->nroutq++;
+	} else {
+		/*
+		 * We always want to fire the output handler because
+		 * whatever the error state is for userland (e.g.
+		 * -EFAULT), we did pull a message from our output
+		 * queue.
+		 */
+		if (state->ops.output)
+			state->ops.output(mh, state->xstate);
+		xnlock_put_irqrestore(&nklock, s);
+		state->ops.free_obuf(mh, state->xstate);
+		xnlock_get_irqsave(&nklock, s);
+		if (state->status & XNPIPE_USER_WSYNC) {
+			state->status |= XNPIPE_USER_WSYNC_READY;
+			xnpipe_schedule_request();
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err ? : ret;
+}
+
+static ssize_t xnpipe_write(struct file *file,
+			    const char *buf, size_t count, loff_t *ppos)
+{
+	struct xnpipe_state *state = file->private_data;
+	struct xnpipe_mh *mh;
+	int pollnum, ret;
+	spl_t s;
+
+	if (count == 0)
+		return 0;
+
+	if (!access_rok(buf, count))
+		return -EFAULT;
+
+	xnlock_get_irqsave(&nklock, s);
+
+retry:
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EPIPE;
+	}
+
+	pollnum = state->nrinq + state->nroutq;
+	xnlock_put_irqrestore(&nklock, s);
+
+	mh = state->ops.alloc_ibuf(count + sizeof(*mh), state->xstate);
+	if (mh == (struct xnpipe_mh *)-1)
+		return -ENOMEM;
+
+	if (mh == NULL) {
+		if (file->f_flags & O_NONBLOCK)
+			return -EWOULDBLOCK;
+
+		xnlock_get_irqsave(&nklock, s);
+		if (xnpipe_wait(state, XNPIPE_USER_WSYNC, s,
+				pollnum > state->nrinq + state->nroutq)) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -ERESTARTSYS;
+		}
+		goto retry;
+	}
+
+	xnpipe_m_size(mh) = count;
+	xnpipe_m_rdoff(mh) = 0;
+
+	if (copy_from_user(xnpipe_m_data(mh), buf, count)) {
+		state->ops.free_ibuf(mh, state->xstate);
+		return -EFAULT;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+
+	list_add_tail(&mh->link, &state->inq);
+	state->nrinq++;
+
+	/* Wake up a Xenomai sleeper if any. */
+	if (xnsynch_wakeup_one_sleeper(&state->synchbase))
+		xnsched_run();
+
+	if (state->ops.input) {
+		ret = state->ops.input(mh, 0, state->xstate);
+		if (ret)
+			count = (size_t)ret;
+	}
+
+	if (file->f_flags & O_SYNC) {
+		if (!list_empty(&state->inq)) {
+			if (xnpipe_wait(state, XNPIPE_USER_WSYNC, s,
+					list_empty(&state->inq)))
+				count = -ERESTARTSYS;
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return (ssize_t)count;
+}
+
+static long xnpipe_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct xnpipe_state *state = file->private_data;
+	int ret = 0;
+	ssize_t n;
+	spl_t s;
+
+	switch (cmd) {
+	case XNPIPEIOC_GET_NRDEV:
+
+		if (put_user(XNPIPE_NDEVS, (int *)arg))
+			return -EFAULT;
+
+		break;
+
+	case XNPIPEIOC_OFLUSH:
+
+		xnlock_get_irqsave(&nklock, s);
+
+		if ((state->status & XNPIPE_KERN_CONN) == 0) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EPIPE;
+		}
+
+		n = xnpipe_flushq(state, outq, free_obuf, s);
+		state->ionrd -= n;
+		goto kick_wsync;
+
+	case XNPIPEIOC_IFLUSH:
+
+		xnlock_get_irqsave(&nklock, s);
+
+		if ((state->status & XNPIPE_KERN_CONN) == 0) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EPIPE;
+		}
+
+		n = xnpipe_flushq(state, inq, free_ibuf, s);
+
+	kick_wsync:
+
+		if (n > 0 && (state->status & XNPIPE_USER_WSYNC)) {
+			state->status |= XNPIPE_USER_WSYNC_READY;
+			xnpipe_schedule_request();
+		}
+
+		xnlock_put_irqrestore(&nklock, s);
+		ret = n;
+		break;
+
+	case XNPIPEIOC_SETSIG:
+
+		if (arg < 1 || arg >= _NSIG)
+			return -EINVAL;
+
+		xnpipe_asyncsig = arg;
+		break;
+
+	case FIONREAD:
+
+		n = (state->status & XNPIPE_KERN_CONN) ? state->ionrd : 0;
+
+		if (put_user(n, (int *)arg))
+			return -EFAULT;
+
+		break;
+
+	case TCGETS:
+		/* For isatty() probing. */
+		return -ENOTTY;
+
+	default:
+
+		return -EINVAL;
+	}
+
+	return ret;
+}
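+
+/*
+ * Illustrative user-space sketch: FIONREAD reports the byte count
+ * currently pending in the kernel-to-user direction (state->ionrd)
+ * for a given message pipe device, e.g.:
+ *
+ *	int pending;
+ *
+ *	if (ioctl(fd, FIONREAD, &pending) == 0)
+ *		printf("%d bytes readable on /dev/rtp<minor>\n", pending);
+ */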
+
+#ifdef CONFIG_COMPAT
+/*
+ * Could be replaced with compat_ptr_ioctl if support for kernels < 5.4 is
+ * dropped.
+ */
+static long xnpipe_compat_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	return xnpipe_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#else
+#define xnpipe_compat_ioctl	NULL
+#endif
+
+static int xnpipe_fasync(int fd, struct file *file, int on)
+{
+	struct xnpipe_state *state = file->private_data;
+	int ret, queued;
+	spl_t s;
+
+	queued = (state->asyncq != NULL);
+	ret = fasync_helper(fd, file, on, &state->asyncq);
+
+	if (state->asyncq) {
+		if (!queued) {
+			xnlock_get_irqsave(&nklock, s);
+			list_add_tail(&state->alink, &xnpipe_asyncq);
+			xnlock_put_irqrestore(&nklock, s);
+		}
+	} else if (queued) {
+		xnlock_get_irqsave(&nklock, s);
+		list_del(&state->alink);
+		xnlock_put_irqrestore(&nklock, s);
+	}
+
+	return ret;
+}
+
+static unsigned xnpipe_poll(struct file *file, poll_table *pt)
+{
+	struct xnpipe_state *state = file->private_data;
+	unsigned r_mask = 0, w_mask = 0;
+	spl_t s;
+
+	poll_wait(file, &state->readq, pt);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (state->status & XNPIPE_KERN_CONN)
+		w_mask |= (POLLOUT | POLLWRNORM);
+	else
+		r_mask |= POLLHUP;
+
+	if (!list_empty(&state->outq))
+		r_mask |= (POLLIN | POLLRDNORM);
+	else
+		/*
+		 * Processes which have issued a timed-out poll
+		 * request will remain linked to the sleepers queue,
+		 * and will be silently unlinked the next time the
+		 * Xenomai side kicks xnpipe_wakeup_proc().
+		 */
+		xnpipe_enqueue_wait(state, XNPIPE_USER_WREAD);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return r_mask | w_mask;
+}
+
+static struct file_operations xnpipe_fops = {
+	.read = xnpipe_read,
+	.write = xnpipe_write,
+	.poll = xnpipe_poll,
+	.unlocked_ioctl = xnpipe_ioctl,
+	.compat_ioctl = xnpipe_compat_ioctl,
+	.open = xnpipe_open,
+	.release = xnpipe_release,
+	.fasync = xnpipe_fasync
+};
+
+int xnpipe_mount(void)
+{
+	struct xnpipe_state *state;
+	struct device *cldev;
+	int i;
+
+	for (state = &xnpipe_states[0];
+	     state < &xnpipe_states[XNPIPE_NDEVS]; state++) {
+		state->status = 0;
+		state->asyncq = NULL;
+		INIT_LIST_HEAD(&state->inq);
+		state->nrinq = 0;
+		INIT_LIST_HEAD(&state->outq);
+		state->nroutq = 0;
+	}
+
+	xnpipe_class = class_create(THIS_MODULE, "rtpipe");
+	if (IS_ERR(xnpipe_class)) {
+		printk(XENO_ERR "error creating rtpipe class, err=%ld\n",
+		       PTR_ERR(xnpipe_class));
+		return -EBUSY;
+	}
+
+	for (i = 0; i < XNPIPE_NDEVS; i++) {
+		cldev = device_create(xnpipe_class, NULL,
+				      MKDEV(XNPIPE_DEV_MAJOR, i),
+				      NULL, "rtp%d", i);
+		if (IS_ERR(cldev)) {
+			printk(XENO_ERR
+			       "can't add device class, major=%d, minor=%d, err=%ld\n",
+			       XNPIPE_DEV_MAJOR, i, PTR_ERR(cldev));
+			class_destroy(xnpipe_class);
+			return -EBUSY;
+		}
+	}
+
+	if (register_chrdev(XNPIPE_DEV_MAJOR, "rtpipe", &xnpipe_fops)) {
+		printk(XENO_ERR
+		       "unable to reserve major #%d for message pipes\n",
+		       XNPIPE_DEV_MAJOR);
+		return -EBUSY;
+	}
+
+	xnpipe_wakeup_virq = pipeline_create_inband_sirq(xnpipe_wakeup_proc);
+	if (xnpipe_wakeup_virq < 0) {
+		printk(XENO_ERR
+		       "unable to reserve synthetic IRQ for message pipes\n");
+		return xnpipe_wakeup_virq;
+	}
+
+	return 0;
+}
+
+void xnpipe_umount(void)
+{
+	int i;
+
+	pipeline_delete_inband_sirq(xnpipe_wakeup_virq);
+
+	unregister_chrdev(XNPIPE_DEV_MAJOR, "rtpipe");
+
+	for (i = 0; i < XNPIPE_NDEVS; i++)
+		device_destroy(xnpipe_class, MKDEV(XNPIPE_DEV_MAJOR, i));
+
+	class_destroy(xnpipe_class);
+}
+++ linux-patched/kernel/xenomai/debug.c	2022-03-21 12:58:28.731895378 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/timer.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include <linux/limits.h>
+#include <linux/ctype.h>
+#include <linux/jhash.h>
+#include <linux/mm.h>
+#include <linux/signal.h>
+#include <linux/vmalloc.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/uapi/signal.h>
+#include <asm/xenomai/syscall.h>
+#include "posix/process.h"
+#include "debug.h"
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_debug Debugging services
+ * @{
+ */
+struct xnvfile_directory cobalt_debug_vfroot;
+EXPORT_SYMBOL_GPL(cobalt_debug_vfroot);
+
+#ifdef CONFIG_XENO_OPT_DEBUG_TRACE_RELAX
+
+#define SYMBOL_HSLOTS	(1 << 8)
+
+struct hashed_symbol {
+	struct hashed_symbol *next;
+	char symbol[0];
+};
+
+static struct hashed_symbol *symbol_jhash[SYMBOL_HSLOTS];
+
+static struct xnheap memory_pool;
+
+/*
+ * This is permanent storage for ASCII strings which comes in handy
+ * to get a unique and constant reference to a symbol while
+ * preserving storage space. Hashed symbols have infinite lifetime
+ * and are never flushed.
+ */
+DEFINE_PRIVATE_XNLOCK(symbol_lock);
+
+static const char *hash_symbol(const char *symbol)
+{
+	struct hashed_symbol *p, **h;
+	const char *str;
+	size_t len;
+	u32 hash;
+	spl_t s;
+
+	len = strlen(symbol);
+	hash = jhash(symbol, len, 0);
+
+	xnlock_get_irqsave(&symbol_lock, s);
+
+	h = &symbol_jhash[hash & (SYMBOL_HSLOTS - 1)];
+	p = *h;
+	while (p &&
+	       (*p->symbol != *symbol ||
+		strcmp(p->symbol + 1, symbol + 1)))
+	       p = p->next;
+
+	if (p)
+		goto done;
+
+	p = xnheap_alloc(&memory_pool, sizeof(*p) + len + 1);
+	if (p == NULL) {
+		str = NULL;
+		goto out;
+	}
+
+	strcpy(p->symbol, symbol);
+	p->next = *h;
+	*h = p;
+done:
+	str = p->symbol;
+out:
+	xnlock_put_irqrestore(&symbol_lock, s);
+
+	return str;
+}
+
+/*
+ * We define a static limit (RELAX_SPOTNR) on spot records to bound
+ * the memory consumption (we pull record memory from the system
+ * heap). The current value should be reasonable enough unless the
+ * application seriously misbehaves, given that we only keep unique
+ * spots. Said differently, if the application has more than
+ * RELAX_SPOTNR distinct code locations doing spurious relaxes, then
+ * the first issue to address is likely PEBKAC.
+ */
+#define RELAX_SPOTNR	128
+#define RELAX_HSLOTS	(1 << 8)
+
+struct relax_record {
+	/* Number of hits for this location */
+	u32 hits;
+	struct relax_spot {
+		/* Faulty thread name. */
+		char thread[XNOBJECT_NAME_LEN];
+		/* call stack the relax originates from. */
+		int depth;
+		struct backtrace {
+			unsigned long pc;
+			const char *mapname;
+		} backtrace[SIGSHADOW_BACKTRACE_DEPTH];
+		/* Program hash value of the caller. */
+		u32 proghash;
+		/* Pid of the caller. */
+		pid_t pid;
+		/* Reason for relaxing. */
+		int reason;
+	} spot;
+	struct relax_record *r_next;
+	struct relax_record *h_next;
+	const char *exe_path;
+};
+
+static struct relax_record *relax_jhash[RELAX_HSLOTS];
+
+static struct relax_record *relax_record_list;
+
+static int relax_overall, relax_queued;
+
+DEFINE_PRIVATE_XNLOCK(relax_lock);
+
+/*
+ * The motivation to centralize tracing information about relaxes
+ * directly into kernel space is fourfold:
+ *
+ * - this allows us to gather all the trace data into a single
+ * location and keep it safe there, with no external log file
+ * involved.
+ *
+ * - enabling the tracing does not impose any requirement on the
+ * application (aside from being compiled with debug symbols, for
+ * best interpretation of that information). We only need a kernel
+ * config switch for this (i.e. CONFIG_XENO_OPT_DEBUG_TRACE_RELAX).
+ *
+ * - the data is collected and can be made available exactly the same
+ * way regardless of the application emitting the relax requests, or
+ * whether it is still alive when the trace data are displayed.
+ *
+ * - the kernel is able to provide accurate and detailed trace
+ * information, such as the relative offset of instructions causing
+ * relax requests within dynamic shared objects, without having to
+ * guess it roughly from /proc/pid/maps, or relying on ldd's
+ * --function-relocs feature, both of which require running on the
+ * target system to get the needed information. Instead, we allow a
+ * build host to later use a cross-compilation toolchain to extract
+ * the source location from the raw data the kernel has provided on
+ * the target system.
+ *
+ * However, collecting the call frames within the application to
+ * determine the full context of a relax spot is not something we can
+ * do purely from kernel space, notably because it depends on build
+ * options we just don't know about (e.g. frame pointers availability
+ * for the app, or other nitty-gritty details depending on the
+ * toolchain). To solve this, we ask the application to send us a
+ * complete backtrace taken from the context of a specific signal
+ * handler, which we know is stacked over the relax spot. That
+ * information is then stored by the kernel after some
+ * post-processing, along with other data identifying the caller, and
+ * made available through the /proc/xenomai/debug/relax vfile.
+ *
+ * Implementation-wise, xndebug_notify_relax and xndebug_trace_relax
+ * routines are paired: first, xndebug_notify_relax sends a SIGSHADOW
+ * request to userland when a relax spot is detected from
+ * xnthread_relax, which should then trigger a call back to
+ * xndebug_trace_relax with the complete backtrace information, as
+ * seen from userland (via the internal sc_cobalt_backtrace
+ * syscall). All this runs on behalf of the relaxing thread, so we can
+ * make a number of convenient assumptions (such as being able to scan
+ * the current vma list to get detailed information about the
+ * executable mappings that could be involved).
+ */
+
+void xndebug_notify_relax(struct xnthread *thread, int reason)
+{
+	xnthread_signal(thread, SIGSHADOW,
+			  sigshadow_int(SIGSHADOW_ACTION_BACKTRACE, reason));
+}
+
+void xndebug_trace_relax(int nr, unsigned long *backtrace,
+			 int reason)
+{
+	struct relax_record *p, **h;
+	struct vm_area_struct *vma;
+	struct xnthread *thread;
+	struct relax_spot spot;
+	struct mm_struct *mm;
+	struct file *file;
+	unsigned long pc;
+	char *mapname;
+	int n, depth;
+	char *tmp;
+	u32 hash;
+	spl_t s;
+
+	thread = xnthread_current();
+	if (thread == NULL)
+		return;		/* Can't be, right? What a mess. */
+
+	/*
+	 * We compute PC values relative to the base of the shared
+	 * executable mappings we find in the backtrace, which makes
+	 * it possible for the slackspot utility to match the
+	 * corresponding source code locations from unrelocated file
+	 * offsets.
+	 */
+
+	tmp = (char *)__get_free_page(GFP_KERNEL);
+	if (tmp == NULL)
+		/*
+		 * The situation looks really bad, but we can't do
+		 * anything about it. Just bail out.
+		 */
+		return;
+
+	memset(&spot, 0, sizeof(spot));
+	mm = get_task_mm(current);
+	mmap_read_lock(mm);
+
+	for (n = 0, depth = 0; n < nr; n++) {
+		pc = backtrace[n];
+
+		vma = find_vma(mm, pc);
+		if (vma == NULL)
+			continue;
+
+		/*
+		 * Hack. Unlike DSOs, executables and interpreters
+		 * (e.g. dynamic linkers) are protected against write
+		 * attempts. Use this to determine when $pc should be
+		 * fixed up by subtracting the mapping base address in
+		 * the DSO case.
+		 */
+		if (!(vma->vm_flags & VM_DENYWRITE))
+			pc -= vma->vm_start;
+
+		spot.backtrace[depth].pc = pc;
+
+		/*
+		 * Even if we cannot fetch the map name, we still
+		 * record the PC value, which may give some hint
+		 * downstream.
+		 */
+		file = vma->vm_file;
+		if (file == NULL)
+			goto next_frame;
+
+		mapname = d_path(&file->f_path, tmp, PAGE_SIZE);
+		if (IS_ERR(mapname))
+			goto next_frame;
+
+		spot.backtrace[depth].mapname = hash_symbol(mapname);
+	next_frame:
+		depth++;
+	}
+
+	mmap_read_unlock(mm);
+	mmput(mm);
+	free_page((unsigned long)tmp);
+
+	/*
+	 * Most of the time we will be sent duplicates, since the odds
+	 * of seeing the same thread running the same code making the
+	 * same mistake all over again are high. So we probe the hash
+	 * table for an identical spot first, and only fall back to a
+	 * full record allocation from the system heap if no match
+	 * was found. Otherwise, we just take the fast exit path.
+	 */
+	spot.depth = depth;
+	spot.proghash = thread->proghash;
+	spot.pid = xnthread_host_pid(thread);
+	spot.reason = reason;
+	strcpy(spot.thread, thread->name);
+	hash = jhash2((u32 *)&spot, sizeof(spot) / sizeof(u32), 0);
+
+	xnlock_get_irqsave(&relax_lock, s);
+
+	h = &relax_jhash[hash & (RELAX_HSLOTS - 1)];
+	p = *h;
+	while (p &&
+	       /* Try quick guesses first, then memcmp */
+	       (p->spot.depth != spot.depth ||
+		p->spot.pid != spot.pid ||
+		memcmp(&p->spot, &spot, sizeof(spot))))
+	       p = p->h_next;
+
+	if (p) {
+		p->hits++;
+		goto out;	/* Spot already recorded. */
+	}
+
+	if (relax_queued >= RELAX_SPOTNR)
+		goto out;	/* No more space -- ignore. */
+	/*
+	 * We can only compete with other shadows which have just
+	 * switched to secondary mode like us. So holding the
+	 * relax_lock a bit more without disabling interrupts is not
+	 * an issue. This allows us to postpone the record memory
+	 * allocation while probing and updating the hash table in a
+	 * single move.
+	 */
+	p = xnheap_alloc(&memory_pool, sizeof(*p));
+	if (p == NULL)
+		goto out;      /* Something is about to go wrong... */
+
+	memcpy(&p->spot, &spot, sizeof(p->spot));
+	p->exe_path = hash_symbol(thread->exe_path);
+	p->hits = 1;
+	p->h_next = *h;
+	*h = p;
+	p->r_next = relax_record_list;
+	relax_record_list = p;
+	relax_queued++;
+out:
+	relax_overall++;
+
+	xnlock_put_irqrestore(&relax_lock, s);
+}
+
+static DEFINE_VFILE_HOSTLOCK(relax_mutex);
+
+struct relax_vfile_priv {
+	int queued;
+	int overall;
+	int ncurr;
+	struct relax_record *head;
+	struct relax_record *curr;
+};
+
+static void *relax_vfile_begin(struct xnvfile_regular_iterator *it)
+{
+	struct relax_vfile_priv *priv = xnvfile_iterator_priv(it);
+	struct relax_record *p;
+	spl_t s;
+	int n;
+
+	/*
+	 * Snapshot the counters under lock, to make sure they remain
+	 * mutually consistent even though we dump the record list in
+	 * a lock-less manner. Additionally, the vfile layer already
+	 * holds the relax_mutex lock for us, so that we can't race
+	 * with ->store().
+	 */
+	xnlock_get_irqsave(&relax_lock, s);
+
+	if (relax_queued == 0 || it->pos > relax_queued) {
+		xnlock_put_irqrestore(&relax_lock, s);
+		return NULL;
+	}
+	priv->overall = relax_overall;
+	priv->queued = relax_queued;
+	priv->head = relax_record_list;
+
+	xnlock_put_irqrestore(&relax_lock, s);
+
+	if (it->pos == 0) {
+		priv->curr = NULL;
+		priv->ncurr = -1;
+		return VFILE_SEQ_START;
+	}
+
+	for (n = 1, p = priv->head; n < it->pos; n++)
+		p = p->r_next;
+
+	priv->curr = p;
+	priv->ncurr = n;
+
+	return p;
+}
+
+static void *relax_vfile_next(struct xnvfile_regular_iterator *it)
+{
+	struct relax_vfile_priv *priv = xnvfile_iterator_priv(it);
+	struct relax_record *p;
+	int n;
+
+	if (it->pos > priv->queued)
+		return NULL;
+
+	if (it->pos == priv->ncurr + 1)
+		p = priv->curr->r_next;
+	else {
+		for (n = 1, p = priv->head; n < it->pos; n++)
+			p = p->r_next;
+	}
+
+	priv->curr = p;
+	priv->ncurr = it->pos;
+
+	return p;
+}
+
+static const char *reason_str[] = {
+    [SIGDEBUG_UNDEFINED] = "undefined",
+    [SIGDEBUG_MIGRATE_SIGNAL] = "signal",
+    [SIGDEBUG_MIGRATE_SYSCALL] = "syscall",
+    [SIGDEBUG_MIGRATE_FAULT] = "fault",
+    [SIGDEBUG_MIGRATE_PRIOINV] = "pi-error",
+    [SIGDEBUG_NOMLOCK] = "mlock-check",
+    [SIGDEBUG_WATCHDOG] = "runaway-break",
+    [SIGDEBUG_RESCNT_IMBALANCE] = "resource-count-imbalance",
+    [SIGDEBUG_MUTEX_SLEEP] = "sleep-holding-mutex",
+    [SIGDEBUG_LOCK_BREAK] = "scheduler-lock-break",
+};
+
+static int relax_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	struct relax_vfile_priv *priv = xnvfile_iterator_priv(it);
+	struct relax_record *p = data;
+	int n;
+
+	/*
+	 * No need to grab any lock to read a record from a previously
+	 * validated index: the data must be there and won't be
+	 * touched anymore.
+	 */
+	if (p == NULL) {
+		xnvfile_printf(it, "%d\n", priv->overall);
+		return 0;
+	}
+
+	xnvfile_printf(it, "%s\n", p->exe_path ?: "?");
+	xnvfile_printf(it, "%d %d %s %s\n", p->spot.pid, p->hits,
+		       reason_str[p->spot.reason], p->spot.thread);
+
+	for (n = 0; n < p->spot.depth; n++)
+		xnvfile_printf(it, "0x%lx %s\n",
+			       p->spot.backtrace[n].pc,
+			       p->spot.backtrace[n].mapname ?: "?");
+
+	xnvfile_printf(it, ".\n");
+
+	return 0;
+}
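+
+/*
+ * For reference, each record is rendered by the show handler above as
+ * (layout only, values made up):
+ *
+ *	/usr/bin/myapp
+ *	1234 3 syscall worker
+ *	0x1a2b /usr/lib/libfoo.so
+ *	0x4c5d ?
+ *	.
+ *
+ * i.e. the executable path, then "pid hits reason thread", one
+ * "pc mapname" line per backtrace frame, and a terminating dot.
+ */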
+
+static ssize_t relax_vfile_store(struct xnvfile_input *input)
+{
+	struct relax_record *p, *np;
+	spl_t s;
+
+	/*
+	 * Flush out all records. Races with ->show() are prevented
+	 * using the relax_mutex lock. The vfile layer takes care of
+	 * this internally.
+	 */
+	xnlock_get_irqsave(&relax_lock, s);
+	p = relax_record_list;
+	relax_record_list = NULL;
+	relax_overall = 0;
+	relax_queued = 0;
+	memset(relax_jhash, 0, sizeof(relax_jhash));
+	xnlock_put_irqrestore(&relax_lock, s);
+
+	while (p) {
+		np = p->r_next;
+		xnheap_free(&memory_pool, p);
+		p = np;
+	}
+
+	return input->size;
+}
+
+static struct xnvfile_regular_ops relax_vfile_ops = {
+	.begin = relax_vfile_begin,
+	.next = relax_vfile_next,
+	.show = relax_vfile_show,
+	.store = relax_vfile_store,
+};
+
+static struct xnvfile_regular relax_vfile = {
+	.privsz = sizeof(struct relax_vfile_priv),
+	.ops = &relax_vfile_ops,
+	.entry = { .lockops = &relax_mutex.ops },
+};
+
+static inline int init_trace_relax(void)
+{
+	u32 size = CONFIG_XENO_OPT_DEBUG_TRACE_LOGSZ * 1024;
+	void *p;
+	int ret;
+
+	p = vmalloc(size);
+	if (p == NULL)
+		return -ENOMEM;
+
+	ret = xnheap_init(&memory_pool, p, size);
+	if (ret)
+		return ret;
+
+	xnheap_set_name(&memory_pool, "debug log");
+
+	ret = xnvfile_init_regular("relax", &relax_vfile, &cobalt_debug_vfroot);
+	if (ret) {
+		xnheap_destroy(&memory_pool);
+		vfree(p);
+	}
+
+	return ret;
+}
+
+static inline void cleanup_trace_relax(void)
+{
+	void *p;
+
+	xnvfile_destroy_regular(&relax_vfile);
+	p = xnheap_get_membase(&memory_pool);
+	xnheap_destroy(&memory_pool);
+	vfree(p);
+}
+
+#else /* !CONFIG_XENO_OPT_DEBUG_TRACE_RELAX */
+
+static inline int init_trace_relax(void)
+{
+	return 0;
+}
+
+static inline void cleanup_trace_relax(void)
+{
+}
+
+static inline void init_thread_relax_trace(struct xnthread *thread)
+{
+}
+
+#endif /* !CONFIG_XENO_OPT_DEBUG_TRACE_RELAX */
+
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+
+void xnlock_dbg_prepare_acquire(unsigned long long *start)
+{
+	*start = xnclock_read_raw(&nkclock);
+}
+EXPORT_SYMBOL_GPL(xnlock_dbg_prepare_acquire);
+
+void xnlock_dbg_acquired(struct xnlock *lock, int cpu, unsigned long long *start,
+			 const char *file, int line, const char *function)
+{
+	lock->lock_date = *start;
+	lock->spin_time = xnclock_read_raw(&nkclock) - *start;
+	lock->file = file;
+	lock->function = function;
+	lock->line = line;
+	lock->cpu = cpu;
+}
+EXPORT_SYMBOL_GPL(xnlock_dbg_acquired);
+
+int xnlock_dbg_release(struct xnlock *lock,
+		       const char *file, int line, const char *function)
+{
+	unsigned long long lock_time;
+	struct xnlockinfo *stats;
+	int cpu;
+
+	lock_time = xnclock_read_raw(&nkclock) - lock->lock_date;
+	cpu = raw_smp_processor_id();
+	stats = &per_cpu(xnlock_stats, cpu);
+
+	if (lock->file == NULL) {
+		lock->file = "??";
+		lock->line = 0;
+		lock->function = "invalid";
+	}
+
+	if (unlikely(lock->owner != cpu)) {
+		pipeline_prepare_panic();
+		printk(XENO_ERR "lock %p already unlocked on CPU #%d\n"
+				"          last owner = %s:%u (%s(), CPU #%d)\n",
+		       lock, cpu, lock->file, lock->line, lock->function,
+		       lock->cpu);
+		dump_stack();
+		return 1;
+	}
+
+	/* Record that we released the lock. */
+	lock->cpu = -lock->cpu;
+	lock->file = file;
+	lock->line = line;
+	lock->function = function;
+
+	if (lock_time > stats->lock_time) {
+		stats->lock_time = lock_time;
+		stats->spin_time = lock->spin_time;
+		stats->file = lock->file;
+		stats->function = lock->function;
+		stats->line = lock->line;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnlock_dbg_release);
+
+#endif /* CONFIG_XENO_OPT_DEBUG_LOCKING */
+
+void xndebug_shadow_init(struct xnthread *thread)
+{
+	struct cobalt_ppd *sys_ppd;
+	size_t len;
+
+	sys_ppd = cobalt_ppd_get(0);
+	/*
+	 * The caller is current, so we know for sure that sys_ppd
+	 * will still be valid after we dropped the lock.
+	 *
+	 * NOTE: Kernel shadows all share the system global ppd
+	 * descriptor with no refcounting.
+	 */
+	thread->exe_path = sys_ppd->exe_path ?: "(unknown)";
+	/*
+	 * The program hash value is a unique token debug features may
+	 * use to identify all threads which belong to a given
+	 * executable file. Using this value for quick probes is often
+	 * handier and more efficient than testing the whole exe_path.
+	 */
+	len = strlen(thread->exe_path);
+	thread->proghash = jhash(thread->exe_path, len, 0);
+}
+
+int xndebug_init(void)
+{
+	int ret;
+
+	ret = init_trace_relax();
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+void xndebug_cleanup(void)
+{
+	cleanup_trace_relax();
+}
+
+/** @} */
+++ linux-patched/kernel/xenomai/timer.c	2022-03-21 12:58:28.717895514 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/Makefile	2022-03-21 12:57:23.892527656 +0100
+/*
+ * Copyright (C) 2001,2002,2003,2007,2012 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/sched.h>
+#include <pipeline/tick.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/trace.h>
+#include <cobalt/kernel/arith.h>
+#include <trace/events/cobalt-core.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_timer Timer services
+ *
+ * The Xenomai timer facility depends on a clock source (xnclock) for
+ * scheduling the next activation times.
+ *
+ * The core provides and depends on a monotonic clock source (nkclock)
+ * with nanosecond resolution, driving the platform timer hardware
+ * exposed by the interrupt pipeline.
+ *
+ * @{
+ */
+
+int xntimer_heading_p(struct xntimer *timer)
+{
+	struct xnsched *sched = timer->sched;
+	xntimerq_t *q;
+	xntimerh_t *h;
+
+	q = xntimer_percpu_queue(timer);
+	h = xntimerq_head(q);
+	if (h == &timer->aplink)
+		return 1;
+
+	if (sched->lflags & XNHDEFER) {
+		h = xntimerq_second(q, h);
+		if (h == &timer->aplink)
+			return 1;
+	}
+
+	return 0;
+}
+
+void xntimer_enqueue_and_program(struct xntimer *timer, xntimerq_t *q)
+{
+	struct xnsched *sched = xntimer_sched(timer);
+
+	xntimer_enqueue(timer, q);
+	if (pipeline_must_force_program_tick(sched) || xntimer_heading_p(timer)) {
+		struct xnclock *clock = xntimer_clock(timer);
+
+		if (sched != xnsched_current())
+			xnclock_remote_shot(clock, sched);
+		else
+			xnclock_program_shot(clock, sched);
+	}
+}
+
+/**
+ * Arm a timer.
+ *
+ * Activates a timer so that the associated timeout handler will be
+ * fired after each expiration time. A timer can be either periodic or
+ * one-shot, depending on the reload value passed to this routine. The
+ * given timer must have been previously initialized.
+ *
+ * A timer is attached to the clock specified in xntimer_init().
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @param value The date of the initial timer shot, expressed in
+ * nanoseconds.
+ *
+ * @param interval The reload value of the timer. It is a periodic
+ * interval value to be used for reprogramming the next timer shot,
+ * expressed in nanoseconds. If @a interval is equal to XN_INFINITE,
+ * the timer will not be reloaded after it has expired.
+ *
+ * @param mode The timer mode. It can be XN_RELATIVE if @a value shall
+ * be interpreted as a relative date, XN_ABSOLUTE for an absolute date
+ * based on the monotonic clock of the related time base (as returned
+ * by xnclock_read_monotonic()), or XN_REALTIME if the absolute date
+ * is based on the adjustable real-time date for the relevant clock
+ * (obtained from xnclock_read_realtime()).
+ *
+ * @return 0 is returned upon success, or -ETIMEDOUT if an absolute
+ * date in the past has been given. In such an event, the timer is
+ * nevertheless armed for the next shot in the timeline if @a interval
+ * is different from XN_INFINITE.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+int xntimer_start(struct xntimer *timer,
+		  xnticks_t value, xnticks_t interval,
+		  xntmode_t mode)
+{
+	struct xnclock *clock = xntimer_clock(timer);
+	xntimerq_t *q = xntimer_percpu_queue(timer);
+	xnticks_t date, now, delay, period;
+	unsigned long gravity;
+	int ret = 0;
+
+	atomic_only();
+
+	trace_cobalt_timer_start(timer, value, interval, mode);
+
+	if ((timer->status & XNTIMER_DEQUEUED) == 0)
+		xntimer_dequeue(timer, q);
+
+	now = xnclock_read_raw(clock);
+
+	timer->status &= ~(XNTIMER_REALTIME | XNTIMER_FIRED | XNTIMER_PERIODIC);
+	switch (mode) {
+	case XN_RELATIVE:
+		if ((xnsticks_t)value < 0)
+			return -ETIMEDOUT;
+		date = xnclock_ns_to_ticks(clock, value) + now;
+		break;
+	case XN_REALTIME:
+		timer->status |= XNTIMER_REALTIME;
+		value -= xnclock_get_offset(clock);
+		fallthrough;
+	default: /* XN_ABSOLUTE || XN_REALTIME */
+		date = xnclock_ns_to_ticks(clock, value);
+		if ((xnsticks_t)(date - now) <= 0) {
+			if (interval == XN_INFINITE)
+				return -ETIMEDOUT;
+			/*
+			 * We are late on arrival for the first
+			 * delivery, wait for the next shot on the
+			 * periodic time line.
+			 */
+			delay = now - date;
+			period = xnclock_ns_to_ticks(clock, interval);
+			date += period * (xnarch_div64(delay, period) + 1);
+		}
+		break;
+	}
+
+	/*
+	 * To cope with the basic system latency, we apply a clock
+	 * gravity value, which is the amount of time expressed in
+	 * clock ticks by which we should anticipate the shot for any
+	 * outstanding timer. The gravity value varies with the type
+	 * of context the timer wakes up, i.e. irq handler, kernel or
+	 * user thread.
+	 */
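+	/*
+	 * For instance (made-up figures), with gravity = 2000 ticks a
+	 * shot due at date = 1000000 is programmed for 998000; if that
+	 * anticipated date is already in the past, it is moved to
+	 * 999000 (date - gravity/2) instead.
+	 */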
+	gravity = xntimer_gravity(timer);
+	xntimerh_date(&timer->aplink) = date - gravity;
+	if (now >= xntimerh_date(&timer->aplink))
+		xntimerh_date(&timer->aplink) += gravity / 2;
+
+	timer->interval_ns = XN_INFINITE;
+	timer->interval = XN_INFINITE;
+	if (interval != XN_INFINITE) {
+		timer->interval_ns = interval;
+		timer->interval = xnclock_ns_to_ticks(clock, interval);
+		timer->periodic_ticks = 0;
+		timer->start_date = date;
+		timer->pexpect_ticks = 0;
+		timer->status |= XNTIMER_PERIODIC;
+	}
+
+	timer->status |= XNTIMER_RUNNING;
+	xntimer_enqueue_and_program(timer, q);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xntimer_start);
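+
+/*
+ * Illustrative sketch: arming a 1 ms periodic timer on the core clock,
+ * with "sample_handler" being a hypothetical expiry callback. The
+ * start call runs from an atomic section as required.
+ *
+ *	static void sample_handler(struct xntimer *timer);
+ *
+ *	struct xntimer t;
+ *	spl_t s;
+ *
+ *	xntimer_init(&t, &nkclock, sample_handler, NULL, XNTIMER_IGRAVITY);
+ *	xnlock_get_irqsave(&nklock, s);
+ *	xntimer_start(&t, 1000000, 1000000, XN_RELATIVE);
+ *	xnlock_put_irqrestore(&nklock, s);
+ */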
+
+/**
+ * @fn int xntimer_stop(struct xntimer *timer)
+ *
+ * @brief Disarm a timer.
+ *
+ * This service deactivates a timer previously armed using
+ * xntimer_start(). Once disarmed, the timer can subsequently be
+ * re-armed by calling xntimer_start() again.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+void __xntimer_stop(struct xntimer *timer)
+{
+	struct xnclock *clock = xntimer_clock(timer);
+	xntimerq_t *q = xntimer_percpu_queue(timer);
+	struct xnsched *sched;
+	int heading = 1;
+
+	atomic_only();
+
+	trace_cobalt_timer_stop(timer);
+
+	if ((timer->status & XNTIMER_DEQUEUED) == 0) {
+		heading = xntimer_heading_p(timer);
+		xntimer_dequeue(timer, q);
+	}
+	timer->status &= ~(XNTIMER_FIRED|XNTIMER_RUNNING);
+	sched = xntimer_sched(timer);
+
+	/*
+	 * If we removed the heading timer, reprogram the next shot if
+	 * any. If the timer was running on another CPU, let it tick.
+	 */
+	if (heading && sched == xnsched_current())
+		xnclock_program_shot(clock, sched);
+}
+EXPORT_SYMBOL_GPL(__xntimer_stop);
+
+/**
+ * @fn xnticks_t xntimer_get_date(struct xntimer *timer)
+ *
+ * @brief Return the absolute expiration date.
+ *
+ * Return the next expiration date of a timer as an absolute count of
+ * nanoseconds.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @return The expiration date in nanoseconds. The special value
+ * XN_INFINITE is returned if @a timer is currently disabled.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+xnticks_t xntimer_get_date(struct xntimer *timer)
+{
+	atomic_only();
+
+	if (!xntimer_running_p(timer))
+		return XN_INFINITE;
+
+	return xnclock_ticks_to_ns(xntimer_clock(timer), xntimer_expiry(timer));
+}
+EXPORT_SYMBOL_GPL(xntimer_get_date);
+
+/**
+ * @fn xnticks_t xntimer_get_timeout(struct xntimer *timer)
+ *
+ * @brief Return the relative expiration date.
+ *
+ * This call returns the count of nanoseconds remaining until the
+ * timer expires.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @return The count of nanoseconds until expiry. The special value
+ * XN_INFINITE is returned if @a timer is currently disabled.  It
+ * might happen that the timer expires when this service runs (even if
+ * the associated handler has not been fired yet); in such a case, 1
+ * is returned.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+xnticks_t __xntimer_get_timeout(struct xntimer *timer)
+{
+	struct xnclock *clock;
+	xnticks_t expiry, now;
+
+	atomic_only();
+
+	clock = xntimer_clock(timer);
+	now = xnclock_read_raw(clock);
+	expiry = xntimer_expiry(timer);
+	if (expiry < now)
+		return 1;  /* Will elapse shortly. */
+
+	return xnclock_ticks_to_ns(clock, expiry - now);
+}
+EXPORT_SYMBOL_GPL(__xntimer_get_timeout);
+
+/**
+ * @fn void xntimer_init(struct xntimer *timer,struct xnclock *clock,void (*handler)(struct xntimer *timer), struct xnsched *sched, int flags)
+ * @brief Initialize a timer object.
+ *
+ * Creates a timer. When created, a timer is left disarmed; it must be
+ * started using xntimer_start() in order to be activated.
+ *
+ * @param timer The address of a timer descriptor the nucleus will use
+ * to store the object-specific data.  This descriptor must always be
+ * valid while the object is active, therefore it must be allocated in
+ * permanent memory.
+ *
+ * @param clock The clock the timer relates to. Xenomai defines a
+ * monotonic system clock, with nanosecond resolution, named
+ * nkclock. In addition, external clocks driven by other tick sources
+ * may be created dynamically if CONFIG_XENO_OPT_EXTCLOCK is defined.
+ *
+ * @param handler The routine to call upon expiration of the timer.
+ *
+ * @param sched An optional pointer to the per-CPU scheduler slot the
+ * new timer is affine to. If non-NULL, the timer will fire on the CPU
+ * @a sched is bound to, otherwise it will fire either on the current
+ * CPU if real-time, or on the first real-time CPU.
+ *
+ * @param flags A set of flags describing the timer. A set of clock
+ * gravity hints can be passed via the @a flags argument, used for
+ * optimizing the built-in heuristics aimed at latency reduction:
+ *
+ * - XNTIMER_IGRAVITY, the timer activates a leaf timer handler.
+ * - XNTIMER_KGRAVITY, the timer activates a kernel thread.
+ * - XNTIMER_UGRAVITY, the timer activates a user-space thread.
+ *
+ * There is no limitation on the number of timers which can be
+ * created/active concurrently.
+ *
+ * @coretags{unrestricted}
+ */
+#ifdef DOXYGEN_CPP
+void xntimer_init(struct xntimer *timer, struct xnclock *clock,
+		  void (*handler)(struct xntimer *timer),
+		  struct xnsched *sched,
+		  int flags);
+#endif
+
+void __xntimer_init(struct xntimer *timer,
+		    struct xnclock *clock,
+		    void (*handler)(struct xntimer *timer),
+		    struct xnsched *sched,
+		    int flags)
+{
+	spl_t s __maybe_unused;
+
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+	timer->clock = clock;
+#endif
+	xntimerh_init(&timer->aplink);
+	xntimerh_date(&timer->aplink) = XN_INFINITE;
+	xntimer_set_priority(timer, XNTIMER_STDPRIO);
+	timer->status = (XNTIMER_DEQUEUED|(flags & XNTIMER_INIT_MASK));
+	timer->handler = handler;
+	timer->interval_ns = 0;
+	timer->sched = NULL;
+
+	/*
+	 * Set the timer affinity, preferably to xnsched_cpu(sched) if
+	 * sched was given, CPU0 otherwise.
+	 */
+	if (sched == NULL)
+		sched = xnsched_struct(0);
+
+	xntimer_set_affinity(timer, sched);
+
+#ifdef CONFIG_XENO_OPT_STATS
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+	timer->tracker = clock;
+#endif
+	ksformat(timer->name, XNOBJECT_NAME_LEN, "%d/%s",
+		 task_pid_nr(current), current->comm);
+	xntimer_reset_stats(timer);
+	xnlock_get_irqsave(&nklock, s);
+	list_add_tail(&timer->next_stat, &clock->timerq);
+	clock->nrtimers++;
+	xnvfile_touch(&clock->timer_vfile);
+	xnlock_put_irqrestore(&nklock, s);
+#endif /* CONFIG_XENO_OPT_STATS */
+}
+EXPORT_SYMBOL_GPL(__xntimer_init);
+
+void xntimer_set_gravity(struct xntimer *timer, int gravity)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	timer->status &= ~XNTIMER_GRAVITY_MASK;
+	timer->status |= gravity;
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xntimer_set_gravity);
+
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+
+#ifdef CONFIG_XENO_OPT_STATS
+
+static void __xntimer_switch_tracking(struct xntimer *timer,
+				      struct xnclock *newclock)
+{
+	struct xnclock *oldclock = timer->tracker;
+
+	list_del(&timer->next_stat);
+	oldclock->nrtimers--;
+	xnvfile_touch(&oldclock->timer_vfile);
+	list_add_tail(&timer->next_stat, &newclock->timerq);
+	newclock->nrtimers++;
+	xnvfile_touch(&newclock->timer_vfile);
+	timer->tracker = newclock;
+}
+
+void xntimer_switch_tracking(struct xntimer *timer,
+			     struct xnclock *newclock)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	__xntimer_switch_tracking(timer, newclock);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xntimer_switch_tracking);
+
+#else
+
+static inline
+void __xntimer_switch_tracking(struct xntimer *timer,
+			       struct xnclock *newclock)
+{ }
+
+#endif /* CONFIG_XENO_OPT_STATS */
+
+/**
+ * @brief Set the reference clock of a timer.
+ *
+ * This service changes the reference clock pacing a timer. If the
+ * clock timers are tracked, the tracking information is updated too.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @param newclock The address of a valid clock descriptor.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+void xntimer_set_clock(struct xntimer *timer,
+		       struct xnclock *newclock)
+{
+	atomic_only();
+
+	if (timer->clock != newclock) {
+		xntimer_stop(timer);
+		timer->clock = newclock;
+		/*
+		 * Since the timer was stopped, we can wait until it
+		 * is restarted for fixing its CPU affinity.
+		 */
+		__xntimer_switch_tracking(timer, newclock);
+	}
+}
+
+#endif /* CONFIG_XENO_OPT_EXTCLOCK */
+
+/**
+ * @fn void xntimer_destroy(struct xntimer *timer)
+ *
+ * @brief Release a timer object.
+ *
+ * Destroys a timer. After it has been destroyed, all resources
+ * associated with the timer have been released. The timer is
+ * automatically deactivated before deletion if active on entry.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @coretags{unrestricted}
+ */
+void xntimer_destroy(struct xntimer *timer)
+{
+	struct xnclock *clock __maybe_unused = xntimer_clock(timer);
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	xntimer_stop(timer);
+	timer->status |= XNTIMER_KILLED;
+	timer->sched = NULL;
+#ifdef CONFIG_XENO_OPT_STATS
+	list_del(&timer->next_stat);
+	clock->nrtimers--;
+	xnvfile_touch(&clock->timer_vfile);
+#endif /* CONFIG_XENO_OPT_STATS */
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xntimer_destroy);
+
+#ifdef CONFIG_SMP
+
+/**
+ * Migrate a timer.
+ *
+ * This call migrates a timer to another cpu. In order to avoid
+ * pathological cases, it must be called from the CPU to which @a
+ * timer is currently attached.
+ *
+ * @param timer The address of the timer object to be migrated.
+ *
+ * @param sched The address of the destination per-CPU scheduler
+ * slot.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+void __xntimer_migrate(struct xntimer *timer, struct xnsched *sched)
+{				/* nklocked, IRQs off, sched != timer->sched */
+	struct xnclock *clock;
+	xntimerq_t *q;
+
+	trace_cobalt_timer_migrate(timer, xnsched_cpu(sched));
+
+	/*
+	 * This assertion triggers when the timer is migrated to a CPU
+	 * for which we do not expect any clock events/IRQs from the
+	 * associated clock device. If so, the timer would never fire
+	 * since clock ticks would never happen on that CPU.
+	 */
+	XENO_WARN_ON_SMP(COBALT,
+			 !cpumask_empty(&xntimer_clock(timer)->affinity) &&
+			 !cpumask_test_cpu(xnsched_cpu(sched),
+					   &xntimer_clock(timer)->affinity));
+
+	if (timer->status & XNTIMER_RUNNING) {
+		xntimer_stop(timer);
+		timer->sched = sched;
+		clock = xntimer_clock(timer);
+		q = xntimer_percpu_queue(timer);
+		xntimer_enqueue(timer, q);
+		if (xntimer_heading_p(timer))
+			xnclock_remote_shot(clock, sched);
+	} else
+		timer->sched = sched;
+}
+EXPORT_SYMBOL_GPL(__xntimer_migrate);
+
+static inline int get_clock_cpu(struct xnclock *clock, int cpu)
+{
+	/*
+	 * Check a CPU number against the possible set of CPUs
+	 * receiving events from the underlying clock device. If the
+	 * suggested CPU does not receive events from this device,
+	 * return the first one which does instead.
+	 *
+	 * A global clock device with no particular IRQ affinity may
+	 * tick on any CPU, but timers should always be queued on
+	 * CPU0.
+	 *
+	 * NOTE: we have scheduler slots initialized for all online
+	 * CPUs, we can program and receive clock ticks on any of
+	 * them. So there is no point in restricting the valid CPU set
+	 * to cobalt_cpu_affinity, which specifically refers to the
+	 * set of CPUs which may run real-time threads. Although
+	 * receiving a clock tick for waking up a thread living on a
+	 * remote CPU is not optimal since this involves IPI-signaled
+	 * rescheds, this is still a valid case.
+	 */
+	if (cpumask_empty(&clock->affinity))
+		return 0;
+
+	if (cpumask_test_cpu(cpu, &clock->affinity))
+		return cpu;
+
+	return cpumask_first(&clock->affinity);
+}
+
+void __xntimer_set_affinity(struct xntimer *timer, struct xnsched *sched)
+{				/* nklocked, IRQs off */
+	struct xnclock *clock = xntimer_clock(timer);
+	int cpu;
+
+	/*
+	 * Figure out which CPU is best suited for managing this
+	 * timer, preferably picking xnsched_cpu(sched) if the ticking
+	 * device moving the timer clock beats on that CPU. Otherwise,
+	 * pick the first CPU from the clock affinity mask if set. If
+	 * not, the timer is backed by a global device with no
+	 * particular IRQ affinity, so it should always be queued to
+	 * CPU0.
+	 */
+	cpu = 0;
+	if (!cpumask_empty(&clock->affinity))
+		cpu = get_clock_cpu(clock, xnsched_cpu(sched));
+
+	xntimer_migrate(timer, xnsched_struct(cpu));
+}
+EXPORT_SYMBOL_GPL(__xntimer_set_affinity);
+
+#endif /* CONFIG_SMP */
+
+/**
+ * Get the count of overruns for the last tick.
+ *
+ * This service returns the count of pending overruns for the last
+ * tick of a given timer, as measured by the difference between the
+ * expected expiry date of the timer and the date @a now passed as
+ * argument.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @param waiter The thread for which the overrun count is being
+ * collected.
+ *
+ * @param now The current date, as returned by
+ * xnclock_read_raw(xntimer_clock(timer)).
+ *
+ * @return The number of overruns of @a timer at date @a now.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+unsigned long long xntimer_get_overruns(struct xntimer *timer,
+					struct xnthread *waiter,
+					xnticks_t now)
+{
+	xnticks_t period = timer->interval;
+	unsigned long long overruns = 0;
+	xnsticks_t delta;
+	xntimerq_t *q;
+
+	atomic_only();
+
+	delta = now - xntimer_pexpect(timer);
+	if (unlikely(delta >= (xnsticks_t) period)) {
+		period = timer->interval_ns;
+		delta = xnclock_ticks_to_ns(xntimer_clock(timer), delta);
+		overruns = xnarch_div64(delta, period);
+		timer->pexpect_ticks += overruns;
+		if (xntimer_running_p(timer)) {
+			XENO_BUG_ON(COBALT, (timer->status &
+				    (XNTIMER_DEQUEUED|XNTIMER_PERIODIC))
+				    != XNTIMER_PERIODIC);
+			q = xntimer_percpu_queue(timer);
+			xntimer_dequeue(timer, q);
+			while (xntimerh_date(&timer->aplink) < now) {
+				timer->periodic_ticks++;
+				xntimer_update_date(timer);
+			}
+			xntimer_enqueue_and_program(timer, q);
+		}
+	}
+
+	timer->pexpect_ticks++;
+
+	/* Hide overruns due to the most recent ptracing session. */
+	if (xnthread_test_localinfo(waiter, XNHICCUP))
+		return 0;
+
+	return overruns;
+}
+EXPORT_SYMBOL_GPL(xntimer_get_overruns);
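+
+/*
+ * Worked example (assuming a clock with 1:1 tick-to-ns resolution): for
+ * a periodic timer with a 10ms interval whose expected expiry lags the
+ * current date by 35ms, delta / period yields 3 overruns;
+ * pexpect_ticks then advances by those 3 plus 1 for the tick being
+ * acknowledged, realigning the expected expiry date for the next wait.
+ */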
+
+char *xntimer_format_time(xnticks_t ns, char *buf, size_t bufsz)
+{
+	unsigned long ms, us, rem;
+	int len = (int)bufsz;
+	char *p = buf;
+	xnticks_t sec;
+
+	if (ns == 0 && bufsz > 1) {
+		strcpy(buf, "-");
+		return buf;
+	}
+
+	sec = xnclock_divrem_billion(ns, &rem);
+	us = rem / 1000;
+	ms = us / 1000;
+	us %= 1000;
+
+	if (sec) {
+		p += ksformat(p, bufsz, "%Lus", sec);
+		len = bufsz - (p - buf);
+	}
+
+	if (len > 0 && (ms || (sec && us))) {
+		p += ksformat(p, bufsz - (p - buf), "%lums", ms);
+		len = bufsz - (p - buf);
+	}
+
+	if (len > 0 && us)
+		p += ksformat(p, bufsz - (p - buf), "%luus", us);
+
+	return buf;
+}
+EXPORT_SYMBOL_GPL(xntimer_format_time);
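+
+/*
+ * Formatting examples (large enough buffer assumed):
+ * 0 -> "-", 1500000 -> "1ms500us", 2000000500 -> "2s",
+ * 3020000000 -> "3s20ms".
+ */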
+
+#if defined(CONFIG_XENO_OPT_TIMER_RBTREE)
+static inline bool xntimerh_is_lt(xntimerh_t *left, xntimerh_t *right)
+{
+	return left->date < right->date
+		|| (left->date == right->date && left->prio > right->prio);
+}
+
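+/*
+ * Ordering example: holders are sorted by ascending date, ties broken
+ * by descending priority, so a holder at date 100/prio 2 is dequeued
+ * before one at date 100/prio 1, and both come before any holder at
+ * date 200.
+ */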
+void xntimerq_insert(xntimerq_t *q, xntimerh_t *holder)
+{
+	struct rb_node **new = &q->root.rb_node, *parent = NULL;
+
+	if (!q->head)
+		q->head = holder;
+	else if (xntimerh_is_lt(holder, q->head)) {
+		parent = &q->head->link;
+		new = &parent->rb_left;
+		q->head = holder;
+	} else while (*new) {
+		xntimerh_t *i = container_of(*new, xntimerh_t, link);
+
+		parent = *new;
+		if (xntimerh_is_lt(holder, i))
+			new = &((*new)->rb_left);
+		else
+			new = &((*new)->rb_right);
+	}
+
+	rb_link_node(&holder->link, parent, new);
+	rb_insert_color(&holder->link, &q->root);
+}
+#endif
+
+/** @} */
+++ linux-patched/kernel/Makefile	2022-03-21 12:58:28.219900370 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:163 @
 	$(call cmd,genikh)
 
 clean-files := kheaders_data.tar.xz kheaders.md5
--- linux/drivers/xenomai/spi/spi-device.h	1970-01-01 01:00:00.000000000 +0100
+
+obj-$(CONFIG_XENOMAI) += xenomai/
+++ linux-patched/drivers/xenomai/spi/spi-device.h	2022-03-21 12:58:31.487868503 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/spi/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_SPI_DEVICE_H
+#define _RTDM_SPI_DEVICE_H
+
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/mutex.h>
+#include <rtdm/driver.h>
+#include <rtdm/uapi/spi.h>
+
+struct class;
+struct rtdm_spi_master;
+
+struct rtdm_spi_remote_slave {
+	u8 chip_select;
+	int cs_gpio;
+	struct gpio_desc *cs_gpiod;
+	struct rtdm_device dev;
+	struct list_head next;
+	struct rtdm_spi_config config;
+	struct rtdm_spi_master *master;
+	atomic_t mmap_refs;
+	struct mutex ctl_lock;
+};
+
+static inline struct device *
+slave_to_kdev(struct rtdm_spi_remote_slave *slave)
+{
+	return rtdm_dev_to_kdev(&slave->dev);
+}
+
+int rtdm_spi_add_remote_slave(struct rtdm_spi_remote_slave *slave,
+			      struct rtdm_spi_master *spim,
+			      struct spi_device *spi);
+
+void rtdm_spi_remove_remote_slave(struct rtdm_spi_remote_slave *slave);
+
+#endif /* !_RTDM_SPI_DEVICE_H */
+++ linux-patched/drivers/xenomai/spi/Kconfig	2022-03-21 12:58:31.480868571 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/spi/spi-device.c	1970-01-01 01:00:00.000000000 +0100
+menu "Real-time SPI master drivers"
+
+config XENO_DRIVERS_SPI
+       depends on SPI
+       tristate
+
+config XENO_DRIVERS_SPI_BCM2835
+	depends on ARCH_BCM2708 || ARCH_BCM2835
+	select XENO_DRIVERS_SPI
+	tristate "Support for BCM2835 SPI"
+	help
+
+	Enables support for the SPI0 controller found on Broadcom's
+	BCM2835 SoC.
+
+config XENO_DRIVERS_SPI_SUN6I
+	depends on MACH_SUN6I || MACH_SUN8I
+	select XENO_DRIVERS_SPI
+	tristate "Support for A31/H3 SoC SPI"
+	help
+
+	Enables support for the SPI controller found on Allwinner's
+	A31 and H3 SoCs.
+
+config XENO_DRIVERS_SPI_OMAP2_MCSPI_RT
+	tristate "McSPI rt-driver for OMAP"
+	depends on HAS_DMA
+	depends on ARCH_OMAP2PLUS || COMPILE_TEST
+	select XENO_DRIVERS_SPI
+	help
+
+	Real-time SPI master driver for the Multichannel SPI (McSPI)
+	modules found on OMAP24XX and later SoCs.
+
+config XENO_DRIVERS_SPI_DEBUG
+       depends on XENO_DRIVERS_SPI
+       bool "Enable SPI core debugging features"
+       
+endmenu
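+
+# Example (hypothetical .config fragment): enabling one of the bus
+# front-ends pulls in the SPI core via "select", e.g. on an AM335x:
+#   CONFIG_XENO_DRIVERS_SPI_OMAP2_MCSPI_RT=m
+#   CONFIG_XENO_DRIVERS_SPI=m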
+++ linux-patched/drivers/xenomai/spi/spi-device.c	2022-03-21 12:58:31.473868639 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/spi/spi-omap2-mcspi-rt.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include "spi-master.h"
+
+int rtdm_spi_add_remote_slave(struct rtdm_spi_remote_slave *slave,
+			      struct rtdm_spi_master *master,
+			      struct spi_device *spi)
+{
+	struct spi_master *kmaster = master->kmaster;
+	struct rtdm_device *dev;
+	rtdm_lockctx_t c;
+	int ret;
+
+	memset(slave, 0, sizeof(*slave));
+	slave->chip_select = spi->chip_select;
+	slave->config.bits_per_word = spi->bits_per_word;
+	slave->config.speed_hz = spi->max_speed_hz;
+	slave->config.mode = spi->mode;
+	slave->master = master;
+	
+	dev = &slave->dev;
+	dev->driver = &master->driver;
+	dev->label = kasprintf(GFP_KERNEL, "%s/slave%d.%%d",
+			       dev_name(&kmaster->dev),
+			       kmaster->bus_num);
+	if (dev->label == NULL)
+		return -ENOMEM;
+
+	if (gpio_is_valid(spi->cs_gpio))
+		slave->cs_gpio = spi->cs_gpio;
+	else {
+		slave->cs_gpio = -ENOENT;
+		if (kmaster->cs_gpios)
+			slave->cs_gpio = kmaster->cs_gpios[spi->chip_select];
+	}
+
+	if (gpio_is_valid(slave->cs_gpio)) {
+		ret = gpio_request(slave->cs_gpio, dev->label);
+		if (ret)
+			goto fail;
+		slave->cs_gpiod = gpio_to_desc(slave->cs_gpio);
+		if (slave->cs_gpiod == NULL)
+			goto fail;
+	}
+	
+	mutex_init(&slave->ctl_lock);
+
+	dev->device_data = master;
+	ret = rtdm_dev_register(dev);
+	if (ret)
+		goto fail;
+
+	rtdm_lock_get_irqsave(&master->lock, c);
+	list_add_tail(&slave->next, &master->slaves);
+	rtdm_lock_put_irqrestore(&master->lock, c);
+
+	return 0;
+fail:
+	kfree(dev->label);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_spi_add_remote_slave);
+
+void rtdm_spi_remove_remote_slave(struct rtdm_spi_remote_slave *slave)
+{
+	struct rtdm_spi_master *master = slave->master;
+	struct rtdm_device *dev;
+	rtdm_lockctx_t c;
+	
+	if (gpio_is_valid(slave->cs_gpio))
+		gpio_free(slave->cs_gpio);
+
+	mutex_destroy(&slave->ctl_lock);
+	rtdm_lock_get_irqsave(&master->lock, c);
+	list_del(&slave->next);
+	rtdm_lock_put_irqrestore(&master->lock, c);
+	dev = &slave->dev;
+	rtdm_dev_unregister(dev);
+	kfree(dev->label);
+}
+EXPORT_SYMBOL_GPL(rtdm_spi_remove_remote_slave);
+
+static int spi_device_probe(struct spi_device *spi)
+{
+	struct rtdm_spi_remote_slave *slave;
+	struct rtdm_spi_master *master;
+	int ret;
+
+	/*
+	 * Chicken and egg issue: we want the RTDM device class name
+	 * to duplicate the SPI master name, but that information is
+	 * only available after spi_register_master() has returned. We
+	 * solve this by initializing the RTDM driver descriptor on
+	 * the fly when the first SPI device on the bus is advertised
+	 * on behalf of spi_register_master().
+	 *
+	 * NOTE: the driver core guarantees serialization.
+	 */
+	master = spi_master_get_devdata(spi->master);
+	if (master->devclass == NULL) {
+		ret = __rtdm_spi_setup_driver(master);
+		if (ret)
+			return ret;
+	}
+
+	slave = master->ops->attach_slave(master, spi);
+	if (IS_ERR(slave))
+		return PTR_ERR(slave);
+
+	spi_set_drvdata(spi, slave);
+
+	return 0;
+}
+
+static int spi_device_remove(struct spi_device *spi)
+{
+	struct rtdm_spi_remote_slave *slave = spi_get_drvdata(spi);
+
+	slave->master->ops->detach_slave(slave);
+
+	return 0;
+}
+
+static const struct of_device_id spi_device_match[] = {
+	{
+		.compatible = "rtdm-spidev",
+	},
+	{ /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, spi_device_match);
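+
+/*
+ * Device tree example (illustrative): a slave handled by this driver
+ * is declared under its SPI controller node with the "rtdm-spidev"
+ * compatible string, using the standard SPI slave properties:
+ *
+ *	&spi0 {
+ *		slave@0 {
+ *			compatible = "rtdm-spidev";
+ *			reg = <0>;
+ *			spi-max-frequency = <10000000>;
+ *		};
+ *	};
+ */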
+
+static struct spi_driver spi_device_driver = {
+	.driver = {
+		.name =	"rtdm_spi_device",
+		.owner = THIS_MODULE,
+		.of_match_table = spi_device_match,
+	},
+	.probe	= spi_device_probe,
+	.remove	= spi_device_remove,
+};
+
+static int __init spi_device_init(void)
+{
+	int ret;
+
+	ret = spi_register_driver(&spi_device_driver);
+
+	return ret;
+}
+module_init(spi_device_init);
+
+static void __exit spi_device_exit(void)
+{
+	spi_unregister_driver(&spi_device_driver);
+
+}
+module_exit(spi_device_exit);
+++ linux-patched/drivers/xenomai/spi/spi-omap2-mcspi-rt.c	2022-03-21 12:58:31.465868717 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/spi/Makefile	1970-01-01 01:00:00.000000000 +0100
+/**
+ * I/O handling lifted from drivers/spi/spi-omap2-mcspi.c:
+ * Copyright (C) 2019 Laurentiu-Cristian Duca
+ *  <laurentiu [dot] duca [at] gmail [dot] com>
+ * RTDM integration by:
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/gcd.h>
+#include "spi-master.h"
+
+#define RTDM_SUBCLASS_OMAP2_MCSPI  3
+
+#define OMAP4_MCSPI_REG_OFFSET 0x100
+#define OMAP2_MCSPI_SPI_MODE_BITS	(SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
+#define OMAP2_MCSPI_MAX_FREQ		48000000
+#define OMAP2_MCSPI_DRIVER_MAX_FREQ	40000000
+#define OMAP2_MCSPI_MAX_DIVIDER		4096
+#define OMAP2_MCSPI_MAX_FIFODEPTH	64
+#define OMAP2_MCSPI_MAX_FIFOWCNT	0xFFFF
+#define SPI_AUTOSUSPEND_TIMEOUT		2000
+#define PM_NEGATIVE_DELAY			-2000
+
+#define OMAP2_MCSPI_REVISION		0x00
+#define OMAP2_MCSPI_SYSCONFIG		0x10
+#define OMAP2_MCSPI_SYSSTATUS		0x14
+#define OMAP2_MCSPI_IRQSTATUS		0x18
+#define OMAP2_MCSPI_IRQENABLE		0x1c
+#define OMAP2_MCSPI_WAKEUPENABLE	0x20
+#define OMAP2_MCSPI_SYST		0x24
+#define OMAP2_MCSPI_MODULCTRL		0x28
+#define OMAP2_MCSPI_XFERLEVEL		0x7c
+
+/* per-channel (chip select) banks, 0x14 bytes each, first is: */
+#define OMAP2_MCSPI_CHANNELBANK_SIZE	0x14
+#define OMAP2_MCSPI_CHCONF0		0x2c
+#define OMAP2_MCSPI_CHSTAT0		0x30
+#define OMAP2_MCSPI_CHCTRL0		0x34
+#define OMAP2_MCSPI_TX0			0x38
+#define OMAP2_MCSPI_RX0			0x3c
+
+/* per-register bitmasks: */
+#define OMAP2_MCSPI_IRQSTATUS_EOW		BIT(17)
+#define OMAP2_MCSPI_IRQSTATUS_RX1_FULL  BIT(6)
+#define OMAP2_MCSPI_IRQSTATUS_TX1_EMPTY	BIT(4)
+#define OMAP2_MCSPI_IRQSTATUS_RX0_FULL  BIT(2)
+#define OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY	BIT(0)
+
+#define OMAP2_MCSPI_IRQENABLE_EOW		BIT(17)
+#define OMAP2_MCSPI_IRQENABLE_RX1_FULL  BIT(6)
+#define OMAP2_MCSPI_IRQENABLE_TX1_EMPTY	BIT(4)
+#define OMAP2_MCSPI_IRQENABLE_RX0_FULL  BIT(2)
+#define OMAP2_MCSPI_IRQENABLE_TX0_EMPTY	BIT(0)
+
+#define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
+#define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
+#define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)
+
+#define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
+#define OMAP2_MCSPI_CHCONF_POL		BIT(1)
+#define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
+#define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
+#define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
+#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
+#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
+#define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
+#define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
+#define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
+#define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
+#define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
+#define OMAP2_MCSPI_CHCONF_IS		BIT(18)
+#define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
+#define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)
+#define OMAP2_MCSPI_CHCONF_FFET		BIT(27)
+#define OMAP2_MCSPI_CHCONF_FFER		BIT(28)
+#define OMAP2_MCSPI_CHCONF_CLKG		BIT(29)
+
+#define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
+#define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
+#define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)
+#define OMAP2_MCSPI_CHSTAT_TXFFE	BIT(3)
+
+#define OMAP2_MCSPI_CHCTRL_EN		BIT(0)
+#define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK	(0xff << 8)
+
+#define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)
+
+#define OMAP2_MCSPI_SYSCONFIG_CLOCKACTIVITY_MASK	(0x3 << 8)
+#define OMAP2_MCSPI_SYSCONFIG_SIDLEMODE_MASK		(0x3 << 3)
+#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET				BIT(1)
+#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE				BIT(0)
+
+#define OMAP2_MCSPI_SYSSTATUS_RESETDONE BIT(0)
+
+/* current version supports max 2 CS per module */
+#define OMAP2_MCSPI_CS_N	2
+
+#define MCSPI_PINDIR_D0_IN_D1_OUT	0
+#define MCSPI_PINDIR_D0_OUT_D1_IN	1
+
+struct omap2_mcspi_platform_config {
+	unsigned short	num_cs;
+	unsigned int regs_offset;
+	unsigned int pin_dir:1;
+};
+
+struct omap2_mcspi_cs {
+	/* CS channel */
+	void __iomem		*regs;
+	unsigned long		phys;
+	u8 chosen;
+};
+
+struct spi_master_omap2_mcspi {
+	struct rtdm_spi_master master;
+	void __iomem *regs;
+	unsigned long phys;
+	rtdm_irq_t irqh;
+	const u8 *tx_buf;
+	u8 *rx_buf;
+	int tx_len;
+	int rx_len;
+	int fifo_depth;
+	rtdm_event_t transfer_done;
+	rtdm_lock_t lock;
+	unsigned int pin_dir:1;
+	struct omap2_mcspi_cs cs[OMAP2_MCSPI_CS_N];
+	/* logging */
+	int n_rx_full;
+	int n_tx_empty;
+	int n_interrupts;
+};
+
+struct spi_slave_omap2_mcspi {
+	struct rtdm_spi_remote_slave slave;
+	void *io_virt;
+	dma_addr_t io_dma;
+	size_t io_len;
+};
+
+static inline struct spi_slave_omap2_mcspi *
+to_slave_omap2_mcspi(struct rtdm_spi_remote_slave *slave)
+{
+	return container_of(slave, struct spi_slave_omap2_mcspi, slave);
+}
+
+static inline struct spi_master_omap2_mcspi *
+to_master_omap2_mcspi(struct rtdm_spi_remote_slave *slave)
+{
+	return container_of(slave->master,
+			struct spi_master_omap2_mcspi, master);
+}
+
+static inline struct device *
+master_to_kdev(struct rtdm_spi_master *master)
+{
+	return &master->kmaster->dev;
+}
+
+static inline u32 mcspi_rd_reg(struct spi_master_omap2_mcspi *spim,
+			     unsigned int reg)
+{
+	return readl(spim->regs + reg);
+}
+
+static inline void mcspi_wr_reg(struct spi_master_omap2_mcspi *spim,
+			      unsigned int reg, u32 val)
+{
+	writel(val, spim->regs + reg);
+}
+
+static inline u32
+mcspi_rd_cs_reg(struct spi_master_omap2_mcspi *spim,
+				int cs_id, unsigned int reg)
+{
+	return readl(spim->cs[cs_id].regs + reg);
+}
+
+static inline void
+mcspi_wr_cs_reg(struct spi_master_omap2_mcspi *spim, int cs_id,
+				unsigned int reg, u32 val)
+{
+	writel(val, spim->cs[cs_id].regs + reg);
+}
+
+static void omap2_mcspi_init_hw(struct spi_master_omap2_mcspi *spim)
+{
+	u32 l;
+
+	l = mcspi_rd_reg(spim, OMAP2_MCSPI_SYSCONFIG);
+	/* CLOCKACTIVITY = 3h: OCP and Functional clocks are maintained */
+	l |= OMAP2_MCSPI_SYSCONFIG_CLOCKACTIVITY_MASK;
+	/* SIDLEMODE = 1h: ignore idle requests */
+	l &= ~OMAP2_MCSPI_SYSCONFIG_SIDLEMODE_MASK;
+	l |= 0x1 << 3;
+	/* AUTOIDLE=0: OCP clock is free-running */
+	l &= ~OMAP2_MCSPI_SYSCONFIG_AUTOIDLE;
+	mcspi_wr_reg(spim, OMAP2_MCSPI_SYSCONFIG, l);
+
+	/* Initialise the hardware with the default polarities (only omap2) */
+	mcspi_wr_reg(spim, OMAP2_MCSPI_WAKEUPENABLE,
+				 OMAP2_MCSPI_WAKEUPENABLE_WKEN);
+
+	/* Setup single-channel master mode */
+	l = mcspi_rd_reg(spim, OMAP2_MCSPI_MODULCTRL);
+	/* MS=0 => spi master */
+	l &= ~(OMAP2_MCSPI_MODULCTRL_STEST | OMAP2_MCSPI_MODULCTRL_MS);
+	l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
+	mcspi_wr_reg(spim, OMAP2_MCSPI_MODULCTRL, l);
+}
+
+static void omap2_mcspi_reset_hw(struct spi_master_omap2_mcspi *spim)
+{
+	u32 l;
+
+	l = mcspi_rd_reg(spim, OMAP2_MCSPI_SYSCONFIG);
+	l |= OMAP2_MCSPI_SYSCONFIG_SOFTRESET;
+	mcspi_wr_reg(spim, OMAP2_MCSPI_SYSCONFIG, l);
+	/* wait until reset is done */
+	do {
+		l = mcspi_rd_reg(spim, OMAP2_MCSPI_SYSSTATUS);
+		cpu_relax();
+	} while (!(l & OMAP2_MCSPI_SYSSTATUS_RESETDONE));
+}
+
+static void
+omap2_mcspi_chip_select(struct rtdm_spi_remote_slave *slave, bool active)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	u32 l;
+
+	/* FORCE: manual SPIEN assertion to keep SPIEN active */
+	l = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0);
+	/* "active" is the logical state, not the impedance level. */
+	if (active)
+		l |= OMAP2_MCSPI_CHCONF_FORCE;
+	else
+		l &= ~OMAP2_MCSPI_CHCONF_FORCE;
+	mcspi_wr_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0, l);
+	/* Flush posted writes */
+	l = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0);
+}
+
+static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
+{
+	u32 div;
+
+	for (div = 0; div < 15; div++)
+		if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
+			return div;
+
+	return 15;
+}
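+
+/*
+ * Example: for speed_hz = 10000 the loop stops at div = 13, since
+ * 48MHz >> 13 = 5859Hz is the first power-of-two divided rate not
+ * above the request; the channel then runs at 5859Hz.
+ */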
+
+/* channel 0 enable/disable */
+static void
+omap2_mcspi_channel_enable(struct rtdm_spi_remote_slave *slave, int enable)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	u32 l;
+
+	l = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCTRL0);
+	if (enable)
+		l |= OMAP2_MCSPI_CHCTRL_EN;
+	else
+		l &= ~OMAP2_MCSPI_CHCTRL_EN;
+	mcspi_wr_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCTRL0, l);
+	/* Flush posted writes */
+	l = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCTRL0);
+}
+
+/* called only when no transfer is active to this device */
+static int omap2_mcspi_configure(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	struct rtdm_spi_config *config = &slave->config;
+	u32 l = 0, clkd = 0, div = 1, extclk = 0, clkg = 0, word_len;
+	u32 speed_hz = OMAP2_MCSPI_MAX_FREQ;
+	u32 chctrl0;
+
+	/* The configuration parameters can be loaded in MCSPI_CH(i)CONF
+	 * only when the channel is disabled
+	 */
+	omap2_mcspi_channel_enable(slave, 0);
+
+	l = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0);
+
+	/* Set clock frequency. */
+	speed_hz = (u32) config->speed_hz;
+	if (speed_hz > OMAP2_MCSPI_DRIVER_MAX_FREQ) {
+		dev_warn(slave_to_kdev(slave),
+			"maximum clock frequency is %d\n",
+			OMAP2_MCSPI_DRIVER_MAX_FREQ);
+	}
+	speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_DRIVER_MAX_FREQ);
+	if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) {
+		clkd = omap2_mcspi_calc_divisor(speed_hz);
+		speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd;
+		clkg = 0;
+	} else {
+		div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
+		speed_hz = OMAP2_MCSPI_MAX_FREQ / div;
+		clkd = (div - 1) & 0xf;
+		extclk = (div - 1) >> 4;
+		clkg = OMAP2_MCSPI_CHCONF_CLKG;
+	}
+	/* set clock divisor */
+	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
+	l |= clkd << 2;
+	/* set clock granularity */
+	l &= ~OMAP2_MCSPI_CHCONF_CLKG;
+	l |= clkg;
+	if (clkg) {
+		chctrl0 = mcspi_rd_cs_reg(spim,
+			slave->chip_select, OMAP2_MCSPI_CHCTRL0);
+		chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
+		chctrl0 |= extclk << 8;
+		mcspi_wr_cs_reg(spim,
+			slave->chip_select, OMAP2_MCSPI_CHCTRL0, chctrl0);
+	}
+
+	if (spim->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
+		l &= ~OMAP2_MCSPI_CHCONF_IS;
+		l &= ~OMAP2_MCSPI_CHCONF_DPE1;
+		l |= OMAP2_MCSPI_CHCONF_DPE0;
+	} else {
+		l |= OMAP2_MCSPI_CHCONF_IS;
+		l |= OMAP2_MCSPI_CHCONF_DPE1;
+		l &= ~OMAP2_MCSPI_CHCONF_DPE0;
+	}
+
+	/* wordlength */
+	word_len = config->bits_per_word;
+	/* TODO: allow word_len != 8 */
+	if (word_len != 8) {
+		dev_err(slave_to_kdev(slave), "word_len(%d) != 8.\n",
+				word_len);
+		return -EIO;
+	}
+	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
+	l |= (word_len - 1) << 7;
+
+	/* set chipselect polarity; manage with FORCE */
+	if (!(config->mode & SPI_CS_HIGH))
+		/* CS active-low */
+		l |= OMAP2_MCSPI_CHCONF_EPOL;
+	else
+		l &= ~OMAP2_MCSPI_CHCONF_EPOL;
+
+	/* set SPI mode 0..3 */
+	if (config->mode & SPI_CPOL)
+		l |= OMAP2_MCSPI_CHCONF_POL;
+	else
+		l &= ~OMAP2_MCSPI_CHCONF_POL;
+	if (config->mode & SPI_CPHA)
+		l |= OMAP2_MCSPI_CHCONF_PHA;
+	else
+		l &= ~OMAP2_MCSPI_CHCONF_PHA;
+
+	mcspi_wr_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0, l);
+	l = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0);
+
+	omap2_mcspi_chip_select(slave, 0);
+
+	return 0;
+}
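+
+/*
+ * Clock setup example: a requested speed of 10MHz takes the
+ * one-cycle-granularity branch: div = (48MHz + 10MHz - 1) / 10MHz = 5,
+ * so the channel runs at 9.6MHz with CLKD = 4, EXTCLK = 0 and CLKG set.
+ */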
+
+static void mcspi_rd_fifo(struct spi_master_omap2_mcspi *spim, int cs_id)
+{
+	u8 byte;
+	int i;
+
+	/* Receiver register must be read to remove source of interrupt */
+	for (i = 0; i < spim->fifo_depth; i++) {
+		byte = mcspi_rd_cs_reg(spim, cs_id, OMAP2_MCSPI_RX0);
+		if (spim->rx_buf && (spim->rx_len > 0))
+			*spim->rx_buf++ = byte;
+		spim->rx_len--;
+	}
+}
+
+static void mcspi_wr_fifo(struct spi_master_omap2_mcspi *spim, int cs_id)
+{
+	u8 byte;
+	int i;
+
+	/* load transmitter register to remove the source of the interrupt */
+	for (i = 0; i < spim->fifo_depth; i++) {
+		if (spim->tx_len <= 0)
+			byte = 0;
+		else
+			byte = spim->tx_buf ? *spim->tx_buf++ : 0;
+		mcspi_wr_cs_reg(spim, cs_id, OMAP2_MCSPI_TX0, byte);
+		spim->tx_len--;
+	}
+}
+
+static void mcspi_wr_fifo_bh(struct spi_master_omap2_mcspi *spim, int cs_id)
+{
+	u8 byte;
+	int i;
+	rtdm_lockctx_t c;
+
+	rtdm_lock_get_irqsave(&spim->lock, c);
+
+	for (i = 0; i < spim->fifo_depth; i++) {
+		if (spim->tx_len <= 0)
+			byte = 0;
+		else
+			byte = spim->tx_buf ? *spim->tx_buf++ : 0;
+		mcspi_wr_cs_reg(spim, cs_id, OMAP2_MCSPI_TX0, byte);
+		spim->tx_len--;
+	}
+
+	rtdm_lock_put_irqrestore(&spim->lock, c);
+}
+
+static int omap2_mcspi_interrupt(rtdm_irq_t *irqh)
+{
+	struct spi_master_omap2_mcspi *spim;
+	u32 l;
+	int i, cs_id = 0;
+
+	spim = rtdm_irq_get_arg(irqh, struct spi_master_omap2_mcspi);
+	rtdm_lock_get(&spim->lock);
+
+	for (i = 0; i < OMAP2_MCSPI_CS_N; i++)
+		if (spim->cs[i].chosen) {
+			cs_id = i;
+			break;
+		}
+
+	spim->n_interrupts++;
+	l = mcspi_rd_reg(spim, OMAP2_MCSPI_IRQSTATUS);
+
+	if ((l & OMAP2_MCSPI_IRQSTATUS_RX0_FULL) ||
+	   (l & OMAP2_MCSPI_IRQSTATUS_RX1_FULL)) {
+		mcspi_rd_fifo(spim, cs_id);
+		spim->n_rx_full++;
+	}
+	if ((l & OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY) ||
+		(l & OMAP2_MCSPI_IRQSTATUS_TX1_EMPTY)) {
+		if (spim->tx_len > 0)
+			mcspi_wr_fifo(spim, cs_id);
+		spim->n_tx_empty++;
+	}
+
+	/* write 1 to OMAP2_MCSPI_IRQSTATUS field to reset it */
+	mcspi_wr_reg(spim, OMAP2_MCSPI_IRQSTATUS, l);
+
+	if ((spim->tx_len <= 0) && (spim->rx_len <= 0)) {
+		/* disable interrupts */
+		mcspi_wr_reg(spim, OMAP2_MCSPI_IRQENABLE, 0);
+
+		rtdm_event_signal(&spim->transfer_done);
+	}
+
+	rtdm_lock_put(&spim->lock);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int omap2_mcspi_disable_fifo(struct rtdm_spi_remote_slave *slave,
+							int cs_id)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	u32 chconf;
+
+	chconf = mcspi_rd_cs_reg(spim, cs_id, OMAP2_MCSPI_CHCONF0);
+	chconf &= ~(OMAP2_MCSPI_CHCONF_FFER | OMAP2_MCSPI_CHCONF_FFET);
+	mcspi_wr_cs_reg(spim, cs_id, OMAP2_MCSPI_CHCONF0, chconf);
+	return 0;
+}
+
+static int omap2_mcspi_set_fifo(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	unsigned int wcnt;
+	int max_fifo_depth, fifo_depth, bytes_per_word;
+	u32 chconf, xferlevel;
+
+	chconf = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0);
+	bytes_per_word = 1;
+
+	max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
+	if (spim->tx_len < max_fifo_depth) {
+		fifo_depth = spim->tx_len;
+		wcnt = spim->tx_len / bytes_per_word;
+	} else {
+		fifo_depth = max_fifo_depth;
+		wcnt = max_fifo_depth * (spim->tx_len / max_fifo_depth)
+			/ bytes_per_word;
+	}
+	if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT) {
+		dev_err(slave_to_kdev(slave),
+			"%s: wcnt=%d: too many bytes in a transfer.\n",
+			__func__, wcnt);
+		return -EINVAL;
+	}
+
+	chconf |= OMAP2_MCSPI_CHCONF_FFER;
+	chconf |= OMAP2_MCSPI_CHCONF_FFET;
+
+	mcspi_wr_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0, chconf);
+	spim->fifo_depth = fifo_depth;
+
+	xferlevel = wcnt << 16;
+	xferlevel |= (fifo_depth - 1) << 8;
+	xferlevel |= fifo_depth - 1;
+	mcspi_wr_reg(spim, OMAP2_MCSPI_XFERLEVEL, xferlevel);
+
+	return 0;
+}
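+
+/*
+ * Example: with tx_len = 96 and the 64-byte FIFO split in half,
+ * fifo_depth = 32 and wcnt = 96, so XFERLEVEL is written as
+ * (96 << 16) | (31 << 8) | 31.
+ */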
+
+static int do_transfer_irq_bh(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	u32 chconf, l;
+	int ret;
+	int i;
+
+	/* configure to send and receive */
+	chconf = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0);
+	chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
+	chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
+	mcspi_wr_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0, chconf);
+
+	/* fifo can be enabled on a single channel */
+	if (slave->chip_select == 0) {
+		if (spim->cs[1].chosen)
+			omap2_mcspi_disable_fifo(slave, 1);
+	} else {
+		if (spim->cs[0].chosen)
+			omap2_mcspi_disable_fifo(slave, 0);
+	}
+	ret = omap2_mcspi_set_fifo(slave);
+	if (ret)
+		return ret;
+
+	omap2_mcspi_channel_enable(slave, 1);
+
+	/* Set slave->chip_select as chosen */
+	for (i = 0; i < OMAP2_MCSPI_CS_N; i++)
+		if (i == slave->chip_select)
+			spim->cs[i].chosen = 1;
+		else
+			spim->cs[i].chosen = 0;
+
+	/* The interrupt status bit should always be reset
+	 * after the channel is enabled
+	 * and before the event is enabled as an interrupt source.
+	 */
+	/* write 1 to OMAP2_MCSPI_IRQSTATUS field to reset it */
+	l = mcspi_rd_reg(spim, OMAP2_MCSPI_IRQSTATUS);
+	mcspi_wr_reg(spim, OMAP2_MCSPI_IRQSTATUS, l);
+
+	spim->n_interrupts = 0;
+	spim->n_rx_full = 0;
+	spim->n_tx_empty = 0;
+
+	/* Enable interrupts last. */
+	/* support only two channels */
+	if (slave->chip_select == 0)
+		l = OMAP2_MCSPI_IRQENABLE_TX0_EMPTY |
+			OMAP2_MCSPI_IRQENABLE_RX0_FULL;
+	else
+		l = OMAP2_MCSPI_IRQENABLE_TX1_EMPTY |
+			OMAP2_MCSPI_IRQENABLE_RX1_FULL;
+	mcspi_wr_reg(spim, OMAP2_MCSPI_IRQENABLE, l);
+
+	/* TX_EMPTY will be raised only after data is transferred */
+	mcspi_wr_fifo_bh(spim, slave->chip_select);
+
+	/* wait for transfer completion */
+	ret = rtdm_event_wait(&spim->transfer_done);
+	omap2_mcspi_channel_enable(slave, 0);
+	if (ret)
+		return ret;
+
+	/* spim->tx_len and spim->rx_len should be 0 */
+	if (spim->tx_len || spim->rx_len)
+		return -EIO;
+	return 0;
+}
+
+static int do_transfer_irq(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	int len, first_size, last_size, ret = 0;
+
+	len = spim->tx_len;
+
+	if (len < (OMAP2_MCSPI_MAX_FIFODEPTH / 2))
+		goto label_last;
+
+	first_size = (OMAP2_MCSPI_MAX_FIFODEPTH / 2) *
+		(len / (OMAP2_MCSPI_MAX_FIFODEPTH / 2));
+	spim->tx_len = first_size;
+	spim->rx_len = first_size;
+	ret = do_transfer_irq_bh(slave);
+	if (ret)
+		return ret;
+
+label_last:
+	last_size = len % (OMAP2_MCSPI_MAX_FIFODEPTH / 2);
+	if (last_size == 0)
+		return ret;
+	spim->tx_len = last_size;
+	spim->rx_len = last_size;
+	ret = do_transfer_irq_bh(slave);
+	return ret;
+}
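+
+/*
+ * Chunking example: a 100-byte transfer is split into a 96-byte bulk
+ * pass (three 32-byte FIFO halves) followed by a trailing 4-byte pass,
+ * each run through do_transfer_irq_bh().
+ */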
+
+static int omap2_mcspi_transfer_iobufs(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	struct spi_slave_omap2_mcspi *mapped_data = to_slave_omap2_mcspi(slave);
+	int ret;
+
+	if (mapped_data->io_len == 0)
+		return -EINVAL;	/* No I/O buffers set. */
+
+	spim->tx_len = mapped_data->io_len / 2;
+	spim->rx_len = spim->tx_len;
+	spim->tx_buf = mapped_data->io_virt + spim->rx_len;
+	spim->rx_buf = mapped_data->io_virt;
+
+	ret = do_transfer_irq(slave);
+
+	return ret ? : 0;
+}
+
+static int omap2_mcspi_transfer_iobufs_n(struct rtdm_spi_remote_slave *slave,
+								 int len)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	struct spi_slave_omap2_mcspi *mapped_data = to_slave_omap2_mcspi(slave);
+	int ret;
+
+	if ((mapped_data->io_len == 0) ||
+		(len <= 0) || (len > (mapped_data->io_len / 2)))
+		return -EINVAL;
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = mapped_data->io_virt + mapped_data->io_len / 2;
+	spim->rx_buf = mapped_data->io_virt;
+
+	ret = do_transfer_irq(slave);
+
+	return ret ? : 0;
+}
+
+static ssize_t omap2_mcspi_read(struct rtdm_spi_remote_slave *slave,
+			    void *rx, size_t len)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	int ret;
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = NULL;
+	spim->rx_buf = rx;
+
+	ret = do_transfer_irq(slave);
+
+	return  ret ? : len;
+}
+
+static ssize_t omap2_mcspi_write(struct rtdm_spi_remote_slave *slave,
+			     const void *tx, size_t len)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	int ret;
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = tx;
+	spim->rx_buf = NULL;
+
+	ret = do_transfer_irq(slave);
+
+	return  ret ? : len;
+}
+
+static int set_iobufs(struct spi_slave_omap2_mcspi *mapped_data, size_t len)
+{
+	dma_addr_t dma;
+	void *p;
+
+	if (len == 0)
+		return -EINVAL;
+
+	len = L1_CACHE_ALIGN(len) * 2;
+	if (len == mapped_data->io_len)
+		return 0;
+
+	if (mapped_data->io_len)
+		return -EINVAL;	/* I/O buffers may not be resized. */
+
+	/*
+	 * Since we need the I/O buffers to be set for starting a
+	 * transfer, there is no need for serializing this routine and
+	 * transfer_iobufs(), provided io_len is set last.
+	 *
+	 * NOTE: We don't need coherent memory until we actually get
+	 * DMA transfers working, this code is a bit ahead of
+	 * schedule.
+	 *
+	 * Revisit: this assumes DMA mask is 4Gb.
+	 */
+	p = dma_alloc_coherent(NULL, len, &dma, GFP_KERNEL);
+	if (p == NULL)
+		return -ENOMEM;
+
+	mapped_data->io_dma = dma;
+	mapped_data->io_virt = p;
+	/*
+	 * May race with transfer_iobufs(), must be assigned after all
+	 * the rest is set up, enforcing a membar.
+	 */
+	smp_mb();
+	mapped_data->io_len = len;
+
+	return 0;
+}
+
+static int omap2_mcspi_set_iobufs(struct rtdm_spi_remote_slave *slave,
+			      struct rtdm_spi_iobufs *p)
+{
+	struct spi_slave_omap2_mcspi *mapped_data = to_slave_omap2_mcspi(slave);
+	int ret;
+
+	ret = set_iobufs(mapped_data, p->io_len);
+	if (ret)
+		return ret;
+
+	p->i_offset = 0;
+	p->o_offset = mapped_data->io_len / 2;
+	p->map_len = mapped_data->io_len;
+
+	return 0;
+}
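+
+/*
+ * Layout example (assuming 64-byte L1 cache lines): a request for
+ * io_len = 100 is rounded up to a 256-byte coherent area, with the RX
+ * half at i_offset = 0, the TX half at o_offset = 128 and
+ * map_len = 256 reported back for the subsequent mmap().
+ */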
+
+static int omap2_mcspi_mmap_iobufs(struct rtdm_spi_remote_slave *slave,
+			       struct vm_area_struct *vma)
+{
+	struct spi_slave_omap2_mcspi *mapped_data = to_slave_omap2_mcspi(slave);
+
+	/*
+	 * dma_alloc_coherent() delivers non-cached memory, make sure
+	 * to return consistent mapping attributes. Typically, mixing
+	 * memory attributes across address spaces referring to the
+	 * same physical area is architecturally wrong on ARM.
+	 */
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	return rtdm_mmap_kmem(vma, mapped_data->io_virt);
+}
+
+static void omap2_mcspi_mmap_release(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_slave_omap2_mcspi *mapped_data = to_slave_omap2_mcspi(slave);
+
+	dma_free_coherent(NULL, mapped_data->io_len,
+			  mapped_data->io_virt, mapped_data->io_dma);
+	mapped_data->io_len = 0;
+}
+
+static struct rtdm_spi_remote_slave *
+omap2_mcspi_attach_slave(struct rtdm_spi_master *master, struct spi_device *spi)
+{
+	struct spi_master_omap2_mcspi *spim;
+	struct spi_slave_omap2_mcspi *mapped_data;
+	int ret;
+
+	if ((spi->chip_select >= OMAP2_MCSPI_CS_N) || (OMAP2_MCSPI_CS_N > 2)) {
+		/* Error in the case of native CS requested with CS > 1 */
+		dev_err(&spi->dev, "%s: only two native CS per spi module are supported\n",
+			__func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	mapped_data = kzalloc(sizeof(*mapped_data), GFP_KERNEL);
+	if (mapped_data == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = rtdm_spi_add_remote_slave(&mapped_data->slave, master, spi);
+	if (ret) {
+		dev_err(&spi->dev, "%s: failed to attach slave\n", __func__);
+		kfree(mapped_data);
+		return ERR_PTR(ret);
+	}
+
+	spim = container_of(master, struct spi_master_omap2_mcspi, master);
+	spim->cs[spi->chip_select].chosen = 0;
+	spim->cs[spi->chip_select].regs = spim->regs +
+		spi->chip_select * OMAP2_MCSPI_CHANNELBANK_SIZE;
+	spim->cs[spi->chip_select].phys = spim->phys +
+		spi->chip_select * OMAP2_MCSPI_CHANNELBANK_SIZE;
+
+	return &mapped_data->slave;
+}
+
+static void omap2_mcspi_detach_slave(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_slave_omap2_mcspi *mapped_data = to_slave_omap2_mcspi(slave);
+
+	rtdm_spi_remove_remote_slave(slave);
+
+	kfree(mapped_data);
+}
+
+static struct rtdm_spi_master_ops omap2_mcspi_master_ops = {
+	.configure = omap2_mcspi_configure,
+	.chip_select = omap2_mcspi_chip_select,
+	.set_iobufs = omap2_mcspi_set_iobufs,
+	.mmap_iobufs = omap2_mcspi_mmap_iobufs,
+	.mmap_release = omap2_mcspi_mmap_release,
+	.transfer_iobufs = omap2_mcspi_transfer_iobufs,
+	.transfer_iobufs_n = omap2_mcspi_transfer_iobufs_n,
+	.write = omap2_mcspi_write,
+	.read = omap2_mcspi_read,
+	.attach_slave = omap2_mcspi_attach_slave,
+	.detach_slave = omap2_mcspi_detach_slave,
+};
+
+static struct omap2_mcspi_platform_config omap2_pdata = {
+	.regs_offset = 0,
+};
+
+static struct omap2_mcspi_platform_config omap4_pdata = {
+	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
+};
+
+static const struct of_device_id omap_mcspi_of_match[] = {
+	{
+		.compatible = "ti,omap2-mcspi",
+		.data = &omap2_pdata,
+	},
+	{
+		/* beaglebone black */
+		.compatible = "ti,omap4-mcspi",
+		.data = &omap4_pdata,
+	},
+	{ /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
+
+static int omap2_mcspi_probe(struct platform_device *pdev)
+{
+	struct spi_master_omap2_mcspi *spim;
+	struct rtdm_spi_master *master;
+	struct spi_master *kmaster;
+	struct resource *r;
+	int ret, irq;
+	u32 regs_offset = 0;
+	const struct omap2_mcspi_platform_config *pdata;
+	const struct of_device_id *match;
+	u32 num_cs = 1;
+	unsigned int pin_dir = MCSPI_PINDIR_D0_IN_D1_OUT;
+
+	match = of_match_device(omap_mcspi_of_match, &pdev->dev);
+	if (match) {
+		pdata = match->data;
+		regs_offset = pdata->regs_offset;
+	} else {
+		dev_err(&pdev->dev, "%s: cannot find a device tree match "
+				"for '%s' or '%s'\n",
+				__func__,
+				omap_mcspi_of_match[0].compatible,
+				omap_mcspi_of_match[1].compatible);
+		return -ENOENT;
+	}
+
+	master = rtdm_spi_alloc_master(&pdev->dev,
+		   struct spi_master_omap2_mcspi, master);
+	if (master == NULL)
+		return -ENOMEM;
+
+	master->subclass = RTDM_SUBCLASS_OMAP2_MCSPI;
+	master->ops = &omap2_mcspi_master_ops;
+	platform_set_drvdata(pdev, master);
+
+	kmaster = master->kmaster;
+	/* flags understood by this controller driver */
+	kmaster->mode_bits = OMAP2_MCSPI_SPI_MODE_BITS;
+	/* TODO: SPI_BPW_RANGE_MASK(4, 32); */
+	kmaster->bits_per_word_mask = SPI_BPW_MASK(8);
+	of_property_read_u32(pdev->dev.of_node, "ti,spi-num-cs", &num_cs);
+	kmaster->num_chipselect = num_cs;
+	if (of_get_property(pdev->dev.of_node,
+		"ti,pindir-d0-out-d1-in", NULL)) {
+		pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
+	}
+
+	kmaster->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
+	kmaster->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
+	kmaster->dev.of_node = pdev->dev.of_node;
+
+	spim = container_of(master, struct spi_master_omap2_mcspi, master);
+	rtdm_event_init(&spim->transfer_done, 0);
+	rtdm_lock_init(&spim->lock);
+
+	spim->pin_dir = pin_dir;
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	spim->regs = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(spim->regs)) {
+		dev_err(&pdev->dev, "%s: cannot map I/O memory\n", __func__);
+		ret = PTR_ERR(spim->regs);
+		goto fail;
+	}
+	spim->phys = r->start + regs_offset;
+	spim->regs += regs_offset;
+
+	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (irq <= 0) {
+		ret = irq ?: -ENODEV;
+		dev_err(&pdev->dev, "%s: irq_of_parse_and_map: %d\n",
+				__func__, irq);
+		goto fail;
+	}
+
+	ret = rtdm_irq_request(&spim->irqh, irq,
+			       omap2_mcspi_interrupt, 0,
+			       dev_name(&pdev->dev), spim);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: cannot request IRQ%d\n",
+				__func__, irq);
+		goto fail_unclk;
+	}
+
+	ret = rtdm_spi_add_master(&spim->master);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: failed to add master\n", __func__);
+		goto fail_unclk;
+	}
+
+	pm_runtime_use_autosuspend(&pdev->dev);
+	/* if delay is negative and the use_autosuspend flag is set
+	 * then runtime suspends are prevented.
+	 */
+	pm_runtime_set_autosuspend_delay(&pdev->dev, PM_NEGATIVE_DELAY);
+	pm_runtime_enable(&pdev->dev);
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "%s: pm_runtime_get_sync error %d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	omap2_mcspi_reset_hw(spim);
+	omap2_mcspi_init_hw(spim);
+
+	dev_info(&pdev->dev, "success\n");
+	return 0;
+
+fail_unclk:
+fail:
+	spi_master_put(kmaster);
+
+	return ret;
+}
+
+static int omap2_mcspi_remove(struct platform_device *pdev)
+{
+	struct rtdm_spi_master *master = platform_get_drvdata(pdev);
+	struct spi_master_omap2_mcspi *spim;
+
+	spim = container_of(master, struct spi_master_omap2_mcspi, master);
+
+	omap2_mcspi_reset_hw(spim);
+
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	rtdm_irq_free(&spim->irqh);
+
+	rtdm_spi_remove_master(master);
+
+	return 0;
+}
+
+static struct platform_driver omap2_mcspi_spi_driver = {
+	.driver		= {
+		.name		= "omap2_mcspi_rt",
+		.of_match_table	= omap_mcspi_of_match,
+	},
+	.probe		= omap2_mcspi_probe,
+	.remove		= omap2_mcspi_remove,
+};
+module_platform_driver(omap2_mcspi_spi_driver);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/spi/Makefile	2022-03-21 12:58:31.458868786 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/spi/spi-master.h	1970-01-01 01:00:00.000000000 +0100
+
+ccflags-$(CONFIG_XENO_DRIVERS_SPI_DEBUG) := -DDEBUG
+
+obj-$(CONFIG_XENO_DRIVERS_SPI) += xeno_spi.o
+
+xeno_spi-y := spi-master.o spi-device.o
+
+obj-$(CONFIG_XENO_DRIVERS_SPI_BCM2835) += xeno_spi_bcm2835.o
+obj-$(CONFIG_XENO_DRIVERS_SPI_SUN6I) += xeno_spi_sun6i.o
+obj-$(CONFIG_XENO_DRIVERS_SPI_OMAP2_MCSPI_RT) += xeno_spi_omap2_mcspi_rt.o
+
+xeno_spi_bcm2835-y := spi-bcm2835.o
+xeno_spi_sun6i-y := spi-sun6i.o
+xeno_spi_omap2_mcspi_rt-y := spi-omap2-mcspi-rt.o
+++ linux-patched/drivers/xenomai/spi/spi-master.h	2022-03-21 12:58:31.451868854 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/spi/spi-master.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_SPI_MASTER_H
+#define _RTDM_SPI_MASTER_H
+
+#include <rtdm/driver.h>
+#include <rtdm/uapi/spi.h>
+#include "spi-device.h"
+
+struct class;
+struct device_node;
+struct rtdm_spi_master;
+struct spi_master;
+
+struct rtdm_spi_master_ops {
+	int (*open)(struct rtdm_spi_remote_slave *slave);
+	void (*close)(struct rtdm_spi_remote_slave *slave);
+	int (*configure)(struct rtdm_spi_remote_slave *slave);
+	void (*chip_select)(struct rtdm_spi_remote_slave *slave,
+			    bool active);
+	int (*set_iobufs)(struct rtdm_spi_remote_slave *slave,
+			  struct rtdm_spi_iobufs *p);
+	int (*mmap_iobufs)(struct rtdm_spi_remote_slave *slave,
+			   struct vm_area_struct *vma);
+	void (*mmap_release)(struct rtdm_spi_remote_slave *slave);
+	int (*transfer_iobufs)(struct rtdm_spi_remote_slave *slave);
+	int (*transfer_iobufs_n)(struct rtdm_spi_remote_slave *slave, int len);
+	ssize_t (*write)(struct rtdm_spi_remote_slave *slave,
+			 const void *tx, size_t len);
+	ssize_t (*read)(struct rtdm_spi_remote_slave *slave,
+			 void *rx, size_t len);
+	struct rtdm_spi_remote_slave *(*attach_slave)
+		(struct rtdm_spi_master *master,
+			struct spi_device *spi);
+	void (*detach_slave)(struct rtdm_spi_remote_slave *slave);
+};
+
+struct rtdm_spi_master {
+	int subclass;
+	const struct rtdm_spi_master_ops *ops;
+	struct spi_master *kmaster;
+	struct {	/* Internal */
+		struct rtdm_driver driver;
+		struct class *devclass;
+		char *classname;
+		struct list_head slaves;
+		struct list_head next;
+		rtdm_lock_t lock;
+		rtdm_mutex_t bus_lock;
+		struct rtdm_spi_remote_slave *cs;
+	};
+};
+
+#define rtdm_spi_alloc_master(__dev, __type, __mptr)			\
+	__rtdm_spi_alloc_master(__dev, sizeof(__type),			\
+				offsetof(__type, __mptr))		\
+
+struct rtdm_spi_master *
+__rtdm_spi_alloc_master(struct device *dev, size_t size, int off);
+
+int __rtdm_spi_setup_driver(struct rtdm_spi_master *master);
+
+int rtdm_spi_add_master(struct rtdm_spi_master *master);
+
+void rtdm_spi_remove_master(struct rtdm_spi_master *master);
+
+#endif /* !_RTDM_SPI_MASTER_H */
+++ linux-patched/drivers/xenomai/spi/spi-master.c	2022-03-21 12:58:31.443868932 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/spi/spi-sun6i.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include "spi-master.h"
+
+static inline
+struct device *to_kdev(struct rtdm_spi_remote_slave *slave)
+{
+	return rtdm_dev_to_kdev(&slave->dev);
+}
+
+static inline struct rtdm_spi_remote_slave *fd_to_slave(struct rtdm_fd *fd)
+{
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+
+	return container_of(dev, struct rtdm_spi_remote_slave, dev);
+}
+
+static int update_slave_config(struct rtdm_spi_remote_slave *slave,
+			       struct rtdm_spi_config *config)
+{
+	struct rtdm_spi_config old_config;
+	struct rtdm_spi_master *master = slave->master;
+	int ret;
+
+	rtdm_mutex_lock(&master->bus_lock);
+
+	old_config = slave->config;
+	slave->config = *config;
+	ret = slave->master->ops->configure(slave);
+	if (ret) {
+		slave->config = old_config;
+		rtdm_mutex_unlock(&master->bus_lock);
+		return ret;
+	}
+
+	rtdm_mutex_unlock(&master->bus_lock);
+	
+	dev_info(to_kdev(slave),
+		 "configured mode %d, %s%s%s%s%u bits/w, %u Hz max\n",
+		 (int) (slave->config.mode & (SPI_CPOL | SPI_CPHA)),
+		 (slave->config.mode & SPI_CS_HIGH) ? "cs_high, " : "",
+		 (slave->config.mode & SPI_LSB_FIRST) ? "lsb, " : "",
+		 (slave->config.mode & SPI_3WIRE) ? "3wire, " : "",
+		 (slave->config.mode & SPI_LOOP) ? "loopback, " : "",
+		 slave->config.bits_per_word,
+		 slave->config.speed_hz);
+	
+	return 0;
+}
+
+static int spi_master_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	struct rtdm_spi_master *master = slave->master;
+
+	if (master->ops->open)
+		return master->ops->open(slave);
+		
+	return 0;
+}
+
+static void spi_master_close(struct rtdm_fd *fd)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	struct rtdm_spi_master *master = slave->master;
+	rtdm_lockctx_t c;
+
+	rtdm_lock_get_irqsave(&master->lock, c);
+
+	if (master->cs == slave)
+		master->cs = NULL;
+
+	rtdm_lock_put_irqrestore(&master->lock, c);
+
+	if (master->ops->close)
+		master->ops->close(slave);
+}
+
+static int do_chip_select(struct rtdm_spi_remote_slave *slave)
+{				/* master->bus_lock held */
+	struct rtdm_spi_master *master = slave->master;
+	rtdm_lockctx_t c;
+	int state;
+
+	if (slave->config.speed_hz == 0)
+		return -EINVAL; /* Setup is missing. */
+
+	/* Serialize with spi_master_close() */
+	rtdm_lock_get_irqsave(&master->lock, c);
+	
+	if (master->cs != slave) {
+		if (gpio_is_valid(slave->cs_gpio)) {
+			state = !!(slave->config.mode & SPI_CS_HIGH);
+			gpiod_set_raw_value(slave->cs_gpiod, state);
+		} else
+			master->ops->chip_select(slave, true);
+		master->cs = slave;
+	}
+
+	rtdm_lock_put_irqrestore(&master->lock, c);
+
+	return 0;
+}
+
+static void do_chip_deselect(struct rtdm_spi_remote_slave *slave)
+{				/* master->bus_lock held */
+	struct rtdm_spi_master *master = slave->master;
+	rtdm_lockctx_t c;
+	int state;
+
+	rtdm_lock_get_irqsave(&master->lock, c);
+
+	if (gpio_is_valid(slave->cs_gpio)) {
+		state = !(slave->config.mode & SPI_CS_HIGH);
+		gpiod_set_raw_value(slave->cs_gpiod, state);
+	} else
+		master->ops->chip_select(slave, false);
+
+	master->cs = NULL;
+
+	rtdm_lock_put_irqrestore(&master->lock, c);
+}
+
+static int spi_master_ioctl_rt(struct rtdm_fd *fd,
+			       unsigned int request, void *arg)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	struct rtdm_spi_master *master = slave->master;
+	struct rtdm_spi_config config;
+	int ret, len;
+
+	switch (request) {
+	case SPI_RTIOC_SET_CONFIG:
+		ret = rtdm_safe_copy_from_user(fd, &config,
+					       arg, sizeof(config));
+		if (ret == 0)
+			ret = update_slave_config(slave, &config);
+		break;
+	case SPI_RTIOC_GET_CONFIG:
+		rtdm_mutex_lock(&master->bus_lock);
+		config = slave->config;
+		rtdm_mutex_unlock(&master->bus_lock);
+		ret = rtdm_safe_copy_to_user(fd, arg,
+					     &config, sizeof(config));
+		break;
+	case SPI_RTIOC_TRANSFER:
+		ret = -EINVAL;
+		if (master->ops->transfer_iobufs) {
+			rtdm_mutex_lock(&master->bus_lock);
+			ret = do_chip_select(slave);
+			if (ret == 0) {
+				ret = master->ops->transfer_iobufs(slave);
+				do_chip_deselect(slave);
+			}
+			rtdm_mutex_unlock(&master->bus_lock);
+		}
+		break;
+	case SPI_RTIOC_TRANSFER_N:
+		ret = -EINVAL;
+		if (master->ops->transfer_iobufs_n) {
+			len = (long)arg;
+			rtdm_mutex_lock(&master->bus_lock);
+			ret = do_chip_select(slave);
+			if (ret == 0) {
+				ret = master->ops->transfer_iobufs_n(slave, len);
+				do_chip_deselect(slave);
+			}
+			rtdm_mutex_unlock(&master->bus_lock);
+		}
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static int spi_master_ioctl_nrt(struct rtdm_fd *fd,
+				unsigned int request, void *arg)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	struct rtdm_spi_master *master = slave->master;
+	struct rtdm_spi_iobufs iobufs;
+	int ret;
+
+	switch (request) {
+	case SPI_RTIOC_SET_IOBUFS:
+		ret = rtdm_safe_copy_from_user(fd, &iobufs,
+					       arg, sizeof(iobufs));
+		if (ret)
+			break;
+		/*
+		 * No transfer can happen without I/O buffers being
+		 * set, and I/O buffers cannot be reset, therefore we
+		 * need no serialization with the transfer code here.
+		 */
+		mutex_lock(&slave->ctl_lock);
+		ret = master->ops->set_iobufs(slave, &iobufs);
+		mutex_unlock(&slave->ctl_lock);
+		if (ret == 0)
+			ret = rtdm_safe_copy_to_user(fd, arg,
+					     &iobufs, sizeof(iobufs));
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
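+
+/*
+ * Typical userland sequence (sketch only, error handling omitted; the
+ * device path and numeric values are illustrative, and the caller is
+ * assumed to be a Xenomai thread so that the RT handlers apply):
+ *
+ *	fd = open("/dev/rtdm/<master-class>/<slave-name>", O_RDWR);
+ *	iobufs.io_len = 32;
+ *	ioctl(fd, SPI_RTIOC_SET_IOBUFS, &iobufs);
+ *	p = mmap(NULL, iobufs.map_len, PROT_READ | PROT_WRITE,
+ *		 MAP_SHARED, fd, 0);
+ *	config.mode = 0;	(CPOL = 0, CPHA = 0)
+ *	config.bits_per_word = 8;
+ *	config.speed_hz = 10000000;
+ *	ioctl(fd, SPI_RTIOC_SET_CONFIG, &config);
+ *	memcpy(p + iobufs.o_offset, tx_data, n);
+ *	ioctl(fd, SPI_RTIOC_TRANSFER_N, n);
+ *	memcpy(rx_data, p + iobufs.i_offset, n);
+ */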
+
+static ssize_t spi_master_read_rt(struct rtdm_fd *fd,
+				  void __user *u_buf, size_t len)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	struct rtdm_spi_master *master = slave->master;
+	void *rx;
+	int ret;
+
+	if (len == 0)
+		return 0;
+
+	rx = xnmalloc(len);
+	if (rx == NULL)
+		return -ENOMEM;
+
+	rtdm_mutex_lock(&master->bus_lock);
+	ret = do_chip_select(slave);
+	if (ret == 0) {
+		ret = master->ops->read(slave, rx, len);
+		do_chip_deselect(slave);
+	}
+	rtdm_mutex_unlock(&master->bus_lock);
+	if (ret > 0)
+		ret = rtdm_safe_copy_to_user(fd, u_buf, rx, ret);
+	
+	xnfree(rx);
+	
+	return ret;
+}
+
+static ssize_t spi_master_write_rt(struct rtdm_fd *fd,
+				   const void __user *u_buf, size_t len)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	struct rtdm_spi_master *master = slave->master;
+	void *tx;
+	int ret;
+
+	if (len == 0)
+		return 0;
+
+	tx = xnmalloc(len);
+	if (tx == NULL)
+		return -ENOMEM;
+
+	ret = rtdm_safe_copy_from_user(fd, tx, u_buf, len);
+	if (ret == 0) {
+		rtdm_mutex_lock(&master->bus_lock);
+		ret = do_chip_select(slave);
+		if (ret == 0) {
+			ret = master->ops->write(slave, tx, len);
+			do_chip_deselect(slave);
+		}
+		rtdm_mutex_unlock(&master->bus_lock);
+	}
+	
+	xnfree(tx);
+
+	return ret;
+}
+
+static void iobufs_vmopen(struct vm_area_struct *vma)
+{
+	struct rtdm_spi_remote_slave *slave = vma->vm_private_data;
+
+	atomic_inc(&slave->mmap_refs);
+	dev_dbg(slave_to_kdev(slave), "mapping added\n");
+}
+
+static void iobufs_vmclose(struct vm_area_struct *vma)
+{
+	struct rtdm_spi_remote_slave *slave = vma->vm_private_data;
+
+	if (atomic_dec_and_test(&slave->mmap_refs)) {
+		slave->master->ops->mmap_release(slave);
+		dev_dbg(slave_to_kdev(slave), "mapping released\n");
+	}
+}
+
+static struct vm_operations_struct iobufs_vmops = {
+	.open = iobufs_vmopen,
+	.close = iobufs_vmclose,
+};
+
+static int spi_master_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	int ret;
+
+	if (slave->master->ops->mmap_iobufs == NULL)
+		return -EINVAL;
+
+	ret = slave->master->ops->mmap_iobufs(slave, vma);
+	if (ret)
+		return ret;
+
+	dev_dbg(slave_to_kdev(slave), "mapping created\n");
+	atomic_inc(&slave->mmap_refs);
+
+	if (slave->master->ops->mmap_release) {
+		vma->vm_ops = &iobufs_vmops;
+		vma->vm_private_data = slave;
+	}
+
+	return 0;
+}
+
+static char *spi_slave_devnode(struct device *dev, umode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "rtdm/%s/%s",
+			 dev->class->name,
+			 dev_name(dev));
+}
+
+struct rtdm_spi_master *
+__rtdm_spi_alloc_master(struct device *dev, size_t size, int off)
+{
+	struct rtdm_spi_master *master;
+	struct spi_master *kmaster;
+
+	kmaster = spi_alloc_master(dev, size);
+	if (kmaster == NULL)
+		return NULL;
+	
+	master = (void *)(kmaster + 1) + off;
+	master->kmaster = kmaster;
+	spi_master_set_devdata(kmaster, master);
+
+	return master;
+}
+EXPORT_SYMBOL_GPL(__rtdm_spi_alloc_master);
+
+int __rtdm_spi_setup_driver(struct rtdm_spi_master *master)
+{
+	master->classname = kstrdup(
+		dev_name(&master->kmaster->dev), GFP_KERNEL);
+	master->devclass = class_create(THIS_MODULE,
+		master->classname);
+	if (IS_ERR(master->devclass)) {
+		kfree(master->classname);
+		printk(XENO_ERR "cannot create sysfs class\n");
+		return PTR_ERR(master->devclass);
+	}
+
+	master->devclass->devnode = spi_slave_devnode;
+	master->cs = NULL;
+
+	master->driver.profile_info = (struct rtdm_profile_info)
+		RTDM_PROFILE_INFO(rtdm_spi_master,
+				  RTDM_CLASS_SPI,
+				  master->subclass,
+				  0);
+	master->driver.device_flags = RTDM_NAMED_DEVICE;
+	master->driver.base_minor = 0;
+	master->driver.device_count = 256;
+	master->driver.context_size = 0;
+	master->driver.ops = (struct rtdm_fd_ops){
+		.open		=	spi_master_open,
+		.close		=	spi_master_close,
+		.read_rt	=	spi_master_read_rt,
+		.write_rt	=	spi_master_write_rt,
+		.ioctl_rt	=	spi_master_ioctl_rt,
+		.ioctl_nrt	=	spi_master_ioctl_nrt,
+		.mmap		=	spi_master_mmap,
+	};
+	
+	rtdm_drv_set_sysclass(&master->driver, master->devclass);
+
+	INIT_LIST_HEAD(&master->slaves);
+	rtdm_lock_init(&master->lock);
+	rtdm_mutex_init(&master->bus_lock);
+
+	return 0;
+}
+
+static int spi_transfer_one_unimp(struct spi_master *master,
+				  struct spi_device *spi,
+				  struct spi_transfer *tfr)
+{
+	return -ENODEV;
+}
+
+int rtdm_spi_add_master(struct rtdm_spi_master *master)
+{
+	struct spi_master *kmaster = master->kmaster;
+
+	/*
+	 * Prevent the transfer handler from being called by the
+	 * regular SPI stack, just in case.
+	 */
+	kmaster->transfer_one = spi_transfer_one_unimp;
+	master->devclass = NULL;
+
+	/*
+	 * Register with the core SPI stack; devices on the bus will be
+	 * enumerated and handed over to spi_device_probe().
+	 */
+	return spi_register_master(kmaster);
+}
+EXPORT_SYMBOL_GPL(rtdm_spi_add_master);
+
+void rtdm_spi_remove_master(struct rtdm_spi_master *master)
+{
+	struct class *class = master->devclass;
+	char *classname = master->classname;
+	
+	rtdm_mutex_destroy(&master->bus_lock);
+	spi_unregister_master(master->kmaster);
+	rtdm_drv_set_sysclass(&master->driver, NULL);
+	class_destroy(class);
+	kfree(classname);
+}
+EXPORT_SYMBOL_GPL(rtdm_spi_remove_master);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/spi/spi-sun6i.c	2022-03-21 12:58:31.436869000 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/spi/spi-bcm2835.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * I/O handling lifted from drivers/spi/spi-sun6i.c:
+ * Copyright (C) 2012 - 2014 Allwinner Tech
+ * Pan Nan <pannan@allwinnertech.com>
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * RTDM integration by:
+ * Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include "spi-master.h"
+
+#define RTDM_SUBCLASS_SUN6I  2
+
+#define SUN6I_GBL_CTL_REG		0x04
+#define SUN6I_GBL_CTL_BUS_ENABLE	BIT(0)
+#define SUN6I_GBL_CTL_MASTER		BIT(1)
+#define SUN6I_GBL_CTL_TP		BIT(7)
+#define SUN6I_GBL_CTL_RST		BIT(31)
+
+#define SUN6I_TFR_CTL_REG		0x08
+#define SUN6I_TFR_CTL_CPHA		BIT(0)
+#define SUN6I_TFR_CTL_CPOL		BIT(1)
+#define SUN6I_TFR_CTL_SPOL		BIT(2)
+#define SUN6I_TFR_CTL_CS_MASK		0x30
+#define SUN6I_TFR_CTL_CS(cs)		(((cs) << 4) & SUN6I_TFR_CTL_CS_MASK)
+#define SUN6I_TFR_CTL_CS_MANUAL		BIT(6)
+#define SUN6I_TFR_CTL_CS_LEVEL		BIT(7)
+#define SUN6I_TFR_CTL_DHB		BIT(8)
+#define SUN6I_TFR_CTL_FBS		BIT(12)
+#define SUN6I_TFR_CTL_XCH		BIT(31)
+
+#define SUN6I_INT_CTL_REG		0x10
+#define SUN6I_INT_CTL_RX_RDY		BIT(0)
+#define SUN6I_INT_CTL_TX_RDY		BIT(4)
+#define SUN6I_INT_CTL_RX_OVF		BIT(8)
+#define SUN6I_INT_CTL_TC		BIT(12)
+
+#define SUN6I_INT_STA_REG		0x14
+
+#define SUN6I_FIFO_CTL_REG		0x18
+#define SUN6I_FIFO_CTL_RX_RDY_TRIG_LEVEL_MASK	0xff
+#define SUN6I_FIFO_CTL_RX_RDY_TRIG_LEVEL_BITS	0
+#define SUN6I_FIFO_CTL_RX_RST			BIT(15)
+#define SUN6I_FIFO_CTL_TX_RDY_TRIG_LEVEL_MASK	0xff
+#define SUN6I_FIFO_CTL_TX_RDY_TRIG_LEVEL_BITS	16
+#define SUN6I_FIFO_CTL_TX_RST			BIT(31)
+
+#define SUN6I_FIFO_STA_REG		0x1c
+#define SUN6I_FIFO_STA_RX_CNT(reg)	(((reg) >> 0) & 0xff)
+#define SUN6I_FIFO_STA_TX_CNT(reg)	(((reg) >> 16) & 0xff)
+
+#define SUN6I_CLK_CTL_REG		0x24
+#define SUN6I_CLK_CTL_CDR2_MASK		0xff
+#define SUN6I_CLK_CTL_CDR2(div)		(((div) & SUN6I_CLK_CTL_CDR2_MASK) << 0)
+#define SUN6I_CLK_CTL_CDR1_MASK		0xf
+#define SUN6I_CLK_CTL_CDR1(div)		(((div) & SUN6I_CLK_CTL_CDR1_MASK) << 8)
+#define SUN6I_CLK_CTL_DRS		BIT(12)
+
+#define SUN6I_MAX_XFER_SIZE		0xffffff
+
+#define SUN6I_BURST_CNT_REG		0x30
+#define SUN6I_BURST_CNT(cnt)		((cnt) & SUN6I_MAX_XFER_SIZE)
+
+#define SUN6I_XMIT_CNT_REG		0x34
+#define SUN6I_XMIT_CNT(cnt)		((cnt) & SUN6I_MAX_XFER_SIZE)
+
+#define SUN6I_BURST_CTL_CNT_REG		0x38
+#define SUN6I_BURST_CTL_CNT_STC(cnt)	((cnt) & SUN6I_MAX_XFER_SIZE)
+
+#define SUN6I_TXDATA_REG		0x200
+#define SUN6I_RXDATA_REG		0x300
+
+#define SUN6I_SPI_MODE_BITS	(SPI_CPOL | SPI_CPHA | SPI_CS_HIGH	\
+				 | SPI_LSB_FIRST)
+
+struct spi_setup_data {
+	int fifo_depth;
+};
+
+static struct spi_setup_data sun6i_data = {
+	.fifo_depth = 128,
+};
+
+static struct spi_setup_data sun8i_data = {
+	.fifo_depth = 64,
+};
+
+struct spi_master_sun6i {
+	struct rtdm_spi_master master;
+	void __iomem *regs;
+	struct reset_control *rstc;
+	struct clk *hclk;
+	struct clk *mclk;
+	unsigned long clk_hz;
+	rtdm_irq_t irqh;
+	const u8 *tx_buf;
+	u8 *rx_buf;
+	int tx_len;
+	int rx_len;
+	rtdm_event_t transfer_done;
+	const struct spi_setup_data *setup;
+};
+
+struct spi_slave_sun6i {
+	struct rtdm_spi_remote_slave slave;
+	void *io_virt;
+	dma_addr_t io_dma;
+	size_t io_len;
+};
+
+static inline struct spi_slave_sun6i *
+to_slave_sun6i(struct rtdm_spi_remote_slave *slave)
+{
+	return container_of(slave, struct spi_slave_sun6i, slave);
+}
+
+static inline struct spi_master_sun6i *
+to_master_sun6i(struct rtdm_spi_remote_slave *slave)
+{
+	return container_of(slave->master, struct spi_master_sun6i, master);
+}
+
+static inline struct device *
+master_to_kdev(struct rtdm_spi_master *master)
+{
+	return &master->kmaster->dev;
+}
+
+static inline u32 sun6i_rd(struct spi_master_sun6i *spim,
+			   unsigned int reg)
+{
+	return readl(spim->regs + reg);
+}
+
+static inline void sun6i_wr(struct spi_master_sun6i *spim,
+			    unsigned int reg, u32 val)
+{
+	writel(val, spim->regs + reg);
+}
+
+static void sun6i_rd_fifo(struct spi_master_sun6i *spim)
+{
+	u32 reg;
+	int len;
+	u8 byte;
+
+	reg = sun6i_rd(spim, SUN6I_FIFO_STA_REG);
+	len = min((int)SUN6I_FIFO_STA_RX_CNT(reg), spim->rx_len);
+
+	while (len-- > 0) {
+		byte = sun6i_rd(spim, SUN6I_RXDATA_REG);
+		if (spim->rx_buf)
+			*spim->rx_buf++ = byte;
+		spim->rx_len--;
+	}
+}
+
+static void sun6i_wr_fifo(struct spi_master_sun6i *spim)
+{
+	u32 reg;
+	int len;
+	u8 byte;
+
+	reg = sun6i_rd(spim, SUN6I_FIFO_STA_REG);
+	len = min(spim->setup->fifo_depth - (int)SUN6I_FIFO_STA_TX_CNT(reg),
+		  spim->tx_len);
+	
+	while (len-- > 0) {
+		byte = spim->tx_buf ? *spim->tx_buf++ : 0;
+		sun6i_wr(spim, SUN6I_TXDATA_REG, byte);
+		spim->tx_len--;
+	}
+}
+
+static int sun6i_spi_interrupt(rtdm_irq_t *irqh)
+{
+	struct spi_master_sun6i *spim;
+	u32 status;
+
+	spim = rtdm_irq_get_arg(irqh, struct spi_master_sun6i);
+
+	sun6i_rd_fifo(spim);
+	sun6i_wr_fifo(spim);
+	
+	status = sun6i_rd(spim, SUN6I_INT_STA_REG);
+	if ((status & SUN6I_INT_CTL_TC)) {
+		sun6i_wr(spim, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TC);
+		sun6i_wr(spim, SUN6I_INT_CTL_REG, 0);
+		rtdm_event_signal(&spim->transfer_done);
+	} else if (status & SUN6I_INT_CTL_TX_RDY)
+		sun6i_wr(spim, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TX_RDY);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int sun6i_configure(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+	struct rtdm_spi_config *config = &slave->config;
+	u32 reg, div;
+	
+	/* Set clock polarity and phase. */
+
+	reg = sun6i_rd(spim, SUN6I_TFR_CTL_REG);
+	reg &= ~(SUN6I_TFR_CTL_CPOL | SUN6I_TFR_CTL_CPHA |
+		 SUN6I_TFR_CTL_FBS | SUN6I_TFR_CTL_SPOL);
+
+	/* Manual CS via ->chip_select(). */
+	reg |= SUN6I_TFR_CTL_CS_MANUAL;
+
+	if (config->mode & SPI_CPOL)
+		reg |= SUN6I_TFR_CTL_CPOL;
+
+	if (config->mode & SPI_CPHA)
+		reg |= SUN6I_TFR_CTL_CPHA;
+
+	if (config->mode & SPI_LSB_FIRST)
+		reg |= SUN6I_TFR_CTL_FBS;
+
+	if (!(config->mode & SPI_CS_HIGH))
+		reg |= SUN6I_TFR_CTL_SPOL;
+
+	sun6i_wr(spim, SUN6I_TFR_CTL_REG, reg);
+	
+	/* Setup clock divider. */
+
+	div = spim->clk_hz / (2 * config->speed_hz);
+	if (div <= SUN6I_CLK_CTL_CDR2_MASK + 1) {
+		if (div > 0)
+			div--;
+		reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS;
+	} else {
+		div = ilog2(spim->clk_hz) - ilog2(config->speed_hz);
+		reg = SUN6I_CLK_CTL_CDR1(div);
+	}
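+	/*
+	 * Worked example (illustrative only, assuming e.g. a 200 MHz
+	 * module clock): for speed_hz = 10 MHz, div = 200000000 /
+	 * (2 * 10000000) = 10, which fits in CDR2, so CDR2 = div - 1
+	 * = 9 is programmed and the resulting SCLK is
+	 * 200 MHz / (2 * (9 + 1)) = 10 MHz.
+	 */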
+
+	sun6i_wr(spim, SUN6I_CLK_CTL_REG, reg);
+
+	return 0;
+}
+
+static void sun6i_chip_select(struct rtdm_spi_remote_slave *slave,
+			      bool active)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+	u32 reg;
+
+	/*
+	 * We have no cs_gpios, so this handler will be called for
+	 * each transfer.
+	 */
+	reg = sun6i_rd(spim, SUN6I_TFR_CTL_REG);
+	reg &= ~(SUN6I_TFR_CTL_CS_MASK | SUN6I_TFR_CTL_CS_LEVEL);
+	reg |= SUN6I_TFR_CTL_CS(slave->chip_select);
+
+	if (active)
+		reg |= SUN6I_TFR_CTL_CS_LEVEL;
+
+	sun6i_wr(spim, SUN6I_TFR_CTL_REG, reg);
+}
+
+static int do_transfer_irq(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+	u32 tx_len = 0, reg;
+	int ret;
+
+	/* Reset FIFO. */
+	sun6i_wr(spim, SUN6I_FIFO_CTL_REG,
+		 SUN6I_FIFO_CTL_RX_RST | SUN6I_FIFO_CTL_TX_RST);
+
+	/* Set FIFO interrupt trigger level to 3/4 of the fifo depth. */
+	reg = spim->setup->fifo_depth / 4 * 3;
+	sun6i_wr(spim, SUN6I_FIFO_CTL_REG,
+		 (reg << SUN6I_FIFO_CTL_RX_RDY_TRIG_LEVEL_BITS) |
+		 (reg << SUN6I_FIFO_CTL_TX_RDY_TRIG_LEVEL_BITS));
+
+	reg = sun6i_rd(spim, SUN6I_TFR_CTL_REG);
+	reg &= ~SUN6I_TFR_CTL_DHB;
+	/* Discard unused SPI bursts if TX only. */
+	if (spim->rx_buf == NULL)
+		reg |= SUN6I_TFR_CTL_DHB;
+	sun6i_wr(spim, SUN6I_TFR_CTL_REG, reg);
+
+	if (spim->tx_buf)
+		tx_len = spim->tx_len;
+
+	/* Setup the counters. */
+	sun6i_wr(spim, SUN6I_BURST_CNT_REG, SUN6I_BURST_CNT(spim->tx_len));
+	sun6i_wr(spim, SUN6I_XMIT_CNT_REG, SUN6I_XMIT_CNT(tx_len));
+	sun6i_wr(spim, SUN6I_BURST_CTL_CNT_REG,
+		 SUN6I_BURST_CTL_CNT_STC(tx_len));
+
+	/* Fill the TX FIFO */
+	sun6i_wr_fifo(spim);
+
+	/* Enable interrupts. */
+	reg = sun6i_rd(spim, SUN6I_INT_CTL_REG);
+	reg |= SUN6I_INT_CTL_TC | SUN6I_INT_CTL_TX_RDY;
+	sun6i_wr(spim, SUN6I_INT_CTL_REG, reg);
+
+	/* Start the transfer. */
+	reg = sun6i_rd(spim, SUN6I_TFR_CTL_REG);
+	sun6i_wr(spim, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
+	
+	ret = rtdm_event_wait(&spim->transfer_done);
+	if (ret) {
+		sun6i_wr(spim, SUN6I_INT_CTL_REG, 0);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int sun6i_transfer_iobufs(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+	struct spi_slave_sun6i *sun6i = to_slave_sun6i(slave);
+
+	if (sun6i->io_len == 0)
+		return -EINVAL;	/* No I/O buffers set. */
+	
+	spim->tx_len = sun6i->io_len / 2;
+	spim->rx_len = spim->tx_len;
+	spim->tx_buf = sun6i->io_virt + spim->rx_len;
+	spim->rx_buf = sun6i->io_virt;
+
+	return do_transfer_irq(slave);
+}
+
+static int sun6i_transfer_iobufs_n(struct rtdm_spi_remote_slave *slave,
+				   int len)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+	struct spi_slave_sun6i *sun6i = to_slave_sun6i(slave);
+
+	if ((sun6i->io_len == 0) ||
+		(len <= 0) || (len > (sun6i->io_len / 2)))
+		return -EINVAL;
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = sun6i->io_virt + sun6i->io_len / 2;
+	spim->rx_buf = sun6i->io_virt;
+
+	return do_transfer_irq(slave);
+}
+
+static ssize_t sun6i_read(struct rtdm_spi_remote_slave *slave,
+			  void *rx, size_t len)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = NULL;
+	spim->rx_buf = rx;
+
+	return do_transfer_irq(slave) ?: len;
+}
+
+static ssize_t sun6i_write(struct rtdm_spi_remote_slave *slave,
+			   const void *tx, size_t len)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = tx;
+	spim->rx_buf = NULL;
+
+	return do_transfer_irq(slave) ?: len;
+}
+
+static int set_iobufs(struct spi_slave_sun6i *sun6i, size_t len)
+{
+	dma_addr_t dma;
+	void *p;
+
+	if (len == 0)
+		return -EINVAL;
+	
+	len = L1_CACHE_ALIGN(len) * 2;
+	if (len == sun6i->io_len)
+		return 0;
+
+	if (sun6i->io_len)
+		return -EINVAL;	/* I/O buffers may not be resized. */
+
+	p = dma_alloc_coherent(NULL, len, &dma, GFP_KERNEL);
+	if (p == NULL)
+		return -ENOMEM;
+
+	sun6i->io_dma = dma;
+	sun6i->io_virt = p;
+	smp_mb();
+	sun6i->io_len = len;
+	
+	return 0;
+}
+
+static int sun6i_set_iobufs(struct rtdm_spi_remote_slave *slave,
+			    struct rtdm_spi_iobufs *p)
+{
+	struct spi_slave_sun6i *sun6i = to_slave_sun6i(slave);
+	int ret;
+
+	ret = set_iobufs(sun6i, p->io_len);
+	if (ret)
+		return ret;
+
+	p->i_offset = 0;
+	p->o_offset = sun6i->io_len / 2;
+	p->map_len = sun6i->io_len;
+	
+	return 0;
+}
+
+static int sun6i_mmap_iobufs(struct rtdm_spi_remote_slave *slave,
+			     struct vm_area_struct *vma)
+{
+	struct spi_slave_sun6i *sun6i = to_slave_sun6i(slave);
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	return rtdm_mmap_kmem(vma, sun6i->io_virt);
+}
+
+static void sun6i_mmap_release(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_slave_sun6i *sun6i = to_slave_sun6i(slave);
+
+	dma_free_coherent(NULL, sun6i->io_len,
+			  sun6i->io_virt, sun6i->io_dma);
+	sun6i->io_len = 0;
+}
+
+static struct rtdm_spi_remote_slave *
+sun6i_attach_slave(struct rtdm_spi_master *master, struct spi_device *spi)
+{
+	struct spi_slave_sun6i *sun6i;
+	int ret;
+
+	sun6i = kzalloc(sizeof(*sun6i), GFP_KERNEL);
+	if (sun6i == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = rtdm_spi_add_remote_slave(&sun6i->slave, master, spi);
+	if (ret) {
+		dev_err(&spi->dev,
+			"%s: failed to attach slave\n", __func__);
+		kfree(sun6i);
+		return ERR_PTR(ret);
+	}
+
+	return &sun6i->slave;
+}
+
+static void sun6i_detach_slave(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_slave_sun6i *sun6i = to_slave_sun6i(slave);
+
+	rtdm_spi_remove_remote_slave(slave);
+	kfree(sun6i);
+}
+
+static struct rtdm_spi_master_ops sun6i_master_ops = {
+	.configure = sun6i_configure,
+	.chip_select = sun6i_chip_select,
+	.set_iobufs = sun6i_set_iobufs,
+	.mmap_iobufs = sun6i_mmap_iobufs,
+	.mmap_release = sun6i_mmap_release,
+	.transfer_iobufs = sun6i_transfer_iobufs,
+	.transfer_iobufs_n = sun6i_transfer_iobufs_n,
+	.write = sun6i_write,
+	.read = sun6i_read,
+	.attach_slave = sun6i_attach_slave,
+	.detach_slave = sun6i_detach_slave,
+};
+
+static int sun6i_spi_probe(struct platform_device *pdev)
+{
+	struct rtdm_spi_master *master;
+	struct spi_master_sun6i *spim;
+	struct spi_master *kmaster;
+	struct resource *r;
+	int ret, irq;
+	u32 clk_rate;
+
+	dev_dbg(&pdev->dev, "%s: entered\n", __func__);
+
+	master = rtdm_spi_alloc_master(&pdev->dev,
+				       struct spi_master_sun6i, master);
+	if (master == NULL)
+		return -ENOMEM;
+
+	master->subclass = RTDM_SUBCLASS_SUN6I;
+	master->ops = &sun6i_master_ops;
+	platform_set_drvdata(pdev, master);
+
+	kmaster = master->kmaster;
+	kmaster->max_speed_hz = 100 * 1000 * 1000;
+	kmaster->min_speed_hz = 3 * 1000;
+	kmaster->mode_bits = SUN6I_SPI_MODE_BITS;
+	kmaster->bits_per_word_mask = SPI_BPW_MASK(8);
+	kmaster->num_chipselect = 4;
+	kmaster->dev.of_node = pdev->dev.of_node;
+
+	spim = container_of(master, struct spi_master_sun6i, master);
+	spim->setup = of_device_get_match_data(&pdev->dev);
+
+	rtdm_event_init(&spim->transfer_done, 0);
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	spim->regs = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(spim->regs)) {
+		dev_err(&pdev->dev, "%s: cannot map I/O memory\n", __func__);
+		ret = PTR_ERR(spim->regs);
+		goto fail;
+	}
+	
+	spim->hclk = devm_clk_get(&pdev->dev, "ahb");
+	if (IS_ERR(spim->hclk)) {
+		dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
+		ret = PTR_ERR(spim->hclk);
+		goto fail;
+	}
+
+	spim->mclk = devm_clk_get(&pdev->dev, "mod");
+	if (IS_ERR(spim->mclk)) {
+		dev_err(&pdev->dev, "Unable to acquire MOD clock\n");
+		ret = PTR_ERR(spim->mclk);
+		goto fail;
+	}
+
+	spim->rstc = devm_reset_control_get(&pdev->dev, NULL);
+	if (IS_ERR(spim->rstc)) {
+		dev_err(&pdev->dev, "Couldn't get reset controller\n");
+		ret = PTR_ERR(spim->rstc);
+		goto fail;
+	}
+
+	/*
+	 * Ensure that we have a parent clock fast enough to handle
+	 * the fastest transfers properly.
+	 */
+	clk_rate = clk_get_rate(spim->mclk);
+	if (clk_rate < 2 * kmaster->max_speed_hz)
+		clk_set_rate(spim->mclk, 2 * kmaster->max_speed_hz);
+
+	spim->clk_hz = clk_get_rate(spim->mclk);
+
+	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (irq <= 0) {
+		ret = irq ?: -ENODEV;
+		goto fail;
+	}
+
+	clk_prepare_enable(spim->hclk);
+	clk_prepare_enable(spim->mclk);
+
+	ret = reset_control_deassert(spim->rstc);
+	if (ret)
+		goto fail_unclk;
+
+	/* Enable SPI module, in master mode with smart burst. */
+
+	sun6i_wr(spim, SUN6I_GBL_CTL_REG,
+		 SUN6I_GBL_CTL_BUS_ENABLE | SUN6I_GBL_CTL_MASTER |
+		 SUN6I_GBL_CTL_TP);
+
+	/* Disable and clear all interrupts. */
+	sun6i_wr(spim, SUN6I_INT_CTL_REG, 0);
+	sun6i_wr(spim, SUN6I_INT_STA_REG, ~0);
+	
+	ret = rtdm_irq_request(&spim->irqh, irq,
+			       sun6i_spi_interrupt, 0,
+			       dev_name(&pdev->dev), spim);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: cannot request IRQ%d\n",
+			__func__, irq);
+		goto fail_unclk;
+	}
+
+	ret = rtdm_spi_add_master(&spim->master);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: failed to add master\n",
+			__func__);
+		goto fail_register;
+	}
+
+	return 0;
+
+fail_register:
+	rtdm_irq_free(&spim->irqh);
+fail_unclk:
+	clk_disable_unprepare(spim->mclk);
+	clk_disable_unprepare(spim->hclk);
+fail:
+	spi_master_put(kmaster);
+
+	return ret;
+}
+
+static int sun6i_spi_remove(struct platform_device *pdev)
+{
+	struct rtdm_spi_master *master = platform_get_drvdata(pdev);
+	struct spi_master_sun6i *spim;
+
+	dev_dbg(&pdev->dev, "%s: entered\n", __func__);
+
+	spim = container_of(master, struct spi_master_sun6i, master);
+
+	rtdm_irq_free(&spim->irqh);
+
+	clk_disable_unprepare(spim->mclk);
+	clk_disable_unprepare(spim->hclk);
+
+	rtdm_spi_remove_master(master);
+
+	return 0;
+}
+
+static const struct of_device_id sun6i_spi_match[] = {
+	{
+		.compatible = "allwinner,sun6i-a31-spi",
+		.data = &sun6i_data,
+	},
+	{
+		.compatible = "allwinner,sun8i-h3-spi",
+		.data = &sun8i_data,
+	},
+	{ /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sun6i_spi_match);
+
+static struct platform_driver sun6i_spi_driver = {
+	.driver		= {
+		.name		= "spi-sun6i",
+		.of_match_table	= sun6i_spi_match,
+	},
+	.probe		= sun6i_spi_probe,
+	.remove		= sun6i_spi_remove,
+};
+module_platform_driver(sun6i_spi_driver);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/spi/spi-bcm2835.c	2022-03-21 12:58:31.428869078 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/**
+ * I/O handling lifted from drivers/spi/spi-bcm2835.c:
+ * Copyright (C) 2012 Chris Boot
+ * Copyright (C) 2013 Stephen Warren
+ * Copyright (C) 2015 Martin Sperl
+ *
+ * RTDM integration by:
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include "spi-master.h"
+
+#define RTDM_SUBCLASS_BCM2835  1
+
+/* SPI register offsets */
+#define BCM2835_SPI_CS			0x00
+#define BCM2835_SPI_FIFO		0x04
+#define BCM2835_SPI_CLK			0x08
+#define BCM2835_SPI_DLEN		0x0c
+#define BCM2835_SPI_LTOH		0x10
+#define BCM2835_SPI_DC			0x14
+
+/* Bitfields in CS */
+#define BCM2835_SPI_CS_LEN_LONG		0x02000000
+#define BCM2835_SPI_CS_DMA_LEN		0x01000000
+#define BCM2835_SPI_CS_CSPOL2		0x00800000
+#define BCM2835_SPI_CS_CSPOL1		0x00400000
+#define BCM2835_SPI_CS_CSPOL0		0x00200000
+#define BCM2835_SPI_CS_RXF		0x00100000
+#define BCM2835_SPI_CS_RXR		0x00080000
+#define BCM2835_SPI_CS_TXD		0x00040000
+#define BCM2835_SPI_CS_RXD		0x00020000
+#define BCM2835_SPI_CS_DONE		0x00010000
+#define BCM2835_SPI_CS_LEN		0x00002000
+#define BCM2835_SPI_CS_REN		0x00001000
+#define BCM2835_SPI_CS_ADCS		0x00000800
+#define BCM2835_SPI_CS_INTR		0x00000400
+#define BCM2835_SPI_CS_INTD		0x00000200
+#define BCM2835_SPI_CS_DMAEN		0x00000100
+#define BCM2835_SPI_CS_TA		0x00000080
+#define BCM2835_SPI_CS_CSPOL		0x00000040
+#define BCM2835_SPI_CS_CLEAR_RX		0x00000020
+#define BCM2835_SPI_CS_CLEAR_TX		0x00000010
+#define BCM2835_SPI_CS_CPOL		0x00000008
+#define BCM2835_SPI_CS_CPHA		0x00000004
+#define BCM2835_SPI_CS_CS_10		0x00000002
+#define BCM2835_SPI_CS_CS_01		0x00000001
+
+#define BCM2835_SPI_POLLING_LIMIT_US	30
+#define BCM2835_SPI_POLLING_JIFFIES	2
+#define BCM2835_SPI_DMA_MIN_LENGTH	96
+#define BCM2835_SPI_MODE_BITS	(SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
+				| SPI_NO_CS | SPI_3WIRE)
+
+struct spi_master_bcm2835 {
+	struct rtdm_spi_master master;
+	void __iomem *regs;
+	struct clk *clk;
+	unsigned long clk_hz;
+	rtdm_irq_t irqh;
+	const u8 *tx_buf;
+	u8 *rx_buf;
+	int tx_len;
+	int rx_len;
+	rtdm_event_t transfer_done;
+};
+
+struct spi_slave_bcm2835 {
+	struct rtdm_spi_remote_slave slave;
+	void *io_virt;
+	dma_addr_t io_dma;
+	size_t io_len;
+};
+
+static inline struct spi_slave_bcm2835 *
+to_slave_bcm2835(struct rtdm_spi_remote_slave *slave)
+{
+	return container_of(slave, struct spi_slave_bcm2835, slave);
+}
+
+static inline struct spi_master_bcm2835 *
+to_master_bcm2835(struct rtdm_spi_remote_slave *slave)
+{
+	return container_of(slave->master, struct spi_master_bcm2835, master);
+}
+
+static inline struct device *
+master_to_kdev(struct rtdm_spi_master *master)
+{
+	return &master->kmaster->dev;
+}
+
+static inline u32 bcm2835_rd(struct spi_master_bcm2835 *spim,
+			     unsigned int reg)
+{
+	return readl(spim->regs + reg);
+}
+
+static inline void bcm2835_wr(struct spi_master_bcm2835 *spim,
+			      unsigned int reg, u32 val)
+{
+	writel(val, spim->regs + reg);
+}
+
+static inline void bcm2835_rd_fifo(struct spi_master_bcm2835 *spim)
+{
+	u8 byte;
+
+	while (spim->rx_len > 0 &&
+	       (bcm2835_rd(spim, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) {
+		byte = bcm2835_rd(spim, BCM2835_SPI_FIFO);
+		if (spim->rx_buf)
+			*spim->rx_buf++ = byte;
+		spim->rx_len--;
+	}
+}
+
+static inline void bcm2835_wr_fifo(struct spi_master_bcm2835 *spim)
+{
+	u8 byte;
+
+	while (spim->tx_len > 0 &&
+	       (bcm2835_rd(spim, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) {
+		byte = spim->tx_buf ? *spim->tx_buf++ : 0;
+		bcm2835_wr(spim, BCM2835_SPI_FIFO, byte);
+		spim->tx_len--;
+	}
+}
+
+static void bcm2835_reset_hw(struct spi_master_bcm2835 *spim)
+{
+	u32 cs = bcm2835_rd(spim, BCM2835_SPI_CS);
+
+	cs &= ~(BCM2835_SPI_CS_INTR |
+		BCM2835_SPI_CS_INTD |
+		BCM2835_SPI_CS_DMAEN |
+		BCM2835_SPI_CS_TA);
+	cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;
+
+	/* Reset the SPI block. */
+	bcm2835_wr(spim, BCM2835_SPI_CS, cs);
+	bcm2835_wr(spim, BCM2835_SPI_DLEN, 0);
+}
+
+static int bcm2835_spi_interrupt(rtdm_irq_t *irqh)
+{
+	struct spi_master_bcm2835 *spim;
+
+	spim = rtdm_irq_get_arg(irqh, struct spi_master_bcm2835);
+
+	bcm2835_rd_fifo(spim);
+	bcm2835_wr_fifo(spim);
+
+	if (bcm2835_rd(spim, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) {
+		bcm2835_reset_hw(spim);
+		rtdm_event_signal(&spim->transfer_done);
+	}
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int bcm2835_configure(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+	struct rtdm_spi_config *config = &slave->config;
+	unsigned long spi_hz, cdiv;
+	u32 cs;
+
+	/* Set clock polarity and phase. */
+
+	cs = bcm2835_rd(spim, BCM2835_SPI_CS);
+
+	cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
+	if (config->mode & SPI_CPOL)
+		cs |= BCM2835_SPI_CS_CPOL;
+	if (config->mode & SPI_CPHA)
+		cs |= BCM2835_SPI_CS_CPHA;
+
+	bcm2835_wr(spim, BCM2835_SPI_CS, cs);
+	
+	/* Set clock frequency. */
+
+	spi_hz = config->speed_hz;
+
+	/*
+	 * The fastest clock rate we can produce is half of the APB
+	 * clock, i.e. clk_hz / 2.
+	 */
+	if (spi_hz >= spim->clk_hz / 2)
+		cdiv = 2;
+	else if (spi_hz) {
+		cdiv = DIV_ROUND_UP(spim->clk_hz, spi_hz); /* Multiple of 2. */
+		cdiv += (cdiv % 2);
+		if (cdiv >= 65536)
+			cdiv = 0;
+	} else
+		cdiv = 0;
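+	/*
+	 * Worked example (illustrative only, assuming the typical
+	 * 250 MHz core clock): for spi_hz = 10 MHz, cdiv =
+	 * DIV_ROUND_UP(250000000, 10000000) = 25, rounded up to the
+	 * even value 26, which yields an effective SCLK of
+	 * 250 MHz / 26, i.e. roughly 9.6 MHz.
+	 */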
+
+	bcm2835_wr(spim, BCM2835_SPI_CLK, cdiv);
+	
+	return 0;
+}
+
+static void bcm2835_chip_select(struct rtdm_spi_remote_slave *slave,
+				bool active)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+	struct rtdm_spi_config *config = &slave->config;
+	u32 cs;
+
+	cs = bcm2835_rd(spim, BCM2835_SPI_CS);
+
+	if (config->mode & SPI_CS_HIGH) {
+		cs |= BCM2835_SPI_CS_CSPOL;
+		cs |= BCM2835_SPI_CS_CSPOL0 << slave->chip_select;
+	} else {
+		cs &= ~BCM2835_SPI_CS_CSPOL;
+		cs &= ~(BCM2835_SPI_CS_CSPOL0 << slave->chip_select);
+	}
+
+	/* "active" is the logical state, not the impedance level. */
+
+	if (active) {
+		if (config->mode & SPI_NO_CS)
+			cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
+		else {
+			cs &= ~(BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01);
+			cs |= slave->chip_select;
+		}
+	} else {
+		/* Put HW-CS into deselected state. */
+		cs &= ~BCM2835_SPI_CS_CSPOL;
+		/* Use the "undefined" chip-select as precaution. */
+		cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
+	}
+
+	bcm2835_wr(spim, BCM2835_SPI_CS, cs);
+}
+
+static int do_transfer_irq(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+	int ret;
+	u32 cs;
+	
+	cs = bcm2835_rd(spim, BCM2835_SPI_CS);
+
+	cs &= ~BCM2835_SPI_CS_REN;
+	if ((slave->config.mode & SPI_3WIRE) && spim->rx_buf)
+		cs |= BCM2835_SPI_CS_REN;
+
+	cs |= BCM2835_SPI_CS_TA;
+
+	/*
+	 * Pre-fill the FIFO only when a GPIO-based chip-select is
+	 * used. With the native CS, rare events have been observed
+	 * where the line flapped for less than 1us, and pre-filling
+	 * might change that behaviour; this cannot happen with a
+	 * GPIO CS, so the optimization is restricted to that case.
+	 */
+	if (gpio_is_valid(slave->cs_gpio)) {
+		/* Set dummy CS, ->chip_select() was not called. */
+		cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
+		/* Enable SPI block, before filling FIFO. */
+		bcm2835_wr(spim, BCM2835_SPI_CS, cs);
+		bcm2835_wr_fifo(spim);
+	}
+
+	/* Enable interrupts last, wait for transfer completion. */
+	cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD;
+	bcm2835_wr(spim, BCM2835_SPI_CS, cs);
+
+	ret = rtdm_event_wait(&spim->transfer_done);
+	if (ret) {
+		bcm2835_reset_hw(spim);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int bcm2835_transfer_iobufs(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+	struct spi_slave_bcm2835 *bcm = to_slave_bcm2835(slave);
+
+	if (bcm->io_len == 0)
+		return -EINVAL;	/* No I/O buffers set. */
+	
+	spim->tx_len = bcm->io_len / 2;
+	spim->rx_len = spim->tx_len;
+	spim->tx_buf = bcm->io_virt + spim->rx_len;
+	spim->rx_buf = bcm->io_virt;
+
+	return do_transfer_irq(slave);
+}
+
+static int bcm2835_transfer_iobufs_n(struct rtdm_spi_remote_slave *slave,
+				     int len)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+	struct spi_slave_bcm2835 *bcm = to_slave_bcm2835(slave);
+
+	if ((bcm->io_len == 0) ||
+		(len <= 0) || (len > (bcm->io_len / 2)))
+		return -EINVAL;
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = bcm->io_virt + bcm->io_len / 2;
+	spim->rx_buf = bcm->io_virt;
+
+	return do_transfer_irq(slave);
+}
+
+static ssize_t bcm2835_read(struct rtdm_spi_remote_slave *slave,
+			    void *rx, size_t len)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = NULL;
+	spim->rx_buf = rx;
+
+	return do_transfer_irq(slave) ?: len;
+}
+
+static ssize_t bcm2835_write(struct rtdm_spi_remote_slave *slave,
+			     const void *tx, size_t len)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = tx;
+	spim->rx_buf = NULL;
+
+	return do_transfer_irq(slave) ?: len;
+}
+
+static int set_iobufs(struct spi_slave_bcm2835 *bcm, size_t len)
+{
+	dma_addr_t dma;
+	void *p;
+
+	if (len == 0)
+		return -EINVAL;
+	
+	len = L1_CACHE_ALIGN(len) * 2;
+	if (len == bcm->io_len)
+		return 0;
+
+	if (bcm->io_len)
+		return -EINVAL;	/* I/O buffers may not be resized. */
+
+	/*
+	 * Since we need the I/O buffers to be set for starting a
+	 * transfer, there is no need for serializing this routine and
+	 * transfer_iobufs(), provided io_len is set last.
+	 *
+	 * NOTE: We don't need coherent memory until we actually get
+	 * DMA transfers working; this code is a bit ahead of
+	 * schedule.
+	 *
+	 * Revisit: this assumes DMA mask is 4Gb.
+	 */
+	p = dma_alloc_coherent(NULL, len, &dma, GFP_KERNEL);
+	if (p == NULL)
+		return -ENOMEM;
+
+	bcm->io_dma = dma;
+	bcm->io_virt = p;
+	smp_mb();
+	/*
+	 * May race with transfer_iobufs(), must be assigned after all
+	 * the rest is set up, enforcing a membar.
+	 */
+	bcm->io_len = len;
+	
+	return 0;
+}
+
+static int bcm2835_set_iobufs(struct rtdm_spi_remote_slave *slave,
+			      struct rtdm_spi_iobufs *p)
+{
+	struct spi_slave_bcm2835 *bcm = to_slave_bcm2835(slave);
+	int ret;
+
+	ret = set_iobufs(bcm, p->io_len);
+	if (ret)
+		return ret;
+
+	p->i_offset = 0;
+	p->o_offset = bcm->io_len / 2;
+	p->map_len = bcm->io_len;
+	
+	return 0;
+}
+
+static int bcm2835_mmap_iobufs(struct rtdm_spi_remote_slave *slave,
+			       struct vm_area_struct *vma)
+{
+	struct spi_slave_bcm2835 *bcm = to_slave_bcm2835(slave);
+
+	/*
+	 * dma_alloc_coherent() delivers non-cached memory, make sure
+	 * to return consistent mapping attributes. Typically, mixing
+	 * memory attributes across address spaces referring to the
+	 * same physical area is architecturally wrong on ARM.
+	 */
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	return rtdm_mmap_kmem(vma, bcm->io_virt);
+}
+
+static void bcm2835_mmap_release(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_slave_bcm2835 *bcm = to_slave_bcm2835(slave);
+
+	dma_free_coherent(NULL, bcm->io_len,
+			  bcm->io_virt, bcm->io_dma);
+	bcm->io_len = 0;
+}
+
+static int gpio_match_name(struct gpio_chip *chip, void *data)
+{
+	return !strcmp(chip->label, data);
+}
+
+static int find_cs_gpio(struct spi_device *spi)
+{
+	struct spi_master *kmaster = spi->master;
+	u32 pingroup_index, pin, pin_index;
+	struct device_node *pins;
+	struct gpio_chip *chip;
+	int ret;
+
+	if (gpio_is_valid(spi->cs_gpio)) {
+		dev_info(&spi->dev, "using GPIO%i for CS%d\n",
+			 spi->cs_gpio, spi->chip_select);
+		return 0;
+	}
+
+	/* Translate native CS to GPIO. */
+
+	for (pingroup_index = 0;
+	     (pins = of_parse_phandle(kmaster->dev.of_node,
+		     "pinctrl-0", pingroup_index)) != 0; pingroup_index++) {
+		for (pin_index = 0;
+		     of_property_read_u32_index(pins, "brcm,pins",
+				pin_index, &pin) == 0; pin_index++) {
+			if ((spi->chip_select == 0 &&
+			     (pin == 8 || pin == 36 || pin == 46)) ||
+			    (spi->chip_select == 1 &&
+			     (pin == 7 || pin == 35))) {
+				spi->cs_gpio = pin;
+				break;
+			}
+		}
+		of_node_put(pins);
+	}
+
+	/* If that failed, assume GPIOs 7-11 are used */
+	if (!gpio_is_valid(spi->cs_gpio) ) {
+		chip = gpiochip_find("pinctrl-bcm2835", gpio_match_name);
+		if (chip == NULL)
+			return 0;
+
+		spi->cs_gpio = chip->base + 8 - spi->chip_select;
+	}
+
+	dev_info(&spi->dev,
+		 "setting up native-CS%i as GPIO %i\n",
+		 spi->chip_select, spi->cs_gpio);
+
+	ret = gpio_direction_output(spi->cs_gpio,
+			    (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+	if (ret) {
+		dev_err(&spi->dev,
+			"could not set CS%i gpio %i as output: %i",
+			spi->chip_select, spi->cs_gpio, ret);
+		return ret;
+	}
+
+	/*
+	 * Force value on GPIO in case the pin controller does not
+	 * handle that properly when switching to output mode.
+	 */
+	gpio_set_value(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+
+	return 0;
+}
+
+static struct rtdm_spi_remote_slave *
+bcm2835_attach_slave(struct rtdm_spi_master *master, struct spi_device *spi)
+{
+	struct spi_slave_bcm2835 *bcm;
+	int ret;
+
+	if (spi->chip_select > 1) {
+		/*
+		 * Bail out if a native CS higher than 1 is requested:
+		 * officially there is a CS2, but it is not documented
+		 * which GPIO it is connected to...
+		 */
+		dev_err(&spi->dev,
+			"%s: only two native chip-selects are supported\n",
+			__func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = find_cs_gpio(spi);
+	if (ret)
+		return ERR_PTR(ret);
+	
+	bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
+	if (bcm == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = rtdm_spi_add_remote_slave(&bcm->slave, master, spi);
+	if (ret) {
+		dev_err(&spi->dev,
+			"%s: failed to attach slave\n", __func__);
+		kfree(bcm);
+		return ERR_PTR(ret);
+	}
+
+	return &bcm->slave;
+}
+
+static void bcm2835_detach_slave(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_slave_bcm2835 *bcm = to_slave_bcm2835(slave);
+
+	rtdm_spi_remove_remote_slave(slave);
+	kfree(bcm);
+}
+
+static struct rtdm_spi_master_ops bcm2835_master_ops = {
+	.configure = bcm2835_configure,
+	.chip_select = bcm2835_chip_select,
+	.set_iobufs = bcm2835_set_iobufs,
+	.mmap_iobufs = bcm2835_mmap_iobufs,
+	.mmap_release = bcm2835_mmap_release,
+	.transfer_iobufs = bcm2835_transfer_iobufs,
+	.transfer_iobufs_n = bcm2835_transfer_iobufs_n,
+	.write = bcm2835_write,
+	.read = bcm2835_read,
+	.attach_slave = bcm2835_attach_slave,
+	.detach_slave = bcm2835_detach_slave,
+};
+
+static int bcm2835_spi_probe(struct platform_device *pdev)
+{
+	struct spi_master_bcm2835 *spim;
+	struct rtdm_spi_master *master;
+	struct spi_master *kmaster;
+	struct resource *r;
+	int ret, irq;
+
+	dev_dbg(&pdev->dev, "%s: entered\n", __func__);
+
+	master = rtdm_spi_alloc_master(&pdev->dev,
+		   struct spi_master_bcm2835, master);
+	if (master == NULL)
+		return -ENOMEM;
+
+	master->subclass = RTDM_SUBCLASS_BCM2835;
+	master->ops = &bcm2835_master_ops;
+	platform_set_drvdata(pdev, master);
+
+	kmaster = master->kmaster;
+	kmaster->mode_bits = BCM2835_SPI_MODE_BITS;
+	kmaster->bits_per_word_mask = SPI_BPW_MASK(8);
+	kmaster->num_chipselect = 2;
+	kmaster->dev.of_node = pdev->dev.of_node;
+
+	spim = container_of(master, struct spi_master_bcm2835, master);
+	rtdm_event_init(&spim->transfer_done, 0);
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	spim->regs = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(spim->regs)) {
+		dev_err(&pdev->dev, "%s: cannot map I/O memory\n", __func__);
+		ret = PTR_ERR(spim->regs);
+		goto fail;
+	}
+	
+	spim->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(spim->clk)) {
+		ret = PTR_ERR(spim->clk);
+		goto fail;
+	}
+
+	spim->clk_hz = clk_get_rate(spim->clk);
+
+	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (irq <= 0) {
+		ret = irq ?: -ENODEV;
+		goto fail;
+	}
+
+	clk_prepare_enable(spim->clk);
+
+	/* Initialise the hardware with the default polarities */
+	bcm2835_wr(spim, BCM2835_SPI_CS,
+		   BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+
+	ret = rtdm_irq_request(&spim->irqh, irq,
+			       bcm2835_spi_interrupt, 0,
+			       dev_name(&pdev->dev), spim);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: cannot request IRQ%d\n",
+			__func__, irq);
+		goto fail_unclk;
+	}
+
+	ret = rtdm_spi_add_master(&spim->master);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: failed to add master\n",
+			__func__);
+		goto fail_unclk;
+	}
+
+	return 0;
+
+fail_unclk:
+	clk_disable_unprepare(spim->clk);
+fail:
+	spi_master_put(kmaster);
+
+	return ret;
+}
+
+static int bcm2835_spi_remove(struct platform_device *pdev)
+{
+	struct rtdm_spi_master *master = platform_get_drvdata(pdev);
+	struct spi_master_bcm2835 *spim;
+
+	dev_dbg(&pdev->dev, "%s: entered\n", __func__);
+
+	spim = container_of(master, struct spi_master_bcm2835, master);
+
+	/* Clear FIFOs, and disable the HW block */
+	bcm2835_wr(spim, BCM2835_SPI_CS,
+		   BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+
+	rtdm_irq_free(&spim->irqh);
+
+	clk_disable_unprepare(spim->clk);
+
+	rtdm_spi_remove_master(master);
+
+	return 0;
+}
+
+static const struct of_device_id bcm2835_spi_match[] = {
+	{
+		.compatible = "brcm,bcm2835-spi",
+	},
+	{ /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcm2835_spi_match);
+
+static struct platform_driver bcm2835_spi_driver = {
+	.driver		= {
+		.name		= "spi-bcm2835",
+		.of_match_table	= bcm2835_spi_match,
+	},
+	.probe		= bcm2835_spi_probe,
+	.remove		= bcm2835_spi_remove,
+};
+module_platform_driver(bcm2835_spi_driver);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/Kconfig	2022-03-21 12:58:31.421869147 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/udd/Kconfig	1970-01-01 01:00:00.000000000 +0100
+menu "Drivers"
+
+config XENO_OPT_RTDM_COMPAT_DEVNODE
+	bool "Enable legacy pathnames for named RTDM devices"
+	default y
+	help
+	This compatibility option allows applications to open named
+	RTDM devices using the legacy naming scheme, i.e.
+
+	fd = open("devname", ...);
+	   or
+	fd = open("/dev/devname", ...);
+
+	When such a request is received by RTDM, a warning message is
+	issued to the kernel log whenever XENO_OPT_DEBUG_LEGACY is
+	also enabled in the kernel configuration.
+
+	Applications should open named devices via their actual device
+	nodes instead, i.e.
+
+	fd = open("/dev/rtdm/devname", ...);
+
+source "drivers/xenomai/autotune/Kconfig"
+source "drivers/xenomai/serial/Kconfig"
+source "drivers/xenomai/testing/Kconfig"
+source "drivers/xenomai/can/Kconfig"
+source "drivers/xenomai/net/Kconfig"
+source "drivers/xenomai/analogy/Kconfig"
+source "drivers/xenomai/ipc/Kconfig"
+source "drivers/xenomai/udd/Kconfig"
+source "drivers/xenomai/gpio/Kconfig"
+source "drivers/xenomai/gpiopwm/Kconfig"
+source "drivers/xenomai/spi/Kconfig"
+
+endmenu
+++ linux-patched/drivers/xenomai/udd/Kconfig	2022-03-21 12:58:31.414869215 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/udd/udd.c	1970-01-01 01:00:00.000000000 +0100
+menu "UDD support"
+
+config XENO_DRIVERS_UDD
+	tristate "User-space device driver framework"
+	help
+
+	An RTDM-based framework providing interrupt control and I/O
+	memory access interfaces to user-space device drivers.
+
+endmenu
+++ linux-patched/drivers/xenomai/udd/udd.c	2022-03-21 12:58:31.406869293 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/udd/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <rtdm/cobalt.h>
+#include <rtdm/driver.h>
+#include <rtdm/udd.h>
+#include <pipeline/inband_work.h>
+
+struct udd_context {
+	u32 event_count;
+};
+
+static int udd_open(struct rtdm_fd *fd, int oflags)
+{
+	struct udd_context *context;
+	struct udd_device *udd;
+	int ret;
+
+	udd = container_of(rtdm_fd_device(fd), struct udd_device, __reserved.device);
+	if (udd->ops.open) {
+		ret = udd->ops.open(fd, oflags);
+		if (ret)
+			return ret;
+	}
+
+	context = rtdm_fd_to_private(fd);
+	context->event_count = 0;
+
+	return 0;
+}
+
+static void udd_close(struct rtdm_fd *fd)
+{
+	struct udd_device *udd;
+
+	udd = container_of(rtdm_fd_device(fd), struct udd_device, __reserved.device);
+	if (udd->ops.close)
+		udd->ops.close(fd);
+}
+
+static int udd_ioctl_rt(struct rtdm_fd *fd,
+			unsigned int request, void __user *arg)
+{
+	struct udd_signotify signfy;
+	struct udd_reserved *ur;
+	struct udd_device *udd;
+	rtdm_event_t done;
+	int ret;
+
+	udd = container_of(rtdm_fd_device(fd), struct udd_device, __reserved.device);
+	if (udd->ops.ioctl) {
+		ret = udd->ops.ioctl(fd, request, arg);
+		if (ret != -ENOSYS)
+			return ret;
+	}
+
+	ur = &udd->__reserved;
+
+	switch (request) {
+	case UDD_RTIOC_IRQSIG:
+		ret = rtdm_safe_copy_from_user(fd, &signfy, arg, sizeof(signfy));
+		if (ret)
+			return ret;
+		/* Early check; this is redone each time a signal is issued. */
+		if (signfy.pid <= 0)
+			ur->signfy.pid = -1;
+		else {
+			if (signfy.sig < SIGRTMIN || signfy.sig > SIGRTMAX)
+				return -EINVAL;
+			if (cobalt_thread_find_local(signfy.pid) == NULL)
+				return -EINVAL;
+			ur->signfy = signfy;
+		}
+		break;
+	case UDD_RTIOC_IRQEN:
+	case UDD_RTIOC_IRQDIS:
+		if (udd->irq == UDD_IRQ_NONE || udd->irq == UDD_IRQ_CUSTOM)
+			return -EIO;
+		rtdm_event_init(&done, 0);
+		if (request == UDD_RTIOC_IRQEN)
+			udd_enable_irq(udd, &done);
+		else
+			udd_disable_irq(udd, &done);
+		ret = rtdm_event_wait(&done);
+		if (ret != -EIDRM)
+			rtdm_event_destroy(&done);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static ssize_t udd_read_rt(struct rtdm_fd *fd,
+			   void __user *buf, size_t len)
+{
+	struct udd_context *context;
+	struct udd_reserved *ur;
+	struct udd_device *udd;
+	rtdm_lockctx_t ctx;
+	ssize_t ret = 0;
+	u32 count;
+
+	if (len != sizeof(count))
+		return -EINVAL;
+
+	udd = container_of(rtdm_fd_device(fd), struct udd_device, __reserved.device);
+	if (udd->irq == UDD_IRQ_NONE)
+		return -EIO;
+
+	ur = &udd->__reserved;
+	context = rtdm_fd_to_private(fd);
+
+	cobalt_atomic_enter(ctx);
+
+	if (ur->event_count != context->event_count)
+		rtdm_event_clear(&ur->pulse);
+	else
+		ret = rtdm_event_wait(&ur->pulse);
+
+	count = ur->event_count;
+
+	cobalt_atomic_leave(ctx);
+
+	if (ret)
+		return ret;
+
+	context->event_count = count;
+	ret = rtdm_copy_to_user(fd, buf, &count, sizeof(count));
+
+	return ret ?: sizeof(count);
+}
+
+static ssize_t udd_write_rt(struct rtdm_fd *fd,
+			    const void __user *buf, size_t len)
+{
+	int ret;
+	u32 val;
+
+	if (len != sizeof(val))
+		return -EINVAL;
+
+	ret = rtdm_safe_copy_from_user(fd, &val, buf, sizeof(val));
+	if (ret)
+		return ret;
+
+	ret = udd_ioctl_rt(fd, val ? UDD_RTIOC_IRQEN : UDD_RTIOC_IRQDIS, NULL);
+
+	return ret ?: len;
+}
+
+static int udd_select(struct rtdm_fd *fd, struct xnselector *selector,
+		      unsigned int type, unsigned int index)
+{
+	struct udd_device *udd;
+
+	udd = container_of(rtdm_fd_device(fd), struct udd_device, __reserved.device);
+	if (udd->irq == UDD_IRQ_NONE)
+		return -EIO;
+
+	return rtdm_event_select(&udd->__reserved.pulse,
+				 selector, type, index);
+}
+
+static int udd_irq_handler(rtdm_irq_t *irqh)
+{
+	struct udd_device *udd;
+	int ret;
+
+	udd = rtdm_irq_get_arg(irqh, struct udd_device);
+	ret = udd->ops.interrupt(udd);
+	if (ret == RTDM_IRQ_HANDLED)
+		udd_notify_event(udd);
+
+	return ret;
+}
+
+static int mapper_open(struct rtdm_fd *fd, int oflags)
+{
+	int minor = rtdm_fd_minor(fd);
+	struct udd_device *udd;
+
+	/*
+	 * Check that we are opening a mapper instance pointing at a
+	 * valid memory region, e.g. UDD creates the companion device
+	 * "foo,mapper" on the fly when registering the main device
+	 * "foo". Userland may then open("/dev/foo,mapper0", ...)
+	 * followed by a call to mmap() for mapping the memory region
+	 * #0 as declared in the mem_regions[] array of the main
+	 * device (see the usage sketch after this function).
+	 *
+	 * We support sparse region arrays, so the device minor shall
+	 * match the mem_regions[] index exactly.
+	 */
+	if (minor < 0 || minor >= UDD_NR_MAPS)
+		return -EIO;
+
+	udd = udd_get_device(fd);
+	if (udd->mem_regions[minor].type == UDD_MEM_NONE)
+		return -EIO;
+
+	return 0;
+}
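+
+/*
+ * Illustrative userland sketch (not part of this driver): assuming a
+ * main device registered as "foo" with memory region #0 declared,
+ * and the default RTDM device nodes, an application would open the
+ * matching mapper instance and map region #0 as follows (region_len
+ * stands for the declared length of that region):
+ *
+ *   fd = open("/dev/rtdm/foo,mapper0", O_RDWR);
+ *   p = mmap(NULL, region_len, PROT_READ | PROT_WRITE,
+ *            MAP_SHARED, fd, 0);
+ */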
+
+static void mapper_close(struct rtdm_fd *fd)
+{
+	/* nop */
+}
+
+static int mapper_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma)
+{
+	struct udd_memregion *rn;
+	struct udd_device *udd;
+	size_t len;
+	int ret;
+
+	udd = udd_get_device(fd);
+	if (udd->ops.mmap)
+		/* Offload to client driver if handler is present. */
+		return udd->ops.mmap(fd, vma);
+
+	/* Otherwise DIY using the RTDM helpers. */
+
+	len = vma->vm_end - vma->vm_start;
+	rn = udd->mem_regions + rtdm_fd_minor(fd);
+	if (rn->len < len)
+		/* Can't map that much, bail out. */
+		return -EINVAL;
+
+	switch (rn->type) {
+	case UDD_MEM_PHYS:
+		ret = rtdm_mmap_iomem(vma, rn->addr);
+		break;
+	case UDD_MEM_LOGICAL:
+		ret = rtdm_mmap_kmem(vma, (void *)rn->addr);
+		break;
+	case UDD_MEM_VIRTUAL:
+		ret = rtdm_mmap_vmem(vma, (void *)rn->addr);
+		break;
+	default:
+		ret = -EINVAL;	/* Paranoid, can't happen. */
+	}
+
+	return ret;
+}
+
+static inline int check_memregion(struct udd_device *udd,
+				  struct udd_memregion *rn)
+{
+	if (rn->name == NULL)
+		return -EINVAL;
+
+	if (rn->addr == 0)
+		return -EINVAL;
+
+	if (rn->len == 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static inline int register_mapper(struct udd_device *udd)
+{
+	struct udd_reserved *ur = &udd->__reserved;
+	struct rtdm_driver *drv = &ur->mapper_driver;
+	struct udd_mapper *mapper;
+	struct udd_memregion *rn;
+	int n, ret;
+
+	ur->mapper_name = kasformat("%s,mapper%%d", udd->device_name);
+	if (ur->mapper_name == NULL)
+		return -ENOMEM;
+
+	drv->profile_info = (struct rtdm_profile_info)
+		RTDM_PROFILE_INFO(mapper, RTDM_CLASS_MEMORY,
+				  RTDM_SUBCLASS_GENERIC, 0);
+	drv->device_flags = RTDM_NAMED_DEVICE|RTDM_FIXED_MINOR;
+	drv->device_count = UDD_NR_MAPS;
+	drv->base_minor = 0;
+	drv->ops = (struct rtdm_fd_ops){
+		.open		=	mapper_open,
+		.close		=	mapper_close,
+		.mmap		=	mapper_mmap,
+	};
+
+	for (n = 0, mapper = ur->mapdev; n < UDD_NR_MAPS; n++, mapper++) {
+		rn = udd->mem_regions + n;
+		if (rn->type == UDD_MEM_NONE)
+			continue;
+		mapper->dev.driver = drv;
+		mapper->dev.label = ur->mapper_name;
+		mapper->dev.minor = n;
+		mapper->udd = udd;
+		ret = rtdm_dev_register(&mapper->dev);
+		if (ret)
+			goto undo;
+	}
+
+	return 0;
+undo:
+	while (--n >= 0)
+		rtdm_dev_unregister(&ur->mapdev[n].dev);
+
+	return ret;
+}
+
+/**
+ * @brief Register a UDD device
+ *
+ * This routine registers a mini-driver at the UDD core.
+ *
+ * @param udd @ref udd_device "UDD device descriptor" which should
+ * describe the new device properties.
+ *
+ * @return Zero is returned upon success, otherwise a negative error
+ * code is returned, from the set of error codes defined by
+ * rtdm_dev_register(). In addition, the following error codes can be
+ * returned:
+ *
+ * - -EINVAL, some of the memory regions declared in the
+ *   udd_device.mem_regions[] array have invalid properties, i.e. bad
+ *   type, NULL name, zero length or address. Any undeclared region
+ *   entry from the array must bear the UDD_MEM_NONE type.
+ *
+ * - -EINVAL, if udd_device.irq is different from UDD_IRQ_CUSTOM and
+ *   UDD_IRQ_NONE but invalid, causing rtdm_irq_request() to fail.
+ *
+ * - -EINVAL, if udd_device.device_flags contains invalid flags.
+ *
+ * - -ENOSYS, if this service is called while the real-time core is disabled.
+ *
+ * @coretags{secondary-only}
+ */
+int udd_register_device(struct udd_device *udd)
+{
+	struct rtdm_device *dev = &udd->__reserved.device;
+	struct udd_reserved *ur = &udd->__reserved;
+	struct rtdm_driver *drv = &ur->driver;
+	struct udd_memregion *rn;
+	int ret, n;
+
+	if (udd->device_flags & RTDM_PROTOCOL_DEVICE)
+		return -EINVAL;
+
+	if (udd->irq != UDD_IRQ_NONE && udd->irq != UDD_IRQ_CUSTOM &&
+	    udd->ops.interrupt == NULL)
+		return -EINVAL;
+
+	for (n = 0, ur->nr_maps = 0; n < UDD_NR_MAPS; n++) {
+		/* We allow sparse region arrays. */
+		rn = udd->mem_regions + n;
+		if (rn->type == UDD_MEM_NONE)
+			continue;
+		ret = check_memregion(udd, rn);
+		if (ret)
+			return ret;
+		udd->__reserved.nr_maps++;
+	}
+
+	drv->profile_info = (struct rtdm_profile_info)
+		RTDM_PROFILE_INFO(udd->device_name, RTDM_CLASS_UDD,
+				  udd->device_subclass, 0);
+	drv->device_flags = RTDM_NAMED_DEVICE|udd->device_flags;
+	drv->device_count = 1;
+	drv->context_size = sizeof(struct udd_context);
+	drv->ops = (struct rtdm_fd_ops){
+		.open = udd_open,
+		.ioctl_rt = udd_ioctl_rt,
+		.read_rt = udd_read_rt,
+		.write_rt = udd_write_rt,
+		.close = udd_close,
+		.select = udd_select,
+	};
+
+	dev->driver = drv;
+	dev->label = udd->device_name;
+
+	ret = rtdm_dev_register(dev);
+	if (ret)
+		return ret;
+
+	if (ur->nr_maps > 0) {
+		ret = register_mapper(udd);
+		if (ret)
+			goto fail_mapper;
+	} else
+		ur->mapper_name = NULL;
+
+	ur->event_count = 0;
+	rtdm_event_init(&ur->pulse, 0);
+	ur->signfy.pid = -1;
+
+	if (udd->irq != UDD_IRQ_NONE && udd->irq != UDD_IRQ_CUSTOM) {
+		ret = rtdm_irq_request(&ur->irqh, udd->irq,
+				       udd_irq_handler, 0,
+				       dev->name, udd);
+		if (ret)
+			goto fail_irq_request;
+	}
+
+	return 0;
+
+fail_irq_request:
+	for (n = 0; n < UDD_NR_MAPS; n++) {
+		rn = udd->mem_regions + n;
+		if (rn->type != UDD_MEM_NONE)
+			rtdm_dev_unregister(&ur->mapdev[n].dev);
+	}
+fail_mapper:
+	rtdm_dev_unregister(dev);
+	if (ur->mapper_name)
+		kfree(ur->mapper_name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(udd_register_device);
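+
+/*
+ * Minimal registration sketch (illustrative only; foo_interrupt,
+ * FOO_IRQ and FOO_IOMEM_BASE are hypothetical mini-driver names, not
+ * part of the UDD core):
+ *
+ *   static struct udd_device foo_udd = {
+ *           .device_name = "foo",
+ *           .device_flags = RTDM_NAMED_DEVICE,
+ *           .irq = FOO_IRQ,
+ *           .ops = {
+ *                   .interrupt = foo_interrupt,
+ *           },
+ *           .mem_regions[0] = {
+ *                   .name = "foo_regs",
+ *                   .addr = FOO_IOMEM_BASE,
+ *                   .len = 0x1000,
+ *                   .type = UDD_MEM_PHYS,
+ *           },
+ *   };
+ *
+ *   ret = udd_register_device(&foo_udd);
+ */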
+
+/**
+ * @brief Unregister a UDD device
+ *
+ * This routine unregisters a mini-driver from the UDD core. This
+ * routine waits until all connections to @a udd have been closed
+ * prior to unregistering.
+ *
+ * @param udd UDD device descriptor
+ *
+ * @return Zero is returned upon success, otherwise -ENXIO is returned
+ * if this service is called while the Cobalt kernel is disabled.
+ *
+ * @coretags{secondary-only}
+ */
+int udd_unregister_device(struct udd_device *udd)
+{
+	struct udd_reserved *ur = &udd->__reserved;
+	struct udd_memregion *rn;
+	int n;
+
+	rtdm_event_destroy(&ur->pulse);
+
+	if (udd->irq != UDD_IRQ_NONE && udd->irq != UDD_IRQ_CUSTOM)
+		rtdm_irq_free(&ur->irqh);
+
+	for (n = 0; n < UDD_NR_MAPS; n++) {
+		rn = udd->mem_regions + n;
+		if (rn->type != UDD_MEM_NONE)
+			rtdm_dev_unregister(&ur->mapdev[n].dev);
+	}
+
+	if (ur->mapper_name)
+		kfree(ur->mapper_name);
+
+	rtdm_dev_unregister(&ur->device);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(udd_unregister_device);
+
+/**
+ * @brief Notify an IRQ event for an unmanaged interrupt
+ *
+ * When interrupt management for a device is handed over to the
+ * mini-driver (see UDD_IRQ_CUSTOM), the latter should notify the
+ * UDD core whenever an IRQ event is received, by calling this
+ * service.
+ *
+ * As a result, the UDD core wakes up any Cobalt thread waiting for
+ * interrupts on the device via a read(2) or select(2) call.
+ *
+ * @param udd UDD device descriptor receiving the IRQ.
+ *
+ * @coretags{coreirq-only}
+ *
+ * @note In case the @ref udd_irq_handler "IRQ handler" from the
+ * mini-driver requested the UDD core not to re-enable the interrupt
+ * line, the application may later request the unmasking by issuing
+ * the UDD_RTIOC_IRQEN ioctl(2) command. Writing a non-zero integer to
+ * the device via the write(2) system call has the same effect.
+ */
+void udd_notify_event(struct udd_device *udd)
+{
+	struct udd_reserved *ur = &udd->__reserved;
+	union sigval sival;
+	rtdm_lockctx_t ctx;
+
+	cobalt_atomic_enter(ctx);
+	ur->event_count++;
+	rtdm_event_signal(&ur->pulse);
+	cobalt_atomic_leave(ctx);
+
+	if (ur->signfy.pid > 0) {
+		sival.sival_int = (int)ur->event_count;
+		__cobalt_sigqueue(ur->signfy.pid, ur->signfy.sig, &sival);
+	}
+}
+EXPORT_SYMBOL_GPL(udd_notify_event);
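+
+/*
+ * Illustrative sketch for the UDD_IRQ_CUSTOM case (foo_* names and
+ * the embedding struct foo_priv are hypothetical): the mini-driver
+ * handles its own interrupt and forwards the event to the UDD core,
+ * so that readers blocked on the device are woken up.
+ *
+ *   static int foo_irq_handler(rtdm_irq_t *irqh)
+ *   {
+ *           struct foo_priv *p = rtdm_irq_get_arg(irqh, struct foo_priv);
+ *
+ *           // Acknowledge the device interrupt here, then:
+ *           udd_notify_event(&p->udd);
+ *
+ *           return RTDM_IRQ_HANDLED;
+ *   }
+ */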
+
+struct irqswitch_work {
+	struct pipeline_inband_work inband_work;
+	rtdm_irq_t *irqh;
+	int enabled;
+	rtdm_event_t *done;
+	struct irqswitch_work *self; /* Revisit: I-pipe requirement */
+};
+
+static void lostage_irqswitch_line(struct pipeline_inband_work *inband_work)
+{
+	struct irqswitch_work *rq;
+
+	/*
+	 * This runs from secondary mode, so we may flip the IRQ state
+	 * now.
+	 */
+	rq = container_of(inband_work, struct irqswitch_work, inband_work);
+	if (rq->enabled)
+		rtdm_irq_enable(rq->irqh);
+	else
+		rtdm_irq_disable(rq->irqh);
+
+	if (rq->done)
+		rtdm_event_signal(rq->done);
+
+	xnfree(rq->self);
+}
+
+static void switch_irq_line(rtdm_irq_t *irqh, int enable, rtdm_event_t *done)
+{
+	struct irqswitch_work *rq;
+
+	rq = xnmalloc(sizeof(*rq));
+	if (WARN_ON(rq == NULL))
+		return;
+
+	rq->inband_work = (struct pipeline_inband_work)
+		PIPELINE_INBAND_WORK_INITIALIZER(*rq,
+					lostage_irqswitch_line);
+	rq->irqh = irqh;
+	rq->enabled = enable;
+	rq->done = done;
+	rq->self = rq;	/* Revisit: I-pipe requirement */
+
+	/*
+	 * Not pretty, but we may not traverse the kernel code for
+	 * enabling/disabling IRQ lines from primary mode. Defer this
+	 * to the root context.
+	 */
+	pipeline_post_inband_work(rq);
+}
+
+/**
+ * @brief Enable the device IRQ line
+ *
+ * This service issues a request to the regular kernel for enabling
+ * the IRQ line registered by the driver. If the caller runs in
+ * primary mode, the request is scheduled but deferred until the
+ * current CPU leaves the real-time domain (see note). Otherwise, the
+ * request is immediately handled.
+ *
+ * @param udd The UDD driver handling the IRQ to enable. If no IRQ
+ * was registered by the driver at the UDD core, this routine has no
+ * effect.
+ *
+ * @param done Optional event to signal upon completion. If non-NULL,
+ * @a done will be posted by a call to rtdm_event_signal() after the
+ * interrupt line is enabled.
+ *
+ * @coretags{unrestricted}
+ *
+ * @note The deferral is required as some interrupt management code
+ * involved in enabling interrupt lines may not be safely executed
+ * from primary mode. By passing a valid @a done object address, the
+ * caller can wait for the request to complete, by sleeping on
+ * rtdm_event_wait().
+ */
+void udd_enable_irq(struct udd_device *udd, rtdm_event_t *done)
+{
+	struct udd_reserved *ur = &udd->__reserved;
+
+	if (udd->irq != UDD_IRQ_NONE && udd->irq != UDD_IRQ_CUSTOM)
+		switch_irq_line(&ur->irqh, 1, done);
+}
+EXPORT_SYMBOL_GPL(udd_enable_irq);
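+
+/*
+ * Illustrative sketch, mirroring the UDD_RTIOC_IRQEN handling above:
+ * a caller which must know when the line is actually enabled may
+ * pass a done event and sleep on it (all services shown are the
+ * RTDM/UDD ones used elsewhere in this file).
+ *
+ *   rtdm_event_t done;
+ *
+ *   rtdm_event_init(&done, 0);
+ *   udd_enable_irq(udd, &done);
+ *   ret = rtdm_event_wait(&done);
+ *   if (ret != -EIDRM)
+ *           rtdm_event_destroy(&done);
+ */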
+
+/**
+ * @brief Disable the device IRQ line
+ *
+ * This service issues a request to the regular kernel for disabling
+ * the IRQ line registered by the driver. If the caller runs in
+ * primary mode, the request is scheduled but deferred until the
+ * current CPU leaves the real-time domain (see note). Otherwise, the
+ * request is immediately handled.
+ *
+ * @param udd The UDD driver handling the IRQ to disable. If no IRQ
+ * was registered by the driver at the UDD core, this routine has no
+ * effect.
+ *
+ * @param done Optional event to signal upon completion. If non-NULL,
+ * @a done will be posted by a call to rtdm_event_signal() after the
+ * interrupt line is disabled.
+ *
+ * @coretags{unrestricted}
+ *
+ * @note The deferral is required as some interrupt management code
+ * involved in disabling interrupt lines may not be safely executed
+ * from primary mode. By passing a valid @a done object address, the
+ * caller can wait for the request to complete, by sleeping on
+ * rtdm_event_wait().
+ */
+void udd_disable_irq(struct udd_device *udd, rtdm_event_t *done)
+{
+	struct udd_reserved *ur = &udd->__reserved;
+
+	if (udd->irq != UDD_IRQ_NONE && udd->irq != UDD_IRQ_CUSTOM)
+		switch_irq_line(&ur->irqh, 0, done);
+}
+EXPORT_SYMBOL_GPL(udd_disable_irq);
+
+/**
+ * @brief RTDM file descriptor to target UDD device
+ *
+ * Retrieves the UDD device from a RTDM file descriptor.
+ *
+ * @param fd File descriptor received by an ancillary I/O handler
+ * from a mini-driver based on the UDD core.
+ *
+ * @return A pointer to the UDD device to which @a fd refers.
+ *
+ * @note This service is intended for use by mini-drivers based on the
+ * UDD core exclusively. Passing file descriptors referring to other
+ * RTDM devices will certainly lead to invalid results.
+ *
+ * @coretags{mode-unrestricted}
+ */
+struct udd_device *udd_get_device(struct rtdm_fd *fd)
+{
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+
+	if (dev->driver->profile_info.class_id == RTDM_CLASS_MEMORY)
+		return container_of(dev, struct udd_mapper, dev)->udd;
+
+	return container_of(dev, struct udd_device, __reserved.device);
+}
+EXPORT_SYMBOL_GPL(udd_get_device);
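+
+/*
+ * Illustrative sketch (struct foo_priv and foo_ioctl are
+ * hypothetical): an ancillary handler of a mini-driver embedding a
+ * struct udd_device may retrieve its private state from the file
+ * descriptor it is given.
+ *
+ *   static int foo_ioctl(struct rtdm_fd *fd, unsigned int request,
+ *                        void __user *arg)
+ *   {
+ *           struct udd_device *udd = udd_get_device(fd);
+ *           struct foo_priv *p = container_of(udd, struct foo_priv, udd);
+ *           ...
+ *   }
+ */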
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/udd/Makefile	2022-03-21 12:58:31.399869361 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:2 @
--- linux/drivers/xenomai/testing/timerbench.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/kernel
+
+obj-$(CONFIG_XENO_DRIVERS_UDD) += xeno_udd.o
+
+xeno_udd-y := udd.o
+++ linux-patched/drivers/xenomai/testing/timerbench.c	2022-03-21 12:58:31.392869429 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/testing/rtdmtest.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/semaphore.h>
+#include <cobalt/kernel/trace.h>
+#include <cobalt/kernel/arith.h>
+#include <rtdm/testing.h>
+#include <rtdm/driver.h>
+#include <rtdm/compat.h>
+
+MODULE_DESCRIPTION("Timer latency test helper");
+MODULE_AUTHOR("Jan Kiszka <jan.kiszka@web.de>");
+MODULE_VERSION("0.2.1");
+MODULE_LICENSE("GPL");
+
+struct rt_tmbench_context {
+	int mode;
+	unsigned int period;
+	int freeze_max;
+	int warmup_loops;
+	int samples_per_sec;
+	int32_t *histogram_min;
+	int32_t *histogram_max;
+	int32_t *histogram_avg;
+	int histogram_size;
+	int bucketsize;
+
+	rtdm_task_t timer_task;
+
+	rtdm_timer_t timer;
+	int warmup;
+	uint64_t start_time;
+	uint64_t date;
+	struct rttst_bench_res curr;
+
+	rtdm_event_t result_event;
+	struct rttst_interm_bench_res result;
+
+	struct semaphore nrt_mutex;
+};
+
+static inline void add_histogram(struct rt_tmbench_context *ctx,
+				 __s32 *histogram, __s32 addval)
+{
+	/* bucketsize steps */
+	int inabs = (addval >= 0 ? addval : -addval) / ctx->bucketsize;
+	histogram[inabs < ctx->histogram_size ?
+		  inabs : ctx->histogram_size - 1]++;
+}
+
+static inline long long slldiv(long long s, unsigned d)
+{
+	return s >= 0 ? xnarch_ulldiv(s, d, NULL) : -xnarch_ulldiv(-s, d, NULL);
+}
+
+static void eval_inner_loop(struct rt_tmbench_context *ctx, __s32 dt)
+{
+	if (dt > ctx->curr.max)
+		ctx->curr.max = dt;
+	if (dt < ctx->curr.min)
+		ctx->curr.min = dt;
+	ctx->curr.avg += dt;
+
+	if (xntrace_enabled() &&
+		ctx->freeze_max &&
+		(dt > ctx->result.overall.max) &&
+		!ctx->warmup) {
+		ctx->result.overall.max = dt;
+		xntrace_latpeak_freeze(dt);
+	}
+
+	ctx->date += ctx->period;
+
+	if (!ctx->warmup && ctx->histogram_size)
+		add_histogram(ctx, ctx->histogram_avg, dt);
+
+	/* Evaluate overruns and adjust next release date.
+	   Beware of signedness! */
+	while (dt > 0 && (unsigned long)dt > ctx->period) {
+		ctx->curr.overruns++;
+		ctx->date += ctx->period;
+		dt -= ctx->period;
+	}
+}
+
+static void eval_outer_loop(struct rt_tmbench_context *ctx)
+{
+	if (!ctx->warmup) {
+		if (ctx->histogram_size) {
+			add_histogram(ctx, ctx->histogram_max, ctx->curr.max);
+			add_histogram(ctx, ctx->histogram_min, ctx->curr.min);
+		}
+
+		ctx->result.last.min = ctx->curr.min;
+		if (ctx->curr.min < ctx->result.overall.min)
+			ctx->result.overall.min = ctx->curr.min;
+
+		ctx->result.last.max = ctx->curr.max;
+		if (ctx->curr.max > ctx->result.overall.max)
+			ctx->result.overall.max = ctx->curr.max;
+
+		ctx->result.last.avg =
+		    slldiv(ctx->curr.avg, ctx->samples_per_sec);
+		ctx->result.overall.avg += ctx->result.last.avg;
+		ctx->result.overall.overruns += ctx->curr.overruns;
+		rtdm_event_pulse(&ctx->result_event);
+	}
+
+	if (ctx->warmup &&
+	    (ctx->result.overall.test_loops == ctx->warmup_loops)) {
+		ctx->result.overall.test_loops = 0;
+		ctx->warmup = 0;
+	}
+
+	ctx->curr.min = 10000000;
+	ctx->curr.max = -10000000;
+	ctx->curr.avg = 0;
+	ctx->curr.overruns = 0;
+
+	ctx->result.overall.test_loops++;
+}
+
+static void timer_task_proc(void *arg)
+{
+	struct rt_tmbench_context *ctx = arg;
+	int count, err;
+	spl_t s;
+
+	/* first event: one millisecond from now. */
+	ctx->date = rtdm_clock_read_monotonic() + 1000000;
+
+	while (1) {
+		for (count = 0; count < ctx->samples_per_sec; count++) {
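+			/*
+			 * The start-time snapshot and the sleep are
+			 * programmed within one atomic section, which
+			 * keeps preemption from slipping in between
+			 * the two and distorting the sample.
+			 */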
+			cobalt_atomic_enter(s);
+			ctx->start_time = rtdm_clock_read_monotonic();
+			err = rtdm_task_sleep_abs(ctx->date,
+						  RTDM_TIMERMODE_ABSOLUTE);
+			cobalt_atomic_leave(s);
+			if (err)
+				return;
+
+			eval_inner_loop(ctx,
+					(__s32)(rtdm_clock_read_monotonic() -
+						ctx->date));
+		}
+		eval_outer_loop(ctx);
+	}
+}
+
+static void timer_proc(rtdm_timer_t *timer)
+{
+	struct rt_tmbench_context *ctx =
+	    container_of(timer, struct rt_tmbench_context, timer);
+	int err;
+
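+	/*
+	 * Re-arm the timer from within the handler; if the next shot
+	 * is already overdue, rtdm_timer_start_in_handler() returns a
+	 * non-zero code and we account for the missed period(s) right
+	 * away by looping.
+	 */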
+	do {
+		eval_inner_loop(ctx, (__s32)(rtdm_clock_read_monotonic() -
+					     ctx->date));
+
+		ctx->start_time = rtdm_clock_read_monotonic();
+		err = rtdm_timer_start_in_handler(&ctx->timer, ctx->date, 0,
+						  RTDM_TIMERMODE_ABSOLUTE);
+
+		if (++ctx->curr.test_loops >= ctx->samples_per_sec) {
+			ctx->curr.test_loops = 0;
+			eval_outer_loop(ctx);
+		}
+	} while (err);
+}
+
+static int rt_tmbench_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rt_tmbench_context *ctx;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	ctx->mode = RTTST_TMBENCH_INVALID;
+	sema_init(&ctx->nrt_mutex, 1);
+
+	return 0;
+}
+
+static void rt_tmbench_close(struct rtdm_fd *fd)
+{
+	struct rt_tmbench_context *ctx;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	down(&ctx->nrt_mutex);
+
+	if (ctx->mode >= 0) {
+		if (ctx->mode == RTTST_TMBENCH_TASK)
+			rtdm_task_destroy(&ctx->timer_task);
+		else if (ctx->mode == RTTST_TMBENCH_HANDLER)
+			rtdm_timer_destroy(&ctx->timer);
+
+		rtdm_event_destroy(&ctx->result_event);
+
+		if (ctx->histogram_size)
+			kfree(ctx->histogram_min);
+
+		ctx->mode = RTTST_TMBENCH_INVALID;
+		ctx->histogram_size = 0;
+	}
+
+	up(&ctx->nrt_mutex);
+}
+
+static int rt_tmbench_start(struct rtdm_fd *fd,
+			    struct rt_tmbench_context *ctx,
+			    struct rttst_tmbench_config __user *user_config)
+{
+	int err = 0;
+	spl_t s;
+
+	struct rttst_tmbench_config config_buf;
+	struct rttst_tmbench_config *config =
+		(struct rttst_tmbench_config *)user_config;
+
+	if (rtdm_fd_is_user(fd)) {
+		if (rtdm_safe_copy_from_user
+		    (fd, &config_buf, user_config,
+		     sizeof(struct rttst_tmbench_config)) < 0)
+			return -EFAULT;
+
+		config = &config_buf;
+	}
+
+	down(&ctx->nrt_mutex);
+
+	ctx->period = config->period;
+	ctx->warmup_loops = config->warmup_loops;
+	ctx->samples_per_sec = 1000000000 / ctx->period;
+	ctx->histogram_size = config->histogram_size;
+	ctx->freeze_max = config->freeze_max;
+
+	if (ctx->histogram_size > 0) {
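+		/*
+		 * A single allocation backs all three histograms; the
+		 * min, max and avg arrays simply follow each other.
+		 */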
+		ctx->histogram_min =
+		    kmalloc(3 * ctx->histogram_size * sizeof(int32_t),
+			    GFP_KERNEL);
+		ctx->histogram_max =
+		    ctx->histogram_min + config->histogram_size;
+		ctx->histogram_avg =
+		    ctx->histogram_max + config->histogram_size;
+
+		if (!ctx->histogram_min) {
+			up(&ctx->nrt_mutex);
+			return -ENOMEM;
+		}
+
+		memset(ctx->histogram_min, 0,
+		       3 * ctx->histogram_size * sizeof(int32_t));
+		ctx->bucketsize = config->histogram_bucketsize;
+	}
+
+	ctx->result.overall.min = 10000000;
+	ctx->result.overall.max = -10000000;
+	ctx->result.overall.avg = 0;
+	ctx->result.overall.test_loops = 1;
+	ctx->result.overall.overruns = 0;
+
+	ctx->warmup = 1;
+
+	ctx->curr.min = 10000000;
+	ctx->curr.max = -10000000;
+	ctx->curr.avg = 0;
+	ctx->curr.overruns = 0;
+	ctx->mode = RTTST_TMBENCH_INVALID;
+
+	rtdm_event_init(&ctx->result_event, 0);
+
+	if (config->mode == RTTST_TMBENCH_TASK) {
+		err = rtdm_task_init(&ctx->timer_task, "timerbench",
+				timer_task_proc, ctx,
+				config->priority, 0);
+		if (!err)
+			ctx->mode = RTTST_TMBENCH_TASK;
+	} else {
+		rtdm_timer_init(&ctx->timer, timer_proc,
+				rtdm_fd_device(fd)->name);
+
+		ctx->curr.test_loops = 0;
+
+		ctx->mode = RTTST_TMBENCH_HANDLER;
+
+		cobalt_atomic_enter(s);
+		ctx->start_time = rtdm_clock_read_monotonic();
+
+		/* first event: one millisecond from now. */
+		ctx->date = ctx->start_time + 1000000;
+
+		err = rtdm_timer_start(&ctx->timer, ctx->date, 0,
+				RTDM_TIMERMODE_ABSOLUTE);
+		cobalt_atomic_leave(s);
+	}
+
+	up(&ctx->nrt_mutex);
+
+	return err;
+}
+
+static int kernel_copy_results(struct rt_tmbench_context *ctx,
+			       struct rttst_overall_bench_res *res)
+{
+	int size;
+
+	memcpy(&res->result, &ctx->result.overall, sizeof(res->result));
+
+	if (ctx->histogram_size > 0) {
+		size = ctx->histogram_size * sizeof(int32_t);
+		memcpy(res->histogram_min, ctx->histogram_min, size);
+		memcpy(res->histogram_max, ctx->histogram_max, size);
+		memcpy(res->histogram_avg, ctx->histogram_avg, size);
+		kfree(ctx->histogram_min);
+	}
+
+	return 0;
+}
+
+static int user_copy_results(struct rt_tmbench_context *ctx,
+			     struct rttst_overall_bench_res __user *u_res)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(ctx);
+	struct rttst_overall_bench_res res_buf;
+	int ret, size;
+
+	ret = rtdm_safe_copy_to_user(fd, &u_res->result,
+				     &ctx->result.overall,
+				     sizeof(u_res->result));
+	if (ret || ctx->histogram_size == 0)
+		return ret;
+
+	size = ctx->histogram_size * sizeof(int32_t);
+
+	if (rtdm_safe_copy_from_user(fd, &res_buf, u_res, sizeof(res_buf)) < 0 ||
+	    rtdm_safe_copy_to_user(fd, res_buf.histogram_min,
+				   ctx->histogram_min, size) < 0 ||
+	    rtdm_safe_copy_to_user(fd, res_buf.histogram_max,
+				   ctx->histogram_max, size) < 0 ||
+	    rtdm_safe_copy_to_user(fd, res_buf.histogram_avg,
+				   ctx->histogram_avg, size) < 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+static int compat_user_copy_results(struct rt_tmbench_context *ctx,
+				    struct compat_rttst_overall_bench_res __user *u_res)
+{
+	struct compat_rttst_overall_bench_res res_buf;
+	struct rtdm_fd *fd = rtdm_private_to_fd(ctx);
+	int ret, size;
+
+	ret = rtdm_safe_copy_to_user(fd, &u_res->result,
+				     &ctx->result.overall,
+				     sizeof(u_res->result));
+	if (ret || ctx->histogram_size == 0)
+		return ret;
+
+	size = ctx->histogram_size * sizeof(int32_t);
+
+	if (rtdm_safe_copy_from_user(fd, &res_buf, u_res, sizeof(res_buf)) < 0 ||
+	    rtdm_safe_copy_to_user(fd, compat_ptr(res_buf.histogram_min),
+				   ctx->histogram_min, size) < 0 ||
+	    rtdm_safe_copy_to_user(fd, compat_ptr(res_buf.histogram_max),
+				   ctx->histogram_max, size) < 0 ||
+	    rtdm_safe_copy_to_user(fd, compat_ptr(res_buf.histogram_avg),
+				   ctx->histogram_avg, size) < 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+#endif /* CONFIG_XENO_ARCH_SYS3264 */
+
+static int rt_tmbench_stop(struct rt_tmbench_context *ctx, void *u_res)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(ctx);
+	int ret;
+
+	down(&ctx->nrt_mutex);
+
+	if (ctx->mode < 0) {
+		up(&ctx->nrt_mutex);
+		return -EINVAL;
+	}
+
+	if (ctx->mode == RTTST_TMBENCH_TASK)
+		rtdm_task_destroy(&ctx->timer_task);
+	else if (ctx->mode == RTTST_TMBENCH_HANDLER)
+		rtdm_timer_destroy(&ctx->timer);
+
+	rtdm_event_destroy(&ctx->result_event);
+
+	ctx->mode = RTTST_TMBENCH_INVALID;
+
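+	/*
+	 * overall.avg accumulated one per-second average for each
+	 * completed measurement loop, while test_loops runs one
+	 * ahead; hence the division by test_loops - 1 (at least 1).
+	 */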
+	ctx->result.overall.avg =
+	    slldiv(ctx->result.overall.avg,
+		   ((ctx->result.overall.test_loops) > 1 ?
+		    ctx->result.overall.test_loops : 2) - 1);
+
+	if (rtdm_fd_is_user(fd)) {
+#ifdef CONFIG_XENO_ARCH_SYS3264
+		if (rtdm_fd_is_compat(fd))
+			ret = compat_user_copy_results(ctx, u_res);
+		else
+#endif
+			ret = user_copy_results(ctx, u_res);
+	} else
+		ret = kernel_copy_results(ctx, u_res);
+
+	if (ctx->histogram_size > 0)
+		kfree(ctx->histogram_min);
+
+	up(&ctx->nrt_mutex);
+
+	return ret;
+}
+
+static int rt_tmbench_ioctl_nrt(struct rtdm_fd *fd,
+				unsigned int request, void __user *arg)
+{
+	struct rt_tmbench_context *ctx;
+	int err = 0;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	switch (request) {
+	case RTTST_RTIOC_TMBENCH_START:
+		err = rt_tmbench_start(fd, ctx, arg);
+		break;
+
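+	/*
+	 * COMPAT_CASE() expands to both the native and the 32-bit
+	 * compat request codes when CONFIG_XENO_ARCH_SYS3264 is
+	 * enabled (see rtdm/compat.h).
+	 */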
+	COMPAT_CASE(RTTST_RTIOC_TMBENCH_STOP):
+		err = rt_tmbench_stop(ctx, arg);
+		break;
+	default:
+		err = -ENOSYS;
+	}
+
+	return err;
+}
+
+static int rt_tmbench_ioctl_rt(struct rtdm_fd *fd,
+			       unsigned int request, void __user *arg)
+{
+	struct rt_tmbench_context *ctx;
+	int err = 0;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	switch (request) {
+	case RTTST_RTIOC_INTERM_BENCH_RES:
+		err = rtdm_event_wait(&ctx->result_event);
+		if (err)
+			return err;
+
+		if (rtdm_fd_is_user(fd)) {
+			struct rttst_interm_bench_res __user *user_res = arg;
+
+			err = rtdm_safe_copy_to_user(fd, user_res,
+						     &ctx->result,
+						     sizeof(*user_res));
+		} else {
+			struct rttst_interm_bench_res *res = (void *)arg;
+
+			memcpy(res, &ctx->result, sizeof(*res));
+		}
+
+		break;
+
+	default:
+		err = -ENOSYS;
+	}
+
+	return err;
+}
+
+static struct rtdm_driver timerbench_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(timerbench,
+						    RTDM_CLASS_TESTING,
+						    RTDM_SUBCLASS_TIMERBENCH,
+						    RTTST_PROFILE_VER),
+	.device_flags		= RTDM_NAMED_DEVICE,
+	.device_count		= 1,
+	.context_size		= sizeof(struct rt_tmbench_context),
+	.ops = {
+		.open		= rt_tmbench_open,
+		.close		= rt_tmbench_close,
+		.ioctl_rt	= rt_tmbench_ioctl_rt,
+		.ioctl_nrt	= rt_tmbench_ioctl_nrt,
+	},
+};
+
+static struct rtdm_device device = {
+	.driver = &timerbench_driver,
+	.label = "timerbench",
+};
+
+static int __init __timerbench_init(void)
+{
+	return rtdm_dev_register(&device);
+}
+
+static void __timerbench_exit(void)
+{
+	rtdm_dev_unregister(&device);
+}
+
+module_init(__timerbench_init);
+module_exit(__timerbench_exit);
+++ linux-patched/drivers/xenomai/testing/rtdmtest.c	2022-03-21 12:58:31.384869507 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/testing/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2010 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <rtdm/driver.h>
+#include <rtdm/testing.h>
+
+MODULE_DESCRIPTION("RTDM test helper module");
+MODULE_AUTHOR("Jan Kiszka <jan.kiszka@web.de>");
+MODULE_VERSION("0.1.0");
+MODULE_LICENSE("GPL");
+
+struct rtdm_basic_context {
+	rtdm_timer_t close_timer;
+	unsigned long close_counter;
+	unsigned long close_deferral;
+};
+
+struct rtdm_actor_context {
+	rtdm_task_t actor_task;
+	unsigned int request;
+	rtdm_event_t run;
+	rtdm_event_t done;
+	union {
+		__u32 cpu;
+	} args;
+};
+
+static void close_timer_proc(rtdm_timer_t *timer)
+{
+	struct rtdm_basic_context *ctx =
+		container_of(timer, struct rtdm_basic_context, close_timer);
+
+	if (ctx->close_counter != 1)
+		printk(XENO_ERR
+		       "rtdmtest: %s: close_counter is %lu, should be 1!\n",
+		       __func__, ctx->close_counter);
+
+	ctx->close_deferral = RTTST_RTDM_NORMAL_CLOSE;
+	rtdm_fd_unlock(rtdm_private_to_fd(ctx));
+}
+
+static int rtdm_basic_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rtdm_basic_context *ctx = rtdm_fd_to_private(fd);
+
+	rtdm_timer_init(&ctx->close_timer, close_timer_proc,
+			"rtdm close test");
+	ctx->close_counter = 0;
+	ctx->close_deferral = RTTST_RTDM_NORMAL_CLOSE;
+
+	return 0;
+}
+
+static void rtdm_basic_close(struct rtdm_fd *fd)
+{
+	struct rtdm_basic_context *ctx = rtdm_fd_to_private(fd);
+
+	ctx->close_counter++;
+
+	switch (ctx->close_deferral) {
+	case RTTST_RTDM_DEFER_CLOSE_CONTEXT:
+		if (ctx->close_counter != 2) {
+			printk(XENO_ERR
+			       "rtdmtest: %s: close_counter is %lu, "
+			       "should be 2!\n",
+			       __func__, ctx->close_counter);
+			return;
+		}
+		rtdm_fd_unlock(fd);
+		break;
+	}
+
+	rtdm_timer_destroy(&ctx->close_timer);
+}
+
+static int rtdm_basic_ioctl_rt(struct rtdm_fd *fd,
+			    unsigned int request, void __user *arg)
+{
+	int ret, magic = RTTST_RTDM_MAGIC_PRIMARY;
+
+	switch (request) {
+	case RTTST_RTIOC_RTDM_PING_PRIMARY:
+		ret = rtdm_safe_copy_to_user(fd, arg, &magic,
+					     sizeof(magic));
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static int rtdm_basic_ioctl_nrt(struct rtdm_fd *fd,
+			    unsigned int request, void __user *arg)
+{
+	struct rtdm_basic_context *ctx = rtdm_fd_to_private(fd);
+	int ret = 0, magic = RTTST_RTDM_MAGIC_SECONDARY;
+
+	switch (request) {
+	case RTTST_RTIOC_RTDM_DEFER_CLOSE:
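+		/*
+		 * Close deferral test: grab an extra reference on the
+		 * fd, so that the final close is held back until
+		 * close_timer_proc() drops it via rtdm_fd_unlock().
+		 */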
+		ctx->close_deferral = (unsigned long)arg;
+		if (ctx->close_deferral == RTTST_RTDM_DEFER_CLOSE_CONTEXT) {
+			++ctx->close_counter;
+			rtdm_fd_lock(fd);
+			rtdm_timer_start(&ctx->close_timer, 300000000ULL, 0,
+					RTDM_TIMERMODE_RELATIVE);
+		}
+		break;
+	case RTTST_RTIOC_RTDM_PING_SECONDARY:
+		ret = rtdm_safe_copy_to_user(fd, arg, &magic,
+					     sizeof(magic));
+		break;
+	default:
+		ret = -ENOTTY;
+	}
+
+	return ret;
+}
+
+static void actor_handler(void *arg)
+{
+	struct rtdm_actor_context *ctx = arg;
+	int ret;
+
+	for (;;) {
+		if (rtdm_task_should_stop())
+			return;
+
+		ret = rtdm_event_wait(&ctx->run);
+		if (ret)
+			break;
+
+		switch (ctx->request) {
+		case RTTST_RTIOC_RTDM_ACTOR_GET_CPU:
+			ctx->args.cpu = task_cpu(current);
+			break;
+		default:
+			printk(XENO_ERR "rtdmtest: bad request code %d\n",
+			       ctx->request);
+		}
+
+		rtdm_event_signal(&ctx->done);
+	}
+}
+
+static int rtdm_actor_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rtdm_actor_context *ctx = rtdm_fd_to_private(fd);
+
+	rtdm_event_init(&ctx->run, 0);
+	rtdm_event_init(&ctx->done, 0);
+
+	return rtdm_task_init(&ctx->actor_task, "rtdm_actor",
+			      actor_handler, ctx,
+			      RTDM_TASK_LOWEST_PRIORITY, 0);
+}
+
+static void rtdm_actor_close(struct rtdm_fd *fd)
+{
+	struct rtdm_actor_context *ctx = rtdm_fd_to_private(fd);
+
+	rtdm_task_destroy(&ctx->actor_task);
+	rtdm_event_destroy(&ctx->run);
+	rtdm_event_destroy(&ctx->done);
+}
+
+#define ACTION_TIMEOUT 50000000ULL /* 50 ms timeout on action */
+
+static int run_action(struct rtdm_actor_context *ctx, unsigned int request)
+{
+	rtdm_toseq_t toseq;
+
+	rtdm_toseq_init(&toseq, ACTION_TIMEOUT);
+	ctx->request = request;
+	rtdm_event_signal(&ctx->run);
+	/*
+	 * XXX: The handshake mechanism is not bullet-proof against
+	 * -EINTR received while waiting for the done event. Hopefully
+	 * we won't start a new request while the actor task is still
+	 * completing a previous one we stopped waiting for abruptly.
+	 */
+	return rtdm_event_timedwait(&ctx->done, ACTION_TIMEOUT, &toseq);
+}
+
+static int rtdm_actor_ioctl(struct rtdm_fd *fd,
+			    unsigned int request, void __user *arg)
+{
+	struct rtdm_actor_context *ctx = rtdm_fd_to_private(fd);
+	int ret;
+
+	switch (request) {
+	case RTTST_RTIOC_RTDM_ACTOR_GET_CPU:
+		ctx->args.cpu = (__u32)-EINVAL;
+		ret = run_action(ctx, request);
+		if (ret)
+			break;
+		ret = rtdm_safe_copy_to_user(fd, arg, &ctx->args.cpu,
+					     sizeof(ctx->args.cpu));
+		break;
+	default:
+		ret = -ENOTTY;
+	}
+
+	return ret;
+}
+
+static struct rtdm_driver rtdm_basic_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(rtdm_test_basic,
+						    RTDM_CLASS_TESTING,
+						    RTDM_SUBCLASS_RTDMTEST,
+						    RTTST_PROFILE_VER),
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.device_count		= 2,
+	.context_size		= sizeof(struct rtdm_basic_context),
+	.ops = {
+		.open		= rtdm_basic_open,
+		.close		= rtdm_basic_close,
+		.ioctl_rt	= rtdm_basic_ioctl_rt,
+		.ioctl_nrt	= rtdm_basic_ioctl_nrt,
+	},
+};
+
+static struct rtdm_driver rtdm_actor_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(rtdm_test_actor,
+						    RTDM_CLASS_TESTING,
+						    RTDM_SUBCLASS_RTDMTEST,
+						    RTTST_PROFILE_VER),
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.device_count		= 1,
+	.context_size		= sizeof(struct rtdm_actor_context),
+	.ops = {
+		.open		= rtdm_actor_open,
+		.close		= rtdm_actor_close,
+		.ioctl_rt	= rtdm_actor_ioctl,
+	},
+};
+
+static struct rtdm_device device[3] = {
+	[0 ... 1] = {
+		.driver = &rtdm_basic_driver,
+		.label = "rtdm%d",
+	},
+	[2] = {
+		.driver = &rtdm_actor_driver,
+		.label = "rtdmx",
+	}
+};
+
+static int __init rtdm_test_init(void)
+{
+	int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(device); i++) {
+		ret = rtdm_dev_register(device + i);
+		if (ret)
+			goto fail;
+	}
+
+	return 0;
+fail:
+	while (i-- > 0)
+		rtdm_dev_unregister(device + i);
+
+	return ret;
+}
+
+static void __exit rtdm_test_exit(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(device); i++)
+		rtdm_dev_unregister(device + i);
+}
+
+module_init(rtdm_test_init);
+module_exit(rtdm_test_exit);
+++ linux-patched/drivers/xenomai/testing/Kconfig	2022-03-21 12:58:31.377869576 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/testing/Makefile	1970-01-01 01:00:00.000000000 +0100
+menu "Testing drivers"
+
+config XENO_DRIVERS_TIMERBENCH
+	tristate "Timer benchmark driver"
+	default y
+	help
+	Kernel-based benchmark driver for timer latency evaluation.
+	See testsuite/latency for a possible front-end.
+
+config XENO_DRIVERS_SWITCHTEST
+	tristate "Context switch unit testing driver"
+	default y
+	help
+	Kernel-based driver for unit testing context switches and
+	FPU switches.
+
+config XENO_DRIVERS_HEAPCHECK
+	tristate "Memory allocator test driver"
+	default y
+	help
+	Kernel-based driver for testing Cobalt's memory allocator.
+
+config XENO_DRIVERS_RTDMTEST
+	depends on m
+	tristate "RTDM unit tests driver"
+	help
+	Kernel driver for performing RTDM unit tests.
+
+endmenu
+++ linux-patched/drivers/xenomai/testing/Makefile	2022-03-21 12:58:31.370869644 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/testing/switchtest.c	1970-01-01 01:00:00.000000000 +0100
+
+obj-$(CONFIG_XENO_DRIVERS_TIMERBENCH) += xeno_timerbench.o
+obj-$(CONFIG_XENO_DRIVERS_SWITCHTEST) += xeno_switchtest.o
+obj-$(CONFIG_XENO_DRIVERS_RTDMTEST)   += xeno_rtdmtest.o
+obj-$(CONFIG_XENO_DRIVERS_HEAPCHECK)   += xeno_heapcheck.o
+
+xeno_timerbench-y := timerbench.o
+
+xeno_switchtest-y := switchtest.o
+
+xeno_rtdmtest-y := rtdmtest.o
+
+xeno_heapcheck-y := heapcheck.o
+++ linux-patched/drivers/xenomai/testing/switchtest.c	2022-03-21 12:58:31.362869722 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/testing/heapcheck.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2010 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/semaphore.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/trace.h>
+#include <rtdm/testing.h>
+#include <rtdm/driver.h>
+#include <asm/xenomai/fptest.h>
+
+MODULE_DESCRIPTION("Cobalt context switch test helper");
+MODULE_AUTHOR("Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>");
+MODULE_VERSION("0.1.1");
+MODULE_LICENSE("GPL");
+
+#define RTSWITCH_RT      0x10000
+#define RTSWITCH_NRT     0
+#define RTSWITCH_KERNEL  0x20000
+
+struct rtswitch_task {
+	struct rttst_swtest_task base;
+	rtdm_event_t rt_synch;
+	struct semaphore nrt_synch;
+	struct xnthread ktask;          /* For kernel-space real-time tasks. */
+	unsigned int last_switch;
+};
+
+struct rtswitch_context {
+	struct rtswitch_task *tasks;
+	unsigned int tasks_count;
+	unsigned int next_index;
+	struct semaphore lock;
+	unsigned int cpu;
+	unsigned int switches_count;
+
+	unsigned long pause_us;
+	unsigned int next_task;
+	rtdm_timer_t wake_up_delay;
+
+	unsigned int failed;
+	struct rttst_swtest_error error;
+
+	struct rtswitch_task *utask;
+	rtdm_nrtsig_t wake_utask;
+};
+
+static int fp_features;
+
+static int report(const char *fmt, ...)
+{
+	va_list ap;
+	int ret;
+
+	va_start(ap, fmt);
+	ret = vprintk(fmt, ap);
+	va_end(ap);
+
+	return ret;
+}
+
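+/*
+ * Called when a kernel task detects an FPU register corruption:
+ * record the error, wake up a user-space task so that the test
+ * program can report the failure, then suspend the faulty kernel
+ * task.
+ */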
+static void handle_ktask_error(struct rtswitch_context *ctx, unsigned int fp_val)
+{
+	struct rtswitch_task *cur = &ctx->tasks[ctx->error.last_switch.to];
+	unsigned int i;
+
+	ctx->failed = 1;
+	ctx->error.fp_val = fp_val;
+
+	if ((cur->base.flags & RTSWITCH_RT) == RTSWITCH_RT)
+		for (i = 0; i < ctx->tasks_count; i++) {
+			struct rtswitch_task *task = &ctx->tasks[i];
+
+			/* Find the first non kernel-space task. */
+			if ((task->base.flags & RTSWITCH_KERNEL))
+				continue;
+
+			/* Unblock it. */
+			switch (task->base.flags & RTSWITCH_RT) {
+			case RTSWITCH_NRT:
+				ctx->utask = task;
+				rtdm_nrtsig_pend(&ctx->wake_utask);
+				break;
+
+			case RTSWITCH_RT:
+				rtdm_event_signal(&task->rt_synch);
+				break;
+			}
+
+			xnthread_suspend(&cur->ktask,
+					 XNSUSP, XN_INFINITE, XN_RELATIVE, NULL);
+		}
+}
+
+static int rtswitch_pend_rt(struct rtswitch_context *ctx,
+			    unsigned int idx)
+{
+	struct rtswitch_task *task;
+	int rc;
+
+	if (idx > ctx->tasks_count)
+		return -EINVAL;
+
+	task = &ctx->tasks[idx];
+	task->base.flags |= RTSWITCH_RT;
+
+	rc = rtdm_event_wait(&task->rt_synch);
+	if (rc < 0)
+		return rc;
+
+	if (ctx->failed)
+		return 1;
+
+	return 0;
+}
+
+static void timed_wake_up(rtdm_timer_t *timer)
+{
+	struct rtswitch_context *ctx =
+		container_of(timer, struct rtswitch_context, wake_up_delay);
+	struct rtswitch_task *task;
+
+	task = &ctx->tasks[ctx->next_task];
+
+	switch (task->base.flags & RTSWITCH_RT) {
+	case RTSWITCH_NRT:
+		ctx->utask = task;
+		rtdm_nrtsig_pend(&ctx->wake_utask);
+		break;
+
+	case RTSWITCH_RT:
+		rtdm_event_signal(&task->rt_synch);
+	}
+}
+
+static int rtswitch_to_rt(struct rtswitch_context *ctx,
+			  unsigned int from_idx,
+			  unsigned int to_idx)
+{
+	struct rtswitch_task *from, *to;
+	int rc;
+
+	if (from_idx > ctx->tasks_count || to_idx > ctx->tasks_count)
+		return -EINVAL;
+
+	/* to == from is a special case which means
+	   "return to the previous task". */
+	if (to_idx == from_idx)
+		to_idx = ctx->error.last_switch.from;
+
+	from = &ctx->tasks[from_idx];
+	to = &ctx->tasks[to_idx];
+
+	from->base.flags |= RTSWITCH_RT;
+	from->last_switch = ++ctx->switches_count;
+	ctx->error.last_switch.from = from_idx;
+	ctx->error.last_switch.to = to_idx;
+	barrier();
+
+	if (ctx->pause_us) {
+		ctx->next_task = to_idx;
+		barrier();
+		rtdm_timer_start(&ctx->wake_up_delay,
+				 ctx->pause_us * 1000, 0,
+				 RTDM_TIMERMODE_RELATIVE);
+		xnsched_lock();
+	} else
+		switch (to->base.flags & RTSWITCH_RT) {
+		case RTSWITCH_NRT:
+			ctx->utask = to;
+			barrier();
+			rtdm_nrtsig_pend(&ctx->wake_utask);
+			xnsched_lock();
+			break;
+
+		case RTSWITCH_RT:
+			xnsched_lock();
+			rtdm_event_signal(&to->rt_synch);
+			break;
+
+		default:
+			return -EINVAL;
+		}
+
+	rc = rtdm_event_wait(&from->rt_synch);
+	xnsched_unlock();
+
+	if (rc < 0)
+		return rc;
+
+	if (ctx->failed)
+		return 1;
+
+	return 0;
+}
+
+static int rtswitch_pend_nrt(struct rtswitch_context *ctx,
+			     unsigned int idx)
+{
+	struct rtswitch_task *task;
+
+	if (idx > ctx->tasks_count)
+		return -EINVAL;
+
+	task = &ctx->tasks[idx];
+
+	task->base.flags &= ~RTSWITCH_RT;
+
+	if (down_interruptible(&task->nrt_synch))
+		return -EINTR;
+
+	if (ctx->failed)
+		return 1;
+
+	return 0;
+}
+
+static int rtswitch_to_nrt(struct rtswitch_context *ctx,
+			   unsigned int from_idx,
+			   unsigned int to_idx)
+{
+	struct rtswitch_task *from, *to;
+	unsigned int expected, fp_val;
+	int fp_check;
+
+	if (from_idx > ctx->tasks_count || to_idx > ctx->tasks_count)
+		return -EINVAL;
+
+	/* to == from is a special case which means
+	   "return to the previous task". */
+	if (to_idx == from_idx)
+		to_idx = ctx->error.last_switch.from;
+
+	from = &ctx->tasks[from_idx];
+	to = &ctx->tasks[to_idx];
+
+	fp_check = ctx->switches_count == from->last_switch + 1
+		&& ctx->error.last_switch.from == to_idx
+		&& ctx->error.last_switch.to == from_idx;
+
+	from->base.flags &= ~RTSWITCH_RT;
+	from->last_switch = ++ctx->switches_count;
+	ctx->error.last_switch.from = from_idx;
+	ctx->error.last_switch.to = to_idx;
+	barrier();
+
+	if (ctx->pause_us) {
+		ctx->next_task = to_idx;
+		barrier();
+		rtdm_timer_start(&ctx->wake_up_delay,
+				 ctx->pause_us * 1000, 0,
+				 RTDM_TIMERMODE_RELATIVE);
+	} else
+		switch (to->base.flags & RTSWITCH_RT) {
+		case RTSWITCH_NRT:
+		switch_to_nrt:
+			up(&to->nrt_synch);
+			break;
+
+		case RTSWITCH_RT:
+
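+			/*
+			 * FPU preservation check across the NRT->RT
+			 * switch: load a known pattern into the FPU
+			 * from Linux context, wake up the RT task,
+			 * then verify that the pattern survived once
+			 * this task runs again.
+			 */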
+			if (!fp_check || fp_linux_begin() < 0) {
+				fp_check = 0;
+				goto signal_nofp;
+			}
+
+			expected = from_idx + 500 +
+				(ctx->switches_count % 4000000) * 1000;
+
+			fp_regs_set(fp_features, expected);
+			rtdm_event_signal(&to->rt_synch);
+			fp_val = fp_regs_check(fp_features, expected, report);
+			fp_linux_end();
+
+			if (down_interruptible(&from->nrt_synch))
+				return -EINTR;
+			if (ctx->failed)
+				return 1;
+			if (fp_val != expected) {
+				handle_ktask_error(ctx, fp_val);
+				return 1;
+			}
+
+			from->base.flags &= ~RTSWITCH_RT;
+			from->last_switch = ++ctx->switches_count;
+			ctx->error.last_switch.from = from_idx;
+			ctx->error.last_switch.to = to_idx;
+			if ((to->base.flags & RTSWITCH_RT) == RTSWITCH_NRT)
+				goto switch_to_nrt;
+			expected = from_idx + 500 +
+				(ctx->switches_count % 4000000) * 1000;
+			barrier();
+
+			fp_linux_begin();
+			fp_regs_set(fp_features, expected);
+			rtdm_event_signal(&to->rt_synch);
+			fp_val = fp_regs_check(fp_features, expected, report);
+			fp_linux_end();
+
+			if (down_interruptible(&from->nrt_synch))
+				return -EINTR;
+			if (ctx->failed)
+				return 1;
+			if (fp_val != expected) {
+				handle_ktask_error(ctx, fp_val);
+				return 1;
+			}
+
+			from->base.flags &= ~RTSWITCH_RT;
+			from->last_switch = ++ctx->switches_count;
+			ctx->error.last_switch.from = from_idx;
+			ctx->error.last_switch.to = to_idx;
+			barrier();
+			if ((to->base.flags & RTSWITCH_RT) == RTSWITCH_NRT)
+				goto switch_to_nrt;
+
+		signal_nofp:
+			rtdm_event_signal(&to->rt_synch);
+			break;
+
+		default:
+			return -EINVAL;
+		}
+
+	if (down_interruptible(&from->nrt_synch))
+		return -EINTR;
+
+	if (ctx->failed)
+		return 1;
+
+	return 0;
+}
+
+static int rtswitch_set_tasks_count(struct rtswitch_context *ctx, unsigned int count)
+{
+	struct rtswitch_task *tasks;
+
+	if (ctx->tasks_count == count)
+		return 0;
+
+	tasks = vmalloc(count * sizeof(*tasks));
+
+	if (!tasks)
+		return -ENOMEM;
+
+	down(&ctx->lock);
+
+	if (ctx->tasks)
+		vfree(ctx->tasks);
+
+	ctx->tasks = tasks;
+	ctx->tasks_count = count;
+	ctx->next_index = 0;
+
+	up(&ctx->lock);
+
+	return 0;
+}
+
+static int rtswitch_register_task(struct rtswitch_context *ctx,
+				  struct rttst_swtest_task *arg)
+{
+	struct rtswitch_task *t;
+
+	down(&ctx->lock);
+
+	if (ctx->next_index == ctx->tasks_count) {
+		up(&ctx->lock);
+		return -EBUSY;
+	}
+
+	arg->index = ctx->next_index;
+	t = &ctx->tasks[arg->index];
+	ctx->next_index++;
+	t->base = *arg;
+	t->last_switch = 0;
+	sema_init(&t->nrt_synch, 0);
+	rtdm_event_init(&t->rt_synch, 0);
+
+	up(&ctx->lock);
+
+	return 0;
+}
+
+struct taskarg {
+	struct rtswitch_context *ctx;
+	struct rtswitch_task *task;
+};
+
+static void rtswitch_ktask(void *cookie)
+{
+	struct taskarg *arg = (struct taskarg *) cookie;
+	unsigned int fp_val, expected, to, i = 0;
+	struct rtswitch_context *ctx = arg->ctx;
+	struct rtswitch_task *task = arg->task;
+
+	to = task->base.index;
+
+	rtswitch_pend_rt(ctx, task->base.index);
+
+	while (!rtdm_task_should_stop()) {
+		if (task->base.flags & RTTST_SWTEST_USE_FPU)
+			fp_regs_set(fp_features, task->base.index + i * 1000);
+
+		switch (i % 3) {
+		case 0:
+			/* to == from means "return to last task" */
+			rtswitch_to_rt(ctx, task->base.index, task->base.index);
+			break;
+		case 1:
+			if (++to == task->base.index)
+				++to;
+			if (to > ctx->tasks_count - 1)
+				to = 0;
+			if (to == task->base.index)
+				++to;
+
+			fallthrough;
+		case 2:
+			rtswitch_to_rt(ctx, task->base.index, to);
+		}
+
+		if (task->base.flags & RTTST_SWTEST_USE_FPU) {
+			expected = task->base.index + i * 1000;
+			fp_val = fp_regs_check(fp_features, expected, report);
+
+			if (fp_val != expected) {
+				if (task->base.flags & RTTST_SWTEST_FREEZE)
+					xntrace_user_freeze(0, 0);
+				handle_ktask_error(ctx, fp_val);
+			}
+		}
+
+		if (++i == 4000000)
+			i = 0;
+	}
+}
+
+static int rtswitch_create_ktask(struct rtswitch_context *ctx,
+				 struct rttst_swtest_task *ptask)
+{
+	union xnsched_policy_param param;
+	struct xnthread_start_attr sattr;
+	struct xnthread_init_attr iattr;
+	struct rtswitch_task *task;
+	struct taskarg arg;
+	int init_flags;
+	char name[30];
+	int err;
+
+	/*
+	 * Silently disable FP tests in kernel if FPU is not supported
+	 * there. Typical case is math emulation support: we can use
+	 * it from userland as a synthetic FPU, but there is no sane
+	 * way to use it from kernel-based threads (Xenomai or Linux).
+	 */
+	if (!fp_kernel_supported())
+		ptask->flags &= ~RTTST_SWTEST_USE_FPU;
+
+	ptask->flags |= RTSWITCH_KERNEL;
+	err = rtswitch_register_task(ctx, ptask);
+
+	if (err)
+		return err;
+
+	ksformat(name, sizeof(name), "rtk%d/%u", ptask->index, ctx->cpu);
+
+	task = &ctx->tasks[ptask->index];
+
+	arg.ctx = ctx;
+	arg.task = task;
+
+	init_flags = (ptask->flags & RTTST_SWTEST_FPU) ? XNFPU : 0;
+
+	iattr.name = name;
+	iattr.flags = init_flags;
+	iattr.personality = &xenomai_personality;
+	iattr.affinity = *cpumask_of(ctx->cpu);
+	param.rt.prio = 1;
+
+	set_cpus_allowed_ptr(current, cpumask_of(ctx->cpu));
+
+	err = xnthread_init(&task->ktask,
+			    &iattr, &xnsched_class_rt, &param);
+	if (!err) {
+		sattr.mode = 0;
+		sattr.entry = rtswitch_ktask;
+		sattr.cookie = &arg;
+		err = xnthread_start(&task->ktask, &sattr);
+		if (err)
+			__xnthread_discard(&task->ktask);
+	} else
+		/*
+		 * In order to avoid calling xnthread_cancel() for an
+		 * invalid thread.
+		 */
+		task->base.flags = 0;
+	/*
+	 * Putting the argument on stack is safe, because the new
+	 * thread, thanks to the above call to set_cpus_allowed_ptr(),
+	 * will preempt the current thread immediately, and will
+	 * suspend only once the arguments on stack are used.
+	 */
+
+	return err;
+}
+
+static void rtswitch_utask_waker(rtdm_nrtsig_t *sig, void *arg)
+{
+	struct rtswitch_context *ctx = (struct rtswitch_context *)arg;
+	up(&ctx->utask->nrt_synch);
+}
+
+static int rtswitch_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rtswitch_context *ctx = rtdm_fd_to_private(fd);
+
+	ctx->tasks = NULL;
+	ctx->tasks_count = ctx->next_index = ctx->cpu = ctx->switches_count = 0;
+	sema_init(&ctx->lock, 1);
+	ctx->failed = 0;
+	ctx->error.last_switch.from = ctx->error.last_switch.to = -1;
+	ctx->pause_us = 0;
+
+	rtdm_nrtsig_init(&ctx->wake_utask, rtswitch_utask_waker, ctx);
+
+	rtdm_timer_init(&ctx->wake_up_delay, timed_wake_up, "switchtest timer");
+
+	return 0;
+}
+
+static void rtswitch_close(struct rtdm_fd *fd)
+{
+	struct rtswitch_context *ctx = rtdm_fd_to_private(fd);
+	unsigned int i;
+
+	rtdm_timer_destroy(&ctx->wake_up_delay);
+	rtdm_nrtsig_destroy(&ctx->wake_utask);
+
+	if (ctx->tasks) {
+		set_cpus_allowed_ptr(current, cpumask_of(ctx->cpu));
+
+		for (i = 0; i < ctx->next_index; i++) {
+			struct rtswitch_task *task = &ctx->tasks[i];
+
+			if (task->base.flags & RTSWITCH_KERNEL) {
+				rtdm_task_destroy(&task->ktask);
+				rtdm_task_join(&task->ktask);
+			}
+			rtdm_event_destroy(&task->rt_synch);
+		}
+		vfree(ctx->tasks);
+	}
+}
+
+static int rtswitch_ioctl_nrt(struct rtdm_fd *fd,
+			      unsigned int request,
+			      void *arg)
+{
+	struct rtswitch_context *ctx = rtdm_fd_to_private(fd);
+	struct rttst_swtest_task task;
+	struct rttst_swtest_dir fromto;
+	__u32 count;
+	int err;
+
+	switch (request) {
+	case RTTST_RTIOC_SWTEST_SET_TASKS_COUNT:
+		return rtswitch_set_tasks_count(ctx,
+						(unsigned long) arg);
+
+	case RTTST_RTIOC_SWTEST_SET_CPU:
+		if ((unsigned long) arg > num_online_cpus() - 1)
+			return -EINVAL;
+
+		ctx->cpu = (unsigned long) arg;
+		return 0;
+
+	case RTTST_RTIOC_SWTEST_SET_PAUSE:
+		ctx->pause_us = (unsigned long) arg;
+		return 0;
+
+	case RTTST_RTIOC_SWTEST_REGISTER_UTASK:
+		if (!rtdm_rw_user_ok(fd, arg, sizeof(task)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd, &task, arg, sizeof(task));
+
+		err = rtswitch_register_task(ctx, &task);
+
+		if (!err)
+			rtdm_copy_to_user(fd,
+					  arg,
+					  &task,
+					  sizeof(task));
+
+		return err;
+
+	case RTTST_RTIOC_SWTEST_CREATE_KTASK:
+		if (!rtdm_rw_user_ok(fd, arg, sizeof(task)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd, &task, arg, sizeof(task));
+
+		err = rtswitch_create_ktask(ctx, &task);
+
+		if (!err)
+			rtdm_copy_to_user(fd,
+					  arg,
+					  &task,
+					  sizeof(task));
+
+		return err;
+
+	case RTTST_RTIOC_SWTEST_PEND:
+		if (!rtdm_read_user_ok(fd, arg, sizeof(task)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd, &task, arg, sizeof(task));
+
+		return rtswitch_pend_nrt(ctx, task.index);
+
+	case RTTST_RTIOC_SWTEST_SWITCH_TO:
+		if (!rtdm_read_user_ok(fd, arg, sizeof(fromto)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd,
+				    &fromto,
+				    arg,
+				    sizeof(fromto));
+
+		return rtswitch_to_nrt(ctx, fromto.from, fromto.to);
+
+	case RTTST_RTIOC_SWTEST_GET_SWITCHES_COUNT:
+		if (!rtdm_rw_user_ok(fd, arg, sizeof(count)))
+			return -EFAULT;
+
+		count = ctx->switches_count;
+
+		rtdm_copy_to_user(fd, arg, &count, sizeof(count));
+
+		return 0;
+
+	case RTTST_RTIOC_SWTEST_GET_LAST_ERROR:
+		if (!rtdm_rw_user_ok(fd, arg, sizeof(ctx->error)))
+			return -EFAULT;
+
+		rtdm_copy_to_user(fd,
+				  arg,
+				  &ctx->error,
+				  sizeof(ctx->error));
+
+		return 0;
+
+	default:
+		return -ENOSYS;
+	}
+}
+
+static int rtswitch_ioctl_rt(struct rtdm_fd *fd,
+			     unsigned int request,
+			     void *arg)
+{
+	struct rtswitch_context *ctx = rtdm_fd_to_private(fd);
+	struct rttst_swtest_task task;
+	struct rttst_swtest_dir fromto;
+
+	switch (request) {
+	case RTTST_RTIOC_SWTEST_PEND:
+		if (!rtdm_read_user_ok(fd, arg, sizeof(task)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd, &task, arg, sizeof(task));
+
+		return rtswitch_pend_rt(ctx, task.index);
+
+	case RTTST_RTIOC_SWTEST_SWITCH_TO:
+		if (!rtdm_read_user_ok(fd, arg, sizeof(fromto)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd,
+				    &fromto,
+				    arg,
+				    sizeof(fromto));
+
+		return rtswitch_to_rt(ctx, fromto.from, fromto.to);
+
+	case RTTST_RTIOC_SWTEST_GET_LAST_ERROR:
+		if (!rtdm_rw_user_ok(fd, arg, sizeof(ctx->error)))
+			return -EFAULT;
+
+		rtdm_copy_to_user(fd,
+				  arg,
+				  &ctx->error,
+				  sizeof(ctx->error));
+
+		return 0;
+
+	default:
+		return -ENOSYS;
+	}
+}
+
+static struct rtdm_driver switchtest_driver = {
+	.profile_info = RTDM_PROFILE_INFO(switchtest,
+					  RTDM_CLASS_TESTING,
+					  RTDM_SUBCLASS_SWITCHTEST,
+					  RTTST_PROFILE_VER),
+	.device_flags = RTDM_NAMED_DEVICE,
+	.device_count =	1,
+	.context_size = sizeof(struct rtswitch_context),
+	.ops = {
+		.open = rtswitch_open,
+		.close = rtswitch_close,
+		.ioctl_rt = rtswitch_ioctl_rt,
+		.ioctl_nrt = rtswitch_ioctl_nrt,
+	},
+};
+
+static struct rtdm_device device = {
+	.driver = &switchtest_driver,
+	.label = "switchtest",
+};
+
+int __init __switchtest_init(void)
+{
+	fp_features = fp_detect();
+
+	return rtdm_dev_register(&device);
+}
+
+void __switchtest_exit(void)
+{
+	rtdm_dev_unregister(&device);
+}
+
+module_init(__switchtest_init);
+module_exit(__switchtest_exit);
+++ linux-patched/drivers/xenomai/testing/heapcheck.c	2022-03-21 12:58:31.355869790 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2018 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/heap.h>
+#include <rtdm/testing.h>
+#include <rtdm/driver.h>
+
+#define complain(__fmt, __args...)	\
+	printk(XENO_WARNING "heap check: " __fmt "\n", ##__args)
+
+static struct xnheap test_heap = {
+	.name = "test_heap"
+};
+
+enum pattern {
+	alphabet_series,
+	digit_series,
+	binary_series,
+};
+
+struct chunk {
+	void *ptr;
+	enum pattern pattern;
+};
+
+struct runstats {
+	struct rttst_heap_stats stats;
+	struct runstats *next;
+};
+
+static struct runstats *statistics;
+
+static int nrstats;
+
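+/*
+ * Give the system some air every 1000 iterations, so that long test
+ * loops running in primary mode do not hog the CPU.
+ */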
+static inline void breathe(int loops)
+{
+	if ((loops % 1000) == 0)
+		rtdm_task_sleep(300000ULL);
+}
+
+static inline void do_swap(void *left, void *right)
+{
+	char trans[sizeof(struct chunk)];
+
+	memcpy(trans, left, sizeof(struct chunk));
+	memcpy(left, right, sizeof(struct chunk));
+	memcpy(right, trans, sizeof(struct chunk));
+}
+
+static void random_shuffle(void *vbase, size_t nmemb)
+{
+	struct {
+		char x[sizeof(struct chunk)];
+	} __attribute__((packed)) *base = vbase;
+	unsigned int j, k;
+
+	for (j = nmemb; j > 0; j--) {
+		k = (unsigned int)(prandom_u32() % nmemb) + 1;
+		if (j == k)
+			continue;
+		do_swap(&base[j - 1], &base[k - 1]);
+	}
+}
+
+static void fill_pattern(char *p, size_t size, enum pattern pat)
+{
+	unsigned int val, count;
+
+	switch (pat) {
+	case alphabet_series:
+		val = 'a';
+		count = 26;
+		break;
+	case digit_series:
+		val = '0';
+		count = 10;
+		break;
+	default:
+		val = 0;
+		count = 255;
+		break;
+	}
+
+	while (size-- > 0) {
+		*p++ = (char)(val % count);
+		val++;
+	}
+}
+
+static int check_pattern(const char *p, size_t size, enum pattern pat)
+{
+	unsigned int val, count;
+
+	switch (pat) {
+	case alphabet_series:
+		val = 'a';
+		count = 26;
+		break;
+	case digit_series:
+		val = '0';
+		count = 10;
+		break;
+	default:
+		val = 0;
+		count = 255;
+		break;
+	}
+
+	while (size-- > 0) {
+		if (*p++ != (char)(val % count))
+			return 0;
+		val++;
+	}
+
+	return 1;
+}
+
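+/*
+ * Estimate the largest chunk still obtainable from the heap: shrink
+ * the request by block_size until a single allocation succeeds, or
+ * until the request drops to one block. Used to gauge external
+ * fragmentation after a partial release.
+ */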
+static size_t find_largest_free(size_t free_size, size_t block_size)
+{
+	void *p;
+
+	for (;;) {
+		p = xnheap_alloc(&test_heap, free_size);
+		if (p) {
+			xnheap_free(&test_heap, p);
+			break;
+		}
+		if (free_size <= block_size)
+			break;
+		free_size -= block_size;
+	}
+
+	return free_size;
+}
+
+static int test_seq(size_t heap_size, size_t block_size, int flags)
+{
+	long alloc_sum_ns, alloc_avg_ns, free_sum_ns, free_avg_ns,
+		alloc_max_ns, free_max_ns, d;
+	size_t user_size, largest_free, maximum_free, freed;
+	int ret, n, k, maxblocks, nrblocks;
+	nanosecs_rel_t start, end;
+	struct chunk *chunks;
+	struct runstats *st;
+	bool done_frag;
+	void *mem, *p;
+
+	maxblocks = heap_size / block_size;
+
+	mem = vmalloc(heap_size);
+	if (mem == NULL)
+		return -ENOMEM;
+
+	ret = xnheap_init(&test_heap, mem, heap_size);
+	if (ret) {
+		complain("cannot init heap with size %zu",
+		       heap_size);
+		goto out;
+	}
+
+	chunks = vmalloc(sizeof(*chunks) * maxblocks);
+	if (chunks == NULL) {
+		ret = -ENOMEM;
+		goto no_chunks;
+	}
+	memset(chunks, 0, sizeof(*chunks) * maxblocks);
+
+	ret = xnthread_harden();
+	if (ret)
+		goto done;
+
+	if (xnheap_get_size(&test_heap) != heap_size) {
+		complain("memory size inconsistency (%zu / %zu bytes)",
+			 heap_size, xnheap_get_size(&test_heap));
+		goto bad;
+	}
+
+	user_size = 0;
+	alloc_avg_ns = 0;
+	free_avg_ns = 0;
+	alloc_max_ns = 0;
+	free_max_ns = 0;
+	maximum_free = 0;
+	largest_free = 0;
+
+	for (n = 0, alloc_sum_ns = 0; ; n++) {
+		start = rtdm_clock_read_monotonic();
+		p = xnheap_alloc(&test_heap, block_size);
+		end = rtdm_clock_read_monotonic();
+		d = end - start;
+		if (d > alloc_max_ns)
+			alloc_max_ns = d;
+		alloc_sum_ns += d;
+		if (p == NULL)
+			break;
+		user_size += block_size;
+		if (n >= maxblocks) {
+			complain("too many blocks fetched"
+			       " (heap=%zu, block=%zu, "
+			       "got more than %d blocks)",
+			       heap_size, block_size, maxblocks);
+			goto bad;
+		}
+		chunks[n].ptr = p;
+		if (flags & RTTST_HEAPCHECK_PATTERN) {
+			chunks[n].pattern = (enum pattern)(prandom_u32() % 3);
+			fill_pattern(chunks[n].ptr, block_size, chunks[n].pattern);
+		}
+		breathe(n);
+	}
+
+	nrblocks = n;
+	if (nrblocks == 0)
+		goto do_stats;
+
+	if ((flags & RTTST_HEAPCHECK_ZEROOVRD) && nrblocks != maxblocks) {
+		complain("too few blocks fetched, unexpected overhead"
+			 " (heap=%zu, block=%zu, "
+			 "got %d, less than %d blocks)",
+			 heap_size, block_size, nrblocks, maxblocks);
+		goto bad;
+	}
+
+	breathe(0);
+
+	/* Make sure we did not trash any busy block while allocating. */
+	if (flags & RTTST_HEAPCHECK_PATTERN) {
+		for (n = 0; n < nrblocks; n++) {
+			if (!check_pattern(chunks[n].ptr, block_size,
+					   chunks[n].pattern)) {
+				complain("corrupted block #%d on alloc"
+					 " sequence (pattern %d)",
+					 n, chunks[n].pattern);
+				goto bad;
+			}
+			breathe(n);
+		}
+	}
+
+	if (flags & RTTST_HEAPCHECK_SHUFFLE)
+		random_shuffle(chunks, nrblocks);
+
+	/*
+	 * Release all blocks.
+	 */
+	for (n = 0, free_sum_ns = 0, freed = 0, done_frag = false;
+	     n < nrblocks; n++) {
+		start = rtdm_clock_read_monotonic();
+		xnheap_free(&test_heap, chunks[n].ptr);
+		end = rtdm_clock_read_monotonic();
+		d = end - start;
+		if (d > free_max_ns)
+			free_max_ns = d;
+		free_sum_ns += d;
+		chunks[n].ptr = NULL;
+		/* Make sure we did not trash busy blocks while freeing. */
+		if (flags & RTTST_HEAPCHECK_PATTERN) {
+			for (k = 0; k < nrblocks; k++) {
+				if (chunks[k].ptr &&
+				    !check_pattern(chunks[k].ptr, block_size,
+						   chunks[k].pattern)) {
+					complain("corrupted block #%d on release"
+						 " sequence (pattern %d)",
+						 k, chunks[k].pattern);
+					goto bad;
+				}
+				breathe(k);
+			}
+		}
+		freed += block_size;
+		/*
+		 * Get a sense of the fragmentation for the tested
+		 * allocation pattern, heap and block sizes when half
+		 * of the usable heap size should be available to us.
+		 * NOTE: user_size excludes the allocator overhead; it
+		 * is what we actually managed to get from the current
+		 * heap during the allocation loop.
+		 */
+		if (!done_frag && freed >= user_size / 2) {
+			/* Calculate the external fragmentation. */
+			largest_free = find_largest_free(freed, block_size);
+			maximum_free = freed;
+			done_frag = true;
+		}
+		breathe(n);
+	}
+
+	/*
+	 * If the deallocation mechanism is broken, we might not be
+	 * able to reproduce the same allocation pattern with the same
+	 * outcome; check for this.
+	 */
+	if (flags & RTTST_HEAPCHECK_HOT) {
+		for (n = 0, alloc_max_ns = alloc_sum_ns = 0; ; n++) {
+			start = rtdm_clock_read_monotonic();
+			p = xnheap_alloc(&test_heap, block_size);
+			end = rtdm_clock_read_monotonic();
+			d = end - start;
+			if (d > alloc_max_ns)
+				alloc_max_ns = d;
+			alloc_sum_ns += d;
+			if (p == NULL)
+				break;
+			if (n >= maxblocks) {
+				complain("too many blocks fetched during hot pass"
+					 " (heap=%zu, block=%zu, "
+					 "got more than %d blocks)",
+					 heap_size, block_size, maxblocks);
+				goto bad;
+			}
+			chunks[n].ptr = p;
+			breathe(n);
+		}
+		if (n != nrblocks) {
+			complain("inconsistent block count fetched"
+				 " during hot pass (heap=%zu, block=%zu, "
+				 "got %d blocks vs %d during alloc)",
+				 heap_size, block_size, n, nrblocks);
+			goto bad;
+		}
+		for (n = 0, free_max_ns = free_sum_ns = 0; n < nrblocks; n++) {
+			start = rtdm_clock_read_monotonic();
+			xnheap_free(&test_heap, chunks[n].ptr);
+			end = rtdm_clock_read_monotonic();
+			d = end - start;
+			if (d > free_max_ns)
+				free_max_ns = d;
+			free_sum_ns += d;
+			breathe(n);
+		}
+	}
+
+	alloc_avg_ns = alloc_sum_ns / nrblocks;
+	free_avg_ns = free_sum_ns / nrblocks;
+
+	if ((flags & RTTST_HEAPCHECK_ZEROOVRD) && heap_size != user_size) {
+		complain("unexpected overhead reported");
+		goto bad;
+	}
+
+	if (xnheap_get_used(&test_heap) > 0) {
+		complain("memory leakage reported: %zu bytes missing",
+			 xnheap_get_used(&test_heap));
+		goto bad;
+	}
+
+do_stats:
+	xnthread_relax(0, 0);
+	ret = 0;
+	/*
+	 * Don't report stats when running a pattern check, timings
+	 * are affected.
+	 */
+	if (!(flags & RTTST_HEAPCHECK_PATTERN)) {
+		st = kmalloc(sizeof(*st), GFP_KERNEL);
+		if (st == NULL) {
+			complain("failed allocating memory");
+			ret = -ENOMEM;
+			goto out;
+		}
+		st->stats.heap_size = heap_size;
+		st->stats.user_size = user_size;
+		st->stats.block_size = block_size;
+		st->stats.nrblocks = nrblocks;
+		st->stats.alloc_avg_ns = alloc_avg_ns;
+		st->stats.alloc_max_ns = alloc_max_ns;
+		st->stats.free_avg_ns = free_avg_ns;
+		st->stats.free_max_ns = free_max_ns;
+		st->stats.maximum_free = maximum_free;
+		st->stats.largest_free = largest_free;
+		st->stats.flags = flags;
+		st->next = statistics;
+		statistics = st;
+		nrstats++;
+	}
+
+done:
+	vfree(chunks);
+no_chunks:
+	xnheap_destroy(&test_heap);
+out:
+	vfree(mem);
+
+	return ret;
+bad:
+	xnthread_relax(0, 0);
+	ret = -EPROTO;
+	goto done;
+}
+
+static int collect_stats(struct rtdm_fd *fd,
+			 struct rttst_heap_stats __user *buf, int nr)
+{
+	struct runstats *p, *next;
+	int ret, n;
+
+	if (nr < 0)
+		return -EINVAL;
+
+	for (p = statistics, n = nr; p && n > 0 && nrstats > 0;
+	     n--, nrstats--, p = next, buf += sizeof(p->stats)) {
+		ret = rtdm_copy_to_user(fd, buf, &p->stats, sizeof(p->stats));
+		if (ret)
+			return ret;
+		next = p->next;
+		statistics = next;
+		kfree(p);
+	}
+
+	return nr - n;
+}
+
+static void heapcheck_close(struct rtdm_fd *fd)
+{
+	struct runstats *p, *next;
+
+	for (p = statistics; p; p = next) {
+		next = p->next;
+		kfree(p);
+	}
+
+	statistics = NULL;
+}
+
+static int heapcheck_ioctl(struct rtdm_fd *fd,
+			   unsigned int request, void __user *arg)
+{
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	struct compat_rttst_heap_stathdr compat_sthdr;
+#endif
+	struct rttst_heap_stathdr sthdr;
+	struct rttst_heap_parms parms;
+	int ret;
+
+	switch (request) {
+	case RTTST_RTIOC_HEAP_CHECK:
+		ret = rtdm_copy_from_user(fd, &parms, arg, sizeof(parms));
+		if (ret)
+			return ret;
+		ret = test_seq(parms.heap_size,
+			       parms.block_size,
+			       parms.flags);
+		if (ret)
+			return ret;
+		parms.nrstats = nrstats;
+		ret = rtdm_copy_to_user(fd, arg, &parms, sizeof(parms));
+		break;
+	case RTTST_RTIOC_HEAP_STAT_COLLECT:
+		sthdr.buf = NULL;
+#ifdef CONFIG_XENO_ARCH_SYS3264
+		if (rtdm_fd_is_compat(fd)) {
+			ret = rtdm_copy_from_user(fd, &compat_sthdr, arg,
+						  sizeof(compat_sthdr));
+			if (ret)
+				return ret;
+
+			ret = collect_stats(fd, compat_ptr(compat_sthdr.buf),
+					    compat_sthdr.nrstats);
+			if (ret < 0)
+				return ret;
+
+			compat_sthdr.nrstats = ret;
+			ret = rtdm_copy_to_user(fd, arg, &compat_sthdr,
+						sizeof(compat_sthdr));
+		} else
+#endif
+		{
+			ret = rtdm_copy_from_user(fd, &sthdr, arg,
+						  sizeof(sthdr));
+			if (ret)
+				return ret;
+
+			ret = collect_stats(fd, sthdr.buf, sthdr.nrstats);
+			if (ret < 0)
+				return ret;
+
+			sthdr.nrstats = ret;
+			ret = rtdm_copy_to_user(fd, arg, &sthdr, sizeof(sthdr));
+		}
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static struct rtdm_driver heapcheck_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(heap_check,
+						    RTDM_CLASS_TESTING,
+						    RTDM_SUBCLASS_HEAPCHECK,
+						    RTTST_PROFILE_VER),
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.device_count		= 1,
+	.ops = {
+		.close		= heapcheck_close,
+		.ioctl_nrt	= heapcheck_ioctl,
+	},
+};
+
+static struct rtdm_device heapcheck_device = {
+	.driver = &heapcheck_driver,
+	.label = "heapcheck",
+};
+
+static int __init heapcheck_init(void)
+{
+	return rtdm_dev_register(&heapcheck_device);
+}
+
+static void __exit heapcheck_exit(void)
+{
+	rtdm_dev_unregister(&heapcheck_device);
+}
+
+module_init(heapcheck_init);
+module_exit(heapcheck_exit);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/Makefile	2022-03-21 12:58:31.347869868 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:1 @
--- linux/drivers/xenomai/gpio/gpio-omap.c	1970-01-01 01:00:00.000000000 +0100
+obj-$(CONFIG_XENOMAI) += autotune/ serial/ testing/ can/ net/ analogy/ ipc/ udd/ gpio/ gpiopwm/ spi/
+++ linux-patched/drivers/xenomai/gpio/gpio-omap.c	2022-03-21 12:58:31.340869936 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpio/gpio-sun8i-h3.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2020 Greg Gallagher <greg@embeddedgreg.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_OMAP  6
+
+static const char *compat_array[] = {
+	"ti,omap4-gpio",
+	"ti,omap3-gpio",
+	"ti,omap2-gpio",
+};
+
+static int __init omap_gpio_init(void)
+{
+	return rtdm_gpiochip_scan_array_of(NULL, compat_array,
+					   ARRAY_SIZE(compat_array),
+					   RTDM_SUBCLASS_OMAP);
+}
+module_init(omap_gpio_init);
+
+static void __exit omap_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_OMAP);
+}
+module_exit(omap_gpio_exit);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/gpio/gpio-sun8i-h3.c	2022-03-21 12:58:31.333870005 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpio/gpio-bcm2835.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_H3  3
+
+static int __init h3_gpio_init(void)
+{
+	int ret;
+
+	ret = rtdm_gpiochip_scan_of(NULL, "allwinner,sun8i-h3-pinctrl",
+				    RTDM_SUBCLASS_H3);
+	if (ret)
+		return ret;
+
+	return rtdm_gpiochip_scan_of(NULL, "allwinner,sun8i-h3-r-pinctrl",
+				     RTDM_SUBCLASS_H3);
+}
+module_init(h3_gpio_init);
+
+static void __exit h3_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_H3);
+}
+module_exit(h3_gpio_exit);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/gpio/gpio-bcm2835.c	2022-03-21 12:58:31.325870083 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpio/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_BCM2835  1
+
+static int __init bcm2835_gpio_init(void)
+{
+	return rtdm_gpiochip_scan_of(NULL, "brcm,bcm2835-gpio",
+				     RTDM_SUBCLASS_BCM2835);
+}
+module_init(bcm2835_gpio_init);
+
+static void __exit bcm2835_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_BCM2835);
+}
+module_exit(bcm2835_gpio_exit);
+
+MODULE_LICENSE("GPL");
+
+++ linux-patched/drivers/xenomai/gpio/Kconfig	2022-03-21 12:58:31.318870151 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpio/gpio-mxc.c	1970-01-01 01:00:00.000000000 +0100
+menu "Real-time GPIO drivers"
+
+config XENO_DRIVERS_GPIO
+       bool "GPIO controller"
+       depends on GPIOLIB
+       help
+
+       Real-time capable GPIO module.
+
+if XENO_DRIVERS_GPIO
+
+config XENO_DRIVERS_GPIO_BCM2835
+	depends on MACH_BCM2708 || ARCH_BCM2835
+	tristate "Support for BCM2835 GPIOs"
+	help
+
+	Enables support for the GPIO controller available from
+	Broadcom's BCM2835 SoC.
+
+config XENO_DRIVERS_GPIO_MXC
+	depends on GPIO_MXC
+	tristate "Support for MXC GPIOs"
+	help
+
+	Suitable for the GPIO controller available from
+	Freescale/NXP's MXC architecture.
+
+config XENO_DRIVERS_GPIO_SUN8I_H3
+	depends on MACH_SUN8I && PINCTRL_SUN8I_H3
+	tristate "Support for SUN8I H3 GPIOs"
+	help
+
+	Suitable for the GPIO controller available from Allwinner's H3
+	SoC, as found on the NanoPI boards.
+
+config XENO_DRIVERS_GPIO_ZYNQ7000
+	depends on ARCH_ZYNQ || ARCH_ZYNQMP
+	tristate "Support for Zynq7000 GPIOs"
+	help
+
+	Enables support for the GPIO controller available from
+	Xilinx's Zynq7000 SoC.
+
+config XENO_DRIVERS_GPIO_XILINX
+	depends on ARCH_ZYNQ || ARCH_ZYNQMP
+	tristate "Support for Xilinx GPIOs"
+	help
+
+	Enables support for the GPIO controller available from
+	Xilinx's softcore IP.
+
+config XENO_DRIVERS_GPIO_OMAP
+	depends on ARCH_OMAP2PLUS || ARCH_OMAP
+	tristate "Support for OMAP GPIOs"
+	help
+
+	Enables support for the GPIO controllers available from
+	the OMAP family of SoCs.
+
+config XENO_DRIVERS_GPIO_CHERRYVIEW
+	depends on PINCTRL_CHERRYVIEW
+	tristate "Support for Cherryview GPIOs"
+	help
+
+	Enables support for the Intel Cherryview GPIO controller.
+
+config XENO_DRIVERS_GPIO_DEBUG
+       bool "Enable GPIO core debugging features"
+
+endif
+
+endmenu
+++ linux-patched/drivers/xenomai/gpio/gpio-mxc.c	2022-03-21 12:58:31.310870229 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpio/gpio-core.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_MXC  2
+
+static const char *compat_array[] = {
+	"fsl,imx6q-gpio",
+	"fsl,imx7d-gpio",
+};
+
+static int __init mxc_gpio_init(void)
+{
+	return rtdm_gpiochip_scan_array_of(NULL, compat_array,
+					   ARRAY_SIZE(compat_array),
+					   RTDM_SUBCLASS_MXC);
+}
+module_init(mxc_gpio_init);
+
+static void __exit mxc_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_MXC);
+}
+module_exit(mxc_gpio_exit);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/gpio/gpio-core.c	2022-03-21 12:58:31.303870297 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpio/gpio-zynq7000.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <rtdm/gpio.h>
+
+struct rtdm_gpio_chan {
+	int requested : 1,
+		has_direction : 1,
+		is_output : 1,
+		is_interrupt : 1,
+		want_timestamp : 1;
+};
+
+static LIST_HEAD(rtdm_gpio_chips);
+
+static DEFINE_MUTEX(chip_lock);
+
+static int gpio_pin_interrupt(rtdm_irq_t *irqh)
+{
+	struct rtdm_gpio_pin *pin;
+
+	pin = rtdm_irq_get_arg(irqh, struct rtdm_gpio_pin);
+
+	if (pin->monotonic_timestamp)
+		pin->timestamp = rtdm_clock_read_monotonic();
+	else
+		pin->timestamp = rtdm_clock_read();
+	rtdm_event_signal(&pin->event);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int request_gpio_irq(unsigned int gpio, struct rtdm_gpio_pin *pin,
+			    struct rtdm_gpio_chan *chan,
+			    int trigger)
+{
+	int ret, irq_trigger, irq;
+
+	if (trigger & ~GPIO_TRIGGER_MASK)
+		return -EINVAL;
+
+	if (!chan->requested) {
+		ret = gpio_request(gpio, pin->name);
+		if (ret) {
+			if (ret != -EPROBE_DEFER)
+				printk(XENO_ERR
+				       "cannot request GPIO%d\n", gpio);
+			return ret;
+		}
+		chan->requested = true;
+	}
+
+	ret = gpio_direction_input(gpio);
+	if (ret) {
+		printk(XENO_ERR "cannot set GPIO%d as input\n", gpio);
+		goto fail;
+	}
+
+	chan->has_direction = true;
+	gpio_export(gpio, true);
+
+	rtdm_event_clear(&pin->event);
+
+	/*
+	 * Attempt to hook the interrupt associated to that pin. We
+	 * might fail getting a valid IRQ number, in case the GPIO
+	 * chip did not define any mapping handler (->to_irq). If so,
+	 * just assume that either we have no IRQ indeed, or interrupt
+	 * handling may be open coded elsewhere.
+	 */
+	irq = gpio_to_irq(gpio);
+	if (irq < 0)
+		goto done;
+
+	irq_trigger = 0;
+	if (trigger & GPIO_TRIGGER_EDGE_RISING)
+		irq_trigger |= IRQ_TYPE_EDGE_RISING;
+	if (trigger & GPIO_TRIGGER_EDGE_FALLING)
+		irq_trigger |= IRQ_TYPE_EDGE_FALLING;
+	if (trigger & GPIO_TRIGGER_LEVEL_HIGH)
+		irq_trigger |= IRQ_TYPE_LEVEL_HIGH;
+	if (trigger & GPIO_TRIGGER_LEVEL_LOW)
+		irq_trigger |= IRQ_TYPE_LEVEL_LOW;
+
+	if (irq_trigger)
+		irq_set_irq_type(irq, irq_trigger);
+	
+	ret = rtdm_irq_request(&pin->irqh, irq, gpio_pin_interrupt,
+			       0, pin->name, pin);
+	if (ret) {
+		printk(XENO_ERR "cannot request GPIO%d interrupt\n", gpio);
+		goto fail;
+	}
+
+	rtdm_irq_enable(&pin->irqh);
+done:
+	chan->is_interrupt = true;
+
+	return 0;
+fail:
+	gpio_free(gpio);
+	chan->requested = false;
+
+	return ret;
+}
+
+static void release_gpio_irq(unsigned int gpio, struct rtdm_gpio_pin *pin,
+			     struct rtdm_gpio_chan *chan)
+{
+	if (chan->is_interrupt) {
+		rtdm_irq_free(&pin->irqh);
+		chan->is_interrupt = false;
+	}
+	gpio_free(gpio);
+	chan->requested = false;
+}
+
+static int gpio_pin_ioctl_nrt(struct rtdm_fd *fd,
+			      unsigned int request, void *arg)
+{
+	struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd);
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	unsigned int gpio = rtdm_fd_minor(fd);
+	int ret = 0, val, trigger;
+	struct rtdm_gpio_pin *pin;
+	
+	pin = container_of(dev, struct rtdm_gpio_pin, dev);
+
+	switch (request) {
+	case GPIO_RTIOC_DIR_OUT:
+		ret = rtdm_safe_copy_from_user(fd, &val, arg, sizeof(val));
+		if (ret)
+			return ret;
+		ret = gpio_direction_output(gpio, val);
+		if (ret == 0) {
+			chan->has_direction = true;
+			chan->is_output = true;
+		}
+		break;
+	case GPIO_RTIOC_DIR_IN:
+		ret = gpio_direction_input(gpio);
+		if (ret == 0)
+			chan->has_direction = true;
+		break;
+	case GPIO_RTIOC_IRQEN:
+		if (chan->is_interrupt) {
+			return -EBUSY;
+		}
+		ret = rtdm_safe_copy_from_user(fd, &trigger,
+					       arg, sizeof(trigger));
+		if (ret)
+			return ret;
+		ret = request_gpio_irq(gpio, pin, chan, trigger);
+		break;
+	case GPIO_RTIOC_IRQDIS:
+		if (chan->is_interrupt) {
+			release_gpio_irq(gpio, pin, chan);
+			chan->requested = false;
+			chan->is_interrupt = false;
+		}
+		break;
+	case GPIO_RTIOC_REQS:
+		ret = gpio_request(gpio, pin->name);
+		if (ret)
+			return ret;
+		else
+			chan->requested = true;
+		break;
+	case GPIO_RTIOC_RELS:
+		gpio_free(gpio);
+		chan->requested = false;
+		break;
+	case GPIO_RTIOC_TS_MONO:
+	case GPIO_RTIOC_TS_REAL:
+		ret = rtdm_safe_copy_from_user(fd, &val, arg, sizeof(val));
+		if (ret)
+			return ret;
+		chan->want_timestamp = !!val;
+		pin->monotonic_timestamp = request == GPIO_RTIOC_TS_MONO;
+		break;
+	default:
+		return -EINVAL;
+	}
+	
+	return ret;
+}
+
+static ssize_t gpio_pin_read_rt(struct rtdm_fd *fd,
+				void __user *buf, size_t len)
+{
+	struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd);
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	struct rtdm_gpio_readout rdo;
+	struct rtdm_gpio_pin *pin;
+	int ret;
+
+	if (!chan->has_direction)
+		return -EAGAIN;
+
+	if (chan->is_output)
+		return -EINVAL;
+
+	pin = container_of(dev, struct rtdm_gpio_pin, dev);
+
+	if (chan->want_timestamp) {
+		if (len < sizeof(rdo))
+			return -EINVAL;
+
+		if (!(fd->oflags & O_NONBLOCK)) {
+			ret = rtdm_event_wait(&pin->event);
+			if (ret)
+				return ret;
+			rdo.timestamp = pin->timestamp;
+		} else if (pin->monotonic_timestamp) {
+			rdo.timestamp = rtdm_clock_read_monotonic();
+		} else {
+			rdo.timestamp = rtdm_clock_read();
+		}
+
+		len = sizeof(rdo);
+		rdo.value = gpiod_get_raw_value(pin->desc);
+		ret = rtdm_safe_copy_to_user(fd, buf, &rdo, len);
+	} else {
+		if (len < sizeof(rdo.value))
+			return -EINVAL;
+
+		if (!(fd->oflags & O_NONBLOCK)) {
+			ret = rtdm_event_wait(&pin->event);
+			if (ret)
+				return ret;
+		}
+
+		len = sizeof(rdo.value);
+		rdo.value = gpiod_get_raw_value(pin->desc);
+		ret = rtdm_safe_copy_to_user(fd, buf, &rdo.value, len);
+	}
+	
+	return ret ?: len;
+}
+
+static ssize_t gpio_pin_write_rt(struct rtdm_fd *fd,
+				 const void __user *buf, size_t len)
+{
+	struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd);
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	struct rtdm_gpio_pin *pin;
+	int value, ret;
+
+	if (len < sizeof(value))
+		return -EINVAL;
+
+	if (!chan->has_direction)
+		return -EAGAIN;
+
+	if (!chan->is_output)
+		return -EINVAL;
+
+	ret = rtdm_safe_copy_from_user(fd, &value, buf, sizeof(value));
+	if (ret)
+		return ret;
+
+	pin = container_of(dev, struct rtdm_gpio_pin, dev);
+	gpiod_set_raw_value(pin->desc, value);
+
+	return sizeof(value);
+}
+
+static int gpio_pin_select(struct rtdm_fd *fd, struct xnselector *selector,
+			   unsigned int type, unsigned int index)
+{
+	struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd);
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	struct rtdm_gpio_pin *pin;
+
+	if (!chan->has_direction)
+		return -EAGAIN;
+
+	if (chan->is_output)
+		return -EINVAL;
+
+	pin = container_of(dev, struct rtdm_gpio_pin, dev);
+
+	return rtdm_event_select(&pin->event, selector, type, index);
+}
+
+static int gpio_pin_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd);
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	unsigned int gpio = rtdm_fd_minor(fd);
+	struct rtdm_gpio_pin *pin;
+	int ret;
+
+	pin = container_of(dev, struct rtdm_gpio_pin, dev);
+	ret = gpio_request(gpio, pin->name);
+	if (ret) {
+		printk(XENO_ERR "failed to request pin %d: %d\n", gpio, ret);
+		return ret;
+	}
+
+	chan->requested = true;
+
+	return 0;
+}
+
+static void gpio_pin_close(struct rtdm_fd *fd)
+{
+	struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd);
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	unsigned int gpio = rtdm_fd_minor(fd);
+	struct rtdm_gpio_pin *pin;
+
+	if (chan->requested) {
+		pin = container_of(dev, struct rtdm_gpio_pin, dev);
+		release_gpio_irq(gpio, pin, chan);
+	}
+}
+
+static void delete_pin_devices(struct rtdm_gpio_chip *rgc)
+{
+	struct rtdm_gpio_pin *pin;
+	struct rtdm_device *dev;
+	int offset;
+
+	for (offset = 0; offset < rgc->gc->ngpio; offset++) {
+		pin = rgc->pins + offset;
+		dev = &pin->dev;
+		rtdm_dev_unregister(dev);
+		rtdm_event_destroy(&pin->event);
+		kfree(dev->label);
+		kfree(pin->name);
+	}
+}
+
+static int create_pin_devices(struct rtdm_gpio_chip *rgc)
+{
+	struct gpio_chip *gc = rgc->gc;
+	struct rtdm_gpio_pin *pin;
+	struct rtdm_device *dev;
+	int offset, ret, gpio;
+
+	for (offset = 0; offset < gc->ngpio; offset++) {
+		ret = -ENOMEM;
+		gpio = gc->base + offset;
+		pin = rgc->pins + offset;
+		pin->name = kasprintf(GFP_KERNEL, "gpio%d", gpio);
+		if (pin->name == NULL)
+			goto fail_name;
+		pin->desc = gpio_to_desc(gpio);
+		if (pin->desc == NULL) {
+			ret = -ENODEV;
+			goto fail_desc;
+		}
+		dev = &pin->dev;
+		dev->driver = &rgc->driver;
+		dev->label = kasprintf(GFP_KERNEL, "%s/gpio%%d", gc->label);
+		if (dev->label == NULL)
+			goto fail_label;
+		dev->minor = gpio;
+		dev->device_data = rgc;
+		ret = rtdm_dev_register(dev);
+		if (ret)
+			goto fail_register;
+		rtdm_event_init(&pin->event, 0);
+	}
+
+	return 0;
+
+fail_register:
+	kfree(dev->label);
+fail_desc:
+fail_label:
+	kfree(pin->name);
+fail_name:
+	delete_pin_devices(rgc);
+
+	return ret;
+}
+
+static char *gpio_pin_devnode(struct device *dev, umode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "rtdm/%s/%s",
+			 dev->class->name,
+			 dev_name(dev));
+}
+
+int rtdm_gpiochip_add(struct rtdm_gpio_chip *rgc,
+		      struct gpio_chip *gc, int gpio_subclass)
+{
+	int ret;
+
+	rgc->devclass = class_create(gc->owner, gc->label);
+	if (IS_ERR(rgc->devclass)) {
+		printk(XENO_ERR "cannot create sysfs class\n");
+		return PTR_ERR(rgc->devclass);
+	}
+	rgc->devclass->devnode = gpio_pin_devnode;
+
+	rgc->driver.profile_info = (struct rtdm_profile_info)
+		RTDM_PROFILE_INFO(rtdm_gpio_chip,
+				  RTDM_CLASS_GPIO,
+				  gpio_subclass,
+				  0);
+	rgc->driver.device_flags = RTDM_NAMED_DEVICE|RTDM_FIXED_MINOR;
+	rgc->driver.base_minor = gc->base;
+	rgc->driver.device_count = gc->ngpio;
+	rgc->driver.context_size = sizeof(struct rtdm_gpio_chan);
+	rgc->driver.ops = (struct rtdm_fd_ops){
+		.open		=	gpio_pin_open,
+		.close		=	gpio_pin_close,
+		.ioctl_nrt	=	gpio_pin_ioctl_nrt,
+		.read_rt	=	gpio_pin_read_rt,
+		.write_rt	=	gpio_pin_write_rt,
+		.select		=	gpio_pin_select,
+	};
+	
+	rtdm_drv_set_sysclass(&rgc->driver, rgc->devclass);
+
+	rgc->gc = gc;
+	rtdm_lock_init(&rgc->lock);
+
+	ret = create_pin_devices(rgc);
+	if (ret)
+		class_destroy(rgc->devclass);
+	
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_add);
+
+struct rtdm_gpio_chip *
+rtdm_gpiochip_alloc(struct gpio_chip *gc, int gpio_subclass)
+{
+	struct rtdm_gpio_chip *rgc;
+	size_t asize;
+	int ret;
+
+	if (gc->ngpio == 0)
+		return ERR_PTR(-EINVAL);
+
+	asize = sizeof(*rgc) + gc->ngpio * sizeof(struct rtdm_gpio_pin);
+	rgc = kzalloc(asize, GFP_KERNEL);
+	if (rgc == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = rtdm_gpiochip_add(rgc, gc, gpio_subclass);
+	if (ret) {
+		kfree(rgc);
+		return ERR_PTR(ret);
+	}
+
+	mutex_lock(&chip_lock);
+	list_add(&rgc->next, &rtdm_gpio_chips);
+	mutex_unlock(&chip_lock);
+
+	return rgc;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_alloc);
+
+void rtdm_gpiochip_remove(struct rtdm_gpio_chip *rgc)
+{
+	mutex_lock(&chip_lock);
+	list_del(&rgc->next);
+	mutex_unlock(&chip_lock);
+	delete_pin_devices(rgc);
+	class_destroy(rgc->devclass);
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_remove);
+
+int rtdm_gpiochip_post_event(struct rtdm_gpio_chip *rgc,
+			     unsigned int offset)
+{
+	struct rtdm_gpio_pin *pin;
+
+	if (offset >= rgc->gc->ngpio)
+		return -EINVAL;
+
+	pin = rgc->pins + offset;
+	if (pin->monotonic_timestamp)
+		pin->timestamp = rtdm_clock_read_monotonic();
+	else
+		pin->timestamp = rtdm_clock_read();
+	rtdm_event_signal(&pin->event);
+	
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_post_event);
+
+static int gpiochip_match_name(struct gpio_chip *chip, void *data)
+{
+	const char *name = data;
+
+	return !strcmp(chip->label, name);
+}
+
+static struct gpio_chip *find_chip_by_name(const char *name)
+{
+	return gpiochip_find((void *)name, gpiochip_match_name);
+}
+
+int rtdm_gpiochip_add_by_name(struct rtdm_gpio_chip *rgc,
+			      const char *label, int gpio_subclass)
+{
+	struct gpio_chip *gc = find_chip_by_name(label);
+
+	if (gc == NULL)
+		return -EPROBE_DEFER;
+
+	return rtdm_gpiochip_add(rgc, gc, gpio_subclass);
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_add_by_name);
+
+int rtdm_gpiochip_find(struct device_node *from, const char *label, int type)
+{
+	struct rtdm_gpio_chip *rgc;
+	struct gpio_chip *chip;
+	int ret = -ENODEV;
+
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	chip = find_chip_by_name(label);
+	if (chip == NULL)
+		return ret;
+
+	ret = 0;
+	rgc = rtdm_gpiochip_alloc(chip, type);
+	if (IS_ERR(rgc))
+		ret = PTR_ERR(rgc);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_find);
+
+int rtdm_gpiochip_array_find(struct device_node *from, const char *label[],
+			     int nentries, int type)
+{
+	int ret = -ENODEV, _ret, n;
+
+	for (n = 0; n < nentries; n++) {
+		_ret = rtdm_gpiochip_find(from, label[n], type);
+		if (_ret) {
+			if (_ret != -ENODEV)
+				return _ret;
+		} else
+			ret = 0;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_array_find);
+
+#ifdef CONFIG_OF
+
+#include <linux/of_platform.h>
+
+struct gpiochip_holder {
+	struct gpio_chip *chip;
+	struct list_head next;
+};
+	
+struct gpiochip_match_data {
+	struct device *parent;
+	struct list_head list;
+};
+
+static int match_gpio_chip(struct gpio_chip *gc, void *data)
+{
+	struct gpiochip_match_data *d = data;
+	struct gpiochip_holder *h;
+
+	if (cobalt_gpiochip_dev(gc) == d->parent) {
+		h = kmalloc(sizeof(*h), GFP_KERNEL);
+		if (h) {
+			h->chip = gc;
+			list_add(&h->next, &d->list);
+		}
+	}
+
+	/*
+	 * Iterate over all existing GPIO chips, we may have several
+	 * hosted by the same pin controller mapping different ranges.
+	 */
+	return 0;
+}
+
+int rtdm_gpiochip_scan_of(struct device_node *from, const char *compat,
+			  int type)
+{
+	struct gpiochip_match_data match;
+	struct gpiochip_holder *h, *n;
+	struct device_node *np = from;
+	struct platform_device *pdev;
+	struct rtdm_gpio_chip *rgc;
+	int ret = -ENODEV, _ret;
+
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	for (;;) {
+		np = of_find_compatible_node(np, NULL, compat);
+		if (np == NULL)
+			break;
+		pdev = of_find_device_by_node(np);
+		of_node_put(np);
+		if (pdev == NULL)
+			break;
+		match.parent = &pdev->dev;
+		INIT_LIST_HEAD(&match.list);
+		gpiochip_find(&match, match_gpio_chip);
+		if (!list_empty(&match.list)) {
+			ret = 0;
+			list_for_each_entry_safe(h, n, &match.list, next) {
+				list_del(&h->next);
+				_ret = 0;
+				rgc = rtdm_gpiochip_alloc(h->chip, type);
+				if (IS_ERR(rgc))
+					_ret = PTR_ERR(rgc);
+				kfree(h);
+				if (_ret && !ret)
+					ret = _ret;
+			}
+			if (ret)
+				break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_scan_of);
+
+int rtdm_gpiochip_scan_array_of(struct device_node *from,
+				const char *compat[],
+				int nentries, int type)
+{
+	int ret = -ENODEV, _ret, n;
+
+	for (n = 0; n < nentries; n++) {
+		_ret = rtdm_gpiochip_scan_of(from, compat[n], type);
+		if (_ret) {
+			if (_ret != -ENODEV)
+				return _ret;
+		} else
+			ret = 0;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_scan_array_of);
+
+#endif /* CONFIG_OF */
+
+void rtdm_gpiochip_remove_by_type(int type)
+{
+	struct rtdm_gpio_chip *rgc, *n;
+
+	mutex_lock(&chip_lock);
+
+	list_for_each_entry_safe(rgc, n, &rtdm_gpio_chips, next) {
+		if (rgc->driver.profile_info.subclass_id == type) {
+			mutex_unlock(&chip_lock);
+			rtdm_gpiochip_remove(rgc);
+			kfree(rgc);
+			mutex_lock(&chip_lock);
+		}
+	}
+
+	mutex_unlock(&chip_lock);
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_remove_by_type);
+++ linux-patched/drivers/xenomai/gpio/gpio-zynq7000.c	2022-03-21 12:58:31.293870395 +0100
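The gpio-core logic in the hunk above exposes every pin of a registered gpio_chip as a named RTDM device whose handlers are the gpio_pin_* functions shown there. The following userland sketch of the input/interrupt path is editorial and not part of the patch: the device path and the <rtdm/gpio.h> userland include are assumptions, while the ioctl codes, their int-sized arguments and the struct rtdm_gpio_readout layout are taken from gpio_pin_ioctl_nrt() and gpio_pin_read_rt() above.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <rtdm/gpio.h>	/* assumed userland location of the RTDM GPIO header */

int main(void)
{
	struct rtdm_gpio_readout rdo;
	int trigger = GPIO_TRIGGER_EDGE_RISING;
	int enable = 1;
	int fd;

	/* Illustrative path only: "rtdm/<chip-label>/gpio<minor>". */
	fd = open("/dev/rtdm/pinctrl-bcm2835/gpio17", O_RDWR);
	if (fd < 0)
		return 1;

	/* Configure as input, arm a rising-edge interrupt and ask for
	   monotonic timestamps (see gpio_pin_ioctl_nrt() above). */
	ioctl(fd, GPIO_RTIOC_DIR_IN);
	ioctl(fd, GPIO_RTIOC_IRQEN, &trigger);
	ioctl(fd, GPIO_RTIOC_TS_MONO, &enable);

	/* Blocks in gpio_pin_read_rt() until gpio_pin_interrupt()
	   signals the pin event. */
	if (read(fd, &rdo, sizeof(rdo)) == sizeof(rdo))
		printf("edge at %llu ns, level %d\n",
		       (unsigned long long)rdo.timestamp, rdo.value);

	close(fd);

	return 0;
}

The output path works the same way with GPIO_RTIOC_DIR_OUT followed by a write() of an int level; chip drivers that handle the GPIO interrupt themselves can instead forward edges to blocked readers with rtdm_gpiochip_post_event().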
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpio/Makefile	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2017 Greg Gallagher <greg@embeddedgreg.com>
+ *
+ * This driver is inspired by gpio-bcm2835.c; please see the original
+ * file for copyright information.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_ZYNQ7000  4
+
+static int __init zynq7000_gpio_init(void)
+{
+	return rtdm_gpiochip_scan_of(NULL, "xlnx,zynq-gpio-1.0",
+				     RTDM_SUBCLASS_ZYNQ7000);
+}
+module_init(zynq7000_gpio_init);
+
+static void __exit zynq7000_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_ZYNQ7000);
+}
+module_exit(zynq7000_gpio_exit);
+
+MODULE_LICENSE("GPL");
+
+++ linux-patched/drivers/xenomai/gpio/Makefile	2022-03-21 12:58:31.285870473 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpio/gpio-xilinx.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-$(CONFIG_XENO_DRIVERS_GPIO_DEBUG) := -DDEBUG
+
+obj-$(CONFIG_XENO_DRIVERS_GPIO_BCM2835) += xeno-gpio-bcm2835.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO_MXC) += xeno-gpio-mxc.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO_SUN8I_H3) += xeno-gpio-sun8i-h3.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO_ZYNQ7000) += xeno-gpio-zynq7000.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO_XILINX) += xeno-gpio-xilinx.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO_OMAP) += xeno-gpio-omap.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO_CHERRYVIEW) += xeno-gpio-cherryview.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO) += gpio-core.o
+
+xeno-gpio-bcm2835-y := gpio-bcm2835.o
+xeno-gpio-mxc-y := gpio-mxc.o
+xeno-gpio-sun8i-h3-y := gpio-sun8i-h3.o
+xeno-gpio-zynq7000-y := gpio-zynq7000.o
+xeno-gpio-xilinx-y := gpio-xilinx.o
+xeno-gpio-omap-y := gpio-omap.o
+xeno-gpio-cherryview-y := gpio-cherryview.o
+++ linux-patched/drivers/xenomai/gpio/gpio-xilinx.c	2022-03-21 12:58:31.278870541 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpio/gpio-cherryview.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2017 Greg Gallagher <greg@embeddedgreg.com>
+ *
+ * This driver controls the GPIOs located in the PL (programmable
+ * logic) of the Zynq SoC.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_XILINX  5
+
+static int __init xilinx_gpio_init(void)
+{
+	return rtdm_gpiochip_scan_of(NULL, "xlnx,xps-gpio-1.00.a",
+				     RTDM_SUBCLASS_XILINX);
+}
+module_init(xilinx_gpio_init);
+
+static void __exit xilinx_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_XILINX);
+}
+module_exit(xilinx_gpio_exit);
+
+MODULE_LICENSE("GPL");
+
+++ linux-patched/drivers/xenomai/gpio/gpio-cherryview.c	2022-03-21 12:58:31.270870619 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/serial/16550A_pci.h	1970-01-01 01:00:00.000000000 +0100
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * @note Copyright (C) 2021 Hongzhan Chen <hongzhan.chen@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_CHERRYVIEW  7
+
+static const char *label_array[] = {
+	"INT33FF:00",
+	"INT33FF:01",
+	"INT33FF:02",
+	"INT33FF:03",
+};
+
+static int __init cherryview_gpio_init(void)
+{
+	return rtdm_gpiochip_array_find(NULL, label_array,
+					ARRAY_SIZE(label_array),
+					RTDM_SUBCLASS_CHERRYVIEW);
+}
+module_init(cherryview_gpio_init);
+
+static void __exit cherryview_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_CHERRYVIEW);
+}
+module_exit(cherryview_gpio_exit);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/serial/16550A_pci.h	2022-03-21 12:58:31.263870687 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/serial/16550A.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006-2007 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2011 Stefan Kisdaroczi <kisda@hispeed.ch>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#if defined(CONFIG_XENO_DRIVERS_16550A_PCI)
+
+#include <linux/pci.h>
+
+struct rt_16550_pci_board {
+	char *name;
+	resource_size_t resource_base_addr;
+	unsigned int nports;
+	unsigned int port_ofs;
+	unsigned long irqtype;
+	unsigned int baud_base;
+	int tx_fifo;
+};
+
+#if defined(CONFIG_XENO_DRIVERS_16550A_PCI_MOXA)
+
+#define PCI_DEVICE_ID_CP112UL	0x1120
+#define PCI_DEVICE_ID_CP114UL	0x1143
+#define PCI_DEVICE_ID_CP138U	0x1380
+
+static const struct rt_16550_pci_board rt_16550_moxa_c104 = {
+	.name = "Moxa C104H/PCI",
+	.resource_base_addr = 2,
+	.nports = 4,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_c168 = {
+	.name = "Moxa C168H/PCI",
+	.resource_base_addr = 2,
+	.nports = 8,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp114 = {
+	.name = "Moxa CP-114",
+	.resource_base_addr = 2,
+	.nports = 4,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp132 = {
+	.name = "Moxa CP-132",
+	.resource_base_addr = 2,
+	.nports = 2,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp102u = {
+	.name = "Moxa CP-102U",
+	.resource_base_addr = 2,
+	.nports = 2,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp102ul = {
+	.name = "Moxa CP-102UL",
+	.resource_base_addr = 2,
+	.nports = 2,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp104u = {
+	.name = "Moxa CP-104U",
+	.resource_base_addr = 2,
+	.nports = 4,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp112ul = {
+	.name = "Moxa CP-112UL",
+	.resource_base_addr = 2,
+	.nports = 2,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp114ul = {
+	.name = "Moxa CP-114UL",
+	.resource_base_addr = 2,
+	.nports = 4,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp118u = {
+	.name = "Moxa CP-118U",
+	.resource_base_addr = 2,
+	.nports = 8,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp132u = {
+	.name = "Moxa CP-132U",
+	.resource_base_addr = 2,
+	.nports = 2,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp134u = {
+	.name = "Moxa CP-134U",
+	.resource_base_addr = 2,
+	.nports = 4,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp138u = {
+	.name = "Moxa CP-138U",
+	.resource_base_addr = 2,
+	.nports = 8,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp168u = {
+	.name = "Moxa CP-168U",
+	.resource_base_addr = 2,
+	.nports = 8,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+#endif
+
+const struct pci_device_id rt_16550_pci_table[] = {
+#if defined(CONFIG_XENO_DRIVERS_16550A_PCI_MOXA)
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C104),
+	 .driver_data = (unsigned long)&rt_16550_moxa_c104},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C168),
+	 .driver_data = (unsigned long)&rt_16550_moxa_c168},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP114),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp114},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp132},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp102u},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102UL),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp102ul},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp104u},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP112UL),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp112ul},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP114UL),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp114ul},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp118u},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp132u},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP134U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp134u},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP138U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp138u},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp168u},
+#endif
+	{ }
+};
+
+static int rt_16550_pci_probe(struct pci_dev *pdev,
+			      const struct pci_device_id *ent)
+{
+	struct rt_16550_pci_board *board;
+	int err;
+	int i;
+	int port = 0;
+	int base_addr;
+	int max_devices = 0;
+
+	if (!ent->driver_data)
+		return -ENODEV;
+
+	board = (struct rt_16550_pci_board *)ent->driver_data;
+
+	for (i = 0; i < MAX_DEVICES; i++)
+		if (!rt_16550_addr_param(i))
+			max_devices++;
+
+	if (board->nports > max_devices)
+		return -ENODEV;
+
+	if ((err = pci_enable_device(pdev)))
+		return err;
+
+	base_addr = pci_resource_start(pdev, board->resource_base_addr);
+
+	for (i = 0; i < MAX_DEVICES; i++) {
+		if ((port < board->nports) && (!rt_16550_addr_param(i))) {
+			io[i] = base_addr + port * board->port_ofs;
+			irq[i] = pdev->irq;
+			irqtype[i] = board->irqtype;
+			baud_base[i] = board->baud_base;
+			tx_fifo[i] = board->tx_fifo;
+			port++;
+		}
+	}
+
+	return 0;
+}
+
+static void rt_16550_pci_remove(struct pci_dev *pdev)
+{
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver rt_16550_pci_driver = {
+	.name     = RT_16550_DRIVER_NAME,
+	.id_table = rt_16550_pci_table,
+	.probe    = rt_16550_pci_probe,
+	.remove   = rt_16550_pci_remove
+};
+
+static int pci_registered;
+
+static inline void rt_16550_pci_init(void)
+{
+	if (pci_register_driver(&rt_16550_pci_driver) == 0)
+		pci_registered = 1;
+}
+
+static inline void rt_16550_pci_cleanup(void)
+{
+	if (pci_registered)
+		pci_unregister_driver(&rt_16550_pci_driver);
+}
+
+#else /* !CONFIG_XENO_DRIVERS_16550A_PCI */
+
+#define rt_16550_pci_init()	do { } while (0)
+#define rt_16550_pci_cleanup()	do { } while (0)
+
+#endif /* !CONFIG_XENO_DRIVERS_16550A_PCI */
+++ linux-patched/drivers/xenomai/serial/16550A.c	2022-03-21 12:58:31.256870756 +0100
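For reference, this is how another 16550-class PCI card would typically be plugged into the table above. The sketch is editorial and not part of the patch: the vendor/device numbers and the board name are placeholders, whereas the rt_16550_pci_board fields and the .driver_data linkage mirror the Moxa entries in this hunk.

#define PCI_VENDOR_ID_EXAMPLE		0x1234	/* placeholder IDs, not a real card */
#define PCI_DEVICE_ID_EXAMPLE_4S	0x5678

static const struct rt_16550_pci_board rt_16550_example_4s = {
	.name = "Example 4-port PCI UART",
	.resource_base_addr = 2,	/* BAR that maps the UART registers */
	.nports = 4,			/* channels handled by rt_16550_pci_probe() */
	.port_ofs = 8,			/* register stride between channels */
	.baud_base = 921600,		/* UART input clock / 16 */
	.tx_fifo = 16,
	.irqtype = RTDM_IRQTYPE_SHARED,	/* one PCI interrupt shared by all ports */
};

/* In practice the ID entry is appended to rt_16550_pci_table[] right
 * before its empty terminator; shown here as a standalone table. */
static const struct pci_device_id example_pci_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_EXAMPLE, PCI_DEVICE_ID_EXAMPLE_4S),
	 .driver_data = (unsigned long)&rt_16550_example_4s},
	{ }
};

On a match, rt_16550_pci_probe() walks the MAX_DEVICES slots and fills the io[], irq[], irqtype[], baud_base[] and tx_fifo[] entries of every slot that was not configured statically through module parameters, so the card's ports are picked up by the main driver setup.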
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/serial/rt_imx_uart.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+#include <rtdm/serial.h>
+#include <rtdm/driver.h>
+
+MODULE_DESCRIPTION("RTDM-based driver for 16550A UARTs");
+MODULE_AUTHOR("Jan Kiszka <jan.kiszka@web.de>");
+MODULE_VERSION("1.5.2");
+MODULE_LICENSE("GPL");
+
+#define RT_16550_DRIVER_NAME	"xeno_16550A"
+
+#define MAX_DEVICES		8
+
+#define IN_BUFFER_SIZE		4096
+#define OUT_BUFFER_SIZE		4096
+
+#define DEFAULT_BAUD_BASE	115200
+#define DEFAULT_TX_FIFO		16
+
+#define PARITY_MASK		0x03
+#define DATA_BITS_MASK		0x03
+#define STOP_BITS_MASK		0x01
+#define FIFO_MASK		0xC0
+#define EVENT_MASK		0x0F
+
+#define LCR_DLAB		0x80
+
+#define FCR_FIFO		0x01
+#define FCR_RESET_RX		0x02
+#define FCR_RESET_TX		0x04
+
+#define IER_RX			0x01
+#define IER_TX			0x02
+#define IER_STAT		0x04
+#define IER_MODEM		0x08
+
+#define IIR_MODEM		0x00
+#define IIR_PIRQ		0x01
+#define IIR_TX			0x02
+#define IIR_RX			0x04
+#define IIR_STAT		0x06
+#define IIR_MASK		0x07
+
+#define RHR			0	/* Receive Holding Buffer */
+#define THR			0	/* Transmit Holding Buffer */
+#define DLL			0	/* Divisor Latch LSB */
+#define IER			1	/* Interrupt Enable Register */
+#define DLM			1	/* Divisor Latch MSB */
+#define IIR			2	/* Interrupt Id Register */
+#define FCR			2	/* Fifo Control Register */
+#define LCR			3	/* Line Control Register */
+#define MCR			4	/* Modem Control Register */
+#define LSR			5	/* Line Status Register */
+#define MSR			6	/* Modem Status Register */
+
+struct rt_16550_context {
+	struct rtser_config config;	/* current device configuration */
+
+	rtdm_irq_t irq_handle;		/* device IRQ handle */
+	rtdm_lock_t lock;		/* lock to protect context struct */
+
+	unsigned long base_addr;	/* hardware IO base address */
+#ifdef CONFIG_XENO_DRIVERS_16550A_ANY
+	int io_mode;			/* hardware IO-access mode */
+#endif
+	int tx_fifo;			/* cached global tx_fifo[<device>] */
+
+	int in_head;			/* RX ring buffer, head pointer */
+	int in_tail;			/* RX ring buffer, tail pointer */
+	size_t in_npend;		/* pending bytes in RX ring */
+	int in_nwait;			/* bytes the user waits for */
+	rtdm_event_t in_event;		/* raised to unblock reader */
+	char in_buf[IN_BUFFER_SIZE];	/* RX ring buffer */
+	volatile unsigned long in_lock;	/* single-reader lock */
+	uint64_t *in_history;		/* RX timestamp buffer */
+
+	int out_head;			/* TX ring buffer, head pointer */
+	int out_tail;			/* TX ring buffer, tail pointer */
+	size_t out_npend;		/* pending bytes in TX ring */
+	rtdm_event_t out_event;		/* raised to unblock writer */
+	char out_buf[OUT_BUFFER_SIZE];	/* TX ring buffer */
+	rtdm_mutex_t out_lock;		/* single-writer mutex */
+
+	uint64_t last_timestamp;	/* timestamp of last event */
+	int ioc_events;			/* recorded events */
+	rtdm_event_t ioc_event;		/* raised to unblock event waiter */
+	volatile unsigned long ioc_event_lock;	/* single-waiter lock */
+
+	int ier_status;			/* IER cache */
+	int mcr_status;			/* MCR cache */
+	int status;			/* cache for LSR + soft-states */
+	int saved_errors;		/* error cache for RTIOC_GET_STATUS */
+};
+
+static const struct rtser_config default_config = {
+	0xFFFF, RTSER_DEF_BAUD, RTSER_DEF_PARITY, RTSER_DEF_BITS,
+	RTSER_DEF_STOPB, RTSER_DEF_HAND, RTSER_DEF_FIFO_DEPTH, 0,
+	RTSER_DEF_TIMEOUT, RTSER_DEF_TIMEOUT, RTSER_DEF_TIMEOUT,
+	RTSER_DEF_TIMESTAMP_HISTORY, RTSER_DEF_EVENT_MASK, RTSER_DEF_RS485
+};
+
+static struct rtdm_device *device[MAX_DEVICES];
+
+static unsigned int irq[MAX_DEVICES];
+static unsigned long irqtype[MAX_DEVICES] = {
+	[0 ... MAX_DEVICES-1] = RTDM_IRQTYPE_SHARED | RTDM_IRQTYPE_EDGE
+};
+static unsigned int baud_base[MAX_DEVICES];
+static int tx_fifo[MAX_DEVICES];
+
+module_param_array(irq, uint, NULL, 0400);
+module_param_array(baud_base, uint, NULL, 0400);
+module_param_array(tx_fifo, int, NULL, 0400);
+
+MODULE_PARM_DESC(irq, "IRQ numbers of the serial devices");
+MODULE_PARM_DESC(baud_base, "Maximum baud rate of the serial device "
+		 "(internal clock rate / 16)");
+MODULE_PARM_DESC(tx_fifo, "Transmitter FIFO size");
+
+#include "16550A_io.h"
+#include "16550A_pnp.h"
+#include "16550A_pci.h"
+
+static inline int rt_16550_rx_interrupt(struct rt_16550_context *ctx,
+					uint64_t *timestamp)
+{
+	unsigned long base = ctx->base_addr;
+	int mode = rt_16550_io_mode_from_ctx(ctx);
+	int rbytes = 0;
+	int lsr = 0;
+	int c;
+
+	do {
+		c = rt_16550_reg_in(mode, base, RHR);	/* read input char */
+
+		ctx->in_buf[ctx->in_tail] = c;
+		if (ctx->in_history)
+			ctx->in_history[ctx->in_tail] = *timestamp;
+		ctx->in_tail = (ctx->in_tail + 1) & (IN_BUFFER_SIZE - 1);
+
+		if (++ctx->in_npend > IN_BUFFER_SIZE) {
+			lsr |= RTSER_SOFT_OVERRUN_ERR;
+			ctx->in_npend--;
+		}
+
+		rbytes++;
+		lsr &= ~RTSER_LSR_DATA;
+		lsr |= (rt_16550_reg_in(mode, base, LSR) &
+			(RTSER_LSR_DATA | RTSER_LSR_OVERRUN_ERR |
+			 RTSER_LSR_PARITY_ERR | RTSER_LSR_FRAMING_ERR |
+			 RTSER_LSR_BREAK_IND));
+	} while (lsr & RTSER_LSR_DATA);
+
+	/* save new errors */
+	ctx->status |= lsr;
+
+	/* If we are enforcing the RTSCTS control flow and the input
+	   buffer is busy above the specified high watermark, clear
+	   RTS. */
+/*	if (uart->i_count >= uart->config.rts_hiwm &&
+	    (uart->config.handshake & RT_UART_RTSCTS) != 0 &&
+	    (uart->modem & MCR_RTS) != 0) {
+		uart->modem &= ~MCR_RTS;
+		rt_16550_reg_out(mode, base, MCR, uart->modem);
+	}*/
+
+	return rbytes;
+}
+
+static void rt_16550_tx_fill(struct rt_16550_context *ctx)
+{
+	int c;
+	int count;
+	unsigned long base = ctx->base_addr;
+	int mode = rt_16550_io_mode_from_ctx(ctx);
+
+/*	if (uart->modem & MSR_CTS)*/
+	{
+		for (count = ctx->tx_fifo;
+		     (count > 0) && (ctx->out_npend > 0);
+		     count--, ctx->out_npend--) {
+			c = ctx->out_buf[ctx->out_head++];
+			rt_16550_reg_out(mode, base, THR, c);
+			ctx->out_head &= (OUT_BUFFER_SIZE - 1);
+		}
+	}
+}
+
+static inline void rt_16550_stat_interrupt(struct rt_16550_context *ctx)
+{
+	unsigned long base = ctx->base_addr;
+	int mode = rt_16550_io_mode_from_ctx(ctx);
+
+	ctx->status |= (rt_16550_reg_in(mode, base, LSR) &
+			(RTSER_LSR_OVERRUN_ERR | RTSER_LSR_PARITY_ERR |
+			 RTSER_LSR_FRAMING_ERR | RTSER_LSR_BREAK_IND));
+}
+
+static int rt_16550_interrupt(rtdm_irq_t *irq_context)
+{
+	struct rt_16550_context *ctx;
+	unsigned long base;
+	int mode;
+	int iir;
+	uint64_t timestamp = rtdm_clock_read();
+	int rbytes = 0;
+	int events = 0;
+	int modem;
+	int ret = RTDM_IRQ_NONE;
+
+	ctx = rtdm_irq_get_arg(irq_context, struct rt_16550_context);
+	base = ctx->base_addr;
+	mode = rt_16550_io_mode_from_ctx(ctx);
+
+	rtdm_lock_get(&ctx->lock);
+
+	while (1) {
+		iir = rt_16550_reg_in(mode, base, IIR) & IIR_MASK;
+		if (iir & IIR_PIRQ)
+			break;
+
+		if (iir == IIR_RX) {
+			rbytes += rt_16550_rx_interrupt(ctx, &timestamp);
+			events |= RTSER_EVENT_RXPEND;
+		} else if (iir == IIR_STAT)
+			rt_16550_stat_interrupt(ctx);
+		else if (iir == IIR_TX)
+			rt_16550_tx_fill(ctx);
+		else if (iir == IIR_MODEM) {
+			modem = rt_16550_reg_in(mode, base, MSR);
+			if (modem & (modem << 4))
+				events |= RTSER_EVENT_MODEMHI;
+			if ((modem ^ 0xF0) & (modem << 4))
+				events |= RTSER_EVENT_MODEMLO;
+		}
+
+		ret = RTDM_IRQ_HANDLED;
+	}
+
+	if (ctx->in_nwait > 0) {
+		if ((ctx->in_nwait <= rbytes) || ctx->status) {
+			ctx->in_nwait = 0;
+			rtdm_event_signal(&ctx->in_event);
+		} else
+			ctx->in_nwait -= rbytes;
+	}
+
+	if (ctx->status) {
+		events |= RTSER_EVENT_ERRPEND;
+		ctx->ier_status &= ~IER_STAT;
+	}
+
+	if (events & ctx->config.event_mask) {
+		int old_events = ctx->ioc_events;
+
+		ctx->last_timestamp = timestamp;
+		ctx->ioc_events = events;
+
+		if (!old_events)
+			rtdm_event_signal(&ctx->ioc_event);
+	}
+
+	if ((ctx->ier_status & IER_TX) && (ctx->out_npend == 0)) {
+		/* mask transmitter empty interrupt */
+		ctx->ier_status &= ~IER_TX;
+
+		rtdm_event_signal(&ctx->out_event);
+	}
+
+	/* update interrupt mask */
+	rt_16550_reg_out(mode, base, IER, ctx->ier_status);
+
+	rtdm_lock_put(&ctx->lock);
+
+	return ret;
+}
+
+static int rt_16550_set_config(struct rt_16550_context *ctx,
+			       const struct rtser_config *config,
+			       uint64_t **in_history_ptr)
+{
+	rtdm_lockctx_t lock_ctx;
+	unsigned long base = ctx->base_addr;
+	int mode = rt_16550_io_mode_from_ctx(ctx);
+	int err = 0;
+
+	/* make line configuration atomic and IRQ-safe */
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	if (config->config_mask & RTSER_SET_BAUD) {
+		int dev_id = rtdm_fd_minor(rtdm_private_to_fd(ctx));
+		int baud_div;
+
+		ctx->config.baud_rate = config->baud_rate;
+		baud_div = (baud_base[dev_id] + (ctx->config.baud_rate>>1)) /
+			ctx->config.baud_rate;
+		rt_16550_reg_out(mode, base, LCR, LCR_DLAB);
+		rt_16550_reg_out(mode, base, DLL, baud_div & 0xff);
+		rt_16550_reg_out(mode, base, DLM, baud_div >> 8);
+	}
+
+	if (config->config_mask & RTSER_SET_PARITY)
+		ctx->config.parity = config->parity & PARITY_MASK;
+	if (config->config_mask & RTSER_SET_DATA_BITS)
+		ctx->config.data_bits = config->data_bits & DATA_BITS_MASK;
+	if (config->config_mask & RTSER_SET_STOP_BITS)
+		ctx->config.stop_bits = config->stop_bits & STOP_BITS_MASK;
+
+	if (config->config_mask & (RTSER_SET_PARITY |
+				   RTSER_SET_DATA_BITS |
+				   RTSER_SET_STOP_BITS |
+				   RTSER_SET_BAUD)) {
+		rt_16550_reg_out(mode, base, LCR,
+				 (ctx->config.parity << 3) |
+				 (ctx->config.stop_bits << 2) |
+				 ctx->config.data_bits);
+		ctx->status = 0;
+		ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
+	}
+
+	if (config->config_mask & RTSER_SET_FIFO_DEPTH) {
+		ctx->config.fifo_depth = config->fifo_depth & FIFO_MASK;
+		rt_16550_reg_out(mode, base, FCR,
+				 FCR_FIFO | FCR_RESET_RX | FCR_RESET_TX);
+		rt_16550_reg_out(mode, base, FCR,
+				 FCR_FIFO | ctx->config.fifo_depth);
+	}
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	/* Timeout manipulation is not atomic. The user is supposed to take
+	   care not to use and change timeouts at the same time. */
+	if (config->config_mask & RTSER_SET_TIMEOUT_RX)
+		ctx->config.rx_timeout = config->rx_timeout;
+	if (config->config_mask & RTSER_SET_TIMEOUT_TX)
+		ctx->config.tx_timeout = config->tx_timeout;
+	if (config->config_mask & RTSER_SET_TIMEOUT_EVENT)
+		ctx->config.event_timeout = config->event_timeout;
+
+	if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+		/* change timestamp history atomically */
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		if (config->timestamp_history & RTSER_RX_TIMESTAMP_HISTORY) {
+			if (!ctx->in_history) {
+				ctx->in_history = *in_history_ptr;
+				*in_history_ptr = NULL;
+				if (!ctx->in_history)
+					err = -ENOMEM;
+			}
+		} else {
+			*in_history_ptr = ctx->in_history;
+			ctx->in_history = NULL;
+		}
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+	}
+
+	if (config->config_mask & RTSER_SET_EVENT_MASK) {
+		/* change event mask atomically */
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		ctx->config.event_mask = config->event_mask & EVENT_MASK;
+		ctx->ioc_events = 0;
+
+		if ((config->event_mask & RTSER_EVENT_RXPEND) &&
+		    (ctx->in_npend > 0))
+			ctx->ioc_events |= RTSER_EVENT_RXPEND;
+
+		if ((config->event_mask & RTSER_EVENT_ERRPEND)
+		    && ctx->status)
+			ctx->ioc_events |= RTSER_EVENT_ERRPEND;
+
+		if (config->event_mask & (RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO))
+			/* enable modem status interrupt */
+			ctx->ier_status |= IER_MODEM;
+		else
+			/* disable modem status interrupt */
+			ctx->ier_status &= ~IER_MODEM;
+		rt_16550_reg_out(mode, base, IER, ctx->ier_status);
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+	}
+
+	if (config->config_mask & RTSER_SET_HANDSHAKE) {
+		/* change handshake atomically */
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		ctx->config.handshake = config->handshake;
+
+		switch (ctx->config.handshake) {
+		case RTSER_RTSCTS_HAND:
+			/* RTS/CTS handshake not implemented yet, fall through. */
+
+		default:	/* RTSER_NO_HAND */
+			ctx->mcr_status =
+			    RTSER_MCR_DTR | RTSER_MCR_RTS | RTSER_MCR_OUT2;
+			break;
+		}
+		rt_16550_reg_out(mode, base, MCR, ctx->mcr_status);
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+	}
+
+	return err;
+}
+
+void rt_16550_cleanup_ctx(struct rt_16550_context *ctx)
+{
+	rtdm_event_destroy(&ctx->in_event);
+	rtdm_event_destroy(&ctx->out_event);
+	rtdm_event_destroy(&ctx->ioc_event);
+	rtdm_mutex_destroy(&ctx->out_lock);
+}
+
+int rt_16550_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rt_16550_context *ctx;
+	int dev_id = rtdm_fd_minor(fd);
+	int err;
+	uint64_t *dummy;
+	rtdm_lockctx_t lock_ctx;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	/* IPC initialisation - cannot fail with used parameters */
+	rtdm_lock_init(&ctx->lock);
+	rtdm_event_init(&ctx->in_event, 0);
+	rtdm_event_init(&ctx->out_event, 0);
+	rtdm_event_init(&ctx->ioc_event, 0);
+	rtdm_mutex_init(&ctx->out_lock);
+
+	rt_16550_init_io_ctx(dev_id, ctx);
+
+	ctx->tx_fifo = tx_fifo[dev_id];
+
+	ctx->in_head = 0;
+	ctx->in_tail = 0;
+	ctx->in_npend = 0;
+	ctx->in_nwait = 0;
+	ctx->in_lock = 0;
+	ctx->in_history = NULL;
+
+	ctx->out_head = 0;
+	ctx->out_tail = 0;
+	ctx->out_npend = 0;
+
+	ctx->ioc_events = 0;
+	ctx->ioc_event_lock = 0;
+	ctx->status = 0;
+	ctx->saved_errors = 0;
+
+	rt_16550_set_config(ctx, &default_config, &dummy);
+
+	err = rtdm_irq_request(&ctx->irq_handle, irq[dev_id],
+			rt_16550_interrupt, irqtype[dev_id],
+			rtdm_fd_device(fd)->name, ctx);
+	if (err) {
+		/* reset DTR and RTS */
+		rt_16550_reg_out(rt_16550_io_mode_from_ctx(ctx), ctx->base_addr,
+				 MCR, 0);
+
+		rt_16550_cleanup_ctx(ctx);
+
+		return err;
+	}
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	/* enable interrupts */
+	ctx->ier_status = IER_RX;
+	rt_16550_reg_out(rt_16550_io_mode_from_ctx(ctx), ctx->base_addr, IER,
+			 IER_RX);
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	return 0;
+}
+
+void rt_16550_close(struct rtdm_fd *fd)
+{
+	struct rt_16550_context *ctx;
+	unsigned long base;
+	int mode;
+	uint64_t *in_history;
+	rtdm_lockctx_t lock_ctx;
+
+	ctx = rtdm_fd_to_private(fd);
+	base = ctx->base_addr;
+	mode = rt_16550_io_mode_from_ctx(ctx);
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	/* reset DTR and RTS */
+	rt_16550_reg_out(mode, base, MCR, 0);
+
+	/* mask all UART interrupts and clear pending ones. */
+	rt_16550_reg_out(mode, base, IER, 0);
+	rt_16550_reg_in(mode, base, IIR);
+	rt_16550_reg_in(mode, base, LSR);
+	rt_16550_reg_in(mode, base, RHR);
+	rt_16550_reg_in(mode, base, MSR);
+
+	in_history = ctx->in_history;
+	ctx->in_history = NULL;
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	rtdm_irq_free(&ctx->irq_handle);
+
+	rt_16550_cleanup_ctx(ctx);
+
+	kfree(in_history);
+}
+
+int rt_16550_ioctl(struct rtdm_fd *fd, unsigned int request, void *arg)
+{
+	rtdm_lockctx_t lock_ctx;
+	struct rt_16550_context *ctx;
+	int err = 0;
+	unsigned long base;
+	int mode;
+
+	ctx = rtdm_fd_to_private(fd);
+	base = ctx->base_addr;
+	mode = rt_16550_io_mode_from_ctx(ctx);
+
+	switch (request) {
+	case RTSER_RTIOC_GET_CONFIG:
+		if (rtdm_fd_is_user(fd))
+			err =
+			    rtdm_safe_copy_to_user(fd, arg,
+						   &ctx->config,
+						   sizeof(struct
+							  rtser_config));
+		else
+			memcpy(arg, &ctx->config,
+			       sizeof(struct rtser_config));
+		break;
+
+	case RTSER_RTIOC_SET_CONFIG: {
+		struct rtser_config *config;
+		struct rtser_config config_buf;
+		uint64_t *hist_buf = NULL;
+
+		config = (struct rtser_config *)arg;
+
+		if (rtdm_fd_is_user(fd)) {
+			err =
+			    rtdm_safe_copy_from_user(fd, &config_buf,
+						     arg,
+						     sizeof(struct
+							    rtser_config));
+			if (err)
+				return err;
+
+			config = &config_buf;
+		}
+
+		if ((config->config_mask & RTSER_SET_BAUD) &&
+		    (config->baud_rate >
+			    baud_base[rtdm_fd_minor(fd)] ||
+			    config->baud_rate <= 0))
+			/* invalid baudrate for this port */
+			return -EINVAL;
+
+		if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+			/*
+			 * Reflect the call to non-RT as we will likely
+			 * allocate or free the buffer.
+			 */
+			if (rtdm_in_rt_context())
+				return -ENOSYS;
+
+			if (config->timestamp_history &
+			    RTSER_RX_TIMESTAMP_HISTORY)
+				hist_buf = kmalloc(IN_BUFFER_SIZE *
+						   sizeof(nanosecs_abs_t),
+						   GFP_KERNEL);
+		}
+
+		rt_16550_set_config(ctx, config, &hist_buf);
+
+		if (hist_buf)
+			kfree(hist_buf);
+
+		break;
+	}
+
+	case RTSER_RTIOC_GET_STATUS: {
+		int status;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		status = ctx->saved_errors | ctx->status;
+		ctx->status = 0;
+		ctx->saved_errors = 0;
+		ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd)) {
+			struct rtser_status status_buf;
+
+			status_buf.line_status =
+			    rt_16550_reg_in(mode, base, LSR) | status;
+			status_buf.modem_status =
+			    rt_16550_reg_in(mode, base, MSR);
+
+			err =
+			    rtdm_safe_copy_to_user(fd, arg,
+						   &status_buf,
+						   sizeof(struct
+							  rtser_status));
+		} else {
+			((struct rtser_status *)arg)->line_status =
+			    rt_16550_reg_in(mode, base, LSR) | status;
+			((struct rtser_status *)arg)->modem_status =
+			    rt_16550_reg_in(mode, base, MSR);
+		}
+		break;
+	}
+
+	case RTSER_RTIOC_GET_CONTROL:
+		if (rtdm_fd_is_user(fd))
+			err =
+			    rtdm_safe_copy_to_user(fd, arg,
+						   &ctx->mcr_status,
+						   sizeof(int));
+		else
+			*(int *)arg = ctx->mcr_status;
+
+		break;
+
+	case RTSER_RTIOC_SET_CONTROL: {
+		int new_mcr = (long)arg;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		ctx->mcr_status = new_mcr;
+		rt_16550_reg_out(mode, base, MCR, new_mcr);
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+	case RTSER_RTIOC_WAIT_EVENT: {
+		struct rtser_event ev = { .rxpend_timestamp = 0 };
+		rtdm_toseq_t timeout_seq;
+
+		if (!rtdm_in_rt_context())
+			return -ENOSYS;
+
+		/* Only one waiter allowed, stop any further attempts here. */
+		if (test_and_set_bit(0, &ctx->ioc_event_lock))
+			return -EBUSY;
+
+		rtdm_toseq_init(&timeout_seq, ctx->config.event_timeout);
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		while (!ctx->ioc_events) {
+			/* Only enable error interrupt
+			   when the user waits for it. */
+			if (ctx->config.event_mask & RTSER_EVENT_ERRPEND) {
+				ctx->ier_status |= IER_STAT;
+				rt_16550_reg_out(mode, base, IER,
+						 ctx->ier_status);
+			}
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			err = rtdm_event_timedwait(&ctx->ioc_event,
+						   ctx->config.event_timeout,
+						   &timeout_seq);
+			if (err) {
+				/* Device has been closed? */
+				if (err == -EIDRM)
+					err = -EBADF;
+				goto wait_unlock_out;
+			}
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		}
+
+		ev.events = ctx->ioc_events;
+		ctx->ioc_events &=
+		    ~(RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO);
+
+		ev.last_timestamp = ctx->last_timestamp;
+		ev.rx_pending = ctx->in_npend;
+
+		if (ctx->in_history)
+			ev.rxpend_timestamp = ctx->in_history[ctx->in_head];
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd))
+			err = rtdm_safe_copy_to_user(fd, arg, &ev,
+						     sizeof(struct rtser_event));
+		else
+			memcpy(arg, &ev, sizeof(struct rtser_event));
+
+	      wait_unlock_out:
+		/* release the simple event waiter lock */
+		clear_bit(0, &ctx->ioc_event_lock);
+		break;
+	}
+
+	case RTSER_RTIOC_BREAK_CTL: {
+		int lcr = ((long)arg & RTSER_BREAK_SET) << 6;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		lcr |=
+		    (ctx->config.parity << 3) | (ctx->config.stop_bits << 2) |
+		    ctx->config.data_bits;
+
+		rt_16550_reg_out(mode, base, LCR, lcr);
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+	case RTIOC_PURGE: {
+		int fcr = 0;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		if ((long)arg & RTDM_PURGE_RX_BUFFER) {
+			ctx->in_head = 0;
+			ctx->in_tail = 0;
+			ctx->in_npend = 0;
+			ctx->status = 0;
+			fcr |= FCR_FIFO | FCR_RESET_RX;
+			rt_16550_reg_in(mode, base, RHR);
+		}
+		if ((long)arg & RTDM_PURGE_TX_BUFFER) {
+			ctx->out_head = 0;
+			ctx->out_tail = 0;
+			ctx->out_npend = 0;
+			fcr |= FCR_FIFO | FCR_RESET_TX;
+		}
+		if (fcr) {
+			rt_16550_reg_out(mode, base, FCR, fcr);
+			rt_16550_reg_out(mode, base, FCR,
+					 FCR_FIFO | ctx->config.fifo_depth);
+		}
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+	default:
+		err = -ENOTTY;
+	}
+
+	return err;
+}
+
+ssize_t rt_16550_read(struct rtdm_fd *fd, void *buf, size_t nbyte)
+{
+	struct rt_16550_context *ctx;
+	rtdm_lockctx_t lock_ctx;
+	size_t read = 0;
+	int pending;
+	int block;
+	int subblock;
+	int in_pos;
+	char *out_pos = (char *)buf;
+	rtdm_toseq_t timeout_seq;
+	ssize_t ret = -EAGAIN;	/* for non-blocking read */
+	int nonblocking;
+
+	if (nbyte == 0)
+		return 0;
+
+	if (rtdm_fd_is_user(fd) && !rtdm_rw_user_ok(fd, buf, nbyte))
+		return -EFAULT;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_toseq_init(&timeout_seq, ctx->config.rx_timeout);
+
+	/* non-blocking is handled separately here */
+	nonblocking = (ctx->config.rx_timeout < 0);
+
+	/* only one reader allowed, stop any further attempts here */
+	if (test_and_set_bit(0, &ctx->in_lock))
+		return -EBUSY;
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	while (1) {
+		/* switch on error interrupt - the user is ready to listen */
+		if ((ctx->ier_status & IER_STAT) == 0) {
+			ctx->ier_status |= IER_STAT;
+			rt_16550_reg_out(rt_16550_io_mode_from_ctx(ctx),
+					 ctx->base_addr, IER,
+					 ctx->ier_status);
+		}
+
+		if (ctx->status) {
+			if (ctx->status & RTSER_LSR_BREAK_IND)
+				ret = -EPIPE;
+			else
+				ret = -EIO;
+			ctx->saved_errors = ctx->status &
+			    (RTSER_LSR_OVERRUN_ERR | RTSER_LSR_PARITY_ERR |
+			     RTSER_LSR_FRAMING_ERR | RTSER_SOFT_OVERRUN_ERR);
+			ctx->status = 0;
+			break;
+		}
+
+		pending = ctx->in_npend;
+
+		if (pending > 0) {
+			block = subblock = (pending <= nbyte) ? pending : nbyte;
+			in_pos = ctx->in_head;
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			/* Do we have to wrap around the buffer end? */
+			if (in_pos + subblock > IN_BUFFER_SIZE) {
+				/* Treat the block between head and buffer end
+				   separately. */
+				subblock = IN_BUFFER_SIZE - in_pos;
+
+				if (rtdm_fd_is_user(fd)) {
+					if (rtdm_copy_to_user
+					    (fd, out_pos,
+					     &ctx->in_buf[in_pos],
+					     subblock) != 0) {
+						ret = -EFAULT;
+						goto break_unlocked;
+					}
+				} else
+					memcpy(out_pos, &ctx->in_buf[in_pos],
+					       subblock);
+
+				read += subblock;
+				out_pos += subblock;
+
+				subblock = block - subblock;
+				in_pos = 0;
+			}
+
+			if (rtdm_fd_is_user(fd)) {
+				if (rtdm_copy_to_user(fd, out_pos,
+						      &ctx->in_buf[in_pos],
+						      subblock) != 0) {
+					ret = -EFAULT;
+					goto break_unlocked;
+				}
+			} else
+				memcpy(out_pos, &ctx->in_buf[in_pos], subblock);
+
+			read += subblock;
+			out_pos += subblock;
+			nbyte -= block;
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			ctx->in_head =
+			    (ctx->in_head + block) & (IN_BUFFER_SIZE - 1);
+			if ((ctx->in_npend -= block) == 0)
+				ctx->ioc_events &= ~RTSER_EVENT_RXPEND;
+
+			if (nbyte == 0)
+				break; /* All requested bytes read. */
+
+			continue;
+		}
+
+		if (nonblocking)
+			/* ret was set to EAGAIN in case of a real
+			   non-blocking call or contains the error
+			   returned by rtdm_event_wait[_until] */
+			break;
+
+		ctx->in_nwait = nbyte;
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		ret = rtdm_event_timedwait(&ctx->in_event,
+					   ctx->config.rx_timeout,
+					   &timeout_seq);
+		if (ret < 0) {
+			if (ret == -EIDRM) {
+				/* Device has been closed -
+				   return immediately. */
+				return -EBADF;
+			}
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			nonblocking = 1;
+			if (ctx->in_npend > 0) {
+				/* Final turn: collect pending bytes
+				   before exit. */
+				continue;
+			}
+
+			ctx->in_nwait = 0;
+			break;
+		}
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+	}
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+break_unlocked:
+	/* Release the simple reader lock. */
+	clear_bit(0, &ctx->in_lock);
+
+	if ((read > 0) && ((ret == 0) || (ret == -EAGAIN) ||
+			   (ret == -ETIMEDOUT) || (ret == -EINTR)))
+		ret = read;
+
+	return ret;
+}
+
+ssize_t rt_16550_write(struct rtdm_fd *fd, const void *buf, size_t nbyte)
+{
+	struct rt_16550_context *ctx;
+	rtdm_lockctx_t lock_ctx;
+	size_t written = 0;
+	int free;
+	int block;
+	int subblock;
+	int out_pos;
+	int lsr;
+	char *in_pos = (char *)buf;
+	rtdm_toseq_t timeout_seq;
+	ssize_t ret;
+
+	if (nbyte == 0)
+		return 0;
+
+	if (rtdm_fd_is_user(fd) && !rtdm_read_user_ok(fd, buf, nbyte))
+		return -EFAULT;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_toseq_init(&timeout_seq, ctx->config.tx_timeout);
+
+	/* Make write operation atomic. */
+	ret = rtdm_mutex_timedlock(&ctx->out_lock, ctx->config.tx_timeout,
+				   &timeout_seq);
+	if (ret)
+		return ret;
+
+	while (nbyte > 0) {
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		free = OUT_BUFFER_SIZE - ctx->out_npend;
+
+		if (free > 0) {
+			block = subblock = (nbyte <= free) ? nbyte : free;
+			out_pos = ctx->out_tail;
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			/* Do we have to wrap around the buffer end? */
+			if (out_pos + subblock > OUT_BUFFER_SIZE) {
+				/* Treat the block between head and buffer
+				   end separately. */
+				subblock = OUT_BUFFER_SIZE - out_pos;
+
+				if (rtdm_fd_is_user(fd)) {
+					if (rtdm_copy_from_user
+					    (fd,
+					     &ctx->out_buf[out_pos],
+					     in_pos, subblock) != 0) {
+						ret = -EFAULT;
+						break;
+					}
+				} else
+					memcpy(&ctx->out_buf[out_pos], in_pos,
+					       subblock);
+
+				written += subblock;
+				in_pos += subblock;
+
+				subblock = block - subblock;
+				out_pos = 0;
+			}
+
+			if (rtdm_fd_is_user(fd)) {
+				if (rtdm_copy_from_user
+				    (fd, &ctx->out_buf[out_pos],
+				     in_pos, subblock) != 0) {
+					ret = -EFAULT;
+					break;
+				}
+			} else
+				memcpy(&ctx->out_buf[out_pos], in_pos, subblock);
+
+			written += subblock;
+			in_pos += subblock;
+			nbyte -= block;
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			ctx->out_tail =
+			    (ctx->out_tail + block) & (OUT_BUFFER_SIZE - 1);
+			ctx->out_npend += block;
+
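+			/* If the transmitter is already idle, start filling
+			   the FIFO now instead of waiting for the next IRQ. */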
+			lsr = rt_16550_reg_in(rt_16550_io_mode_from_ctx(ctx),
+					      ctx->base_addr, LSR);
+			if (lsr & RTSER_LSR_THR_EMTPY)
+				rt_16550_tx_fill(ctx);
+
+			if (ctx->out_npend > 0 && !(ctx->ier_status & IER_TX)) {
+				/* unmask tx interrupt */
+				ctx->ier_status |= IER_TX;
+				rt_16550_reg_out(rt_16550_io_mode_from_ctx(ctx),
+						 ctx->base_addr, IER,
+						 ctx->ier_status);
+			}
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+			continue;
+		}
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		ret =
+		    rtdm_event_timedwait(&ctx->out_event,
+					 ctx->config.tx_timeout,
+					 &timeout_seq);
+		if (ret < 0) {
+			if (ret == -EIDRM) {
+				/* Device has been closed -
+				   return immediately. */
+				return -EBADF;
+			}
+			if (ret == -EWOULDBLOCK) {
+				/* Fix error code for non-blocking mode. */
+				ret = -EAGAIN;
+			}
+			break;
+		}
+	}
+
+	rtdm_mutex_unlock(&ctx->out_lock);
+
+	if ((written > 0) && ((ret == 0) || (ret == -EAGAIN) ||
+			      (ret == -ETIMEDOUT) || (ret == -EINTR)))
+		ret = written;
+
+	return ret;
+}
+
+static struct rtdm_driver uart16550A_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(uart16550A,
+						    RTDM_CLASS_SERIAL,
+						    RTDM_SUBCLASS_16550A,
+						    RTSER_PROFILE_VER),
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.device_count		= MAX_DEVICES,
+	.context_size		= sizeof(struct rt_16550_context),
+	.ops = {
+		.open		= rt_16550_open,
+		.close		= rt_16550_close,
+		.ioctl_rt	= rt_16550_ioctl,
+		.ioctl_nrt	= rt_16550_ioctl,
+		.read_rt	= rt_16550_read,
+		.write_rt	= rt_16550_write,
+	},
+};
+
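+/* Forward declaration: the init error path below unwinds via rt_16550_exit(). */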
+void rt_16550_exit(void);
+
+int __init rt_16550_init(void)
+{
+	struct rtdm_device *dev;
+	unsigned long base;
+	char *name;
+	int mode;
+	int err;
+	int i;
+
+	if (!rtdm_available())
+		return -ENODEV;
+
+	rt_16550_pnp_init();
+	rt_16550_pci_init();
+
+	for (i = 0; i < MAX_DEVICES; i++) {
+		if (!rt_16550_addr_param(i))
+			continue;
+
+		err = -EINVAL;
+		if (!irq[i] || !rt_16550_addr_param_valid(i))
+			goto cleanup_out;
+
+		dev = kmalloc(sizeof(struct rtdm_device) +
+			      RTDM_MAX_DEVNAME_LEN, GFP_KERNEL);
+		err = -ENOMEM;
+		if (!dev)
+			goto cleanup_out;
+
+		dev->driver = &uart16550A_driver;
+		dev->label = "rtser%d";
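+		/* The name buffer lives right behind the rtdm_device struct
+		   allocated above. */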
+		name = (char *)(dev + 1);
+		ksformat(name, RTDM_MAX_DEVNAME_LEN, dev->label, i);
+
+		err = rt_16550_init_io(i, name);
+		if (err)
+			goto kfree_out;
+
+		if (baud_base[i] == 0)
+			baud_base[i] = DEFAULT_BAUD_BASE;
+
+		if (tx_fifo[i] == 0)
+			tx_fifo[i] = DEFAULT_TX_FIFO;
+
+		/* Mask all UART interrupts and clear pending ones. */
+		base = rt_16550_base_addr(i);
+		mode = rt_16550_io_mode(i);
+		rt_16550_reg_out(mode, base, IER, 0);
+		rt_16550_reg_in(mode, base, IIR);
+		rt_16550_reg_in(mode, base, LSR);
+		rt_16550_reg_in(mode, base, RHR);
+		rt_16550_reg_in(mode, base, MSR);
+
+		err = rtdm_dev_register(dev);
+
+		if (err)
+			goto release_io_out;
+
+		device[i] = dev;
+	}
+
+	return 0;
+
+release_io_out:
+	rt_16550_release_io(i);
+
+kfree_out:
+	kfree(dev);
+
+cleanup_out:
+	rt_16550_exit();
+
+	return err;
+}
+
+void rt_16550_exit(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_DEVICES; i++)
+		if (device[i]) {
+			rtdm_dev_unregister(device[i]);
+			rt_16550_release_io(i);
+			kfree(device[i]);
+		}
+
+	rt_16550_pci_cleanup();
+	rt_16550_pnp_cleanup();
+}
+
+module_init(rt_16550_init);
+module_exit(rt_16550_exit);
+++ linux-patched/drivers/xenomai/serial/rt_imx_uart.c	2022-03-21 12:58:31.248870834 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/serial/16550A_pnp.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright 2012 Wolfgang Grandegger <wg@denx.de>
+ *
+ * Derived from the Linux IMX UART driver (drivers/tty/serial/imx.c)
+ * and the 16550A RTserial driver.
+ *
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2004 Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/console.h>
+#include <linux/platform_device.h>
+#include <linux/sysrq.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/rational.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <asm/div64.h>
+#include <linux/platform_data/serial-imx.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <rtdm/serial.h>
+#include <rtdm/driver.h>
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@denx.de>");
+MODULE_DESCRIPTION("RTDM-based driver for IMX UARTs");
+MODULE_VERSION("1.0.0");
+MODULE_LICENSE("GPL");
+
+#define DRIVER_NAME	"xeno_imx_uart"
+
+/* Register definitions */
+#define URXD0	0x0  /* Receiver Register */
+#define URTX0	0x40 /* Transmitter Register */
+#define UCR1	0x80 /* Control Register 1 */
+#define UCR2	0x84 /* Control Register 2 */
+#define UCR3	0x88 /* Control Register 3 */
+#define UCR4	0x8c /* Control Register 4 */
+#define UFCR	0x90 /* FIFO Control Register */
+#define USR1	0x94 /* Status Register 1 */
+#define USR2	0x98 /* Status Register 2 */
+#define UESC	0x9c /* Escape Character Register */
+#define UTIM	0xa0 /* Escape Timer Register */
+#define UBIR	0xa4 /* BRM Incremental Register */
+#define UBMR	0xa8 /* BRM Modulator Register */
+#define UBRC	0xac /* Baud Rate Count Register */
+#define MX2_ONEMS 0xb0 /* One Millisecond register */
+#define IMX1_UTS 0xd0 /* UART Test Register on i.mx1 */
+#define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx*/
+
+/* UART Control Register Bit Fields. */
+#define URXD_CHARRDY	(1<<15)
+#define URXD_ERR	(1<<14)
+#define URXD_OVRRUN	(1<<13)
+#define URXD_FRMERR	(1<<12)
+#define URXD_BRK	(1<<11)
+#define URXD_PRERR	(1<<10)
+#define UCR1_ADEN	(1<<15) /* Auto detect interrupt */
+#define UCR1_ADBR	(1<<14) /* Auto detect baud rate */
+#define UCR1_TRDYEN	(1<<13) /* Transmitter ready interrupt enable */
+#define UCR1_IDEN	(1<<12) /* Idle condition interrupt */
+#define UCR1_RRDYEN	(1<<9)	/* Recv ready interrupt enable */
+#define UCR1_RDMAEN	(1<<8)	/* Recv ready DMA enable */
+#define UCR1_IREN	(1<<7)	/* Infrared interface enable */
+#define UCR1_TXMPTYEN	(1<<6)	/* Transmitter empty interrupt enable */
+#define UCR1_RTSDEN	(1<<5)	/* RTS delta interrupt enable */
+#define UCR1_SNDBRK	(1<<4)	/* Send break */
+#define UCR1_TDMAEN	(1<<3)	/* Transmitter ready DMA enable */
+#define MX1_UCR1_UARTCLKEN	(1<<2)	/* UART clock enabled, mx1 only */
+#define UCR1_DOZE	(1<<1)	/* Doze */
+#define UCR1_UARTEN	(1<<0)	/* UART enabled */
+#define UCR2_ESCI	(1<<15) /* Escape seq interrupt enable */
+#define UCR2_IRTS	(1<<14) /* Ignore RTS pin */
+#define UCR2_CTSC	(1<<13) /* CTS pin control */
+#define UCR2_CTS	(1<<12) /* Clear to send */
+#define UCR2_ESCEN	(1<<11) /* Escape enable */
+#define UCR2_PREN	(1<<8)	/* Parity enable */
+#define UCR2_PROE	(1<<7)	/* Parity odd/even */
+#define UCR2_STPB	(1<<6)	/* Stop */
+#define UCR2_WS		(1<<5)	/* Word size */
+#define UCR2_RTSEN	(1<<4)	/* Request to send interrupt enable */
+#define UCR2_ATEN	(1<<3)	/* Aging Timer Enable */
+#define UCR2_TXEN	(1<<2)	/* Transmitter enabled */
+#define UCR2_RXEN	(1<<1)	/* Receiver enabled */
+#define UCR2_SRST	(1<<0)	/* SW reset */
+#define UCR3_DTREN	(1<<13) /* DTR interrupt enable */
+#define UCR3_PARERREN	(1<<12) /* Parity error interrupt enable */
+#define UCR3_FRAERREN	(1<<11) /* Frame error interrupt enable */
+#define UCR3_DSR	(1<<10) /* Data set ready */
+#define UCR3_DCD	(1<<9)	/* Data carrier detect */
+#define UCR3_RI		(1<<8)	/* Ring indicator */
+#define UCR3_ADNIMP	(1<<7)	/* Autobaud Detection Not Improved */
+#define UCR3_RXDSEN	(1<<6)	/* Receive status interrupt enable */
+#define UCR3_AIRINTEN	(1<<5)	/* Async IR wake interrupt enable */
+#define UCR3_AWAKEN	(1<<4)	/* Async wake interrupt enable */
+#define UCR3_DTRDEN	(1<<3)	/* Data Terminal Ready Delta Enable. */
+#define MX1_UCR3_REF25		(1<<3)	/* Ref freq 25 MHz, only on mx1 */
+#define MX1_UCR3_REF30		(1<<2)	/* Ref Freq 30 MHz, only on mx1 */
+#define MX2_UCR3_RXDMUXSEL	(1<<2)	/* RXD Muxed Input Select, on mx2/mx3 */
+#define UCR3_INVT	(1<<1)	/* Inverted Infrared transmission */
+#define UCR3_BPEN	(1<<0)	/* Preset registers enable */
+#define UCR4_CTSTL_SHF	10	/* CTS trigger level shift */
+#define UCR4_CTSTL_MASK	0x3F	/* CTS trigger is 6 bits wide */
+#define UCR4_INVR	(1<<9)	/* Inverted infrared reception */
+#define UCR4_ENIRI	(1<<8)	/* Serial infrared interrupt enable */
+#define UCR4_WKEN	(1<<7)	/* Wake interrupt enable */
+#define UCR4_REF16	(1<<6)	/* Ref freq 16 MHz */
+#define UCR4_IRSC	(1<<5)	/* IR special case */
+#define UCR4_TCEN	(1<<3)	/* Transmit complete interrupt enable */
+#define UCR4_BKEN	(1<<2)	/* Break condition interrupt enable */
+#define UCR4_OREN	(1<<1)	/* Receiver overrun interrupt enable */
+#define UCR4_DREN	(1<<0)	/* Recv data ready interrupt enable */
+#define UFCR_RXTL_SHF	0	/* Receiver trigger level shift */
+#define UFCR_RFDIV	(7<<7)	/* Reference freq divider mask */
+#define UFCR_RFDIV_REG(x)	(((x) < 7 ? 6 - (x) : 6) << 7)
+#define UFCR_TXTL_SHF	10	/* Transmitter trigger level shift */
+#define UFCR_DCEDTE	(1<<6)
+#define USR1_PARITYERR	(1<<15) /* Parity error interrupt flag */
+#define USR1_RTSS	(1<<14) /* RTS pin status */
+#define USR1_TRDY	(1<<13) /* Transmitter ready interrupt/dma flag */
+#define USR1_RTSD	(1<<12) /* RTS delta */
+#define USR1_ESCF	(1<<11) /* Escape seq interrupt flag */
+#define USR1_FRAMERR	(1<<10) /* Frame error interrupt flag */
+#define USR1_RRDY	(1<<9)	/* Receiver ready interrupt/dma flag */
+#define USR1_AGTIM	(1<<8)	/* Ageing Timer Interrupt Flag */
+#define USR1_DTRD	(1<<7)	/* DTR Delta */
+#define USR1_RXDS	(1<<6)	/* Receiver idle interrupt flag */
+#define USR1_AIRINT	(1<<5)	/* Async IR wake interrupt flag */
+#define USR1_AWAKE	(1<<4)	/* Async wake interrupt flag */
+#define USR2_ADET	(1<<15) /* Auto baud rate detect complete */
+#define USR2_TXFE	(1<<14) /* Transmit buffer FIFO empty */
+#define USR2_DTRF	(1<<13) /* DTR edge interrupt flag */
+#define USR2_IDLE	(1<<12) /* Idle condition */
+#define USR2_RIDELT	(1<<10) /* Ring Indicator Delta */
+#define USR2_RIIN	(1<<9)	/* Ring Indicator Input */
+#define USR2_IRINT	(1<<8)	/* Serial infrared interrupt flag */
+#define USR2_WAKE	(1<<7)	/* Wake */
+#define USR2_DCDDELT	(1<<6)	/* Data Carrier Detect Delta */
+#define USR2_DCDIN	(1<<5)	/* Data Carrier Detect Input */
+#define USR2_RTSF	(1<<4)	/* RTS edge interrupt flag */
+#define USR2_TXDC	(1<<3)	/* Transmitter complete */
+#define USR2_BRCD	(1<<2)	/* Break condition */
+#define USR2_ORE	(1<<1)	/* Overrun error */
+#define USR2_RDR	(1<<0)	/* Recv data ready */
+#define UTS_FRCPERR	(1<<13) /* Force parity error */
+#define UTS_LOOP	(1<<12) /* Loop tx and rx */
+#define UTS_TXEMPTY	(1<<6)	/* TxFIFO empty */
+#define UTS_RXEMPTY	(1<<5)	/* RxFIFO empty */
+#define UTS_TXFULL	(1<<4)	/* TxFIFO full */
+#define UTS_RXFULL	(1<<3)	/* RxFIFO full */
+#define UTS_SOFTRST	(1<<0)	/* Software reset */
+
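+/* Ring buffer sizes must be powers of two: indices wrap via "& (SIZE - 1)". */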
+#define IN_BUFFER_SIZE		4096
+#define OUT_BUFFER_SIZE		4096
+
+#define TX_FIFO_SIZE		32
+
+#define PARITY_MASK		0x03
+#define DATA_BITS_MASK		0x03
+#define STOP_BITS_MASK		0x01
+#define FIFO_MASK		0xC0
+#define EVENT_MASK		0x0F
+
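+/*
+ * Software interrupt-enable flags (mirroring the 16550 IER layout) cached
+ * in ctx->ier_status; the i.MX UART has no IER register, so the driver
+ * tracks here which sources it has enabled via the UCR bits.
+ */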
+#define IER_RX			0x01
+#define IER_TX			0x02
+#define IER_STAT		0x04
+#define IER_MODEM		0x08
+
+#define IMX_ISR_PASS_LIMIT	256
+#define UART_CREAD_BIT		256
+
+#define RT_IMX_UART_MAX		5
+
+static int tx_fifo[RT_IMX_UART_MAX];
+module_param_array(tx_fifo, int, NULL, 0400);
+MODULE_PARM_DESC(tx_fifo, "Transmitter FIFO size");
+
+/* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */
+enum imx_uart_type {
+	IMX1_UART,
+	IMX21_UART,
+	IMX53_UART,
+	IMX6Q_UART,
+};
+
+/* device type dependent stuff */
+struct imx_uart_data {
+	unsigned int uts_reg;
+	enum imx_uart_type devtype;
+};
+
+
+struct rt_imx_uart_port {
+	unsigned char __iomem *membase;	/* read/write[bwl] */
+	resource_size_t mapbase;	/* for ioremap */
+	unsigned int irq;		/* irq number */
+	int tx_fifo;			/* TX fifo size*/
+	unsigned int have_rtscts;
+	unsigned int use_dcedte;
+	unsigned int use_hwflow;
+	struct clk *clk_ipg;		/* clock id for UART clock */
+	struct clk *clk_per;		/* clock id for UART clock */
+	const struct imx_uart_data *devdata;
+	unsigned int uartclk;		/* base uart clock */
+	struct rtdm_device rtdm_dev;	/* RTDM device structure */
+};
+
+
+static struct imx_uart_data imx_uart_devdata[] = {
+	[IMX1_UART] = {
+		.uts_reg = IMX1_UTS,
+		.devtype = IMX1_UART,
+	},
+	[IMX21_UART] = {
+		.uts_reg = IMX21_UTS,
+		.devtype = IMX21_UART,
+	},
+	[IMX53_UART] = {
+		.uts_reg = IMX21_UTS,
+		.devtype = IMX53_UART,
+	},
+	[IMX6Q_UART] = {
+		.uts_reg = IMX21_UTS,
+		.devtype = IMX6Q_UART,
+	},
+};
+
+static const struct platform_device_id rt_imx_uart_id_table[] = {
+	{
+		.name = "imx1-uart",
+		.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX1_UART],
+	}, {
+		.name = "imx21-uart",
+		.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX21_UART],
+	}, {
+		.name = "imx53-uart",
+		.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX53_UART],
+	}, {
+		.name = "imx6q-uart",
+		.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX6Q_UART],
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(platform, rt_imx_uart_id_table);
+
+static const struct of_device_id rt_imx_uart_dt_ids[] = {
+	{
+		.compatible = "fsl,imx6q-uart",
+		.data = &imx_uart_devdata[IMX6Q_UART], },
+	{
+		.compatible = "fsl,imx53-uart",
+		.data = &imx_uart_devdata[IMX53_UART], },
+	{
+		.compatible = "fsl,imx1-uart",
+		.data = &imx_uart_devdata[IMX1_UART], },
+	{
+		.compatible = "fsl,imx21-uart",
+		.data = &imx_uart_devdata[IMX21_UART], },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rt_imx_uart_dt_ids);
+
+struct rt_imx_uart_ctx {
+	struct rtser_config config;	/* current device configuration */
+
+	rtdm_irq_t irq_handle;		/* device IRQ handle */
+	rtdm_lock_t lock;		/* lock to protect context struct */
+
+	int in_head;			/* RX ring buffer, head pointer */
+	int in_tail;			/* RX ring buffer, tail pointer */
+	size_t in_npend;		/* pending bytes in RX ring */
+	int in_nwait;			/* bytes the user waits for */
+	rtdm_event_t in_event;		/* raised to unblock reader */
+	char in_buf[IN_BUFFER_SIZE];	/* RX ring buffer */
+
+	volatile unsigned long in_lock;	/* single-reader lock */
+	uint64_t *in_history;		/* RX timestamp buffer */
+
+	int out_head;			/* TX ring buffer, head pointer */
+	int out_tail;			/* TX ring buffer, tail pointer */
+	size_t out_npend;		/* pending bytes in TX ring */
+	rtdm_event_t out_event;		/* raised to unblock writer */
+	char out_buf[OUT_BUFFER_SIZE];	/* TX ring buffer */
+	rtdm_mutex_t out_lock;		/* single-writer mutex */
+
+	uint64_t last_timestamp;	/* timestamp of last event */
+	int ioc_events;			/* recorded events */
+	rtdm_event_t ioc_event;		/* raised to unblock event waiter */
+	volatile unsigned long ioc_event_lock;	/* single-waiter lock */
+
+	int ier_status;			/* IER cache */
+	int mcr_status;			/* MCR cache */
+	int status;			/* cache for LSR + soft-states */
+	int saved_errors;		/* error cache for RTIOC_GET_STATUS */
+
+	/*
+	 * The port structure holds all the information about the UART
+	 * port like base address, and so on.
+	 */
+	struct rt_imx_uart_port *port;
+};
+
+static const struct rtser_config default_config = {
+	.config_mask = 0xFFFF,
+	.baud_rate = RTSER_DEF_BAUD,
+	.parity = RTSER_DEF_PARITY,
+	.data_bits = RTSER_DEF_BITS,
+	.stop_bits = RTSER_DEF_STOPB,
+	.handshake = RTSER_DEF_HAND,
+	.fifo_depth = RTSER_DEF_FIFO_DEPTH,
+	.rx_timeout = RTSER_DEF_TIMEOUT,
+	.tx_timeout = RTSER_DEF_TIMEOUT,
+	.event_timeout = RTSER_DEF_TIMEOUT,
+	.timestamp_history = RTSER_DEF_TIMESTAMP_HISTORY,
+	.event_mask = RTSER_DEF_EVENT_MASK,
+};
+
+static void rt_imx_uart_stop_tx(struct rt_imx_uart_ctx *ctx)
+{
+	unsigned long temp;
+
+	temp = readl(ctx->port->membase + UCR1);
+	writel(temp & ~UCR1_TXMPTYEN, ctx->port->membase + UCR1);
+}
+
+static void rt_imx_uart_start_tx(struct rt_imx_uart_ctx *ctx)
+{
+	unsigned long temp;
+
+	temp = readl(ctx->port->membase + UCR1);
+	writel(temp | UCR1_TXMPTYEN, ctx->port->membase + UCR1);
+}
+
+static void rt_imx_uart_enable_ms(struct rt_imx_uart_ctx *ctx)
+{
+	unsigned long ucr3;
+
+	/*
+	 * RTS interrupt is enabled only if we are using interrupt-driven
+	 * software controlled hardware flow control
+	 */
+	if (!ctx->port->use_hwflow) {
+		unsigned long ucr1 = readl(ctx->port->membase + UCR1);
+
+		ucr1 |= UCR1_RTSDEN;
+		writel(ucr1, ctx->port->membase + UCR1);
+	}
+	ucr3 = readl(ctx->port->membase + UCR3);
+	ucr3 |= UCR3_DTREN;
+	if (ctx->port->use_dcedte) /* DTE mode */
+		ucr3 |= UCR3_DCD | UCR3_RI;
+	writel(ucr3, ctx->port->membase + UCR3);
+}
+
+static int rt_imx_uart_rx_chars(struct rt_imx_uart_ctx *ctx,
+				uint64_t *timestamp)
+{
+	unsigned int rx, temp;
+	int rbytes = 0;
+	int lsr = 0;
+
+	while (readl(ctx->port->membase + USR2) & USR2_RDR) {
+		rx = readl(ctx->port->membase + URXD0);
+		temp = readl(ctx->port->membase + USR2);
+		if (temp & USR2_BRCD) {
+			writel(USR2_BRCD, ctx->port->membase + USR2);
+			lsr |= RTSER_LSR_BREAK_IND;
+		}
+
+		if (rx & (URXD_PRERR | URXD_OVRRUN | URXD_FRMERR)) {
+			if (rx & URXD_PRERR)
+				lsr |= RTSER_LSR_PARITY_ERR;
+			else if (rx & URXD_FRMERR)
+				lsr |= RTSER_LSR_FRAMING_ERR;
+			if (rx & URXD_OVRRUN)
+				lsr |= RTSER_LSR_OVERRUN_ERR;
+		}
+
+		/* save received character */
+		ctx->in_buf[ctx->in_tail] = rx & 0xff;
+		if (ctx->in_history)
+			ctx->in_history[ctx->in_tail] = *timestamp;
+		ctx->in_tail = (ctx->in_tail + 1) & (IN_BUFFER_SIZE - 1);
+
+		if (unlikely(ctx->in_npend >= IN_BUFFER_SIZE))
+			lsr |= RTSER_SOFT_OVERRUN_ERR;
+		else
+			ctx->in_npend++;
+
+		rbytes++;
+	}
+
+	/* save new errors */
+	ctx->status |= lsr;
+
+	return rbytes;
+}
+
+static void rt_imx_uart_tx_chars(struct rt_imx_uart_ctx *ctx)
+{
+	int ch;
+	unsigned int uts_reg = ctx->port->devdata->uts_reg;
+
+	while (ctx->out_npend > 0 &&
+	       !(readl(ctx->port->membase + uts_reg) & UTS_TXFULL)) {
+		ch = ctx->out_buf[ctx->out_head++];
+		writel(ch, ctx->port->membase + URTX0);
+		ctx->out_head &= (OUT_BUFFER_SIZE - 1);
+		ctx->out_npend--;
+	}
+}
+
+static int rt_imx_uart_modem_status(struct rt_imx_uart_ctx *ctx,
+				     unsigned int usr1,
+				     unsigned int usr2)
+{
+	int events = 0;
+
+	/* Clear the status bits that triggered the interrupt */
+	writel(usr1, ctx->port->membase + USR1);
+	writel(usr2, ctx->port->membase + USR2);
+
+	if (ctx->port->use_dcedte) { /* DTE mode */
+		if (usr2 & USR2_DCDDELT)
+			events |= !(usr2 & USR2_DCDIN) ?
+				RTSER_EVENT_MODEMHI : RTSER_EVENT_MODEMLO;
+	}
+	if (!ctx->port->use_hwflow && (usr1 & USR1_RTSD)) {
+		events |= (usr1 & USR1_RTSS) ?
+			RTSER_EVENT_MODEMHI : RTSER_EVENT_MODEMLO;
+	}
+
+	return events;
+}
+
+static int rt_imx_uart_int(rtdm_irq_t *irq_context)
+{
+	uint64_t timestamp = rtdm_clock_read();
+	struct rt_imx_uart_ctx *ctx;
+	unsigned int usr1, usr2, ucr1;
+	int rbytes = 0, events = 0;
+	int ret = RTDM_IRQ_NONE;
+
+	ctx = rtdm_irq_get_arg(irq_context, struct rt_imx_uart_ctx);
+
+	rtdm_lock_get(&ctx->lock);
+
+	usr1 = readl(ctx->port->membase + USR1);
+	usr2 = readl(ctx->port->membase + USR2);
+	ucr1 = readl(ctx->port->membase + UCR1);
+
+	/*
+	 * Read if there is data available
+	 */
+	if (usr1 & USR1_RRDY) {
+		if (likely(ucr1 & UCR1_RRDYEN)) {
+			rbytes = rt_imx_uart_rx_chars(ctx, &timestamp);
+			events |= RTSER_EVENT_RXPEND;
+		}
+		ret = RTDM_IRQ_HANDLED;
+	}
+
+	/*
+	 * Send data if there is data to be sent
+	 */
+	if (usr1 & USR1_TRDY) {
+		if (likely(ucr1 & UCR1_TXMPTYEN))
+			rt_imx_uart_tx_chars(ctx);
+		ret = RTDM_IRQ_HANDLED;
+	}
+
+	/*
+	 * Handle modem status events
+	 */
+	if ((usr1 & (USR1_RTSD | USR1_DTRD)) ||
+	    (usr2 & (USR2_DCDDELT | USR2_RIDELT))) {
+		events |= rt_imx_uart_modem_status(ctx, usr1, usr2);
+		ret = RTDM_IRQ_HANDLED;
+	}
+
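+	/* Wake up a blocked reader once enough bytes have arrived or an
+	 * error condition is pending.
+	 */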
+	if (ctx->in_nwait > 0) {
+		if ((ctx->in_nwait <= rbytes) || ctx->status) {
+			ctx->in_nwait = 0;
+			rtdm_event_signal(&ctx->in_event);
+		} else {
+			ctx->in_nwait -= rbytes;
+		}
+	}
+
+	if (ctx->status) {
+		events |= RTSER_EVENT_ERRPEND;
+#ifdef FIXME
+		ctx->ier_status &= ~IER_STAT;
+#endif
+	}
+
+	if (events & ctx->config.event_mask) {
+		int old_events = ctx->ioc_events;
+
+		ctx->last_timestamp = timestamp;
+		ctx->ioc_events = events;
+
+		if (!old_events)
+			rtdm_event_signal(&ctx->ioc_event);
+	}
+
+	if ((ctx->ier_status & IER_TX) && (ctx->out_npend == 0)) {
+		rt_imx_uart_stop_tx(ctx);
+		ctx->ier_status &= ~IER_TX;
+		rtdm_event_signal(&ctx->out_event);
+	}
+
+	rtdm_lock_put(&ctx->lock);
+
+	if (ret != RTDM_IRQ_HANDLED)
+		pr_warn("%s: unhandled interrupt\n", __func__);
+	return ret;
+}
+
+static unsigned int rt_imx_uart_get_msr(struct rt_imx_uart_ctx *ctx)
+{
+	unsigned long usr1 = readl(ctx->port->membase + USR1);
+	unsigned long usr2 = readl(ctx->port->membase + USR2);
+	unsigned int msr = 0;
+
+	if (usr1 & USR1_RTSD)
+		msr |= RTSER_MSR_DCTS;
+	if (usr1 & USR1_DTRD)
+		msr |= RTSER_MSR_DDSR;
+	if (usr2 & USR2_RIDELT)
+		msr |= RTSER_MSR_TERI;
+	if (usr2 & USR2_DCDDELT)
+		msr |= RTSER_MSR_DDCD;
+
+	if (usr1 & USR1_RTSS)
+		msr |= RTSER_MSR_CTS;
+
+	if (ctx->port->use_dcedte) { /* DTE mode */
+		if (!(usr2 & USR2_DCDIN))
+			msr |= RTSER_MSR_DCD;
+		if (!(usr2 & USR2_RIIN))
+			msr |= RTSER_MSR_RI;
+	}
+
+	return msr;
+}
+
+static void rt_imx_uart_set_mcr(struct rt_imx_uart_ctx *ctx,
+				unsigned int mcr)
+{
+	unsigned int uts_reg = ctx->port->devdata->uts_reg;
+	unsigned long ucr2 = readl(ctx->port->membase + UCR2);
+	unsigned long ucr3 = readl(ctx->port->membase + UCR3);
+	unsigned long uts = readl(ctx->port->membase + uts_reg);
+
+	if (mcr & RTSER_MCR_RTS) {
+		/*
+		 * Return to hardware-driven hardware flow control if the
+		 * option is enabled
+		 */
+		if (ctx->port->use_hwflow) {
+			ucr2 |= UCR2_CTSC;
+		} else {
+			ucr2 |= UCR2_CTS;
+			ucr2 &= ~UCR2_CTSC;
+		}
+	} else {
+		ucr2 &= ~(UCR2_CTS | UCR2_CTSC);
+	}
+	writel(ucr2, ctx->port->membase + UCR2);
+
+	if (mcr & RTSER_MCR_DTR)
+		ucr3 |= UCR3_DSR;
+	else
+		ucr3 &= ~UCR3_DSR;
+	writel(ucr3, ctx->port->membase + UCR3);
+
+	if (mcr & RTSER_MCR_LOOP)
+		uts |= UTS_LOOP;
+	else
+		uts &= ~UTS_LOOP;
+	writel(uts, ctx->port->membase + uts_reg);
+}
+
+static void rt_imx_uart_break_ctl(struct rt_imx_uart_ctx *ctx,
+				  int break_state)
+{
+	unsigned long ucr1 = readl(ctx->port->membase + UCR1);
+
+	if (break_state == RTSER_BREAK_SET)
+		ucr1 |= UCR1_SNDBRK;
+	else
+		ucr1 &= ~UCR1_SNDBRK;
+	writel(ucr1, ctx->port->membase + UCR1);
+}
+
+static int rt_imx_uart_set_config(struct rt_imx_uart_ctx *ctx,
+				  const struct rtser_config *config,
+				  uint64_t **in_history_ptr)
+{
+	rtdm_lockctx_t lock_ctx;
+	int err = 0;
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	if (config->config_mask & RTSER_SET_BAUD)
+		ctx->config.baud_rate = config->baud_rate;
+	if (config->config_mask & RTSER_SET_DATA_BITS)
+		ctx->config.data_bits = config->data_bits & DATA_BITS_MASK;
+	if (config->config_mask & RTSER_SET_PARITY)
+		ctx->config.parity = config->parity & PARITY_MASK;
+	if (config->config_mask & RTSER_SET_STOP_BITS)
+		ctx->config.stop_bits = config->stop_bits & STOP_BITS_MASK;
+
+	/* Timeout manipulation is not atomic. The user is supposed to take
+	 * care not to use and change timeouts at the same time.
+	 */
+	if (config->config_mask & RTSER_SET_TIMEOUT_RX)
+		ctx->config.rx_timeout = config->rx_timeout;
+	if (config->config_mask & RTSER_SET_TIMEOUT_TX)
+		ctx->config.tx_timeout = config->tx_timeout;
+	if (config->config_mask & RTSER_SET_TIMEOUT_EVENT)
+		ctx->config.event_timeout = config->event_timeout;
+
+	if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+		if (config->timestamp_history & RTSER_RX_TIMESTAMP_HISTORY) {
+			if (!ctx->in_history) {
+				ctx->in_history = *in_history_ptr;
+				*in_history_ptr = NULL;
+				if (!ctx->in_history)
+					err = -ENOMEM;
+			}
+		} else {
+			*in_history_ptr = ctx->in_history;
+			ctx->in_history = NULL;
+		}
+	}
+
+	if (config->config_mask & RTSER_SET_EVENT_MASK) {
+		ctx->config.event_mask = config->event_mask & EVENT_MASK;
+		ctx->ioc_events = 0;
+
+		if ((config->event_mask & RTSER_EVENT_RXPEND) &&
+		    (ctx->in_npend > 0))
+			ctx->ioc_events |= RTSER_EVENT_RXPEND;
+
+		if ((config->event_mask & RTSER_EVENT_ERRPEND)
+		    && ctx->status)
+			ctx->ioc_events |= RTSER_EVENT_ERRPEND;
+	}
+
+	if (config->config_mask & RTSER_SET_HANDSHAKE) {
+		ctx->config.handshake = config->handshake;
+
+		switch (ctx->config.handshake) {
+		case RTSER_RTSCTS_HAND:
+			/* ...? */
+
+		default:	/* RTSER_NO_HAND */
+			ctx->mcr_status = RTSER_MCR_RTS | RTSER_MCR_OUT1;
+			break;
+		}
+		rt_imx_uart_set_mcr(ctx, ctx->mcr_status);
+	}
+
+	/* configure hardware with new parameters */
+	if (config->config_mask & (RTSER_SET_BAUD |
+				   RTSER_SET_PARITY |
+				   RTSER_SET_DATA_BITS |
+				   RTSER_SET_STOP_BITS |
+				   RTSER_SET_EVENT_MASK |
+				   RTSER_SET_HANDSHAKE)) {
+		struct rt_imx_uart_port *port = ctx->port;
+		unsigned int ucr2, old_ucr1, old_txrxen, old_ucr2;
+		unsigned int baud = ctx->config.baud_rate;
+		unsigned int div, ufcr;
+		unsigned long num, denom;
+		uint64_t tdiv64;
+
+		if (ctx->config.data_bits == RTSER_8_BITS)
+			ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS;
+		else
+			ucr2 = UCR2_SRST | UCR2_IRTS;
+
+		if (ctx->config.handshake == RTSER_RTSCTS_HAND) {
+			if (port->have_rtscts) {
+				ucr2 &= ~UCR2_IRTS;
+				ucr2 |= UCR2_CTSC;
+			}
+		}
+
+		if (ctx->config.stop_bits == RTSER_2_STOPB)
+			ucr2 |= UCR2_STPB;
+		if (ctx->config.parity == RTSER_ODD_PARITY ||
+		    ctx->config.parity == RTSER_EVEN_PARITY) {
+			ucr2 |= UCR2_PREN;
+			if (ctx->config.parity == RTSER_ODD_PARITY)
+				ucr2 |= UCR2_PROE;
+		}
+
+		/*
+		 * disable interrupts and drain transmitter
+		 */
+		old_ucr1 = readl(port->membase + UCR1);
+		old_ucr1 &= ~UCR1_RTSDEN; /* reset in  rt_imx_uart_enable_ms()*/
+		writel(old_ucr1 & ~(UCR1_TXMPTYEN | UCR1_RRDYEN),
+		       port->membase + UCR1);
+		old_ucr2 = readl(port->membase + UCR2);
+		writel(old_ucr2 & ~UCR2_ATEN, port->membase + UCR2);
+		while (!(readl(port->membase + USR2) & USR2_TXDC))
+			barrier();
+
+		/* then, disable everything */
+		old_txrxen = readl(port->membase + UCR2);
+		writel(old_txrxen & ~(UCR2_TXEN | UCR2_RXEN),
+		       port->membase + UCR2);
+		old_txrxen &= (UCR2_TXEN | UCR2_RXEN);
+		div = port->uartclk / (baud * 16);
+		if (div > 7)
+			div = 7;
+		if (!div)
+			div = 1;
+
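+		/*
+		 * The baud generator yields
+		 *   baud = (uartclk / div) * (UBIR + 1) / (16 * (UBMR + 1)),
+		 * so approximate (UBIR+1)/(UBMR+1) = 16 * div * baud / uartclk
+		 * with the rational fraction num/denom computed below.
+		 */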
+		rational_best_approximation(16 * div * baud, port->uartclk,
+					    1 << 16, 1 << 16, &num, &denom);
+
+		tdiv64 = port->uartclk;
+		tdiv64 *= num;
+		do_div(tdiv64, denom * 16 * div);
+
+		num -= 1;
+		denom -= 1;
+
+		ufcr = readl(port->membase + UFCR);
+		ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
+
+		if (port->use_dcedte)
+			ufcr |= UFCR_DCEDTE;
+
+		writel(ufcr, port->membase + UFCR);
+
+		writel(num, port->membase + UBIR);
+		writel(denom, port->membase + UBMR);
+
+		writel(port->uartclk / div / 1000, port->membase + MX2_ONEMS);
+
+		writel(old_ucr1, port->membase + UCR1);
+
+		/* set the parity, stop bits and data size */
+		writel(ucr2 | old_txrxen, port->membase + UCR2);
+
+		if (config->event_mask &
+		    (RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO))
+			rt_imx_uart_enable_ms(ctx);
+
+		ctx->status = 0;
+		ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
+	}
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	return err;
+}
+
+void rt_imx_uart_cleanup_ctx(struct rt_imx_uart_ctx *ctx)
+{
+	rtdm_event_destroy(&ctx->in_event);
+	rtdm_event_destroy(&ctx->out_event);
+	rtdm_event_destroy(&ctx->ioc_event);
+	rtdm_mutex_destroy(&ctx->out_lock);
+}
+
+#define TXTL 2 /* reset default */
+#define RXTL 1 /* reset default */
+
+static int rt_imx_uart_setup_ufcr(struct rt_imx_uart_port *port)
+{
+	unsigned int val;
+	unsigned int ufcr_rfdiv;
+
+	/* set receiver / transmitter trigger level.
+	 * RFDIV is set such way to satisfy requested uartclk value
+	 */
+	val = TXTL << 10 | RXTL;
+	ufcr_rfdiv = (clk_get_rate(port->clk_per) + port->uartclk / 2) /
+		port->uartclk;
+
+	if (!ufcr_rfdiv)
+		ufcr_rfdiv = 1;
+
+	val |= UFCR_RFDIV_REG(ufcr_rfdiv);
+
+	writel(val, port->membase + UFCR);
+
+	return 0;
+}
+
+/* half the RX buffer size */
+#define CTSTL 16
+
+static void uart_reset(struct rt_imx_uart_port *port)
+{
+	unsigned int uts_reg = port->devdata->uts_reg;
+	int n = 100;
+	u32 temp;
+
+	/* Reset fifo's and state machines */
+	temp = readl(port->membase + UCR2);
+	temp &= ~UCR2_SRST;
+	writel(temp, port->membase + UCR2);
+	n = 100;
+	while (!(readl(port->membase + uts_reg) & UTS_SOFTRST) && --n > 0)
+		udelay(1);
+}
+
+static int rt_imx_uart_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rt_imx_uart_ctx *ctx;
+	struct rt_imx_uart_port *port;
+	rtdm_lockctx_t lock_ctx;
+	unsigned long temp;
+	uint64_t *dummy;
+
+	ctx = rtdm_fd_to_private(fd);
+	ctx->port = (struct rt_imx_uart_port *)rtdm_fd_device(fd)->device_data;
+
+	port = ctx->port;
+
+	/* IPC initialisation - cannot fail with used parameters */
+	rtdm_lock_init(&ctx->lock);
+	rtdm_event_init(&ctx->in_event, 0);
+	rtdm_event_init(&ctx->out_event, 0);
+	rtdm_event_init(&ctx->ioc_event, 0);
+	rtdm_mutex_init(&ctx->out_lock);
+
+	ctx->in_head = 0;
+	ctx->in_tail = 0;
+	ctx->in_npend = 0;
+	ctx->in_nwait = 0;
+	ctx->in_lock = 0;
+	ctx->in_history = NULL;
+
+	ctx->out_head = 0;
+	ctx->out_tail = 0;
+	ctx->out_npend = 0;
+
+	ctx->ioc_events = 0;
+	ctx->ioc_event_lock = 0;
+	ctx->status = 0;
+	ctx->saved_errors = 0;
+
+	/*
+	 * disable the DREN bit (Data Ready interrupt enable) before
+	 * requesting IRQs
+	 */
+	temp = readl(port->membase + UCR4);
+
+	/* set the trigger level for CTS */
+	temp &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
+	temp |= CTSTL << UCR4_CTSTL_SHF;
+	writel(temp & ~UCR4_DREN, port->membase + UCR4);
+
+	uart_reset(port);
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	/*
+	 * Finally, clear status and enable interrupts
+	 */
+	writel(USR1_RTSD | USR1_DTRD, port->membase + USR1);
+	writel(USR2_ORE, port->membase + USR2);
+
+	temp = readl(port->membase + UCR1) & ~UCR1_RRDYEN;
+	temp |= UCR1_UARTEN;
+	if (port->have_rtscts)
+		temp |= UCR1_RTSDEN;
+	writel(temp, port->membase + UCR1);
+
+	temp = readl(port->membase + UCR4);
+	temp |= UCR4_OREN;
+	writel(temp, port->membase + UCR4);
+
+	temp = readl(port->membase + UCR2) & ~(UCR2_ATEN|UCR2_RTSEN);
+	temp |= (UCR2_RXEN | UCR2_TXEN);
+	if (!port->have_rtscts)
+		temp |= UCR2_IRTS;
+	writel(temp, port->membase + UCR2);
+
+	temp = readl(port->membase + UCR3);
+	temp |= MX2_UCR3_RXDMUXSEL;
+	writel(temp, port->membase + UCR3);
+
+	temp = readl(port->membase + UCR1);
+	temp |= UCR1_RRDYEN;
+	writel(temp, port->membase + UCR1);
+
+	temp = readl(port->membase + UCR2);
+	temp |= UCR2_ATEN;
+	writel(temp, port->membase + UCR2);
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
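+	/* default_config leaves timestamp history disabled, so no history
+	 * buffer needs to be handed over here.
+	 */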
+	rt_imx_uart_set_config(ctx, &default_config, &dummy);
+
+	rt_imx_uart_setup_ufcr(port);
+
+	return rtdm_irq_request(&ctx->irq_handle,
+				port->irq, rt_imx_uart_int, 0,
+				rtdm_fd_device(fd)->name, ctx);
+}
+
+void rt_imx_uart_close(struct rtdm_fd *fd)
+{
+	struct rt_imx_uart_port *port;
+	struct rt_imx_uart_ctx *ctx;
+	rtdm_lockctx_t lock_ctx;
+	unsigned long temp;
+
+	ctx = rtdm_fd_to_private(fd);
+	port = ctx->port;
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	temp = readl(port->membase + UCR2);
+	temp &= ~(UCR2_ATEN|UCR2_RTSEN|UCR2_RXEN|UCR2_TXEN|UCR2_IRTS);
+	writel(temp, port->membase + UCR2);
+	/*
+	 * Disable all interrupts, port and break condition, then
+	 * reset.
+	 */
+	temp = readl(port->membase + UCR1);
+	temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
+	writel(temp, port->membase + UCR1);
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	rtdm_irq_free(&ctx->irq_handle);
+
+	uart_reset(port);
+
+	rt_imx_uart_cleanup_ctx(ctx);
+	kfree(ctx->in_history);
+}
+
+static int rt_imx_uart_ioctl(struct rtdm_fd *fd,
+			     unsigned int request, void *arg)
+{
+	rtdm_lockctx_t lock_ctx;
+	struct rt_imx_uart_ctx *ctx;
+	int err = 0;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	switch (request) {
+	case RTSER_RTIOC_GET_CONFIG:
+		if (rtdm_fd_is_user(fd))
+			err =
+			    rtdm_safe_copy_to_user(fd, arg,
+						   &ctx->config,
+						   sizeof(struct rtser_config));
+		else
+			memcpy(arg, &ctx->config,
+			       sizeof(struct rtser_config));
+		break;
+
+	case RTSER_RTIOC_SET_CONFIG: {
+		struct rtser_config *config;
+		struct rtser_config config_buf;
+		uint64_t *hist_buf = NULL;
+
+		/*
+		 * We may call regular kernel services ahead, ask for
+		 * re-entering secondary mode if need be.
+		 */
+		if (rtdm_in_rt_context())
+			return -ENOSYS;
+
+		config = (struct rtser_config *)arg;
+
+		if (rtdm_fd_is_user(fd)) {
+			err =
+			    rtdm_safe_copy_from_user(fd, &config_buf,
+						     arg,
+						     sizeof(struct
+							    rtser_config));
+			if (err)
+				return err;
+
+			config = &config_buf;
+		}
+
+		if ((config->config_mask & RTSER_SET_BAUD) &&
+		    (config->baud_rate > clk_get_rate(ctx->port->clk_per) / 16 ||
+		     config->baud_rate <= 0))
+			/* invalid baudrate for this port */
+			return -EINVAL;
+
+		if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+			if (config->timestamp_history &
+						RTSER_RX_TIMESTAMP_HISTORY)
+				hist_buf = kmalloc(IN_BUFFER_SIZE *
+						   sizeof(nanosecs_abs_t),
+						   GFP_KERNEL);
+		}
+
+		rt_imx_uart_set_config(ctx, config, &hist_buf);
+
+		if (hist_buf)
+			kfree(hist_buf);
+		break;
+	}
+
+	case RTSER_RTIOC_GET_STATUS: {
+		int status, msr;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		status = ctx->saved_errors | ctx->status;
+		ctx->status = 0;
+		ctx->saved_errors = 0;
+		ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
+
+		msr = rt_imx_uart_get_msr(ctx);
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd)) {
+			struct rtser_status status_buf;
+
+			status_buf.line_status = status;
+			status_buf.modem_status = msr;
+			err =
+			    rtdm_safe_copy_to_user(fd, arg,
+						   &status_buf,
+						   sizeof(struct
+							  rtser_status));
+		} else {
+			((struct rtser_status *)arg)->line_status = status;
+			((struct rtser_status *)arg)->modem_status = msr;
+		}
+		break;
+	}
+
+	case RTSER_RTIOC_GET_CONTROL:
+		if (rtdm_fd_is_user(fd))
+			err =
+			    rtdm_safe_copy_to_user(fd, arg,
+						   &ctx->mcr_status,
+						   sizeof(int));
+		else
+			*(int *)arg = ctx->mcr_status;
+
+		break;
+
+	case RTSER_RTIOC_SET_CONTROL: {
+		int new_mcr = (long)arg;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		ctx->mcr_status = new_mcr;
+		rt_imx_uart_set_mcr(ctx, new_mcr);
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+	case RTSER_RTIOC_WAIT_EVENT: {
+		struct rtser_event ev = { .rxpend_timestamp = 0 };
+		rtdm_toseq_t timeout_seq;
+
+		if (!rtdm_in_rt_context())
+			return -ENOSYS;
+
+		/* Only one waiter allowed, stop any further attempts here. */
+		if (test_and_set_bit(0, &ctx->ioc_event_lock))
+			return -EBUSY;
+
+		rtdm_toseq_init(&timeout_seq, ctx->config.event_timeout);
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		while (!ctx->ioc_events) {
+			/* Only enable error interrupt
+			 * when the user waits for it.
+			 */
+			if (ctx->config.event_mask & RTSER_EVENT_ERRPEND) {
+				ctx->ier_status |= IER_STAT;
+#ifdef FIXME
+				rt_imx_uart_reg_out(mode, base, IER,
+						 ctx->ier_status);
+#endif
+			}
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			err = rtdm_event_timedwait(&ctx->ioc_event,
+						   ctx->config.event_timeout,
+						   &timeout_seq);
+			if (err) {
+				/* Device has been closed? */
+				if (err == -EIDRM)
+					err = -EBADF;
+				goto wait_unlock_out;
+			}
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		}
+
+		ev.events = ctx->ioc_events;
+		ctx->ioc_events &=
+		    ~(RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO);
+
+		ev.last_timestamp = ctx->last_timestamp;
+		ev.rx_pending = ctx->in_npend;
+
+		if (ctx->in_history)
+			ev.rxpend_timestamp = ctx->in_history[ctx->in_head];
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd))
+			err = rtdm_safe_copy_to_user(fd, arg, &ev,
+						     sizeof(struct rtser_event));
+		else
+			memcpy(arg, &ev, sizeof(struct rtser_event));
+
+wait_unlock_out:
+		/* release the simple event waiter lock */
+		clear_bit(0, &ctx->ioc_event_lock);
+		break;
+	}
+
+	case RTSER_RTIOC_BREAK_CTL: {
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		rt_imx_uart_break_ctl(ctx, (unsigned long)arg);
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+#ifdef FIXME
+	case RTIOC_PURGE: {
+		int fcr = 0;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		if ((long)arg & RTDM_PURGE_RX_BUFFER) {
+			ctx->in_head = 0;
+			ctx->in_tail = 0;
+			ctx->in_npend = 0;
+			ctx->status = 0;
+			fcr |= FCR_FIFO | FCR_RESET_RX;
+			rt_imx_uart_reg_in(mode, base, RHR);
+		}
+		if ((long)arg & RTDM_PURGE_TX_BUFFER) {
+			ctx->out_head = 0;
+			ctx->out_tail = 0;
+			ctx->out_npend = 0;
+			fcr |= FCR_FIFO | FCR_RESET_TX;
+		}
+		if (fcr) {
+			rt_imx_uart_reg_out(mode, base, FCR, fcr);
+			rt_imx_uart_reg_out(mode, base, FCR,
+					 FCR_FIFO | ctx->config.fifo_depth);
+		}
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+#endif
+
+	default:
+		err = -ENOTTY;
+	}
+
+	return err;
+}
+
+ssize_t rt_imx_uart_read(struct rtdm_fd *fd, void *buf, size_t nbyte)
+{
+	struct rt_imx_uart_ctx *ctx;
+	rtdm_lockctx_t lock_ctx;
+	size_t read = 0;
+	int pending;
+	int block;
+	int subblock;
+	int in_pos;
+	char *out_pos = (char *)buf;
+	rtdm_toseq_t timeout_seq;
+	ssize_t ret = -EAGAIN;	/* for non-blocking read */
+	int nonblocking;
+
+	if (nbyte == 0)
+		return 0;
+
+	if (rtdm_fd_is_user(fd) && !rtdm_rw_user_ok(fd, buf, nbyte))
+		return -EFAULT;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_toseq_init(&timeout_seq, ctx->config.rx_timeout);
+
+	/* non-blocking is handled separately here */
+	nonblocking = (ctx->config.rx_timeout < 0);
+
+	/* only one reader allowed, stop any further attempts here */
+	if (test_and_set_bit(0, &ctx->in_lock))
+		return -EBUSY;
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	while (1) {
+		if (ctx->status) {
+			if (ctx->status & RTSER_LSR_BREAK_IND)
+				ret = -EPIPE;
+			else
+				ret = -EIO;
+			ctx->saved_errors = ctx->status &
+			    (RTSER_LSR_OVERRUN_ERR | RTSER_LSR_PARITY_ERR |
+			     RTSER_LSR_FRAMING_ERR | RTSER_SOFT_OVERRUN_ERR);
+			ctx->status = 0;
+			break;
+		}
+
+		pending = ctx->in_npend;
+
+		if (pending > 0) {
+			block = subblock = (pending <= nbyte) ? pending : nbyte;
+			in_pos = ctx->in_head;
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			/* Do we have to wrap around the buffer end? */
+			if (in_pos + subblock > IN_BUFFER_SIZE) {
+				/* Treat the block between head and buffer end
+				 * separately.
+				 */
+				subblock = IN_BUFFER_SIZE - in_pos;
+
+				if (rtdm_fd_is_user(fd)) {
+					if (rtdm_copy_to_user
+					    (fd, out_pos,
+					     &ctx->in_buf[in_pos],
+					     subblock) != 0) {
+						ret = -EFAULT;
+						goto break_unlocked;
+					}
+				} else
+					memcpy(out_pos, &ctx->in_buf[in_pos],
+					       subblock);
+
+				read += subblock;
+				out_pos += subblock;
+
+				subblock = block - subblock;
+				in_pos = 0;
+			}
+
+			if (rtdm_fd_is_user(fd)) {
+				if (rtdm_copy_to_user(fd, out_pos,
+						      &ctx->in_buf[in_pos],
+						      subblock) != 0) {
+					ret = -EFAULT;
+					goto break_unlocked;
+				}
+			} else
+				memcpy(out_pos, &ctx->in_buf[in_pos], subblock);
+
+			read += subblock;
+			out_pos += subblock;
+			nbyte -= block;
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			ctx->in_head =
+			    (ctx->in_head + block) & (IN_BUFFER_SIZE - 1);
+			ctx->in_npend -= block;
+			if (ctx->in_npend == 0)
+				ctx->ioc_events &= ~RTSER_EVENT_RXPEND;
+
+			if (nbyte == 0)
+				break; /* All requested bytes read. */
+
+			continue;
+		}
+
+		if (nonblocking)
+			/* ret was set to EAGAIN in case of a real
+			 * non-blocking call or contains the error
+			 * returned by rtdm_event_wait[_until]
+			 */
+			break;
+
+		ctx->in_nwait = nbyte;
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		ret = rtdm_event_timedwait(&ctx->in_event,
+					   ctx->config.rx_timeout,
+					   &timeout_seq);
+		if (ret < 0) {
+			if (ret == -EIDRM) {
+				/* Device has been closed -
+				 * return immediately.
+				 */
+				return -EBADF;
+			}
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			nonblocking = 1;
+			if (ctx->in_npend > 0) {
+				/* Final turn: collect pending bytes
+				 * before exit.
+				 */
+				continue;
+			}
+
+			ctx->in_nwait = 0;
+			break;
+		}
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+	}
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+break_unlocked:
+	/* Release the simple reader lock. */
+	clear_bit(0, &ctx->in_lock);
+
+	if ((read > 0) && ((ret == 0) || (ret == -EAGAIN) ||
+			   (ret == -ETIMEDOUT)))
+		ret = read;
+
+	return ret;
+}
+
+static ssize_t rt_imx_uart_write(struct rtdm_fd *fd, const void *buf,
+				size_t nbyte)
+{
+	struct rt_imx_uart_ctx *ctx;
+	rtdm_lockctx_t lock_ctx;
+	size_t written = 0;
+	int free;
+	int block;
+	int subblock;
+	int out_pos;
+	char *in_pos = (char *)buf;
+	rtdm_toseq_t timeout_seq;
+	ssize_t ret;
+
+	if (nbyte == 0)
+		return 0;
+
+	if (rtdm_fd_is_user(fd) && !rtdm_read_user_ok(fd, buf, nbyte))
+		return -EFAULT;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_toseq_init(&timeout_seq, ctx->config.tx_timeout);
+
+	/* Make write operation atomic. */
+	ret = rtdm_mutex_timedlock(&ctx->out_lock, ctx->config.tx_timeout,
+				   &timeout_seq);
+	if (ret)
+		return ret;
+
+	while (nbyte > 0) {
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		free = OUT_BUFFER_SIZE - ctx->out_npend;
+
+		if (free > 0) {
+			block = subblock = (nbyte <= free) ? nbyte : free;
+			out_pos = ctx->out_tail;
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			/* Do we have to wrap around the buffer end? */
+			if (out_pos + subblock > OUT_BUFFER_SIZE) {
+				/* Treat the block between head and buffer
+				 * end separately.
+				 */
+				subblock = OUT_BUFFER_SIZE - out_pos;
+
+				if (rtdm_fd_is_user(fd)) {
+					if (rtdm_copy_from_user
+					    (fd,
+					     &ctx->out_buf[out_pos],
+					     in_pos, subblock) != 0) {
+						ret = -EFAULT;
+						break;
+					}
+				} else
+					memcpy(&ctx->out_buf[out_pos], in_pos,
+					       subblock);
+
+				written += subblock;
+				in_pos += subblock;
+
+				subblock = block - subblock;
+				out_pos = 0;
+			}
+
+			if (rtdm_fd_is_user(fd)) {
+				if (rtdm_copy_from_user
+				    (fd, &ctx->out_buf[out_pos],
+				     in_pos, subblock) != 0) {
+					ret = -EFAULT;
+					break;
+				}
+			} else
+				memcpy(&ctx->out_buf[out_pos], in_pos, subblock);
+
+			written += subblock;
+			in_pos += subblock;
+			nbyte -= block;
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			ctx->out_tail =
+			    (ctx->out_tail + block) & (OUT_BUFFER_SIZE - 1);
+			ctx->out_npend += block;
+
+			ctx->ier_status |= IER_TX;
+			rt_imx_uart_start_tx(ctx);
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+			continue;
+		}
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		ret = rtdm_event_timedwait(&ctx->out_event,
+					   ctx->config.tx_timeout,
+					   &timeout_seq);
+		if (ret < 0) {
+			if (ret == -EIDRM) {
+				/* Device has been closed -
+				 * return immediately.
+				 */
+				ret = -EBADF;
+			}
+			break;
+		}
+	}
+
+	rtdm_mutex_unlock(&ctx->out_lock);
+
+	if ((written > 0) && ((ret == 0) || (ret == -EAGAIN) ||
+			      (ret == -ETIMEDOUT)))
+		ret = written;
+
+	return ret;
+}
+
+static struct rtdm_driver imx_uart_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(imx_uart,
+						    RTDM_CLASS_SERIAL,
+						    RTDM_SUBCLASS_16550A,
+						    RTSER_PROFILE_VER),
+	.device_count		= RT_IMX_UART_MAX,
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.context_size		= sizeof(struct rt_imx_uart_ctx),
+	.ops = {
+		.open		= rt_imx_uart_open,
+		.close		= rt_imx_uart_close,
+		.ioctl_rt	= rt_imx_uart_ioctl,
+		.ioctl_nrt	= rt_imx_uart_ioctl,
+		.read_rt	= rt_imx_uart_read,
+		.write_rt	= rt_imx_uart_write,
+	},
+};
+
+
+#ifdef CONFIG_OF
+
+/*
+ * This function returns 1 if pdev is not a device instantiated from the
+ * device tree, 0 if it could successfully get all information from the
+ * device tree, or a negative errno on failure.
+ */
+static int rt_imx_uart_probe_dt(struct rt_imx_uart_port *port,
+				struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	const struct of_device_id *of_id =
+			of_match_device(rt_imx_uart_dt_ids, &pdev->dev);
+	int ret;
+
+	if (!np)
+		/* no device tree device */
+		return 1;
+
+	ret = of_alias_get_id(np, "serial");
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+		return ret;
+	}
+
+	pdev->id = ret;
+
+	if (of_get_property(np, "uart-has-rtscts", NULL) ||
+	    of_get_property(np, "fsl,uart-has-rtscts", NULL) /* deprecated */)
+		port->have_rtscts = 1;
+	if (of_get_property(np, "fsl,irda-mode", NULL))
+		dev_warn(&pdev->dev, "IRDA not yet supported\n");
+
+	if (of_get_property(np, "fsl,dte-mode", NULL))
+		port->use_dcedte = 1;
+
+	port->devdata = of_id->data;
+
+	return 0;
+}
+#else
+static inline int rt_imx_uart_probe_dt(struct rt_imx_uart_port *port,
+				       struct platform_device *pdev)
+{
+	return 1;
+}
+#endif
+
+static void rt_imx_uart_probe_pdata(struct rt_imx_uart_port *port,
+				    struct platform_device *pdev)
+{
+	struct imxuart_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+	port->devdata = (struct imx_uart_data  *) pdev->id_entry->driver_data;
+
+	if (!pdata)
+		return;
+
+	if (pdata->flags & IMXUART_HAVE_RTSCTS)
+		port->have_rtscts = 1;
+}
+
+static int rt_imx_uart_probe(struct platform_device *pdev)
+{
+	struct rtdm_device *dev;
+	struct rt_imx_uart_port *port;
+	struct resource *res;
+	int ret;
+
+	port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	ret = rt_imx_uart_probe_dt(port, pdev);
+	if (ret > 0)
+		rt_imx_uart_probe_pdata(port, pdev);
+	else if (ret < 0)
+		return ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	port->irq = platform_get_irq(pdev, 0);
+
+	if (port->irq <= 0)
+		return -ENODEV;
+
+	port->membase = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(port->membase))
+		return PTR_ERR(port->membase);
+
+	dev = &port->rtdm_dev;
+	dev->driver = &imx_uart_driver;
+	dev->label = "rtser%d";
+	dev->device_data = port;
+
+	if (!tx_fifo[pdev->id] || tx_fifo[pdev->id] > TX_FIFO_SIZE)
+		port->tx_fifo = TX_FIFO_SIZE;
+	else
+		port->tx_fifo = tx_fifo[pdev->id];
+
+	port->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(port->clk_ipg))
+		return PTR_ERR(port->clk_ipg);
+
+	port->clk_per = devm_clk_get(&pdev->dev, "per");
+	if (IS_ERR(port->clk_per))
+		return PTR_ERR(port->clk_per);
+
+	clk_prepare_enable(port->clk_ipg);
+	clk_prepare_enable(port->clk_per);
+	port->uartclk = clk_get_rate(port->clk_per);
+
+	port->use_hwflow = 1;
+
+	ret = rtdm_dev_register(dev);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, port);
+
+	pr_info("%s on IMX UART%d: membase=0x%p irq=%d uartclk=%d\n",
+	       dev->name, pdev->id, port->membase, port->irq, port->uartclk);
+	return 0;
+}
+
+static int rt_imx_uart_remove(struct platform_device *pdev)
+{
+	struct imxuart_platform_data *pdata;
+	struct rt_imx_uart_port *port = platform_get_drvdata(pdev);
+	struct rtdm_device *dev = &port->rtdm_dev;
+
+	pdata = pdev->dev.platform_data;
+	platform_set_drvdata(pdev, NULL);
+
+	clk_disable_unprepare(port->clk_ipg);
+	clk_disable_unprepare(port->clk_per);
+	rtdm_dev_unregister(dev);
+
+	return 0;
+}
+
+static struct platform_driver rt_imx_uart_driver = {
+	.probe = rt_imx_uart_probe,
+	.remove	= rt_imx_uart_remove,
+	.id_table = rt_imx_uart_id_table,
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = rt_imx_uart_dt_ids,
+	},
+	.prevent_deferred_probe = true,
+};
+
+
+static int __init rt_imx_uart_init(void)
+{
+	int ret;
+
+	if (!rtdm_available())
+		return -ENODEV;
+
+	ret = platform_driver_register(&rt_imx_uart_driver);
+	if (ret) {
+		pr_err("%s: could not register driver (err=%d)\n",
+			__func__, ret);
+	}
+
+	return ret;
+}
+
+static void __exit rt_imx_uart_exit(void)
+{
+	platform_driver_unregister(&rt_imx_uart_driver);
+}
+
+module_init(rt_imx_uart_init);
+module_exit(rt_imx_uart_exit);
+++ linux-patched/drivers/xenomai/serial/16550A_pnp.h	2022-03-21 12:58:31.241870902 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/serial/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006-2007 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#if defined(CONFIG_PNP) && \
+    (defined(CONFIG_XENO_DRIVERS_16550A_PIO) || \
+     defined(CONFIG_XENO_DRIVERS_16550A_ANY))
+
+#include <linux/pnp.h>
+
+#define UNKNOWN_DEV 0x3000
+
+/* Bluntly cloned from drivers/serial/8250_pnp.c */
+static const struct pnp_device_id rt_16550_pnp_tbl[] = {
+	/* Archtek America Corp. */
+	/* Archtek SmartLink Modem 3334BT Plug & Play */
+	{	"AAC000F",		0	},
+	/* Anchor Datacomm BV */
+	/* SXPro 144 External Data Fax Modem Plug & Play */
+	{	"ADC0001",		0	},
+	/* SXPro 288 External Data Fax Modem Plug & Play */
+	{	"ADC0002",		0	},
+	/* PROLiNK 1456VH ISA PnP K56flex Fax Modem */
+	{	"AEI0250",		0	},
+	/* Actiontec ISA PNP 56K X2 Fax Modem */
+	{	"AEI1240",		0	},
+	/* Rockwell 56K ACF II Fax+Data+Voice Modem */
+	{	"AKY1021",		0 /*SPCI_FL_NO_SHIRQ*/	},
+	/* AZT3005 PnP SOUND DEVICE */
+	{	"AZT4001",		0	},
+	/* Best Data Products Inc. Smart One 336F PnP Modem */
+	{	"BDP3336",		0	},
+	/*  Boca Research */
+	/* Boca Complete Ofc Communicator 14.4 Data-FAX */
+	{	"BRI0A49",		0	},
+	/* Boca Research 33,600 ACF Modem */
+	{	"BRI1400",		0	},
+	/* Boca 33.6 Kbps Internal FD34FSVD */
+	{	"BRI3400",		0	},
+	/* Boca 33.6 Kbps Internal FD34FSVD */
+	{	"BRI0A49",		0	},
+	/* Best Data Products Inc. Smart One 336F PnP Modem */
+	{	"BDP3336",		0	},
+	/* Computer Peripherals Inc */
+	/* EuroViVa CommCenter-33.6 SP PnP */
+	{	"CPI4050",		0	},
+	/* Creative Labs */
+	/* Creative Labs Phone Blaster 28.8 DSVD PnP Voice */
+	{	"CTL3001",		0	},
+	/* Creative Labs Modem Blaster 28.8 DSVD PnP Voice */
+	{	"CTL3011",		0	},
+	/* Creative */
+	/* Creative Modem Blaster Flash56 DI5601-1 */
+	{	"DMB1032",		0	},
+	/* Creative Modem Blaster V.90 DI5660 */
+	{	"DMB2001",		0	},
+	/* E-Tech */
+	/* E-Tech CyberBULLET PC56RVP */
+	{	"ETT0002",		0	},
+	/* FUJITSU */
+	/* Fujitsu 33600 PnP-I2 R Plug & Play */
+	{	"FUJ0202",		0	},
+	/* Fujitsu FMV-FX431 Plug & Play */
+	{	"FUJ0205",		0	},
+	/* Fujitsu 33600 PnP-I4 R Plug & Play */
+	{	"FUJ0206",		0	},
+	/* Fujitsu Fax Voice 33600 PNP-I5 R Plug & Play */
+	{	"FUJ0209",		0	},
+	/* Archtek America Corp. */
+	/* Archtek SmartLink Modem 3334BT Plug & Play */
+	{	"GVC000F",		0	},
+	/* Hayes */
+	/* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */
+	{	"HAY0001",		0	},
+	/* Hayes Optima 336 V.34 + FAX + Voice PnP */
+	{	"HAY000C",		0	},
+	/* Hayes Optima 336B V.34 + FAX + Voice PnP */
+	{	"HAY000D",		0	},
+	/* Hayes Accura 56K Ext Fax Modem PnP */
+	{	"HAY5670",		0	},
+	/* Hayes Accura 56K Ext Fax Modem PnP */
+	{	"HAY5674",		0	},
+	/* Hayes Accura 56K Fax Modem PnP */
+	{	"HAY5675",		0	},
+	/* Hayes 288, V.34 + FAX */
+	{	"HAYF000",		0	},
+	/* Hayes Optima 288 V.34 + FAX + Voice, Plug & Play */
+	{	"HAYF001",		0	},
+	/* IBM */
+	/* IBM Thinkpad 701 Internal Modem Voice */
+	{	"IBM0033",		0	},
+	/* Intertex */
+	/* Intertex 28k8 33k6 Voice EXT PnP */
+	{	"IXDC801",		0	},
+	/* Intertex 33k6 56k Voice EXT PnP */
+	{	"IXDC901",		0	},
+	/* Intertex 28k8 33k6 Voice SP EXT PnP */
+	{	"IXDD801",		0	},
+	/* Intertex 33k6 56k Voice SP EXT PnP */
+	{	"IXDD901",		0	},
+	/* Intertex 28k8 33k6 Voice SP INT PnP */
+	{	"IXDF401",		0	},
+	/* Intertex 28k8 33k6 Voice SP EXT PnP */
+	{	"IXDF801",		0	},
+	/* Intertex 33k6 56k Voice SP EXT PnP */
+	{	"IXDF901",		0	},
+	/* Kortex International */
+	/* KORTEX 28800 Externe PnP */
+	{	"KOR4522",		0	},
+	/* KXPro 33.6 Vocal ASVD PnP */
+	{	"KORF661",		0	},
+	/* Lasat */
+	/* LASAT Internet 33600 PnP */
+	{	"LAS4040",		0	},
+	/* Lasat Safire 560 PnP */
+	{	"LAS4540",		0	},
+	/* Lasat Safire 336  PnP */
+	{	"LAS5440",		0	},
+	/* Microcom, Inc. */
+	/* Microcom TravelPorte FAST V.34 Plug & Play */
+	{	"MNP0281",		0	},
+	/* Microcom DeskPorte V.34 FAST or FAST+ Plug & Play */
+	{	"MNP0336",		0	},
+	/* Microcom DeskPorte FAST EP 28.8 Plug & Play */
+	{	"MNP0339",		0	},
+	/* Microcom DeskPorte 28.8P Plug & Play */
+	{	"MNP0342",		0	},
+	/* Microcom DeskPorte FAST ES 28.8 Plug & Play */
+	{	"MNP0500",		0	},
+	/* Microcom DeskPorte FAST ES 28.8 Plug & Play */
+	{	"MNP0501",		0	},
+	/* Microcom DeskPorte 28.8S Internal Plug & Play */
+	{	"MNP0502",		0	},
+	/* Motorola */
+	/* Motorola BitSURFR Plug & Play */
+	{	"MOT1105",		0	},
+	/* Motorola TA210 Plug & Play */
+	{	"MOT1111",		0	},
+	/* Motorola HMTA 200 (ISDN) Plug & Play */
+	{	"MOT1114",		0	},
+	/* Motorola BitSURFR Plug & Play */
+	{	"MOT1115",		0	},
+	/* Motorola Lifestyle 28.8 Internal */
+	{	"MOT1190",		0	},
+	/* Motorola V.3400 Plug & Play */
+	{	"MOT1501",		0	},
+	/* Motorola Lifestyle 28.8 V.34 Plug & Play */
+	{	"MOT1502",		0	},
+	/* Motorola Power 28.8 V.34 Plug & Play */
+	{	"MOT1505",		0	},
+	/* Motorola ModemSURFR External 28.8 Plug & Play */
+	{	"MOT1509",		0	},
+	/* Motorola Premier 33.6 Desktop Plug & Play */
+	{	"MOT150A",		0	},
+	/* Motorola VoiceSURFR 56K External PnP */
+	{	"MOT150F",		0	},
+	/* Motorola ModemSURFR 56K External PnP */
+	{	"MOT1510",		0	},
+	/* Motorola ModemSURFR 56K Internal PnP */
+	{	"MOT1550",		0	},
+	/* Motorola ModemSURFR Internal 28.8 Plug & Play */
+	{	"MOT1560",		0	},
+	/* Motorola Premier 33.6 Internal Plug & Play */
+	{	"MOT1580",		0	},
+	/* Motorola OnlineSURFR 28.8 Internal Plug & Play */
+	{	"MOT15B0",		0	},
+	/* Motorola VoiceSURFR 56K Internal PnP */
+	{	"MOT15F0",		0	},
+	/* Com 1 */
+	/*  Deskline K56 Phone System PnP */
+	{	"MVX00A1",		0	},
+	/* PC Rider K56 Phone System PnP */
+	{	"MVX00F2",		0	},
+	/* NEC 98NOTE SPEAKER PHONE FAX MODEM(33600bps) */
+	{	"nEC8241",		0	},
+	/* Pace 56 Voice Internal Plug & Play Modem */
+	{	"PMC2430",		0	},
+	/* Generic */
+	/* Generic standard PC COM port	 */
+	{	"PNP0500",		0	},
+	/* Generic 16550A-compatible COM port */
+	{	"PNP0501",		0	},
+	/* Compaq 14400 Modem */
+	{	"PNPC000",		0	},
+	/* Compaq 2400/9600 Modem */
+	{	"PNPC001",		0	},
+	/* Dial-Up Networking Serial Cable between 2 PCs */
+	{	"PNPC031",		0	},
+	/* Dial-Up Networking Parallel Cable between 2 PCs */
+	{	"PNPC032",		0	},
+	/* Standard 9600 bps Modem */
+	{	"PNPC100",		0	},
+	/* Standard 14400 bps Modem */
+	{	"PNPC101",		0	},
+	/*  Standard 28800 bps Modem*/
+	{	"PNPC102",		0	},
+	/*  Standard Modem*/
+	{	"PNPC103",		0	},
+	/*  Standard 9600 bps Modem*/
+	{	"PNPC104",		0	},
+	/*  Standard 14400 bps Modem*/
+	{	"PNPC105",		0	},
+	/*  Standard 28800 bps Modem*/
+	{	"PNPC106",		0	},
+	/*  Standard Modem */
+	{	"PNPC107",		0	},
+	/* Standard 9600 bps Modem */
+	{	"PNPC108",		0	},
+	/* Standard 14400 bps Modem */
+	{	"PNPC109",		0	},
+	/* Standard 28800 bps Modem */
+	{	"PNPC10A",		0	},
+	/* Standard Modem */
+	{	"PNPC10B",		0	},
+	/* Standard 9600 bps Modem */
+	{	"PNPC10C",		0	},
+	/* Standard 14400 bps Modem */
+	{	"PNPC10D",		0	},
+	/* Standard 28800 bps Modem */
+	{	"PNPC10E",		0	},
+	/* Standard Modem */
+	{	"PNPC10F",		0	},
+	/* Standard PCMCIA Card Modem */
+	{	"PNP2000",		0	},
+	/* Rockwell */
+	/* Modular Technology */
+	/* Rockwell 33.6 DPF Internal PnP */
+	/* Modular Technology 33.6 Internal PnP */
+	{	"ROK0030",		0	},
+	/* Kortex International */
+	/* KORTEX 14400 Externe PnP */
+	{	"ROK0100",		0	},
+	/* Rockwell 28.8 */
+	{	"ROK4120",		0	},
+	/* Viking Components, Inc */
+	/* Viking 28.8 INTERNAL Fax+Data+Voice PnP */
+	{	"ROK4920",		0	},
+	/* Rockwell */
+	/* British Telecom */
+	/* Modular Technology */
+	/* Rockwell 33.6 DPF External PnP */
+	/* BT Prologue 33.6 External PnP */
+	/* Modular Technology 33.6 External PnP */
+	{	"RSS00A0",		0	},
+	/* Viking 56K FAX INT */
+	{	"RSS0262",		0	},
+	/* K56 par,VV,Voice,Speakphone,AudioSpan,PnP */
+	{       "RSS0250",              0       },
+	/* SupraExpress 28.8 Data/Fax PnP modem */
+	{	"SUP1310",		0	},
+	/* SupraExpress 33.6 Data/Fax PnP modem */
+	{	"SUP1421",		0	},
+	/* SupraExpress 33.6 Data/Fax PnP modem */
+	{	"SUP1590",		0	},
+	/* SupraExpress 336i Sp ASVD */
+	{	"SUP1620",		0	},
+	/* SupraExpress 33.6 Data/Fax PnP modem */
+	{	"SUP1760",		0	},
+	/* SupraExpress 56i Sp Intl */
+	{	"SUP2171",		0	},
+	/* Phoebe Micro */
+	/* Phoebe Micro 33.6 Data Fax 1433VQH Plug & Play */
+	{	"TEX0011",		0	},
+	/* Archtek America Corp. */
+	/* Archtek SmartLink Modem 3334BT Plug & Play */
+	{	"UAC000F",		0	},
+	/* 3Com Corp. */
+	/* Gateway Telepath IIvi 33.6 */
+	{	"USR0000",		0	},
+	/* U.S. Robotics Sporster 33.6K Fax INT PnP */
+	{	"USR0002",		0	},
+	/*  Sportster Vi 14.4 PnP FAX Voicemail */
+	{	"USR0004",		0	},
+	/* U.S. Robotics 33.6K Voice INT PnP */
+	{	"USR0006",		0	},
+	/* U.S. Robotics 33.6K Voice EXT PnP */
+	{	"USR0007",		0	},
+	/* U.S. Robotics Courier V.Everything INT PnP */
+	{	"USR0009",		0	},
+	/* U.S. Robotics 33.6K Voice INT PnP */
+	{	"USR2002",		0	},
+	/* U.S. Robotics 56K Voice INT PnP */
+	{	"USR2070",		0	},
+	/* U.S. Robotics 56K Voice EXT PnP */
+	{	"USR2080",		0	},
+	/* U.S. Robotics 56K FAX INT */
+	{	"USR3031",		0	},
+	/* U.S. Robotics 56K FAX INT */
+	{	"USR3050",		0	},
+	/* U.S. Robotics 56K Voice INT PnP */
+	{	"USR3070",		0	},
+	/* U.S. Robotics 56K Voice EXT PnP */
+	{	"USR3080",		0	},
+	/* U.S. Robotics 56K Voice INT PnP */
+	{	"USR3090",		0	},
+	/* U.S. Robotics 56K Message  */
+	{	"USR9100",		0	},
+	/* U.S. Robotics 56K FAX EXT PnP*/
+	{	"USR9160",		0	},
+	/* U.S. Robotics 56K FAX INT PnP*/
+	{	"USR9170",		0	},
+	/* U.S. Robotics 56K Voice EXT PnP*/
+	{	"USR9180",		0	},
+	/* U.S. Robotics 56K Voice INT PnP*/
+	{	"USR9190",		0	},
+	/* Wacom tablets */
+	{	"WACF004",		0	},
+	{	"WACF005",		0	},
+	{       "WACF006",              0       },
+	/* Compaq touchscreen */
+	{       "FPI2002",              0 },
+	/* Fujitsu Stylistic touchscreens */
+	{       "FUJ02B2",              0 },
+	{       "FUJ02B3",              0 },
+	/* Fujitsu Stylistic LT touchscreens */
+	{       "FUJ02B4",              0 },
+	/* Passive Fujitsu Stylistic touchscreens */
+	{       "FUJ02B6",              0 },
+	{       "FUJ02B7",              0 },
+	{       "FUJ02B8",              0 },
+	{       "FUJ02B9",              0 },
+	{       "FUJ02BC",              0 },
+	/* Rockwell's (PORALiNK) 33600 INT PNP */
+	{	"WCI0003",		0	},
+	/* Unknown PnP modems */
+	{	"PNPCXXX",		UNKNOWN_DEV	},
+	/* More unknown PnP modems */
+	{	"PNPDXXX",		UNKNOWN_DEV	},
+	{	"",			0	}
+};
+
+static int rt_16550_pnp_probe(struct pnp_dev *dev,
+			       const struct pnp_device_id *dev_id)
+{
+	int i;
+
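+	/*
+	 * Accept the PnP device only if its first port resource matches
+	 * one of the "io=" module parameters; adopt the PnP-reported IRQ
+	 * for that slot if none was configured yet.
+	 */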
+	for (i = 0; i < MAX_DEVICES; i++)
+		if (pnp_port_valid(dev, 0) &&
+		    pnp_port_start(dev, 0) == io[i]) {
+			if (!irq[i])
+				irq[i] = pnp_irq(dev, 0);
+			return 0;
+		}
+
+	return -ENODEV;
+}
+
+static struct pnp_driver rt_16550_pnp_driver = {
+	.name		= RT_16550_DRIVER_NAME,
+	.id_table	= rt_16550_pnp_tbl,
+	.probe		= rt_16550_pnp_probe,
+};
+
+static int pnp_registered;
+
+static inline void rt_16550_pnp_init(void)
+{
+	if (pnp_register_driver(&rt_16550_pnp_driver) == 0)
+		pnp_registered = 1;
+}
+
+static inline void rt_16550_pnp_cleanup(void)
+{
+	if (pnp_registered)
+		pnp_unregister_driver(&rt_16550_pnp_driver);
+}
+
+#else /* !CONFIG_PNP || !(..._16550A_IO || ..._16550A_ANY) */
+
+#define rt_16550_pnp_init()	do { } while (0)
+#define rt_16550_pnp_cleanup()	do { } while (0)
+
+#endif /* !CONFIG_PNP || !(..._16550A_IO || ..._16550A_ANY) */
+++ linux-patched/drivers/xenomai/serial/Kconfig	2022-03-21 12:58:31.234870970 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/serial/mpc52xx_uart.c	1970-01-01 01:00:00.000000000 +0100
+menu "Serial drivers"
+
+config XENO_DRIVERS_16550A
+	tristate "16550A UART driver"
+	help
+	Real-time UART driver for 16550A compatible controllers. See
+	doc/txt/16550A-driver.txt for more details.
+
+choice
+	prompt "Hardware access mode"
+	depends on XENO_DRIVERS_16550A
+	default XENO_DRIVERS_16550A_PIO
+
+config XENO_DRIVERS_16550A_PIO
+	bool "Port-based I/O"
+	help
+	Hardware access only via I/O ports. Use module parameter
+	"io=<port>[,<port>[,...]]" to specify the base port of a device.
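+	Example: "io=0x3f8,0x2f8" for devices at the standard COM1/COM2 ports.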
+
+config XENO_DRIVERS_16550A_MMIO
+	bool "Memory-mapped I/O"
+	help
+	Hardware access only via memory mapping. Use module parameter
+	"mem=<addr>[,<addr>[,...]]" to specify the physical base address of
+	a device.
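+	Example: "mem=0xe0000000" (the physical base address is board-specific).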
+
+config XENO_DRIVERS_16550A_ANY
+	bool "Any access mode"
+	help
+	Decide at module load-time (or via kernel parameter) which access
+	mode to use for which device. This mode is useful when devices of
+	both types can be present in a system, even at the same time.
+
+	Both "io" and "mem" module parameters are available, but only one of
+	them can be applied to a particular device. Use, e.g.,
+	"io=0x3f8,0 mem=0,0xe0000000" to address device 1 via IO base port
+	0x3f8 and device 2 via physical base address 0xe0000000.
+
+endchoice
+
+config XENO_DRIVERS_16550A_PCI
+	depends on PCI && (XENO_DRIVERS_16550A_PIO || XENO_DRIVERS_16550A_ANY)
+	bool "PCI board support"
+	default n
+	help
+
+	This option activates support for PCI serial boards.
+
+config XENO_DRIVERS_16550A_PCI_MOXA
+	depends on XENO_DRIVERS_16550A_PCI
+	bool "Moxa PCI boards"
+	default n
+	help
+
+	This option activates support for the following Moxa boards:
+	PCI Serial Boards:
+	  C104H/PCI, C168H/PCI
+	  CP-114, CP-132
+	Universal PCI Serial Boards:
+	  CP-102U, CP-102UL, CP-104U
+	  CP-112UL, CP-114UL, CP-118U
+	  CP-132U, CP-134U, CP-138U
+	  CP-168U
+
+config XENO_DRIVERS_MPC52XX_UART
+	depends on PPC_MPC52xx
+	tristate "MPC52xx PSC UART driver"
+	help
+	Real-time UART driver for the PSC on the MPC5200 processor.
+
+config XENO_DRIVERS_IMX_UART
+	depends on ARCH_IMX || ARCH_MXC
+	tristate "RT IMX UART driver"
+	select RATIONAL
+	help
+	Real-time UART driver for the Freescale Semiconductor MXC Internal
+	UART compatible controllers.
+
+endmenu
+++ linux-patched/drivers/xenomai/serial/mpc52xx_uart.c	2022-03-21 12:58:31.226871048 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/serial/16550A_io.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2011 Wolfgang Grandegger <wg@denx.de>.
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+
+#include <asm/mpc52xx.h>
+#include <asm/mpc52xx_psc.h>
+
+#include <rtdm/serial.h>
+#include <rtdm/driver.h>
+
+MODULE_DESCRIPTION("RTDM-based driver for MPC52xx UARTs");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@denx.de>");
+MODULE_VERSION("1.0.0");
+MODULE_LICENSE("GPL");
+
+#define RT_MPC52XX_UART_DRVNAM	"xeno_mpc52xx_uart"
+
+#define IN_BUFFER_SIZE		512
+#define OUT_BUFFER_SIZE		512
+
+#define PARITY_MASK		0x03
+#define DATA_BITS_MASK		0x03
+#define STOP_BITS_MASK		0x01
+#define FIFO_MASK		0xC0
+#define EVENT_MASK		0x0F
+
+
+struct rt_mpc52xx_uart_port {
+	const struct device *dev;
+	struct mpc52xx_psc __iomem *psc;
+	struct mpc52xx_psc_fifo __iomem *fifo;
+	unsigned int uartclk;
+	int irq;
+	int num;
+};
+
+struct rt_mpc52xx_uart_ctx {
+	struct rtser_config config;	/* current device configuration */
+
+	rtdm_irq_t irq_handle;		/* device IRQ handle */
+	rtdm_lock_t lock;		/* lock to protect context struct */
+
+	int in_head;			/* RX ring buffer, head pointer */
+	int in_tail;			/* RX ring buffer, tail pointer */
+	size_t in_npend;		/* pending bytes in RX ring */
+	int in_nwait;			/* bytes the user waits for */
+	rtdm_event_t in_event;		/* raised to unblock reader */
+	char in_buf[IN_BUFFER_SIZE];	/* RX ring buffer */
+	volatile unsigned long in_lock;	/* single-reader lock */
+	uint64_t *in_history;		/* RX timestamp buffer */
+
+	int out_head;			/* TX ring buffer, head pointer */
+	int out_tail;			/* TX ring buffer, tail pointer */
+	size_t out_npend;		/* pending bytes in TX ring */
+	rtdm_event_t out_event;		/* raised to unblock writer */
+	char out_buf[OUT_BUFFER_SIZE];	/* TX ring buffer */
+	rtdm_mutex_t out_lock;		/* single-writer mutex */
+
+	uint64_t last_timestamp;	/* timestamp of last event */
+	int ioc_events;			/* recorded events */
+	rtdm_event_t ioc_event;		/* raised to unblock event waiter */
+	volatile unsigned long ioc_event_lock;	/* single-waiter lock */
+
+
+	int mcr_status;			/* emulated MCR cache */
+	int status;			/* cache for LSR + soft-states */
+	int saved_errors;		/* error cache for RTIOC_GET_STATUS */
+
+	unsigned int imr_status;	/* interrupt mask register cache */
+	int tx_empty;			/* shift register empty flag */
+
+	struct rt_mpc52xx_uart_port *port; /* Port related data */
+};
+
+static const struct rtser_config default_config = {
+	.config_mask = 0xFFFF,
+	.baud_rate = RTSER_DEF_BAUD,
+	.parity = RTSER_DEF_PARITY,
+	.data_bits = RTSER_DEF_BITS,
+	.stop_bits = RTSER_DEF_STOPB,
+	.handshake = RTSER_DEF_HAND,
+	.fifo_depth = RTSER_DEF_FIFO_DEPTH,
+	.rx_timeout = RTSER_DEF_TIMEOUT,
+	.tx_timeout = RTSER_DEF_TIMEOUT,
+	.event_timeout = RTSER_DEF_TIMEOUT,
+	.timestamp_history = RTSER_DEF_TIMESTAMP_HISTORY,
+	.event_mask = RTSER_DEF_EVENT_MASK,
+	.rs485 = RTSER_DEF_RS485,
+};
+
+/* lookup table for matching device nodes to index numbers */
+static struct device_node *rt_mpc52xx_uart_nodes[MPC52xx_PSC_MAXNUM];
+
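+/* Program the PSC RX/TX FIFO control and alarm thresholds. */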
+static inline void psc_fifo_init(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	out_8(&ctx->port->fifo->rfcntl, 0x00);
+	out_be16(&ctx->port->fifo->rfalarm, 0x1ff);
+	out_8(&ctx->port->fifo->tfcntl, 0x07);
+	out_be16(&ctx->port->fifo->tfalarm, 0x80);
+}
+
+static inline int psc_raw_rx_rdy(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	return in_be16(&ctx->port->psc->mpc52xx_psc_status) &
+		MPC52xx_PSC_SR_RXRDY;
+}
+
+static inline int psc_raw_tx_rdy(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	return in_be16(&ctx->port->psc->mpc52xx_psc_status) &
+		MPC52xx_PSC_SR_TXRDY;
+}
+
+static inline int psc_rx_rdy(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	return in_be16(&ctx->port->psc->mpc52xx_psc_isr) &
+		ctx->imr_status & MPC52xx_PSC_IMR_RXRDY;
+}
+
+static int psc_tx_rdy(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	return in_be16(&ctx->port->psc->mpc52xx_psc_isr) &
+		ctx->imr_status & MPC52xx_PSC_IMR_TXRDY;
+}
+
+static inline int psc_tx_empty(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	return in_be16(&ctx->port->psc->mpc52xx_psc_status) &
+		MPC52xx_PSC_SR_TXEMP;
+}
+
+static inline void psc_start_tx(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	ctx->imr_status |= MPC52xx_PSC_IMR_TXRDY;
+	out_be16(&ctx->port->psc->mpc52xx_psc_imr, ctx->imr_status);
+}
+
+static inline void psc_stop_tx(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	ctx->imr_status &= ~MPC52xx_PSC_IMR_TXRDY;
+	out_be16(&ctx->port->psc->mpc52xx_psc_imr, ctx->imr_status);
+}
+
+static inline void psc_stop_rx(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	ctx->imr_status &= ~MPC52xx_PSC_IMR_RXRDY;
+	out_be16(&ctx->port->psc->mpc52xx_psc_imr, ctx->imr_status);
+}
+
+static inline void psc_write_char(struct rt_mpc52xx_uart_ctx *ctx,
+				  unsigned char c)
+{
+	out_8(&ctx->port->psc->mpc52xx_psc_buffer_8, c);
+}
+
+static inline unsigned char psc_read_char(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	return in_8(&ctx->port->psc->mpc52xx_psc_buffer_8);
+}
+
+static inline void psc_disable_ints(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	ctx->imr_status = 0;
+	out_be16(&ctx->port->psc->mpc52xx_psc_imr, ctx->imr_status);
+}
+
+static void psc_set_mcr(struct rt_mpc52xx_uart_ctx *ctx,
+			unsigned int mcr)
+{
+	if (mcr & RTSER_MCR_RTS)
+		out_8(&ctx->port->psc->op1, MPC52xx_PSC_OP_RTS);
+	else
+		out_8(&ctx->port->psc->op0, MPC52xx_PSC_OP_RTS);
+}
+
+/* FIXME: status interrupts not yet handled properly */
+static unsigned int psc_get_msr(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	unsigned int msr = RTSER_MSR_DSR;
+	u8 status = in_8(&ctx->port->psc->mpc52xx_psc_ipcr);
+
+	if (!(status & MPC52xx_PSC_CTS))
+		msr |= RTSER_MSR_CTS;
+	if (!(status & MPC52xx_PSC_DCD))
+		msr |= RTSER_MSR_DCD;
+
+	return msr;
+}
+
+static void psc_enable_ms(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	struct mpc52xx_psc *psc = ctx->port->psc;
+
+	/* clear D_*-bits by reading them */
+	in_8(&psc->mpc52xx_psc_ipcr);
+	/* enable CTS and DCD as IPC interrupts */
+	out_8(&psc->mpc52xx_psc_acr, MPC52xx_PSC_IEC_CTS | MPC52xx_PSC_IEC_DCD);
+
+	ctx->imr_status |= MPC52xx_PSC_IMR_IPC;
+	out_be16(&psc->mpc52xx_psc_imr, ctx->imr_status);
+}
+
+static void psc_disable_ms(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	struct mpc52xx_psc *psc = ctx->port->psc;
+
+	/* disable CTS and DCD as IPC interrupts */
+	out_8(&psc->mpc52xx_psc_acr, 0);
+
+	ctx->imr_status &= ~MPC52xx_PSC_IMR_IPC;
+	out_be16(&psc->mpc52xx_psc_imr, ctx->imr_status);
+}
+
+static struct of_device_id mpc5200_gpio_ids[] = {
+	{ .compatible = "fsl,mpc5200-gpio", },
+	{ .compatible = "mpc5200-gpio", },
+	{}
+};
+
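+/*
+ * PSC6 pins are multiplexed with other functions: clear the PSC6 field
+ * (bits 20-23) of the GPIO port_config register and write value 0x5,
+ * assumed here to select the UART pin configuration (matching the mask
+ * and value used below).
+ */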
+static void rt_mpc52xx_uart_init_hw(struct rt_mpc52xx_uart_port *port)
+{
+	struct mpc52xx_gpio __iomem *gpio;
+	struct device_node *gpio_np;
+	u32 port_config;
+
+	if (port->num == 6) {
+		gpio_np = of_find_matching_node(NULL, mpc5200_gpio_ids);
+		gpio = of_iomap(gpio_np, 0);
+		of_node_put(gpio_np);
+		if (!gpio) {
+			dev_err(port->dev, "PSC%d port_config: "
+				"couldn't map gpio ids\n", port->num);
+			return;
+		}
+		port_config = in_be32(&gpio->port_config);
+		port_config &= 0xFF0FFFFF; /* port config for PSC6 */
+		port_config |= 0x00500000;
+		dev_dbg(port->dev, "PSC%d port_config: old:%x new:%x\n",
+			port->num, in_be32(&gpio->port_config), port_config);
+		out_be32(&gpio->port_config, port_config);
+		iounmap(gpio);
+	}
+}
+
+static inline void rt_mpc52xx_uart_put_char(struct rt_mpc52xx_uart_ctx *ctx,
+					    uint64_t *timestamp,
+					    unsigned char ch)
+{
+	ctx->in_buf[ctx->in_tail] = ch;
+	if (ctx->in_history)
+		ctx->in_history[ctx->in_tail] = *timestamp;
+	ctx->in_tail = (ctx->in_tail + 1) & (IN_BUFFER_SIZE - 1);
+
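+	/*
+	 * Ring buffer overflow: the new byte just overwrote unread data,
+	 * so flag a soft overrun and keep the pending count capped.
+	 */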
+	if (++ctx->in_npend > IN_BUFFER_SIZE) {
+		ctx->status |= RTSER_SOFT_OVERRUN_ERR;
+		ctx->in_npend--;
+	}
+}
+
+static inline int rt_mpc52xx_uart_rx_interrupt(struct rt_mpc52xx_uart_ctx *ctx,
+					       uint64_t *timestamp)
+{
+	int rbytes = 0;
+	int psc_status;
+
+	psc_status = in_be16(&ctx->port->psc->mpc52xx_psc_status);
+	while (psc_status & MPC52xx_PSC_SR_RXRDY) {
+		/* read input character */
+		rt_mpc52xx_uart_put_char(ctx, timestamp, psc_read_char(ctx));
+		rbytes++;
+
+		/* save new errors */
+		if (psc_status & (MPC52xx_PSC_SR_OE | MPC52xx_PSC_SR_PE |
+				  MPC52xx_PSC_SR_FE | MPC52xx_PSC_SR_RB)) {
+			if (psc_status & MPC52xx_PSC_SR_PE)
+				ctx->status |= RTSER_LSR_PARITY_ERR;
+			if (psc_status & MPC52xx_PSC_SR_FE)
+				ctx->status |= RTSER_LSR_FRAMING_ERR;
+			if (psc_status & MPC52xx_PSC_SR_RB)
+				ctx->status |= RTSER_LSR_BREAK_IND;
+
+			/*
+			 * Overrun is special, since it's reported
+			 * immediately, and doesn't affect the current
+			 * character.
+			 */
+			if (psc_status & MPC52xx_PSC_SR_OE) {
+				ctx->status |= RTSER_LSR_OVERRUN_ERR;
+				rt_mpc52xx_uart_put_char(ctx, timestamp, 0);
+				rbytes++;
+			}
+
+			/* Clear error condition */
+			out_8(&ctx->port->psc->command,
+			      MPC52xx_PSC_RST_ERR_STAT);
+		}
+
+		psc_status = in_be16(&ctx->port->psc->mpc52xx_psc_status);
+	}
+
+	return rbytes;
+}
+
+static inline int rt_mpc52xx_uart_tx_interrupt(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	while (psc_raw_tx_rdy(ctx) && (ctx->out_npend > 0)) {
+		if (ctx->config.rs485 &&
+		    (ctx->mcr_status & RTSER_MCR_RTS) == 0) {
+			/* switch RTS */
+			ctx->mcr_status |= RTSER_MCR_RTS;
+			dev_dbg(ctx->port->dev, "Set RTS, mcr_status=%#x\n",
+				ctx->mcr_status);
+			psc_set_mcr(ctx, ctx->mcr_status);
+		}
+		if (ctx->config.rs485 ||
+		    ((ctx->config.event_mask & RTSER_EVENT_TXEMPTY) &&
+		     (ctx->imr_status & MPC52xx_PSC_IMR_TXEMP) == 0)) {
+			/* enable tx-empty interrupt */
+			ctx->imr_status |= MPC52xx_PSC_IMR_TXEMP;
+			dev_dbg(ctx->port->dev, "Enable TXEMP interrupt, "
+				"imr_status=%#x\n", ctx->imr_status);
+			out_be16(&ctx->port->psc->mpc52xx_psc_imr,
+				 ctx->imr_status);
+		}
+
+		psc_write_char(ctx, ctx->out_buf[ctx->out_head++]);
+		ctx->out_head &= OUT_BUFFER_SIZE - 1;
+		ctx->out_npend--;
+	}
+
+	return ctx->out_npend;
+}
+
+static int rt_mpc52xx_uart_interrupt(rtdm_irq_t *irq_context)
+{
+	struct rt_mpc52xx_uart_ctx *ctx;
+	uint64_t timestamp = rtdm_clock_read();
+	int rbytes = 0;
+	int events = 0;
+	int ret = RTDM_IRQ_NONE;
+	int goon = 1;
+	int n;
+
+	ctx = rtdm_irq_get_arg(irq_context, struct rt_mpc52xx_uart_ctx);
+
+	rtdm_lock_get(&ctx->lock);
+
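+	/*
+	 * Keep looping while the TX path reports pending output
+	 * (rt_mpc52xx_uart_tx_interrupt() returns the remaining byte
+	 * count), so the RX and modem-status sources get re-checked too.
+	 */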
+	while (goon) {
+		goon = 0;
+		if (psc_rx_rdy(ctx)) {
+			dev_dbg(ctx->port->dev, "RX interrupt\n");
+			n = rt_mpc52xx_uart_rx_interrupt(ctx, &timestamp);
+			if (n) {
+				rbytes += n;
+				events |= RTSER_EVENT_RXPEND;
+			}
+		}
+		if (psc_tx_rdy(ctx))
+			goon |= rt_mpc52xx_uart_tx_interrupt(ctx);
+
+		if (psc_tx_empty(ctx)) {
+			if (ctx->config.rs485 &&
+			    (ctx->mcr_status & RTSER_MCR_RTS)) {
+				/* reset RTS */
+				ctx->mcr_status &= ~RTSER_MCR_RTS;
+				dev_dbg(ctx->port->dev, "Reset RTS, "
+					"mcr_status=%#x\n", ctx->mcr_status);
+				psc_set_mcr(ctx, ctx->mcr_status);
+			}
+			/* disable tx-empty interrupt */
+			ctx->imr_status &= ~MPC52xx_PSC_IMR_TXEMP;
+			dev_dbg(ctx->port->dev, "Disable TXEMP interrupt, "
+				"imr_status=%#x\n", ctx->imr_status);
+			out_be16(&ctx->port->psc->mpc52xx_psc_imr,
+				 ctx->imr_status);
+
+			events |= RTSER_EVENT_TXEMPTY;
+			ctx->tx_empty = 1;
+		}
+
+		if (ctx->config.event_mask &
+		    (RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO)) {
+			u8 status = in_8(&ctx->port->psc->mpc52xx_psc_ipcr);
+
+			if (status & MPC52xx_PSC_D_DCD)
+				events |= (status & MPC52xx_PSC_DCD) ?
+					RTSER_EVENT_MODEMLO :
+					RTSER_EVENT_MODEMHI;
+			if (status & MPC52xx_PSC_D_CTS)
+				events |= (status & MPC52xx_PSC_CTS) ?
+					RTSER_EVENT_MODEMLO :
+					RTSER_EVENT_MODEMHI;
+			dev_dbg(ctx->port->dev, "Modem line changed, "
+				"events=%#x\n", events);
+		}
+
+		ret = RTDM_IRQ_HANDLED;
+	}
+
+	if (ctx->in_nwait > 0) {
+		if ((ctx->in_nwait <= rbytes) || ctx->status) {
+			ctx->in_nwait = 0;
+			rtdm_event_signal(&ctx->in_event);
+		} else
+			ctx->in_nwait -= rbytes;
+	}
+
+	if (ctx->status)
+		events |= RTSER_EVENT_ERRPEND;
+
+	if (events & ctx->config.event_mask) {
+		int old_events = ctx->ioc_events;
+
+		ctx->last_timestamp = timestamp;
+		ctx->ioc_events = events;
+
+		if (!old_events)
+			rtdm_event_signal(&ctx->ioc_event);
+	}
+
+	if ((ctx->imr_status & MPC52xx_PSC_IMR_TXRDY) &&
+	    (ctx->out_npend == 0)) {
+		psc_stop_tx(ctx);
+		rtdm_event_signal(&ctx->out_event);
+	}
+
+	rtdm_lock_put(&ctx->lock);
+
+	return ret;
+}
+
+
+static int rt_mpc52xx_uart_set_config(struct rt_mpc52xx_uart_ctx *ctx,
+				      const struct rtser_config *config,
+				      uint64_t **in_history_ptr)
+{
+	rtdm_lockctx_t lock_ctx;
+	int err = 0;
+
+	/* make line configuration atomic and IRQ-safe */
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	if (config->config_mask & RTSER_SET_BAUD)
+		ctx->config.baud_rate = config->baud_rate;
+	if (config->config_mask & RTSER_SET_PARITY)
+		ctx->config.parity = config->parity & PARITY_MASK;
+	if (config->config_mask & RTSER_SET_DATA_BITS)
+		ctx->config.data_bits = config->data_bits & DATA_BITS_MASK;
+	if (config->config_mask & RTSER_SET_STOP_BITS)
+		ctx->config.stop_bits = config->stop_bits & STOP_BITS_MASK;
+	if (config->config_mask & RTSER_SET_HANDSHAKE)
+		ctx->config.handshake = config->handshake;
+
+	if (config->config_mask & (RTSER_SET_PARITY |
+				   RTSER_SET_DATA_BITS | RTSER_SET_STOP_BITS |
+				   RTSER_SET_BAUD | RTSER_SET_HANDSHAKE)) {
+		struct mpc52xx_psc *psc = ctx->port->psc;
+		unsigned char mr1 = 0, mr2 = 0;
+		unsigned int divisor;
+		u16 prescaler;
+
+		switch (ctx->config.data_bits) {
+		case RTSER_5_BITS:
+			mr1 |= MPC52xx_PSC_MODE_5_BITS;
+			break;
+		case RTSER_6_BITS:
+			mr1 |= MPC52xx_PSC_MODE_6_BITS;
+			break;
+		case RTSER_7_BITS:
+			mr1 |= MPC52xx_PSC_MODE_7_BITS;
+			break;
+		case RTSER_8_BITS:
+		default:
+			mr1 |= MPC52xx_PSC_MODE_8_BITS;
+			break;
+		}
+
+		switch (ctx->config.parity) {
+		case RTSER_ODD_PARITY:
+			mr1 |= MPC52xx_PSC_MODE_PARODD;
+			break;
+		case RTSER_EVEN_PARITY:
+			mr1 |= MPC52xx_PSC_MODE_PAREVEN;
+			break;
+		case RTSER_NO_PARITY:
+		default:
+			mr1 |= MPC52xx_PSC_MODE_PARNONE;
+			break;
+		}
+
+		if (ctx->config.stop_bits == RTSER_2_STOPB)
+			mr2 |= (ctx->config.data_bits == RTSER_5_BITS) ?
+				MPC52xx_PSC_MODE_ONE_STOP_5_BITS :
+				MPC52xx_PSC_MODE_TWO_STOP;
+		else
+			mr2 |= MPC52xx_PSC_MODE_ONE_STOP;
+
+		if (ctx->config.handshake == RTSER_RTSCTS_HAND) {
+			mr1 |= MPC52xx_PSC_MODE_RXRTS;
+			mr2 |= MPC52xx_PSC_MODE_TXCTS;
+		} else if (config->config_mask & RTSER_SET_HANDSHAKE) {
+			ctx->mcr_status =
+				RTSER_MCR_DTR | RTSER_MCR_RTS | RTSER_MCR_OUT2;
+			psc_set_mcr(ctx, ctx->mcr_status);
+		}
+
+		/* Reset the TX & RX */
+		out_8(&psc->command, MPC52xx_PSC_RST_RX);
+		out_8(&psc->command, MPC52xx_PSC_RST_TX);
+
+		/* Send new mode settings */
+		out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
+		out_8(&psc->mode, mr1);
+		out_8(&psc->mode, mr2);
+
+		/* Set baudrate */
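+		/*
+		 * The prescaler value 0xdd00 selects clock/32 (the divisor
+		 * computation below assumes it); adding 16 * baud_rate
+		 * before dividing by 32 * baud_rate rounds the divisor to
+		 * the nearest integer.
+		 */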
+		divisor = (ctx->port->uartclk + 16 * ctx->config.baud_rate) /
+			(32 * ctx->config.baud_rate);
+		prescaler = 0xdd00;
+		out_be16(&psc->mpc52xx_psc_clock_select, prescaler);
+		out_8(&psc->ctur, divisor >> 8);
+		out_8(&psc->ctlr, divisor & 0xff);
+
+		dev_info(ctx->port->dev,
+			 "mr1=%#x mr2=%#x baud=%d divisor=%d prescaler=%x\n",
+			 mr1, mr2, ctx->config.baud_rate, divisor, prescaler);
+
+		/* Reenable TX & RX */
+		out_8(&psc->command, MPC52xx_PSC_TX_ENABLE);
+		out_8(&psc->command, MPC52xx_PSC_RX_ENABLE);
+
+		/* Enable RX */
+		ctx->imr_status |= MPC52xx_PSC_IMR_RXRDY;
+		out_be16(&ctx->port->psc->mpc52xx_psc_imr, ctx->imr_status);
+
+		ctx->status = 0;
+		ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
+
+	}
+
+	if (config->config_mask & RTSER_SET_RS485) {
+		ctx->config.rs485 = config->rs485;
+		if (config->rs485) {
+			/* reset RTS */
+			ctx->mcr_status &= ~RTSER_MCR_RTS;
+			dev_dbg(ctx->port->dev, "Reset RTS, mcr_status=%#x\n",
+				ctx->mcr_status);
+			psc_set_mcr(ctx, ctx->mcr_status);
+		}
+	}
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	/* Timeout manipulation is not atomic. The user is supposed to take
+	   care not to use and change timeouts at the same time. */
+	if (config->config_mask & RTSER_SET_TIMEOUT_RX)
+		ctx->config.rx_timeout = config->rx_timeout;
+	if (config->config_mask & RTSER_SET_TIMEOUT_TX)
+		ctx->config.tx_timeout = config->tx_timeout;
+	if (config->config_mask & RTSER_SET_TIMEOUT_EVENT)
+		ctx->config.event_timeout = config->event_timeout;
+
+	if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+		/* change timestamp history atomically */
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		if (config->timestamp_history & RTSER_RX_TIMESTAMP_HISTORY) {
+			if (!ctx->in_history) {
+				ctx->in_history = *in_history_ptr;
+				*in_history_ptr = NULL;
+				if (!ctx->in_history)
+					err = -ENOMEM;
+			}
+		} else {
+			*in_history_ptr = ctx->in_history;
+			ctx->in_history = NULL;
+		}
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+	}
+
+	if (config->config_mask & RTSER_SET_EVENT_MASK) {
+		/* change event mask atomically */
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		ctx->config.event_mask = config->event_mask & EVENT_MASK;
+		ctx->ioc_events = 0;
+
+		if ((config->event_mask & RTSER_EVENT_RXPEND) &&
+		    (ctx->in_npend > 0))
+			ctx->ioc_events |= RTSER_EVENT_RXPEND;
+
+		if ((config->event_mask & RTSER_EVENT_ERRPEND) &&
+		    ctx->status)
+			ctx->ioc_events |= RTSER_EVENT_ERRPEND;
+
+		if ((config->event_mask & RTSER_EVENT_TXEMPTY) &&
+		    !ctx->out_npend && ctx->tx_empty)
+			ctx->ioc_events |= RTSER_EVENT_TXEMPTY;
+
+		if (config->event_mask &
+		    (RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO))
+			psc_enable_ms(ctx);
+		else
+			psc_disable_ms(ctx);
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+	}
+
+	return err;
+}
+
+void rt_mpc52xx_uart_cleanup_ctx(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	rtdm_event_destroy(&ctx->in_event);
+	rtdm_event_destroy(&ctx->out_event);
+	rtdm_event_destroy(&ctx->ioc_event);
+	rtdm_mutex_destroy(&ctx->out_lock);
+}
+
+static int rt_mpc52xx_uart_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rt_mpc52xx_uart_ctx *ctx;
+	rtdm_lockctx_t lock_ctx;
+	uint64_t *dummy;
+	int err;
+
+	ctx = rtdm_fd_to_private(fd);
+	ctx->port = (struct rt_mpc52xx_uart_port *)rtdm_fd_device(fd)->device_data;
+
+	/* IPC initialisation - cannot fail with used parameters */
+	rtdm_lock_init(&ctx->lock);
+	rtdm_event_init(&ctx->in_event, 0);
+	rtdm_event_init(&ctx->out_event, 0);
+	rtdm_event_init(&ctx->ioc_event, 0);
+	rtdm_mutex_init(&ctx->out_lock);
+
+	ctx->in_head = 0;
+	ctx->in_tail = 0;
+	ctx->in_npend = 0;
+	ctx->in_nwait = 0;
+	ctx->in_lock = 0;
+	ctx->in_history = NULL;
+
+	ctx->out_head = 0;
+	ctx->out_tail = 0;
+	ctx->out_npend = 0;
+
+	ctx->ioc_events = 0;
+	ctx->ioc_event_lock = 0;
+	ctx->status = 0;
+	ctx->saved_errors = 0;
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	psc_disable_ints(ctx);
+
+	/* Reset/activate the port, clear and enable interrupts */
+	out_8(&ctx->port->psc->command, MPC52xx_PSC_RST_RX);
+	out_8(&ctx->port->psc->command, MPC52xx_PSC_RST_TX);
+
+	out_be32(&ctx->port->psc->sicr, 0);	/* UART mode DCD ignored */
+
+	psc_fifo_init(ctx);
+
+	out_8(&ctx->port->psc->command, MPC52xx_PSC_TX_ENABLE);
+	out_8(&ctx->port->psc->command, MPC52xx_PSC_RX_ENABLE);
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	rt_mpc52xx_uart_set_config(ctx, &default_config, &dummy);
+
+	err = rtdm_irq_request(&ctx->irq_handle, ctx->port->irq,
+			       rt_mpc52xx_uart_interrupt, 0,
+			       rtdm_fd_device(fd)->name, ctx);
+	if (err) {
+		psc_set_mcr(ctx, 0);
+		rt_mpc52xx_uart_cleanup_ctx(ctx);
+
+		return err;
+	}
+
+	return 0;
+}
+
+static void rt_mpc52xx_uart_close(struct rtdm_fd *fd)
+{
+	struct rt_mpc52xx_uart_ctx *ctx;
+	uint64_t *in_history;
+	rtdm_lockctx_t lock_ctx;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	/* reset DTR and RTS */
+	psc_set_mcr(ctx, 0);
+
+	psc_disable_ints(ctx);
+
+	in_history = ctx->in_history;
+	ctx->in_history = NULL;
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	rtdm_irq_free(&ctx->irq_handle);
+
+	rt_mpc52xx_uart_cleanup_ctx(ctx);
+
+	kfree(in_history);
+}
+
+static int rt_mpc52xx_uart_ioctl(struct rtdm_fd *fd,
+				 unsigned int request, void *arg)
+{
+	rtdm_lockctx_t lock_ctx;
+	struct rt_mpc52xx_uart_ctx *ctx;
+	int err = 0;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	switch (request) {
+	case RTSER_RTIOC_GET_CONFIG:
+		if (rtdm_fd_is_user(fd))
+			err = rtdm_safe_copy_to_user(fd, arg,
+						     &ctx->config,
+						     sizeof(struct
+							    rtser_config));
+		else
+			memcpy(arg, &ctx->config, sizeof(struct rtser_config));
+		break;
+
+	case RTSER_RTIOC_SET_CONFIG: {
+		struct rtser_config *config;
+		struct rtser_config config_buf;
+		uint64_t *hist_buf = NULL;
+
+		config = (struct rtser_config *)arg;
+
+		if (rtdm_fd_is_user(fd)) {
+			err = rtdm_safe_copy_from_user(fd, &config_buf,
+						       arg,
+						       sizeof(struct
+							      rtser_config));
+			if (err)
+				return err;
+
+			config = &config_buf;
+		}
+
+		if ((config->config_mask & RTSER_SET_BAUD) &&
+		    (config->baud_rate <= 0))
+			/* invalid baudrate for this port */
+			return -EINVAL;
+
+		if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+			/*
+			 * Reflect the call to non-RT as we will likely
+			 * allocate or free the buffer.
+			 */
+			if (rtdm_in_rt_context())
+				return -ENOSYS;
+
+			if (config->timestamp_history & RTSER_RX_TIMESTAMP_HISTORY)
+				hist_buf = kmalloc(IN_BUFFER_SIZE *
+						   sizeof(nanosecs_abs_t),
+						   GFP_KERNEL);
+		}
+
+		rt_mpc52xx_uart_set_config(ctx, config, &hist_buf);
+
+		if (hist_buf)
+			kfree(hist_buf);
+
+		break;
+	}
+
+	case RTSER_RTIOC_GET_STATUS: {
+		int status;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		status = ctx->saved_errors | ctx->status;
+		ctx->status = 0;
+		ctx->saved_errors = 0;
+		ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd)) {
+			struct rtser_status status_buf;
+
+			status_buf.line_status = status;
+			status_buf.modem_status = psc_get_msr(ctx);
+
+			err = rtdm_safe_copy_to_user(fd, arg,
+						     &status_buf,
+						     sizeof(struct
+							    rtser_status));
+		} else {
+			((struct rtser_status *)arg)->line_status = status;
+			((struct rtser_status *)arg)->modem_status =
+				psc_get_msr(ctx);
+		}
+		break;
+	}
+
+	case RTSER_RTIOC_GET_CONTROL:
+		if (rtdm_fd_is_user(fd))
+			err = rtdm_safe_copy_to_user(fd, arg,
+						     &ctx->mcr_status,
+						     sizeof(int));
+		else
+			*(int *)arg = ctx->mcr_status;
+
+		break;
+
+	case RTSER_RTIOC_SET_CONTROL: {
+		int new_mcr = (long)arg;
+
+		if (new_mcr & ~RTSER_MCR_RTS)
+			dev_warn(ctx->port->dev,
+				 "MCR: Only RTS is supported\n");
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		ctx->mcr_status = new_mcr & RTSER_MCR_RTS;
+		psc_set_mcr(ctx, ctx->mcr_status);
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+	case RTSER_RTIOC_WAIT_EVENT: {
+		struct rtser_event ev = { .rxpend_timestamp = 0 };
+		rtdm_toseq_t timeout_seq;
+
+		if (!rtdm_in_rt_context())
+			return -ENOSYS;
+
+		/* Only one waiter allowed, stop any further attempts here. */
+		if (test_and_set_bit(0, &ctx->ioc_event_lock))
+			return -EBUSY;
+
+		rtdm_toseq_init(&timeout_seq, ctx->config.event_timeout);
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		while (!ctx->ioc_events) {
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			err = rtdm_event_timedwait(&ctx->ioc_event,
+						   ctx->config.event_timeout,
+						   &timeout_seq);
+			if (err) {
+				/* Device has been closed? */
+				if (err == -EIDRM)
+					err = -EBADF;
+				goto wait_unlock_out;
+			}
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		}
+
+		ev.events = ctx->ioc_events;
+		ctx->ioc_events &= ~(RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO);
+
+		ev.last_timestamp = ctx->last_timestamp;
+		ev.rx_pending = ctx->in_npend;
+
+		if (ctx->in_history)
+			ev.rxpend_timestamp = ctx->in_history[ctx->in_head];
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd))
+			err = rtdm_safe_copy_to_user(fd, arg, &ev,
+						     sizeof(struct rtser_event));
+		else
+			memcpy(arg, &ev, sizeof(struct rtser_event));
+
+	      wait_unlock_out:
+		/* release the simple event waiter lock */
+		clear_bit(0, &ctx->ioc_event_lock);
+		break;
+	}
+
+	case RTSER_RTIOC_BREAK_CTL: {
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		if ((long)arg & RTSER_BREAK_SET)
+			out_8(&ctx->port->psc->command,
+			      MPC52xx_PSC_START_BRK);
+		else
+			out_8(&ctx->port->psc->command,
+			      MPC52xx_PSC_STOP_BRK);
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+#ifdef ISREADY
+	case RTIOC_PURGE: {
+		int fcr = 0;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		if ((long)arg & RTDM_PURGE_RX_BUFFER) {
+			ctx->in_head = 0;
+			ctx->in_tail = 0;
+			ctx->in_npend = 0;
+			ctx->status = 0;
+			fcr |= FCR_FIFO | FCR_RESET_RX;
+			rt_mpc52xx_uart_reg_in(mode, base, RHR);
+		}
+		if ((long)arg & RTDM_PURGE_TX_BUFFER) {
+			ctx->out_head = 0;
+			ctx->out_tail = 0;
+			ctx->out_npend = 0;
+			fcr |= FCR_FIFO | FCR_RESET_TX;
+		}
+		if (fcr) {
+			rt_mpc52xx_uart_reg_out(mode, base, FCR, fcr);
+			rt_mpc52xx_uart_reg_out(mode, base, FCR,
+					 FCR_FIFO | ctx->config.fifo_depth);
+		}
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+#endif
+
+	default:
+		err = -ENOTTY;
+	}
+
+	return err;
+}
+
+static ssize_t rt_mpc52xx_uart_read(struct rtdm_fd *fd, void *buf,
+				    size_t nbyte)
+{
+	struct rt_mpc52xx_uart_ctx *ctx;
+	rtdm_lockctx_t lock_ctx;
+	size_t read = 0;
+	int pending;
+	int block;
+	int subblock;
+	int in_pos;
+	char *out_pos = (char *)buf;
+	rtdm_toseq_t timeout_seq;
+	ssize_t ret = -EAGAIN;	/* for non-blocking read */
+	int nonblocking;
+
+	if (nbyte == 0)
+		return 0;
+
+	if (rtdm_fd_is_user(fd) && !rtdm_rw_user_ok(fd, buf, nbyte))
+		return -EFAULT;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_toseq_init(&timeout_seq, ctx->config.rx_timeout);
+
+	/* non-blocking is handled separately here */
+	nonblocking = (ctx->config.rx_timeout < 0);
+
+	/* only one reader allowed, stop any further attempts here */
+	if (test_and_set_bit(0, &ctx->in_lock))
+		return -EBUSY;
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	while (1) {
+		if (ctx->status) {
+			if (ctx->status & RTSER_LSR_BREAK_IND)
+				ret = -EPIPE;
+			else
+				ret = -EIO;
+			ctx->saved_errors = ctx->status &
+			    (RTSER_LSR_OVERRUN_ERR | RTSER_LSR_PARITY_ERR |
+			     RTSER_LSR_FRAMING_ERR | RTSER_SOFT_OVERRUN_ERR);
+			ctx->status = 0;
+			break;
+		}
+
+		pending = ctx->in_npend;
+
+		if (pending > 0) {
+			block = subblock = (pending <= nbyte) ? pending : nbyte;
+			in_pos = ctx->in_head;
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			/* Do we have to wrap around the buffer end? */
+			if (in_pos + subblock > IN_BUFFER_SIZE) {
+				/* Treat the block between head and buffer end
+				   separately. */
+				subblock = IN_BUFFER_SIZE - in_pos;
+
+				if (rtdm_fd_is_user(fd)) {
+					if (rtdm_copy_to_user
+					    (fd, out_pos,
+					     &ctx->in_buf[in_pos],
+					     subblock) != 0) {
+						ret = -EFAULT;
+						goto break_unlocked;
+					}
+				} else
+					memcpy(out_pos, &ctx->in_buf[in_pos],
+					       subblock);
+
+				read += subblock;
+				out_pos += subblock;
+
+				subblock = block - subblock;
+				in_pos = 0;
+			}
+
+			if (rtdm_fd_is_user(fd)) {
+				if (rtdm_copy_to_user(fd, out_pos,
+						      &ctx->in_buf[in_pos],
+						      subblock) != 0) {
+					ret = -EFAULT;
+					goto break_unlocked;
+				}
+			} else
+				memcpy(out_pos, &ctx->in_buf[in_pos], subblock);
+
+			read += subblock;
+			out_pos += subblock;
+			nbyte -= block;
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			ctx->in_head =
+			    (ctx->in_head + block) & (IN_BUFFER_SIZE - 1);
+			if ((ctx->in_npend -= block) == 0)
+				ctx->ioc_events &= ~RTSER_EVENT_RXPEND;
+
+			if (nbyte == 0)
+				break; /* All requested bytes read. */
+
+			continue;
+		}
+
+		if (nonblocking)
+			/* ret was set to -EAGAIN in case of a real
+			   non-blocking call or contains the error
+			   returned by rtdm_event_timedwait() */
+			break;
+
+		ctx->in_nwait = nbyte;
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		ret = rtdm_event_timedwait(&ctx->in_event,
+					   ctx->config.rx_timeout,
+					   &timeout_seq);
+		if (ret < 0) {
+			if (ret == -EIDRM) {
+				/* Device has been closed -
+				   return immediately. */
+				return -EBADF;
+			}
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			nonblocking = 1;
+			if (ctx->in_npend > 0) {
+				/* Final turn: collect pending bytes
+				   before exit. */
+				continue;
+			}
+
+			ctx->in_nwait = 0;
+			break;
+		}
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+	}
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+break_unlocked:
+	/* Release the simple reader lock. */
+	clear_bit(0, &ctx->in_lock);
+
+	if ((read > 0) && ((ret == 0) || (ret == -EAGAIN) ||
+			   (ret == -ETIMEDOUT) || (ret == -EINTR)))
+		ret = read;
+
+	return ret;
+}
+
+static ssize_t rt_mpc52xx_uart_write(struct rtdm_fd *fd,
+				     const void *buf,
+				     size_t nbyte)
+{
+	struct rt_mpc52xx_uart_ctx *ctx;
+	rtdm_lockctx_t lock_ctx;
+	size_t written = 0;
+	int free;
+	int block;
+	int subblock;
+	int out_pos;
+	char *in_pos = (char *)buf;
+	rtdm_toseq_t timeout_seq;
+	ssize_t ret;
+
+	if (nbyte == 0)
+		return 0;
+
+	if (rtdm_fd_is_user(fd) && !rtdm_read_user_ok(fd, buf, nbyte))
+		return -EFAULT;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_toseq_init(&timeout_seq, ctx->config.tx_timeout);
+
+	/* Make write operation atomic. */
+	ret = rtdm_mutex_timedlock(&ctx->out_lock, ctx->config.tx_timeout,
+				   &timeout_seq);
+	if (ret)
+		return ret;
+
+	while (nbyte > 0) {
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		free = OUT_BUFFER_SIZE - ctx->out_npend;
+
+		if (free > 0) {
+			block = subblock = (nbyte <= free) ? nbyte : free;
+			out_pos = ctx->out_tail;
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			/* Do we have to wrap around the buffer end? */
+			if (out_pos + subblock > OUT_BUFFER_SIZE) {
+				/* Treat the block between head and buffer
+				   end separately. */
+				subblock = OUT_BUFFER_SIZE - out_pos;
+
+				if (rtdm_fd_is_user(fd)) {
+					if (rtdm_copy_from_user
+					    (fd,
+					     &ctx->out_buf[out_pos],
+					     in_pos, subblock) != 0) {
+						ret = -EFAULT;
+						break;
+					}
+				} else
+					memcpy(&ctx->out_buf[out_pos], in_pos,
+					       subblock);
+
+				written += subblock;
+				in_pos += subblock;
+
+				subblock = block - subblock;
+				out_pos = 0;
+			}
+
+			if (rtdm_fd_is_user(fd)) {
+				if (rtdm_copy_from_user
+				    (fd, &ctx->out_buf[out_pos],
+				     in_pos, subblock) != 0) {
+					ret = -EFAULT;
+					break;
+				}
+			} else
+				memcpy(&ctx->out_buf[out_pos], in_pos,
+				       subblock);
+
+			written += subblock;
+			in_pos += subblock;
+			nbyte -= block;
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			ctx->out_tail =
+			    (ctx->out_tail + block) & (OUT_BUFFER_SIZE - 1);
+			ctx->out_npend += block;
+
+			/* Mark shift register not empty */
+			ctx->ioc_events &= ~RTSER_EVENT_TXEMPTY;
+			ctx->tx_empty = 0;
+
+			psc_start_tx(ctx);
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+			continue;
+		}
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		ret = rtdm_event_timedwait(&ctx->out_event,
+					   ctx->config.tx_timeout,
+					   &timeout_seq);
+		if (ret < 0) {
+			if (ret == -EIDRM) {
+				/* Device has been closed -
+				   return immediately. */
+				return -EBADF;
+			}
+			if (ret == -EWOULDBLOCK) {
+				/* Fix error code for non-blocking mode. */
+				ret = -EAGAIN;
+			}
+			break;
+		}
+	}
+
+	rtdm_mutex_unlock(&ctx->out_lock);
+
+	if ((written > 0) && ((ret == 0) || (ret == -EAGAIN) ||
+			      (ret == -ETIMEDOUT) || (ret == -EINTR)))
+		ret = written;
+
+	return ret;
+}
+
+static struct rtdm_driver mpc52xx_uart_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(mpc52xx_uart,
+						    RTDM_CLASS_SERIAL,
+						    RTDM_SUBCLASS_16550A,
+						    RTSER_PROFILE_VER),
+	.device_count		= MPC52xx_PSC_MAXNUM,
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.context_size		= sizeof(struct rt_mpc52xx_uart_ctx),
+	.ops = {
+		.open		= rt_mpc52xx_uart_open,
+		.close		= rt_mpc52xx_uart_close,
+		.ioctl_rt	= rt_mpc52xx_uart_ioctl,
+		.ioctl_nrt	= rt_mpc52xx_uart_ioctl,
+		.read_rt	= rt_mpc52xx_uart_read,
+		.write_rt	= rt_mpc52xx_uart_write,
+	},
+};
+
+static int rt_mpc52xx_uart_of_probe(struct platform_device *op)
+{
+	struct rt_mpc52xx_uart_port *port;
+	struct rtdm_device *dev;
+	struct resource res;
+	int ret, idx;
+
+	dev_dbg(&op->dev, "mpc52xx_uart_probe(op=%p)\n", op);
+
+	/* Check validity & presence */
+	for (idx = 0; idx < MPC52xx_PSC_MAXNUM; idx++)
+		if (rt_mpc52xx_uart_nodes[idx] == op->dev.of_node)
+			break;
+	if (idx >= MPC52xx_PSC_MAXNUM)
+		return -EINVAL;
+
+	port = kmalloc(sizeof(*port), GFP_KERNEL);
+	if (!port) {
+		dev_err(&op->dev, "Could not allocate port space\n");
+		return -ENOMEM;
+	}
+	port->dev = &op->dev;
+
+	/*
+	 * Set the uart clock to the input clock of the psc, the different
+	 * prescalers are taken into account in the set_baudrate() methods
+	 * of the respective chip
+	 */
+	port->uartclk = mpc5xxx_get_bus_frequency(op->dev.of_node);
+	if (port->uartclk == 0) {
+		dev_err(&op->dev, "Could not find uart clock frequency\n");
+		ret = -EINVAL;
+		goto out_kfree_port;
+	}
+
+	/* Fetch register locations */
+	ret = of_address_to_resource(op->dev.of_node, 0, &res);
+	if (ret) {
+		dev_err(&op->dev, "Could not get resources\n");
+		goto out_kfree_port;
+	}
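+	/*
+	 * Derive the PSC number from the register block offset: on the
+	 * MPC5200, ((start >> 8) & 0xf) / 2 yields 0..4 for PSC1..PSC5
+	 * and 6 for PSC6, so values below 6 are bumped to get the
+	 * 1-based PSC number.
+	 */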
+	port->num = ((res.start >> 8) & 0xf) / 2;
+	if (port->num < 6)
+		port->num++;
+
+	if (!request_mem_region(res.start, resource_size(&res),
+				RT_MPC52XX_UART_DRVNAM)) {
+		ret = -EBUSY;
+		goto out_kfree_port;
+	}
+
+	port->psc = ioremap(res.start, resource_size(&res));
+	if (!port->psc) {
+		dev_err(&op->dev, "Could not map PSC registers\n");
+		ret = -ENOMEM;
+		goto out_release_mem_region;
+	}
+	port->fifo = (struct mpc52xx_psc_fifo __iomem *)(port->psc + 1);
+
+	port->irq = irq_of_parse_and_map(op->dev.of_node, 0);
+	if (port->irq <= 0) {
+		dev_err(&op->dev, "Could not get irq\n");
+		ret = -ENODEV;
+		goto out_iounmap;
+	}
+
+	dev = kmalloc(sizeof(struct rtdm_device), GFP_KERNEL);
+	if (!dev) {
+		dev_err(&op->dev, "Could not allocate device context\n");
+		ret = -ENOMEM;
+		goto out_dispose_irq_mapping;
+	}
+
+	dev->driver = &mpc52xx_uart_driver;
+	dev->label = "rtserPSC%d";
+	dev->device_data = port;
+
+	rt_mpc52xx_uart_init_hw(port);
+
+	ret = rtdm_dev_register(dev);
+	if (ret)
+		goto out_kfree_dev;
+
+	dev_set_drvdata(&op->dev, dev);
+
+	dev_info(&op->dev, "%s on PSC%d at 0x%p, irq=%d, clk=%i\n",
+		 dev->name, port->num, port->psc, port->irq,
+		 port->uartclk);
+
+	return 0;
+
+out_kfree_dev:
+	kfree(dev);
+out_dispose_irq_mapping:
+	irq_dispose_mapping(port->irq);
+out_iounmap:
+	iounmap(port->psc);
+out_release_mem_region:
+	release_mem_region(res.start, resource_size(&res));
+out_kfree_port:
+	kfree(port);
+
+	return ret;
+}
+
+static int rt_mpc52xx_uart_of_remove(struct platform_device *op)
+{
+	struct rtdm_device *dev = dev_get_drvdata(&op->dev);
+	struct rt_mpc52xx_uart_port *port = dev->device_data;
+	struct resource res;
+
+	dev_set_drvdata(&op->dev, NULL);
+
+	rtdm_dev_unregister(dev);
+	irq_dispose_mapping(port->irq);
+	iounmap(port->psc);
+	if (!of_address_to_resource(op->dev.of_node, 0, &res))
+		release_mem_region(res.start, resource_size(&res));
+	kfree(port);
+	kfree(dev);
+
+	return 0;
+}
+
+static struct of_device_id rt_mpc52xx_uart_of_match[] = {
+	{ .compatible = "fsl,mpc5200b-psc-uart", },
+	{ .compatible = "fsl,mpc5200-psc-uart", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, rt_mpc52xx_uart_of_match);
+
+static struct platform_driver rt_mpc52xx_uart_of_driver = {
+	.probe = rt_mpc52xx_uart_of_probe,
+	.remove	=  rt_mpc52xx_uart_of_remove,
+	.driver = {
+		.name = "rt-mpc52xx-psc-uart",
+		.owner = THIS_MODULE,
+		.of_match_table = rt_mpc52xx_uart_of_match,
+	},
+};
+
+static void rt_mpc52xx_uart_of_enumerate(void)
+{
+	struct device_node *np;
+	int idx = 0;
+
+	/* Assign an index to each PSC in the device tree, like the
+	   Linux driver does */
+	for_each_matching_node(np, rt_mpc52xx_uart_of_match) {
+		of_node_get(np);
+		rt_mpc52xx_uart_nodes[idx] = np;
+		idx++;
+	}
+}
+
+static int __init rt_mpc52xx_uart_init(void)
+{
+	int ret;
+
+	if (!rtdm_available())
+		return -ENODEV;
+
+	printk(KERN_INFO "RTserial: MPC52xx PSC UART driver\n");
+
+	rt_mpc52xx_uart_of_enumerate();
+
+	ret = platform_driver_register(&rt_mpc52xx_uart_of_driver);
+	if (ret) {
+		printk(KERN_ERR
+		       "%s: Could not register driver (err=%d)\n",
+		       __func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void __exit rt_mpc52xx_uart_exit(void)
+{
+	platform_driver_unregister(&rt_mpc52xx_uart_of_driver);
+}
+
+module_init(rt_mpc52xx_uart_init);
+module_exit(rt_mpc52xx_uart_exit);
+++ linux-patched/drivers/xenomai/serial/16550A_io.h	2022-03-21 12:58:31.219871116 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/serial/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2007 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* Manages the I/O access method of the driver. */
+
+typedef enum { MODE_PIO, MODE_MMIO } io_mode_t;
+
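+/*
+ * Three build variants share this file: the PIO-only and MMIO-only
+ * variants fix the access mode at compile time and keep the register
+ * helpers inline, while the "any" variant accepts both "io=" and "mem="
+ * parameters and picks the mode per device at load time (the register
+ * accessors then become out-of-line).
+ */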
+#if defined(CONFIG_XENO_DRIVERS_16550A_PIO) || \
+    defined(CONFIG_XENO_DRIVERS_16550A_ANY)
+static unsigned long io[MAX_DEVICES];
+module_param_array(io, ulong, NULL, 0400);
+MODULE_PARM_DESC(io, "I/O port addresses of the serial devices");
+#endif /* CONFIG_XENO_DRIVERS_16550A_PIO || CONFIG_XENO_DRIVERS_16550A_ANY */
+
+#if defined(CONFIG_XENO_DRIVERS_16550A_MMIO) || \
+    defined(CONFIG_XENO_DRIVERS_16550A_ANY)
+static unsigned long mem[MAX_DEVICES];
+static void *mapped_io[MAX_DEVICES];
+module_param_array(mem, ulong, NULL, 0400);
+MODULE_PARM_DESC(mem, "I/O memory addresses of the serial devices");
+#endif /* CONFIG_XENO_DRIVERS_16550A_MMIO || CONFIG_XENO_DRIVERS_16550A_ANY */
+
+#ifdef CONFIG_XENO_DRIVERS_16550A_PIO
+
+#define RT_16550_IO_INLINE inline
+
+extern void *mapped_io[]; /* dummy */
+
+static inline unsigned long rt_16550_addr_param(int dev_id)
+{
+	return io[dev_id];
+}
+
+static inline int rt_16550_addr_param_valid(int dev_id)
+{
+	return 1;
+}
+
+static inline unsigned long rt_16550_base_addr(int dev_id)
+{
+	return io[dev_id];
+}
+
+static inline io_mode_t rt_16550_io_mode(int dev_id)
+{
+	return MODE_PIO;
+}
+
+static inline io_mode_t
+rt_16550_io_mode_from_ctx(struct rt_16550_context *ctx)
+{
+	return MODE_PIO;
+}
+
+static inline void
+rt_16550_init_io_ctx(int dev_id, struct rt_16550_context *ctx)
+{
+	ctx->base_addr = io[dev_id];
+}
+
+#elif defined(CONFIG_XENO_DRIVERS_16550A_MMIO)
+
+#define RT_16550_IO_INLINE inline
+
+extern unsigned long io[]; /* dummy */
+
+static inline unsigned long rt_16550_addr_param(int dev_id)
+{
+	return mem[dev_id];
+}
+
+static inline int rt_16550_addr_param_valid(int dev_id)
+{
+	return 1;
+}
+
+static inline unsigned long rt_16550_base_addr(int dev_id)
+{
+	return (unsigned long)mapped_io[dev_id];
+}
+
+static inline io_mode_t rt_16550_io_mode(int dev_id)
+{
+	return MODE_MMIO;
+}
+
+static inline io_mode_t
+rt_16550_io_mode_from_ctx(struct rt_16550_context *ctx)
+{
+	return MODE_MMIO;
+}
+
+static inline void
+rt_16550_init_io_ctx(int dev_id, struct rt_16550_context *ctx)
+{
+	ctx->base_addr = (unsigned long)mapped_io[dev_id];
+}
+
+#elif defined(CONFIG_XENO_DRIVERS_16550A_ANY)
+
+#define RT_16550_IO_INLINE /* uninline */
+
+static inline unsigned long rt_16550_addr_param(int dev_id)
+{
+	return (io[dev_id]) ? io[dev_id] : mem[dev_id];
+}
+
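+/*
+ * In "any" mode a device slot is rejected only if both "io=" and "mem="
+ * were given for it; at most one of them may be set per device.
+ */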
+static inline int rt_16550_addr_param_valid(int dev_id)
+{
+	return !(io[dev_id] && mem[dev_id]);
+}
+
+static inline unsigned long rt_16550_base_addr(int dev_id)
+{
+	return (io[dev_id]) ? io[dev_id] : (unsigned long)mapped_io[dev_id];
+}
+
+static inline io_mode_t rt_16550_io_mode(int dev_id)
+{
+	return (io[dev_id]) ? MODE_PIO : MODE_MMIO;
+}
+
+static inline io_mode_t
+rt_16550_io_mode_from_ctx(struct rt_16550_context *ctx)
+{
+	return ctx->io_mode;
+}
+
+static inline void
+rt_16550_init_io_ctx(int dev_id, struct rt_16550_context *ctx)
+{
+	if (io[dev_id]) {
+		ctx->base_addr = io[dev_id];
+		ctx->io_mode   = MODE_PIO;
+	} else {
+		ctx->base_addr = (unsigned long)mapped_io[dev_id];
+		ctx->io_mode   = MODE_MMIO;
+	}
+}
+
+#else
+# error Unsupported I/O access method
+#endif
+
+static RT_16550_IO_INLINE u8
+rt_16550_reg_in(io_mode_t io_mode, unsigned long base, int off)
+{
+	switch (io_mode) {
+	case MODE_PIO:
+		return inb(base + off);
+	default: /* MODE_MMIO */
+		return readb((void *)base + off);
+	}
+}
+
+static RT_16550_IO_INLINE void
+rt_16550_reg_out(io_mode_t io_mode, unsigned long base, int off, u8 val)
+{
+	switch (io_mode) {
+	case MODE_PIO:
+		outb(val, base + off);
+		break;
+	case MODE_MMIO:
+		writeb(val, (void *)base + off);
+		break;
+	}
+}
+
+static int rt_16550_init_io(int dev_id, char* name)
+{
+	switch (rt_16550_io_mode(dev_id)) {
+	case MODE_PIO:
+		if (!request_region(rt_16550_addr_param(dev_id), 8, name))
+			return -EBUSY;
+		break;
+	case MODE_MMIO:
+		mapped_io[dev_id] = ioremap(rt_16550_addr_param(dev_id), 8);
+		if (!mapped_io[dev_id])
+			return -EBUSY;
+		break;
+	}
+	return 0;
+}
+
+static void rt_16550_release_io(int dev_id)
+{
+	switch (rt_16550_io_mode(dev_id)) {
+	case MODE_PIO:
+		release_region(io[dev_id], 8);
+		break;
+	case MODE_MMIO:
+		iounmap(mapped_io[dev_id]);
+		break;
+	}
+}
+++ linux-patched/drivers/xenomai/serial/Makefile	2022-03-21 12:58:31.212871185 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/transfer.c	1970-01-01 01:00:00.000000000 +0100
+
+obj-$(CONFIG_XENO_DRIVERS_16550A) += xeno_16550A.o
+obj-$(CONFIG_XENO_DRIVERS_MPC52XX_UART) += xeno_mpc52xx_uart.o
+obj-$(CONFIG_XENO_DRIVERS_IMX_UART) += xeno_imx_uart.o
+
+xeno_16550A-y := 16550A.o
+xeno_mpc52xx_uart-y := mpc52xx_uart.o
+xeno_imx_uart-y := rt_imx_uart.o
+++ linux-patched/drivers/xenomai/analogy/transfer.c	2022-03-21 12:58:31.204871263 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, transfer related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <asm/errno.h>
+#include <rtdm/analogy/device.h>
+
+#include "proc.h"
+
+/* --- Initialization / cleanup / cancel functions --- */
+
+int a4l_precleanup_transfer(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev;
+	struct a4l_transfer *tsf;
+	int i, err = 0;
+
+	dev = a4l_get_dev(cxt);
+	tsf = &dev->transfer;
+
+	if (tsf == NULL) {
+		__a4l_err("a4l_precleanup_transfer: "
+			  "incoherent status, transfer block not reachable\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < tsf->nb_subd; i++) {
+		unsigned long *status = &tsf->subds[i]->status;
+
+		__a4l_dbg(1, core_dbg, "subd[%d]->status=0x%08lx\n", i, *status);
+
+		if (test_and_set_bit(A4L_SUBD_BUSY, status)) {
+			__a4l_err("a4l_precleanup_transfer: "
+				  "device busy, acquisition occurring\n");
+			err = -EBUSY;
+			goto out_error;
+		} else
+			set_bit(A4L_SUBD_CLEAN, status);
+	}
+
+	return 0;
+
+out_error:
+	for (i = 0; i < tsf->nb_subd; i++) {
+		unsigned long *status = &tsf->subds[i]->status;
+
+		if (test_bit(A4L_SUBD_CLEAN, status)){
+			clear_bit(A4L_SUBD_BUSY, status);
+			clear_bit(A4L_SUBD_CLEAN, status);
+		}
+	}
+
+	return err;
+}
+
+int a4l_cleanup_transfer(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev;
+	struct a4l_transfer *tsf;
+
+	dev = a4l_get_dev(cxt);
+	tsf = &dev->transfer;
+
+	/* Releases the pointers tab, if need be */
+	if (tsf->subds != NULL) {
+		rtdm_free(tsf->subds);
+	}
+
+	memset(tsf, 0, sizeof(struct a4l_transfer));
+
+	return 0;
+}
+
+void a4l_presetup_transfer(struct a4l_device_context *cxt)
+{
+	struct a4l_device *dev = NULL;
+	struct a4l_transfer *tsf;
+
+	dev = a4l_get_dev(cxt);
+	tsf = &dev->transfer;
+
+	/* Clear the structure */
+	memset(tsf, 0, sizeof(struct a4l_transfer));
+
+	tsf->default_bufsize = A4L_BUF_DEFSIZE;
+
+	/* 0 is also considered a valid IRQ, so the IRQ number
+	   must be initialized with another value */
+	tsf->irq_desc.irq = A4L_IRQ_UNUSED;
+}
+
+int a4l_setup_transfer(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev = NULL;
+	struct a4l_transfer *tsf;
+	struct list_head *this;
+	int i = 0, ret = 0;
+
+	dev = a4l_get_dev(cxt);
+	tsf = &dev->transfer;
+
+	/* Recover the subdevices count
+	   (they are registered in a linked list) */
+	list_for_each(this, &dev->subdvsq) {
+		tsf->nb_subd++;
+	}
+
+	__a4l_dbg(1, core_dbg, "nb_subd=%d\n", tsf->nb_subd);
+
+	/* Allocate a suitable table for the subdevice pointers */
+	tsf->subds = rtdm_malloc(tsf->nb_subd * sizeof(struct a4l_subdevice *));
+	if (tsf->subds == NULL) {
+		__a4l_err("a4l_setup_transfer: subdevices table allocation failed\n");
+		ret = -ENOMEM;
+		goto out_setup_tsf;
+	}
+
+	/* Recover the subdevice pointers */
+	list_for_each(this, &dev->subdvsq) {
+		tsf->subds[i++] = list_entry(this, struct a4l_subdevice, list);
+	}
+
+out_setup_tsf:
+
+	if (ret != 0)
+		a4l_cleanup_transfer(cxt);
+
+	return ret;
+}
+
+/* --- IRQ handling section --- */
+
+int a4l_request_irq(struct a4l_device * dev,
+		    unsigned int irq,
+		    a4l_irq_hdlr_t handler,
+		    unsigned long flags, void *cookie)
+{
+	int ret;
+
+	if (dev->transfer.irq_desc.irq != A4L_IRQ_UNUSED)
+		return -EBUSY;
+
+	ret = __a4l_request_irq(&dev->transfer.irq_desc, irq, handler, flags,
+		cookie);
+	if (ret != 0) {
+		__a4l_err("a4l_request_irq: IRQ registration failed\n");
+		dev->transfer.irq_desc.irq = A4L_IRQ_UNUSED;
+	}
+
+	return ret;
+}
+
+int a4l_free_irq(struct a4l_device * dev, unsigned int irq)
+{
+
+	int ret = 0;
+
+	if (dev->transfer.irq_desc.irq != irq)
+		return -EINVAL;
+
+	/* There is less need to use a spinlock
+	   than for a4l_request_irq() */
+	ret = __a4l_free_irq(&dev->transfer.irq_desc);
+
+	if (ret == 0)
+		dev->transfer.irq_desc.irq = A4L_IRQ_UNUSED;
+
+	return ret;
+}
+
+unsigned int a4l_get_irq(struct a4l_device * dev)
+{
+	return dev->transfer.irq_desc.irq;
+}
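+
+/*
+ * Illustrative check (hypothetical helper): since 0 is a valid IRQ line,
+ * board drivers compare against A4L_IRQ_UNUSED rather than 0 to find out
+ * whether an interrupt line was registered for the device.
+ */
+#if 0
+static int a4l_has_irq_sketch(struct a4l_device *dev)
+{
+	return a4l_get_irq(dev) != A4L_IRQ_UNUSED;
+}
+#endif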
+
+/* --- Proc section --- */
+
+#ifdef CONFIG_PROC_FS
+
+int a4l_rdproc_transfer(struct seq_file *seq, void *v)
+{
+	struct a4l_transfer *transfer = (struct a4l_transfer *) seq->private;
+	int i;
+
+	if (v != SEQ_START_TOKEN)
+		return -EINVAL;
+
+	seq_printf(seq, "--  Subdevices --\n\n");
+	seq_printf(seq, "| idx | type\n");
+
+	/* Gives the subdevice type's name */
+	for (i = 0; i < transfer->nb_subd; i++) {
+		char *type;
+		switch (transfer->subds[i]->flags & A4L_SUBD_TYPES) {
+		case A4L_SUBD_UNUSED:
+			type = "Unused subdevice";
+			break;
+		case A4L_SUBD_AI:
+			type = "Analog input subdevice";
+			break;
+		case A4L_SUBD_AO:
+			type = "Analog output subdevice";
+			break;
+		case A4L_SUBD_DI:
+			type = "Digital input subdevice";
+			break;
+		case A4L_SUBD_DO:
+			type = "Digital output subdevice";
+			break;
+		case A4L_SUBD_DIO:
+			type = "Digital input/output subdevice";
+			break;
+		case A4L_SUBD_COUNTER:
+			type = "Counter subdevice";
+			break;
+		case A4L_SUBD_TIMER:
+			type = "Timer subdevice";
+			break;
+		case A4L_SUBD_MEMORY:
+			type = "Memory subdevice";
+			break;
+		case A4L_SUBD_CALIB:
+			type = "Calibration subdevice";
+			break;
+		case A4L_SUBD_PROC:
+			type = "Processor subdevice";
+			break;
+		case A4L_SUBD_SERIAL:
+			type = "Serial subdevice";
+			break;
+		default:
+			type = "Unknown subdevice";
+		}
+
+		seq_printf(seq, "|  %02d | %s\n", i, type);
+	}
+
+	return 0;
+}
+
+#endif /* CONFIG_PROC_FS */
+++ linux-patched/drivers/xenomai/analogy/Kconfig	2022-03-21 12:58:31.197871331 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/testing/fake.c	1970-01-01 01:00:00.000000000 +0100
+menu "ANALOGY drivers"
+
+config XENO_DRIVERS_ANALOGY
+	tristate "ANALOGY interface"
+	help
+
+	ANALOGY is a framework aimed at supporting data acquisition
+	devices.
+
+config XENO_DRIVERS_ANALOGY_DEBUG
+       depends on XENO_DRIVERS_ANALOGY
+       bool "Analogy debug trace"
+       default n
+       help
+
+       Enable debugging traces in Analogy so as to monitor the
+       behaviour of the Analogy core and its drivers.
+
+config XENO_DRIVERS_ANALOGY_DEBUG_FTRACE
+       depends on XENO_DRIVERS_ANALOGY_DEBUG
+       bool "Analogy debug ftrace"
+       default n
+       help
+
+       Route the Analogy a4l_dbg and a4l_info statements to /sys/kernel/debug/
+
+config XENO_DRIVERS_ANALOGY_DEBUG_LEVEL
+       depends on XENO_DRIVERS_ANALOGY_DEBUG
+       int "Analogy core debug level threshold"
+       default 0
+       help
+
+       Define the level above which debugging traces are not
+       displayed.
+
+       WARNING: this threshold only applies to the Analogy
+       core; it does not affect the drivers.
+
+config XENO_DRIVERS_ANALOGY_DRIVER_DEBUG_LEVEL
+       depends on XENO_DRIVERS_ANALOGY_DEBUG
+       int "Analogy driver debug level threshold"
+       default 0
+       help
+
+       Define the level above which debugging traces are not
+       displayed.
+
+       WARNING: this threshold only applies to the Analogy
+       drivers; it does not affect the core.
+
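+# Illustrative .config fragment (hypothetical values): enable core traces
+# up to level 1 while keeping driver traces at level 0.
+#   CONFIG_XENO_DRIVERS_ANALOGY=m
+#   CONFIG_XENO_DRIVERS_ANALOGY_DEBUG=y
+#   CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_LEVEL=1
+#   CONFIG_XENO_DRIVERS_ANALOGY_DRIVER_DEBUG_LEVEL=0
+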
+source "drivers/xenomai/analogy/testing/Kconfig"
+source "drivers/xenomai/analogy/intel/Kconfig"
+source "drivers/xenomai/analogy/national_instruments/Kconfig"
+source "drivers/xenomai/analogy/sensoray/Kconfig"
+
+endmenu
+++ linux-patched/drivers/xenomai/analogy/testing/fake.c	2022-03-21 12:58:31.190871399 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/testing/Kconfig	1970-01-01 01:00:00.000000000 +0100
+#include <linux/module.h>
+#include <rtdm/analogy/device.h>
+
+#define TASK_PERIOD 1000000
+
+#define AI_SUBD 0
+#define DIO_SUBD 1
+#define AO_SUBD 2
+#define AI2_SUBD 3
+
+#define TRANSFER_SIZE 0x1000
+
+/* --- Driver related structures --- */
+struct fake_priv {
+	/* Attach configuration parameters
+	   (they should be relocated in ai_priv) */
+	unsigned long amplitude_div;
+	unsigned long quanta_cnt;
+
+	/* Task descriptor */
+	rtdm_task_t task;
+
+	/* Statuses of the asynchronous subdevices */
+	int ai_running;
+	int ao_running;
+	int ai2_running;
+};
+
+struct ai_priv {
+
+	/* Specific timing fields */
+	unsigned long scan_period_ns;
+	unsigned long convert_period_ns;
+	unsigned long current_ns;
+	unsigned long reminder_ns;
+	unsigned long long last_ns;
+
+	/* Misc fields */
+	unsigned long amplitude_div;
+	unsigned long quanta_cnt;
+};
+
+struct ao_ai2_priv {
+	/* Asynchronous loop stuff */
+	uint8_t buffer[TRANSFER_SIZE];
+	int count;
+	/* Synchronous loop stuff */
+	uint16_t insn_value;
+};
+
+struct dio_priv {
+	/* Bits status */
+	uint16_t bits_values;
+};
+
+/* --- Channels / ranges part --- */
+
+/* Channels descriptors */
+
+static struct a4l_channels_desc analog_chandesc = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 8,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 16},
+	},
+};
+
+static struct a4l_channels_desc dio_chandesc = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 16,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+/* Ranges tab */
+static struct a4l_rngtab analog_rngtab = {
+	.length = 2,
+	.rngs = {
+		RANGE_V(-5,5),
+		RANGE_V(-10,10),
+	},
+};
+/* Ranges descriptor */
+static struct a4l_rngdesc analog_rngdesc = RNG_GLOBAL(analog_rngtab);
+
+/* Command options masks */
+
+static struct a4l_cmd_desc ai_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW,
+	.scan_begin_src = TRIG_TIMER,
+	.convert_src = TRIG_NOW | TRIG_TIMER,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_COUNT | TRIG_NONE,
+};
+
+static struct a4l_cmd_desc ao_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW | TRIG_INT,
+	.scan_begin_src = TRIG_TIMER,
+	.convert_src = TRIG_NOW | TRIG_TIMER,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_COUNT | TRIG_NONE,
+};
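+
+/*
+ * Illustrative command accepted by these masks (hypothetical figures): an
+ * AI acquisition started immediately (TRIG_NOW), scanning every 100 us
+ * (TRIG_TIMER on scan_begin, scan_begin_arg = 100000 ns) and stopped after
+ * a fixed number of scans (TRIG_COUNT) fits ai_cmd_mask; ai_cmdtest()
+ * below additionally rejects scan periods shorter than 1000 ns.
+ */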
+
+/* --- Analog input simulation --- */
+
+/* --- Values generation for 1st AI --- */
+
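+/*
+ * Each call returns the next entry of an 8-step staircase ramp shared by
+ * all AI channels; quanta_cnt sets how many steps the index advances per
+ * call and amplitude_div scales the returned sample down.
+ */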
+static inline uint16_t ai_value_output(struct ai_priv *priv)
+{
+	static uint16_t output_tab[8] = {
+		0x0001, 0x2000, 0x4000, 0x6000,
+		0x8000, 0xa000, 0xc000, 0xffff
+	};
+	static unsigned int output_idx;
+	static DEFINE_RTDM_LOCK(output_lock);
+
+	unsigned long flags;
+	unsigned int idx;
+
+	rtdm_lock_get_irqsave(&output_lock, flags);
+
+	output_idx += priv->quanta_cnt;
+	if (output_idx >= 8)	/* wrap: quanta_cnt may not divide 8 */
+		output_idx -= 8;
+	idx = output_idx;
+
+	rtdm_lock_put_irqrestore(&output_lock, flags);
+
+	return output_tab[idx] / priv->amplitude_div;
+}
+
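+/*
+ * Called from the driver task: convert the time elapsed since the previous
+ * call into whole scan periods, push one sample per channel for each of
+ * them, and carry the remainder over to the next call.
+ */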
+int ai_push_values(struct a4l_subdevice *subd)
+{
+	uint64_t now_ns, elapsed_ns = 0;
+	struct a4l_cmd_desc *cmd;
+	struct ai_priv *priv;
+	int i = 0;
+
+	if (!subd)
+		return -EINVAL;
+
+	priv = (struct ai_priv *)subd->priv;
+
+	cmd = a4l_get_cmd(subd);
+	if (!cmd)
+		return -EPIPE;
+
+	now_ns = a4l_get_time();
+	elapsed_ns += now_ns - priv->last_ns + priv->reminder_ns;
+	priv->last_ns = now_ns;
+
+	while(elapsed_ns >= priv->scan_period_ns) {
+		int j;
+
+		for(j = 0; j < cmd->nb_chan; j++) {
+			uint16_t value = ai_value_output(priv);
+			a4l_buf_put(subd, &value, sizeof(uint16_t));
+		}
+
+		elapsed_ns -= priv->scan_period_ns;
+		i++;
+	}
+
+	priv->current_ns += i * priv->scan_period_ns;
+	priv->reminder_ns = elapsed_ns;
+
+	if (i != 0)
+		a4l_buf_evt(subd, 0);
+
+	return 0;
+}
+
+/* --- Data retrieval for AO --- */
+
+int ao_pull_values(struct a4l_subdevice *subd)
+{
+	struct ao_ai2_priv *priv = (struct ao_ai2_priv *)subd->priv;
+	int err;
+
+	/* Let's have a look at how many samples are available */
+	priv->count = a4l_buf_count(subd) < TRANSFER_SIZE ?
+		      a4l_buf_count(subd) : TRANSFER_SIZE;
+
+	if (!priv->count)
+		return 0;
+
+	err = a4l_buf_get(subd, priv->buffer, priv->count);
+	if (err < 0) {
+		a4l_err(subd->dev, "ao_pull_values: a4l_buf_get failed (err=%d)\n", err);
+		priv->count = 0;
+		return err;
+
+	}
+
+	a4l_info(subd->dev, " %d bytes added to private buffer from async p=%p\n",
+		priv->count, subd->buf->buf);
+
+	a4l_buf_evt(subd, 0);
+
+	return 0;
+}
+
+/* --- Data redirection for 2nd AI (from AO) --- */
+
+int ai2_push_values(struct a4l_subdevice *subd)
+{
+	struct ao_ai2_priv *priv = *((struct ao_ai2_priv **)subd->priv);
+	int err = 0;
+
+	if (priv->count) {
+		err = a4l_buf_put(subd, priv->buffer, priv->count);
+
+		/* If there is no more room in the asynchronous
+		buffer, data are likely to be dropped; this is just a
+		test driver, so there is no need for a trickier mechanism */
+		err = (err == -EAGAIN) ? 0 : err;
+
+		a4l_info(subd->dev, "%d bytes added to async buffer p=%p\n",
+			priv->count, subd->buf->buf);
+
+		priv->count = 0;
+		if (err < 0)
+			a4l_err(subd->dev,
+				"ai2_push_values: "
+				"a4l_buf_put failed (err=%d)\n", err);
+		else
+			a4l_buf_evt(subd, 0);
+	}
+
+	return err;
+}
+
+/* --- Asynchronous AI functions --- */
+
+static int ai_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+	struct ai_priv *ai_priv = (struct ai_priv *)subd->priv;
+
+	ai_priv->scan_period_ns = cmd->scan_begin_arg;
+	ai_priv->convert_period_ns = (cmd->convert_src == TRIG_TIMER) ?
+		cmd->convert_arg : 0;
+
+	a4l_dbg(1, drv_dbg, subd->dev, "scan_period=%luns convert_period=%luns\n",
+		ai_priv->scan_period_ns, ai_priv->convert_period_ns);
+
+	ai_priv->last_ns = a4l_get_time();
+
+	ai_priv->current_ns = ((unsigned long)ai_priv->last_ns);
+	ai_priv->reminder_ns = 0;
+
+	priv->ai_running = 1;
+
+	return 0;
+
+}
+
+static int ai_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	if(cmd->scan_begin_src == TRIG_TIMER)
+	{
+		if (cmd->scan_begin_arg < 1000)
+			return -EINVAL;
+
+		if (cmd->convert_src == TRIG_TIMER &&
+		    cmd->scan_begin_arg < (cmd->convert_arg * cmd->nb_chan))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void ai_cancel(struct a4l_subdevice *subd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+
+	priv->ai_running = 0;
+}
+
+static void ai_munge(struct a4l_subdevice *subd, void *buf, unsigned long size)
+{
+	int i;
+
+	for(i = 0; i < size / sizeof(uint16_t); i++)
+		((uint16_t *)buf)[i] += 1;
+}
+
+/* --- Asynchronous AO functions --- */
+
+int ao_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	return 0;
+}
+
+int ao_trigger(struct a4l_subdevice *subd, lsampl_t trignum)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	priv->ao_running = 1;
+	return 0;
+}
+
+void ao_cancel(struct a4l_subdevice *subd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+	struct ao_ai2_priv *ao_priv = (struct ao_ai2_priv *)subd->priv;
+	int running;
+
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	priv->ao_running = 0;
+
+	running = priv->ai2_running;
+	if (running) {
+		struct a4l_subdevice *ai2_subd =
+			(struct a4l_subdevice *)a4l_get_subd(subd->dev, AI2_SUBD);
+		/* We have not saved the required amount of data,
+		   so we cannot know whether this is the end of the
+		   acquisition; that is why we force it */
+		priv->ai2_running = 0;
+		ao_priv->count = 0;
+
+		a4l_info(subd->dev, "subd %d cancelling subd %d too \n",
+			subd->idx, AI2_SUBD);
+
+		a4l_buf_evt(ai2_subd, A4L_BUF_EOA);
+	}
+}
+
+/* --- Asynchronous 2nd AI functions --- */
+
+int ai2_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	priv->ai2_running = 1;
+	return 0;
+}
+
+void ai2_cancel(struct a4l_subdevice *subd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+	struct ao_ai2_priv *ai2_priv = *((struct ao_ai2_priv **)subd->priv);
+
+	int running;
+
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	priv->ai2_running = 0;
+
+	running = priv->ao_running;
+	if (running) {
+		struct a4l_subdevice *ao_subd =
+			(struct a4l_subdevice *)a4l_get_subd(subd->dev, AO_SUBD);
+		/* We have not saved the required amount of data,
+		   so we cannot know whether this is the end of the
+		   acquisition; that is why we force it */
+		priv->ao_running = 0;
+		ai2_priv->count = 0;
+
+		a4l_info(subd->dev, "subd %d cancelling subd %d too \n",
+			 subd->idx, AO_SUBD);
+
+		a4l_buf_evt(ao_subd, A4L_BUF_EOA);
+	}
+
+}
+
+
+/* --- Synchronous AI functions --- */
+
+static int ai_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct ai_priv *priv = (struct ai_priv *)subd->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+	int i;
+
+	for(i = 0; i < insn->data_size / sizeof(uint16_t); i++)
+		data[i] = ai_value_output(priv);
+
+	return 0;
+}
+
+/* --- Synchronous DIO function --- */
+
+static int dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct dio_priv *priv = (struct dio_priv *)subd->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	if (insn->data_size != 2 * sizeof(uint16_t))
+		return -EINVAL;
+
+	if (data[0] != 0) {
+		priv->bits_values &= ~(data[0]);
+		priv->bits_values |= (data[0] & data[1]);
+	}
+
+	data[1] = priv->bits_values;
+
+	return 0;
+}
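+
+/*
+ * Illustrative call (hypothetical values), matching the handler above:
+ * data[0] is the mask of channels to update and data[1] carries the new
+ * values; writing data[0] = 0x0003, data[1] = 0x0001 sets channel 0,
+ * clears channel 1, and data[1] returns the state of all 16 lines.
+ */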
+
+/* --- Synchronous AO + AI2 functions --- */
+
+int ao_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct ao_ai2_priv *priv = (struct ao_ai2_priv *)subd->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	/* Checks the buffer size */
+	if (insn->data_size != sizeof(uint16_t))
+		return -EINVAL;
+
+	/* Retrieves the value to memorize */
+	priv->insn_value = data[0];
+
+	return 0;
+}
+
+int ai2_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct ao_ai2_priv *priv = *((struct ao_ai2_priv **)subd->priv);
+	uint16_t *data = (uint16_t *)insn->data;
+
+	/* Checks the buffer size */
+	if (insn->data_size != sizeof(uint16_t))
+		return -EINVAL;
+
+	/* Sets the memorized value */
+	data[0] = priv->insn_value;
+
+	return 0;
+}
+
+/* --- Global task part --- */
+
+/* One task is enough for all the asynchronous subdevices; it is just a fake
+ * driver after all.
+ */
+static void task_proc(void *arg)
+{
+	struct a4l_subdevice *ai_subd, *ao_subd, *ai2_subd;
+	struct a4l_device *dev;
+	struct fake_priv *priv;
+	int running;
+
+	dev = arg;
+	ai_subd = a4l_get_subd(dev, AI_SUBD);
+	ao_subd = a4l_get_subd(dev, AO_SUBD);
+	ai2_subd = a4l_get_subd(dev, AI2_SUBD);
+
+	priv = dev->priv;
+
+	while(!rtdm_task_should_stop()) {
+
+		/* copy sample static data from the subd private buffer to the
+		 * asynchronous buffer
+		 */
+		running = priv->ai_running;
+		if (running && ai_push_values(ai_subd) < 0) {
+			/* on error, wait for detach to destroy the task */
+			rtdm_task_sleep(RTDM_TIMEOUT_INFINITE);
+			continue;
+		}
+
+		/*
+		 * pull the data from the output subdevice (asynchronous buffer)
+		 * into its private buffer
+		 */
+		running = priv->ao_running;
+		if (running && ao_pull_values(ao_subd) < 0) {
+			rtdm_task_sleep(RTDM_TIMEOUT_INFINITE);
+			continue;
+		}
+
+		running = priv->ai2_running;
+		/*
+		 * then loop it to the ai2 subd since their private data is shared: so
+		 * pull the data from the private buffer back into the device's
+		 * asynchronous buffer
+		 */
+		if (running && ai2_push_values(ai2_subd) < 0) {
+			rtdm_task_sleep(RTDM_TIMEOUT_INFINITE);
+			continue;
+		}
+
+		rtdm_task_sleep(TASK_PERIOD);
+	}
+}
+
+/* --- Initialization functions --- */
+
+void setup_ai_subd(struct a4l_subdevice *subd)
+{
+	/* Fill the subdevice structure */
+	subd->flags |= A4L_SUBD_AI;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->flags |= A4L_SUBD_MMAP;
+	subd->rng_desc = &analog_rngdesc;
+	subd->chan_desc = &analog_chandesc;
+	subd->do_cmd = ai_cmd;
+	subd->do_cmdtest = ai_cmdtest;
+	subd->cancel = ai_cancel;
+	subd->munge = ai_munge;
+	subd->cmd_mask = &ai_cmd_mask;
+	subd->insn_read = ai_insn_read;
+}
+
+void setup_dio_subd(struct a4l_subdevice *subd)
+{
+	/* Fill the subdevice structure */
+	subd->flags |= A4L_SUBD_DIO;
+	subd->chan_desc = &dio_chandesc;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = dio_insn_bits;
+}
+
+void setup_ao_subd(struct a4l_subdevice *subd)
+{
+	/* Fill the subdevice structure */
+	subd->flags |= A4L_SUBD_AO;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->flags |= A4L_SUBD_MMAP;
+	subd->rng_desc = &analog_rngdesc;
+	subd->chan_desc = &analog_chandesc;
+	subd->do_cmd = ao_cmd;
+	subd->cancel = ao_cancel;
+	subd->trigger = ao_trigger;
+	subd->cmd_mask = &ao_cmd_mask;
+	subd->insn_write = ao_insn_write;
+}
+
+void setup_ai2_subd(struct a4l_subdevice *subd)
+{
+	/* Fill the subdevice structure */
+	subd->flags |= A4L_SUBD_AI;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->flags |= A4L_SUBD_MMAP;
+	subd->rng_desc = &analog_rngdesc;
+	subd->chan_desc = &analog_chandesc;
+	subd->do_cmd = ai2_cmd;
+	subd->cancel = ai2_cancel;
+	subd->cmd_mask = &ai_cmd_mask;
+	subd->insn_read = ai2_insn_read;
+}
+
+/* --- Attach / detach functions ---  */
+
+int test_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	typedef void (*setup_subd_function) (struct a4l_subdevice *subd);
+	struct fake_priv *priv = (struct fake_priv *) dev->priv;
+	struct a4l_subdevice *subd;
+	unsigned long tmp;
+	struct ai_priv *r;
+	int i, ret = 0;
+
+	struct initializers {
+		struct a4l_subdevice *subd;
+		setup_subd_function init;
+		int private_len;
+		char *name;
+		int index;
+	} sds[] = {
+		[AI_SUBD] = {
+			.name = "AI",
+			.private_len = sizeof(struct ai_priv),
+			.init = setup_ai_subd,
+			.index = AI_SUBD,
+			.subd = NULL,
+		},
+		[DIO_SUBD] = {
+			.name = "DIO",
+			.private_len = sizeof(struct dio_priv),
+			.init = setup_dio_subd,
+			.index = DIO_SUBD,
+			.subd = NULL,
+		},
+		[AO_SUBD] = {
+			.name = "AO",
+			.private_len = sizeof(struct ao_ai2_priv),
+			.init = setup_ao_subd,
+			.index = AO_SUBD,
+			.subd = NULL,
+		},
+		[AI2_SUBD] = {
+			.name = "AI2",
+			.private_len = sizeof(struct ao_ai2_priv *),
+			.init = setup_ai2_subd,
+			.index = AI2_SUBD,
+			.subd = NULL,
+		},
+	};
+
+	a4l_dbg(1, drv_dbg, dev, "starting attach procedure...\n");
+
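+	/*
+	 * The link descriptor may carry up to two unsigned longs:
+	 * opts[0] divides the generated amplitude, opts[1] (valid
+	 * range 1..7) sets how many waveform steps the index advances
+	 * per sample; out-of-range values fall back to 1.
+	 */
+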
+	/* Set default values for attach parameters */
+	priv->amplitude_div = 1;
+	priv->quanta_cnt = 1;
+	if (arg->opts_size) {
+		unsigned long *args = (unsigned long *)arg->opts;
+		priv->amplitude_div = args[0];
+		if (arg->opts_size == 2 * sizeof(unsigned long))
+			priv->quanta_cnt = (args[1] > 7 || args[1] == 0) ?
+				1 : args[1];
+	}
+
+	/* create and register the subdevices */
+	for (i = 0; i < ARRAY_SIZE(sds) ; i++) {
+
+		subd = a4l_alloc_subd(sds[i].private_len, sds[i].init);
+		if (subd == NULL)
+			return -ENOMEM;
+
+		ret = a4l_add_subd(dev, subd);
+		if (ret != sds[i].index)
+			return (ret < 0) ? ret : -EINVAL;
+
+		sds[i].subd = subd;
+
+		a4l_dbg(1, drv_dbg, dev, " %s subdev registered \n", sds[i].name);
+	}
+
+	/* initialize specifics */
+	r = (void *) sds[AI_SUBD].subd->priv;
+	r->amplitude_div = priv->amplitude_div;
+	r->quanta_cnt = priv->quanta_cnt;
+
+	/* AO and AI2 share the same private buffer */
+	tmp = (unsigned long) sds[AO_SUBD].subd->priv;
+	memcpy(sds[AI2_SUBD].subd->priv, &tmp, sds[AI2_SUBD].private_len) ;
+
+	/* create the task */
+	ret = rtdm_task_init(&priv->task, "Fake AI task", task_proc, dev,
+		             RTDM_TASK_HIGHEST_PRIORITY, 0);
+	if (ret)
+		a4l_dbg(1, drv_dbg, dev, "Error creating A4L task \n");
+
+	a4l_dbg(1, drv_dbg, dev, "attach procedure completed: "
+				 "adiv = %lu, qcount = %lu \n"
+		                  , priv->amplitude_div, priv->quanta_cnt);
+
+	return ret;
+}
+
+int test_detach(struct a4l_device *dev)
+{
+	struct fake_priv *priv = (struct fake_priv *)dev->priv;
+
+	rtdm_task_destroy(&priv->task);
+	a4l_dbg(1, drv_dbg, dev, "detach procedure complete\n");
+
+	return 0;
+}
+
+/* --- Module stuff --- */
+
+static struct a4l_driver test_drv = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_fake",
+	.driver_name = "fake",
+	.attach = test_attach,
+	.detach = test_detach,
+	.privdata_size = sizeof(struct fake_priv),
+};
+
+static int __init a4l_fake_init(void)
+{
+	return a4l_register_drv(&test_drv);
+}
+
+static void __exit a4l_fake_cleanup(void)
+{
+	a4l_unregister_drv(&test_drv);
+}
+
+MODULE_DESCRIPTION("Analogy fake driver");
+MODULE_LICENSE("GPL");
+
+module_init(a4l_fake_init);
+module_exit(a4l_fake_cleanup);
+++ linux-patched/drivers/xenomai/analogy/testing/Kconfig	2022-03-21 12:58:31.182871477 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/testing/Makefile	1970-01-01 01:00:00.000000000 +0100
+
+config XENO_DRIVERS_ANALOGY_FAKE
+	depends on XENO_DRIVERS_ANALOGY
+	tristate "Fake driver"
+	default n
+	help
+
+	The fake driver provides several subdevices:
+	- 0: analog input;
+	- 1: digital input / output;
+	- 2: analog output;
+	- 3: analog input; data written into subdevice 2 can be
+	  read here.
+++ linux-patched/drivers/xenomai/analogy/testing/Makefile	2022-03-21 12:58:31.175871545 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/testing/loop.c	1970-01-01 01:00:00.000000000 +0100
+
+ccflags-y += -I$(srctree)/drivers/xenomai/analogy
+
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_FAKE) += analogy_fake.o
+
+analogy_fake-y := fake.o
+
+analogy_loop-y := loop.o
+++ linux-patched/drivers/xenomai/analogy/testing/loop.c	2022-03-21 12:58:31.167871623 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/pcimio.c	1970-01-01 01:00:00.000000000 +0100
+#include <linux/module.h>
+#include <rtdm/analogy/device.h>
+
+#define LOOP_TASK_PERIOD 1000000
+#define LOOP_NB_BITS 16
+
+#define LOOP_INPUT_SUBD 0
+#define LOOP_OUTPUT_SUBD 1
+
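+/*
+ * The loop driver wires two subdevices back to back: samples pushed into
+ * the output subdevice (1) are copied by a periodic task into the input
+ * subdevice (0), where they can be read back.
+ */
+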
+/* Channels descriptor */
+static struct a4l_channels_desc loop_chandesc = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 8,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, LOOP_NB_BITS},
+	},
+};
+
+/* Ranges tab */
+static struct a4l_rngtab loop_rngtab = {
+	.length =  2,
+	.rngs = {
+		RANGE_V(-5,5),
+		RANGE_V(-10,10),
+	},
+};
+/* Ranges descriptor */
+struct a4l_rngdesc loop_rngdesc = RNG_GLOBAL(loop_rngtab);
+
+/* Command options mask */
+static struct a4l_cmd_desc loop_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW | TRIG_INT,
+	.scan_begin_src = TRIG_TIMER,
+	.convert_src = TRIG_NOW | TRIG_TIMER,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_COUNT| TRIG_NONE,
+};
+
+/* Private data organization */
+struct loop_priv {
+
+	/* Task descriptor */
+	rtdm_task_t loop_task;
+
+	/* Misc fields */
+	int loop_running;
+	uint16_t loop_insn_value;
+};
+typedef struct loop_priv lpprv_t;
+
+/* Attach arguments contents */
+struct loop_attach_arg {
+	unsigned long period;
+};
+typedef struct loop_attach_arg lpattr_t;
+
+static void loop_task_proc(void *arg);
+
+/* --- Task part --- */
+
+/* Timer task routine  */
+static void loop_task_proc(void *arg)
+{
+	struct a4l_device *dev = (struct a4l_device*)arg;
+	struct a4l_subdevice *input_subd, *output_subd;
+	lpprv_t *priv = (lpprv_t *)dev->priv;
+
+	input_subd = a4l_get_subd(dev, LOOP_INPUT_SUBD);
+	output_subd = a4l_get_subd(dev, LOOP_OUTPUT_SUBD);
+
+	if (input_subd == NULL || output_subd == NULL) {
+		a4l_err(dev, "loop_task_proc: subdevices unavailable\n");
+		return;
+	}
+
+	while (1) {
+
+		int running;
+
+		running = priv->loop_running;
+
+		if (running) {
+			uint16_t value;
+			int ret=0;
+
+			while (ret==0) {
+
+				ret = a4l_buf_get(output_subd,
+						  &value, sizeof(uint16_t));
+				if (ret == 0) {
+
+					a4l_info(dev,
+						 "loop_task_proc: "
+						 "data available\n");
+
+					a4l_buf_evt(output_subd, 0);
+
+					ret = a4l_buf_put(input_subd,
+							  &value,
+							  sizeof(uint16_t));
+
+					if (ret==0)
+						a4l_buf_evt(input_subd, 0);
+				}
+			}
+		}
+
+		rtdm_task_sleep(LOOP_TASK_PERIOD);
+	}
+}
+
+/* --- Analogy Callbacks --- */
+
+/* Command callback */
+int loop_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	a4l_info(subd->dev, "loop_cmd: (subd=%d)\n", subd->idx);
+
+	return 0;
+
+}
+
+/* Trigger callback */
+int loop_trigger(struct a4l_subdevice *subd, lsampl_t trignum)
+{
+	lpprv_t *priv = (lpprv_t *)subd->dev->priv;
+
+	a4l_info(subd->dev, "loop_trigger: (subd=%d)\n", subd->idx);
+
+	priv->loop_running = 1;
+
+	return 0;
+}
+
+/* Cancel callback */
+void loop_cancel(struct a4l_subdevice *subd)
+{
+	lpprv_t *priv = (lpprv_t *)subd->dev->priv;
+
+	a4l_info(subd->dev, "loop_cancel: (subd=%d)\n", subd->idx);
+
+	priv->loop_running = 0;
+}
+
+/* Read instruction callback */
+int loop_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	lpprv_t *priv = (lpprv_t*)subd->dev->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	/* Checks the buffer size */
+	if (insn->data_size != sizeof(uint16_t))
+		return -EINVAL;
+
+	/* Sets the memorized value */
+	data[0] = priv->loop_insn_value;
+
+	return 0;
+}
+
+/* Write instruction callback */
+int loop_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	lpprv_t *priv = (lpprv_t*)subd->dev->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	/* Checks the buffer size */
+	if (insn->data_size != sizeof(uint16_t))
+		return -EINVAL;
+
+	/* Retrieves the value to memorize */
+	priv->loop_insn_value = data[0];
+
+	return 0;
+}
+
+void setup_input_subd(struct a4l_subdevice *subd)
+{
+	memset(subd, 0, sizeof(struct a4l_subdevice));
+
+	subd->flags |= A4L_SUBD_AI;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->flags |= A4L_SUBD_MMAP;
+	subd->rng_desc = &loop_rngdesc;
+	subd->chan_desc = &loop_chandesc;
+	subd->do_cmd = loop_cmd;
+	subd->cancel = loop_cancel;
+	subd->cmd_mask = &loop_cmd_mask;
+	subd->insn_read = loop_insn_read;
+	subd->insn_write = loop_insn_write;
+}
+
+void setup_output_subd(struct a4l_subdevice *subd)
+{
+	memset(subd, 0, sizeof(struct a4l_subdevice));
+
+	subd->flags = A4L_SUBD_AO;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->flags |= A4L_SUBD_MMAP;
+	subd->rng_desc = &loop_rngdesc;
+	subd->chan_desc = &loop_chandesc;
+	subd->do_cmd = loop_cmd;
+	subd->cancel = loop_cancel;
+	subd->trigger = loop_trigger;
+	subd->cmd_mask = &loop_cmd_mask;
+	subd->insn_read = loop_insn_read;
+	subd->insn_write = loop_insn_write;
+}
+
+/* Attach callback */
+int loop_attach(struct a4l_device *dev,
+		a4l_lnkdesc_t *arg)
+{
+	int ret = 0;
+	struct a4l_subdevice *subd;
+	lpprv_t *priv = (lpprv_t *)dev->priv;
+
+	/* Add the fake input subdevice */
+	subd = a4l_alloc_subd(0, setup_input_subd);
+	if (subd == NULL)
+		return -ENOMEM;
+
+	ret = a4l_add_subd(dev, subd);
+	if (ret != LOOP_INPUT_SUBD)
+		/* Let Analogy free the just-allocated subdevice */
+		return (ret < 0) ? ret : -EINVAL;
+
+	/* Add the fake output subdevice */
+	subd = a4l_alloc_subd(0, setup_output_subd);
+	if (subd == NULL)
+		/* Let Analogy free the just-allocated subdevice */
+		return -ENOMEM;
+
+	ret = a4l_add_subd(dev, subd);
+	if (ret != LOOP_OUTPUT_SUBD)
+		/* Let Analogy free the just-allocated subdevices */
+		return (ret < 0) ? ret : -EINVAL;
+
+	priv->loop_running = 0;
+	priv->loop_insn_value = 0;
+
+	ret = rtdm_task_init(&priv->loop_task,
+			    "a4l_loop task",
+			    loop_task_proc,
+			    dev, RTDM_TASK_HIGHEST_PRIORITY, 0);
+
+	return ret;
+}
+
+/* Detach callback */
+int loop_detach(struct a4l_device *dev)
+{
+	lpprv_t *priv = (lpprv_t *)dev->priv;
+
+	rtdm_task_destroy(&priv->loop_task);
+
+	return 0;
+}
+
+/* --- Module part --- */
+
+static struct a4l_driver loop_drv = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_loop",
+	.attach = loop_attach,
+	.detach = loop_detach,
+	.privdata_size = sizeof(lpprv_t),
+};
+
+static int __init a4l_loop_init(void)
+{
+	return a4l_register_drv(&loop_drv);
+}
+
+static void __exit a4l_loop_cleanup(void)
+{
+	a4l_unregister_drv(&loop_drv);
+}
+
+MODULE_DESCRIPTION("Analogy loop driver");
+MODULE_LICENSE("GPL");
+
+module_init(a4l_loop_init);
+module_exit(a4l_loop_cleanup);
+++ linux-patched/drivers/xenomai/analogy/national_instruments/pcimio.c	2022-03-21 12:58:31.160871692 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/ni_mio.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Hardware driver for NI PCI-MIO E series cards
+ *
+ * Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Description: National Instruments PCI-MIO-E series and M series
+ * (all boards)
+ *
+ * Author: ds, John Hallen, Frank Mori Hess, Rolf Mueller, Herbert Peremans,
+ * Herman Bruyninckx, Terry Barnaby
+ * Status: works
+ * Devices: [National Instruments] PCI-MIO-16XE-50 (ni_pcimio),
+ * PCI-MIO-16XE-10, PXI-6030E, PCI-MIO-16E-1, PCI-MIO-16E-4, PCI-6014,
+ * PCI-6040E,PXI-6040E, PCI-6030E, PCI-6031E, PCI-6032E, PCI-6033E,
+ * PCI-6071E, PCI-6023E, PCI-6024E, PCI-6025E, PXI-6025E, PCI-6034E,
+ * PCI-6035E, PCI-6052E, PCI-6110, PCI-6111, PCI-6220, PCI-6221,
+ * PCI-6224, PCI-6225, PCI-6229, PCI-6250, PCI-6251, PCIe-6251,
+ * PCI-6254, PCI-6259, PCIe-6259, PCI-6280, PCI-6281, PXI-6281,
+ * PCI-6284, PCI-6289, PCI-6711, PXI-6711, PCI-6713, PXI-6713,
+ * PXI-6071E, PCI-6070E, PXI-6070E, PXI-6052E, PCI-6036E, PCI-6731,
+ * PCI-6733, PXI-6733, PCI-6143, PXI-6143
+ *
+ * These boards are almost identical to the AT-MIO E series, except that
+ * they use the PCI bus instead of ISA (i.e., AT).  See the notes for
+ * the ni_atmio.o driver for additional information about these boards.
+ *
+ * By default, the driver uses DMA to transfer analog input data to
+ * memory.  When DMA is enabled, not all triggering features are
+ * supported.
+ *
+ * Note that the PCI-6143 is a simultaneous sampling device with 8
+ * convertors. With this board all of the convertors perform one
+ * simultaneous sample during a scan interval. The period for a scan
+ * is used for the convert time in an Analogy cmd. The convert trigger
+ * source is normally set to TRIG_NOW by default.
+ *
+ * The RTSI trigger bus is supported on these cards on subdevice
+ * 10. See the Analogy library documentation for details.
+ *
+ * References:
+ * 341079b.pdf  PCI E Series Register-Level Programmer Manual
+ * 340934b.pdf  DAQ-STC reference manual
+ * 322080b.pdf  6711/6713/6715 User Manual
+ * 320945c.pdf  PCI E Series User Manual
+ * 322138a.pdf  PCI-6052E and DAQPad-6052E User Manual
+ *
+ * ISSUES:
+ * - When DMA is enabled, XXX_EV_CONVERT does not work correctly.
+ * - Calibration is not fully implemented
+ * - SCXI is probably broken for m-series boards
+ * - Digital I/O may not work on 673x.
+ * - Information (number of channels, bits, etc.) for some devices may
+ *   be incorrect.  Please check this and submit a bug if there are
+ *   problems for your device.
+ * - Need to deal with external reference for DAC, and other DAC
+ *   properties in board properties
+ * - Deal with at-mio-16de-10 revision D to N changes, etc.
+ * - Need to add other CALDAC type
+ * - Need to slow down DAC loading.  I don't trust NI's claim that two
+ *   writes to the PCI bus slows IO enough.  I would prefer to use
+ *   a4l_udelay().  Timing specs: (clock)
+ *     AD8522   30ns
+ *     DAC8043  120ns
+ *     DAC8800  60ns
+ *     MB88341   ?
+ *
+ */
+
+#include <linux/module.h>
+#include <rtdm/analogy/device.h>
+
+#include "../intel/8255.h"
+#include "ni_stc.h"
+#include "ni_mio.h"
+#include "mite.h"
+
+#define PCIMIO_IRQ_POLARITY 1
+
+/* The following two tables must be in the same order */
+static struct pci_device_id ni_pci_table[] __maybe_unused = {
+	{ PCI_VENDOR_ID_NATINST, 0x0162, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1170, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1190, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x11b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x11c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x11d0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1270, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x14e0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x14f0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x15b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x18b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x18c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2430, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2890, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x28c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2a60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2a70, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2a80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2ab0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2b80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2b90, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2ca0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70aa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70ab, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70ac, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70af, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70b4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70b6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70b7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70bc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70bd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70bf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x710d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x716c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x717f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x71bc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x717d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ 0 }
+};
+MODULE_DEVICE_TABLE(pci, ni_pci_table);
+
+/* These are not all the possible ao ranges for 628x boards.
+ They can do OFFSET +- REFERENCE where OFFSET can be
+ 0V, 5V, APFI<0,1>, or AO<0...3> and RANGE can
+ be 10V, 5V, 2V, 1V, APFI<0,1>, AO<0...3>.  That's
+ 63 different possibilities.  An AO channel
+ cannot act as its own OFFSET or REFERENCE.
+*/
+
+#if 0
+static struct a4l_rngtab rng_ni_M_628x_ao = { 8, {
+	RANGE(-10, 10),
+	RANGE(-5, 5),
+	RANGE(-2, 2),
+	RANGE(-1, 1),
+	RANGE(-5, 15),
+	RANGE(0, 10),
+	RANGE(3, 7),
+	RANGE(4, 6),
+	RANGE_ext(-1, 1)
+}};
+static struct a4l_rngdesc range_ni_M_628x_ao =
+	RNG_GLOBAL(rng_ni_M_628x_ao);
+#endif
+
+static struct a4l_rngtab rng_ni_M_625x_ao = { 3, {
+	RANGE(-10, 10),
+	RANGE(-5, 5),
+	RANGE_ext(-1, 1)
+}};
+static struct a4l_rngdesc range_ni_M_625x_ao =
+	RNG_GLOBAL(rng_ni_M_625x_ao);
+
+static struct a4l_rngtab rng_ni_M_622x_ao = { 1, {
+	RANGE(-10, 10),
+}};
+static struct a4l_rngdesc range_ni_M_622x_ao =
+	RNG_GLOBAL(rng_ni_M_622x_ao);
+
+static ni_board ni_boards[]={
+	{       device_id:      0x0162, // NI also says 0x1620.  typo?
+		name:           "pci-mio-16xe-50",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  2048,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_8,
+		ai_speed:	50000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_speed:	50000,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043},
+		has_8255:       0,
+	},
+	{       device_id:      0x1170,
+		name:           "pci-mio-16xe-10", // aka pci-6030E
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_14,
+		ai_speed:	10000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	10000,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043,ad8522},
+		has_8255:       0,
+	},
+	{	device_id:      0x28c0,
+		name:           "pci-6014",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_4,
+		ai_speed:       5000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_speed:	100000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:       0,
+	},
+	{       device_id:      0x11d0,
+		name:           "pxi-6030e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_14,
+		ai_speed:	10000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	10000,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043,ad8522},
+		has_8255:       0,
+	},
+
+	{       device_id:      0x1180,
+		name:           "pci-mio-16e-1",	/* aka pci-6070e */
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_16,
+		ai_speed:	800,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		caldac:         {mb88341},
+		has_8255:       0,
+	},
+	{       device_id:      0x1190,
+		name:           "pci-mio-16e-4", /* aka pci-6040e */
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_16,
+		/* Note: there have been reported problems with full speed
+		 * on this board */
+		ai_speed:	2000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  512,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug}, // doc says mb88341
+		has_8255:       0,
+	},
+	{       device_id:      0x11c0,
+		name:           "pxi-6040e",
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_16,
+		ai_speed:	2000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  512,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		caldac:         {mb88341},
+		has_8255:       0,
+	},
+
+	{       device_id:      0x1330,
+		name:           "pci-6031e",
+		n_adchan:       64,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_14,
+		ai_speed:	10000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	10000,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043,ad8522},
+		has_8255:       0,
+	},
+	{       device_id:      0x1270,
+		name:           "pci-6032e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_14,
+		ai_speed:	10000,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043,ad8522},
+		has_8255:       0,
+	},
+	{       device_id:      0x1340,
+		name:           "pci-6033e",
+		n_adchan:       64,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_14,
+		ai_speed:	10000,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043,ad8522},
+		has_8255:       0,
+	},
+	{       device_id:      0x1350,
+		name:           "pci-6071e",
+		n_adchan:       64,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_16,
+		ai_speed:	800,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:       0,
+	},
+	{       device_id:      0x2a60,
+		name:           "pci-6023e",
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       0,
+		aobits:         0,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug}, /* manual is wrong */
+		has_8255:	0,
+	},
+	{       device_id:      0x2a70,
+		name:           "pci-6024e",
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_speed:	100000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug}, /* manual is wrong */
+		has_8255:	0,
+	},
+	{       device_id:      0x2a80,
+		name:           "pci-6025e",
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_speed:	100000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug}, /* manual is wrong */
+		has_8255:	1,
+	},
+	{       device_id:      0x2ab0,
+		name:           "pxi-6025e",
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	100000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug}, /* manual is wrong */
+		has_8255:	1,
+	},
+
+	{       device_id:      0x2ca0,
+		name:           "pci-6034e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:	0,
+	},
+	{       device_id:      0x2c80,
+		name:           "pci-6035e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_speed:	100000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:	0,
+	},
+	{       device_id:      0x18b0,
+		name:           "pci-6052e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_16,
+		ai_speed:	3000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_unipolar:    1,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_speed:	3000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug,ad8804_debug,ad8522}, /* manual is wrong */
+	},
+	{       device_id:      0x14e0,
+		name:           "pci-6110",
+		n_adchan:       4,
+		adbits:         12,
+		ai_fifo_depth:  8192,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_611x,
+		ai_speed:	200,
+		n_aochan:       2,
+		aobits:         16,
+		reg_type:	ni_reg_611x,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_fifo_depth:  2048,
+		ao_speed:	250,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804,ad8804},
+	},
+	{       device_id:      0x14f0,
+		name:           "pci-6111",
+		n_adchan:       2,
+		adbits:         12,
+		ai_fifo_depth:  8192,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_611x,
+		ai_speed:	200,
+		n_aochan:       2,
+		aobits:         16,
+		reg_type:	ni_reg_611x,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_fifo_depth:  2048,
+		ao_speed:	250,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804,ad8804},
+	},
+#if 0 /* Need device IDs */
+	/* The 6115 boards probably need their own driver */
+	{       device_id:      0x2ed0,
+		name:           "pci-6115",
+		n_adchan:       4,
+		adbits:         12,
+		ai_fifo_depth:  8192,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_611x,
+		ai_speed:	100,
+		n_aochan:       2,
+		aobits:         16,
+		ao_671x:	1,
+		ao_unipolar:    0,
+		ao_fifo_depth:  2048,
+		ao_speed:	250,
+		.num_p0_dio_channels = 8,
+		reg_611x:	1,
+		caldac:         {ad8804_debug,ad8804_debug,ad8804_debug},/* XXX */
+	},
+#endif
+#if 0 /* Need device IDs */
+	{       device_id:      0x0000,
+		name:           "pxi-6115",
+		n_adchan:       4,
+		adbits:         12,
+		ai_fifo_depth:  8192,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_611x,
+		ai_speed:	100,
+		n_aochan:       2,
+		aobits:         16,
+		ao_671x:	1,
+		ao_unipolar:    0,
+		ao_fifo_depth:  2048,
+		ao_speed:	250,
+		reg_611x:	1,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug,ad8804_debug,ad8804_debug},/* XXX */
+	},
+#endif
+	{       device_id:      0x1880,
+		name:           "pci-6711",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	4,
+		aobits:         12,
+		ao_unipolar:    0,
+		ao_fifo_depth:  16384, /* data sheet says 8192, but fifo really holds 16384 samples */
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6711,
+		caldac:         {ad8804_debug},
+	},
+	{       device_id:      0x2b90,
+		name:           "pxi-6711",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	4,
+		aobits:         12,
+		ao_unipolar:    0,
+		ao_fifo_depth:  16384,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6711,
+		caldac:         {ad8804_debug},
+	},
+	{       device_id:      0x1870,
+		name:           "pci-6713",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	8,
+		aobits:         12,
+		ao_unipolar:    0,
+		ao_fifo_depth:  16384,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6713,
+		caldac:         {ad8804_debug,ad8804_debug},
+	},
+	{       device_id:      0x2b80,
+		name:           "pxi-6713",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	8,
+		aobits:         12,
+		ao_unipolar:    0,
+		ao_fifo_depth:  16384,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6713,
+		caldac:         {ad8804_debug,ad8804_debug},
+	},
+	{	device_id:	0x2430,
+		name:           "pci-6731",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	4,
+		aobits:         16,
+		ao_unipolar:    0,
+		ao_fifo_depth:  8192,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6711,
+		caldac:         {ad8804_debug},
+	},
+#if 0	/* Need device IDs */
+	{       device_id:      0x0,
+		name:           "pxi-6731",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	4,
+		aobits:         16,
+		ao_unipolar:    0,
+		ao_fifo_depth:  8192,
+		.ao_range_table = &a4l_range_bipolar10,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6711,
+		caldac:         {ad8804_debug},
+	},
+#endif
+	{       device_id:      0x2410,
+		name:           "pci-6733",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	8,
+		aobits:         16,
+		ao_unipolar:    0,
+		ao_fifo_depth:  16384,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6713,
+		caldac:         {ad8804_debug,ad8804_debug},
+	},
+	{       device_id:      0x2420,
+		name:           "pxi-6733",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	8,
+		aobits:         16,
+		ao_unipolar:    0,
+		ao_fifo_depth:  16384,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6713,
+		caldac:         {ad8804_debug,ad8804_debug},
+	},
+	{	device_id:      0x15b0,
+		name:           "pxi-6071e",
+		n_adchan:       64,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_16,
+		ai_speed:       800,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:       0,
+	},
+	{	device_id:      0x11b0,
+		name:           "pxi-6070e",
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_16,
+		ai_speed:       800,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:       0,
+	},
+	{	device_id:      0x18c0,
+		name:           "pxi-6052e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_16,
+		ai_speed:	3000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_unipolar:    1,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_speed:	3000,
+		.num_p0_dio_channels = 8,
+		caldac:         {mb88341,mb88341,ad8522},
+	},
+	{	device_id:      0x1580,
+		name:           "pxi-6031e",
+		n_adchan:       64,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_14,
+		ai_speed:	10000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	10000,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043,ad8522},
+	},
+	{	device_id:      0x2890,
+		name:           "pci-6036e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_speed:	100000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:	0,
+	},
+	{	device_id:      0x70b0,
+		name:           "pci-6220",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,	//FIXME: guess
+		gainlkup:       ai_gain_622x,
+		ai_speed:	4000,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_622x,
+		ao_unipolar:    0,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70af,
+		name:           "pci-6221",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		gainlkup:       ai_gain_622x,
+		ai_speed:	4000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &a4l_range_bipolar10,
+		reg_type:	ni_reg_622x,
+		ao_unipolar:    0,
+		ao_speed:	1200,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x71bc,
+		name:           "pci-6221_37pin",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		gainlkup:       ai_gain_622x,
+		ai_speed:	4000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &a4l_range_bipolar10,
+		reg_type:	ni_reg_622x,
+		ao_unipolar:    0,
+		ao_speed:	1200,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70f2,
+		name:           "pci-6224",
+		n_adchan:       32,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		gainlkup:       ai_gain_622x,
+		ai_speed:	4000,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		reg_type:	ni_reg_622x,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x716c,
+		name:           "pci-6225",
+		n_adchan:       80,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		gainlkup:       ai_gain_622x,
+		ai_speed:	4000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_622x_ao,
+		reg_type:	ni_reg_622x,
+		ao_unipolar:    0,
+		ao_speed:	1200,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70aa,
+		name:           "pci-6229",
+		n_adchan:       32,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		gainlkup:       ai_gain_622x,
+		ai_speed:	4000,
+		n_aochan:       4,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_622x_ao,
+		reg_type:	ni_reg_622x,
+		ao_unipolar:    0,
+		ao_speed:	1200,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70b4,
+		name:           "pci-6250",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	800,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		reg_type:	ni_reg_625x,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70b8,
+		name:           "pci-6251",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	800,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_625x_ao,
+		reg_type:	ni_reg_625x,
+		ao_unipolar:    0,
+		ao_speed:	357,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x717d,
+		name:           "pcie-6251",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	800,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_625x_ao,
+		reg_type:	ni_reg_625x,
+		ao_unipolar:    0,
+		ao_speed:	357,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70b7,
+		name:           "pci-6254",
+		n_adchan:       32,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	800,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		reg_type:	ni_reg_625x,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70ab,
+		name:           "pci-6259",
+		n_adchan:       32,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	800,
+		n_aochan:       4,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_625x_ao,
+		reg_type:	ni_reg_625x,
+		ao_unipolar:    0,
+		ao_speed:	357,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x717f,
+		name:           "pcie-6259",
+		n_adchan:       32,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	800,
+		n_aochan:       4,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_625x_ao,
+		reg_type:	ni_reg_625x,
+		ao_unipolar:    0,
+		ao_speed:	357,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+#if 0 /* TODO: fix data size */
+	{	device_id:      0x70b6,
+		name:           "pci-6280",
+		n_adchan:       16,
+		adbits:         18,
+		ai_fifo_depth:  2047,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	1600,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  8191,
+		reg_type:	ni_reg_628x,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70bd,
+		name:           "pci-6281",
+		n_adchan:       16,
+		adbits:         18,
+		ai_fifo_depth:  2047,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	1600,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_628x_ao,
+		reg_type:	ni_reg_628x,
+		ao_unipolar:    1,
+		ao_speed:	357,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70bf,
+		name:           "pxi-6281",
+		n_adchan:       16,
+		adbits:         18,
+		ai_fifo_depth:  2047,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	1600,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_628x_ao,
+		reg_type:	ni_reg_628x,
+		ao_unipolar:    1,
+		ao_speed:	357,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70bc,
+		name:           "pci-6284",
+		n_adchan:       32,
+		adbits:         18,
+		ai_fifo_depth:  2047,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	1600,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		reg_type:	ni_reg_628x,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70ac,
+		name:           "pci-6289",
+		n_adchan:       32,
+		adbits:         18,
+		ai_fifo_depth:  2047,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	1600,
+		n_aochan:       4,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_628x_ao,
+		reg_type:	ni_reg_628x,
+		ao_unipolar:    1,
+		ao_speed:	357,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+#endif /* TODO: fix data size */
+	{	device_id:      0x70C0,
+		name:           "pci-6143",
+		n_adchan:       8,
+		adbits:         16,
+		ai_fifo_depth:  1024,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_6143,
+		ai_speed:	4000,
+		n_aochan:       0,
+		aobits:         0,
+		reg_type:	ni_reg_6143,
+		ao_unipolar:    0,
+		ao_fifo_depth:  0,
+		.num_p0_dio_channels = 8,
+		.caldac = {ad8804_debug,ad8804_debug},
+	},
+	{	device_id:      0x710D,
+		name:           "pxi-6143",
+		n_adchan:       8,
+		adbits:         16,
+		ai_fifo_depth:  1024,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_6143,
+		ai_speed:	4000,
+		n_aochan:       0,
+		aobits:         0,
+		reg_type:	ni_reg_6143,
+		ao_unipolar:    0,
+		ao_fifo_depth:  0,
+		.num_p0_dio_channels = 8,
+		.caldac = {ad8804_debug,ad8804_debug},
+	},
+};
+#define n_pcimio_boards ((sizeof(ni_boards)/sizeof(ni_boards[0])))
+
+/* How we access STC registers */
+
+/* We automatically take advantage of STC registers that can be
+ * read/written directly in the I/O space of the board.  Most
+ * PCIMIO devices map the low 8 STC registers to iobase+addr*2.
+ * The 611x devices map the write registers to iobase+addr*2, and
+ * the read registers to iobase+(addr-1)*2. */
+/* However, the 611x boards still aren't working, so I'm disabling
+ * non-windowed STC access temporarily */
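+/*
+ * Illustrative example (not used by the code below, which relies on the
+ * windowed access path): with the direct mapping described above, STC
+ * register 3 would live at iobase + 3*2 = iobase + 6 on most E series
+ * boards, whereas a 611x board would accept writes at iobase + 6 but
+ * return the value on reads from iobase + (3-1)*2 = iobase + 4.
+ */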
+
+static void e_series_win_out(struct a4l_device *dev, uint16_t data, int reg)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->window_lock, flags);
+	ni_writew(reg, Window_Address);
+	ni_writew(data, Window_Data);
+	rtdm_lock_put_irqrestore(&devpriv->window_lock, flags);
+}
+
+static uint16_t e_series_win_in(struct a4l_device *dev, int reg)
+{
+	unsigned long flags;
+	uint16_t ret;
+
+	rtdm_lock_get_irqsave(&devpriv->window_lock, flags);
+	ni_writew(reg, Window_Address);
+	ret = ni_readw(Window_Data);
+	rtdm_lock_put_irqrestore(&devpriv->window_lock,flags);
+
+	return ret;
+}
+
+static void m_series_stc_writew(struct a4l_device *dev, uint16_t data, int reg)
+{
+	unsigned offset;
+	switch(reg)
+	{
+	case ADC_FIFO_Clear:
+		offset = M_Offset_AI_FIFO_Clear;
+		break;
+	case AI_Command_1_Register:
+		offset = M_Offset_AI_Command_1;
+		break;
+	case AI_Command_2_Register:
+		offset = M_Offset_AI_Command_2;
+		break;
+	case AI_Mode_1_Register:
+		offset = M_Offset_AI_Mode_1;
+		break;
+	case AI_Mode_2_Register:
+		offset = M_Offset_AI_Mode_2;
+		break;
+	case AI_Mode_3_Register:
+		offset = M_Offset_AI_Mode_3;
+		break;
+	case AI_Output_Control_Register:
+		offset = M_Offset_AI_Output_Control;
+		break;
+	case AI_Personal_Register:
+		offset = M_Offset_AI_Personal;
+		break;
+	case AI_SI2_Load_A_Register:
+		/* This is actually a 32 bit register on m series boards */
+		ni_writel(data, M_Offset_AI_SI2_Load_A);
+		return;
+	case AI_SI2_Load_B_Register:
+		/* This is actually a 32 bit register on m series boards */
+		ni_writel(data, M_Offset_AI_SI2_Load_B);
+		return;
+	case AI_START_STOP_Select_Register:
+		offset = M_Offset_AI_START_STOP_Select;
+		break;
+	case AI_Trigger_Select_Register:
+		offset = M_Offset_AI_Trigger_Select;
+		break;
+	case Analog_Trigger_Etc_Register:
+		offset = M_Offset_Analog_Trigger_Etc;
+		break;
+	case AO_Command_1_Register:
+		offset = M_Offset_AO_Command_1;
+		break;
+	case AO_Command_2_Register:
+		offset = M_Offset_AO_Command_2;
+		break;
+	case AO_Mode_1_Register:
+		offset = M_Offset_AO_Mode_1;
+		break;
+	case AO_Mode_2_Register:
+		offset = M_Offset_AO_Mode_2;
+		break;
+	case AO_Mode_3_Register:
+		offset = M_Offset_AO_Mode_3;
+		break;
+	case AO_Output_Control_Register:
+		offset = M_Offset_AO_Output_Control;
+		break;
+	case AO_Personal_Register:
+		offset = M_Offset_AO_Personal;
+		break;
+	case AO_Start_Select_Register:
+		offset = M_Offset_AO_Start_Select;
+		break;
+	case AO_Trigger_Select_Register:
+		offset = M_Offset_AO_Trigger_Select;
+		break;
+	case Clock_and_FOUT_Register:
+		offset = M_Offset_Clock_and_FOUT;
+		break;
+	case Configuration_Memory_Clear:
+		offset = M_Offset_Configuration_Memory_Clear;
+		break;
+	case DAC_FIFO_Clear:
+		offset = M_Offset_AO_FIFO_Clear;
+		break;
+	case DIO_Control_Register:
+		rtdm_printk("%s: FIXME: register 0x%x does not map cleanly on to m-series boards.\n", __FUNCTION__, reg);
+		return;
+	case G_Autoincrement_Register(0):
+		offset = M_Offset_G0_Autoincrement;
+		break;
+	case G_Autoincrement_Register(1):
+		offset = M_Offset_G1_Autoincrement;
+		break;
+	case G_Command_Register(0):
+		offset = M_Offset_G0_Command;
+		break;
+	case G_Command_Register(1):
+		offset = M_Offset_G1_Command;
+		break;
+	case G_Input_Select_Register(0):
+		offset = M_Offset_G0_Input_Select;
+		break;
+	case G_Input_Select_Register(1):
+		offset = M_Offset_G1_Input_Select;
+		break;
+	case G_Mode_Register(0):
+		offset = M_Offset_G0_Mode;
+		break;
+	case G_Mode_Register(1):
+		offset = M_Offset_G1_Mode;
+		break;
+	case Interrupt_A_Ack_Register:
+		offset = M_Offset_Interrupt_A_Ack;
+		break;
+	case Interrupt_A_Enable_Register:
+		offset = M_Offset_Interrupt_A_Enable;
+		break;
+	case Interrupt_B_Ack_Register:
+		offset = M_Offset_Interrupt_B_Ack;
+		break;
+	case Interrupt_B_Enable_Register:
+		offset = M_Offset_Interrupt_B_Enable;
+		break;
+	case Interrupt_Control_Register:
+		offset = M_Offset_Interrupt_Control;
+		break;
+	case IO_Bidirection_Pin_Register:
+		offset = M_Offset_IO_Bidirection_Pin;
+		break;
+	case Joint_Reset_Register:
+		offset = M_Offset_Joint_Reset;
+		break;
+	case RTSI_Trig_A_Output_Register:
+		offset = M_Offset_RTSI_Trig_A_Output;
+		break;
+	case RTSI_Trig_B_Output_Register:
+		offset = M_Offset_RTSI_Trig_B_Output;
+		break;
+	case RTSI_Trig_Direction_Register:
+		offset = M_Offset_RTSI_Trig_Direction;
+		break;
+		/* FIXME: DIO_Output_Register (16 bit reg) is replaced
+		by M_Offset_Static_Digital_Output (32 bit) and
+		M_Offset_SCXI_Serial_Data_Out (8 bit) */
+	default:
+		rtdm_printk("%s: bug! unhandled register=0x%x in switch.\n",
+			    __FUNCTION__, reg);
+		BUG();
+		return;
+	}
+	ni_writew(data, offset);
+}
+
+static uint16_t m_series_stc_readw(struct a4l_device *dev, int reg)
+{
+	unsigned offset;
+	switch(reg)
+	{
+	case AI_Status_1_Register:
+		offset = M_Offset_AI_Status_1;
+		break;
+	case AO_Status_1_Register:
+		offset = M_Offset_AO_Status_1;
+		break;
+	case AO_Status_2_Register:
+		offset = M_Offset_AO_Status_2;
+		break;
+	case DIO_Serial_Input_Register:
+		return ni_readb(M_Offset_SCXI_Serial_Data_In);
+	case Joint_Status_1_Register:
+		offset = M_Offset_Joint_Status_1;
+		break;
+	case Joint_Status_2_Register:
+		offset = M_Offset_Joint_Status_2;
+		break;
+	case G_Status_Register:
+		offset = M_Offset_G01_Status;
+		break;
+	default:
+		rtdm_printk("%s: bug! "
+			    "unhandled register=0x%x in switch.\n",
+			    __FUNCTION__, reg);
+		BUG();
+		return 0;
+	}
+	return ni_readw(offset);
+}
+
+static void m_series_stc_writel(struct a4l_device *dev, uint32_t data, int reg)
+{
+	unsigned offset;
+
+	switch(reg)
+	{
+	case AI_SC_Load_A_Registers:
+		offset = M_Offset_AI_SC_Load_A;
+		break;
+	case AI_SI_Load_A_Registers:
+		offset = M_Offset_AI_SI_Load_A;
+		break;
+	case AO_BC_Load_A_Register:
+		offset = M_Offset_AO_BC_Load_A;
+		break;
+	case AO_UC_Load_A_Register:
+		offset = M_Offset_AO_UC_Load_A;
+		break;
+	case AO_UI_Load_A_Register:
+		offset = M_Offset_AO_UI_Load_A;
+		break;
+	case G_Load_A_Register(0):
+		offset = M_Offset_G0_Load_A;
+		break;
+	case G_Load_A_Register(1):
+		offset = M_Offset_G1_Load_A;
+		break;
+	case G_Load_B_Register(0):
+		offset = M_Offset_G0_Load_B;
+		break;
+	case G_Load_B_Register(1):
+		offset = M_Offset_G1_Load_B;
+		break;
+	default:
+		rtdm_printk("%s: bug! unhandled register=0x%x in switch.\n",
+			    __FUNCTION__, reg);
+		BUG();
+		return;
+	}
+	ni_writel(data, offset);
+}
+
+static uint32_t m_series_stc_readl(struct a4l_device *dev, int reg)
+{
+	unsigned offset;
+	switch(reg)
+	{
+	case G_HW_Save_Register(0):
+		offset = M_Offset_G0_HW_Save;
+		break;
+	case G_HW_Save_Register(1):
+		offset = M_Offset_G1_HW_Save;
+		break;
+	case G_Save_Register(0):
+		offset = M_Offset_G0_Save;
+		break;
+	case G_Save_Register(1):
+		offset = M_Offset_G1_Save;
+		break;
+	default:
+		rtdm_printk("%s: bug! unhandled register=0x%x in switch.\n",
+			    __FUNCTION__, reg);
+		BUG();
+		return 0;
+	}
+	return ni_readl(offset);
+}
+
+static void win_out2(struct a4l_device *dev, uint32_t data, int reg)
+{
+	devpriv->stc_writew(dev, data >> 16, reg);
+	devpriv->stc_writew(dev, data & 0xffff, reg + 1);
+}
+
+static uint32_t win_in2(struct a4l_device *dev, int reg)
+{
+	uint32_t bits;
+	bits = devpriv->stc_readw(dev, reg) << 16;
+	bits |= devpriv->stc_readw(dev, reg + 1);
+	return bits;
+}
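+/*
+ * Example of the 32 bit composition above (a sketch assuming the windowed
+ * E series access path): win_out2(dev, 0x12345678, reg) issues two 16 bit
+ * window writes, 0x1234 to 'reg' (high word) followed by 0x5678 to
+ * 'reg + 1' (low word); win_in2() reassembles the value in the same order.
+ */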
+
+static void m_series_init_eeprom_buffer(struct a4l_device *dev)
+{
+	static const int Start_Cal_EEPROM = 0x400;
+	static const unsigned window_size = 10;
+	unsigned old_iodwbsr_bits;
+	unsigned old_iodwbsr1_bits;
+	unsigned old_iodwcr1_bits;
+	int i;
+
+	old_iodwbsr_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR);
+	old_iodwbsr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
+	old_iodwcr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWCR_1);
+	writel(0x0, devpriv->mite->mite_io_addr + MITE_IODWBSR);
+	writel(((0x80 | window_size) | devpriv->mite->daq_phys_addr),
+	       devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
+	writel(0x0, devpriv->mite->mite_io_addr + MITE_IODWCR_1);
+	writel(0xf, devpriv->mite->mite_io_addr + 0x30);
+
+	for(i = 0; i < M_SERIES_EEPROM_SIZE; ++i)
+	{
+		devpriv->eeprom_buffer[i] = ni_readb(Start_Cal_EEPROM + i);
+	}
+
+	writel(old_iodwbsr1_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
+	writel(old_iodwbsr_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR);
+	writel(old_iodwcr1_bits, devpriv->mite->mite_io_addr + MITE_IODWCR_1);
+	writel(0x0, devpriv->mite->mite_io_addr + 0x30);
+}
+
+static void init_6143(struct a4l_device *dev)
+{
+	/* Disable interrupts */
+	devpriv->stc_writew(dev, 0, Interrupt_Control_Register);
+
+	/* Initialise 6143 AI specific bits */
+
+	/* Set G0,G1 DMA mode to E series version */
+	ni_writeb(0x00, Magic_6143);
+	/* Set EOCMode, ADCMode and pipelinedelay */
+	ni_writeb(0x80, PipelineDelay_6143);
+	/* Set EOC Delay */
+	ni_writeb(0x00, EOC_Set_6143);
+
+	/* Set the FIFO half full level */
+	ni_writel(boardtype.ai_fifo_depth / 2, AIFIFO_Flag_6143);
+
+	/* Strobe Relay disable bit */
+	devpriv->ai_calib_source_enabled = 0;
+	ni_writew(devpriv->ai_calib_source | Calibration_Channel_6143_RelayOff,
+		  Calibration_Channel_6143);
+	ni_writew(devpriv->ai_calib_source, Calibration_Channel_6143);
+}
+
+static int pcimio_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	int ret, bus, slot, i, irq;
+	struct mite_struct *mite = NULL;
+	struct ni_board_struct *board = NULL;
+
+	if(arg->opts == NULL || arg->opts_size == 0)
+		bus = slot = 0;
+	else {
+		bus = arg->opts_size >= sizeof(unsigned long) ?
+			((unsigned long *)arg->opts)[0] : 0;
+		slot = arg->opts_size >= sizeof(unsigned long) * 2 ?
+			((unsigned long *)arg->opts)[1] : 0;
+	}
+
+	for(i = 0; i < n_pcimio_boards && mite == NULL; i++) {
+		mite = a4l_mite_find_device(bus, slot, ni_boards[i].device_id);
+		board = &ni_boards[i];
+	}
+
+	if(mite == NULL)
+		return -ENOENT;
+
+	devpriv->irq_polarity = PCIMIO_IRQ_POLARITY;
+	devpriv->irq_pin = 0;
+
+	devpriv->mite = mite;
+	devpriv->board_ptr = board;
+
+	devpriv->ai_mite_ring = mite_alloc_ring(mite);
+	devpriv->ao_mite_ring = mite_alloc_ring(mite);
+	devpriv->cdo_mite_ring = mite_alloc_ring(mite);
+	devpriv->gpct_mite_ring[0] = mite_alloc_ring(mite);
+	devpriv->gpct_mite_ring[1] = mite_alloc_ring(mite);
+
+	if(devpriv->ai_mite_ring == NULL ||
+	   devpriv->ao_mite_ring == NULL ||
+	   devpriv->cdo_mite_ring == NULL ||
+	   devpriv->gpct_mite_ring[0] == NULL ||
+	   devpriv->gpct_mite_ring[1] == NULL)
+		return -ENOMEM;
+
+	a4l_info(dev, "found %s board\n", boardtype.name);
+
+	if(boardtype.reg_type & ni_reg_m_series_mask)
+	{
+		devpriv->stc_writew = &m_series_stc_writew;
+		devpriv->stc_readw = &m_series_stc_readw;
+		devpriv->stc_writel = &m_series_stc_writel;
+		devpriv->stc_readl = &m_series_stc_readl;
+	}else
+	{
+		devpriv->stc_writew = &e_series_win_out;
+		devpriv->stc_readw = &e_series_win_in;
+		devpriv->stc_writel = &win_out2;
+		devpriv->stc_readl = &win_in2;
+	}
+
+	ret = a4l_mite_setup(devpriv->mite, 0);
+	if(ret < 0)
+	{
+		a4l_err(dev, "pcimio_attach: error setting up mite\n");
+		return ret;
+	}
+
+	if(boardtype.reg_type & ni_reg_m_series_mask)
+		m_series_init_eeprom_buffer(dev);
+	if(boardtype.reg_type == ni_reg_6143)
+		init_6143(dev);
+
+	irq = mite_irq(devpriv->mite);
+
+	if(irq == 0){
+		a4l_warn(dev, "pcimio_attach: unknown irq (bad)\n");
+	}else{
+		a4l_info(dev, "found irq %u\n", irq);
+		ret = a4l_request_irq(dev,
+				      irq,
+				      a4l_ni_E_interrupt, RTDM_IRQTYPE_SHARED, dev);
+		if(ret < 0)
+			a4l_err(dev, "pcimio_attach: irq not available\n");
+	}
+
+	ret = a4l_ni_E_init(dev);
+	if(ret < 0)
+		return ret;
+
+	dev->driver->driver_name = devpriv->board_ptr->name;
+
+	return ret;
+}
+
+static int pcimio_detach(struct a4l_device *dev)
+{
+	if(a4l_get_irq(dev)!=A4L_IRQ_UNUSED){
+		a4l_free_irq(dev,a4l_get_irq(dev));
+	}
+
+	if(dev->priv != NULL && devpriv->mite != NULL)
+	{
+		mite_free_ring(devpriv->ai_mite_ring);
+		mite_free_ring(devpriv->ao_mite_ring);
+		mite_free_ring(devpriv->cdo_mite_ring);
+		mite_free_ring(devpriv->gpct_mite_ring[0]);
+		mite_free_ring(devpriv->gpct_mite_ring[1]);
+		a4l_mite_unsetup(devpriv->mite);
+	}
+
+	dev->driver->driver_name = NULL;
+
+	return 0;
+}
+
+static struct a4l_driver pcimio_drv = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_ni_pcimio",
+	.driver_name = NULL,
+	.attach = pcimio_attach,
+	.detach = pcimio_detach,
+	.privdata_size = sizeof(ni_private),
+};
+
+static int __init pcimio_init(void)
+{
+	return a4l_register_drv(&pcimio_drv);
+}
+
+static void __exit pcimio_cleanup(void)
+{
+	a4l_unregister_drv(&pcimio_drv);
+}
+
+MODULE_DESCRIPTION("Analogy driver for NI PCI-MIO series cards");
+MODULE_LICENSE("GPL");
+
+module_init(pcimio_init);
+module_exit(pcimio_cleanup);
+++ linux-patched/drivers/xenomai/analogy/national_instruments/ni_mio.h	2022-03-21 12:58:31.153871760 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/ni_670x.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Hardware driver for NI Mite PCI interface chip
+ * Copyright (C) 1999 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef __ANALOGY_NI_MIO_H__
+#define __ANALOGY_NI_MIO_H__
+
+/* Debug stuff */
+
+#ifdef CONFIG_DEBUG_MIO
+#define MDPRINTK(fmt, args...) rtdm_printk(fmt, ##args)
+#else /* !CONFIG_DEBUG_MIO */
+#define MDPRINTK(fmt, args...)
+#endif /* CONFIG_DEBUG_MIO */
+
+/* Subdevice related defines */
+
+#define AIMODE_NONE		0
+#define AIMODE_HALF_FULL	1
+#define AIMODE_SCAN		2
+#define AIMODE_SAMPLE		3
+
+#define NI_AI_SUBDEV		0
+#define NI_AO_SUBDEV		1
+#define NI_DIO_SUBDEV		2
+#define NI_8255_DIO_SUBDEV	3
+#define NI_UNUSED_SUBDEV	4
+#define NI_CALIBRATION_SUBDEV	5
+#define NI_EEPROM_SUBDEV	6
+#define NI_PFI_DIO_SUBDEV	7
+#define NI_CS5529_CALIBRATION_SUBDEV 8
+#define NI_SERIAL_SUBDEV	9
+#define NI_RTSI_SUBDEV		10
+#define NI_GPCT0_SUBDEV		11
+#define NI_GPCT1_SUBDEV		12
+#define NI_FREQ_OUT_SUBDEV	13
+#define NI_NUM_SUBDEVICES	14
+
+#define NI_GPCT_SUBDEV(x)	((x == 1) ? NI_GPCT1_SUBDEV : NI_GPCT0_SUBDEV)
+
+#define TIMEBASE_1_NS		50
+#define TIMEBASE_2_NS		10000
+
+#define SERIAL_DISABLED		0
+#define SERIAL_600NS		600
+#define SERIAL_1_2US		1200
+#define SERIAL_10US		10000
+
+/* PFI digital filtering options for ni m-series for use with
+   INSN_CONFIG_FILTER. */
+#define NI_PFI_FILTER_OFF	0x0
+#define NI_PFI_FILTER_125ns	0x1
+#define NI_PFI_FILTER_6425ns	0x2
+#define NI_PFI_FILTER_2550us	0x3
+
+/* Signals which can be routed to an NI PFI pin on an m-series board
+   with INSN_CONFIG_SET_ROUTING. These numbers are also returned by
+   INSN_CONFIG_GET_ROUTING on pre-m-series boards, even though their
+   routing cannot be changed. The numbers assigned are not arbitrary,
+   they correspond to the bits required to program the board. */
+#define NI_PFI_OUTPUT_PFI_DEFAULT	0
+#define NI_PFI_OUTPUT_AI_START1		1
+#define NI_PFI_OUTPUT_AI_START2		2
+#define NI_PFI_OUTPUT_AI_CONVERT	3
+#define NI_PFI_OUTPUT_G_SRC1		4
+#define NI_PFI_OUTPUT_G_GATE1		5
+#define NI_PFI_OUTPUT_AO_UPDATE_N	6
+#define NI_PFI_OUTPUT_AO_START1		7
+#define NI_PFI_OUTPUT_AI_START_PULSE	8
+#define NI_PFI_OUTPUT_G_SRC0		9
+#define NI_PFI_OUTPUT_G_GATE0		10
+#define NI_PFI_OUTPUT_EXT_STROBE	11
+#define NI_PFI_OUTPUT_AI_EXT_MUX_CLK	12
+#define NI_PFI_OUTPUT_GOUT0		13
+#define NI_PFI_OUTPUT_GOUT1		14
+#define NI_PFI_OUTPUT_FREQ_OUT		15
+#define NI_PFI_OUTPUT_PFI_DO		16
+#define NI_PFI_OUTPUT_I_ATRIG		17
+#define NI_PFI_OUTPUT_RTSI0		18
+#define NI_PFI_OUTPUT_PXI_STAR_TRIGGER_IN 26
+#define NI_PFI_OUTPUT_SCXI_TRIG1	27
+#define NI_PFI_OUTPUT_DIO_CHANGE_DETECT_RTSI 28
+#define NI_PFI_OUTPUT_CDI_SAMPLE	29
+#define NI_PFI_OUTPUT_CDO_UPDATE	30
+
+static inline unsigned int NI_PFI_OUTPUT_RTSI(unsigned rtsi_channel) {
+	return NI_PFI_OUTPUT_RTSI0 + rtsi_channel;
+}
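+/*
+ * For instance, NI_PFI_OUTPUT_RTSI(3) evaluates to NI_PFI_OUTPUT_RTSI0 + 3,
+ * i.e. 21, the routing code for driving a PFI pin from RTSI line 3.
+ */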
+
+/* Ranges declarations */
+
+extern struct a4l_rngdesc a4l_range_ni_E_ai;
+extern struct a4l_rngdesc a4l_range_ni_E_ai_limited;
+extern struct a4l_rngdesc a4l_range_ni_E_ai_limited14;
+extern struct a4l_rngdesc a4l_range_ni_E_ai_bipolar4;
+extern struct a4l_rngdesc a4l_range_ni_E_ai_611x;
+extern struct a4l_rngdesc range_ni_E_ai_622x;
+extern struct a4l_rngdesc range_ni_E_ai_628x;
+extern struct a4l_rngdesc a4l_range_ni_S_ai_6143;
+extern struct a4l_rngdesc a4l_range_ni_E_ao_ext;
+
+/* Misc functions declarations */
+
+int a4l_ni_E_interrupt(unsigned int irq, void *d);
+int a4l_ni_E_init(struct a4l_device *dev);
+
+
+#endif /* !__ANALOGY_NI_MIO_H__ */
+++ linux-patched/drivers/xenomai/analogy/national_instruments/ni_670x.c	2022-03-21 12:58:31.145871838 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/*
+    comedi/drivers/ni_670x.c
+    Hardware driver for NI 670x devices
+
+    COMEDI - Linux Control and Measurement Device Interface
+    Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+/*
+Driver: ni_670x
+Description: National Instruments 670x
+Author: Bart Joris <bjoris@advalvas.be>
+Updated: Wed, 11 Dec 2002 18:25:35 -0800
+Devices: [National Instruments] PCI-6703 (ni_670x), PCI-6704
+Status: unknown
+
+Commands are not supported.
+*/
+
+/*
+	Bart Joris <bjoris@advalvas.be> Last updated on 20/08/2001
+
+	Manuals:
+
+	322110a.pdf	PCI/PXI-6704 User Manual
+	322110b.pdf	PCI/PXI-6703/6704 User Manual
+*/
+
+/*
+ * Integration with Xenomai/Analogy layer based on the
+ * comedi driver. Adaptation made by
+ *   Julien Delange <julien.delange@esa.int>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <rtdm/analogy/device.h>
+
+#include "../intel/8255.h"
+#include "ni_mio.h"
+#include "mite.h"
+
+#define PCIMIO_IRQ_POLARITY 1
+
+#define  AO_VALUE_OFFSET         0x00
+#define  AO_CHAN_OFFSET          0x0c
+#define  AO_STATUS_OFFSET        0x10
+#define  AO_CONTROL_OFFSET       0x10
+#define  DIO_PORT0_DIR_OFFSET    0x20
+#define  DIO_PORT0_DATA_OFFSET   0x24
+#define  DIO_PORT1_DIR_OFFSET    0x28
+#define  DIO_PORT1_DATA_OFFSET   0x2c
+#define  MISC_STATUS_OFFSET      0x14
+#define  MISC_CONTROL_OFFSET     0x14
+
+/* Board description*/
+
+struct ni_670x_board {
+	unsigned short device_id;
+	const char *name;
+	unsigned short ao_chans;
+	unsigned short ao_bits;
+};
+
+#define thisboard ((struct ni_670x_board *)dev->board_ptr)
+
+struct ni_670x_private {
+	struct mite_struct *mite;
+	int boardtype;
+	int dio;
+	unsigned int ao_readback[32];
+
+	/*
+	 * Added when porting to xenomai
+	 */
+	int irq_polarity;
+	int irq_pin;
+	int irq;
+	struct ni_670x_board *board_ptr;
+	/*
+	 * END OF ADDED when porting to xenomai
+	 */
+};
+
+struct ni_670x_subd_priv {
+	int io_bits;
+	unsigned int state;
+	uint16_t readback[2];
+	uint16_t config;
+	void* counter;
+};
+
+static int ni_670x_ao_winsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn);
+static int ni_670x_ao_rinsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn);
+static int ni_670x_dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn);
+static int ni_670x_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn);
+
+static struct a4l_channels_desc ni_670x_desc_dio = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 8,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+static struct a4l_channels_desc ni_670x_desc_ao = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 0, /* initialized later according to the board found */
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 16},
+	},
+};
+
+
+static struct a4l_rngtab range_0_20mA = { 1, {RANGE_mA(0, 20)} };
+static struct a4l_rngtab rng_bipolar10 = { 1, {RANGE_V(-10, 10) }};
+
+struct a4l_rngtab *range_table_list[32] = {
+	&rng_bipolar10, &rng_bipolar10, &rng_bipolar10, &rng_bipolar10,
+	&rng_bipolar10, &rng_bipolar10, &rng_bipolar10, &rng_bipolar10,
+	&rng_bipolar10, &rng_bipolar10, &rng_bipolar10, &rng_bipolar10,
+	&rng_bipolar10, &rng_bipolar10, &rng_bipolar10, &rng_bipolar10,
+	&range_0_20mA, &range_0_20mA, &range_0_20mA, &range_0_20mA,
+	&range_0_20mA, &range_0_20mA, &range_0_20mA, &range_0_20mA,
+	&range_0_20mA, &range_0_20mA, &range_0_20mA, &range_0_20mA,
+	&range_0_20mA, &range_0_20mA, &range_0_20mA, &range_0_20mA};
+
+static A4L_RNGDESC(32) ni670x_ao_desc;
+
+static void setup_subd_ao(struct a4l_subdevice *subd)
+{
+	int i;
+	int nchans;
+
+	nchans = ((struct ni_670x_private*)(subd->dev->priv))->board_ptr->ao_chans;
+	subd->flags                = A4L_SUBD_AO;
+	subd->chan_desc            = &ni_670x_desc_ao;
+	subd->chan_desc->length    = nchans;
+	if (nchans == 32) {
+
+		subd->rng_desc = (struct a4l_rngdesc*) &ni670x_ao_desc;
+		subd->rng_desc->mode = A4L_RNG_PERCHAN_RNGDESC;
+		for (i = 0 ; i < 16 ; i++) {
+			subd->rng_desc->rngtabs[i] = &rng_bipolar10;
+			subd->rng_desc->rngtabs[16+i] = &range_0_20mA;
+		}
+	} else
+		subd->rng_desc = &a4l_range_bipolar10;
+
+	subd->insn_write = &ni_670x_ao_winsn;
+	subd->insn_read = &ni_670x_ao_rinsn;
+}
+
+static void setup_subd_dio(struct a4l_subdevice *s)
+{
+	/* Digital i/o subdevice */
+	s->flags = A4L_SUBD_DIO;
+	s->chan_desc = &ni_670x_desc_dio;
+	s->rng_desc = &range_digital;
+	s->insn_bits = ni_670x_dio_insn_bits;
+	s->insn_config = ni_670x_dio_insn_config;
+}
+
+struct setup_subd {
+	void (*setup_func) (struct a4l_subdevice *);
+	int sizeof_priv;
+};
+
+static struct setup_subd setup_subds[2] = {
+	{
+		.setup_func = setup_subd_ao,
+		.sizeof_priv = sizeof(struct ni_670x_subd_priv),
+	},
+	{
+		.setup_func = setup_subd_dio,
+		.sizeof_priv = sizeof(struct ni_670x_subd_priv),
+	},
+};
+
+static const struct ni_670x_board ni_670x_boards[] = {
+	{
+		.device_id = 0x2c90,
+		.name = "PCI-6703",
+		.ao_chans = 16,
+		.ao_bits = 16,
+	},
+	{
+		.device_id = 0x1920,
+		.name = "PXI-6704",
+		.ao_chans = 32,
+		.ao_bits = 16,
+	},
+	{
+		.device_id = 0x1290,
+		.name = "PCI-6704",
+		.ao_chans = 32,
+		.ao_bits = 16,
+	 },
+};
+
+#define n_ni_670x_boards ((sizeof(ni_670x_boards)/sizeof(ni_670x_boards[0])))
+
+static const struct pci_device_id ni_670x_pci_table[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2c90)},
+	{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1920)},
+	{0}
+};
+
+MODULE_DEVICE_TABLE(pci, ni_670x_pci_table);
+
+#define devpriv ((struct ni_670x_private *)dev->priv)
+
+static inline struct ni_670x_private *private(struct a4l_device *dev)
+{
+	return (struct ni_670x_private*) dev->priv;
+}
+
+
+static int ni_670x_attach (struct a4l_device *dev, a4l_lnkdesc_t *arg);
+static int ni_670x_detach(struct a4l_device *dev);
+
+static struct a4l_driver ni_670x_drv = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_ni_670x",
+	.driver_name = "ni_670x",
+	.attach = ni_670x_attach,
+	.detach = ni_670x_detach,
+	.privdata_size = sizeof(struct ni_670x_private),
+};
+
+static int __init driver_ni_670x_init_module(void)
+{
+	return a4l_register_drv (&ni_670x_drv);
+}
+
+static void __exit driver_ni_670x_cleanup_module(void)
+{
+	a4l_unregister_drv (&ni_670x_drv);
+}
+
+module_init(driver_ni_670x_init_module);
+module_exit(driver_ni_670x_cleanup_module);
+
+static int ni_670x_attach (struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	int ret, bus, slot, i, irq;
+	struct mite_struct *mite;
+	struct ni_670x_board* board = NULL;
+	int err;
+
+	if(arg->opts == NULL || arg->opts_size == 0)
+		bus = slot = 0;
+	else {
+		bus = arg->opts_size >= sizeof(unsigned long) ?
+			((unsigned long *)arg->opts)[0] : 0;
+		slot = arg->opts_size >= sizeof(unsigned long) * 2 ?
+			((unsigned long *)arg->opts)[1] : 0;
+	}
+
+	a4l_info(dev, "ni670x attach procedure started (bus=%d/slot=%d)...\n",
+		 bus, slot);
+
+	mite = NULL;
+
+	for(i = 0; i <  n_ni_670x_boards && mite == NULL; i++) {
+		mite = a4l_mite_find_device(bus,
+					    slot, ni_670x_boards[i].device_id);
+		board = (struct ni_670x_board*) &ni_670x_boards[i];
+	}
+
+	if(mite == NULL) {
+		a4l_err(dev, "%s: cannot find the MITE device\n", __FUNCTION__);
+		return -ENOENT;
+	}
+
+	a4l_info(dev, "Found device %d %s\n", i - 1, board->name);
+
+	devpriv->irq_polarity = PCIMIO_IRQ_POLARITY;
+	devpriv->irq_pin = 0;
+
+	devpriv->mite = mite;
+	devpriv->board_ptr = board;
+
+	ret = a4l_mite_setup(devpriv->mite, 0);
+	if (ret < 0) {
+		a4l_err(dev, "%s: error setting up mite\n", __FUNCTION__);
+		return ret;
+	}
+
+	irq = mite_irq(devpriv->mite);
+	devpriv->irq = irq;
+
+	a4l_info(dev, "found %s board\n", board->name);
+
+	for (i = 0; i < 2; i++) {
+		struct a4l_subdevice *subd =
+			a4l_alloc_subd(setup_subds[i].sizeof_priv, NULL);
+
+		if (subd == NULL) {
+			a4l_err(dev,
+				"%s: cannot allocate subdevice\n",
+				__FUNCTION__);
+			return -ENOMEM;
+		}
+
+		err = a4l_add_subd(dev, subd);
+		if (err != i) {
+			a4l_err(dev,
+				"%s: cannot add subdevice\n",
+				__FUNCTION__);
+			return err;
+		}
+
+		setup_subds[i].setup_func (subd);
+	}
+
+	/* Config of misc registers */
+	writel(0x10, devpriv->mite->daq_io_addr + MISC_CONTROL_OFFSET);
+	/* Config of ao registers */
+	writel(0x00, devpriv->mite->daq_io_addr + AO_CONTROL_OFFSET);
+
+	a4l_info(dev, "ni670x attached\n");
+
+	return 0;
+}
+
+static int ni_670x_detach(struct a4l_device *dev)
+{
+	a4l_info(dev, "ni670x detach procedure started...\n");
+
+	if(dev->priv != NULL && devpriv->mite != NULL)
+		a4l_mite_unsetup(devpriv->mite);
+
+	a4l_info(dev, "ni670x detach procedure succeeded...\n");
+
+	return 0;
+}
+
+
+static int ni_670x_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int *)insn->data;
+	int chan = CR_CHAN(insn->chan_desc);
+	struct ni_670x_subd_priv *subdpriv =
+		(struct ni_670x_subd_priv *)subd->priv;
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		subdpriv->io_bits |= 1 << chan;
+		break;
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		subdpriv->io_bits &= ~(1 << chan);
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (subdpriv->io_bits & (1 << chan)) ?
+			A4L_OUTPUT : A4L_INPUT;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	writel(subdpriv->io_bits,
+	       devpriv->mite->daq_io_addr + DIO_PORT0_DIR_OFFSET);
+
+	return 0;
+}
+
+static int ni_670x_ao_winsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	int i;
+	unsigned int tmp;
+	unsigned int* dtmp;
+	int chan;
+	dtmp = (unsigned int*)insn->data;
+	chan = CR_CHAN(insn->chan_desc);
+
+	/* Channel number mapping:
+	 *
+	 *   NI 6703 / NI 6704      | NI 6704 only
+	 *   -----------------------+-----------------------
+	 *   vch(0)  -> channel 0   | ich(16) -> channel 1
+	 *   vch(1)  -> channel 2   | ich(17) -> channel 3
+	 *   ...                    | ...
+	 *   vch(15) -> channel 30  | ich(31) -> channel 31
+	 */
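+	/*
+	 * Worked example of the mapping (derived from the write below):
+	 * vch(5)  -> ((5 & 15) << 1)  | ((5 & 16) >> 4)  = 10
+	 * ich(17) -> ((17 & 15) << 1) | ((17 & 16) >> 4) = 3
+	 */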
+
+	for (i = 0; i < insn->data_size / sizeof(unsigned int); i++) {
+
+		tmp = dtmp[i];
+
+		/* First write in channel register which channel to use */
+		writel(((chan & 15) << 1) | ((chan & 16) >> 4),
+		       private (subd->dev)->mite->daq_io_addr + AO_CHAN_OFFSET);
+
+		/* write channel value */
+		writel(dtmp[i],
+		       private(subd->dev)->mite->daq_io_addr + AO_VALUE_OFFSET);
+		private(subd->dev)->ao_readback[chan] = tmp;
+	}
+
+	return 0;
+}
+
+static int ni_670x_ao_rinsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	int i;
+	unsigned int* dtmp;
+	int chan = CR_CHAN(insn->chan_desc);
+
+	dtmp = (unsigned int*)insn->data;
+
+	for (i = 0; i < insn->data_size / sizeof(unsigned int); i++)
+		dtmp[i] = private(subd->dev)->ao_readback[chan];
+
+	return 0;
+}
+
+
+static int ni_670x_dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	return -ENOSYS;
+}
+
+MODULE_DESCRIPTION("Analogy driver for NI670x series cards");
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/analogy/national_instruments/Kconfig	2022-03-21 12:58:31.138871906 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/mio_common.c	1970-01-01 01:00:00.000000000 +0100
+
+config XENO_DRIVERS_ANALOGY_NI_MITE
+	depends on XENO_DRIVERS_ANALOGY && PCI
+	tristate "NI MITE driver"
+	default n
+
+config XENO_DRIVERS_ANALOGY_NI_TIO
+	depends on XENO_DRIVERS_ANALOGY
+	tristate "NI TIO driver"
+	default n
+
+config XENO_DRIVERS_ANALOGY_NI_MIO
+	depends on XENO_DRIVERS_ANALOGY && XENO_DRIVERS_ANALOGY_NI_TIO && PCI
+	tristate "NI MIO driver"
+	default n
+
+config XENO_DRIVERS_ANALOGY_NI_PCIMIO
+	depends on XENO_DRIVERS_ANALOGY && PCI
+	select XENO_DRIVERS_ANALOGY_NI_MITE
+	select XENO_DRIVERS_ANALOGY_NI_TIO
+	select XENO_DRIVERS_ANALOGY_NI_MIO
+	select XENO_DRIVERS_ANALOGY_8255
+	tristate "NI PCIMIO driver"
+	default n
+
+config XENO_DRIVERS_ANALOGY_NI_670x
+	depends on EXPERIMENTAL && XENO_DRIVERS_ANALOGY && PCI
+	select XENO_DRIVERS_ANALOGY_NI_MITE
+	select XENO_DRIVERS_ANALOGY_NI_TIO
+	select XENO_DRIVERS_ANALOGY_NI_MIO
+	select XENO_DRIVERS_ANALOGY_8255
+	tristate "NI 670X driver (EXPERIMENTAL)"
+	default n
+
+config XENO_DRIVERS_ANALOGY_NI_660x
+	depends on EXPERIMENTAL && XENO_DRIVERS_ANALOGY && PCI
+	select XENO_DRIVERS_ANALOGY_NI_MITE
+	select XENO_DRIVERS_ANALOGY_NI_TIO
+	select XENO_DRIVERS_ANALOGY_NI_MIO
+	select XENO_DRIVERS_ANALOGY_8255
+	tristate "NI 660X driver (EXPERIMENTAL)"
+	default n
+++ linux-patched/drivers/xenomai/analogy/national_instruments/mio_common.c	2022-03-21 12:58:31.130871984 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/mite.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Hardware driver for DAQ-STC based boards
+ *
+ * Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2002-2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Description: DAQ-STC systems
+ *
+ * References:
+ * 340747b.pdf  AT-MIO E series Register-Level Programmer Manual
+ * 341079b.pdf  PCI E Series Register-Level Programmer Manual
+ * 340934b.pdf  DAQ-STC reference manual
+ * 322080b.pdf  6711/6713/6715 User Manual
+ * 320945c.pdf  PCI E Series User Manual
+ * 322138a.pdf  PCI-6052E and DAQPad-6052E User Manual
+ * 320517c.pdf  AT E Series User manual (obsolete)
+ * 320517f.pdf  AT E Series User manual
+ * 320906c.pdf  Maximum signal ratings
+ * 321066a.pdf  About 16x
+ * 321791a.pdf  Discontinuation of at-mio-16e-10 rev. c
+ * 321808a.pdf  About at-mio-16e-10 rev P
+ * 321837a.pdf  Discontinuation of at-mio-16de-10 rev d
+ * 321838a.pdf  About at-mio-16de-10 rev N
+ *
+ * ISSUES:
+ * - The interrupt routine needs to be cleaned up
+ * - S-Series PCI-6143 support has been added but is not fully tested
+ *   as yet. Terry Barnaby, BEAM Ltd.
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "../intel/8255.h"
+#include "mite.h"
+#include "ni_stc.h"
+#include "ni_mio.h"
+
+#define NI_TIMEOUT 1000
+
+/* Note: this table must match the ai_gain_* definitions */
+static const short ni_gainlkup[][16] = {
+	/* ai_gain_16 */
+	{0, 1, 2, 3, 4, 5, 6, 7, 0x100, 0x101, 0x102, 0x103, 0x104, 0x105,
+	 0x106, 0x107},
+	/* ai_gain_8 */
+	{1, 2, 4, 7, 0x101, 0x102, 0x104, 0x107},
+	/* ai_gain_14 */
+	{1, 2, 3, 4, 5, 6, 7, 0x101, 0x102, 0x103, 0x104, 0x105, 0x106,
+	 0x107},
+	/* ai_gain_4 */
+	{0, 1, 4, 7},
+	/* ai_gain_611x */
+	{0x00a, 0x00b, 0x001, 0x002, 0x003, 0x004, 0x005, 0x006},
+	/* ai_gain_622x */
+	{0, 1, 4, 5},
+	/* ai_gain_628x */
+	{1, 2, 3, 4, 5, 6, 7},
+	/* ai_gain_6143 */
+	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+};
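+/*
+ * Reading the table (an assumption based on the layout above and the
+ * ai_gain_* row order, not a statement from the hardware manuals): each
+ * row is indexed by the range number selected for a channel, so a board
+ * using ai_gain_16 programs gain code 0x100 when range index 8 (the
+ * first unipolar entry of rng_ni_E_ai below) is requested.
+ */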
+
+struct a4l_rngtab rng_ni_E_ai = {16, {
+	RANGE_V(-10, 10),
+	RANGE_V(-5, 5),
+	RANGE_V(-2.5, 2.5),
+	RANGE_V(-1, 1),
+	RANGE_V(-0.5, 0.5),
+	RANGE_V(-0.25, 0.25),
+	RANGE_V(-0.1, 0.1),
+	RANGE_V(-0.05, 0.05),
+	RANGE_V(0, 20),
+	RANGE_V(0, 10),
+	RANGE_V(0, 5),
+	RANGE_V(0, 2),
+	RANGE_V(0, 1),
+	RANGE_V(0, 0.5),
+	RANGE_V(0, 0.2),
+	RANGE_V(0, 0.1),
+}};
+struct a4l_rngdesc a4l_range_ni_E_ai =
+	RNG_GLOBAL(rng_ni_E_ai);
+
+struct a4l_rngtab rng_ni_E_ai_limited = {8, {
+	RANGE_V(-10, 10),
+	RANGE_V(-5, 5),
+	RANGE_V(-1, 1),
+	RANGE_V(-0.1, 0.1),
+	RANGE_V(0, 10),
+	RANGE_V(0, 5),
+	RANGE_V(0, 1),
+	RANGE_V(0, 0.1),
+}};
+struct a4l_rngdesc a4l_range_ni_E_ai_limited =
+	RNG_GLOBAL(rng_ni_E_ai_limited);
+
+struct a4l_rngtab rng_ni_E_ai_limited14 = {14, {
+	RANGE_V(-10, 10),
+	RANGE_V(-5, 5),
+	RANGE_V(-2, 2),
+	RANGE_V(-1, 1),
+	RANGE_V(-0.5, 0.5),
+	RANGE_V(-0.2, 0.2),
+	RANGE_V(-0.1, 0.1),
+	RANGE_V(0, 10),
+	RANGE_V(0, 5),
+	RANGE_V(0, 2),
+	RANGE_V(0, 1),
+	RANGE_V(0, 0.5),
+	RANGE_V(0, 0.2),
+	RANGE_V(0, 0.1),
+}};
+struct a4l_rngdesc a4l_range_ni_E_ai_limited14 =
+	RNG_GLOBAL(rng_ni_E_ai_limited14);
+
+struct a4l_rngtab rng_ni_E_ai_bipolar4 = {4, {
+	RANGE_V(-10,10),
+	RANGE_V(-5, 5),
+	RANGE_V(-0.5, 0.5),
+	RANGE_V(-0.05, 0.05),
+}};
+struct a4l_rngdesc a4l_range_ni_E_ai_bipolar4 =
+	RNG_GLOBAL(rng_ni_E_ai_bipolar4);
+
+struct a4l_rngtab rng_ni_E_ai_611x = {8, {
+	RANGE_V(-50, 50),
+	RANGE_V(-20, 20),
+	RANGE_V(-10, 10),
+	RANGE_V(-5, 5),
+	RANGE_V(-2, 2),
+	RANGE_V(-1, 1),
+	RANGE_V(-0.5, 0.5),
+	RANGE_V(-0.2, 0.2),
+}};
+struct a4l_rngdesc a4l_range_ni_E_ai_611x =
+	RNG_GLOBAL(rng_ni_E_ai_611x);
+
+struct a4l_rngtab rng_ni_M_ai_622x = {4, {
+	RANGE_V(-10, 10),
+	RANGE_V(-5, 5),
+	RANGE_V(-1, 1),
+	RANGE_V(-0.2, 0.2),
+}};
+struct a4l_rngdesc a4l_range_ni_M_ai_622x =
+	RNG_GLOBAL(rng_ni_M_ai_622x);
+
+struct a4l_rngtab rng_ni_M_ai_628x = {7, {
+	RANGE_V(-10, 10),
+	RANGE_V(-5, 5),
+	RANGE_V(-2, 2),
+	RANGE_V(-1, 1),
+	RANGE_V(-0.5, 0.5),
+	RANGE_V(-0.2, 0.2),
+	RANGE_V(-0.1, 0.1),
+}};
+struct a4l_rngdesc a4l_range_ni_M_ai_628x =
+	RNG_GLOBAL(rng_ni_M_ai_628x);
+
+struct a4l_rngtab rng_ni_S_ai_6143 = {1, {
+	RANGE_V(-5, 5),
+}};
+struct a4l_rngdesc a4l_range_ni_S_ai_6143 =
+	RNG_GLOBAL(rng_ni_S_ai_6143);
+
+
+struct a4l_rngtab rng_ni_E_ao_ext = {4, {
+	RANGE_V(-10, 10),
+	RANGE_V(0, 10),
+	RANGE_ext(-1, 1),
+	RANGE_ext(0, 1),
+}};
+struct a4l_rngdesc a4l_range_ni_E_ao_ext =
+	RNG_GLOBAL(rng_ni_E_ao_ext);
+
+struct a4l_rngdesc *ni_range_lkup[] = {
+	&a4l_range_ni_E_ai,
+	&a4l_range_ni_E_ai_limited,
+	&a4l_range_ni_E_ai_limited14,
+	&a4l_range_ni_E_ai_bipolar4,
+	&a4l_range_ni_E_ai_611x,
+	&a4l_range_ni_M_ai_622x,
+	&a4l_range_ni_M_ai_628x,
+	&a4l_range_ni_S_ai_6143
+};
+
+static const int num_adc_stages_611x = 3;
+
+static void ni_handle_fifo_dregs(struct a4l_subdevice *subd);
+static void get_last_sample_611x(struct a4l_subdevice *subd);
+static void get_last_sample_6143(struct a4l_subdevice *subd);
+static void handle_cdio_interrupt(struct a4l_device *dev);
+static void ni_load_channelgain_list(struct a4l_device *dev,
+				     unsigned int n_chan, unsigned int *list);
+
+#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \
+     !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+static void ni_handle_fifo_half_full(struct a4l_subdevice *subd);
+static int ni_ao_fifo_half_empty(struct a4l_subdevice *subd);
+#endif /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+static inline void ni_set_bitfield(struct a4l_device *dev,
+				   int reg,
+				   unsigned int bit_mask,
+				   unsigned int bit_values)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->soft_reg_copy_lock, flags);
+	switch (reg) {
+	case Interrupt_A_Enable_Register:
+		devpriv->int_a_enable_reg &= ~bit_mask;
+		devpriv->int_a_enable_reg |= bit_values & bit_mask;
+		devpriv->stc_writew(dev, devpriv->int_a_enable_reg,
+				    Interrupt_A_Enable_Register);
+		break;
+	case Interrupt_B_Enable_Register:
+		devpriv->int_b_enable_reg &= ~bit_mask;
+		devpriv->int_b_enable_reg |= bit_values & bit_mask;
+		devpriv->stc_writew(dev, devpriv->int_b_enable_reg,
+				    Interrupt_B_Enable_Register);
+		break;
+	case IO_Bidirection_Pin_Register:
+		devpriv->io_bidirection_pin_reg &= ~bit_mask;
+		devpriv->io_bidirection_pin_reg |= bit_values & bit_mask;
+		devpriv->stc_writew(dev, devpriv->io_bidirection_pin_reg,
+				    IO_Bidirection_Pin_Register);
+		break;
+	case AI_AO_Select:
+		devpriv->ai_ao_select_reg &= ~bit_mask;
+		devpriv->ai_ao_select_reg |= bit_values & bit_mask;
+		ni_writeb(devpriv->ai_ao_select_reg, AI_AO_Select);
+		break;
+	case G0_G1_Select:
+		devpriv->g0_g1_select_reg &= ~bit_mask;
+		devpriv->g0_g1_select_reg |= bit_values & bit_mask;
+		ni_writeb(devpriv->g0_g1_select_reg, G0_G1_Select);
+		break;
+	default:
+		a4l_err(dev,
+			"Warning %s() called with invalid register\n",
+			__FUNCTION__);
+		a4l_err(dev, "reg is %d\n", reg);
+		break;
+	}
+
+	mmiowb();
+	rtdm_lock_put_irqrestore(&devpriv->soft_reg_copy_lock, flags);
+}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+static int ni_ai_drain_dma(struct a4l_subdevice *subd);
+
+static inline void ni_set_ai_dma_channel(struct a4l_device * dev, int channel)
+{
+	unsigned bitfield;
+
+	if (channel >= 0) {
+		bitfield =
+			(ni_stc_dma_channel_select_bitfield(channel) <<
+			 AI_DMA_Select_Shift) & AI_DMA_Select_Mask;
+	} else {
+		bitfield = 0;
+	}
+	ni_set_bitfield(dev, AI_AO_Select, AI_DMA_Select_Mask, bitfield);
+}
+
+static inline void ni_set_ao_dma_channel(struct a4l_device * dev, int channel)
+{
+	unsigned bitfield;
+
+	if (channel >= 0) {
+		bitfield =
+			(ni_stc_dma_channel_select_bitfield(channel) <<
+			 AO_DMA_Select_Shift) & AO_DMA_Select_Mask;
+	} else {
+		bitfield = 0;
+	}
+	ni_set_bitfield(dev, AI_AO_Select, AO_DMA_Select_Mask, bitfield);
+}
+
+static inline void ni_set_gpct_dma_channel(struct a4l_device * dev,
+					   unsigned gpct_index, int mite_channel)
+{
+	unsigned bitfield;
+
+	if (mite_channel >= 0) {
+		bitfield = GPCT_DMA_Select_Bits(gpct_index, mite_channel);
+	} else {
+		bitfield = 0;
+	}
+	ni_set_bitfield(dev, G0_G1_Select, GPCT_DMA_Select_Mask(gpct_index),
+			bitfield);
+}
+
+static inline void ni_set_cdo_dma_channel(struct a4l_device * dev, int mite_channel)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->soft_reg_copy_lock, flags);
+	devpriv->cdio_dma_select_reg &= ~CDO_DMA_Select_Mask;
+	if (mite_channel >= 0) {
+		/*XXX just guessing
+		  ni_stc_dma_channel_select_bitfield() returns the right
+		  bits, under the assumption the cdio dma selection
+		  works just like ai/ao/gpct. Definitely works for dma
+		  channels 0 and 1. */
+		devpriv->cdio_dma_select_reg |=
+			(ni_stc_dma_channel_select_bitfield(mite_channel) <<
+			 CDO_DMA_Select_Shift) & CDO_DMA_Select_Mask;
+	}
+	ni_writeb(devpriv->cdio_dma_select_reg, M_Offset_CDIO_DMA_Select);
+	mmiowb();
+	rtdm_lock_put_irqrestore(&devpriv->soft_reg_copy_lock, flags);
+}
+
+static int ni_request_ai_mite_channel(struct a4l_device * dev)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	BUG_ON(devpriv->ai_mite_chan);
+	devpriv->ai_mite_chan =
+		mite_request_channel(devpriv->mite, devpriv->ai_mite_ring);
+	if (devpriv->ai_mite_chan == NULL) {
+		rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock,
+				      flags);
+		a4l_err(dev,
+			"ni_request_ai_mite_channel: "
+			"failed to reserve mite dma channel for analog input.");
+		return -EBUSY;
+	}
+	devpriv->ai_mite_chan->dir = A4L_INPUT;
+	ni_set_ai_dma_channel(dev, devpriv->ai_mite_chan->channel);
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+	return 0;
+}
+
+static int ni_request_ao_mite_channel(struct a4l_device * dev)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	BUG_ON(devpriv->ao_mite_chan);
+	devpriv->ao_mite_chan =
+		mite_request_channel(devpriv->mite, devpriv->ao_mite_ring);
+	if (devpriv->ao_mite_chan == NULL) {
+		rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock,
+				      flags);
+		a4l_err(dev,
+			"ni_request_ao_mite_channel: "
+			"failed to reserve mite dma channel for analog output.");
+		return -EBUSY;
+	}
+	devpriv->ao_mite_chan->dir = A4L_OUTPUT;
+	ni_set_ao_dma_channel(dev, devpriv->ao_mite_chan->channel);
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+	return 0;
+}
+
+static int ni_request_gpct_mite_channel(struct a4l_device * dev,
+					unsigned gpct_index, int direction)
+{
+	unsigned long flags;
+	struct mite_channel *mite_chan;
+
+	BUG_ON(gpct_index >= NUM_GPCT);
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	BUG_ON(devpriv->counter_dev->counters[gpct_index]->mite_chan);
+	mite_chan = mite_request_channel(devpriv->mite,
+					 devpriv->gpct_mite_ring[gpct_index]);
+	if (mite_chan == NULL) {
+		rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock,
+				      flags);
+		a4l_err(dev,
+			"ni_request_gpct_mite_channel: "
+			"failed to reserve mite dma channel for counter.");
+		return -EBUSY;
+	}
+	mite_chan->dir = direction;
+	a4l_ni_tio_set_mite_channel(devpriv->counter_dev->counters[gpct_index],
+				mite_chan);
+	ni_set_gpct_dma_channel(dev, gpct_index, mite_chan->channel);
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+	return 0;
+}
+
+static int ni_request_cdo_mite_channel(struct a4l_device *dev)
+{
+	unsigned long flags;
+	int err = 0;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+
+	/* No channel should be allocated... */
+	BUG_ON(devpriv->cdo_mite_chan);
+	/* ...until now */
+	devpriv->cdo_mite_chan =
+		mite_request_channel(devpriv->mite, devpriv->cdo_mite_ring);
+
+	if (devpriv->cdo_mite_chan) {
+		devpriv->cdo_mite_chan->dir = A4L_OUTPUT;
+		ni_set_cdo_dma_channel(dev, devpriv->cdo_mite_chan->channel);
+	} else {
+		err = -EBUSY;
+		a4l_err(dev,
+			"ni_request_cdo_mite_channel: "
+			"failed to reserve mite dma channel "
+			"for correlated digital output.");
+	}
+
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+	return err;
+}
+
+void ni_release_ai_mite_channel(struct a4l_device *dev)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->ai_mite_chan) {
+		ni_set_ai_dma_channel(dev, -1);
+		a4l_mite_release_channel(devpriv->ai_mite_chan);
+		devpriv->ai_mite_chan = NULL;
+	}
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+}
+
+void ni_release_ao_mite_channel(struct a4l_device *dev)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->ao_mite_chan) {
+		ni_set_ao_dma_channel(dev, -1);
+		a4l_mite_release_channel(devpriv->ao_mite_chan);
+		devpriv->ao_mite_chan = NULL;
+	}
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+}
+
+void ni_release_gpct_mite_channel(struct a4l_device *dev, unsigned gpct_index)
+{
+	unsigned long flags;
+
+	BUG_ON(gpct_index >= NUM_GPCT);
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->counter_dev->counters[gpct_index]->mite_chan) {
+		struct mite_channel *mite_chan =
+			devpriv->counter_dev->counters[gpct_index]->mite_chan;
+
+		ni_set_gpct_dma_channel(dev, gpct_index, -1);
+		a4l_ni_tio_set_mite_channel(devpriv->counter_dev->
+					counters[gpct_index], NULL);
+		a4l_mite_release_channel(mite_chan);
+	}
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+}
+
+void ni_release_cdo_mite_channel(struct a4l_device *dev)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->cdo_mite_chan) {
+		ni_set_cdo_dma_channel(dev, -1);
+		a4l_mite_release_channel(devpriv->cdo_mite_chan);
+		devpriv->cdo_mite_chan = NULL;
+	}
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+}
+
+void ni_sync_ai_dma(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->ai_mite_chan)
+		a4l_mite_sync_input_dma(devpriv->ai_mite_chan, subd);
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+}
+
+void mite_handle_b_linkc(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->ao_mite_chan)
+		a4l_mite_sync_output_dma(devpriv->ao_mite_chan, subd);
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+}
+
+static int ni_ao_wait_for_dma_load(struct a4l_subdevice *subd)
+{
+	static const int timeout = 10000;
+
+	struct a4l_device *dev = subd->dev;
+	struct a4l_buffer *buf = subd->buf;
+
+	int i;
+
+	for (i = 0; i < timeout; i++) {
+
+		int buffer_filled;
+		unsigned short b_status;
+
+		b_status = devpriv->stc_readw(dev, AO_Status_1_Register);
+
+		buffer_filled = test_bit(A4L_BUF_EOA_NR, &buf->flags);
+		buffer_filled |= (b_status & AO_FIFO_Half_Full_St);
+
+		if (buffer_filled)
+			break;
+
+		/* If we poll too often, the pci bus activity seems
+		   to slow the dma transfer down */
+		a4l_udelay(10);
+	}
+
+	if (i == timeout) {
+		a4l_err(dev,
+			"ni_ao_wait_for_dma_load: "
+			"timed out waiting for dma load");
+		return -EPIPE;
+	}
+
+	return 0;
+}
+
+
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+static inline int ni_ai_drain_dma(struct a4l_subdevice *subd)
+{
+	return -ENOTSUPP;
+}
+
+static inline int ni_request_ai_mite_channel(struct a4l_device * dev)
+{
+	return -ENOTSUPP;
+}
+
+static inline int ni_request_ao_mite_channel(struct a4l_device * dev)
+{
+	return -ENOTSUPP;
+}
+
+static inline
+int ni_request_gpct_mite_channel(struct a4l_device * dev,
+				 unsigned gpct_index, int direction)
+{
+	return -ENOTSUPP;
+}
+
+static inline int ni_request_cdo_mite_channel(struct a4l_device *dev)
+{
+	return -ENOTSUPP;
+}
+
+#define ni_release_ai_mite_channel(x) do { } while (0)
+#define ni_release_ao_mite_channel(x) do { } while (0)
+#define ni_release_gpct_mite_channel(x) do { } while (0)
+#define ni_release_cdo_mite_channel(x) do { } while (0)
+#define ni_sync_ai_dma(x) do { } while (0)
+#define mite_handle_b_linkc(x) do { } while (0)
+
+static inline int ni_ao_wait_for_dma_load(struct a4l_subdevice *subd)
+{
+	return -ENOTSUPP;
+}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+/* E-series boards use the second irq signals to generate dma requests
+   for their counters */
+void ni_e_series_enable_second_irq(struct a4l_device *dev,
+				   unsigned gpct_index, short enable)
+{
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		return;
+	switch (gpct_index) {
+	case 0:
+		if (enable) {
+			devpriv->stc_writew(dev, G0_Gate_Second_Irq_Enable,
+					    Second_IRQ_A_Enable_Register);
+		} else {
+			devpriv->stc_writew(dev, 0,
+					    Second_IRQ_A_Enable_Register);
+		}
+		break;
+	case 1:
+		if (enable) {
+			devpriv->stc_writew(dev, G1_Gate_Second_Irq_Enable,
+					    Second_IRQ_B_Enable_Register);
+		} else {
+			devpriv->stc_writew(dev, 0,
+					    Second_IRQ_B_Enable_Register);
+		}
+		break;
+	default:
+		BUG();
+		break;
+	}
+}
+
+void ni_clear_ai_fifo(struct a4l_device *dev)
+{
+	if (boardtype.reg_type == ni_reg_6143) {
+		/* Flush the 6143 data FIFO */
+		ni_writel(0x10, AIFIFO_Control_6143); /* Flush fifo */
+		ni_writel(0x00, AIFIFO_Control_6143); /* Flush fifo */
+		while (ni_readl(AIFIFO_Status_6143) & 0x10); /* Wait for complete */
+	} else {
+		devpriv->stc_writew(dev, 1, ADC_FIFO_Clear);
+		if (boardtype.reg_type == ni_reg_625x) {
+			ni_writeb(0, M_Offset_Static_AI_Control(0));
+			ni_writeb(1, M_Offset_Static_AI_Control(0));
+		}
+	}
+}
+
+#define ao_win_out(data, addr) ni_ao_win_outw(dev, data, addr)
+static inline void ni_ao_win_outw(struct a4l_device *dev, uint16_t data, int addr)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->window_lock, flags);
+	ni_writew(addr, AO_Window_Address_611x);
+	ni_writew(data, AO_Window_Data_611x);
+	rtdm_lock_put_irqrestore(&devpriv->window_lock, flags);
+}
+
+static inline void ni_ao_win_outl(struct a4l_device *dev, uint32_t data, int addr)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->window_lock, flags);
+	ni_writew(addr, AO_Window_Address_611x);
+	ni_writel(data, AO_Window_Data_611x);
+	rtdm_lock_put_irqrestore(&devpriv->window_lock, flags);
+}
+
+static inline unsigned short ni_ao_win_inw(struct a4l_device *dev, int addr)
+{
+	unsigned long flags;
+	unsigned short data;
+
+	rtdm_lock_get_irqsave(&devpriv->window_lock, flags);
+	ni_writew(addr, AO_Window_Address_611x);
+	data = ni_readw(AO_Window_Data_611x);
+	rtdm_lock_put_irqrestore(&devpriv->window_lock, flags);
+	return data;
+}
+
+/*
+ * ni_set_bits() allows different parts of the ni_mio_common driver
+ * to share registers (such as Interrupt_A_Register) without interfering
+ * with each other.
+ *
+ * NOTE: the switch/case statements are optimized out for a constant
+ * argument, so this is actually quite fast. If you must wrap another
+ * function around this, make it inline to avoid a large speed penalty.
+ *
+ * value should only be 1 or 0.
+ */
+
+static inline void ni_set_bits(struct a4l_device *dev,
+			       int reg, unsigned bits, unsigned value)
+{
+	unsigned bit_values;
+
+	if (value)
+		bit_values = bits;
+	else
+		bit_values = 0;
+
+	ni_set_bitfield(dev, reg, bits, bit_values);
+}
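+
+/*
+ * Illustration only (not part of the original driver logic): a typical
+ * caller clears or sets interrupt enable bits through ni_set_bits(),
+ * e.g. the AO underrun path in handle_b_interrupt() below disables the
+ * AO FIFO and error interrupts with
+ *
+ *	ni_set_bits(dev, Interrupt_B_Enable_Register,
+ *		    AO_FIFO_Interrupt_Enable |
+ *		    AO_Error_Interrupt_Enable, 0);
+ *
+ * and passing 1 as the last argument would set the same bits instead.
+ */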
+
+static void shutdown_ai_command(struct a4l_subdevice *subd)
+{
+	ni_ai_drain_dma(subd);
+	ni_handle_fifo_dregs(subd);
+	get_last_sample_611x(subd);
+	get_last_sample_6143(subd);
+
+	/* TODO: stop the acquisition */
+}
+
+static void ni_handle_eos(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	if (devpriv->aimode == AIMODE_SCAN) {
+		static const int timeout = 10;
+		int i;
+
+		for (i = 0; i < timeout; i++) {
+			ni_sync_ai_dma(subd);
+			/* TODO: stop when the transfer is really over */
+			a4l_udelay(1);
+		}
+	}
+
+	/* Handle special case of single scan using AI_End_On_End_Of_Scan */
+	if ((devpriv->ai_cmd2 & AI_End_On_End_Of_Scan)) {
+		shutdown_ai_command(subd);
+	}
+}
+
+static void ni_event(struct a4l_subdevice * subd)
+{
+	/* Temporary hack */
+	struct a4l_buffer *buf = subd->buf;
+
+	if(test_bit(A4L_BUF_ERROR_NR, &buf->flags)) {
+		if (subd->cancel != NULL)
+			subd->cancel(subd);
+	}
+
+	a4l_buf_evt(subd, 0);
+
+}
+
+static void handle_gpct_interrupt(struct a4l_device *dev, unsigned short counter_index)
+{
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+	struct ni_gpct *counter = devpriv->counter_dev->counters[counter_index];
+	a4l_ni_tio_handle_interrupt(counter, dev);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+}
+
+#ifdef CONFIG_DEBUG_MIO_COMMON
+static const char *const status_a_strings[] = {
+	"passthru0", "fifo", "G0_gate", "G0_TC",
+	"stop", "start", "sc_tc", "start1",
+	"start2", "sc_tc_error", "overflow", "overrun",
+	"fifo_empty", "fifo_half_full", "fifo_full", "interrupt_a"
+};
+
+static void ni_mio_print_status_a(int status)
+{
+	int i;
+
+	__a4l_info("A status:");
+	for (i = 15; i >= 0; i--) {
+		if (status & (1 << i)) {
+			__a4l_info(" %s", status_a_strings[i]);
+		}
+	}
+	__a4l_info("\n");
+}
+
+static const char *const status_b_strings[] = {
+	"passthru1", "fifo", "G1_gate", "G1_TC",
+	"UI2_TC", "UPDATE", "UC_TC", "BC_TC",
+	"start1", "overrun", "start", "bc_tc_error",
+	"fifo_empty", "fifo_half_full", "fifo_full", "interrupt_b"
+};
+
+static void ni_mio_print_status_b(int status)
+{
+	int i;
+
+	__a4l_info("B status:");
+	for (i = 15; i >= 0; i--) {
+		if (status & (1 << i)) {
+			__a4l_info(" %s", status_b_strings[i]);
+		}
+	}
+	__a4l_info("\n");
+}
+
+#else /* !CONFIG_DEBUG_MIO_COMMON */
+
+#define ni_mio_print_status_a(x)
+#define ni_mio_print_status_b(x)
+
+#endif /* CONFIG_DEBUG_MIO_COMMON */
+
+static void ack_a_interrupt(struct a4l_device *dev, unsigned short a_status)
+{
+	unsigned short ack = 0;
+
+	if (a_status & AI_SC_TC_St) {
+		ack |= AI_SC_TC_Interrupt_Ack;
+	}
+	if (a_status & AI_START1_St) {
+		ack |= AI_START1_Interrupt_Ack;
+	}
+	if (a_status & AI_START_St) {
+		ack |= AI_START_Interrupt_Ack;
+	}
+	if (a_status & AI_STOP_St) {
+		/* not sure why we used to ack the START here also,
+		   instead of doing it independently. Frank Hess
+		   2007-07-06 */
+		ack |= AI_STOP_Interrupt_Ack;
+	}
+	if (ack)
+		devpriv->stc_writew(dev, ack, Interrupt_A_Ack_Register);
+}
+
+static void handle_a_interrupt(struct a4l_device *dev,
+			       unsigned short status, unsigned int ai_mite_status)
+{
+
+	struct a4l_subdevice *subd = a4l_get_subd(dev, NI_AI_SUBDEV);
+
+	/* 67xx boards don't have an AI subdevice, but their gpct0
+	   might generate an A interrupt. */
+
+	if ((subd->flags & A4L_SUBD_TYPES) == A4L_SUBD_UNUSED)
+		return;
+
+	a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: "
+		"a_status=%04x ai_mite_status=%08x\n", status, ai_mite_status);
+	ni_mio_print_status_a(status);
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+	if (ai_mite_status & CHSR_LINKC)
+		ni_sync_ai_dma(subd);
+
+	if (ai_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
+			       CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
+			       CHSR_SABORT | CHSR_XFERR | CHSR_LxERR_mask)) {
+		a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: "
+			"unknown mite interrupt, ack! (ai_mite_status=%08x)\n",
+			ai_mite_status);
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+	/* Test for all uncommon interrupt events at the same time */
+	if (status & (AI_Overrun_St | AI_Overflow_St | AI_SC_TC_Error_St |
+		      AI_SC_TC_St | AI_START1_St)) {
+		if (status == 0xffff) {
+			a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: "
+				"a_status=0xffff.  Card removed?\n");
+			/* TODO: we probably aren't even running a command now,
+			   so be careful; we should check the transfer
+			   status. */
+			a4l_buf_evt(subd, A4L_BUF_ERROR);
+			ni_event(subd);
+			return;
+		}
+		if (status & (AI_Overrun_St | AI_Overflow_St |
+			      AI_SC_TC_Error_St)) {
+			a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: "
+				"ai error a_status=%04x\n", status);
+			ni_mio_print_status_a(status);
+
+			shutdown_ai_command(subd);
+
+			a4l_buf_evt(subd, A4L_BUF_ERROR);
+			ni_event(subd);
+
+			return;
+		}
+		if (status & AI_SC_TC_St) {
+			a4l_dbg(1, drv_dbg, dev, "ni_mio_common: SC_TC interrupt\n");
+			if (!devpriv->ai_continuous) {
+				shutdown_ai_command(subd);
+			}
+		}
+	}
+
+#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \
+     !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+	if (status & AI_FIFO_Half_Full_St) {
+		int i;
+		static const int timeout = 10;
+		/* PCMCIA cards (at least 6036) seem to stop producing
+		   interrupts if we fail to get the fifo less than half
+		   full, so loop to be sure. */
+		for (i = 0; i < timeout; ++i) {
+			ni_handle_fifo_half_full(subd);
+			if ((devpriv->stc_readw(dev, AI_Status_1_Register) &
+			     AI_FIFO_Half_Full_St) == 0)
+				break;
+		}
+	}
+#endif /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+	if ((status & AI_STOP_St)) {
+		ni_handle_eos(subd);
+	}
+
+	ni_event(subd);
+
+	status = devpriv->stc_readw(dev, AI_Status_1_Register);
+	if (status & Interrupt_A_St)
+		a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: "
+			" didn't clear interrupt? status=0x%x\n", status);
+}
+
+static void ack_b_interrupt(struct a4l_device *dev, unsigned short b_status)
+{
+	unsigned short ack = 0;
+	if (b_status & AO_BC_TC_St) {
+		ack |= AO_BC_TC_Interrupt_Ack;
+	}
+	if (b_status & AO_Overrun_St) {
+		ack |= AO_Error_Interrupt_Ack;
+	}
+	if (b_status & AO_START_St) {
+		ack |= AO_START_Interrupt_Ack;
+	}
+	if (b_status & AO_START1_St) {
+		ack |= AO_START1_Interrupt_Ack;
+	}
+	if (b_status & AO_UC_TC_St) {
+		ack |= AO_UC_TC_Interrupt_Ack;
+	}
+	if (b_status & AO_UI2_TC_St) {
+		ack |= AO_UI2_TC_Interrupt_Ack;
+	}
+	if (b_status & AO_UPDATE_St) {
+		ack |= AO_UPDATE_Interrupt_Ack;
+	}
+	if (ack)
+		devpriv->stc_writew(dev, ack, Interrupt_B_Ack_Register);
+}
+
+static void handle_b_interrupt(struct a4l_device * dev,
+			       unsigned short b_status, unsigned int ao_mite_status)
+{
+
+	struct a4l_subdevice *subd = a4l_get_subd(dev, NI_AO_SUBDEV);
+
+	a4l_dbg(1, drv_dbg, dev,
+		"ni_mio_common: interrupt: b_status=%04x m1_status=%08x\n",
+		b_status, ao_mite_status);
+
+	ni_mio_print_status_b(b_status);
+
+	if (b_status == 0xffff)
+		return;
+
+	if (b_status & AO_Overrun_St) {
+		a4l_err(dev,
+			"ni_mio_common: interrupt: "
+			"AO FIFO underrun status=0x%04x status2=0x%04x\n",
+			b_status,
+			devpriv->stc_readw(dev, AO_Status_2_Register));
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+
+	if (b_status & AO_BC_TC_St) {
+		a4l_dbg(1, drv_dbg, dev,
+			"ni_mio_common: interrupt: "
+			"AO BC_TC status=0x%04x status2=0x%04x\n",
+			b_status, devpriv->stc_readw(dev, AO_Status_2_Register));
+		a4l_buf_evt(subd, A4L_BUF_EOA);
+	}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+	if (ao_mite_status & CHSR_STOPS) {
+		a4l_dbg(1, drv_dbg, dev,
+			"ni_mio_common: interrupt: MITE transfer stopped\n");
+	} else if (ao_mite_status & CHSR_LINKC) {
+		/* Currently, mite.c requires us to handle LINKC */
+		mite_handle_b_linkc(subd);
+	}
+
+	if (ao_mite_status &
+	    ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
+	      CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
+	      CHSR_SABORT | CHSR_STOPS | CHSR_XFERR | CHSR_LxERR_mask)) {
+		a4l_err(dev,
+			"unknown mite interrupt, ack! (ao_mite_status=%08x)\n",
+			 ao_mite_status);
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \
+     !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+	if (b_status & AO_FIFO_Request_St) {
+		int ret;
+
+		ret = ni_ao_fifo_half_empty(subd);
+		if (!ret) {
+			a4l_err(dev,
+				"ni_mio_common: "
+				"interrupt: AO buffer underrun\n");
+			ni_set_bits(dev, Interrupt_B_Enable_Register,
+				    AO_FIFO_Interrupt_Enable |
+				    AO_Error_Interrupt_Enable, 0);
+			a4l_buf_evt(subd, A4L_BUF_ERROR);
+		}
+	}
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+	ni_event(subd);
+}
+
+int a4l_ni_E_interrupt(unsigned int irq, void *d)
+{
+	struct a4l_device *dev = d;
+	unsigned short a_status;
+	unsigned short b_status;
+	unsigned int ai_mite_status = 0;
+	unsigned int ao_mite_status = 0;
+	unsigned long flags;
+	struct mite_struct *mite = devpriv->mite;
+
+	/* Make sure dev->attached is checked before handler does
+	   anything else. */
+	smp_mb();
+
+	/* lock to avoid race with a4l_poll */
+	rtdm_lock_get_irqsave(&dev->lock, flags);
+	a_status = devpriv->stc_readw(dev, AI_Status_1_Register);
+	b_status = devpriv->stc_readw(dev, AO_Status_1_Register);
+	if (mite) {
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+		rtdm_lock_get(&devpriv->mite_channel_lock);
+		if (devpriv->ai_mite_chan) {
+			ai_mite_status = a4l_mite_get_status(devpriv->ai_mite_chan);
+			if (ai_mite_status & CHSR_LINKC)
+				writel(CHOR_CLRLC,
+				       devpriv->mite->mite_io_addr +
+				       MITE_CHOR(devpriv->ai_mite_chan->channel));
+		}
+		if (devpriv->ao_mite_chan) {
+			ao_mite_status = a4l_mite_get_status(devpriv->ao_mite_chan);
+			if (ao_mite_status & CHSR_LINKC)
+				writel(CHOR_CLRLC,
+				       mite->mite_io_addr +
+				       MITE_CHOR(devpriv->ao_mite_chan->channel));
+		}
+		rtdm_lock_put(&devpriv->mite_channel_lock);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+	}
+	ack_a_interrupt(dev, a_status);
+	ack_b_interrupt(dev, b_status);
+	if ((a_status & Interrupt_A_St) || (ai_mite_status & CHSR_INT))
+		handle_a_interrupt(dev, a_status, ai_mite_status);
+	if ((b_status & Interrupt_B_St) || (ao_mite_status & CHSR_INT))
+		handle_b_interrupt(dev, b_status, ao_mite_status);
+	handle_gpct_interrupt(dev, 0);
+	handle_gpct_interrupt(dev, 1);
+	handle_cdio_interrupt(dev);
+
+	rtdm_lock_put_irqrestore(&dev->lock, flags);
+	return 0;
+}
+
+#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \
+     !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+static void ni_ao_fifo_load(struct a4l_subdevice *subd, int n)
+{
+	struct a4l_device *dev = subd->dev;
+	sampl_t d;
+	u32 packed_data;
+	int i, err = 1;
+
+	for (i = 0; i < n; i++) {
+		err = a4l_buf_get(subd, &d, sizeof(sampl_t));
+		if (err != 0)
+			break;
+
+		if (boardtype.reg_type & ni_reg_6xxx_mask) {
+			packed_data = d & 0xffff;
+			/* 6711 only has 16 bit wide ao fifo */
+			if (boardtype.reg_type != ni_reg_6711) {
+				err = a4l_buf_get(subd, &d, sizeof(sampl_t));
+				if (err != 0)
+					break;
+				i++;
+				packed_data |= (d << 16) & 0xffff0000;
+			}
+			ni_writel(packed_data, DAC_FIFO_Data_611x);
+		} else {
+			ni_writew(d, DAC_FIFO_Data);
+		}
+	}
+	if (err != 0) {
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+}
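+
+/*
+ * Illustration only: for the ni_reg_6xxx boards (other than the 6711,
+ * whose AO FIFO is only 16 bits wide), the loop above packs two
+ * successive 16-bit samples d0 and d1 into a single 32-bit FIFO word,
+ * (d0 & 0xffff) | (d1 << 16), before writing it to DAC_FIFO_Data_611x.
+ */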
+
+/*
+ *  There's a small problem if the FIFO gets really low and we
+ *  don't have the data to fill it.  Basically, if after we fill
+ *  the FIFO with all the data available, the FIFO is _still_
+ *  less than half full, we never clear the interrupt.  If the
+ *  IRQ is in edge mode, we never get another interrupt, because
+ *  this one wasn't cleared.  If in level mode, we get flooded
+ *  with interrupts that we can't fulfill, because nothing ever
+ *  gets put into the buffer.
+ *
+ *  This kind of situation is recoverable, but it is easier to
+ *  just pretend we had a FIFO underrun, since there is a good
+ *  chance it will happen anyway.  This is _not_ the case for
+ *  RT code, as RT code might purposely be running close to the
+ *  metal.  Needs to be fixed eventually.
+ */
+static int ni_ao_fifo_half_empty(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	int n;
+
+	n = a4l_buf_count(subd);
+	if (n == 0) {
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+		return 0;
+	}
+
+	n /= sizeof(sampl_t);
+	if (n > boardtype.ao_fifo_depth / 2)
+		n = boardtype.ao_fifo_depth / 2;
+
+	ni_ao_fifo_load(subd, n);
+
+	return 1;
+}
+
+static int ni_ao_prep_fifo(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	int n;
+
+	/* Reset fifo */
+	devpriv->stc_writew(dev, 1, DAC_FIFO_Clear);
+	if (boardtype.reg_type & ni_reg_6xxx_mask)
+		ni_ao_win_outl(dev, 0x6, AO_FIFO_Offset_Load_611x);
+
+	/* Load some data */
+	n = a4l_buf_count(subd);
+	if (n == 0)
+		return 0;
+
+	n /= sizeof(sampl_t);
+	if (n > boardtype.ao_fifo_depth)
+		n = boardtype.ao_fifo_depth;
+
+	ni_ao_fifo_load(subd, n);
+
+	return n;
+}
+
+static void ni_ai_fifo_read(struct a4l_subdevice *subd, int n)
+{
+	struct a4l_device *dev = subd->dev;
+	int i;
+
+	if (boardtype.reg_type == ni_reg_611x) {
+		sampl_t data[2];
+		u32 dl;
+
+		for (i = 0; i < n / 2; i++) {
+			dl = ni_readl(ADC_FIFO_Data_611x);
+			/* This may get the hi/lo data in the wrong order */
+			data[0] = (dl >> 16) & 0xffff;
+			data[1] = dl & 0xffff;
+			a4l_buf_put(subd, data, sizeof(sampl_t) * 2);
+		}
+		/* Check if there's a single sample stuck in the FIFO */
+		if (n % 2) {
+			dl = ni_readl(ADC_FIFO_Data_611x);
+			data[0] = dl & 0xffff;
+			a4l_buf_put(subd, &data[0], sizeof(sampl_t));
+		}
+	} else if (boardtype.reg_type == ni_reg_6143) {
+		sampl_t data[2];
+		u32 dl;
+
+		/* This just reads the FIFO assuming the data is
+		   present; no checks on the FIFO status are performed. */
+		for (i = 0; i < n / 2; i++) {
+			dl = ni_readl(AIFIFO_Data_6143);
+
+			data[0] = (dl >> 16) & 0xffff;
+			data[1] = dl & 0xffff;
+			a4l_buf_put(subd, data, sizeof(sampl_t) * 2);
+		}
+		if (n % 2) {
+			/* Assume there is a single sample stuck in the FIFO.
+			   Get stranded sample into FIFO */
+			ni_writel(0x01, AIFIFO_Control_6143);
+			dl = ni_readl(AIFIFO_Data_6143);
+			data[0] = (dl >> 16) & 0xffff;
+			a4l_buf_put(subd, &data[0], sizeof(sampl_t));
+		}
+	} else {
+		if (n > sizeof(devpriv->ai_fifo_buffer) /
+		    sizeof(devpriv->ai_fifo_buffer[0])) {
+			a4l_err(dev,
+				"ni_ai_fifo_read: "
+				"bug! ai_fifo_buffer too small");
+			a4l_buf_evt(subd, A4L_BUF_ERROR);
+			return;
+		}
+		for (i = 0; i < n; i++) {
+			devpriv->ai_fifo_buffer[i] =
+				ni_readw(ADC_FIFO_Data_Register);
+		}
+		a4l_buf_put(subd,
+			    devpriv->ai_fifo_buffer,
+			    n * sizeof(devpriv->ai_fifo_buffer[0]));
+	}
+}
+
+static void ni_handle_fifo_half_full(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	ni_ai_fifo_read(subd, boardtype.ai_fifo_depth / 2);
+}
+
+#endif /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+static int ni_ai_drain_dma(struct a4l_subdevice *subd)
+{
+	int i;
+	static const int timeout = 10000;
+	unsigned long flags;
+	int retval = 0;
+	struct a4l_device *dev = subd->dev;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->ai_mite_chan) {
+		for (i = 0; i < timeout; i++) {
+			if ((devpriv->stc_readw(dev,
+						AI_Status_1_Register) &
+			     AI_FIFO_Empty_St)
+			    && a4l_mite_bytes_in_transit(devpriv->
+						     ai_mite_chan) == 0)
+				break;
+			a4l_udelay(5);
+		}
+		if (i == timeout) {
+			a4l_info(dev, "wait for dma drain timed out\n");
+
+			a4l_info(dev, "a4l_mite_bytes_in_transit=%i, "
+				 "AI_Status1_Register=0x%x\n",
+				 a4l_mite_bytes_in_transit(devpriv->ai_mite_chan),
+				 devpriv->stc_readw(dev, AI_Status_1_Register));
+			retval = -1;
+		}
+	}
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+	ni_sync_ai_dma(subd);
+
+	return retval;
+}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+/* Empties the AI fifo */
+static void ni_handle_fifo_dregs(struct a4l_subdevice *subd)
+{
+	sampl_t data[2];
+	u32 dl;
+	short fifo_empty;
+	int i;
+	struct a4l_device *dev = subd->dev;
+
+	if (boardtype.reg_type == ni_reg_611x) {
+		while ((devpriv->stc_readw(dev,
+					   AI_Status_1_Register) &
+			AI_FIFO_Empty_St) == 0) {
+			dl = ni_readl(ADC_FIFO_Data_611x);
+
+			/* This may get the hi/lo data in the wrong order */
+			data[0] = (dl >> 16);
+			data[1] = (dl & 0xffff);
+			a4l_buf_put(subd, data, sizeof(sampl_t) * 2);
+		}
+	} else if (boardtype.reg_type == ni_reg_6143) {
+		i = 0;
+		while (ni_readl(AIFIFO_Status_6143) & 0x04) {
+			dl = ni_readl(AIFIFO_Data_6143);
+
+			/* This may get the hi/lo data in the wrong order */
+			data[0] = (dl >> 16);
+			data[1] = (dl & 0xffff);
+			a4l_buf_put(subd, data, sizeof(sampl_t) * 2);
+			i += 2;
+		}
+		/* Check if a stranded sample is present */
+		if (ni_readl(AIFIFO_Status_6143) & 0x01) {
+			/* Get the stranded sample into the FIFO */
+			ni_writel(0x01, AIFIFO_Control_6143);
+			dl = ni_readl(AIFIFO_Data_6143);
+			data[0] = (dl >> 16) & 0xffff;
+			a4l_buf_put(subd, &data[0], sizeof(sampl_t));
+		}
+
+	} else {
+		fifo_empty =
+			devpriv->stc_readw(dev,
+					   AI_Status_1_Register) & AI_FIFO_Empty_St;
+		while (fifo_empty == 0) {
+			for (i = 0;
+			     i <
+				     sizeof(devpriv->ai_fifo_buffer) /
+				     sizeof(devpriv->ai_fifo_buffer[0]); i++) {
+				fifo_empty =
+					devpriv->stc_readw(dev,
+							   AI_Status_1_Register) &
+					AI_FIFO_Empty_St;
+				if (fifo_empty)
+					break;
+				devpriv->ai_fifo_buffer[i] =
+					ni_readw(ADC_FIFO_Data_Register);
+			}
+			a4l_buf_put(subd,
+				    devpriv->ai_fifo_buffer,
+				    i * sizeof(devpriv->ai_fifo_buffer[0]));
+		}
+	}
+}
+
+static void get_last_sample_611x(struct a4l_subdevice *subd)
+{
+	sampl_t data;
+	u32 dl;
+	struct a4l_device *dev = subd->dev;
+
+	if (boardtype.reg_type != ni_reg_611x)
+		return;
+
+	/* Check if there's a single sample stuck in the FIFO */
+	if (ni_readb(XXX_Status) & 0x80) {
+		dl = ni_readl(ADC_FIFO_Data_611x);
+		data = (dl & 0xffff);
+		a4l_buf_put(subd, &data, sizeof(sampl_t));
+	}
+}
+
+static void get_last_sample_6143(struct a4l_subdevice *subd)
+{
+	sampl_t data;
+	u32 dl;
+	struct a4l_device *dev = subd->dev;
+
+	if (boardtype.reg_type != ni_reg_6143)
+		return;
+
+	/* Check if there's a single sample stuck in the FIFO */
+	if (ni_readl(AIFIFO_Status_6143) & 0x01) {
+		/* Get stranded sample into FIFO */
+		ni_writel(0x01, AIFIFO_Control_6143);
+		dl = ni_readl(AIFIFO_Data_6143);
+
+		/* This may get the hi/lo data in the wrong order */
+		data = (dl >> 16) & 0xffff;
+		a4l_buf_put(subd, &data, sizeof(sampl_t));
+	}
+}
+
+static void ni_ai_munge16(struct a4l_subdevice *subd, void *buf, unsigned long size)
+{
+	struct a4l_device *dev = subd->dev;
+	struct a4l_cmd_desc *cmd = a4l_get_cmd(subd);
+	int chan_idx = a4l_get_chan(subd);
+	unsigned int i;
+	sampl_t *array = buf;
+
+	for (i = 0; i < size / sizeof(sampl_t); i++) {
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+		array[i] = le16_to_cpu(array[i]);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+		array[i] += devpriv->ai_offset[chan_idx];
+		chan_idx++;
+		chan_idx %= cmd->nb_chan;
+	}
+}
+
+static void ni_ai_munge32(struct a4l_subdevice *subd, void *buf, unsigned long size)
+{
+	struct a4l_device *dev = subd->dev;
+	struct a4l_cmd_desc *cmd = a4l_get_cmd(subd);
+	int chan_idx = a4l_get_chan(subd);
+	unsigned int i;
+	lsampl_t *larray = buf;
+
+	for (i = 0; i < size / sizeof(lsampl_t); i++) {
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+		larray[i] = le32_to_cpu(larray[i]);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+		larray[i] += devpriv->ai_offset[chan_idx];
+		chan_idx++;
+		chan_idx %= cmd->nb_chan;
+	}
+}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+static int ni_ai_setup_MITE_dma(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned long flags;
+	int err;
+
+	err = ni_request_ai_mite_channel(dev);
+	if (err < 0)
+		return err;
+
+	err = a4l_mite_buf_change(devpriv->ai_mite_chan->ring, subd);
+	if (err < 0)
+		return err;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+
+	switch (boardtype.reg_type) {
+	case ni_reg_611x:
+	case ni_reg_6143:
+		a4l_mite_prep_dma(devpriv->ai_mite_chan, 32, 16);
+		break;
+	case ni_reg_628x:
+		a4l_mite_prep_dma(devpriv->ai_mite_chan, 32, 32);
+		break;
+	default:
+		a4l_mite_prep_dma(devpriv->ai_mite_chan, 16, 16);
+		break;
+	}
+
+	/* start the MITE */
+	a4l_mite_dma_arm(devpriv->ai_mite_chan);
+
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+	return 0;
+}
+
+static int ni_ao_setup_MITE_dma(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned long flags;
+	int err;
+
+	err = ni_request_ao_mite_channel(dev);
+	if (err < 0)
+		return err;
+
+	err = a4l_mite_buf_change(devpriv->ao_mite_chan->ring, subd);
+	if (err < 0)
+		return err;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+
+	if (devpriv->ao_mite_chan) {
+
+		if (boardtype.reg_type & (ni_reg_611x | ni_reg_6713)) {
+			a4l_mite_prep_dma(devpriv->ao_mite_chan, 32, 32);
+		} else {
+			/* Doing 32 instead of 16 bit wide transfers
+			   from memory makes the mite do 32 bit pci
+			   transfers, doubling pci bandwidth. */
+			a4l_mite_prep_dma(devpriv->ao_mite_chan, 16, 32);
+		}
+		a4l_mite_dma_arm(devpriv->ao_mite_chan);
+	} else
+		err = -EIO;
+
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+	return err;
+}
+
+static int ni_cdo_setup_MITE_dma(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned long flags;
+	int err;
+
+	err = ni_request_cdo_mite_channel(dev);
+	if (err < 0)
+		return err;
+
+	/* No need to get a lock to setup the ring buffer */
+	err = a4l_mite_buf_change(devpriv->cdo_mite_chan->ring, subd);
+	if (err < 0)
+		return err;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+
+	/* This test should be useless but one never knows */
+	if (devpriv->cdo_mite_chan) {
+		/* Configure the DMA transfer */
+		a4l_mite_prep_dma(devpriv->cdo_mite_chan, 32, 32);
+		a4l_mite_dma_arm(devpriv->cdo_mite_chan);
+	} else
+		err = -EIO;
+
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+	return err;
+}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+static void ni_ai_reset(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	ni_release_ai_mite_channel(dev);
+
+	/* ai configuration */
+	devpriv->stc_writew(dev, AI_Configuration_Start | AI_Reset,
+			    Joint_Reset_Register);
+
+	ni_set_bits(dev, Interrupt_A_Enable_Register,
+		    AI_SC_TC_Interrupt_Enable | AI_START1_Interrupt_Enable |
+		    AI_START2_Interrupt_Enable | AI_START_Interrupt_Enable |
+		    AI_STOP_Interrupt_Enable | AI_Error_Interrupt_Enable |
+		    AI_FIFO_Interrupt_Enable, 0);
+
+	ni_clear_ai_fifo(dev);
+
+	if (boardtype.reg_type != ni_reg_6143)
+		ni_writeb(0, Misc_Command);
+
+	devpriv->stc_writew(dev, AI_Disarm, AI_Command_1_Register);	/* reset pulses */
+	devpriv->stc_writew(dev,
+			    AI_Start_Stop | AI_Mode_1_Reserved /*| AI_Trigger_Once */ ,
+			    AI_Mode_1_Register);
+	devpriv->stc_writew(dev, 0x0000, AI_Mode_2_Register);
+	/* generate FIFO interrupts on non-empty */
+	devpriv->stc_writew(dev, (0 << 6) | 0x0000, AI_Mode_3_Register);
+	if (boardtype.reg_type == ni_reg_611x) {
+		devpriv->stc_writew(dev, AI_SHIFTIN_Pulse_Width |
+				    AI_SOC_Polarity |
+				    AI_LOCALMUX_CLK_Pulse_Width, AI_Personal_Register);
+		devpriv->stc_writew(dev, AI_SCAN_IN_PROG_Output_Select(3) |
+				    AI_EXTMUX_CLK_Output_Select(0) |
+				    AI_LOCALMUX_CLK_Output_Select(2) |
+				    AI_SC_TC_Output_Select(3) |
+				    AI_CONVERT_Output_Select(AI_CONVERT_Output_Enable_High),
+				    AI_Output_Control_Register);
+	} else if (boardtype.reg_type == ni_reg_6143) {
+		devpriv->stc_writew(dev, AI_SHIFTIN_Pulse_Width |
+				    AI_SOC_Polarity |
+				    AI_LOCALMUX_CLK_Pulse_Width, AI_Personal_Register);
+		devpriv->stc_writew(dev, AI_SCAN_IN_PROG_Output_Select(3) |
+				    AI_EXTMUX_CLK_Output_Select(0) |
+				    AI_LOCALMUX_CLK_Output_Select(2) |
+				    AI_SC_TC_Output_Select(3) |
+				    AI_CONVERT_Output_Select(AI_CONVERT_Output_Enable_Low),
+				    AI_Output_Control_Register);
+	} else {
+		unsigned int ai_output_control_bits;
+		devpriv->stc_writew(dev, AI_SHIFTIN_Pulse_Width |
+				    AI_SOC_Polarity |
+				    AI_CONVERT_Pulse_Width |
+				    AI_LOCALMUX_CLK_Pulse_Width, AI_Personal_Register);
+		ai_output_control_bits = AI_SCAN_IN_PROG_Output_Select(3) |
+			AI_EXTMUX_CLK_Output_Select(0) |
+			AI_LOCALMUX_CLK_Output_Select(2) |
+			AI_SC_TC_Output_Select(3);
+		if (boardtype.reg_type == ni_reg_622x)
+			ai_output_control_bits |=
+				AI_CONVERT_Output_Select
+				(AI_CONVERT_Output_Enable_High);
+		else
+			ai_output_control_bits |=
+				AI_CONVERT_Output_Select
+				(AI_CONVERT_Output_Enable_Low);
+		devpriv->stc_writew(dev, ai_output_control_bits,
+				    AI_Output_Control_Register);
+	}
+
+	/* the following registers should not be changed, because there
+	 * are no backup registers in devpriv.  If you want to change
+	 * any of these, add a backup register and other appropriate code:
+	 *      AI_Mode_1_Register
+	 *      AI_Mode_3_Register
+	 *      AI_Personal_Register
+	 *      AI_Output_Control_Register
+	 */
+
+	/* clear interrupts */
+	devpriv->stc_writew(dev, AI_SC_TC_Error_Confirm | AI_START_Interrupt_Ack |
+			    AI_START2_Interrupt_Ack | AI_START1_Interrupt_Ack |
+			    AI_SC_TC_Interrupt_Ack | AI_Error_Interrupt_Ack |
+			    AI_STOP_Interrupt_Ack, Interrupt_A_Ack_Register);
+
+	devpriv->stc_writew(dev, AI_Configuration_End, Joint_Reset_Register);
+}
+
+static int ni_ai_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	const unsigned int mask = (1 << boardtype.adbits) - 1;
+	int i, n;
+	unsigned int signbits;
+	unsigned short d;
+	unsigned long dl;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	ni_load_channelgain_list(dev, 1, &insn->chan_desc);
+
+	ni_clear_ai_fifo(dev);
+
+	signbits = devpriv->ai_offset[0];
+	if (boardtype.reg_type == ni_reg_611x) {
+		for (n = 0; n < num_adc_stages_611x; n++) {
+			devpriv->stc_writew(dev, AI_CONVERT_Pulse,
+					    AI_Command_1_Register);
+			a4l_udelay(1);
+		}
+		for (n = 0; n < insn->data_size / sizeof(uint16_t); n++) {
+			devpriv->stc_writew(dev, AI_CONVERT_Pulse,
+					    AI_Command_1_Register);
+			/* The 611x has screwy 32-bit FIFOs. */
+			d = 0;
+			for (i = 0; i < NI_TIMEOUT; i++) {
+				if (ni_readb(XXX_Status) & 0x80) {
+					d = (ni_readl(ADC_FIFO_Data_611x) >> 16)
+						& 0xffff;
+					break;
+				}
+				if (!(devpriv->stc_readw(dev,
+							 AI_Status_1_Register) &
+				      AI_FIFO_Empty_St)) {
+					d = ni_readl(ADC_FIFO_Data_611x) &
+						0xffff;
+					break;
+				}
+			}
+			if (i == NI_TIMEOUT) {
+				a4l_warn(dev,
+					 "ni_mio_common: "
+					 "timeout in 611x ni_ai_insn_read\n");
+				return -ETIME;
+			}
+			d += signbits;
+			data[n] = d;
+		}
+	} else if (boardtype.reg_type == ni_reg_6143) {
+		for (n = 0; n < insn->data_size / sizeof(uint16_t); n++) {
+			devpriv->stc_writew(dev, AI_CONVERT_Pulse,
+					    AI_Command_1_Register);
+
+			/* The 6143 has 32-bit FIFOs.
+			   You need to strobe a bit to move a single
+			   16-bit stranded sample into the FIFO. */
+			dl = 0;
+			for (i = 0; i < NI_TIMEOUT; i++) {
+				if (ni_readl(AIFIFO_Status_6143) & 0x01) {
+					/* Get stranded sample into FIFO */
+					ni_writel(0x01, AIFIFO_Control_6143);
+					dl = ni_readl(AIFIFO_Data_6143);
+					break;
+				}
+			}
+			if (i == NI_TIMEOUT) {
+				a4l_warn(dev,
+					 "ni_mio_common: "
+					 "timeout in 6143 ni_ai_insn_read\n");
+				return -ETIME;
+			}
+			data[n] = (((dl >> 16) & 0xFFFF) + signbits) & 0xFFFF;
+		}
+	} else {
+		for (n = 0; n < insn->data_size / sizeof(uint16_t); n++) {
+			devpriv->stc_writew(dev, AI_CONVERT_Pulse,
+					    AI_Command_1_Register);
+			for (i = 0; i < NI_TIMEOUT; i++) {
+				if (!(devpriv->stc_readw(dev,
+							 AI_Status_1_Register) &
+				      AI_FIFO_Empty_St))
+					break;
+			}
+			if (i == NI_TIMEOUT) {
+				a4l_warn(dev,
+					 "ni_mio_common: "
+					 "timeout in ni_ai_insn_read\n");
+				return -ETIME;
+			}
+			if (boardtype.reg_type & ni_reg_m_series_mask) {
+				data[n] = ni_readl(M_Offset_AI_FIFO_Data) & mask;
+			} else {
+				d = ni_readw(ADC_FIFO_Data_Register);
+				/* subtle: needs to be short addition */
+				d += signbits;
+				data[n] = d;
+			}
+		}
+	}
+	return 0;
+}
+
+void ni_prime_channelgain_list(struct a4l_device *dev)
+{
+	int i;
+	devpriv->stc_writew(dev, AI_CONVERT_Pulse, AI_Command_1_Register);
+	for (i = 0; i < NI_TIMEOUT; ++i) {
+		if (!(devpriv->stc_readw(dev,
+					 AI_Status_1_Register) &
+		      AI_FIFO_Empty_St)) {
+			devpriv->stc_writew(dev, 1, ADC_FIFO_Clear);
+			return;
+		}
+		a4l_udelay(1);
+	}
+	a4l_warn(dev, "ni_mio_common: timeout loading channel/gain list\n");
+}
+
+static void ni_m_series_load_channelgain_list(struct a4l_device *dev,
+					      unsigned int n_chan,
+					      unsigned int *list)
+{
+	unsigned int chan, range, aref;
+	unsigned int i;
+	unsigned offset;
+	unsigned int dither;
+	unsigned range_code;
+
+	devpriv->stc_writew(dev, 1, Configuration_Memory_Clear);
+
+	if ((list[0] & CR_ALT_SOURCE)) {
+		unsigned bypass_bits;
+		chan = CR_CHAN(list[0]);
+		range = CR_RNG(list[0]);
+		range_code = ni_gainlkup[boardtype.gainlkup][range];
+		dither = ((list[0] & CR_ALT_FILTER) != 0);
+		bypass_bits = MSeries_AI_Bypass_Config_FIFO_Bit;
+		bypass_bits |= chan;
+		bypass_bits |=
+			(devpriv->
+			 ai_calib_source) & (MSeries_AI_Bypass_Cal_Sel_Pos_Mask |
+					     MSeries_AI_Bypass_Cal_Sel_Neg_Mask |
+					     MSeries_AI_Bypass_Mode_Mux_Mask |
+					     MSeries_AO_Bypass_AO_Cal_Sel_Mask);
+		bypass_bits |= MSeries_AI_Bypass_Gain_Bits(range_code);
+		if (dither)
+			bypass_bits |= MSeries_AI_Bypass_Dither_Bit;
+		/* don't use 2's complement encoding */
+		bypass_bits |= MSeries_AI_Bypass_Polarity_Bit;
+		ni_writel(bypass_bits, M_Offset_AI_Config_FIFO_Bypass);
+	} else {
+		ni_writel(0, M_Offset_AI_Config_FIFO_Bypass);
+	}
+	offset = 0;
+	for (i = 0; i < n_chan; i++) {
+		unsigned config_bits = 0;
+		chan = CR_CHAN(list[i]);
+		aref = CR_AREF(list[i]);
+		range = CR_RNG(list[i]);
+		dither = ((list[i] & CR_ALT_FILTER) != 0);
+
+		range_code = ni_gainlkup[boardtype.gainlkup][range];
+		devpriv->ai_offset[i] = offset;
+		switch (aref) {
+		case AREF_DIFF:
+			config_bits |=
+				MSeries_AI_Config_Channel_Type_Differential_Bits;
+			break;
+		case AREF_COMMON:
+			config_bits |=
+				MSeries_AI_Config_Channel_Type_Common_Ref_Bits;
+			break;
+		case AREF_GROUND:
+			config_bits |=
+				MSeries_AI_Config_Channel_Type_Ground_Ref_Bits;
+			break;
+		case AREF_OTHER:
+			break;
+		}
+		config_bits |= MSeries_AI_Config_Channel_Bits(chan);
+		config_bits |=
+			MSeries_AI_Config_Bank_Bits(boardtype.reg_type, chan);
+		config_bits |= MSeries_AI_Config_Gain_Bits(range_code);
+		if (i == n_chan - 1)
+			config_bits |= MSeries_AI_Config_Last_Channel_Bit;
+		if (dither)
+			config_bits |= MSeries_AI_Config_Dither_Bit;
+		/* don't use 2's complement encoding */
+		config_bits |= MSeries_AI_Config_Polarity_Bit;
+		ni_writew(config_bits, M_Offset_AI_Config_FIFO_Data);
+	}
+	ni_prime_channelgain_list(dev);
+}
+
+/*
+ * Notes on the 6110 and 6111:
+ * These boards are slightly different from the rest of the series, since
+ * they have multiple A/D converters.
+ * From the driver side, the configuration memory is a
+ * little different.
+ * Configuration Memory Low:
+ *   bits 15-9: same
+ *   bit 8: unipolar/bipolar (should be 0 for bipolar)
+ *   bits 0-3: gain.  This is 4 bits instead of 3 for the other boards
+ *       1001 gain=0.1 (+/- 50)
+ *       1010 0.2
+ *       1011 0.1
+ *       0001 1
+ *       0010 2
+ *       0011 5
+ *       0100 10
+ *       0101 20
+ *       0110 50
+ * Configuration Memory High:
+ *   bits 12-14: Channel Type
+ *       001 for differential
+ *       000 for calibration
+ *   bit 11: coupling  (this is not currently handled)
+ *       1 AC coupling
+ *       0 DC coupling
+ *   bits 0-2: channel
+ *       valid channels are 0-3
+ */
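+
+/*
+ * Worked example for the table above (illustrative values only, the other
+ * Configuration Memory Low bits left at 0): a bipolar, gain=10,
+ * differential measurement on channel 2 would use
+ *   Configuration Memory Low  = 0x0004  (bit 8 = 0 bipolar, gain bits 0100)
+ *   Configuration Memory High = 0x1002  (channel type 001 differential,
+ *                                        channel bits 010)
+ */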
+static void ni_load_channelgain_list(struct a4l_device *dev,
+				     unsigned int n_chan, unsigned int *list)
+{
+	unsigned int chan, range, aref;
+	unsigned int i;
+	unsigned int hi, lo;
+	unsigned offset;
+	unsigned int dither;
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		ni_m_series_load_channelgain_list(dev, n_chan, list);
+		return;
+	}
+	if (n_chan == 1 && (boardtype.reg_type != ni_reg_611x)
+	    && (boardtype.reg_type != ni_reg_6143)) {
+		if (devpriv->changain_state
+		    && devpriv->changain_spec == list[0]) {
+			/* ready to go. */
+			return;
+		}
+		devpriv->changain_state = 1;
+		devpriv->changain_spec = list[0];
+	} else {
+		devpriv->changain_state = 0;
+	}
+
+	devpriv->stc_writew(dev, 1, Configuration_Memory_Clear);
+
+	/* Set up Calibration mode if required */
+	if (boardtype.reg_type == ni_reg_6143) {
+		if ((list[0] & CR_ALT_SOURCE)
+		    && !devpriv->ai_calib_source_enabled) {
+			/* Strobe Relay enable bit */
+			ni_writew(devpriv->
+				  ai_calib_source |
+				  Calibration_Channel_6143_RelayOn,
+				  Calibration_Channel_6143);
+			ni_writew(devpriv->ai_calib_source,
+				  Calibration_Channel_6143);
+			devpriv->ai_calib_source_enabled = 1;
+			/* Allow relays to change */
+			if(rtdm_in_rt_context())
+				rtdm_task_sleep(100*1000000);
+			else
+				msleep_interruptible(100);
+		} else if (!(list[0] & CR_ALT_SOURCE)
+			   && devpriv->ai_calib_source_enabled) {
+			/* Strobe Relay disable bit */
+			ni_writew(devpriv->
+				  ai_calib_source |
+				  Calibration_Channel_6143_RelayOff,
+				  Calibration_Channel_6143);
+			ni_writew(devpriv->ai_calib_source,
+				  Calibration_Channel_6143);
+			devpriv->ai_calib_source_enabled = 0;
+			/* Allow relays to change */
+			if(rtdm_in_rt_context())
+				rtdm_task_sleep(100*1000000);
+			else
+				msleep_interruptible(100);
+		}
+	}
+
+	offset = 1 << (boardtype.adbits - 1);
+	for (i = 0; i < n_chan; i++) {
+		if ((boardtype.reg_type != ni_reg_6143)
+		    && (list[i] & CR_ALT_SOURCE)) {
+			chan = devpriv->ai_calib_source;
+		} else {
+			chan = CR_CHAN(list[i]);
+		}
+		aref = CR_AREF(list[i]);
+		range = CR_RNG(list[i]);
+		dither = ((list[i] & CR_ALT_FILTER) != 0);
+
+		/* fix the external/internal range differences */
+		range = ni_gainlkup[boardtype.gainlkup][range];
+		if (boardtype.reg_type == ni_reg_611x)
+			devpriv->ai_offset[i] = offset;
+		else
+			devpriv->ai_offset[i] = (range & 0x100) ? 0 : offset;
+
+		hi = 0;
+		if ((list[i] & CR_ALT_SOURCE)) {
+			if (boardtype.reg_type == ni_reg_611x)
+				ni_writew(CR_CHAN(list[i]) & 0x0003,
+					  Calibration_Channel_Select_611x);
+		} else {
+			if (boardtype.reg_type == ni_reg_611x)
+				aref = AREF_DIFF;
+			else if (boardtype.reg_type == ni_reg_6143)
+				aref = AREF_OTHER;
+			switch (aref) {
+			case AREF_DIFF:
+				hi |= AI_DIFFERENTIAL;
+				break;
+			case AREF_COMMON:
+				hi |= AI_COMMON;
+				break;
+			case AREF_GROUND:
+				hi |= AI_GROUND;
+				break;
+			case AREF_OTHER:
+				break;
+			}
+		}
+		hi |= AI_CONFIG_CHANNEL(chan);
+
+		ni_writew(hi, Configuration_Memory_High);
+
+		if (boardtype.reg_type != ni_reg_6143) {
+			lo = range;
+			if (i == n_chan - 1)
+				lo |= AI_LAST_CHANNEL;
+			if (dither)
+				lo |= AI_DITHER;
+
+			ni_writew(lo, Configuration_Memory_Low);
+		}
+	}
+
+	/* prime the channel/gain list */
+	if ((boardtype.reg_type != ni_reg_611x)
+	    && (boardtype.reg_type != ni_reg_6143)) {
+		ni_prime_channelgain_list(dev);
+	}
+}
+
+static int ni_ns_to_timer(const struct a4l_device *dev,
+			  unsigned int nanosec, int round_mode)
+{
+	int divider;
+	switch (round_mode) {
+	case TRIG_ROUND_NEAREST:
+	default:
+		divider = (nanosec + devpriv->clock_ns / 2) / devpriv->clock_ns;
+		break;
+	case TRIG_ROUND_DOWN:
+		divider = (nanosec) / devpriv->clock_ns;
+		break;
+	case TRIG_ROUND_UP:
+		divider = (nanosec + devpriv->clock_ns - 1) / devpriv->clock_ns;
+		break;
+	}
+	return divider - 1;
+}
+
+static unsigned int ni_timer_to_ns(const struct a4l_device *dev, int timer)
+{
+	return devpriv->clock_ns * (timer + 1);
+}
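+
+/*
+ * Illustration only, assuming a 50 ns timebase (devpriv->clock_ns == 50,
+ * i.e. a 20 MHz source clock): a requested period of 10120 ns gives
+ *   TRIG_ROUND_NEAREST: (10120 + 25) / 50 - 1 = 201, ni_timer_to_ns -> 10100 ns
+ *   TRIG_ROUND_UP:      (10120 + 49) / 50 - 1 = 202, ni_timer_to_ns -> 10150 ns
+ * so ni_timer_to_ns() reports the period that will actually be programmed.
+ */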
+
+static unsigned int ni_min_ai_scan_period_ns(struct a4l_device *dev,
+					     unsigned int num_channels)
+{
+	switch (boardtype.reg_type) {
+	case ni_reg_611x:
+	case ni_reg_6143:
+		/* simultaneously-sampled inputs */
+		return boardtype.ai_speed;
+	default:
+		/* multiplexed inputs */
+		break;
+	}
+	return boardtype.ai_speed * num_channels;
+}
+
+static struct a4l_cmd_desc mio_ai_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW | TRIG_INT | TRIG_EXT,
+	.scan_begin_src = TRIG_TIMER | TRIG_EXT,
+	.convert_src = TRIG_TIMER | TRIG_EXT | TRIG_NOW,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_COUNT | TRIG_NONE,
+};
+
+int ni_ai_inttrig(struct a4l_subdevice *subd, lsampl_t trignum)
+{
+	struct a4l_device *dev = subd->dev;
+
+	if (trignum != 0)
+		return -EINVAL;
+
+	devpriv->stc_writew(dev, AI_START1_Pulse | devpriv->ai_cmd2,
+			    AI_Command_2_Register);
+
+	return 1;
+}
+
+#define cfc_check_trigger_arg_is(a,b) __cfc_check_trigger_arg_is(a,b, dev, __LINE__)
+static inline int __cfc_check_trigger_arg_is(unsigned int *arg,
+	                                     unsigned int val,
+					     struct a4l_device *dev,
+	                                     unsigned int line)
+{
+	if (*arg != val) {
+		a4l_dbg(1, drv_dbg, dev, "line %d: *arg (%d) != val (%d) \n",
+			line, *arg, val);
+		*arg = val;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#define cfc_check_trigger_is_unique(a) __cfc_check_trigger_is_unique(a, dev, __LINE__)
+static inline int __cfc_check_trigger_is_unique(unsigned int src,
+					        struct a4l_device *dev,
+	                                        unsigned int line)
+{
+	/* this test is true if more than one _src bit is set */
+	if ((src & (src - 1)) != 0) {
+		a4l_dbg(1, drv_dbg, dev, "line %d: src (%d) \n", line, src);
+		return -EINVAL;
+	}
+	return 0;
+}
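+
+/*
+ * For illustration: src = TRIG_TIMER | TRIG_EXT has two bits set, so
+ * src & (src - 1) is non-zero and the check above fails, whereas a
+ * single source such as TRIG_TIMER passes.
+ */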
+
+#define cfc_check_trigger_src(a,b) __cfc_check_trigger_src(a,b, dev, __LINE__)
+static inline int __cfc_check_trigger_src(unsigned int *src,
+	                                  unsigned int flags,
+					  struct a4l_device *dev,
+	                                  unsigned int line)
+{
+	unsigned int orig_src = *src;
+
+	*src = orig_src & flags;
+	if (*src == 0 || *src != orig_src){
+		a4l_dbg(1, drv_dbg, dev, "line %d: *src (%d)  orig_src (%d) flags(%d) \n",
+			line, *src, orig_src, flags);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#define cfc_check_trigger_arg_min(a,b) __cfc_check_trigger_arg_min(a,b, dev, __LINE__)
+static inline int __cfc_check_trigger_arg_min(unsigned int *arg,
+					      unsigned int val,
+					      struct a4l_device *dev,
+	                                      unsigned int line)
+{
+	if (*arg < val) {
+		a4l_dbg(1, drv_dbg, dev, "line %d: *arg (%d) < val (%d) \n",
+			line, *arg, val);
+		*arg = val;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#define cfc_check_trigger_arg_max(a,b) __cfc_check_trigger_arg_max(a,b, dev, __LINE__)
+static inline int __cfc_check_trigger_arg_max(unsigned int *arg,
+					      unsigned int val,
+					      struct a4l_device *dev,
+	                                      unsigned int line)
+{
+	if (*arg > val) {
+		a4l_dbg(1, drv_dbg, dev, "line %d: *arg (%d) > val (%d) \n",
+			line, *arg, val);
+		*arg = val;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int ni_ai_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int sources;
+	int tmp, err = 0;
+
+	/* Step 1 : check if triggers are trivially valid */
+	err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT | TRIG_EXT);
+	err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER | TRIG_EXT);
+
+	sources = TRIG_TIMER | TRIG_EXT;
+	if (boardtype.reg_type == ni_reg_611x || boardtype.reg_type == ni_reg_6143)
+		sources |= TRIG_NOW;
+
+	err |= cfc_check_trigger_src(&cmd->convert_src, sources);
+	err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+	err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
+
+	if (err) {
+		if (cmd->valid_simul_stages & BIT(1))
+			return 0;
+
+		a4l_dbg(1, drv_dbg, dev, "ai_cmdtest ERR 1 \n");
+		return -EINVAL;
+	}
+
+	/* Step 2a : make sure trigger sources are unique */
+	err |= cfc_check_trigger_is_unique(cmd->start_src);
+	err |= cfc_check_trigger_is_unique(cmd->scan_begin_src);
+	err |= cfc_check_trigger_is_unique(cmd->convert_src);
+	err |= cfc_check_trigger_is_unique(cmd->stop_src);
+
+	/* Step 2b : and mutually compatible */
+
+	if (err) {
+		if (cmd->valid_simul_stages & BIT(2))
+			return 0;
+
+		a4l_dbg(1, drv_dbg, dev, "ai_cmdtest ERR 2 \n");
+		return -EINVAL;
+	}
+
+	/* Step 3: check if arguments are trivially valid */
+
+	if (cmd->start_src == TRIG_EXT) {
+		/* external trigger */
+		unsigned int tmp = CR_CHAN(cmd->start_arg);
+		if (tmp > 16)
+			tmp = 16;
+		tmp |= (cmd->start_arg & (CR_INVERT | CR_EDGE));
+		err |= cfc_check_trigger_arg_is(&cmd->start_arg, tmp);
+
+	} else {
+		/* true for both TRIG_NOW and TRIG_INT */
+		err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
+	}
+
+	if (cmd->scan_begin_src == TRIG_TIMER) {
+		err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg,
+			ni_min_ai_scan_period_ns(dev, cmd->nb_chan));
+
+		err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg,
+						 devpriv->clock_ns * 0xffffff);
+	} else if (cmd->scan_begin_src == TRIG_EXT) {
+		/* external trigger */
+		unsigned int tmp = CR_CHAN(cmd->scan_begin_arg);
+
+		if (tmp > 16)
+			tmp = 16;
+		tmp |= (cmd->scan_begin_arg & (CR_INVERT | CR_EDGE));
+		err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, tmp);
+
+	} else {		/* TRIG_OTHER */
+		err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
+
+	}
+
+	if (cmd->convert_src == TRIG_TIMER) {
+		if ((boardtype.reg_type == ni_reg_611x)
+		    || (boardtype.reg_type == ni_reg_6143)) {
+			err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
+
+		} else {
+			err |= cfc_check_trigger_arg_min(&cmd->convert_arg,
+							 boardtype.ai_speed);
+			err |= cfc_check_trigger_arg_max(&cmd->convert_arg,
+						devpriv->clock_ns * 0xffff);
+		}
+	} else if (cmd->convert_src == TRIG_EXT) {
+		/* external trigger */
+		unsigned int tmp = CR_CHAN(cmd->convert_arg);
+
+		if (tmp > 16)
+			tmp = 16;
+		tmp |= (cmd->convert_arg & (CR_ALT_FILTER | CR_INVERT));
+		err |= cfc_check_trigger_arg_is(&cmd->convert_arg, tmp);
+	} else if (cmd->convert_src == TRIG_NOW) {
+		err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
+	}
+
+	err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->nb_chan);
+
+	if (cmd->stop_src == TRIG_COUNT) {
+		unsigned int max_count = 0x01000000;
+
+		if (boardtype.reg_type == ni_reg_611x)
+			max_count -= num_adc_stages_611x;
+		err |= cfc_check_trigger_arg_max(&cmd->stop_arg, max_count);
+		err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
+
+	} else {
+		/* TRIG_NONE */
+		err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
+	}
+
+	if (err) {
+		if (cmd->valid_simul_stages & BIT(3))
+			return 0;
+
+		a4l_dbg(1, drv_dbg, dev, "ai_cmdtest ERR 3 \n");
+		return 3;
+	}
+
+	/* step 4: fix up any arguments */
+	if (cmd->scan_begin_src == TRIG_TIMER) {
+		tmp = cmd->scan_begin_arg;
+		cmd->scan_begin_arg =
+		    ni_timer_to_ns(dev, ni_ns_to_timer(dev,
+						       cmd->scan_begin_arg,
+						       cmd->flags &
+						       TRIG_ROUND_MASK));
+		if (tmp != cmd->scan_begin_arg)
+			err++;
+	}
+	if (cmd->convert_src == TRIG_TIMER) {
+		if ((boardtype.reg_type != ni_reg_611x)
+		    && (boardtype.reg_type != ni_reg_6143)) {
+			tmp = cmd->convert_arg;
+			cmd->convert_arg =
+			    ni_timer_to_ns(dev, ni_ns_to_timer(dev,
+							       cmd->convert_arg,
+							       cmd->
+							       flags &
+							       TRIG_ROUND_MASK));
+			if (tmp != cmd->convert_arg)
+				err++;
+			if (cmd->scan_begin_src == TRIG_TIMER &&
+			    cmd->scan_begin_arg <
+			    cmd->convert_arg * cmd->scan_end_arg) {
+				cmd->scan_begin_arg =
+				    cmd->convert_arg * cmd->scan_end_arg;
+				err++;
+			}
+		}
+	}
+
+	if (err) {
+		if (cmd->valid_simul_stages & BIT(4))
+			return 0;
+
+		a4l_dbg(1, drv_dbg, dev, "ai_cmdtest ERR 4 \n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ni_ai_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+	int timer;
+	int mode1 = 0;		/* mode1 is needed for both stop and convert */
+	int mode2 = 0;
+	int start_stop_select = 0;
+	unsigned int stop_count;
+	int interrupt_a_enable = 0;
+
+	a4l_info(dev, "start\n");
+
+	if (a4l_get_irq(dev) == A4L_IRQ_UNUSED) {
+		a4l_err(dev, "ni_ai_cmd: cannot run command without an irq");
+		return -EIO;
+	}
+	ni_clear_ai_fifo(dev);
+
+	ni_load_channelgain_list(dev, cmd->nb_chan, cmd->chan_descs);
+
+	/* start configuration */
+	devpriv->stc_writew(dev, AI_Configuration_Start, Joint_Reset_Register);
+
+	/* disable analog triggering for now, since it
+	 * interferes with the use of pfi0 */
+	devpriv->an_trig_etc_reg &= ~Analog_Trigger_Enable;
+	devpriv->stc_writew(dev, devpriv->an_trig_etc_reg,
+			    Analog_Trigger_Etc_Register);
+
+	switch (cmd->start_src) {
+	case TRIG_INT:
+	case TRIG_NOW:
+		devpriv->stc_writew(dev, AI_START2_Select(0) |
+				    AI_START1_Sync | AI_START1_Edge | AI_START1_Select(0),
+				    AI_Trigger_Select_Register);
+		break;
+	case TRIG_EXT:
+	{
+		int chan = CR_CHAN(cmd->start_arg);
+		unsigned int bits = AI_START2_Select(0) |
+			AI_START1_Sync | AI_START1_Select(chan + 1);
+
+		if (cmd->start_arg & CR_INVERT)
+			bits |= AI_START1_Polarity;
+		if (cmd->start_arg & CR_EDGE)
+			bits |= AI_START1_Edge;
+		devpriv->stc_writew(dev, bits,
+				    AI_Trigger_Select_Register);
+		break;
+	}
+	}
+
+	mode2 &= ~AI_Pre_Trigger;
+	mode2 &= ~AI_SC_Initial_Load_Source;
+	mode2 &= ~AI_SC_Reload_Mode;
+	devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
+
+	if (cmd->nb_chan == 1 || (boardtype.reg_type == ni_reg_611x)
+	    || (boardtype.reg_type == ni_reg_6143)) {
+		start_stop_select |= AI_STOP_Polarity;
+		start_stop_select |= AI_STOP_Select(31);/* logic low */
+		start_stop_select |= AI_STOP_Sync;
+	} else {
+		start_stop_select |= AI_STOP_Select(19);/* ai configuration memory */
+	}
+	devpriv->stc_writew(dev, start_stop_select,
+			    AI_START_STOP_Select_Register);
+
+	devpriv->ai_cmd2 = 0;
+	switch (cmd->stop_src) {
+	case TRIG_COUNT:
+		stop_count = cmd->stop_arg - 1;
+
+		if (boardtype.reg_type == ni_reg_611x) {
+			/* have to take 3 stage adc pipeline into account */
+			stop_count += num_adc_stages_611x;
+		}
+		/* stage number of scans */
+		devpriv->stc_writel(dev, stop_count, AI_SC_Load_A_Registers);
+
+		mode1 |= AI_Start_Stop | AI_Mode_1_Reserved | AI_Trigger_Once;
+		devpriv->stc_writew(dev, mode1, AI_Mode_1_Register);
+		/* load SC (Scan Count) */
+		devpriv->stc_writew(dev, AI_SC_Load, AI_Command_1_Register);
+
+		devpriv->ai_continuous = 0;
+		if (stop_count == 0) {
+			devpriv->ai_cmd2 |= AI_End_On_End_Of_Scan;
+			interrupt_a_enable |= AI_STOP_Interrupt_Enable;
+			/* this is required to get the last sample
+			   for nb_chan > 1, not sure why */
+			if (cmd->nb_chan > 1)
+				start_stop_select |=
+					AI_STOP_Polarity | AI_STOP_Edge;
+		}
+		break;
+	case TRIG_NONE:
+		/* stage number of scans */
+		devpriv->stc_writel(dev, 0, AI_SC_Load_A_Registers);
+
+		mode1 |= AI_Start_Stop | AI_Mode_1_Reserved | AI_Continuous;
+		devpriv->stc_writew(dev, mode1, AI_Mode_1_Register);
+
+		/* load SC (Scan Count) */
+		devpriv->stc_writew(dev, AI_SC_Load, AI_Command_1_Register);
+
+		devpriv->ai_continuous = 1;
+
+		break;
+	}
+
+	switch (cmd->scan_begin_src) {
+	case TRIG_TIMER:
+		/*
+		  stop bits for non 611x boards
+		  AI_SI_Special_Trigger_Delay=0
+		  AI_Pre_Trigger=0
+		  AI_START_STOP_Select_Register:
+		  AI_START_Polarity=0 (?)      rising edge
+		  AI_START_Edge=1              edge triggered
+		  AI_START_Sync=1 (?)
+		  AI_START_Select=0            SI_TC
+		  AI_STOP_Polarity=0           rising edge
+		  AI_STOP_Edge=0               level
+		  AI_STOP_Sync=1
+		  AI_STOP_Select=19            external pin (configuration mem)
+		*/
+		start_stop_select |= AI_START_Edge | AI_START_Sync;
+		devpriv->stc_writew(dev, start_stop_select,
+				    AI_START_STOP_Select_Register);
+
+		mode2 |= AI_SI_Reload_Mode(0);
+		/* AI_SI_Initial_Load_Source=A */
+		mode2 &= ~AI_SI_Initial_Load_Source;
+
+		devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
+
+		/* load SI */
+		timer = ni_ns_to_timer(dev, cmd->scan_begin_arg,
+				       TRIG_ROUND_NEAREST);
+		devpriv->stc_writel(dev, timer, AI_SI_Load_A_Registers);
+		devpriv->stc_writew(dev, AI_SI_Load, AI_Command_1_Register);
+		break;
+	case TRIG_EXT:
+		if (cmd->scan_begin_arg & CR_EDGE)
+			start_stop_select |= AI_START_Edge;
+		/* AI_START_Polarity==1 is falling edge */
+		if (cmd->scan_begin_arg & CR_INVERT)
+			start_stop_select |= AI_START_Polarity;
+		if (cmd->scan_begin_src != cmd->convert_src ||
+		    (cmd->scan_begin_arg & ~CR_EDGE) !=
+		    (cmd->convert_arg & ~CR_EDGE))
+			start_stop_select |= AI_START_Sync;
+		start_stop_select |=
+			AI_START_Select(1 + CR_CHAN(cmd->scan_begin_arg));
+		devpriv->stc_writew(dev, start_stop_select,
+				    AI_START_STOP_Select_Register);
+		break;
+	}
+
+	switch (cmd->convert_src) {
+	case TRIG_TIMER:
+	case TRIG_NOW:
+		if (cmd->convert_arg == 0 || cmd->convert_src == TRIG_NOW)
+			timer = 1;
+		else
+			timer = ni_ns_to_timer(dev, cmd->convert_arg,
+					       TRIG_ROUND_NEAREST);
+		devpriv->stc_writew(dev, 1, AI_SI2_Load_A_Register);	/* 0,0 does not work. */
+		devpriv->stc_writew(dev, timer, AI_SI2_Load_B_Register);
+
+		/* AI_SI2_Reload_Mode = alternate */
+		/* AI_SI2_Initial_Load_Source = A */
+		mode2 &= ~AI_SI2_Initial_Load_Source;
+		mode2 |= AI_SI2_Reload_Mode;
+		devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
+
+		/* AI_SI2_Load */
+		devpriv->stc_writew(dev, AI_SI2_Load, AI_Command_1_Register);
+
+		mode2 |= AI_SI2_Reload_Mode; /* alternate */
+		mode2 |= AI_SI2_Initial_Load_Source; /* B */
+
+		devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
+		break;
+	case TRIG_EXT:
+		mode1 |= AI_CONVERT_Source_Select(1 + cmd->convert_arg);
+		if ((cmd->convert_arg & CR_INVERT) == 0)
+			mode1 |= AI_CONVERT_Source_Polarity;
+		devpriv->stc_writew(dev, mode1, AI_Mode_1_Register);
+
+		mode2 |= AI_Start_Stop_Gate_Enable | AI_SC_Gate_Enable;
+		devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
+
+		break;
+	}
+
+	if (a4l_get_irq(dev) != A4L_IRQ_UNUSED) {
+
+		/* interrupt on FIFO, errors, SC_TC */
+		interrupt_a_enable |= AI_Error_Interrupt_Enable |
+			AI_SC_TC_Interrupt_Enable;
+
+#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \
+     !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+		interrupt_a_enable |= AI_FIFO_Interrupt_Enable;
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+		if (cmd->flags & TRIG_WAKE_EOS
+		    || (devpriv->ai_cmd2 & AI_End_On_End_Of_Scan)) {
+			/* wake on end-of-scan */
+			devpriv->aimode = AIMODE_SCAN;
+		} else {
+			devpriv->aimode = AIMODE_HALF_FULL;
+		}
+
+		switch (devpriv->aimode) {
+		case AIMODE_HALF_FULL:
+			/* generate FIFO interrupts and DMA requests on half-full */
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+			devpriv->stc_writew(dev, AI_FIFO_Mode_HF_to_E,
+					    AI_Mode_3_Register);
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+			devpriv->stc_writew(dev, AI_FIFO_Mode_HF,
+					    AI_Mode_3_Register);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+			break;
+		case AIMODE_SAMPLE:
+			/* generate FIFO interrupts on non-empty */
+			devpriv->stc_writew(dev, AI_FIFO_Mode_NE,
+					    AI_Mode_3_Register);
+			break;
+		case AIMODE_SCAN:
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+			devpriv->stc_writew(dev, AI_FIFO_Mode_NE,
+					    AI_Mode_3_Register);
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+			devpriv->stc_writew(dev, AI_FIFO_Mode_HF,
+					    AI_Mode_3_Register);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+			interrupt_a_enable |= AI_STOP_Interrupt_Enable;
+			break;
+		default:
+			break;
+		}
+
+		/* Clear interrupts */
+		devpriv->stc_writew(dev,
+				    AI_Error_Interrupt_Ack | AI_STOP_Interrupt_Ack |
+				    AI_START_Interrupt_Ack | AI_START2_Interrupt_Ack |
+				    AI_START1_Interrupt_Ack | AI_SC_TC_Interrupt_Ack |
+				    AI_SC_TC_Error_Confirm, Interrupt_A_Ack_Register);	/* clear interrupts */
+
+		ni_set_bits(dev, Interrupt_A_Enable_Register,
+			    interrupt_a_enable, 1);
+
+		a4l_info(dev, "Interrupt_A_Enable_Register = 0x%04x\n",
+			 devpriv->int_a_enable_reg);
+	} else {
+		/* interrupt on nothing */
+		ni_set_bits(dev, Interrupt_A_Enable_Register, ~0, 0);
+
+		/* XXX start polling if necessary */
+		a4l_warn(dev, "ni_ai_cmd: interrupting on nothing\n");
+	}
+
+	/* end configuration */
+	devpriv->stc_writew(dev, AI_Configuration_End, Joint_Reset_Register);
+
+	switch (cmd->scan_begin_src) {
+	case TRIG_TIMER:
+		devpriv->stc_writew(dev,
+				    AI_SI2_Arm | AI_SI_Arm | AI_DIV_Arm | AI_SC_Arm,
+				    AI_Command_1_Register);
+		break;
+	case TRIG_EXT:
+		/* XXX AI_SI_Arm? */
+		devpriv->stc_writew(dev,
+				    AI_SI2_Arm | AI_SI_Arm | AI_DIV_Arm | AI_SC_Arm,
+				    AI_Command_1_Register);
+		break;
+	}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+	{
+		int retval = ni_ai_setup_MITE_dma(subd);
+		if (retval)
+			return retval;
+	}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+	switch (cmd->start_src) {
+	case TRIG_NOW:
+		/* AI_START1_Pulse */
+		devpriv->stc_writew(dev, AI_START1_Pulse | devpriv->ai_cmd2,
+				    AI_Command_2_Register);
+		break;
+	case TRIG_EXT:
+		/* TODO: set trigger callback field to NULL */
+		break;
+	case TRIG_INT:
+		/* TODO: set trigger callback field to ni_ai_inttrig */
+		break;
+	}
+
+	a4l_info(dev, "exit\n");
+
+	return 0;
+}
+
+int ni_ai_config_analog_trig(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int a, b, modebits;
+	int err = 0;
+	uint32_t *data = (uint32_t *)insn->data;
+
+	/* data[1] is flags
+	 * data[2] is analog line
+	 * data[3] is set level
+	 * data[4] is reset level */
+	if (!boardtype.has_analog_trig)
+		return -EINVAL;
+
+	if ((data[1] & 0xffff0000) != A4L_EV_SCAN_BEGIN) {
+		data[1] &= (A4L_EV_SCAN_BEGIN | 0xffff);
+		err++;
+	}
+	if (data[2] >= boardtype.n_adchan) {
+		data[2] = boardtype.n_adchan - 1;
+		err++;
+	}
+	if (data[3] > 255) {	/* a */
+		data[3] = 255;
+		err++;
+	}
+	if (data[4] > 255) {	/* b */
+		data[4] = 255;
+		err++;
+	}
+	/*
+	 * 00 ignore
+	 * 01 set
+	 * 10 reset
+	 *
+	 * modes:
+	 *   1 level:                    +b-   +a-
+	 *     high mode                00 00 01 10
+	 *     low mode                 00 00 10 01
+	 *   2 level: (a<b)
+	 *     hysteresis low mode      10 00 00 01
+	 *     hysteresis high mode     01 00 00 10
+	 *     middle mode              10 01 01 10
+	 */
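+	/*
+	 * Reading a table row as an 8-bit value gives the modebits decoded
+	 * below: e.g. "high mode" 00 00 01 10 == 0x06 and "hysteresis low
+	 * mode" 10 00 00 01 == 0x81.
+	 */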
+
+	a = data[3];
+	b = data[4];
+	modebits = data[1] & 0xff;
+	if (modebits & 0xf0) {
+		/* two level mode */
+		if (b < a) {
+			/* swap order */
+			a = data[4];
+			b = data[3];
+			modebits = ((data[1] & 0xf) << 4) |
+				((data[1] & 0xf0) >> 4);
+		}
+		devpriv->atrig_low = a;
+		devpriv->atrig_high = b;
+		switch (modebits) {
+		case 0x81:	/* low hysteresis mode */
+			devpriv->atrig_mode = 6;
+			break;
+		case 0x42:	/* high hysteresis mode */
+			devpriv->atrig_mode = 3;
+			break;
+		case 0x96:	/* middle window mode */
+			devpriv->atrig_mode = 2;
+			break;
+		default:
+			data[1] &= ~0xff;
+			err++;
+		}
+	} else {
+		/* one level mode */
+		if (b != 0) {
+			data[4] = 0;
+			err++;
+		}
+		switch (modebits) {
+		case 0x06:	/* high window mode */
+			devpriv->atrig_high = a;
+			devpriv->atrig_mode = 0;
+			break;
+		case 0x09:	/* low window mode */
+			devpriv->atrig_low = a;
+			devpriv->atrig_mode = 1;
+			break;
+		default:
+			data[1] &= ~0xff;
+			err++;
+		}
+	}
+
+	if (err)
+		return -EAGAIN;
+
+	return 0;
+}
+
+int ni_ai_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int *)insn->data;
+
+	if (insn->data_size < sizeof(unsigned int))
+		return -EINVAL;
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_ANALOG_TRIG:
+		return ni_ai_config_analog_trig(subd, insn);
+	case A4L_INSN_CONFIG_ALT_SOURCE:
+		if (boardtype.reg_type & ni_reg_m_series_mask) {
+			if (data[1] & ~(MSeries_AI_Bypass_Cal_Sel_Pos_Mask |
+					MSeries_AI_Bypass_Cal_Sel_Neg_Mask |
+					MSeries_AI_Bypass_Mode_Mux_Mask |
+					MSeries_AO_Bypass_AO_Cal_Sel_Mask)) {
+				return -EINVAL;
+			}
+			devpriv->ai_calib_source = data[1];
+		} else if (boardtype.reg_type == ni_reg_6143) {
+			unsigned int calib_source;
+
+			calib_source = data[1] & 0xf;
+
+			if (calib_source > 0xF)
+				return -EINVAL;
+
+			devpriv->ai_calib_source = calib_source;
+			ni_writew(calib_source, Calibration_Channel_6143);
+		} else {
+			unsigned int calib_source;
+			unsigned int calib_source_adjust;
+
+			calib_source = data[1] & 0xf;
+			calib_source_adjust = (data[1] >> 4) & 0xff;
+
+			if (calib_source >= 8)
+				return -EINVAL;
+			devpriv->ai_calib_source = calib_source;
+			if (boardtype.reg_type == ni_reg_611x) {
+				ni_writeb(calib_source_adjust,
+					  Cal_Gain_Select_611x);
+			}
+		}
+		return 0;
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+/* munge data from unsigned to 2's complement for analog output bipolar modes */
+static void ni_ao_munge(struct a4l_subdevice *subd, void *buf, unsigned long size)
+{
+	struct a4l_device *dev = subd->dev;
+	struct a4l_cmd_desc *cmd = a4l_get_cmd(subd);
+	int chan_idx = a4l_get_chan(subd);
+	uint16_t *array = buf;
+	unsigned int i, range, offset;
+
+	offset = 1 << (boardtype.aobits - 1);
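+	/*
+	 * E.g. with 16-bit DACs the offset is 0x8000, so the subtraction
+	 * below re-centres the samples: mid-scale (0x8000) becomes 0,
+	 * giving the two's complement encoding used for bipolar ranges.
+	 */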
+	for (i = 0; i < size / sizeof(uint16_t); i++) {
+
+		range = CR_RNG(cmd->chan_descs[chan_idx]);
+		if (boardtype.ao_unipolar == 0 || (range & 1) == 0)
+			array[i] -= offset;
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+		array[i] = cpu_to_le16(array[i]);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+		chan_idx++;
+		chan_idx %= cmd->nb_chan;
+	}
+}
+
+static int ni_m_series_ao_config_chan_descs(struct a4l_subdevice *subd,
+					    unsigned int chanspec[],
+					    unsigned int n_chans, int timed)
+{
+	unsigned int range;
+	unsigned int chan;
+	unsigned int conf;
+	int i, invert = 0;
+	struct a4l_device *dev = subd->dev;
+
+	for (i = 0; i < boardtype.n_aochan; ++i) {
+		ni_writeb(0xf, M_Offset_AO_Waveform_Order(i));
+	}
+	for (i = 0; i < n_chans; i++) {
+		struct a4l_range *rng;
+		int idx;
+		chan = CR_CHAN(chanspec[i]);
+		range = CR_RNG(chanspec[i]);
+
+		/* TODO: this is a huge hack!
+		   Something is missing in the kernel API; we must be
+		   allowed to access the proper range descriptor. */
+		idx =  (subd->rng_desc->mode !=
+			A4L_RNG_GLOBAL_RNGDESC) ? chan : 0;
+		rng = &(subd->rng_desc->rngtabs[idx]->rngs[range]);
+
+		invert = 0;
+		conf = 0;
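+		/*
+		 * The spans below are in the range descriptor's scaled units
+		 * (presumably microvolts): 20000000 selects the internal 10V
+		 * reference (+/-10V span), while 4000000 and 2000000 add the
+		 * x5 attenuator on top of the 10V/5V references.
+		 */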
+		switch (rng->max - rng->min) {
+		case 20000000:
+			conf |= MSeries_AO_DAC_Reference_10V_Internal_Bits;
+			ni_writeb(0, M_Offset_AO_Reference_Attenuation(chan));
+			break;
+		case 10000000:
+			conf |= MSeries_AO_DAC_Reference_5V_Internal_Bits;
+			ni_writeb(0, M_Offset_AO_Reference_Attenuation(chan));
+			break;
+		case 4000000:
+			conf |= MSeries_AO_DAC_Reference_10V_Internal_Bits;
+			ni_writeb(MSeries_Attenuate_x5_Bit,
+				  M_Offset_AO_Reference_Attenuation(chan));
+			break;
+		case 2000000:
+			conf |= MSeries_AO_DAC_Reference_5V_Internal_Bits;
+			ni_writeb(MSeries_Attenuate_x5_Bit,
+				  M_Offset_AO_Reference_Attenuation(chan));
+			break;
+		default:
+			a4l_err(subd->dev,
+				"%s: bug! unhandled ao reference voltage\n",
+				__FUNCTION__);
+			break;
+		}
+		switch (rng->max + rng->min) {
+		case 0:
+			conf |= MSeries_AO_DAC_Offset_0V_Bits;
+			break;
+		case 10000000:
+			conf |= MSeries_AO_DAC_Offset_5V_Bits;
+			break;
+		default:
+			a4l_err(subd->dev,
+				"%s: bug! unhandled ao offset voltage\n",
+				__FUNCTION__);
+			break;
+		}
+		if (timed)
+			conf |= MSeries_AO_Update_Timed_Bit;
+		ni_writeb(conf, M_Offset_AO_Config_Bank(chan));
+		devpriv->ao_conf[chan] = conf;
+		ni_writeb(i, M_Offset_AO_Waveform_Order(chan));
+	}
+	return invert;
+}
+
+static int ni_old_ao_config_chan_descs(struct a4l_subdevice *subd,
+				       unsigned int chanspec[],
+				       unsigned int n_chans)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int range;
+	unsigned int chan;
+	unsigned int conf;
+	int i, invert = 0;
+
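+	/*
+	 * For bipolar ranges "invert" is set to 1 << (aobits - 1); the
+	 * caller XORs each sample with it (see ni_ao_insn_write()), which
+	 * is the same unsigned-to-two's-complement conversion that
+	 * ni_ao_munge() applies to streamed output.
+	 */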
+	for (i = 0; i < n_chans; i++) {
+		chan = CR_CHAN(chanspec[i]);
+		range = CR_RNG(chanspec[i]);
+		conf = AO_Channel(chan);
+
+		if (boardtype.ao_unipolar) {
+			if ((range & 1) == 0) {
+				conf |= AO_Bipolar;
+				invert = (1 << (boardtype.aobits - 1));
+			} else {
+				invert = 0;
+			}
+			if (range & 2)
+				conf |= AO_Ext_Ref;
+		} else {
+			conf |= AO_Bipolar;
+			invert = (1 << (boardtype.aobits - 1));
+		}
+
+		/* not all boards can deglitch, but this shouldn't hurt */
+		if (chanspec[i] & CR_DEGLITCH)
+			conf |= AO_Deglitch;
+
+		/* analog reference */
+		/* AREF_OTHER connects AO ground to AI ground, I think */
+		conf |= (CR_AREF(chanspec[i]) ==
+			 AREF_OTHER) ? AO_Ground_Ref : 0;
+
+		ni_writew(conf, AO_Configuration);
+		devpriv->ao_conf[chan] = conf;
+	}
+	return invert;
+}
+
+static int ni_ao_config_chan_descs(struct a4l_subdevice *subd,
+				   unsigned int chanspec[],
+				   unsigned int n_chans, int timed)
+{
+	struct a4l_device *dev = subd->dev;
+
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		return ni_m_series_ao_config_chan_descs(subd,
+							chanspec,
+							n_chans, timed);
+	else
+		return ni_old_ao_config_chan_descs(subd, chanspec, n_chans);
+}
+
+int ni_ao_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	data[0] = devpriv->ao[CR_CHAN(insn->chan_desc)];
+
+	return 0;
+}
+
+int ni_ao_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int chan = CR_CHAN(insn->chan_desc);
+	uint16_t *data = (uint16_t *)insn->data;
+	unsigned int invert;
+
+	invert = ni_ao_config_chan_descs(subd,
+					 &insn->chan_desc, 1, 0);
+
+	devpriv->ao[chan] = data[0];
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		ni_writew(data[0], M_Offset_DAC_Direct_Data(chan));
+	} else
+		ni_writew(data[0] ^ invert,
+			  (chan) ? DAC1_Direct_Data : DAC0_Direct_Data);
+
+	return 0;
+}
+
+int ni_ao_insn_write_671x(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int chan = CR_CHAN(insn->chan_desc);
+	uint16_t *data = (uint16_t *)insn->data;
+	unsigned int invert;
+
+	ao_win_out(1 << chan, AO_Immediate_671x);
+	invert = 1 << (boardtype.aobits - 1);
+
+	ni_ao_config_chan_descs(subd, &insn->chan_desc, 1, 0);
+
+	devpriv->ao[chan] = data[0];
+	ao_win_out(data[0] ^ invert, DACx_Direct_Data_671x(chan));
+
+	return 0;
+}
+
+int ni_ao_inttrig(struct a4l_subdevice *subd, lsampl_t trignum)
+{
+	struct a4l_device *dev = subd->dev;
+	int ret, interrupt_b_bits, i;
+	static const int timeout = 1000;
+
+	if (trignum != 0)
+		return -EINVAL;
+
+	/* TODO: disable the trigger until a command is recorded.
+	   A null trig at the beginning prevents the ao start trigger from
+	   executing more than once per command (and doing things like
+	   trying to allocate the ao dma channel multiple times). */
+
+	ni_set_bits(dev, Interrupt_B_Enable_Register,
+		    AO_FIFO_Interrupt_Enable | AO_Error_Interrupt_Enable, 0);
+	interrupt_b_bits = AO_Error_Interrupt_Enable;
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+	devpriv->stc_writew(dev, 1, DAC_FIFO_Clear);
+	if (boardtype.reg_type & ni_reg_6xxx_mask)
+		ni_ao_win_outl(dev, 0x6, AO_FIFO_Offset_Load_611x);
+	ret = ni_ao_setup_MITE_dma(subd);
+	if (ret)
+		return ret;
+	ret = ni_ao_wait_for_dma_load(subd);
+	if (ret < 0)
+		return ret;
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+	ret = ni_ao_prep_fifo(subd);
+	if (ret == 0)
+		return -EPIPE;
+
+	interrupt_b_bits |= AO_FIFO_Interrupt_Enable;
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+	devpriv->stc_writew(dev, devpriv->ao_mode3 | AO_Not_An_UPDATE,
+			    AO_Mode_3_Register);
+	devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register);
+	/* wait for DACs to be loaded */
+	for (i = 0; i < timeout; i++) {
+		a4l_udelay(1);
+		if ((devpriv->stc_readw(dev, Joint_Status_2_Register) &
+		     AO_TMRDACWRs_In_Progress_St) == 0)
+			break;
+	}
+	if (i == timeout) {
+		a4l_err(dev,
+			"ni_ao_inttrig: timed out "
+			"waiting for AO_TMRDACWRs_In_Progress_St to clear");
+		return -EIO;
+	}
+	/* The STC manual says we need to clear the error interrupt after
+	   AO_TMRDACWRs_In_Progress_St clears. */
+	devpriv->stc_writew(dev, AO_Error_Interrupt_Ack,
+			    Interrupt_B_Ack_Register);
+
+	ni_set_bits(dev, Interrupt_B_Enable_Register, interrupt_b_bits, 1);
+
+	devpriv->stc_writew(dev,
+			    devpriv->ao_cmd1 |
+			    AO_UI_Arm | AO_UC_Arm |
+			    AO_BC_Arm | AO_DAC1_Update_Mode |
+			    AO_DAC0_Update_Mode,
+			    AO_Command_1_Register);
+
+	devpriv->stc_writew(dev,
+			    devpriv->ao_cmd2 | AO_START1_Pulse,
+			    AO_Command_2_Register);
+
+	return 0;
+}
+
+int ni_ao_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	int bits;
+	int i;
+	unsigned trigvar;
+
+	if (a4l_get_irq(dev) == A4L_IRQ_UNUSED) {
+		a4l_err(dev, "ni_ao_cmd: cannot run command without an irq");
+		return -EIO;
+	}
+
+	devpriv->stc_writew(dev, AO_Configuration_Start, Joint_Reset_Register);
+
+	devpriv->stc_writew(dev, AO_Disarm, AO_Command_1_Register);
+
+	if (boardtype.reg_type & ni_reg_6xxx_mask) {
+		ao_win_out(CLEAR_WG, AO_Misc_611x);
+
+		bits = 0;
+		for (i = 0; i < cmd->nb_chan; i++) {
+			int chan;
+
+			chan = CR_CHAN(cmd->chan_descs[i]);
+			bits |= 1 << chan;
+			ao_win_out(chan, AO_Waveform_Generation_611x);
+		}
+		ao_win_out(bits, AO_Timed_611x);
+	}
+
+	ni_ao_config_chan_descs(subd, cmd->chan_descs, cmd->nb_chan, 1);
+
+	if (cmd->stop_src == TRIG_NONE) {
+		devpriv->ao_mode1 |= AO_Continuous;
+		devpriv->ao_mode1 &= ~AO_Trigger_Once;
+	} else {
+		devpriv->ao_mode1 &= ~AO_Continuous;
+		devpriv->ao_mode1 |= AO_Trigger_Once;
+	}
+	devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
+	devpriv->ao_trigger_select &=
+		~(AO_START1_Polarity | AO_START1_Select(-1));
+	devpriv->ao_trigger_select |= AO_START1_Edge | AO_START1_Sync;
+	devpriv->stc_writew(dev, devpriv->ao_trigger_select,
+			    AO_Trigger_Select_Register);
+	devpriv->ao_mode3 &= ~AO_Trigger_Length;
+	devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register);
+
+	devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
+	devpriv->ao_mode2 &= ~AO_BC_Initial_Load_Source;
+	devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
+	if (cmd->stop_src == TRIG_NONE) {
+		devpriv->stc_writel(dev, 0xffffff, AO_BC_Load_A_Register);
+	} else {
+		devpriv->stc_writel(dev, 0, AO_BC_Load_A_Register);
+	}
+	devpriv->stc_writew(dev, AO_BC_Load, AO_Command_1_Register);
+	devpriv->ao_mode2 &= ~AO_UC_Initial_Load_Source;
+	devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
+	switch (cmd->stop_src) {
+	case TRIG_COUNT:
+		devpriv->stc_writel(dev, cmd->stop_arg, AO_UC_Load_A_Register);
+		devpriv->stc_writew(dev, AO_UC_Load, AO_Command_1_Register);
+		devpriv->stc_writel(dev, cmd->stop_arg - 1,
+				    AO_UC_Load_A_Register);
+		break;
+	case TRIG_NONE:
+		devpriv->stc_writel(dev, 0xffffff, AO_UC_Load_A_Register);
+		devpriv->stc_writew(dev, AO_UC_Load, AO_Command_1_Register);
+		devpriv->stc_writel(dev, 0xffffff, AO_UC_Load_A_Register);
+		break;
+	default:
+		devpriv->stc_writel(dev, 0, AO_UC_Load_A_Register);
+		devpriv->stc_writew(dev, AO_UC_Load, AO_Command_1_Register);
+		devpriv->stc_writel(dev, cmd->stop_arg, AO_UC_Load_A_Register);
+	}
+
+	devpriv->ao_mode1 &=
+		~(AO_UI_Source_Select(0x1f) | AO_UI_Source_Polarity |
+		  AO_UPDATE_Source_Select(0x1f) | AO_UPDATE_Source_Polarity);
+	switch (cmd->scan_begin_src) {
+	case TRIG_TIMER:
+		devpriv->ao_cmd2 &= ~AO_BC_Gate_Enable;
+		trigvar =
+			ni_ns_to_timer(dev, cmd->scan_begin_arg,
+				       TRIG_ROUND_NEAREST);
+		devpriv->stc_writel(dev, 1, AO_UI_Load_A_Register);
+		devpriv->stc_writew(dev, AO_UI_Load, AO_Command_1_Register);
+		devpriv->stc_writel(dev, trigvar, AO_UI_Load_A_Register);
+		break;
+	case TRIG_EXT:
+		devpriv->ao_mode1 |=
+			AO_UPDATE_Source_Select(cmd->scan_begin_arg);
+		if (cmd->scan_begin_arg & CR_INVERT)
+			devpriv->ao_mode1 |= AO_UPDATE_Source_Polarity;
+		devpriv->ao_cmd2 |= AO_BC_Gate_Enable;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	devpriv->stc_writew(dev, devpriv->ao_cmd2, AO_Command_2_Register);
+	devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
+	devpriv->ao_mode2 &=
+		~(AO_UI_Reload_Mode(3) | AO_UI_Initial_Load_Source);
+	devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
+
+	if ((boardtype.reg_type & ni_reg_6xxx_mask) == 0) {
+		if (cmd->scan_end_arg > 1) {
+			devpriv->ao_mode1 |= AO_Multiple_Channels;
+			devpriv->stc_writew(dev,
+					    AO_Number_Of_Channels(cmd->scan_end_arg - 1) |
+					    AO_UPDATE_Output_Select
+					    (AO_Update_Output_High_Z),
+					    AO_Output_Control_Register);
+		} else {
+			unsigned int bits;
+			devpriv->ao_mode1 &= ~AO_Multiple_Channels;
+			bits = AO_UPDATE_Output_Select(AO_Update_Output_High_Z);
+			if (boardtype.reg_type & ni_reg_m_series_mask) {
+				bits |= AO_Number_Of_Channels(0);
+			} else {
+				bits |= AO_Number_Of_Channels(CR_CHAN(cmd->
+								      chan_descs[0]));
+			}
+			devpriv->stc_writew(dev, bits,
+					    AO_Output_Control_Register);
+		}
+		devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
+	}
+
+	devpriv->stc_writew(dev, AO_DAC0_Update_Mode | AO_DAC1_Update_Mode,
+			    AO_Command_1_Register);
+
+	devpriv->ao_mode3 |= AO_Stop_On_Overrun_Error;
+	devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register);
+
+	devpriv->ao_mode2 &= ~AO_FIFO_Mode_Mask;
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+	devpriv->ao_mode2 |= AO_FIFO_Mode_HF_to_F;
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+	devpriv->ao_mode2 |= AO_FIFO_Mode_HF;
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+	devpriv->ao_mode2 &= ~AO_FIFO_Retransmit_Enable;
+	devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
+
+	bits = AO_BC_Source_Select | AO_UPDATE_Pulse_Width |
+		AO_TMRDACWR_Pulse_Width;
+	if (boardtype.ao_fifo_depth)
+		bits |= AO_FIFO_Enable;
+	else
+		bits |= AO_DMA_PIO_Control;
+#if 0
+	/* F Hess: windows driver does not set AO_Number_Of_DAC_Packages bit for 6281,
+	   verified with bus analyzer. */
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		bits |= AO_Number_Of_DAC_Packages;
+#endif
+	devpriv->stc_writew(dev, bits, AO_Personal_Register);
+	/* enable sending of ao dma requests */
+	devpriv->stc_writew(dev, AO_AOFREQ_Enable, AO_Start_Select_Register);
+
+	devpriv->stc_writew(dev, AO_Configuration_End, Joint_Reset_Register);
+
+	if (cmd->stop_src == TRIG_COUNT) {
+		devpriv->stc_writew(dev, AO_BC_TC_Interrupt_Ack,
+				    Interrupt_B_Ack_Register);
+		ni_set_bits(dev, Interrupt_B_Enable_Register,
+			    AO_BC_TC_Interrupt_Enable, 1);
+	}
+
+	return 0;
+}
+
+struct a4l_cmd_desc mio_ao_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_INT,
+	.scan_begin_src = TRIG_TIMER | TRIG_EXT,
+	.convert_src = TRIG_NOW,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_COUNT | TRIG_NONE,
+};
+
+int ni_ao_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	/* Make sure trigger sources are unique and mutually compatible */
+
+	if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE)
+		return -EINVAL;
+
+	/* Make sure arguments are trivially compatible */
+
+	if (cmd->start_arg != 0) {
+		cmd->start_arg = 0;
+		return -EINVAL;
+	}
+
+	if (cmd->scan_begin_src == TRIG_TIMER) {
+		if (cmd->scan_begin_arg < boardtype.ao_speed) {
+			cmd->scan_begin_arg = boardtype.ao_speed;
+			return -EINVAL;
+		}
+		if (cmd->scan_begin_arg > devpriv->clock_ns * 0xffffff) {
+			/* XXX check */
+			cmd->scan_begin_arg = devpriv->clock_ns * 0xffffff;
+			return -EINVAL;
+		}
+	}
+
+	if (cmd->convert_arg != 0) {
+		cmd->convert_arg = 0;
+		return -EINVAL;
+	}
+	if (cmd->scan_end_arg != cmd->nb_chan) {
+		cmd->scan_end_arg = cmd->nb_chan;
+		return -EINVAL;
+	}
+	if (cmd->stop_src == TRIG_COUNT) {
+		/* XXX check */
+		if (cmd->stop_arg > 0x00ffffff) {
+			cmd->stop_arg = 0x00ffffff;
+			return -EINVAL;
+		}
+	} else {
+		/* TRIG_NONE */
+		if (cmd->stop_arg != 0) {
+			cmd->stop_arg = 0;
+			return -EINVAL;
+		}
+	}
+
+	/* step 4: fix up any arguments */
+	if (cmd->scan_begin_src == TRIG_TIMER) {
+
+		if(cmd->scan_begin_arg !=
+		   ni_timer_to_ns(dev,
+				  ni_ns_to_timer(dev,
+						 cmd->scan_begin_arg,
+						 cmd->flags & TRIG_ROUND_MASK)))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+void ni_ao_reset(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	ni_release_ao_mite_channel(dev);
+
+	devpriv->stc_writew(dev, AO_Configuration_Start, Joint_Reset_Register);
+	devpriv->stc_writew(dev, AO_Disarm, AO_Command_1_Register);
+	ni_set_bits(dev, Interrupt_B_Enable_Register, ~0, 0);
+	devpriv->stc_writew(dev, AO_BC_Source_Select, AO_Personal_Register);
+	devpriv->stc_writew(dev, 0x3f98, Interrupt_B_Ack_Register);
+	devpriv->stc_writew(dev, AO_BC_Source_Select | AO_UPDATE_Pulse_Width |
+			    AO_TMRDACWR_Pulse_Width, AO_Personal_Register);
+	devpriv->stc_writew(dev, 0, AO_Output_Control_Register);
+	devpriv->stc_writew(dev, 0, AO_Start_Select_Register);
+	devpriv->ao_cmd1 = 0;
+	devpriv->stc_writew(dev, devpriv->ao_cmd1, AO_Command_1_Register);
+	devpriv->ao_cmd2 = 0;
+	devpriv->stc_writew(dev, devpriv->ao_cmd2, AO_Command_2_Register);
+	devpriv->ao_mode1 = 0;
+	devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
+	devpriv->ao_mode2 = 0;
+	devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		devpriv->ao_mode3 = AO_Last_Gate_Disable;
+	else
+		devpriv->ao_mode3 = 0;
+	devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register);
+	devpriv->ao_trigger_select = 0;
+	devpriv->stc_writew(dev, devpriv->ao_trigger_select,
+			    AO_Trigger_Select_Register);
+	if (boardtype.reg_type & ni_reg_6xxx_mask) {
+		ao_win_out(0x3, AO_Immediate_671x);
+		ao_win_out(CLEAR_WG, AO_Misc_611x);
+	}
+	devpriv->stc_writew(dev, AO_Configuration_End, Joint_Reset_Register);
+}
+
+/* digital io */
+
+int ni_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int *)insn->data;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "chan=%d io=%d\n", CR_CHAN(insn->chan_desc), data[0]);
+#endif /* CONFIG_DEBUG_DIO */
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		devpriv->io_bits |= 1 << CR_CHAN(insn->chan_desc);
+		break;
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		devpriv->io_bits &= ~(1 << CR_CHAN(insn->chan_desc));
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (devpriv->io_bits &
+			   (1 << CR_CHAN(insn->chan_desc))) ?
+			A4L_OUTPUT : A4L_INPUT;
+		return 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	devpriv->dio_control &= ~DIO_Pins_Dir_Mask;
+	devpriv->dio_control |= DIO_Pins_Dir(devpriv->io_bits);
+	devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register);
+
+	return 1;
+}
+
+int ni_dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "mask=0x%x bits=0x%x\n", data[0], data[1]);
+#endif
+
+	if (insn->data_size != 2 * sizeof(uint8_t))
+		return -EINVAL;
+
+	if (data[0]) {
+		/* Perform check to make sure we're not using the
+		   serial part of the dio */
+		if ((data[0] & (DIO_SDIN | DIO_SDOUT))
+		    && devpriv->serial_interval_ns)
+			return -EBUSY;
+
+		devpriv->dio_state &= ~data[0];
+		devpriv->dio_state |= (data[0] & data[1]);
+		devpriv->dio_output &= ~DIO_Parallel_Data_Mask;
+		devpriv->dio_output |=
+			DIO_Parallel_Data_Out(devpriv->dio_state);
+		devpriv->stc_writew(dev, devpriv->dio_output,
+				    DIO_Output_Register);
+	}
+
+	data[1] = (uint8_t)
+		devpriv->stc_readw(dev, DIO_Parallel_Input_Register);
+
+	return 0;
+}
+
+int ni_m_series_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int *)insn->data;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "chan=%d io=%d\n", CR_CHAN(insn->chan_desc), data[0]);
+#endif
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		devpriv->io_bits |= 1 << CR_CHAN(insn->chan_desc);
+		break;
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		devpriv->io_bits &= ~(1 << CR_CHAN(insn->chan_desc));
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (devpriv->io_bits &
+			   (1 << CR_CHAN(insn->chan_desc))) ?
+			A4L_OUTPUT : A4L_INPUT;
+		return 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ni_writel(devpriv->io_bits, M_Offset_DIO_Direction);
+
+	return 0;
+}
+
+int ni_m_series_dio_insn_bits_8(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "mask=0x%x bits=0x%x\n", data[0], data[1]);
+#endif
+
+	if (insn->data_size != 2 * sizeof(uint8_t))
+		return -EINVAL;
+
+	if (data[0]) {
+		devpriv->dio_state &= ~data[0];
+		devpriv->dio_state |= (data[0] & data[1]);
+		ni_writel(devpriv->dio_state, M_Offset_Static_Digital_Output);
+	}
+
+	data[1] = (uint8_t) ni_readl(M_Offset_Static_Digital_Input);
+
+	return 0;
+}
+
+int ni_m_series_dio_insn_bits_32(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint32_t *data = (uint32_t *)insn->data;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "mask=0x%x bits=0x%x\n", data[0], data[1]);
+#endif
+
+	if (insn->data_size != 2 * sizeof(uint32_t))
+		return -EINVAL;
+
+	if (data[0]) {
+		devpriv->dio_state &= ~data[0];
+		devpriv->dio_state |= (data[0] & data[1]);
+		ni_writel(devpriv->dio_state, M_Offset_Static_Digital_Output);
+	}
+
+	data[1] = ni_readl(M_Offset_Static_Digital_Input);
+
+	return 0;
+}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+struct a4l_cmd_desc mio_dio_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_INT,
+	.scan_begin_src = TRIG_EXT,
+	.convert_src = TRIG_NOW,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_NONE,
+};
+
+int ni_cdio_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	unsigned int i;
+
+	/* Make sure arguments are trivially compatible */
+
+	if (cmd->start_arg != 0) {
+		cmd->start_arg = 0;
+		return -EINVAL;
+	}
+
+	if ((cmd->scan_begin_arg &
+	     PACK_FLAGS(CDO_Sample_Source_Select_Mask, 0, 0, CR_INVERT)) !=
+	    cmd->scan_begin_arg)
+		return -EINVAL;
+
+	if (cmd->convert_arg != 0) {
+		cmd->convert_arg = 0;
+		return -EINVAL;
+	}
+
+	if (cmd->scan_end_arg != cmd->nb_chan) {
+		cmd->scan_end_arg = cmd->nb_chan;
+		return -EINVAL;
+	}
+
+	if (cmd->stop_arg != 0) {
+		cmd->stop_arg = 0;
+		return -EINVAL;
+	}
+
+	/* Check chan_descs */
+
+	for (i = 0; i < cmd->nb_chan; ++i) {
+		if (cmd->chan_descs[i] != i)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+int ni_cdio_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned cdo_mode_bits = CDO_FIFO_Mode_Bit | CDO_Halt_On_Error_Bit;
+
+	ni_writel(CDO_Reset_Bit, M_Offset_CDIO_Command);
+	switch (cmd->scan_begin_src) {
+	case TRIG_EXT:
+		cdo_mode_bits |=
+			CR_CHAN(cmd->scan_begin_arg) &
+			CDO_Sample_Source_Select_Mask;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	if (cmd->scan_begin_arg & CR_INVERT)
+		cdo_mode_bits |= CDO_Polarity_Bit;
+	ni_writel(cdo_mode_bits, M_Offset_CDO_Mode);
+
+	if (devpriv->io_bits) {
+		ni_writel(devpriv->dio_state, M_Offset_CDO_FIFO_Data);
+		ni_writel(CDO_SW_Update_Bit, M_Offset_CDIO_Command);
+		ni_writel(devpriv->io_bits, M_Offset_CDO_Mask_Enable);
+	} else {
+		a4l_err(dev,
+			"ni_cdio_cmd: attempted to run digital "
+			"output command with no lines configured as outputs");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+void ni_cdio_cancel(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	ni_writel(CDO_Disarm_Bit | CDO_Error_Interrupt_Enable_Clear_Bit |
+		  CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit |
+		  CDO_FIFO_Request_Interrupt_Enable_Clear_Bit,
+		  M_Offset_CDIO_Command);
+
+	ni_writel(0, M_Offset_CDO_Mask_Enable);
+	ni_release_cdo_mite_channel(dev);
+}
+
+int ni_cdo_inttrig(struct a4l_subdevice *subd, lsampl_t trignum)
+{
+	struct a4l_device *dev = subd->dev;
+	int err;
+	unsigned i;
+	const unsigned timeout = 1000;
+
+	/* TODO: disable the trigger until a command is recorded.
+	   A null trig at the beginning prevents the start trigger from
+	   executing more than once per command (and doing things like
+	   trying to allocate the dma channel multiple times). */
+
+	err = ni_cdo_setup_MITE_dma(subd);
+	if (err < 0)
+		return err;
+
+	/* wait for dma to fill output fifo */
+	for (i = 0; i < timeout; ++i) {
+		if (ni_readl(M_Offset_CDIO_Status) & CDO_FIFO_Full_Bit)
+			break;
+		a4l_udelay(10);
+	}
+
+	if (i == timeout) {
+		a4l_err(dev, "ni_cdo_inttrig: dma failed to fill cdo fifo!");
+		ni_cdio_cancel(subd);
+		return -EIO;
+	}
+
+	ni_writel(CDO_Arm_Bit |
+		  CDO_Error_Interrupt_Enable_Set_Bit |
+		  CDO_Empty_FIFO_Interrupt_Enable_Set_Bit,
+		  M_Offset_CDIO_Command);
+
+	return 0;
+}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+static void handle_cdio_interrupt(struct a4l_device *dev)
+{
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+	unsigned cdio_status;
+	unsigned long flags;
+	struct a4l_subdevice *subd = a4l_get_subd(dev, NI_DIO_SUBDEV);
+
+	if ((boardtype.reg_type & ni_reg_m_series_mask) == 0) {
+		return;
+	}
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->cdo_mite_chan) {
+		unsigned cdo_mite_status =
+			a4l_mite_get_status(devpriv->cdo_mite_chan);
+		if (cdo_mite_status & CHSR_LINKC) {
+			writel(CHOR_CLRLC,
+			       devpriv->mite->mite_io_addr +
+			       MITE_CHOR(devpriv->cdo_mite_chan->channel));
+		}
+		a4l_mite_sync_output_dma(devpriv->cdo_mite_chan, subd);
+	}
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+	cdio_status = ni_readl(M_Offset_CDIO_Status);
+	if (cdio_status & (CDO_Overrun_Bit | CDO_Underflow_Bit)) {
+		/* XXX just guessing this is needed and does something useful */
+		ni_writel(CDO_Error_Interrupt_Confirm_Bit, M_Offset_CDIO_Command);
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+	if (cdio_status & CDO_FIFO_Empty_Bit) {
+		ni_writel(CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit,
+			  M_Offset_CDIO_Command);
+	}
+	a4l_buf_evt(subd, 0);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+}
+
+static int ni_serial_hw_readwrite8(struct a4l_device * dev,
+				   unsigned char data_out, unsigned char *data_in)
+{
+	unsigned int status1;
+	int err = 0, count = 20;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "outputting 0x%x\n", data_out);
+#endif
+
+	devpriv->dio_output &= ~DIO_Serial_Data_Mask;
+	devpriv->dio_output |= DIO_Serial_Data_Out(data_out);
+	devpriv->stc_writew(dev, devpriv->dio_output, DIO_Output_Register);
+
+	status1 = devpriv->stc_readw(dev, Joint_Status_1_Register);
+	if (status1 & DIO_Serial_IO_In_Progress_St) {
+		err = -EBUSY;
+		goto Error;
+	}
+
+	devpriv->dio_control |= DIO_HW_Serial_Start;
+	devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register);
+	devpriv->dio_control &= ~DIO_HW_Serial_Start;
+
+	/* Wait until STC says we're done, but don't loop infinitely. */
+	while ((status1 =
+		devpriv->stc_readw(dev,
+				   Joint_Status_1_Register)) &
+	       DIO_Serial_IO_In_Progress_St) {
+		/* Delay one bit per loop */
+		a4l_udelay((devpriv->serial_interval_ns + 999) / 1000);
+		if (--count < 0) {
+			a4l_err(dev,
+				"ni_serial_hw_readwrite8: "
+				"SPI serial I/O didn't finish in time!\n");
+			err = -ETIME;
+			goto Error;
+		}
+	}
+
+	/* Delay for last bit. This delay is absolutely necessary, because
+	   DIO_Serial_IO_In_Progress_St goes high one bit too early. */
+	a4l_udelay((devpriv->serial_interval_ns + 999) / 1000);
+
+	if (data_in != NULL) {
+		*data_in = devpriv->stc_readw(dev, DIO_Serial_Input_Register);
+#ifdef CONFIG_DEBUG_DIO
+		a4l_info(dev, "inputted 0x%x\n", *data_in);
+#endif
+	}
+
+Error:
+	devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register);
+
+	return err;
+}
+
+static int ni_serial_sw_readwrite8(struct a4l_device * dev,
+				   unsigned char data_out, unsigned char *data_in)
+{
+	unsigned char mask, input = 0;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "outputting 0x%x\n", data_out);
+#endif
+
+	/* Wait for one bit before transfer */
+	a4l_udelay((devpriv->serial_interval_ns + 999) / 1000);
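+	/* The (serial_interval_ns + 999) / 1000 expression used here and
+	   below rounds the per-bit delay up to whole microseconds for
+	   a4l_udelay(). */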
+
+	for (mask = 0x80; mask; mask >>= 1) {
+		/* Output current bit; note that we cannot touch devpriv->dio_state
+		   because it is a per-subdevice field, and serial is
+		   a separate subdevice from DIO. */
+		devpriv->dio_output &= ~DIO_SDOUT;
+		if (data_out & mask) {
+			devpriv->dio_output |= DIO_SDOUT;
+		}
+		devpriv->stc_writew(dev, devpriv->dio_output,
+				    DIO_Output_Register);
+
+		/* Assert SDCLK (active low, inverted), wait for half of
+		   the delay, deassert SDCLK, and wait for the other half. */
+		devpriv->dio_control |= DIO_Software_Serial_Control;
+		devpriv->stc_writew(dev, devpriv->dio_control,
+				    DIO_Control_Register);
+
+		a4l_udelay((devpriv->serial_interval_ns + 999) / 2000);
+
+		devpriv->dio_control &= ~DIO_Software_Serial_Control;
+		devpriv->stc_writew(dev, devpriv->dio_control,
+				    DIO_Control_Register);
+
+		a4l_udelay((devpriv->serial_interval_ns + 999) / 2000);
+
+		/* Input current bit */
+		if (devpriv->stc_readw(dev,
+				       DIO_Parallel_Input_Register) & DIO_SDIN) {
+			input |= mask;
+		}
+	}
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "inputted 0x%x\n", input);
+#endif
+	if (data_in)
+		*data_in = input;
+
+	return 0;
+}
+
+int ni_serial_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	int err = 0;
+	unsigned char byte_out, byte_in = 0;
+	unsigned int *data = (unsigned int *)insn->data;
+
+	if (insn->data_size != 2 * sizeof(unsigned int))
+		return -EINVAL;
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_SERIAL_CLOCK:
+
+#ifdef CONFIG_DEBUG_DIO
+		a4l_info(dev, "SPI serial clock Config %d\n", data[1]);
+#endif
+
+		devpriv->serial_hw_mode = 1;
+		devpriv->dio_control |= DIO_HW_Serial_Enable;
+
+		if (data[1] == SERIAL_DISABLED) {
+			devpriv->serial_hw_mode = 0;
+			devpriv->dio_control &= ~(DIO_HW_Serial_Enable |
+						  DIO_Software_Serial_Control);
+			data[1] = SERIAL_DISABLED;
+			devpriv->serial_interval_ns = data[1];
+		} else if (data[1] <= SERIAL_600NS) {
+			/* Warning: this clock speed is too fast to reliably
+			   control SCXI. */
+			devpriv->dio_control &= ~DIO_HW_Serial_Timebase;
+			devpriv->clock_and_fout |= Slow_Internal_Timebase;
+			devpriv->clock_and_fout &= ~DIO_Serial_Out_Divide_By_2;
+			data[1] = SERIAL_600NS;
+			devpriv->serial_interval_ns = data[1];
+		} else if (data[1] <= SERIAL_1_2US) {
+			devpriv->dio_control &= ~DIO_HW_Serial_Timebase;
+			devpriv->clock_and_fout |= Slow_Internal_Timebase |
+				DIO_Serial_Out_Divide_By_2;
+			data[1] = SERIAL_1_2US;
+			devpriv->serial_interval_ns = data[1];
+		} else if (data[1] <= SERIAL_10US) {
+			devpriv->dio_control |= DIO_HW_Serial_Timebase;
+			devpriv->clock_and_fout |= Slow_Internal_Timebase |
+				DIO_Serial_Out_Divide_By_2;
+			/* Note: DIO_Serial_Out_Divide_By_2 only affects
+			   600ns/1.2us. If you turn divide_by_2 off with the
+			   slow clock, you will still get 10us, except then
+			   all your delays are wrong. */
+			data[1] = SERIAL_10US;
+			devpriv->serial_interval_ns = data[1];
+		} else {
+			devpriv->dio_control &= ~(DIO_HW_Serial_Enable |
+						  DIO_Software_Serial_Control);
+			devpriv->serial_hw_mode = 0;
+			data[1] = (data[1] / 1000) * 1000;
+			devpriv->serial_interval_ns = data[1];
+		}
+
+		devpriv->stc_writew(dev, devpriv->dio_control,
+				    DIO_Control_Register);
+		devpriv->stc_writew(dev, devpriv->clock_and_fout,
+				    Clock_and_FOUT_Register);
+		return 0;
+
+		break;
+
+	case A4L_INSN_CONFIG_BIDIRECTIONAL_DATA:
+
+		if (devpriv->serial_interval_ns == 0) {
+			return -EINVAL;
+		}
+
+		byte_out = data[1] & 0xFF;
+
+		if (devpriv->serial_hw_mode) {
+			err = ni_serial_hw_readwrite8(dev, byte_out, &byte_in);
+		} else if (devpriv->serial_interval_ns > 0) {
+			err = ni_serial_sw_readwrite8(dev, byte_out, &byte_in);
+		} else {
+			a4l_err(dev,
+				"ni_serial_insn_config: serial disabled!\n");
+			return -EINVAL;
+		}
+		if (err < 0)
+			return err;
+		data[1] = byte_in & 0xFF;
+		return 0;
+
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return -EINVAL;
+}
+
+void mio_common_detach(struct a4l_device * dev)
+{
+	if (dev->priv) {
+		if (devpriv->counter_dev) {
+			a4l_ni_gpct_device_destroy(devpriv->counter_dev);
+		}
+	}
+}
+
+static void init_ao_67xx(struct a4l_device * dev)
+{
+	struct a4l_subdevice *subd = a4l_get_subd(dev, NI_AO_SUBDEV);
+	int i;
+
+	if (subd == NULL) {
+		a4l_err(dev, "%s: unable to find AO subdevice\n", __FUNCTION__);
+		return;
+	}
+
+	for (i = 0; i < subd->chan_desc->length; i++)
+		ni_ao_win_outw(dev, AO_Channel(i) | 0x0,
+			       AO_Configuration_2_67xx);
+}
+
+static unsigned int ni_gpct_to_stc_register(enum ni_gpct_register reg)
+{
+	unsigned stc_register;
+	switch (reg) {
+	case NITIO_G0_Autoincrement_Reg:
+		stc_register = G_Autoincrement_Register(0);
+		break;
+	case NITIO_G1_Autoincrement_Reg:
+		stc_register = G_Autoincrement_Register(1);
+		break;
+	case NITIO_G0_Command_Reg:
+		stc_register = G_Command_Register(0);
+		break;
+	case NITIO_G1_Command_Reg:
+		stc_register = G_Command_Register(1);
+		break;
+	case NITIO_G0_HW_Save_Reg:
+		stc_register = G_HW_Save_Register(0);
+		break;
+	case NITIO_G1_HW_Save_Reg:
+		stc_register = G_HW_Save_Register(1);
+		break;
+	case NITIO_G0_SW_Save_Reg:
+		stc_register = G_Save_Register(0);
+		break;
+	case NITIO_G1_SW_Save_Reg:
+		stc_register = G_Save_Register(1);
+		break;
+	case NITIO_G0_Mode_Reg:
+		stc_register = G_Mode_Register(0);
+		break;
+	case NITIO_G1_Mode_Reg:
+		stc_register = G_Mode_Register(1);
+		break;
+	case NITIO_G0_LoadA_Reg:
+		stc_register = G_Load_A_Register(0);
+		break;
+	case NITIO_G1_LoadA_Reg:
+		stc_register = G_Load_A_Register(1);
+		break;
+	case NITIO_G0_LoadB_Reg:
+		stc_register = G_Load_B_Register(0);
+		break;
+	case NITIO_G1_LoadB_Reg:
+		stc_register = G_Load_B_Register(1);
+		break;
+	case NITIO_G0_Input_Select_Reg:
+		stc_register = G_Input_Select_Register(0);
+		break;
+	case NITIO_G1_Input_Select_Reg:
+		stc_register = G_Input_Select_Register(1);
+		break;
+	case NITIO_G01_Status_Reg:
+		stc_register = G_Status_Register;
+		break;
+	case NITIO_G01_Joint_Reset_Reg:
+		stc_register = Joint_Reset_Register;
+		break;
+	case NITIO_G01_Joint_Status1_Reg:
+		stc_register = Joint_Status_1_Register;
+		break;
+	case NITIO_G01_Joint_Status2_Reg:
+		stc_register = Joint_Status_2_Register;
+		break;
+	case NITIO_G0_Interrupt_Acknowledge_Reg:
+		stc_register = Interrupt_A_Ack_Register;
+		break;
+	case NITIO_G1_Interrupt_Acknowledge_Reg:
+		stc_register = Interrupt_B_Ack_Register;
+		break;
+	case NITIO_G0_Status_Reg:
+		stc_register = AI_Status_1_Register;
+		break;
+	case NITIO_G1_Status_Reg:
+		stc_register = AO_Status_1_Register;
+		break;
+	case NITIO_G0_Interrupt_Enable_Reg:
+		stc_register = Interrupt_A_Enable_Register;
+		break;
+	case NITIO_G1_Interrupt_Enable_Reg:
+		stc_register = Interrupt_B_Enable_Register;
+		break;
+	default:
+		__a4l_err("%s: unhandled register 0x%x in switch.\n",
+			  __FUNCTION__, reg);
+		BUG();
+		return 0;
+		break;
+	}
+	return stc_register;
+}
+
+static void ni_gpct_write_register(struct ni_gpct *counter,
+				   unsigned int bits, enum ni_gpct_register reg)
+{
+	struct a4l_device *dev = counter->counter_dev->dev;
+	unsigned stc_register;
+	/* bits in the joint reset register which are relevant to counters */
+	static const unsigned gpct_joint_reset_mask = G0_Reset | G1_Reset;
+	static const unsigned gpct_interrupt_a_enable_mask =
+		G0_Gate_Interrupt_Enable | G0_TC_Interrupt_Enable;
+	static const unsigned gpct_interrupt_b_enable_mask =
+		G1_Gate_Interrupt_Enable | G1_TC_Interrupt_Enable;
+
+	switch (reg) {
+		/* m-series-only registers */
+	case NITIO_G0_Counting_Mode_Reg:
+		ni_writew(bits, M_Offset_G0_Counting_Mode);
+		break;
+	case NITIO_G1_Counting_Mode_Reg:
+		ni_writew(bits, M_Offset_G1_Counting_Mode);
+		break;
+	case NITIO_G0_Second_Gate_Reg:
+		ni_writew(bits, M_Offset_G0_Second_Gate);
+		break;
+	case NITIO_G1_Second_Gate_Reg:
+		ni_writew(bits, M_Offset_G1_Second_Gate);
+		break;
+	case NITIO_G0_DMA_Config_Reg:
+		ni_writew(bits, M_Offset_G0_DMA_Config);
+		break;
+	case NITIO_G1_DMA_Config_Reg:
+		ni_writew(bits, M_Offset_G1_DMA_Config);
+		break;
+	case NITIO_G0_ABZ_Reg:
+		ni_writew(bits, M_Offset_G0_MSeries_ABZ);
+		break;
+	case NITIO_G1_ABZ_Reg:
+		ni_writew(bits, M_Offset_G1_MSeries_ABZ);
+		break;
+
+		/* 32 bit registers */
+	case NITIO_G0_LoadA_Reg:
+	case NITIO_G1_LoadA_Reg:
+	case NITIO_G0_LoadB_Reg:
+	case NITIO_G1_LoadB_Reg:
+		stc_register = ni_gpct_to_stc_register(reg);
+		devpriv->stc_writel(dev, bits, stc_register);
+		break;
+
+		/* 16 bit registers */
+	case NITIO_G0_Interrupt_Enable_Reg:
+		BUG_ON(bits & ~gpct_interrupt_a_enable_mask);
+		ni_set_bitfield(dev, Interrupt_A_Enable_Register,
+				gpct_interrupt_a_enable_mask, bits);
+		break;
+	case NITIO_G1_Interrupt_Enable_Reg:
+		BUG_ON(bits & ~gpct_interrupt_b_enable_mask);
+		ni_set_bitfield(dev, Interrupt_B_Enable_Register,
+				gpct_interrupt_b_enable_mask, bits);
+		break;
+	case NITIO_G01_Joint_Reset_Reg:
+		BUG_ON(bits & ~gpct_joint_reset_mask);
+		fallthrough;
+	default:
+		stc_register = ni_gpct_to_stc_register(reg);
+		devpriv->stc_writew(dev, bits, stc_register);
+	}
+}
+
+static unsigned int ni_gpct_read_register(struct ni_gpct *counter,
+					  enum ni_gpct_register reg)
+{
+	struct a4l_device *dev = counter->counter_dev->dev;
+	unsigned int stc_register;
+	switch (reg) {
+		/* m-series only registers */
+	case NITIO_G0_DMA_Status_Reg:
+		return ni_readw(M_Offset_G0_DMA_Status);
+		break;
+	case NITIO_G1_DMA_Status_Reg:
+		return ni_readw(M_Offset_G1_DMA_Status);
+		break;
+
+		/* 32 bit registers */
+	case NITIO_G0_HW_Save_Reg:
+	case NITIO_G1_HW_Save_Reg:
+	case NITIO_G0_SW_Save_Reg:
+	case NITIO_G1_SW_Save_Reg:
+		stc_register = ni_gpct_to_stc_register(reg);
+		return devpriv->stc_readl(dev, stc_register);
+		break;
+
+		/* 16 bit registers */
+	default:
+		stc_register = ni_gpct_to_stc_register(reg);
+		return devpriv->stc_readw(dev, stc_register);
+		break;
+	}
+	return 0;
+}
+
+int ni_freq_out_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	data[0] = FOUT_Divider(devpriv->clock_and_fout);
+
+	return 0;
+}
+
+int ni_freq_out_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	devpriv->clock_and_fout &= ~FOUT_Enable;
+	devpriv->stc_writew(dev, devpriv->clock_and_fout,
+			    Clock_and_FOUT_Register);
+	devpriv->clock_and_fout &= ~FOUT_Divider_mask;
+	devpriv->clock_and_fout |= FOUT_Divider(data[0]);
+	devpriv->clock_and_fout |= FOUT_Enable;
+	devpriv->stc_writew(dev, devpriv->clock_and_fout,
+			    Clock_and_FOUT_Register);
+
+	return 0;
+}
+
+static int ni_set_freq_out_clock(struct a4l_device * dev, lsampl_t clock_source)
+{
+	switch (clock_source) {
+	case NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC:
+		devpriv->clock_and_fout &= ~FOUT_Timebase_Select;
+		break;
+	case NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC:
+		devpriv->clock_and_fout |= FOUT_Timebase_Select;
+		break;
+	default:
+		return -EINVAL;
+	}
+	devpriv->stc_writew(dev, devpriv->clock_and_fout,
+			    Clock_and_FOUT_Register);
+
+	return 0;
+}
+
+static void ni_get_freq_out_clock(struct a4l_device * dev,
+				  unsigned int * clock_source,
+				  unsigned int * clock_period_ns)
+{
+	if (devpriv->clock_and_fout & FOUT_Timebase_Select) {
+		*clock_source = NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC;
+		*clock_period_ns = TIMEBASE_2_NS;
+	} else {
+		*clock_source = NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC;
+		*clock_period_ns = TIMEBASE_1_NS * 2;
+	}
+}
+
+int ni_freq_out_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int *)insn->data;
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_SET_CLOCK_SRC:
+		return ni_set_freq_out_clock(dev, data[1]);
+		break;
+	case A4L_INSN_CONFIG_GET_CLOCK_SRC:
+		ni_get_freq_out_clock(dev, &data[1], &data[2]);
+		return 0;
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static int ni_8255_callback(int dir, int port, int data, unsigned long arg)
+{
+	struct a4l_device *dev = (struct a4l_device *) arg;
+
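+	/* dir != 0 writes the byte to the requested 8255 port, dir == 0
+	   reads it back; the ports are mapped two bytes apart starting
+	   at Port_A. */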
+	if (dir) {
+		ni_writeb(data, Port_A + 2 * port);
+		return 0;
+	} else {
+		return ni_readb(Port_A + 2 * port);
+	}
+}
+
+/*
+  reads bytes out of eeprom
+*/
+
+static int ni_read_eeprom(struct a4l_device *dev, int addr)
+{
+	int bit;
+	int bitstring;
+
+	bitstring = 0x0300 | ((addr & 0x100) << 3) | (addr & 0xff);
+	ni_writeb(0x04, Serial_Command);
+	for (bit = 0x8000; bit; bit >>= 1) {
+		ni_writeb(0x04 | ((bit & bitstring) ? 0x02 : 0),
+			  Serial_Command);
+		ni_writeb(0x05 | ((bit & bitstring) ? 0x02 : 0),
+			  Serial_Command);
+	}
+	bitstring = 0;
+	for (bit = 0x80; bit; bit >>= 1) {
+		ni_writeb(0x04, Serial_Command);
+		ni_writeb(0x05, Serial_Command);
+		bitstring |= ((ni_readb(XXX_Status) & PROMOUT) ? bit : 0);
+	}
+	ni_writeb(0x00, Serial_Command);
+
+	return bitstring;
+}
+
+/*
+  presents the EEPROM as a subdevice
+*/
+
+static int ni_eeprom_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	data[0] = ni_read_eeprom(dev, CR_CHAN(insn->chan_desc));
+
+	return 0;
+}
+
+
+static int ni_m_series_eeprom_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	data[0] = devpriv->eeprom_buffer[CR_CHAN(insn->chan_desc)];
+
+	return 0;
+}
+
+static int ni_get_pwm_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int*)insn->data;
+
+	data[1] = devpriv->pwm_up_count * devpriv->clock_ns;
+	data[2] = devpriv->pwm_down_count * devpriv->clock_ns;
+
+	return 0;
+}
+
+static int ni_m_series_pwm_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int up_count, down_count;
+	unsigned int *data = (unsigned int*)insn->data;
+
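+	/*
+	 * data[2]/data[4] request the PWM high/low times in nanoseconds;
+	 * they are converted to timebase ticks with the rounding mode given
+	 * in data[1]/data[3].  If the request is not an exact multiple of
+	 * clock_ns, the achievable values are written back and -EAGAIN is
+	 * returned so the caller can retry.
+	 */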
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_PWM_OUTPUT:
+		switch (data[1]) {
+		case TRIG_ROUND_NEAREST:
+			up_count =
+				(data[2] +
+				 devpriv->clock_ns / 2) / devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_DOWN:
+			up_count = data[2] / devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_UP:
+			up_count = (data[2] + devpriv->clock_ns - 1) /
+				devpriv->clock_ns;
+			break;
+		default:
+			return -EINVAL;
+			break;
+		}
+		switch (data[3]) {
+		case TRIG_ROUND_NEAREST:
+			down_count = (data[4] + devpriv->clock_ns / 2) /
+				devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_DOWN:
+			down_count = data[4] / devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_UP:
+			down_count =
+				(data[4] + devpriv->clock_ns - 1) /
+				devpriv->clock_ns;
+			break;
+		default:
+			return -EINVAL;
+			break;
+		}
+		if (up_count * devpriv->clock_ns != data[2] ||
+		    down_count * devpriv->clock_ns != data[4]) {
+			data[2] = up_count * devpriv->clock_ns;
+			data[4] = down_count * devpriv->clock_ns;
+			return -EAGAIN;
+		}
+		ni_writel(MSeries_Cal_PWM_High_Time_Bits(up_count) |
+			  MSeries_Cal_PWM_Low_Time_Bits(down_count),
+			  M_Offset_Cal_PWM);
+		devpriv->pwm_up_count = up_count;
+		devpriv->pwm_down_count = down_count;
+		return 0;
+		break;
+	case A4L_INSN_CONFIG_GET_PWM_OUTPUT:
+		return ni_get_pwm_config(subd, insn);
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+	return 0;
+}
+
+static int ni_6143_pwm_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int*)insn->data;
+
+	unsigned up_count, down_count;
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_PWM_OUTPUT:
+		switch (data[1]) {
+		case TRIG_ROUND_NEAREST:
+			up_count =
+				(data[2] + devpriv->clock_ns / 2) /
+				devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_DOWN:
+			up_count = data[2] / devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_UP:
+			up_count = (data[2] + devpriv->clock_ns - 1) /
+				devpriv->clock_ns;
+			break;
+		default:
+			return -EINVAL;
+			break;
+		}
+		switch (data[3]) {
+		case TRIG_ROUND_NEAREST:
+			down_count = (data[4] + devpriv->clock_ns / 2) /
+				devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_DOWN:
+			down_count = data[4] / devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_UP:
+			down_count = (data[4] + devpriv->clock_ns - 1) /
+				devpriv->clock_ns;
+			break;
+		default:
+			return -EINVAL;
+			break;
+		}
+		if (up_count * devpriv->clock_ns != data[2] ||
+		    down_count * devpriv->clock_ns != data[4]) {
+			data[2] = up_count * devpriv->clock_ns;
+			data[4] = down_count * devpriv->clock_ns;
+			return -EAGAIN;
+		}
+		ni_writel(up_count, Calibration_HighTime_6143);
+		devpriv->pwm_up_count = up_count;
+		ni_writel(down_count, Calibration_LowTime_6143);
+		devpriv->pwm_down_count = down_count;
+		return 0;
+		break;
+	case A4L_INSN_CONFIG_GET_PWM_OUTPUT:
+		return ni_get_pwm_config(subd, insn);
+	default:
+		return -EINVAL;
+		break;
+	}
+	return 0;
+}
+
+static int pack_mb88341(int addr, int val, int *bitstring)
+{
+	/*
+	  Fujitsu MB 88341
+	  Note that address bits are reversed.  Thanks to
+	  Ingo Keen for noticing this.
+
+	  Note also that the 88341 expects address values from
+	  1-12, whereas we use channel numbers 0-11.  The NI
+	  docs use 1-12, also, so be careful here.
+	*/
+	addr++;
+	*bitstring = ((addr & 0x1) << 11) |
+		((addr & 0x2) << 9) |
+		((addr & 0x4) << 7) | ((addr & 0x8) << 5) | (val & 0xff);
+	return 12;
+}
+
+static int pack_dac8800(int addr, int val, int *bitstring)
+{
+	*bitstring = ((addr & 0x7) << 8) | (val & 0xff);
+	return 11;
+}
+
+static int pack_dac8043(int addr, int val, int *bitstring)
+{
+	*bitstring = val & 0xfff;
+	return 12;
+}
+
+static int pack_ad8522(int addr, int val, int *bitstring)
+{
+	*bitstring = (val & 0xfff) | (addr ? 0xc000 : 0xa000);
+	return 16;
+}
+
+static int pack_ad8804(int addr, int val, int *bitstring)
+{
+	*bitstring = ((addr & 0xf) << 8) | (val & 0xff);
+	return 12;
+}
+
+static int pack_ad8842(int addr, int val, int *bitstring)
+{
+	*bitstring = ((addr + 1) << 8) | (val & 0xff);
+	return 12;
+}
+
+struct caldac_struct {
+	int n_chans;
+	int n_bits;
+	int (*packbits) (int, int, int *);
+};
+
+static struct caldac_struct caldacs[] = {
+	[mb88341] = {12, 8, pack_mb88341},
+	[dac8800] = {8, 8, pack_dac8800},
+	[dac8043] = {1, 12, pack_dac8043},
+	[ad8522] = {2, 12, pack_ad8522},
+	[ad8804] = {12, 8, pack_ad8804},
+	[ad8842] = {8, 8, pack_ad8842},
+	[ad8804_debug] = {16, 8, pack_ad8804},
+};
+
+static void ni_write_caldac(struct a4l_device * dev, int addr, int val)
+{
+	unsigned int loadbit = 0, bits = 0, bit, bitstring = 0;
+	int i;
+	int type;
+
+	if (devpriv->caldacs[addr] == val)
+		return;
+	devpriv->caldacs[addr] = val;
+
+	for (i = 0; i < 3; i++) {
+		type = boardtype.caldac[i];
+		if (type == caldac_none)
+			break;
+		if (addr < caldacs[type].n_chans) {
+			bits = caldacs[type].packbits(addr, val, &bitstring);
+			loadbit = SerDacLd(i);
+			break;
+		}
+		addr -= caldacs[type].n_chans;
+	}
+
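+	/*
+	 * Bit-bang the packed value MSB first: bit 0x02 of Serial_Command
+	 * carries the data while bit 0x01 is pulsed low then high as the
+	 * serial clock; the load bit is strobed once all bits are out.
+	 */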
+	for (bit = 1 << (bits - 1); bit; bit >>= 1) {
+		ni_writeb(((bit & bitstring) ? 0x02 : 0), Serial_Command);
+		a4l_udelay(1);
+		ni_writeb(1 | ((bit & bitstring) ? 0x02 : 0), Serial_Command);
+		a4l_udelay(1);
+	}
+	ni_writeb(loadbit, Serial_Command);
+	a4l_udelay(1);
+	ni_writeb(0, Serial_Command);
+}
+
+static void caldac_setup(struct a4l_device *dev, struct a4l_subdevice *subd)
+{
+	int i, j;
+	int n_dacs;
+	int n_chans = 0;
+	int n_bits;
+	int diffbits = 0;
+	int type;
+	int chan;
+
+	type = boardtype.caldac[0];
+	if (type == caldac_none)
+		return;
+	n_bits = caldacs[type].n_bits;
+	for (i = 0; i < 3; i++) {
+		type = boardtype.caldac[i];
+		if (type == caldac_none)
+			break;
+		if (caldacs[type].n_bits != n_bits)
+			diffbits = 1;
+		n_chans += caldacs[type].n_chans;
+	}
+	n_dacs = i;
+
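+	/*
+	 * If the board's cal DACs do not all have the same resolution, a
+	 * per-channel descriptor is built below; otherwise a single global
+	 * channel descriptor covers every cal DAC channel.
+	 */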
+	if (diffbits) {
+
+		if (n_chans > MAX_N_CALDACS) {
+			a4l_err(dev, "BUG! MAX_N_CALDACS too small\n");
+		}
+
+		subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+					  n_chans * sizeof(struct a4l_channel), GFP_KERNEL);
+
+		memset(subd->chan_desc,
+		       0,
+		       sizeof(struct a4l_channels_desc) + n_chans * sizeof(struct a4l_channel));
+
+		subd->chan_desc->length = n_chans;
+		subd->chan_desc->mode = A4L_CHAN_PERCHAN_CHANDESC;
+
+		chan = 0;
+		for (i = 0; i < n_dacs; i++) {
+			type = boardtype.caldac[i];
+			for (j = 0; j < caldacs[type].n_chans; j++) {
+
+				subd->chan_desc->chans[chan].nb_bits =
+					caldacs[type].n_bits;
+
+				chan++;
+			}
+		}
+
+		for (chan = 0; chan < n_chans; chan++) {
+			unsigned long tmp =
+				(1 << subd->chan_desc->chans[chan].nb_bits) / 2;
+			ni_write_caldac(dev, chan, tmp);
+		}
+	} else {
+		subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+					  sizeof(struct a4l_channel), GFP_KERNEL);
+
+		memset(subd->chan_desc,
+		       0, sizeof(struct a4l_channels_desc) + sizeof(struct a4l_channel));
+
+		subd->chan_desc->length = n_chans;
+		subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+
+		type = boardtype.caldac[0];
+
+		subd->chan_desc->chans[0].nb_bits = caldacs[type].n_bits;
+
+		for (chan = 0; chan < n_chans; chan++)
+			ni_write_caldac(dev,
+					chan,
+					(1 << subd->chan_desc->chans[0].nb_bits) / 2);
+	}
+}
+
+static int ni_calib_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	ni_write_caldac(dev, CR_CHAN(insn->chan_desc), data[0]);
+	return 0;
+}
+
+static int ni_calib_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	data[0] = devpriv->caldacs[CR_CHAN(insn->chan_desc)];
+
+	return 0;
+}
+
+static int ni_gpct_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct ni_gpct *counter = (struct ni_gpct *)subd->priv;
+	return a4l_ni_tio_insn_config(counter, insn);
+}
+
+static int ni_gpct_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct ni_gpct *counter = (struct ni_gpct *)subd->priv;
+	return a4l_ni_tio_rinsn(counter, insn);
+}
+
+static int ni_gpct_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct ni_gpct *counter = (struct ni_gpct *)subd->priv;
+	return a4l_ni_tio_winsn(counter, insn);
+}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+static int ni_gpct_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	int retval;
+	struct a4l_device *dev = subd->dev;
+	struct ni_gpct *counter = (struct ni_gpct *)subd->priv;
+	struct mite_dma_descriptor_ring *ring;
+
+	retval = ni_request_gpct_mite_channel(dev,
+					      counter->counter_index,
+					      A4L_INPUT);
+	if (retval) {
+		a4l_err(dev,
+			"ni_gpct_cmd: "
+			"no dma channel available for use by counter\n");
+		return retval;
+	}
+
+	ring = devpriv->gpct_mite_ring[counter->counter_index];
+	retval = a4l_mite_buf_change(ring, subd);
+	if (retval) {
+		a4l_err(dev,
+			"ni_gpct_cmd: "
+			"dma ring configuration failed\n");
+		return retval;
+
+	}
+
+	a4l_ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL);
+	ni_e_series_enable_second_irq(dev, counter->counter_index, 1);
+	retval = a4l_ni_tio_cmd(counter, cmd);
+
+	return retval;
+}
+
+static int ni_gpct_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct ni_gpct *counter = (struct ni_gpct *)subd->priv;
+	return a4l_ni_tio_cmdtest(counter, cmd);
+}
+
+static void ni_gpct_cancel(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	struct ni_gpct *counter = (struct ni_gpct *)subd->priv;
+
+	a4l_ni_tio_cancel(counter);
+	ni_e_series_enable_second_irq(dev, counter->counter_index, 0);
+	ni_release_gpct_mite_channel(dev, counter->counter_index);
+}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+
+/*
+ *
+ *  Programmable Function Inputs
+ *
+ */
+
+static int ni_m_series_set_pfi_routing(struct a4l_device *dev,
+				       unsigned int chan, unsigned int source)
+{
+	unsigned int pfi_reg_index;
+	unsigned int array_offset;
+
+	if ((source & 0x1f) != source)
+		return -EINVAL;
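+	/*
+	 * Each PFI output-select register packs three 5-bit routing fields,
+	 * hence the 5-bit check above and the chan / 3 indexing below.
+	 */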
+	pfi_reg_index = 1 + chan / 3;
+	array_offset = pfi_reg_index - 1;
+	devpriv->pfi_output_select_reg[array_offset] &=
+		~MSeries_PFI_Output_Select_Mask(chan);
+	devpriv->pfi_output_select_reg[array_offset] |=
+		MSeries_PFI_Output_Select_Bits(chan, source);
+	ni_writew(devpriv->pfi_output_select_reg[array_offset],
+		  M_Offset_PFI_Output_Select(pfi_reg_index));
+	return 2;
+}
+
+static unsigned int ni_old_get_pfi_routing(struct a4l_device *dev,
+					   unsigned int chan)
+{
+	/* pre-m-series boards have fixed signals on pfi pins */
+
+	switch (chan) {
+	case 0:
+		return NI_PFI_OUTPUT_AI_START1;
+		break;
+	case 1:
+		return NI_PFI_OUTPUT_AI_START2;
+		break;
+	case 2:
+		return NI_PFI_OUTPUT_AI_CONVERT;
+		break;
+	case 3:
+		return NI_PFI_OUTPUT_G_SRC1;
+		break;
+	case 4:
+		return NI_PFI_OUTPUT_G_GATE1;
+		break;
+	case 5:
+		return NI_PFI_OUTPUT_AO_UPDATE_N;
+		break;
+	case 6:
+		return NI_PFI_OUTPUT_AO_START1;
+		break;
+	case 7:
+		return NI_PFI_OUTPUT_AI_START_PULSE;
+		break;
+	case 8:
+		return NI_PFI_OUTPUT_G_SRC0;
+		break;
+	case 9:
+		return NI_PFI_OUTPUT_G_GATE0;
+		break;
+	default:
+		__a4l_err("%s: bug, unhandled case in switch.\n",
+			  __FUNCTION__);
+		break;
+	}
+	return 0;
+}
+
+static int ni_old_set_pfi_routing(struct a4l_device *dev,
+				  unsigned int chan, unsigned int source)
+{
+	/* pre-m-series boards have fixed signals on pfi pins */
+	if (source != ni_old_get_pfi_routing(dev, chan))
+		return -EINVAL;
+
+	return 2;
+}
+
+static int ni_set_pfi_routing(struct a4l_device *dev,
+			      unsigned int chan, unsigned int source)
+{
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		return ni_m_series_set_pfi_routing(dev, chan, source);
+	else
+		return ni_old_set_pfi_routing(dev, chan, source);
+}
+
+static unsigned int ni_m_series_get_pfi_routing(struct a4l_device *dev,
+						unsigned int chan)
+{
+	const unsigned int array_offset = chan / 3;
+	return MSeries_PFI_Output_Select_Source(chan,
+						devpriv->pfi_output_select_reg[array_offset]);
+}
+
+static unsigned int ni_get_pfi_routing(struct a4l_device *dev, unsigned int chan)
+{
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		return ni_m_series_get_pfi_routing(dev, chan);
+	else
+		return ni_old_get_pfi_routing(dev, chan);
+}
+
+static int ni_config_filter(struct a4l_device *dev,
+			    unsigned int pfi_channel, int filter)
+{
+	unsigned int bits;
+	if ((boardtype.reg_type & ni_reg_m_series_mask) == 0) {
+		return -ENOTSUPP;
+	}
+	bits = ni_readl(M_Offset_PFI_Filter);
+	bits &= ~MSeries_PFI_Filter_Select_Mask(pfi_channel);
+	bits |= MSeries_PFI_Filter_Select_Bits(pfi_channel, filter);
+	ni_writel(bits, M_Offset_PFI_Filter);
+	return 0;
+}
+
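+/*
+ * PFI bits access (M-Series only): data[0] is the write mask and
+ * data[1] holds the output values; on return data[1] is overwritten
+ * with the current state read back from the PFI inputs.
+ */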
+static int ni_pfi_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	if (data[0]) {
+		devpriv->pfi_state &= ~data[0];
+		devpriv->pfi_state |= (data[0] & data[1]);
+		ni_writew(devpriv->pfi_state, M_Offset_PFI_DO);
+	}
+
+	data[1] = ni_readw(M_Offset_PFI_DI);
+
+	return 0;
+}
+
+static int ni_pfi_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int chan, *data = (unsigned int *)insn->data;
+
+	if (insn->data_size < sizeof(unsigned int))
+		return -EINVAL;
+
+	chan = CR_CHAN(insn->chan_desc);
+
+	switch (data[0]) {
+	case A4L_OUTPUT:
+		ni_set_bits(dev, IO_Bidirection_Pin_Register, 1 << chan, 1);
+		break;
+	case A4L_INPUT:
+		ni_set_bits(dev, IO_Bidirection_Pin_Register, 1 << chan, 0);
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (devpriv->io_bidirection_pin_reg & (1 << chan)) ?
+			A4L_OUTPUT :	A4L_INPUT;
+		return 0;
+	case A4L_INSN_CONFIG_SET_ROUTING:
+		return ni_set_pfi_routing(dev, chan, data[1]);
+	case A4L_INSN_CONFIG_GET_ROUTING:
+		data[1] = ni_get_pfi_routing(dev, chan);
+		break;
+	case A4L_INSN_CONFIG_FILTER:
+		return ni_config_filter(dev, chan, data[1]);
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ *
+ *  RTSI Bus Functions
+ *
+ */
+
+/* Find the best multiplier/divider pair to get the PLL running at 80 MHz,
+ * given an arbitrary input (reference) clock period */
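+/*
+ * Example: with the 10 MHz PXI backplane clock (period 100 ns, i.e.
+ * 100000 ps) the search settles on divider 1 and multiplier 8, since
+ * 100000 * 1 / 8 = 12500 ps (exactly 80 MHz); the reported timebase
+ * period is then (12500 * 4 + 500) / 1000 = 50 ns, i.e. 20 MHz.
+ */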
+static int ni_mseries_get_pll_parameters(unsigned int reference_period_ns,
+					 unsigned int *freq_divider,
+					 unsigned int *freq_multiplier,
+					 unsigned int *actual_period_ns)
+{
+	unsigned div;
+	unsigned best_div = 1;
+	static const unsigned max_div = 0x10;
+	unsigned mult;
+	unsigned best_mult = 1;
+	static const unsigned max_mult = 0x100;
+	static const unsigned pico_per_nano = 1000;
+
+	const unsigned reference_picosec = reference_period_ns * pico_per_nano;
+	/* The m-series wants the phase-locked loop to output 80 MHz (12.5 ns
+	 * period), which is divided by 4 down to 20 MHz for most timing
+	 * clocks */
+	static const unsigned target_picosec = 12500;
+	static const unsigned fudge_factor_80_to_20Mhz = 4;
+	int best_period_picosec = 0;
+	for (div = 1; div <= max_div; ++div) {
+		for (mult = 1; mult <= max_mult; ++mult) {
+			unsigned new_period_ps =
+				(reference_picosec * div) / mult;
+			if (abs(new_period_ps - target_picosec) <
+			    abs(best_period_picosec - target_picosec)) {
+				best_period_picosec = new_period_ps;
+				best_div = div;
+				best_mult = mult;
+			}
+		}
+	}
+	if (best_period_picosec == 0) {
+		__a4l_err("%s: bug, failed to find pll parameters\n",
+			  __FUNCTION__);
+		return -EIO;
+	}
+	*freq_divider = best_div;
+	*freq_multiplier = best_mult;
+	*actual_period_ns =
+		(best_period_picosec * fudge_factor_80_to_20Mhz +
+		 (pico_per_nano / 2)) / pico_per_nano;
+	return 0;
+}
+
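+/*
+ * Program the M-Series PLL from an external reference: select the PLL
+ * input (star trigger, PXI 10 MHz or an RTSI line), write the computed
+ * divider/multiplier, then poll PLL_Status until the lock bit is set
+ * (or bail out after ~1 ms).
+ */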
+static int ni_mseries_set_pll_master_clock(struct a4l_device * dev,
+					   unsigned int source,
+					   unsigned int period_ns)
+{
+	static const unsigned min_period_ns = 50;
+	static const unsigned max_period_ns = 1000;
+	static const unsigned timeout = 1000;
+	unsigned pll_control_bits;
+	unsigned freq_divider;
+	unsigned freq_multiplier;
+	unsigned i;
+	int retval;
+	if (source == NI_MIO_PLL_PXI10_CLOCK)
+		period_ns = 100;
+	/* These limits are somewhat arbitrary, but NI advertises a 1 to
+	   20 MHz input range, so we use that */
+	if (period_ns < min_period_ns || period_ns > max_period_ns) {
+		a4l_err(dev,
+			"%s: you must specify an input clock period "
+			"between %i and %i ns "
+			"for the phase-locked loop.\n",
+			__FUNCTION__, min_period_ns, max_period_ns);
+		return -EINVAL;
+	}
+	devpriv->rtsi_trig_direction_reg &= ~Use_RTSI_Clock_Bit;
+	devpriv->stc_writew(dev, devpriv->rtsi_trig_direction_reg,
+			    RTSI_Trig_Direction_Register);
+	pll_control_bits =
+		MSeries_PLL_Enable_Bit | MSeries_PLL_VCO_Mode_75_150MHz_Bits;
+	devpriv->clock_and_fout2 |=
+		MSeries_Timebase1_Select_Bit | MSeries_Timebase3_Select_Bit;
+	devpriv->clock_and_fout2 &= ~MSeries_PLL_In_Source_Select_Mask;
+	switch (source) {
+	case NI_MIO_PLL_PXI_STAR_TRIGGER_CLOCK:
+		devpriv->clock_and_fout2 |=
+			MSeries_PLL_In_Source_Select_Star_Trigger_Bits;
+		retval = ni_mseries_get_pll_parameters(period_ns, &freq_divider,
+						       &freq_multiplier, &devpriv->clock_ns);
+		if (retval < 0)
+			return retval;
+		break;
+	case NI_MIO_PLL_PXI10_CLOCK:
+		/* the PXI clock is 10 MHz */
+		devpriv->clock_and_fout2 |=
+			MSeries_PLL_In_Source_Select_PXI_Clock10;
+		retval = ni_mseries_get_pll_parameters(period_ns, &freq_divider,
+						       &freq_multiplier, &devpriv->clock_ns);
+		if (retval < 0)
+			return retval;
+		break;
+	default:
+	{
+		unsigned rtsi_channel;
+		static const unsigned max_rtsi_channel = 7;
+		for (rtsi_channel = 0; rtsi_channel <= max_rtsi_channel;
+		     ++rtsi_channel) {
+			if (source ==
+			    NI_MIO_PLL_RTSI_CLOCK(rtsi_channel)) {
+				devpriv->clock_and_fout2 |=
+					MSeries_PLL_In_Source_Select_RTSI_Bits
+					(rtsi_channel);
+				break;
+			}
+		}
+		if (rtsi_channel > max_rtsi_channel)
+			return -EINVAL;
+		retval = ni_mseries_get_pll_parameters(period_ns,
+						       &freq_divider, &freq_multiplier,
+						       &devpriv->clock_ns);
+		if (retval < 0)
+			return retval;
+	}
+	break;
+	}
+	ni_writew(devpriv->clock_and_fout2, M_Offset_Clock_and_Fout2);
+	pll_control_bits |=
+		MSeries_PLL_Divisor_Bits(freq_divider) |
+		MSeries_PLL_Multiplier_Bits(freq_multiplier);
+	ni_writew(pll_control_bits, M_Offset_PLL_Control);
+	devpriv->clock_source = source;
+	/* It typically seems to take a few hundred microseconds for the PLL to lock */
+	for (i = 0; i < timeout; ++i) {
+		if (ni_readw(M_Offset_PLL_Status) & MSeries_PLL_Locked_Bit) {
+			break;
+		}
+		udelay(1);
+	}
+	if (i == timeout) {
+		a4l_err(dev,
+			"%s: timed out waiting for PLL to lock "
+			"to reference clock source %i with period %i ns.\n",
+			__FUNCTION__, source, period_ns);
+		return -ETIMEDOUT;
+	}
+	return 3;
+}
+
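+/*
+ * Master clock selection: NI_MIO_INTERNAL_CLOCK falls back to the
+ * onboard timebase; otherwise M-Series boards lock their PLL to the
+ * requested source, while E-Series boards can only take the clock
+ * directly from the RTSI bus.
+ */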
+static int ni_set_master_clock(struct a4l_device *dev,
+			       unsigned int source, unsigned int period_ns)
+{
+	if (source == NI_MIO_INTERNAL_CLOCK) {
+		devpriv->rtsi_trig_direction_reg &= ~Use_RTSI_Clock_Bit;
+		devpriv->stc_writew(dev, devpriv->rtsi_trig_direction_reg,
+				    RTSI_Trig_Direction_Register);
+		devpriv->clock_ns = TIMEBASE_1_NS;
+		if (boardtype.reg_type & ni_reg_m_series_mask) {
+			devpriv->clock_and_fout2 &=
+				~(MSeries_Timebase1_Select_Bit |
+				  MSeries_Timebase3_Select_Bit);
+			ni_writew(devpriv->clock_and_fout2,
+				  M_Offset_Clock_and_Fout2);
+			ni_writew(0, M_Offset_PLL_Control);
+		}
+		devpriv->clock_source = source;
+	} else {
+		if (boardtype.reg_type & ni_reg_m_series_mask) {
+			return ni_mseries_set_pll_master_clock(dev, source,
+							       period_ns);
+		} else {
+			if (source == NI_MIO_RTSI_CLOCK) {
+				devpriv->rtsi_trig_direction_reg |=
+					Use_RTSI_Clock_Bit;
+				devpriv->stc_writew(dev,
+						    devpriv->rtsi_trig_direction_reg,
+						    RTSI_Trig_Direction_Register);
+				if (period_ns == 0) {
+					a4l_err(dev,
+						"%s: we don't handle an "
+						"unspecified clock period "
+						"correctly yet, returning error.\n",
+						__FUNCTION__);
+					return -EINVAL;
+				} else {
+					devpriv->clock_ns = period_ns;
+				}
+				devpriv->clock_source = source;
+			} else
+				return -EINVAL;
+		}
+	}
+	return 3;
+}
+
+static void ni_rtsi_init(struct a4l_device * dev)
+{
+	/* Initialise the RTSI bus signal switch to a default state */
+
+	/* Set clock mode to internal */
+	devpriv->clock_and_fout2 = MSeries_RTSI_10MHz_Bit;
+	if (ni_set_master_clock(dev, NI_MIO_INTERNAL_CLOCK, 0) < 0) {
+		a4l_err(dev, "ni_set_master_clock failed, bug?");
+	}
+
+	/* Default internal lines routing to RTSI bus lines */
+	devpriv->rtsi_trig_a_output_reg =
+		RTSI_Trig_Output_Bits(0, NI_RTSI_OUTPUT_ADR_START1) |
+		RTSI_Trig_Output_Bits(1, NI_RTSI_OUTPUT_ADR_START2) |
+		RTSI_Trig_Output_Bits(2, NI_RTSI_OUTPUT_SCLKG) |
+		RTSI_Trig_Output_Bits(3, NI_RTSI_OUTPUT_DACUPDN);
+	devpriv->stc_writew(dev, devpriv->rtsi_trig_a_output_reg,
+			    RTSI_Trig_A_Output_Register);
+	devpriv->rtsi_trig_b_output_reg =
+		RTSI_Trig_Output_Bits(4, NI_RTSI_OUTPUT_DA_START1) |
+		RTSI_Trig_Output_Bits(5, NI_RTSI_OUTPUT_G_SRC0) |
+		RTSI_Trig_Output_Bits(6, NI_RTSI_OUTPUT_G_GATE0);
+
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		devpriv->rtsi_trig_b_output_reg |=
+			RTSI_Trig_Output_Bits(7, NI_RTSI_OUTPUT_RTSI_OSC);
+	devpriv->stc_writew(dev, devpriv->rtsi_trig_b_output_reg,
+			    RTSI_Trig_B_Output_Register);
+}
+
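+/*
+ * Board attach: subdevices are registered in the fixed order expected
+ * by the NI_*_SUBDEV indices (AI, AO, DIO, 8255, unused, calibration,
+ * EEPROM, PFI, CS5529, serial, RTSI, the GPCT counters, frequency
+ * output); a4l_add_subd() returns the slot actually used, which is
+ * checked against the expected index.
+ */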
+int a4l_ni_E_init(struct a4l_device *dev)
+{
+	int ret;
+	unsigned int j, counter_variant;
+	struct a4l_subdevice *subd;
+
+	if (boardtype.n_aochan > MAX_N_AO_CHAN) {
+		a4l_err(dev, "bug! boardtype.n_aochan > MAX_N_AO_CHAN\n");
+		return -EINVAL;
+	}
+
+	/* analog input subdevice */
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: starting attach procedure...\n");
+
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: registering AI subdevice...\n");
+
+	if (boardtype.n_adchan) {
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: AI: %d channels\n", boardtype.n_adchan);
+
+		subd->flags = A4L_SUBD_AI | A4L_SUBD_CMD | A4L_SUBD_MMAP;
+		subd->rng_desc = ni_range_lkup[boardtype.gainlkup];
+
+		subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+					  sizeof(struct a4l_channel), GFP_KERNEL);
+
+		subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+		subd->chan_desc->length = boardtype.n_adchan;
+		subd->chan_desc->chans[0].flags = A4L_CHAN_AREF_DIFF;
+		if (boardtype.reg_type != ni_reg_611x)
+			subd->chan_desc->chans[0].flags |= A4L_CHAN_AREF_GROUND |
+				A4L_CHAN_AREF_COMMON | A4L_CHAN_AREF_OTHER;
+		subd->chan_desc->chans[0].nb_bits = boardtype.adbits;
+
+		subd->insn_read = ni_ai_insn_read;
+		subd->insn_config = ni_ai_insn_config;
+		subd->do_cmdtest = ni_ai_cmdtest;
+		subd->do_cmd = ni_ai_cmd;
+		subd->cancel = ni_ai_reset;
+		subd->trigger = ni_ai_inttrig;
+
+		subd->munge = (boardtype.adbits > 16) ?
+			ni_ai_munge32 : ni_ai_munge16;
+
+		subd->cmd_mask = &mio_ai_cmd_mask;
+	} else {
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: AI subdevice not present\n");
+		subd->flags = A4L_SUBD_UNUSED;
+	}
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_AI_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: AI subdevice registered\n");
+
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: registering AO subdevice...\n");
+
+	/* analog output subdevice */
+	if (boardtype.n_aochan) {
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: AO: %d channels\n", boardtype.n_aochan);
+
+		subd->flags = A4L_SUBD_AO;
+		subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+					  sizeof(struct a4l_channel), GFP_KERNEL);
+
+		subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+		subd->chan_desc->length = boardtype.n_aochan;
+		subd->chan_desc->chans[0].flags = A4L_CHAN_AREF_GROUND;
+		subd->chan_desc->chans[0].nb_bits = boardtype.aobits;
+
+		subd->rng_desc = boardtype.ao_range_table;
+
+		subd->insn_read = ni_ao_insn_read;
+		if (boardtype.reg_type & ni_reg_6xxx_mask)
+			subd->insn_write = &ni_ao_insn_write_671x;
+		else
+			subd->insn_write = &ni_ao_insn_write;
+
+
+		if (boardtype.ao_fifo_depth) {
+			subd->flags |= A4L_SUBD_CMD | A4L_SUBD_MMAP;
+			subd->do_cmd = &ni_ao_cmd;
+			subd->cmd_mask = &mio_ao_cmd_mask;
+			subd->do_cmdtest = &ni_ao_cmdtest;
+			subd->trigger = ni_ao_inttrig;
+			if ((boardtype.reg_type & ni_reg_m_series_mask) == 0)
+				subd->munge = &ni_ao_munge;
+		}
+
+		subd->cancel = &ni_ao_reset;
+
+	} else {
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: AO subdevice not present\n");
+		subd->flags = A4L_SUBD_UNUSED;
+	}
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_AO_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: AO subdevice registered\n");
+
+	if ((boardtype.reg_type & ni_reg_67xx_mask))
+		init_ao_67xx(dev);
+
+	/* digital i/o subdevice */
+
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: registering DIO subdevice...\n");
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: DIO: %d channels\n",
+		boardtype.num_p0_dio_channels);
+
+	subd->flags = A4L_SUBD_DIO;
+
+	subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+				  sizeof(struct a4l_channel), GFP_KERNEL);
+	subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+	subd->chan_desc->length = boardtype.num_p0_dio_channels;
+	subd->chan_desc->chans[0].flags = A4L_CHAN_AREF_GROUND;
+	subd->chan_desc->chans[0].nb_bits = 1;
+	devpriv->io_bits = 0; /* all bits input */
+
+	subd->rng_desc = &range_digital;
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+
+		if (subd->chan_desc->length == 8)
+			subd->insn_bits = ni_m_series_dio_insn_bits_8;
+		else
+			subd->insn_bits = ni_m_series_dio_insn_bits_32;
+
+		subd->insn_config = ni_m_series_dio_insn_config;
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: DIO: command feature available\n");
+
+		subd->flags |= A4L_SUBD_CMD;
+		subd->do_cmd = ni_cdio_cmd;
+		subd->do_cmdtest = ni_cdio_cmdtest;
+		subd->cmd_mask = &mio_dio_cmd_mask;
+		subd->cancel = ni_cdio_cancel;
+		subd->trigger = ni_cdo_inttrig;
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+		ni_writel(CDO_Reset_Bit | CDI_Reset_Bit, M_Offset_CDIO_Command);
+		ni_writel(devpriv->io_bits, M_Offset_DIO_Direction);
+	} else {
+
+		subd->insn_bits = ni_dio_insn_bits;
+		subd->insn_config = ni_dio_insn_config;
+		devpriv->dio_control = DIO_Pins_Dir(devpriv->io_bits);
+		ni_writew(devpriv->dio_control, DIO_Control_Register);
+	}
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_DIO_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: DIO subdevice registered\n");
+
+	/* 8255 device */
+	subd = a4l_alloc_subd(sizeof(subd_8255_t), NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: registering 8255 subdevice...\n");
+
+	if (boardtype.has_8255) {
+		devpriv->subd_8255.cb_arg = (unsigned long)dev;
+		devpriv->subd_8255.cb_func = ni_8255_callback;
+		a4l_subdev_8255_init(subd);
+	} else {
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: 8255 subdevice not present\n");
+		subd->flags = A4L_SUBD_UNUSED;
+	}
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_8255_DIO_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: 8255 subdevice registered\n");
+
+	/* formerly general purpose counter/timer device, but no longer used */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	subd->flags = A4L_SUBD_UNUSED;
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_UNUSED_SUBDEV)
+		return ret;
+
+	/* calibration subdevice -- ai and ao */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: registering calib subdevice...\n");
+
+	subd->flags = A4L_SUBD_CALIB;
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		/* internal PWM analog output
+		   used for AI nonlinearity calibration */
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: calib: M series calibration\n");
+		subd->insn_config = ni_m_series_pwm_config;
+		ni_writel(0x0, M_Offset_Cal_PWM);
+	} else if (boardtype.reg_type == ni_reg_6143) {
+		/* internal PWM analog output
+		   used for AI nonlinearity calibration */
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: calib: 6143 calibration\n");
+		subd->insn_config = ni_6143_pwm_config;
+	} else {
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: calib: common calibration\n");
+		subd->insn_read = ni_calib_insn_read;
+		subd->insn_write = ni_calib_insn_write;
+		caldac_setup(dev, subd);
+	}
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_CALIBRATION_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: calib subdevice registered\n");
+
+	/* EEPROM */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: registering EEPROM subdevice...\n");
+
+	subd->flags = A4L_SUBD_MEMORY;
+
+	subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+				  sizeof(struct a4l_channel), GFP_KERNEL);
+	subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+	subd->chan_desc->chans[0].flags = 0;
+	subd->chan_desc->chans[0].nb_bits = 8;
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		subd->chan_desc->length = M_SERIES_EEPROM_SIZE;
+		subd->insn_read = ni_m_series_eeprom_insn_read;
+	} else {
+		subd->chan_desc->length = 512;
+		subd->insn_read = ni_eeprom_insn_read;
+	}
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: EEPROM: size = %lu\n", subd->chan_desc->length);
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_EEPROM_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: EEPROM subdevice registered\n");
+
+	/* PFI */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: registering PFI(DIO) subdevice...\n");
+
+	subd->flags = A4L_SUBD_DIO;
+
+	subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+				  sizeof(struct a4l_channel), GFP_KERNEL);
+	subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+	subd->chan_desc->chans[0].flags = 0;
+	subd->chan_desc->chans[0].nb_bits = 1;
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		unsigned int i;
+		subd->chan_desc->length = 16;
+		ni_writew(devpriv->dio_state, M_Offset_PFI_DO);
+		for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) {
+			ni_writew(devpriv->pfi_output_select_reg[i],
+				  M_Offset_PFI_Output_Select(i + 1));
+		}
+	} else
+		subd->chan_desc->length = 10;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: PFI: %lu bits...\n", subd->chan_desc->length);
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		subd->insn_bits = ni_pfi_insn_bits;
+	}
+
+	subd->insn_config = ni_pfi_insn_config;
+	ni_set_bits(dev, IO_Bidirection_Pin_Register, ~0, 0);
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_PFI_DIO_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: PFI subdevice registered\n");
+
+	/* cs5529 calibration adc */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+#if 0 /* TODO: add subdevices callbacks */
+	subd->flags = A4L_SUBD_AI;
+
+	if (boardtype.reg_type & ni_reg_67xx_mask) {
+
+		subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+					  sizeof(struct a4l_channel), GFP_KERNEL);
+		subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+		subd->chan_desc->length = boardtype.n_aochan;
+		subd->chan_desc->chans[0].flags = 0;
+		subd->chan_desc->chans[0].nb_bits = 16;
+
+		/* one channel for each analog output channel */
+		subd->rng_desc = &a4l_range_unknown;	/* XXX */
+		subd->insn_read = cs5529_ai_insn_read;
+		init_cs5529(dev);
+	} else
+#endif /* TODO: add subdevices callbacks */
+		subd->flags = A4L_SUBD_UNUSED;
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_CS5529_CALIBRATION_SUBDEV)
+		return ret;
+
+	/* Serial */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: registering serial subdevice...\n");
+
+	subd->flags = A4L_SUBD_SERIAL;
+
+	subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+				  sizeof(struct a4l_channel), GFP_KERNEL);
+	subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+	subd->chan_desc->length = 1;
+	subd->chan_desc->chans[0].flags = 0;
+	subd->chan_desc->chans[0].nb_bits = 8;
+
+	subd->insn_config = ni_serial_insn_config;
+
+	devpriv->serial_interval_ns = 0;
+	devpriv->serial_hw_mode = 0;
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_SERIAL_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: serial subdevice registered\n");
+
+	/* RTSI */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+#if 1 /* TODO: add RTSI subdevice */
+	subd->flags = A4L_SUBD_UNUSED;
+	ni_rtsi_init(dev);
+
+#else /* TODO: add RTSI subdevice */
+	subd->flags = A4L_SUBD_DIO;
+
+	subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+				  sizeof(struct a4l_channel), GFP_KERNEL);
+	subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+	subd->chan_desc->length = 8;
+	subd->chan_desc->chans[0].flags = 0;
+	subd->chan_desc->chans[0].nb_bits = 1;
+
+	subd->insn_bits = ni_rtsi_insn_bits;
+	subd->insn_config = ni_rtsi_insn_config;
+	ni_rtsi_init(dev);
+
+#endif /* TODO: add RTSI subdevice */
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_RTSI_SUBDEV)
+		return ret;
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		counter_variant = ni_gpct_variant_m_series;
+	} else {
+		counter_variant = ni_gpct_variant_e_series;
+	}
+	devpriv->counter_dev =
+		a4l_ni_gpct_device_construct(dev,
+					     &ni_gpct_write_register,
+					     &ni_gpct_read_register,
+					     counter_variant, NUM_GPCT);
+
+	/* General purpose counters */
+	for (j = 0; j < NUM_GPCT; ++j) {
+		struct ni_gpct *counter;
+
+		subd = a4l_alloc_subd(sizeof(struct ni_gpct), NULL);
+		if(subd == NULL)
+			return -ENOMEM;
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: registering GPCT[%d] subdevice...\n", j);
+
+		subd->flags = A4L_SUBD_COUNTER;
+
+		subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+					  sizeof(struct a4l_channel), GFP_KERNEL);
+		subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+		subd->chan_desc->length = 3;
+		subd->chan_desc->chans[0].flags = 0;
+
+		if (boardtype.reg_type & ni_reg_m_series_mask)
+			subd->chan_desc->chans[0].nb_bits = 32;
+		else
+			subd->chan_desc->chans[0].nb_bits = 24;
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: GPCT[%d]: %lu bits\n",
+			j, subd->chan_desc->chans[0].nb_bits);
+
+		subd->insn_read = ni_gpct_insn_read;
+		subd->insn_write = ni_gpct_insn_write;
+		subd->insn_config = ni_gpct_insn_config;
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: GPCT[%d]: command feature available\n", j);
+		subd->flags |= A4L_SUBD_CMD;
+		subd->cmd_mask = &a4l_ni_tio_cmd_mask;
+		subd->do_cmd = ni_gpct_cmd;
+		subd->do_cmdtest = ni_gpct_cmdtest;
+		subd->cancel = ni_gpct_cancel;
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+		counter = (struct ni_gpct *)subd->priv;
+		rtdm_lock_init(&counter->lock);
+		counter->chip_index = 0;
+		counter->counter_index = j;
+		counter->counter_dev = devpriv->counter_dev;
+		devpriv->counter_dev->counters[j] = counter;
+
+		a4l_ni_tio_init_counter(counter);
+
+		ret = a4l_add_subd(dev, subd);
+		if(ret != NI_GPCT_SUBDEV(j))
+			return ret;
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: GPCT[%d] subdevice registered\n", j);
+	}
+
+	/* Frequency output */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: registering counter subdevice...\n");
+
+	subd->flags = A4L_SUBD_COUNTER;
+
+	subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+				  sizeof(struct a4l_channel), GFP_KERNEL);
+	subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+	subd->chan_desc->length = 1;
+	subd->chan_desc->chans[0].flags = 0;
+	subd->chan_desc->chans[0].nb_bits = 4;
+
+	subd->insn_read = ni_freq_out_insn_read;
+	subd->insn_write = ni_freq_out_insn_write;
+	subd->insn_config = ni_freq_out_insn_config;
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_FREQ_OUT_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: counter subdevice registered\n");
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: initializing AI...\n");
+
+	/* ai configuration */
+	ni_ai_reset(a4l_get_subd(dev, NI_AI_SUBDEV));
+	if ((boardtype.reg_type & ni_reg_6xxx_mask) == 0) {
+		/* BEAM: is this needed for the PCI-6143? */
+		devpriv->clock_and_fout =
+			Slow_Internal_Time_Divide_By_2 |
+			Slow_Internal_Timebase |
+			Clock_To_Board_Divide_By_2 |
+			Clock_To_Board |
+			AI_Output_Divide_By_2 | AO_Output_Divide_By_2;
+	} else {
+		devpriv->clock_and_fout =
+			Slow_Internal_Time_Divide_By_2 |
+			Slow_Internal_Timebase |
+			Clock_To_Board_Divide_By_2 | Clock_To_Board;
+	}
+	devpriv->stc_writew(dev, devpriv->clock_and_fout,
+			    Clock_and_FOUT_Register);
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: AI initialization OK\n");
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: initializing AO...\n");
+
+	/* analog output configuration */
+	ni_ao_reset(a4l_get_subd(dev, NI_AO_SUBDEV));
+
+	if (a4l_get_irq(dev) != A4L_IRQ_UNUSED) {
+		devpriv->stc_writew(dev,
+				    (devpriv->irq_polarity ? Interrupt_Output_Polarity : 0) |
+				    (Interrupt_Output_On_3_Pins & 0) | Interrupt_A_Enable |
+				    Interrupt_B_Enable |
+				    Interrupt_A_Output_Select(devpriv->irq_pin) |
+				    Interrupt_B_Output_Select(devpriv->irq_pin),
+				    Interrupt_Control_Register);
+	}
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: AO initialization OK\n");
+
+	/* DMA setup */
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: DMA setup\n");
+
+	ni_writeb(devpriv->ai_ao_select_reg, AI_AO_Select);
+	ni_writeb(devpriv->g0_g1_select_reg, G0_G1_Select);
+
+	if (boardtype.reg_type & ni_reg_6xxx_mask) {
+		ni_writeb(0, Magic_611x);
+	} else if (boardtype.reg_type & ni_reg_m_series_mask) {
+		int channel;
+		for (channel = 0; channel < boardtype.n_aochan; ++channel) {
+			ni_writeb(0xf, M_Offset_AO_Waveform_Order(channel));
+			ni_writeb(0x0,
+				  M_Offset_AO_Reference_Attenuation(channel));
+		}
+		ni_writeb(0x0, M_Offset_AO_Calibration);
+	}
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: attach procedure complete\n");
+
+	return 0;
+}
+
+MODULE_DESCRIPTION("Analogy support for NI DAQ-STC based boards");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai);
+EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai_limited);
+EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai_limited14);
+EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai_bipolar4);
+EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai_611x);
+EXPORT_SYMBOL_GPL(a4l_range_ni_M_ai_622x);
+EXPORT_SYMBOL_GPL(a4l_range_ni_M_ai_628x);
+EXPORT_SYMBOL_GPL(a4l_range_ni_S_ai_6143);
+EXPORT_SYMBOL_GPL(a4l_range_ni_E_ao_ext);
+EXPORT_SYMBOL_GPL(a4l_ni_E_interrupt);
+EXPORT_SYMBOL_GPL(a4l_ni_E_init);
+++ linux-patched/drivers/xenomai/analogy/national_instruments/mite.c	2022-03-21 12:58:31.123872052 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/mite.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Hardware driver for NI Mite PCI interface chip
+ *
+ * Copyright (C) 1999 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The NI Mite driver was originally written by Tomasz Motylewski
+ * <...>, and ported to comedi by ds.
+ *
+ * References for specifications:
+ *
+ * 321747b.pdf  Register Level Programmer Manual (obsolete)
+ * 321747c.pdf  Register Level Programmer Manual (new)
+ * DAQ-STC reference manual
+ *
+ * Other possibly relevant info:
+ *
+ * 320517c.pdf  User manual (obsolete)
+ * 320517f.pdf  User manual (new)
+ * 320889a.pdf  delete
+ * 320906c.pdf  maximum signal ratings
+ * 321066a.pdf  about 16x
+ * 321791a.pdf  discontinuation of at-mio-16e-10 rev. c
+ * 321808a.pdf  about at-mio-16e-10 rev P
+ * 321837a.pdf  discontinuation of at-mio-16de-10 rev d
+ * 321838a.pdf  about at-mio-16de-10 rev N
+ *
+ * ISSUES:
+ */
+
+#include <linux/module.h>
+#include "mite.h"
+
+#ifdef CONFIG_DEBUG_MITE
+#define MDPRINTK(fmt, args...) rtdm_printk(fmt, ##args)
+#else /* !CONFIG_DEBUG_MITE */
+#define MDPRINTK(fmt, args...)
+#endif /* CONFIG_DEBUG_MITE */
+
+static LIST_HEAD(mite_devices);
+
+static struct pci_device_id mite_id[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_NATINST, PCI_ANY_ID), },
+	{0, }
+};
+
+static int mite_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	int i, err = 0;
+	struct mite_struct *mite;
+
+	mite = kzalloc(sizeof(struct mite_struct), GFP_KERNEL);
+	if(mite == NULL)
+		return -ENOMEM;
+
+	rtdm_lock_init(&mite->lock);
+
+	mite->pcidev = dev;
+	if (pci_enable_device(dev) < 0) {
+		__a4l_err("error enabling mite\n");
+		err = -EIO;
+		goto out;
+	}
+
+	for(i = 0; i < MAX_MITE_DMA_CHANNELS; i++) {
+		mite->channels[i].mite = mite;
+		mite->channels[i].channel = i;
+		mite->channels[i].done = 1;
+	}
+
+	list_add(&mite->list, &mite_devices);
+
+out:
+	if (err < 0)
+		kfree(mite);
+
+	return err;
+}
+
+static void mite_remove(struct pci_dev *dev)
+{
+	struct list_head *this;
+
+	list_for_each(this, &mite_devices) {
+		struct mite_struct *mite =
+			list_entry(this, struct mite_struct, list);
+
+		if(mite->pcidev == dev) {
+			list_del(this);
+			kfree(mite);
+			break;
+		}
+	}
+}
+
+static struct pci_driver mite_driver = {
+	.name = "analogy_mite",
+	.id_table = mite_id,
+	.probe = mite_probe,
+	.remove = mite_remove,
+};
+
+int a4l_mite_setup(struct mite_struct *mite, int use_iodwbsr_1)
+{
+	unsigned long length;
+	resource_size_t addr;
+	int i;
+	u32 csigr_bits;
+	unsigned unknown_dma_burst_bits;
+
+	__a4l_dbg(1, drv_dbg, "starting setup...\n");
+
+	pci_set_master(mite->pcidev);
+
+	if (pci_request_regions(mite->pcidev, "mite")) {
+		__a4l_err("failed to request mite io regions\n");
+		return -EIO;
+	}
+
+	/* The PCI BAR0 is the Mite */
+	addr = pci_resource_start(mite->pcidev, 0);
+	length = pci_resource_len(mite->pcidev, 0);
+	mite->mite_phys_addr = addr;
+	mite->mite_io_addr = ioremap(addr, length);
+	if (!mite->mite_io_addr) {
+		__a4l_err("failed to remap mite io memory address\n");
+		pci_release_regions(mite->pcidev);
+		return -ENOMEM;
+	}
+
+	__a4l_dbg(1, drv_dbg, "bar0(mite) 0x%08llx mapped to %p\n",
+		  (unsigned long long)mite->mite_phys_addr,
+		  mite->mite_io_addr);
+
+
+	/* The PCI BAR1 is the DAQ */
+	addr = pci_resource_start(mite->pcidev, 1);
+	length = pci_resource_len(mite->pcidev, 1);
+	mite->daq_phys_addr = addr;
+	mite->daq_io_addr = ioremap(mite->daq_phys_addr, length);
+	if (!mite->daq_io_addr) {
+		__a4l_err("failed to remap daq io memory address\n");
+		pci_release_regions(mite->pcidev);
+		return -ENOMEM;
+	}
+
+	__a4l_dbg(1, drv_dbg, "bar1(daq) 0x%08llx mapped to %p\n",
+		  (unsigned long long)mite->daq_phys_addr,
+		  mite->daq_io_addr);
+
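+	/*
+	 * Program an I/O device window so that accesses routed through
+	 * the MITE reach the DAQ register space at daq_phys_addr; window
+	 * 1 (IODWBSR_1) is used when the caller requests it, window 0
+	 * otherwise.
+	 */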
+	if (use_iodwbsr_1) {
+		__a4l_dbg(1, drv_dbg, "using I/O Window Base Size register 1\n");
+		writel(0, mite->mite_io_addr + MITE_IODWBSR);
+		writel(mite->daq_phys_addr | WENAB |
+		       MITE_IODWBSR_1_WSIZE_bits(length),
+		       mite->mite_io_addr + MITE_IODWBSR_1);
+		writel(0, mite->mite_io_addr + MITE_IODWCR_1);
+	} else {
+		writel(mite->daq_phys_addr | WENAB,
+		       mite->mite_io_addr + MITE_IODWBSR);
+	}
+
+	/* Make sure dma bursts work.  I got this from running a bus analyzer
+	   on a pxi-6281 and a pxi-6713.  6713 powered up with register value
+	   of 0x61f and bursts worked.  6281 powered up with register value of
+	   0x1f and bursts didn't work.  The NI windows driver reads the register,
+	   then does a bitwise-or of 0x600 with it and writes it back.
+	*/
+	unknown_dma_burst_bits =
+		readl(mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
+	unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
+	writel(unknown_dma_burst_bits,
+	       mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
+
+	csigr_bits = readl(mite->mite_io_addr + MITE_CSIGR);
+	mite->num_channels = mite_csigr_dmac(csigr_bits);
+	if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
+		__a4l_err("MITE: bug? chip claims to have %i dma channels. "
+			  "Setting to %i.\n",
+			  mite->num_channels, MAX_MITE_DMA_CHANNELS);
+		mite->num_channels = MAX_MITE_DMA_CHANNELS;
+	}
+
+	__a4l_dbg(1, drv_dbg, " version = %i, type = %i, mite mode = %i, "
+		  "interface mode = %i\n",
+		  mite_csigr_version(csigr_bits),
+		  mite_csigr_type(csigr_bits),
+		  mite_csigr_mmode(csigr_bits),
+		  mite_csigr_imode(csigr_bits));
+	__a4l_dbg(1, drv_dbg, " num channels = %i, write post fifo depth = %i, "
+		  "wins = %i, iowins = %i\n",
+		  mite_csigr_dmac(csigr_bits),
+		  mite_csigr_wpdep(csigr_bits),
+		  mite_csigr_wins(csigr_bits),
+		  mite_csigr_iowins(csigr_bits));
+
+	for (i = 0; i < mite->num_channels; i++) {
+		/* Registers the channel as a free one */
+		mite->channel_allocated[i] = 0;
+		/* Reset the channel */
+		writel(CHOR_DMARESET, mite->mite_io_addr + MITE_CHOR(i));
+		/* Disable interrupts */
+		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
+		       CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
+		       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
+		       mite->mite_io_addr + MITE_CHCR(i));
+
+		__a4l_dbg(1, drv_dbg, "channel[%d] initialized\n", i);
+	}
+
+	mite->used = 1;
+
+	return 0;
+}
+
+void a4l_mite_unsetup(struct mite_struct *mite)
+{
+	if (!mite)
+		return;
+
+	if (mite->mite_io_addr) {
+		iounmap(mite->mite_io_addr);
+		mite->mite_io_addr = NULL;
+	}
+
+	if (mite->daq_io_addr) {
+		iounmap(mite->daq_io_addr);
+		mite->daq_io_addr = NULL;
+	}
+
+	if(mite->used)
+		pci_release_regions( mite->pcidev );
+
+	mite->used = 0;
+}
+
+void a4l_mite_list_devices(void)
+{
+	struct list_head *this;
+
+	printk("Analogy: MITE: Available NI device IDs:");
+	list_for_each(this, &mite_devices) {
+		struct mite_struct *mite =
+			list_entry(this, struct mite_struct, list);
+
+		printk(" 0x%04x", mite_device_id(mite));
+		if(mite->used)
+			printk("(used)");
+	}
+
+	printk("\n");
+}
+
+
+
+struct mite_struct * a4l_mite_find_device(int bus, 
+					  int slot, unsigned short device_id)
+{
+	struct list_head *this;
+
+	list_for_each(this, &mite_devices) {
+		struct mite_struct *mite =
+			list_entry(this, struct mite_struct, list);
+
+		if(mite->pcidev->device != device_id)
+			continue;
+
+		if((bus <= 0 && slot <= 0) ||
+		   (bus == mite->pcidev->bus->number &&
+		    slot == PCI_SLOT(mite->pcidev->devfn)))
+			return mite;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(a4l_mite_find_device);
+
+struct mite_channel *
+a4l_mite_request_channel_in_range(struct mite_struct *mite,
+				  struct mite_dma_descriptor_ring *ring,
+				  unsigned min_channel, unsigned max_channel)
+{
+	int i;
+	unsigned long flags;
+	struct mite_channel *channel = NULL;
+
+	__a4l_dbg(1, drv_dbg, " min_channel = %u, max_channel = %u\n",
+		  min_channel, max_channel);
+
+	/* spin lock so a4l_mite_release_channel can be called safely
+	   from interrupts */
+	rtdm_lock_get_irqsave(&mite->lock, flags);
+	for (i = min_channel; i <= max_channel; ++i) {
+		__a4l_dbg(1, drv_dbg, " channel[%d] allocated = %d\n",
+			  i, mite->channel_allocated[i]);
+
+		if (mite->channel_allocated[i] == 0) {
+			mite->channel_allocated[i] = 1;
+			channel = &mite->channels[i];
+			channel->ring = ring;
+			break;
+		}
+	}
+	rtdm_lock_put_irqrestore(&mite->lock, flags);
+	return channel;
+}
+
+void a4l_mite_release_channel(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	unsigned long flags;
+
+	/* Spin lock to prevent races with mite_request_channel */
+	rtdm_lock_get_irqsave(&mite->lock, flags);
+	if (mite->channel_allocated[mite_chan->channel]) {
+		/* disable all channel's interrupts */
+		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
+		       CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
+		       CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
+		       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
+		       mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
+		a4l_mite_dma_disarm(mite_chan);
+		mite_dma_reset(mite_chan);
+		mite->channel_allocated[mite_chan->channel] = 0;
+		mite_chan->ring = NULL;
+		mmiowb();
+	}
+	rtdm_lock_put_irqrestore(&mite->lock, flags);
+}
+
+void a4l_mite_dma_arm(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	int chor;
+	unsigned long flags;
+
+	MDPRINTK("a4l_mite_dma_arm ch%i\n", mite_chan->channel);
+	/* The memory barrier is intended to ensure that any twiddling with
+	   the buffer is done before writing to the mite to arm the dma
+	   transfer */
+	smp_mb();
+	/* arm */
+	chor = CHOR_START;
+	rtdm_lock_get_irqsave(&mite->lock, flags);
+	mite_chan->done = 0;
+	writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+	mmiowb();
+	rtdm_lock_put_irqrestore(&mite->lock, flags);
+}
+
+void a4l_mite_dma_disarm(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	unsigned chor;
+
+	/* disarm */
+	chor = CHOR_ABORT;
+	writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+}
+
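+/*
+ * (Re)build the DMA descriptor ring for a subdevice buffer: one
+ * link-chained descriptor per page, with the last descriptor pointing
+ * back to the first so the MITE can cycle through the buffer
+ * continuously.
+ */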
+int a4l_mite_buf_change(struct mite_dma_descriptor_ring *ring, struct a4l_subdevice *subd)
+{
+	struct a4l_buffer *buf = subd->buf;
+	unsigned int n_links;
+	int i;
+
+	if (ring->descriptors) {
+		pci_free_consistent(ring->pcidev,
+				    ring->n_links * sizeof(struct mite_dma_descriptor),
+				    ring->descriptors, ring->descriptors_dma_addr);
+	}
+	ring->descriptors = NULL;
+	ring->descriptors_dma_addr = 0;
+	ring->n_links = 0;
+
+	if (buf->size == 0) {
+		return 0;
+	}
+	n_links = buf->size >> PAGE_SHIFT;
+
+	MDPRINTK("ring->pcidev=%p, n_links=0x%04x\n", ring->pcidev, n_links);
+
+	ring->descriptors =
+		pci_alloc_consistent(ring->pcidev,
+				     n_links * sizeof(struct mite_dma_descriptor),
+				     &ring->descriptors_dma_addr);
+	if (!ring->descriptors) {
+		printk("MITE: ring buffer allocation failed\n");
+		return -ENOMEM;
+	}
+	ring->n_links = n_links;
+
+	for (i = 0; i < n_links; i++) {
+		ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE);
+		ring->descriptors[i].addr = cpu_to_le32(buf->pg_list[i]);
+		ring->descriptors[i].next =
+			cpu_to_le32(ring->descriptors_dma_addr +
+				    (i + 1) * sizeof(struct mite_dma_descriptor));
+	}
+
+	ring->descriptors[n_links - 1].next =
+		cpu_to_le32(ring->descriptors_dma_addr);
+
+	/* The barrier is meant to ensure that all the writes to the dma
+	   descriptors have completed before the dma controller is commanded
+	   to read them */
+	smp_wmb();
+
+	return 0;
+}
+
+void a4l_mite_prep_dma(struct mite_channel *mite_chan,
+		   unsigned int num_device_bits, unsigned int num_memory_bits)
+{
+	unsigned int chor, chcr, mcr, dcr, lkcr;
+	struct mite_struct *mite = mite_chan->mite;
+
+	MDPRINTK("a4l_mite_prep_dma ch%i\n", mite_chan->channel);
+
+	/* reset DMA and FIFO */
+	chor = CHOR_DMARESET | CHOR_FRESET;
+	writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+
+	/* short link chaining mode */
+	chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | CHCR_SET_DONE_IE |
+		CHCR_BURSTEN;
+	/*
+	 * Link Complete Interrupt: interrupt every time a link
+	 * in MITE_RING is completed. This can generate a lot of
+	 * extra interrupts, but right now we update the values
+	 * of buf_int_ptr and buf_int_count at each interrupt.  A
+	 * better method is to poll the MITE before each user
+	 * "read()" to calculate the number of bytes available.
+	 */
+	chcr |= CHCR_SET_LC_IE;
+	if (num_memory_bits == 32 && num_device_bits == 16) {
+		/* Doing a combined 32 and 16 bit byteswap gets the 16
+		   bit samples into the fifo in the right order.
+		   Tested doing 32 bit memory to 16 bit device
+		   transfers to the analog out of a pxi-6281, which
+		   has mite version = 1, type = 4.  This also works
+		   for dma reads from the counters on e-series boards.
+		*/
+		chcr |= CHCR_BYTE_SWAP_DEVICE | CHCR_BYTE_SWAP_MEMORY;
+	}
+
+	if (mite_chan->dir == A4L_INPUT) {
+		chcr |= CHCR_DEV_TO_MEM;
+	}
+	writel(chcr, mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
+
+	/* to/from memory */
+	mcr = CR_RL(64) | CR_ASEQUP;
+	switch (num_memory_bits) {
+	case 8:
+		mcr |= CR_PSIZE8;
+		break;
+	case 16:
+		mcr |= CR_PSIZE16;
+		break;
+	case 32:
+		mcr |= CR_PSIZE32;
+		break;
+	default:
+		__a4l_err("MITE: bug! "
+			  "invalid mem bit width for dma transfer\n");
+		break;
+	}
+	writel(mcr, mite->mite_io_addr + MITE_MCR(mite_chan->channel));
+
+	/* from/to device */
+	dcr = CR_RL(64) | CR_ASEQUP;
+	dcr |= CR_PORTIO | CR_AMDEVICE | CR_REQSDRQ(mite_chan->channel);
+	switch (num_device_bits) {
+	case 8:
+		dcr |= CR_PSIZE8;
+		break;
+	case 16:
+		dcr |= CR_PSIZE16;
+		break;
+	case 32:
+		dcr |= CR_PSIZE32;
+		break;
+	default:
+		__a4l_info("MITE: bug! "
+			   "invalid dev bit width for dma transfer\n");
+		break;
+	}
+	writel(dcr, mite->mite_io_addr + MITE_DCR(mite_chan->channel));
+
+	/* reset the DAR */
+	writel(0, mite->mite_io_addr + MITE_DAR(mite_chan->channel));
+
+	/* the link is 32bits */
+	lkcr = CR_RL(64) | CR_ASEQUP | CR_PSIZE32;
+	writel(lkcr, mite->mite_io_addr + MITE_LKCR(mite_chan->channel));
+
+	/* starting address for link chaining */
+	writel(mite_chan->ring->descriptors_dma_addr,
+	       mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
+
+	MDPRINTK("exit a4l_mite_prep_dma\n");
+}
+
+u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	return readl(mite->mite_io_addr + MITE_DAR(mite_chan->channel));
+}
+
+u32 a4l_mite_bytes_in_transit(struct mite_channel * mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	return readl(mite->mite_io_addr +
+		     MITE_FCR(mite_chan->channel)) & 0x000000FF;
+}
+
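+/*
+ * The device byte count (DAR) and the FIFO residue (FCR) cannot be
+ * sampled atomically, so the helpers below read them in opposite
+ * orders to obtain a lower or an upper bound on the number of bytes
+ * actually moved.
+ */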
+/* Returns lower bound for number of bytes transferred from device to memory */
+u32 a4l_mite_bytes_written_to_memory_lb(struct mite_channel * mite_chan)
+{
+	u32 device_byte_count;
+
+	device_byte_count = mite_device_bytes_transferred(mite_chan);
+	return device_byte_count - a4l_mite_bytes_in_transit(mite_chan);
+}
+
+/* Returns upper bound for number of bytes transferred from device to memory */
+u32 a4l_mite_bytes_written_to_memory_ub(struct mite_channel * mite_chan)
+{
+	u32 in_transit_count;
+
+	in_transit_count = a4l_mite_bytes_in_transit(mite_chan);
+	return mite_device_bytes_transferred(mite_chan) - in_transit_count;
+}
+
+/* Returns lower bound for number of bytes read from memory for transfer to device */
+u32 a4l_mite_bytes_read_from_memory_lb(struct mite_channel * mite_chan)
+{
+	u32 device_byte_count;
+
+	device_byte_count = mite_device_bytes_transferred(mite_chan);
+	return device_byte_count + a4l_mite_bytes_in_transit(mite_chan);
+}
+
+/* Returns upper bound for number of bytes read from memory for transfer to device */
+u32 a4l_mite_bytes_read_from_memory_ub(struct mite_channel * mite_chan)
+{
+	u32 in_transit_count;
+
+	in_transit_count = a4l_mite_bytes_in_transit(mite_chan);
+	return mite_device_bytes_transferred(mite_chan) + in_transit_count;
+}
+
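+/*
+ * Synchronize the Analogy buffer pointers with the DMA progress: the
+ * upper bound is used to detect an overrun of the free area, while
+ * only the lower bound (bytes certainly in memory) is committed to the
+ * reader.
+ */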
+int a4l_mite_sync_input_dma(struct mite_channel *mite_chan, struct a4l_subdevice *subd)
+{
+	unsigned int nbytes_lb, nbytes_ub;
+
+	nbytes_lb = a4l_mite_bytes_written_to_memory_lb(mite_chan);
+	nbytes_ub = a4l_mite_bytes_written_to_memory_ub(mite_chan);
+
+	if(a4l_buf_prepare_absput(subd, nbytes_ub) != 0) {
+		__a4l_err("MITE: DMA overwrite of free area\n");
+		return -EPIPE;
+	}
+
+	return a4l_buf_commit_absput(subd, nbytes_lb);
+}
+
+int a4l_mite_sync_output_dma(struct mite_channel *mite_chan, struct a4l_subdevice *subd)
+{
+	struct a4l_buffer *buf = subd->buf;
+	unsigned int nbytes_ub, nbytes_lb;
+	int err;
+
+	nbytes_lb = a4l_mite_bytes_read_from_memory_lb(mite_chan);
+	nbytes_ub = a4l_mite_bytes_read_from_memory_ub(mite_chan);
+
+	err = a4l_buf_prepare_absget(subd, nbytes_ub);
+	if(err < 0) {
+		__a4l_info("MITE: DMA underrun\n");
+		return -EPIPE;
+	}
+
+	err = a4l_buf_commit_absget(subd, nbytes_lb);
+
+	/* If the MITE has already transferred more than required, we
+	   can disable it */
+	if (test_bit(A4L_BUF_EOA_NR, &buf->flags))
+		writel(CHOR_STOP,
+		       mite_chan->mite->mite_io_addr +
+		       MITE_CHOR(mite_chan->channel));
+
+	return err;
+}
+
+u32 a4l_mite_get_status(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	u32 status;
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&mite->lock, flags);
+	status = readl(mite->mite_io_addr + MITE_CHSR(mite_chan->channel));
+	if (status & CHSR_DONE) {
+		mite_chan->done = 1;
+		writel(CHOR_CLRDONE,
+		       mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+	}
+	mmiowb();
+	rtdm_lock_put_irqrestore(&mite->lock, flags);
+	return status;
+}
+
+int a4l_mite_done(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	unsigned long flags;
+	int done;
+
+	a4l_mite_get_status(mite_chan);
+	rtdm_lock_get_irqsave(&mite->lock, flags);
+	done = mite_chan->done;
+	rtdm_lock_put_irqrestore(&mite->lock, flags);
+	return done;
+}
+
+#ifdef CONFIG_DEBUG_MITE
+
+static void a4l_mite_decode(const char *const bit_str[], unsigned int bits);
+
+/* names of bits in mite registers */
+
+static const char *const mite_CHOR_strings[] = {
+	"start", "cont", "stop", "abort",
+	"freset", "clrlc", "clrrb", "clrdone",
+	"clr_lpause", "set_lpause", "clr_send_tc",
+	"set_send_tc", "12", "13", "14",
+	"15", "16", "17", "18",
+	"19", "20", "21", "22",
+	"23", "24", "25", "26",
+	"27", "28", "29", "30",
+	"dmareset",
+};
+
+static const char *const mite_CHCR_strings[] = {
+	"continue", "ringbuff", "2", "3",
+	"4", "5", "6", "7",
+	"8", "9", "10", "11",
+	"12", "13", "bursten", "fifodis",
+	"clr_cont_rb_ie", "set_cont_rb_ie", "clr_lc_ie", "set_lc_ie",
+	"clr_drdy_ie", "set_drdy_ie", "clr_mrdy_ie", "set_mrdy_ie",
+	"clr_done_ie", "set_done_ie", "clr_sar_ie", "set_sar_ie",
+	"clr_linkp_ie", "set_linkp_ie", "clr_dma_ie", "set_dma_ie",
+};
+
+static const char *const mite_MCR_strings[] = {
+	"amdevice", "1", "2", "3",
+	"4", "5", "portio", "portvxi",
+	"psizebyte", "psizehalf (byte & half = word)", "aseqxp1", "11",
+	"12", "13", "blocken", "berhand",
+	"reqsintlim/reqs0", "reqs1", "reqs2", "rd32",
+	"rd512", "rl1", "rl2", "rl8",
+	"24", "25", "26", "27",
+	"28", "29", "30", "stopen",
+};
+
+static const char *const mite_DCR_strings[] = {
+	"amdevice", "1", "2", "3",
+	"4", "5", "portio", "portvxi",
+	"psizebyte", "psizehalf (byte & half = word)", "aseqxp1", "aseqxp2",
+	"aseqxp8", "13", "blocken", "berhand",
+	"reqsintlim", "reqs1", "reqs2", "rd32",
+	"rd512", "rl1", "rl2", "rl8",
+	"23", "24", "25", "27",
+	"28", "wsdevc", "wsdevs", "rwdevpack",
+};
+
+static const char *const mite_LKCR_strings[] = {
+	"amdevice", "1", "2", "3",
+	"4", "5", "portio", "portvxi",
+	"psizebyte", "psizehalf (byte & half = word)", "asequp", "aseqdown",
+	"12", "13", "14", "berhand",
+	"16", "17", "18", "rd32",
+	"rd512", "rl1", "rl2", "rl8",
+	"24", "25", "26", "27",
+	"28", "29", "30", "chngend",
+};
+
+static const char *const mite_CHSR_strings[] = {
+	"d.err0", "d.err1", "m.err0", "m.err1",
+	"l.err0", "l.err1", "drq0", "drq1",
+	"end", "xferr", "operr0", "operr1",
+	"stops", "habort", "sabort", "error",
+	"16", "conts_rb", "18", "linkc",
+	"20", "drdy", "22", "mrdy",
+	"24", "done", "26", "sars",
+	"28", "lpauses", "30", "int",
+};
+
+void a4l_mite_dump_regs(struct mite_channel *mite_chan)
+{
+	unsigned long mite_io_addr =
+		(unsigned long)mite_chan->mite->mite_io_addr;
+	unsigned long addr = 0;
+	unsigned long temp = 0;
+
+	printk("a4l_mite_dump_regs ch%i\n", mite_chan->channel);
+	printk("mite address is  =0x%08lx\n", mite_io_addr);
+
+	addr = mite_io_addr + MITE_CHOR(mite_chan->channel);
+	printk("mite status[CHOR] at 0x%08lx =0x%08lx\n", addr, temp =
+	       readl((void *)addr));
+	a4l_mite_decode(mite_CHOR_strings, temp);
+	addr = mite_io_addr + MITE_CHCR(mite_chan->channel);
+	printk("mite status[CHCR] at 0x%08lx =0x%08lx\n", addr, temp =
+	       readl((void *)addr));
+	a4l_mite_decode(mite_CHCR_strings, temp);
+	addr = mite_io_addr + MITE_TCR(mite_chan->channel);
+	printk("mite status[TCR] at 0x%08lx =0x%08x\n", addr,
+	       readl((void *)addr));
+	addr = mite_io_addr + MITE_MCR(mite_chan->channel);
+	printk("mite status[MCR] at 0x%08lx =0x%08lx\n", addr, temp =
+	       readl((void *)addr));
+	a4l_mite_decode(mite_MCR_strings, temp);
+
+	addr = mite_io_addr + MITE_MAR(mite_chan->channel);
+	printk("mite status[MAR] at 0x%08lx =0x%08x\n", addr,
+	       readl((void *)addr));
+	addr = mite_io_addr + MITE_DCR(mite_chan->channel);
+	printk("mite status[DCR] at 0x%08lx =0x%08lx\n", addr, temp =
+	       readl((void *)addr));
+	a4l_mite_decode(mite_DCR_strings, temp);
+	addr = mite_io_addr + MITE_DAR(mite_chan->channel);
+	printk("mite status[DAR] at 0x%08lx =0x%08x\n", addr,
+	       readl((void *)addr));
+	addr = mite_io_addr + MITE_LKCR(mite_chan->channel);
+	printk("mite status[LKCR] at 0x%08lx =0x%08lx\n", addr, temp =
+	       readl((void *)addr));
+	a4l_mite_decode(mite_LKCR_strings, temp);
+	addr = mite_io_addr + MITE_LKAR(mite_chan->channel);
+	printk("mite status[LKAR] at 0x%08lx =0x%08x\n", addr,
+	       readl((void *)addr));
+
+	addr = mite_io_addr + MITE_CHSR(mite_chan->channel);
+	printk("mite status[CHSR] at 0x%08lx =0x%08lx\n", addr, temp =
+	       readl((void *)addr));
+	a4l_mite_decode(mite_CHSR_strings, temp);
+	addr = mite_io_addr + MITE_FCR(mite_chan->channel);
+	printk("mite status[FCR] at 0x%08lx =0x%08x\n\n", addr,
+	       readl((void *)addr));
+}
+
+
+static void a4l_mite_decode(const char *const bit_str[], unsigned int bits)
+{
+	int i;
+
+	for (i = 31; i >= 0; i--) {
+		if (bits & (1 << i)) {
+			printk(" %s", bit_str[i]);
+		}
+	}
+	printk("\n");
+}
+
+#endif /* CONFIG_DEBUG_MITE */
+
+
+static int __init mite_init(void)
+{
+	int err;
+
+	/* Register the mite's PCI driver */
+	err = pci_register_driver(&mite_driver);
+
+	if(err == 0)
+		a4l_mite_list_devices();
+
+	return err;
+}
+
+static void __exit mite_cleanup(void)
+{
+
+	/* Unregister the PCI structure driver */
+	pci_unregister_driver(&mite_driver);
+
+	/* Just paranoia... */
+	while (!list_empty(&mite_devices)) {
+		struct list_head *this = mite_devices.next;
+		struct mite_struct *mite =
+			list_entry(this, struct mite_struct, list);
+
+		list_del(this);
+		kfree(mite);
+	}
+}
+
+MODULE_LICENSE("GPL");
+module_init(mite_init);
+module_exit(mite_cleanup);
+
+EXPORT_SYMBOL_GPL(a4l_mite_dma_arm);
+EXPORT_SYMBOL_GPL(a4l_mite_dma_disarm);
+EXPORT_SYMBOL_GPL(a4l_mite_sync_input_dma);
+EXPORT_SYMBOL_GPL(a4l_mite_sync_output_dma);
+EXPORT_SYMBOL_GPL(a4l_mite_setup);
+EXPORT_SYMBOL_GPL(a4l_mite_unsetup);
+EXPORT_SYMBOL_GPL(a4l_mite_list_devices);
+EXPORT_SYMBOL_GPL(a4l_mite_request_channel_in_range);
+EXPORT_SYMBOL_GPL(a4l_mite_release_channel);
+EXPORT_SYMBOL_GPL(a4l_mite_prep_dma);
+EXPORT_SYMBOL_GPL(a4l_mite_buf_change);
+EXPORT_SYMBOL_GPL(a4l_mite_bytes_written_to_memory_lb);
+EXPORT_SYMBOL_GPL(a4l_mite_bytes_written_to_memory_ub);
+EXPORT_SYMBOL_GPL(a4l_mite_bytes_read_from_memory_lb);
+EXPORT_SYMBOL_GPL(a4l_mite_bytes_read_from_memory_ub);
+EXPORT_SYMBOL_GPL(a4l_mite_bytes_in_transit);
+EXPORT_SYMBOL_GPL(a4l_mite_get_status);
+EXPORT_SYMBOL_GPL(a4l_mite_done);
+#ifdef CONFIG_DEBUG_MITE
+EXPORT_SYMBOL_GPL(a4l_mite_decode);
+EXPORT_SYMBOL_GPL(a4l_mite_dump_regs);
+#endif /* CONFIG_DEBUG_MITE */
+++ linux-patched/drivers/xenomai/analogy/national_instruments/mite.h	2022-03-21 12:58:31.115872130 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/ni_660x.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Hardware driver for NI Mite PCI interface chip
+ * @note Copyright (C) 1999 David A. Schleef <ds@schleef.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef __ANALOGY_NI_MITE_H__
+#define __ANALOGY_NI_MITE_H__
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <rtdm/analogy/device.h>
+
+#define PCI_VENDOR_ID_NATINST 0x1093
+#define PCI_MITE_SIZE 4096
+#define PCI_DAQ_SIZE 4096
+#define PCI_DAQ_SIZE_660X 8192
+#define PCIMIO_COMPAT
+#define MAX_MITE_DMA_CHANNELS 8
+
+#define TOP_OF_PAGE(x) ((x)|(~(PAGE_MASK)))
+
+struct mite_dma_descriptor {
+	u32 count;
+	u32 addr;
+	u32 next;
+	u32 dar;
+};
+
+struct mite_dma_descriptor_ring {
+	struct pci_dev *pcidev;
+	u32 n_links;
+	struct mite_dma_descriptor *descriptors;
+	dma_addr_t descriptors_dma_addr;
+};
+
+struct mite_channel {
+	struct mite_struct *mite;
+	u32 channel;
+	u32 dir;
+	u32 done;
+	struct mite_dma_descriptor_ring *ring;
+};
+
+struct mite_struct {
+	struct list_head list;
+	rtdm_lock_t lock;
+	u32 used;
+	u32 num_channels;
+
+	struct mite_channel channels[MAX_MITE_DMA_CHANNELS];
+	u32 channel_allocated[MAX_MITE_DMA_CHANNELS];
+
+	struct pci_dev *pcidev;
+	resource_size_t mite_phys_addr;
+	void *mite_io_addr;
+	resource_size_t daq_phys_addr;
+	void *daq_io_addr;
+};
+
+static inline
+struct mite_dma_descriptor_ring *mite_alloc_ring(struct	mite_struct *mite)
+{
+	struct mite_dma_descriptor_ring *ring =
+		kzalloc(sizeof(struct mite_dma_descriptor_ring), GFP_DMA);
+
+	if (ring == NULL)
+		return ring;
+
+	ring->pcidev = mite->pcidev;
+	if (ring->pcidev == NULL) {
+		kfree(ring);
+		return NULL;
+	}
+
+	return ring;
+};
+
+static inline void mite_free_ring(struct mite_dma_descriptor_ring *ring)
+{
+	if (ring) {
+		if (ring->descriptors) {
+			pci_free_consistent(
+				ring->pcidev,
+				ring->n_links *
+				sizeof(struct mite_dma_descriptor),
+				ring->descriptors, ring->descriptors_dma_addr);
+		}
+		kfree(ring);
+	}
+};
+
+static inline unsigned int mite_irq(struct mite_struct *mite)
+{
+	return mite->pcidev->irq;
+};
+static inline unsigned int mite_device_id(struct mite_struct *mite)
+{
+	return mite->pcidev->device;
+};
+
+int a4l_mite_setup(struct mite_struct *mite, int use_iodwbsr_1);
+void a4l_mite_unsetup(struct mite_struct *mite);
+void a4l_mite_list_devices(void);
+struct mite_struct * a4l_mite_find_device(int bus,
+					  int slot, unsigned short device_id);
+struct mite_channel *
+a4l_mite_request_channel_in_range(struct mite_struct *mite,
+				  struct mite_dma_descriptor_ring *ring,
+				  unsigned min_channel, unsigned max_channel);
+static inline struct mite_channel *mite_request_channel(struct mite_struct
+	*mite, struct mite_dma_descriptor_ring *ring)
+{
+	return a4l_mite_request_channel_in_range(mite, ring, 0,
+		mite->num_channels - 1);
+}
+void a4l_mite_release_channel(struct mite_channel *mite_chan);
+
+void a4l_mite_dma_arm(struct mite_channel *mite_chan);
+void a4l_mite_dma_disarm(struct mite_channel *mite_chan);
+int a4l_mite_sync_input_dma(struct mite_channel *mite_chan, struct a4l_subdevice *subd);
+int a4l_mite_sync_output_dma(struct mite_channel *mite_chan, struct a4l_subdevice *subd);
+u32 a4l_mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan);
+u32 a4l_mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan);
+u32 a4l_mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan);
+u32 a4l_mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan);
+u32 a4l_mite_bytes_in_transit(struct mite_channel *mite_chan);
+u32 a4l_mite_get_status(struct mite_channel *mite_chan);
+int a4l_mite_done(struct mite_channel *mite_chan);
+void a4l_mite_prep_dma(struct mite_channel *mite_chan,
+		   unsigned int num_device_bits, unsigned int num_memory_bits);
+int a4l_mite_buf_change(struct mite_dma_descriptor_ring *ring, struct a4l_subdevice *subd);
+
+#ifdef CONFIG_DEBUG_MITE
+void mite_print_chsr(unsigned int chsr);
+void a4l_mite_dump_regs(struct mite_channel *mite_chan);
+#endif
+
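+/* Per-channel register blocks start at offset 0x500 and are 0x100 apart. */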
+static inline int CHAN_OFFSET(int channel)
+{
+	return 0x500 + 0x100 * channel;
+};
+
+enum mite_registers {
+	/* The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
+	   written and read back.  The bits 0x1f always read as 1.
+	   The rest always read as zero. */
+	MITE_UNKNOWN_DMA_BURST_REG = 0x28,
+	MITE_IODWBSR = 0xc0,	//IO Device Window Base Size Register
+	MITE_IODWBSR_1 = 0xc4,	// IO Device Window Base Size Register 1
+	MITE_IODWCR_1 = 0xf4,
+	MITE_PCI_CONFIG_OFFSET = 0x300,
+	MITE_CSIGR = 0x460	//chip signature
+};
+static inline int MITE_CHOR(int channel)	// channel operation
+{
+	return CHAN_OFFSET(channel) + 0x0;
+};
+static inline int MITE_CHCR(int channel)	// channel control
+{
+	return CHAN_OFFSET(channel) + 0x4;
+};
+static inline int MITE_TCR(int channel)	// transfer count
+{
+	return CHAN_OFFSET(channel) + 0x8;
+};
+static inline int MITE_MCR(int channel)	// memory configuration
+{
+	return CHAN_OFFSET(channel) + 0xc;
+};
+static inline int MITE_MAR(int channel)	// memory address
+{
+	return CHAN_OFFSET(channel) + 0x10;
+};
+static inline int MITE_DCR(int channel)	// device configuration
+{
+	return CHAN_OFFSET(channel) + 0x14;
+};
+static inline int MITE_DAR(int channel)	// device address
+{
+	return CHAN_OFFSET(channel) + 0x18;
+};
+static inline int MITE_LKCR(int channel)	// link configuration
+{
+	return CHAN_OFFSET(channel) + 0x1c;
+};
+static inline int MITE_LKAR(int channel)	// link address
+{
+	return CHAN_OFFSET(channel) + 0x20;
+};
+static inline int MITE_LLKAR(int channel)	// see mite section of tnt5002 manual
+{
+	return CHAN_OFFSET(channel) + 0x24;
+};
+static inline int MITE_BAR(int channel)	// base address
+{
+	return CHAN_OFFSET(channel) + 0x28;
+};
+static inline int MITE_BCR(int channel)	// base count
+{
+	return CHAN_OFFSET(channel) + 0x2c;
+};
+static inline int MITE_SAR(int channel)	// ? address
+{
+	return CHAN_OFFSET(channel) + 0x30;
+};
+static inline int MITE_WSCR(int channel)	// ?
+{
+	return CHAN_OFFSET(channel) + 0x34;
+};
+static inline int MITE_WSER(int channel)	// ?
+{
+	return CHAN_OFFSET(channel) + 0x38;
+};
+static inline int MITE_CHSR(int channel)	// channel status
+{
+	return CHAN_OFFSET(channel) + 0x3c;
+};
+static inline int MITE_FCR(int channel)	// fifo count
+{
+	return CHAN_OFFSET(channel) + 0x40;
+};
+
+enum MITE_IODWBSR_bits {
+	WENAB = 0x80,		// window enable
+};
+
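+/* Encode an IO window size into the low 5 bits of IODWBSR_1: the value
+   stored is log2(size) - 1. */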
+static inline unsigned MITE_IODWBSR_1_WSIZE_bits(unsigned size)
+{
+	unsigned order = 0;
+	while (size >>= 1)
+		++order;
+	BUG_ON(order < 1);
+	return (order - 1) & 0x1f;
+}
+
+enum MITE_UNKNOWN_DMA_BURST_bits {
+	UNKNOWN_DMA_BURST_ENABLE_BITS = 0x600
+};
+
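+/* Helpers decoding the fields of the chip signature register (MITE_CSIGR). */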
+static inline int mite_csigr_version(u32 csigr_bits)
+{
+	return csigr_bits & 0xf;
+};
+static inline int mite_csigr_type(u32 csigr_bits)
+{				// original mite = 0, minimite = 1
+	return (csigr_bits >> 4) & 0xf;
+};
+static inline int mite_csigr_mmode(u32 csigr_bits)
+{				// mite mode, minimite = 1
+	return (csigr_bits >> 8) & 0x3;
+};
+static inline int mite_csigr_imode(u32 csigr_bits)
+{				// cpu port interface mode, pci = 0x3
+	return (csigr_bits >> 12) & 0x3;
+};
+static inline int mite_csigr_dmac(u32 csigr_bits)
+{				// number of dma channels
+	return (csigr_bits >> 16) & 0xf;
+};
+static inline int mite_csigr_wpdep(u32 csigr_bits)
+{				// write post fifo depth
+	unsigned int wpdep_bits = (csigr_bits >> 20) & 0x7;
+	if (wpdep_bits == 0)
+		return 0;
+	else
+		return 1 << (wpdep_bits - 1);
+};
+static inline int mite_csigr_wins(u32 csigr_bits)
+{
+	return (csigr_bits >> 24) & 0x1f;
+};
+static inline int mite_csigr_iowins(u32 csigr_bits)
+{				// number of io windows
+	return (csigr_bits >> 29) & 0x7;
+};
+
+enum MITE_MCR_bits {
+	MCRPON = 0,
+};
+
+enum MITE_DCR_bits {
+	DCR_NORMAL = (1 << 29),
+	DCRPON = 0,
+};
+
+enum MITE_CHOR_bits {
+	CHOR_DMARESET = (1 << 31),
+	CHOR_SET_SEND_TC = (1 << 11),
+	CHOR_CLR_SEND_TC = (1 << 10),
+	CHOR_SET_LPAUSE = (1 << 9),
+	CHOR_CLR_LPAUSE = (1 << 8),
+	CHOR_CLRDONE = (1 << 7),
+	CHOR_CLRRB = (1 << 6),
+	CHOR_CLRLC = (1 << 5),
+	CHOR_FRESET = (1 << 4),
+	CHOR_ABORT = (1 << 3),	/* stop without emptying fifo */
+	CHOR_STOP = (1 << 2),	/* stop after emptying fifo */
+	CHOR_CONT = (1 << 1),
+	CHOR_START = (1 << 0),
+	CHOR_PON = (CHOR_CLR_SEND_TC | CHOR_CLR_LPAUSE),
+};
+
+enum MITE_CHCR_bits {
+	CHCR_SET_DMA_IE = (1 << 31),
+	CHCR_CLR_DMA_IE = (1 << 30),
+	CHCR_SET_LINKP_IE = (1 << 29),
+	CHCR_CLR_LINKP_IE = (1 << 28),
+	CHCR_SET_SAR_IE = (1 << 27),
+	CHCR_CLR_SAR_IE = (1 << 26),
+	CHCR_SET_DONE_IE = (1 << 25),
+	CHCR_CLR_DONE_IE = (1 << 24),
+	CHCR_SET_MRDY_IE = (1 << 23),
+	CHCR_CLR_MRDY_IE = (1 << 22),
+	CHCR_SET_DRDY_IE = (1 << 21),
+	CHCR_CLR_DRDY_IE = (1 << 20),
+	CHCR_SET_LC_IE = (1 << 19),
+	CHCR_CLR_LC_IE = (1 << 18),
+	CHCR_SET_CONT_RB_IE = (1 << 17),
+	CHCR_CLR_CONT_RB_IE = (1 << 16),
+	CHCR_FIFODIS = (1 << 15),
+	CHCR_FIFO_ON = 0,
+	CHCR_BURSTEN = (1 << 14),
+	CHCR_NO_BURSTEN = 0,
+	CHCR_BYTE_SWAP_DEVICE = (1 << 6),
+	CHCR_BYTE_SWAP_MEMORY = (1 << 4),
+	CHCR_DIR = (1 << 3),
+	CHCR_DEV_TO_MEM = CHCR_DIR,
+	CHCR_MEM_TO_DEV = 0,
+	CHCR_NORMAL = (0 << 0),
+	CHCR_CONTINUE = (1 << 0),
+	CHCR_RINGBUFF = (2 << 0),
+	CHCR_LINKSHORT = (4 << 0),
+	CHCR_LINKLONG = (5 << 0),
+	CHCRPON =
+		(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
+		CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
+		CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE),
+};
+
+enum ConfigRegister_bits {
+	CR_REQS_MASK = 0x7 << 16,
+	CR_ASEQDONT = 0x0 << 10,
+	CR_ASEQUP = 0x1 << 10,
+	CR_ASEQDOWN = 0x2 << 10,
+	CR_ASEQ_MASK = 0x3 << 10,
+	CR_PSIZE8 = (1 << 8),
+	CR_PSIZE16 = (2 << 8),
+	CR_PSIZE32 = (3 << 8),
+	CR_PORTCPU = (0 << 6),
+	CR_PORTIO = (1 << 6),
+	CR_PORTVXI = (2 << 6),
+	CR_PORTMXI = (3 << 6),
+	CR_AMDEVICE = (1 << 0),
+};
+static inline int CR_REQS(int source)
+{
+	return (source & 0x7) << 16;
+};
+static inline int CR_REQSDRQ(unsigned drq_line)
+{
+	/* This also works on m-series when
+	   using channels (drq_line) 4 or 5. */
+	return CR_REQS((drq_line & 0x3) | 0x4);
+}
+static inline int CR_RL(unsigned int retry_limit)
+{
+	int value = 0;
+
+	while (retry_limit) {
+		retry_limit >>= 1;
+		value++;
+	}
+	if (value > 0x7)
+		__a4l_err("bug! retry_limit too large\n");
+
+	return (value & 0x7) << 21;
+}
+
+enum CHSR_bits {
+	CHSR_INT = (1 << 31),
+	CHSR_LPAUSES = (1 << 29),
+	CHSR_SARS = (1 << 27),
+	CHSR_DONE = (1 << 25),
+	CHSR_MRDY = (1 << 23),
+	CHSR_DRDY = (1 << 21),
+	CHSR_LINKC = (1 << 19),
+	CHSR_CONTS_RB = (1 << 17),
+	CHSR_ERROR = (1 << 15),
+	CHSR_SABORT = (1 << 14),
+	CHSR_HABORT = (1 << 13),
+	CHSR_STOPS = (1 << 12),
+	CHSR_OPERR_mask = (3 << 10),
+	CHSR_OPERR_NOERROR = (0 << 10),
+	CHSR_OPERR_FIFOERROR = (1 << 10),
+	CHSR_OPERR_LINKERROR = (1 << 10),	/* ??? */
+	CHSR_XFERR = (1 << 9),
+	CHSR_END = (1 << 8),
+	CHSR_DRQ1 = (1 << 7),
+	CHSR_DRQ0 = (1 << 6),
+	CHSR_LxERR_mask = (3 << 4),
+	CHSR_LBERR = (1 << 4),
+	CHSR_LRERR = (2 << 4),
+	CHSR_LOERR = (3 << 4),
+	CHSR_MxERR_mask = (3 << 2),
+	CHSR_MBERR = (1 << 2),
+	CHSR_MRERR = (2 << 2),
+	CHSR_MOERR = (3 << 2),
+	CHSR_DxERR_mask = (3 << 0),
+	CHSR_DBERR = (1 << 0),
+	CHSR_DRERR = (2 << 0),
+	CHSR_DOERR = (3 << 0),
+};
+
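+/* Reset the DMA logic and flush the FIFO of the given channel. */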
+static inline void mite_dma_reset(struct mite_channel *mite_chan)
+{
+	writel(CHOR_DMARESET | CHOR_FRESET,
+		mite_chan->mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+};
+
+#endif /* !__ANALOGY_NI_MITE_H__ */
+++ linux-patched/drivers/xenomai/analogy/national_instruments/ni_660x.c	2022-03-21 12:58:31.108872199 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/ni_stc.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * comedi/drivers/ni_660x.c
+ * Hardware driver for NI 660x devices
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * Driver: ni_660x
+ * Description: National Instruments 660x counter/timer boards
+ * Devices:
+ * [National Instruments] PCI-6601 (ni_660x), PCI-6602, PXI-6602,
+ * PXI-6608
+ * Author: J.P. Mellor <jpmellor@rose-hulman.edu>,
+ * Herman.Bruyninckx@mech.kuleuven.ac.be,
+ * Wim.Meeussen@mech.kuleuven.ac.be,
+ * Klaas.Gadeyne@mech.kuleuven.ac.be,
+ * Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Updated: Thu Oct 18 12:56:06 EDT 2007
+ * Status: experimental
+ *
+ * Encoders work.  Pulse generation (both single pulse and pulse train)
+ * works.  Buffered commands work for input but not output.
+ *
+ * References:
+ * DAQ 660x Register-Level Programmer Manual  (NI 370505A-01)
+ * DAQ 6601/6602 User Manual (NI 322137B-01)
+ */
+
+/*
+ * Integration with Xenomai/Analogy layer based on the
+ * comedi driver. Adaptation made by
+ *   Julien Delange <julien.delange@esa.int>
+ */
+
+#include <linux/interrupt.h>
+
+#include <linux/module.h>
+#include <rtdm/analogy/device.h>
+
+#include "../intel/8255.h"
+#include "ni_stc.h"
+#include "ni_mio.h"
+#include "ni_tio.h"
+#include "mite.h"
+
+enum io_direction {
+	DIRECTION_INPUT = 0,
+	DIRECTION_OUTPUT = 1,
+	DIRECTION_OPENDRAIN = 2
+};
+
+
+enum ni_660x_constants {
+	min_counter_pfi_chan = 8,
+	max_dio_pfi_chan = 31,
+	counters_per_chip = 4
+};
+
+struct ni_660x_subd_priv {
+	int io_bits;
+	unsigned int state;
+	uint16_t readback[2];
+	uint16_t config;
+	struct ni_gpct *counter;
+};
+
+#define NUM_PFI_CHANNELS 40
+/* Really there are only up to 3 dma channels, but the register layout
+   allows for 4 */
+#define MAX_DMA_CHANNEL 4
+
+static struct a4l_channels_desc chandesc_ni660x = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = NUM_PFI_CHANNELS,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, sizeof(sampl_t)},
+	},
+};
+
+#define subdev_priv ((struct ni_660x_subd_priv*)s->priv)
+
+/* See Register-Level Programmer Manual page 3.1 */
+enum NI_660x_Register {
+	G0InterruptAcknowledge,
+	G0StatusRegister,
+	G1InterruptAcknowledge,
+	G1StatusRegister,
+	G01StatusRegister,
+	G0CommandRegister,
+	STCDIOParallelInput,
+	G1CommandRegister,
+	G0HWSaveRegister,
+	G1HWSaveRegister,
+	STCDIOOutput,
+	STCDIOControl,
+	G0SWSaveRegister,
+	G1SWSaveRegister,
+	G0ModeRegister,
+	G01JointStatus1Register,
+	G1ModeRegister,
+	STCDIOSerialInput,
+	G0LoadARegister,
+	G01JointStatus2Register,
+	G0LoadBRegister,
+	G1LoadARegister,
+	G1LoadBRegister,
+	G0InputSelectRegister,
+	G1InputSelectRegister,
+	G0AutoincrementRegister,
+	G1AutoincrementRegister,
+	G01JointResetRegister,
+	G0InterruptEnable,
+	G1InterruptEnable,
+	G0CountingModeRegister,
+	G1CountingModeRegister,
+	G0SecondGateRegister,
+	G1SecondGateRegister,
+	G0DMAConfigRegister,
+	G0DMAStatusRegister,
+	G1DMAConfigRegister,
+	G1DMAStatusRegister,
+	G2InterruptAcknowledge,
+	G2StatusRegister,
+	G3InterruptAcknowledge,
+	G3StatusRegister,
+	G23StatusRegister,
+	G2CommandRegister,
+	G3CommandRegister,
+	G2HWSaveRegister,
+	G3HWSaveRegister,
+	G2SWSaveRegister,
+	G3SWSaveRegister,
+	G2ModeRegister,
+	G23JointStatus1Register,
+	G3ModeRegister,
+	G2LoadARegister,
+	G23JointStatus2Register,
+	G2LoadBRegister,
+	G3LoadARegister,
+	G3LoadBRegister,
+	G2InputSelectRegister,
+	G3InputSelectRegister,
+	G2AutoincrementRegister,
+	G3AutoincrementRegister,
+	G23JointResetRegister,
+	G2InterruptEnable,
+	G3InterruptEnable,
+	G2CountingModeRegister,
+	G3CountingModeRegister,
+	G3SecondGateRegister,
+	G2SecondGateRegister,
+	G2DMAConfigRegister,
+	G2DMAStatusRegister,
+	G3DMAConfigRegister,
+	G3DMAStatusRegister,
+	DIO32Input,
+	DIO32Output,
+	ClockConfigRegister,
+	GlobalInterruptStatusRegister,
+	DMAConfigRegister,
+	GlobalInterruptConfigRegister,
+	IOConfigReg0_1,
+	IOConfigReg2_3,
+	IOConfigReg4_5,
+	IOConfigReg6_7,
+	IOConfigReg8_9,
+	IOConfigReg10_11,
+	IOConfigReg12_13,
+	IOConfigReg14_15,
+	IOConfigReg16_17,
+	IOConfigReg18_19,
+	IOConfigReg20_21,
+	IOConfigReg22_23,
+	IOConfigReg24_25,
+	IOConfigReg26_27,
+	IOConfigReg28_29,
+	IOConfigReg30_31,
+	IOConfigReg32_33,
+	IOConfigReg34_35,
+	IOConfigReg36_37,
+	IOConfigReg38_39,
+	NumRegisters,
+};
+
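+/* Map a PFI channel to its IO config register; each register covers
+   two consecutive PFI channels. */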
+static inline unsigned IOConfigReg(unsigned pfi_channel)
+{
+	unsigned reg = IOConfigReg0_1 + pfi_channel / 2;
+	BUG_ON(reg > IOConfigReg38_39);
+	return reg;
+}
+
+enum ni_660x_register_width {
+	DATA_1B,
+	DATA_2B,
+	DATA_4B
+};
+
+enum ni_660x_register_direction {
+	NI_660x_READ,
+	NI_660x_WRITE,
+	NI_660x_READ_WRITE
+};
+
+enum ni_660x_pfi_output_select {
+	pfi_output_select_high_Z = 0,
+	pfi_output_select_counter = 1,
+	pfi_output_select_do = 2,
+	num_pfi_output_selects
+};
+
+enum ni_660x_subdevices {
+	NI_660X_DIO_SUBDEV = 1,
+	NI_660X_GPCT_SUBDEV_0 = 2
+};
+
+static inline unsigned NI_660X_GPCT_SUBDEV(unsigned index)
+{
+	return NI_660X_GPCT_SUBDEV_0 + index;
+}
+
+struct NI_660xRegisterData {
+
+	const char *name; /*  Register Name */
+	int offset; /*  Offset from base address from GPCT chip */
+	enum ni_660x_register_direction direction;
+	enum ni_660x_register_width size; /*  1 byte, 2 bytes, or 4 bytes */
+};
+
+static const struct NI_660xRegisterData registerData[NumRegisters] = {
+	{"G0 Interrupt Acknowledge", 0x004, NI_660x_WRITE, DATA_2B},
+	{"G0 Status Register", 0x004, NI_660x_READ, DATA_2B},
+	{"G1 Interrupt Acknowledge", 0x006, NI_660x_WRITE, DATA_2B},
+	{"G1 Status Register", 0x006, NI_660x_READ, DATA_2B},
+	{"G01 Status Register ", 0x008, NI_660x_READ, DATA_2B},
+	{"G0 Command Register", 0x00C, NI_660x_WRITE, DATA_2B},
+	{"STC DIO Parallel Input", 0x00E, NI_660x_READ, DATA_2B},
+	{"G1 Command Register", 0x00E, NI_660x_WRITE, DATA_2B},
+	{"G0 HW Save Register", 0x010, NI_660x_READ, DATA_4B},
+	{"G1 HW Save Register", 0x014, NI_660x_READ, DATA_4B},
+	{"STC DIO Output", 0x014, NI_660x_WRITE, DATA_2B},
+	{"STC DIO Control", 0x016, NI_660x_WRITE, DATA_2B},
+	{"G0 SW Save Register", 0x018, NI_660x_READ, DATA_4B},
+	{"G1 SW Save Register", 0x01C, NI_660x_READ, DATA_4B},
+	{"G0 Mode Register", 0x034, NI_660x_WRITE, DATA_2B},
+	{"G01 Joint Status 1 Register", 0x036, NI_660x_READ, DATA_2B},
+	{"G1 Mode Register", 0x036, NI_660x_WRITE, DATA_2B},
+	{"STC DIO Serial Input", 0x038, NI_660x_READ, DATA_2B},
+	{"G0 Load A Register", 0x038, NI_660x_WRITE, DATA_4B},
+	{"G01 Joint Status 2 Register", 0x03A, NI_660x_READ, DATA_2B},
+	{"G0 Load B Register", 0x03C, NI_660x_WRITE, DATA_4B},
+	{"G1 Load A Register", 0x040, NI_660x_WRITE, DATA_4B},
+	{"G1 Load B Register", 0x044, NI_660x_WRITE, DATA_4B},
+	{"G0 Input Select Register", 0x048, NI_660x_WRITE, DATA_2B},
+	{"G1 Input Select Register", 0x04A, NI_660x_WRITE, DATA_2B},
+	{"G0 Autoincrement Register", 0x088, NI_660x_WRITE, DATA_2B},
+	{"G1 Autoincrement Register", 0x08A, NI_660x_WRITE, DATA_2B},
+	{"G01 Joint Reset Register", 0x090, NI_660x_WRITE, DATA_2B},
+	{"G0 Interrupt Enable", 0x092, NI_660x_WRITE, DATA_2B},
+	{"G1 Interrupt Enable", 0x096, NI_660x_WRITE, DATA_2B},
+	{"G0 Counting Mode Register", 0x0B0, NI_660x_WRITE, DATA_2B},
+	{"G1 Counting Mode Register", 0x0B2, NI_660x_WRITE, DATA_2B},
+	{"G0 Second Gate Register", 0x0B4, NI_660x_WRITE, DATA_2B},
+	{"G1 Second Gate Register", 0x0B6, NI_660x_WRITE, DATA_2B},
+	{"G0 DMA Config Register", 0x0B8, NI_660x_WRITE, DATA_2B},
+	{"G0 DMA Status Register", 0x0B8, NI_660x_READ, DATA_2B},
+	{"G1 DMA Config Register", 0x0BA, NI_660x_WRITE, DATA_2B},
+	{"G1 DMA Status Register", 0x0BA, NI_660x_READ, DATA_2B},
+	{"G2 Interrupt Acknowledge", 0x104, NI_660x_WRITE, DATA_2B},
+	{"G2 Status Register", 0x104, NI_660x_READ, DATA_2B},
+	{"G3 Interrupt Acknowledge", 0x106, NI_660x_WRITE, DATA_2B},
+	{"G3 Status Register", 0x106, NI_660x_READ, DATA_2B},
+	{"G23 Status Register", 0x108, NI_660x_READ, DATA_2B},
+	{"G2 Command Register", 0x10C, NI_660x_WRITE, DATA_2B},
+	{"G3 Command Register", 0x10E, NI_660x_WRITE, DATA_2B},
+	{"G2 HW Save Register", 0x110, NI_660x_READ, DATA_4B},
+	{"G3 HW Save Register", 0x114, NI_660x_READ, DATA_4B},
+	{"G2 SW Save Register", 0x118, NI_660x_READ, DATA_4B},
+	{"G3 SW Save Register", 0x11C, NI_660x_READ, DATA_4B},
+	{"G2 Mode Register", 0x134, NI_660x_WRITE, DATA_2B},
+	{"G23 Joint Status 1 Register", 0x136, NI_660x_READ, DATA_2B},
+	{"G3 Mode Register", 0x136, NI_660x_WRITE, DATA_2B},
+	{"G2 Load A Register", 0x138, NI_660x_WRITE, DATA_4B},
+	{"G23 Joint Status 2 Register", 0x13A, NI_660x_READ, DATA_2B},
+	{"G2 Load B Register", 0x13C, NI_660x_WRITE, DATA_4B},
+	{"G3 Load A Register", 0x140, NI_660x_WRITE, DATA_4B},
+	{"G3 Load B Register", 0x144, NI_660x_WRITE, DATA_4B},
+	{"G2 Input Select Register", 0x148, NI_660x_WRITE, DATA_2B},
+	{"G3 Input Select Register", 0x14A, NI_660x_WRITE, DATA_2B},
+	{"G2 Autoincrement Register", 0x188, NI_660x_WRITE, DATA_2B},
+	{"G3 Autoincrement Register", 0x18A, NI_660x_WRITE, DATA_2B},
+	{"G23 Joint Reset Register", 0x190, NI_660x_WRITE, DATA_2B},
+	{"G2 Interrupt Enable", 0x192, NI_660x_WRITE, DATA_2B},
+	{"G3 Interrupt Enable", 0x196, NI_660x_WRITE, DATA_2B},
+	{"G2 Counting Mode Register", 0x1B0, NI_660x_WRITE, DATA_2B},
+	{"G3 Counting Mode Register", 0x1B2, NI_660x_WRITE, DATA_2B},
+	{"G3 Second Gate Register", 0x1B6, NI_660x_WRITE, DATA_2B},
+	{"G2 Second Gate Register", 0x1B4, NI_660x_WRITE, DATA_2B},
+	{"G2 DMA Config Register", 0x1B8, NI_660x_WRITE, DATA_2B},
+	{"G2 DMA Status Register", 0x1B8, NI_660x_READ, DATA_2B},
+	{"G3 DMA Config Register", 0x1BA, NI_660x_WRITE, DATA_2B},
+	{"G3 DMA Status Register", 0x1BA, NI_660x_READ, DATA_2B},
+	{"32 bit Digital Input", 0x414, NI_660x_READ, DATA_4B},
+	{"32 bit Digital Output", 0x510, NI_660x_WRITE, DATA_4B},
+	{"Clock Config Register", 0x73C, NI_660x_WRITE, DATA_4B},
+	{"Global Interrupt Status Register", 0x754, NI_660x_READ, DATA_4B},
+	{"DMA Configuration Register", 0x76C, NI_660x_WRITE, DATA_4B},
+	{"Global Interrupt Config Register", 0x770, NI_660x_WRITE, DATA_4B},
+	{"IO Config Register 0-1", 0x77C, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 2-3", 0x77E, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 4-5", 0x780, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 6-7", 0x782, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 8-9", 0x784, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 10-11", 0x786, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 12-13", 0x788, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 14-15", 0x78A, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 16-17", 0x78C, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 18-19", 0x78E, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 20-21", 0x790, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 22-23", 0x792, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 24-25", 0x794, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 26-27", 0x796, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 28-29", 0x798, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 30-31", 0x79A, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 32-33", 0x79C, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 34-35", 0x79E, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 36-37", 0x7A0, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 38-39", 0x7A2, NI_660x_READ_WRITE, DATA_2B}
+};
+
+/* Clock Config Register bit: CounterSwap must be set on the second TIO
+   chip so that its counters use their own pins (see set_tio_counterswap()). */
+enum clock_config_register_bits {
+	CounterSwap = 0x1 << 21
+};
+
+/* IO config register helpers: each register carries the output and input
+   routing bits of two PFI channels */
+static inline unsigned ioconfig_bitshift(unsigned pfi_channel)
+{
+	if (pfi_channel % 2)
+		return 0;
+	else
+		return 8;
+}
+
+static inline unsigned pfi_output_select_mask(unsigned pfi_channel)
+{
+	return 0x3 << ioconfig_bitshift(pfi_channel);
+}
+
+static inline unsigned pfi_output_select_bits(unsigned pfi_channel,
+					      unsigned output_select)
+{
+	return (output_select & 0x3) << ioconfig_bitshift(pfi_channel);
+}
+
+static inline unsigned pfi_input_select_mask(unsigned pfi_channel)
+{
+	return 0x7 << (4 + ioconfig_bitshift(pfi_channel));
+}
+
+static inline unsigned pfi_input_select_bits(unsigned pfi_channel,
+					     unsigned input_select)
+{
+	return (input_select & 0x7) << (4 + ioconfig_bitshift(pfi_channel));
+}
+
+/* Dma configuration register bits */
+static inline unsigned dma_select_mask(unsigned dma_channel)
+{
+	BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
+	return 0x1f << (8 * dma_channel);
+}
+
+enum dma_selection {
+	dma_selection_none = 0x1f,
+};
+
+static inline unsigned dma_selection_counter(unsigned counter_index)
+{
+	BUG_ON(counter_index >= counters_per_chip);
+	return counter_index;
+}
+
+static inline unsigned dma_select_bits(unsigned dma_channel, unsigned selection)
+{
+	BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
+	return (selection << (8 * dma_channel)) & dma_select_mask(dma_channel);
+}
+
+static inline unsigned dma_reset_bit(unsigned dma_channel)
+{
+	BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
+	return 0x80 << (8 * dma_channel);
+}
+
+enum global_interrupt_status_register_bits {
+	Counter_0_Int_Bit = 0x100,
+	Counter_1_Int_Bit = 0x200,
+	Counter_2_Int_Bit = 0x400,
+	Counter_3_Int_Bit = 0x800,
+	Cascade_Int_Bit = 0x20000000,
+	Global_Int_Bit = 0x80000000
+};
+
+enum global_interrupt_config_register_bits {
+	Cascade_Int_Enable_Bit = 0x20000000,
+	Global_Int_Polarity_Bit = 0x40000000,
+	Global_Int_Enable_Bit = 0x80000000
+};
+
+/* Offset of the GPCT chips from the base address of the card:
+   the first chip is at base address + 0x00, the second at + 0x800. */
+static const unsigned GPCT_OFFSET[2] = { 0x0, 0x800 };
+
+/* Board description */
+struct ni_660x_board {
+	unsigned short dev_id;	/* `lspci` will show you this */
+	const char *name;
+	unsigned n_chips;	/* total number of TIO chips */
+};
+
+static const struct ni_660x_board ni_660x_boards[] = {
+	{
+	 .dev_id = 0x2c60,
+	 .name = "PCI-6601",
+	 .n_chips = 1,
+	 },
+	{
+	 .dev_id = 0x1310,
+	 .name = "PCI-6602",
+	 .n_chips = 2,
+	 },
+	{
+	 .dev_id = 0x1360,
+	 .name = "PXI-6602",
+	 .n_chips = 2,
+	 },
+	{
+	 .dev_id = 0x2cc0,
+	 .name = "PXI-6608",
+	 .n_chips = 2,
+	 },
+};
+
+#define NI_660X_MAX_NUM_CHIPS 2
+#define NI_660X_MAX_NUM_COUNTERS (NI_660X_MAX_NUM_CHIPS * counters_per_chip)
+
+static const struct pci_device_id ni_660x_pci_table[] = {
+	{ PCI_VENDOR_ID_NATINST, 0x2c60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2cc0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, ni_660x_pci_table);
+
+struct ni_660x_private {
+	struct mite_struct *mite;
+	struct ni_gpct_device *counter_dev;
+	uint64_t pfi_direction_bits;
+
+	struct mite_dma_descriptor_ring
+	  *mite_rings[NI_660X_MAX_NUM_CHIPS][counters_per_chip];
+
+	rtdm_lock_t mite_channel_lock;
+	/* Interrupt_lock prevents races between interrupt and
+	   comedi_poll */
+	rtdm_lock_t interrupt_lock;
+	unsigned int dma_configuration_soft_copies[NI_660X_MAX_NUM_CHIPS];
+	rtdm_lock_t soft_reg_copy_lock;
+	unsigned short pfi_output_selects[NUM_PFI_CHANNELS];
+
+	struct ni_660x_board *board_ptr;
+};
+
+#undef devpriv
+#define devpriv ((struct ni_660x_private *)dev->priv)
+
+static inline struct ni_660x_private *private(struct a4l_device *dev)
+{
+	return (struct ni_660x_private*) dev->priv;
+}
+
+/* Initialized in ni_660x_find_device() */
+static inline const struct ni_660x_board *board(struct a4l_device *dev)
+{
+	return ((struct ni_660x_private*)dev->priv)->board_ptr;
+}
+
+#define n_ni_660x_boards ARRAY_SIZE(ni_660x_boards)
+
+static int ni_660x_attach(struct a4l_device *dev,
+					 a4l_lnkdesc_t *arg);
+static int ni_660x_detach(struct a4l_device *dev);
+static void init_tio_chip(struct a4l_device *dev, int chipset);
+static void ni_660x_select_pfi_output(struct a4l_device *dev,
+				      unsigned pfi_channel,
+				      unsigned output_select);
+
+static struct a4l_driver ni_660x_drv = {
+	.board_name = "analogy_ni_660x",
+	.driver_name = "ni_660x",
+	.owner = THIS_MODULE,
+	.attach = ni_660x_attach,
+	.detach = ni_660x_detach,
+	.privdata_size = sizeof(struct ni_660x_private),
+};
+
+static int ni_660x_set_pfi_routing(struct a4l_device *dev, unsigned chan,
+				   unsigned source);
+
+/* Possible instructions for a GPCT */
+static int ni_660x_GPCT_rinsn(
+			      struct a4l_subdevice *s,
+			      struct a4l_kernel_instruction *insn);
+static int ni_660x_GPCT_insn_config(
+				    struct a4l_subdevice *s,
+				    struct a4l_kernel_instruction *insn);
+static int ni_660x_GPCT_winsn(
+			      struct a4l_subdevice *s,
+			      struct a4l_kernel_instruction *insn);
+
+/* Possible instructions for Digital IO */
+static int ni_660x_dio_insn_config(
+	       struct a4l_subdevice *s,
+	       struct a4l_kernel_instruction *insn);
+static int ni_660x_dio_insn_bits(
+	     struct a4l_subdevice *s,
+	     struct a4l_kernel_instruction *insn);
+
+static inline unsigned ni_660x_num_counters(struct a4l_device *dev)
+{
+	return board(dev)->n_chips * counters_per_chip;
+}
+
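+/* Translate a generic NI TIO register identifier into the 660x-specific
+   register index used to address registerData[]. */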
+static enum NI_660x_Register ni_gpct_to_660x_register(enum ni_gpct_register reg)
+{
+
+	enum NI_660x_Register ni_660x_register;
+	switch (reg) {
+	case NITIO_G0_Autoincrement_Reg:
+		ni_660x_register = G0AutoincrementRegister;
+		break;
+	case NITIO_G1_Autoincrement_Reg:
+		ni_660x_register = G1AutoincrementRegister;
+		break;
+	case NITIO_G2_Autoincrement_Reg:
+		ni_660x_register = G2AutoincrementRegister;
+		break;
+	case NITIO_G3_Autoincrement_Reg:
+		ni_660x_register = G3AutoincrementRegister;
+		break;
+	case NITIO_G0_Command_Reg:
+		ni_660x_register = G0CommandRegister;
+		break;
+	case NITIO_G1_Command_Reg:
+		ni_660x_register = G1CommandRegister;
+		break;
+	case NITIO_G2_Command_Reg:
+		ni_660x_register = G2CommandRegister;
+		break;
+	case NITIO_G3_Command_Reg:
+		ni_660x_register = G3CommandRegister;
+		break;
+	case NITIO_G0_HW_Save_Reg:
+		ni_660x_register = G0HWSaveRegister;
+		break;
+	case NITIO_G1_HW_Save_Reg:
+		ni_660x_register = G1HWSaveRegister;
+		break;
+	case NITIO_G2_HW_Save_Reg:
+		ni_660x_register = G2HWSaveRegister;
+		break;
+	case NITIO_G3_HW_Save_Reg:
+		ni_660x_register = G3HWSaveRegister;
+		break;
+	case NITIO_G0_SW_Save_Reg:
+		ni_660x_register = G0SWSaveRegister;
+		break;
+	case NITIO_G1_SW_Save_Reg:
+		ni_660x_register = G1SWSaveRegister;
+		break;
+	case NITIO_G2_SW_Save_Reg:
+		ni_660x_register = G2SWSaveRegister;
+		break;
+	case NITIO_G3_SW_Save_Reg:
+		ni_660x_register = G3SWSaveRegister;
+		break;
+	case NITIO_G0_Mode_Reg:
+		ni_660x_register = G0ModeRegister;
+		break;
+	case NITIO_G1_Mode_Reg:
+		ni_660x_register = G1ModeRegister;
+		break;
+	case NITIO_G2_Mode_Reg:
+		ni_660x_register = G2ModeRegister;
+		break;
+	case NITIO_G3_Mode_Reg:
+		ni_660x_register = G3ModeRegister;
+		break;
+	case NITIO_G0_LoadA_Reg:
+		ni_660x_register = G0LoadARegister;
+		break;
+	case NITIO_G1_LoadA_Reg:
+		ni_660x_register = G1LoadARegister;
+		break;
+	case NITIO_G2_LoadA_Reg:
+		ni_660x_register = G2LoadARegister;
+		break;
+	case NITIO_G3_LoadA_Reg:
+		ni_660x_register = G3LoadARegister;
+		break;
+	case NITIO_G0_LoadB_Reg:
+		ni_660x_register = G0LoadBRegister;
+		break;
+	case NITIO_G1_LoadB_Reg:
+		ni_660x_register = G1LoadBRegister;
+		break;
+	case NITIO_G2_LoadB_Reg:
+		ni_660x_register = G2LoadBRegister;
+		break;
+	case NITIO_G3_LoadB_Reg:
+		ni_660x_register = G3LoadBRegister;
+		break;
+	case NITIO_G0_Input_Select_Reg:
+		ni_660x_register = G0InputSelectRegister;
+		break;
+	case NITIO_G1_Input_Select_Reg:
+		ni_660x_register = G1InputSelectRegister;
+		break;
+	case NITIO_G2_Input_Select_Reg:
+		ni_660x_register = G2InputSelectRegister;
+		break;
+	case NITIO_G3_Input_Select_Reg:
+		ni_660x_register = G3InputSelectRegister;
+		break;
+	case NITIO_G01_Status_Reg:
+		ni_660x_register = G01StatusRegister;
+		break;
+	case NITIO_G23_Status_Reg:
+		ni_660x_register = G23StatusRegister;
+		break;
+	case NITIO_G01_Joint_Reset_Reg:
+		ni_660x_register = G01JointResetRegister;
+		break;
+	case NITIO_G23_Joint_Reset_Reg:
+		ni_660x_register = G23JointResetRegister;
+		break;
+	case NITIO_G01_Joint_Status1_Reg:
+		ni_660x_register = G01JointStatus1Register;
+		break;
+	case NITIO_G23_Joint_Status1_Reg:
+		ni_660x_register = G23JointStatus1Register;
+		break;
+	case NITIO_G01_Joint_Status2_Reg:
+		ni_660x_register = G01JointStatus2Register;
+		break;
+	case NITIO_G23_Joint_Status2_Reg:
+		ni_660x_register = G23JointStatus2Register;
+		break;
+	case NITIO_G0_Counting_Mode_Reg:
+		ni_660x_register = G0CountingModeRegister;
+		break;
+	case NITIO_G1_Counting_Mode_Reg:
+		ni_660x_register = G1CountingModeRegister;
+		break;
+	case NITIO_G2_Counting_Mode_Reg:
+		ni_660x_register = G2CountingModeRegister;
+		break;
+	case NITIO_G3_Counting_Mode_Reg:
+		ni_660x_register = G3CountingModeRegister;
+		break;
+	case NITIO_G0_Second_Gate_Reg:
+		ni_660x_register = G0SecondGateRegister;
+		break;
+	case NITIO_G1_Second_Gate_Reg:
+		ni_660x_register = G1SecondGateRegister;
+		break;
+	case NITIO_G2_Second_Gate_Reg:
+		ni_660x_register = G2SecondGateRegister;
+		break;
+	case NITIO_G3_Second_Gate_Reg:
+		ni_660x_register = G3SecondGateRegister;
+		break;
+	case NITIO_G0_DMA_Config_Reg:
+		ni_660x_register = G0DMAConfigRegister;
+		break;
+	case NITIO_G0_DMA_Status_Reg:
+		ni_660x_register = G0DMAStatusRegister;
+		break;
+	case NITIO_G1_DMA_Config_Reg:
+		ni_660x_register = G1DMAConfigRegister;
+		break;
+	case NITIO_G1_DMA_Status_Reg:
+		ni_660x_register = G1DMAStatusRegister;
+		break;
+	case NITIO_G2_DMA_Config_Reg:
+		ni_660x_register = G2DMAConfigRegister;
+		break;
+	case NITIO_G2_DMA_Status_Reg:
+		ni_660x_register = G2DMAStatusRegister;
+		break;
+	case NITIO_G3_DMA_Config_Reg:
+		ni_660x_register = G3DMAConfigRegister;
+		break;
+	case NITIO_G3_DMA_Status_Reg:
+		ni_660x_register = G3DMAStatusRegister;
+		break;
+	case NITIO_G0_Interrupt_Acknowledge_Reg:
+		ni_660x_register = G0InterruptAcknowledge;
+		break;
+	case NITIO_G1_Interrupt_Acknowledge_Reg:
+		ni_660x_register = G1InterruptAcknowledge;
+		break;
+	case NITIO_G2_Interrupt_Acknowledge_Reg:
+		ni_660x_register = G2InterruptAcknowledge;
+		break;
+	case NITIO_G3_Interrupt_Acknowledge_Reg:
+		ni_660x_register = G3InterruptAcknowledge;
+		break;
+	case NITIO_G0_Status_Reg:
+		ni_660x_register = G0StatusRegister;
+		break;
+	case NITIO_G1_Status_Reg:
+		ni_660x_register = G1StatusRegister;
+		break;
+	case NITIO_G2_Status_Reg:
+		ni_660x_register = G2StatusRegister;
+		break;
+	case NITIO_G3_Status_Reg:
+		ni_660x_register = G3StatusRegister;
+		break;
+	case NITIO_G0_Interrupt_Enable_Reg:
+		ni_660x_register = G0InterruptEnable;
+		break;
+	case NITIO_G1_Interrupt_Enable_Reg:
+		ni_660x_register = G1InterruptEnable;
+		break;
+	case NITIO_G2_Interrupt_Enable_Reg:
+		ni_660x_register = G2InterruptEnable;
+		break;
+	case NITIO_G3_Interrupt_Enable_Reg:
+		ni_660x_register = G3InterruptEnable;
+		break;
+	default:
+		__a4l_err("%s: unhandled register 0x%x in switch.\n",
+			  __FUNCTION__, reg);
+		BUG();
+		return 0;
+	}
+	return ni_660x_register;
+}
+
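+/* MMIO write to a 660x register; the access width (16 or 32 bit) comes
+   from the registerData[] table. */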
+static inline void ni_660x_write_register(struct a4l_device *dev,
+					  unsigned chip_index, unsigned bits,
+					  enum NI_660x_Register reg)
+{
+	void *const write_address =
+	    private(dev)->mite->daq_io_addr + GPCT_OFFSET[chip_index] +
+	    registerData[reg].offset;
+
+	switch (registerData[reg].size) {
+	case DATA_2B:
+		writew(bits, write_address);
+		break;
+	case DATA_4B:
+		writel(bits, write_address);
+		break;
+	default:
+		__a4l_err("%s: %s: bug! unhandled case (reg=0x%x) in switch.\n",
+			  __FILE__, __FUNCTION__, reg);
+		BUG();
+		break;
+	}
+}
+
+static inline unsigned ni_660x_read_register(struct a4l_device *dev,
+					     unsigned chip_index,
+					     enum NI_660x_Register reg)
+{
+	void *const read_address =
+	    private(dev)->mite->daq_io_addr + GPCT_OFFSET[chip_index] +
+	    registerData[reg].offset;
+
+	switch (registerData[reg].size) {
+	case DATA_2B:
+		return readw(read_address);
+	case DATA_4B:
+		return readl(read_address);
+	default:
+		__a4l_err("%s: %s: bug! unhandled case (reg=0x%x) in switch.\n",
+			  __FILE__, __FUNCTION__, reg);
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static void ni_gpct_write_register(struct ni_gpct *counter,
+				   unsigned int bits, enum ni_gpct_register reg)
+{
+	struct a4l_device *dev = counter->counter_dev->dev;
+	enum NI_660x_Register ni_660x_register = ni_gpct_to_660x_register(reg);
+
+	ni_660x_write_register(dev, counter->chip_index, bits,
+			       ni_660x_register);
+}
+
+static unsigned ni_gpct_read_register(struct ni_gpct *counter,
+				      enum ni_gpct_register reg)
+{
+	struct a4l_device *dev = counter->counter_dev->dev;
+	enum NI_660x_Register ni_660x_register = ni_gpct_to_660x_register(reg);
+
+	return ni_660x_read_register(dev, counter->chip_index,
+				     ni_660x_register);
+}
+
+static inline
+struct mite_dma_descriptor_ring *mite_ring(struct ni_660x_private *priv,
+					   struct ni_gpct *counter)
+{
+
+	return priv->mite_rings[counter->chip_index][counter->counter_index];
+}
+
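+/* Route the DMA requests of the given counter to a MITE channel by
+   updating the soft copy of its chip's DMA configuration register. */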
+static inline
+void ni_660x_set_dma_channel(struct a4l_device *dev,
+			     unsigned int mite_channel, struct ni_gpct *counter)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&private(dev)->soft_reg_copy_lock, flags);
+	private(dev)->dma_configuration_soft_copies[counter->chip_index] &=
+	    ~dma_select_mask(mite_channel);
+	private(dev)->dma_configuration_soft_copies[counter->chip_index] |=
+	    dma_select_bits(mite_channel,
+			    dma_selection_counter(counter->counter_index));
+	ni_660x_write_register(dev, counter->chip_index,
+			       private(dev)->
+			       dma_configuration_soft_copies
+			       [counter->chip_index] |
+			       dma_reset_bit(mite_channel), DMAConfigRegister);
+	mmiowb();
+	rtdm_lock_put_irqrestore(&private(dev)->soft_reg_copy_lock, flags);
+}
+
+static inline
+void ni_660x_unset_dma_channel(struct a4l_device *dev,
+			       unsigned int mite_channel,
+			       struct ni_gpct *counter)
+{
+	unsigned long flags;
+	rtdm_lock_get_irqsave(&private(dev)->soft_reg_copy_lock, flags);
+	private(dev)->dma_configuration_soft_copies[counter->chip_index] &=
+	    ~dma_select_mask(mite_channel);
+	private(dev)->dma_configuration_soft_copies[counter->chip_index] |=
+	    dma_select_bits(mite_channel, dma_selection_none);
+	ni_660x_write_register(dev, counter->chip_index,
+			       private(dev)->
+			       dma_configuration_soft_copies
+			       [counter->chip_index], DMAConfigRegister);
+	mmiowb();
+	rtdm_lock_put_irqrestore(&private(dev)->soft_reg_copy_lock, flags);
+}
+
+static int ni_660x_request_mite_channel(struct a4l_device *dev,
+					struct ni_gpct *counter,
+					enum io_direction direction)
+{
+	unsigned long flags;
+	struct mite_channel *mite_chan;
+
+	rtdm_lock_get_irqsave(&private(dev)->mite_channel_lock, flags);
+	BUG_ON(counter->mite_chan);
+	mite_chan = mite_request_channel(private(dev)->mite,
+					 mite_ring(private(dev), counter));
+	if (mite_chan == NULL) {
+		rtdm_lock_put_irqrestore(&private(dev)->mite_channel_lock, flags);
+		a4l_err(dev,
+			"%s: failed to reserve mite dma channel for counter.\n",
+			__FUNCTION__);
+		return -EBUSY;
+	}
+	mite_chan->dir = direction;
+	a4l_ni_tio_set_mite_channel(counter, mite_chan);
+	ni_660x_set_dma_channel(dev, mite_chan->channel, counter);
+	rtdm_lock_put_irqrestore(&private(dev)->mite_channel_lock, flags);
+	return 0;
+}
+
+void ni_660x_release_mite_channel(struct a4l_device *dev,
+				  struct ni_gpct *counter)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&private(dev)->mite_channel_lock, flags);
+	if (counter->mite_chan) {
+		struct mite_channel *mite_chan = counter->mite_chan;
+
+		ni_660x_unset_dma_channel(dev, mite_chan->channel, counter);
+		a4l_ni_tio_set_mite_channel(counter, NULL);
+		a4l_mite_release_channel(mite_chan);
+	}
+	rtdm_lock_put_irqrestore(&private(dev)->mite_channel_lock, flags);
+}
+
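+/* Command path: reserve a MITE DMA channel for the counter, then hand
+   the command over to the generic TIO layer. */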
+static int ni_660x_cmd(struct a4l_subdevice *s, struct a4l_cmd_desc* cmd)
+{
+	int retval;
+
+	struct ni_gpct *counter = subdev_priv->counter;
+
+	retval = ni_660x_request_mite_channel(s->dev, counter, A4L_INPUT);
+	if (retval) {
+		a4l_err(s->dev,
+			"%s: no dma channel available for use by counter",
+			__FUNCTION__);
+		return retval;
+	}
+
+	a4l_ni_tio_acknowledge_and_confirm (counter, NULL, NULL, NULL, NULL);
+	retval = a4l_ni_tio_cmd(counter, cmd);
+
+	return retval;
+}
+
+static int ni_660x_cmdtest(struct a4l_subdevice *s, struct a4l_cmd_desc *cmd)
+{
+	struct ni_gpct *counter = subdev_priv->counter;
+	return a4l_ni_tio_cmdtest(counter, cmd);
+}
+
+static int ni_660x_cancel(struct a4l_subdevice *s)
+{
+	struct ni_gpct *counter = subdev_priv->counter;
+	int retval;
+
+	retval = a4l_ni_tio_cancel(counter);
+	ni_660x_release_mite_channel(s->dev, counter);
+	return retval;
+}
+
+static void set_tio_counterswap(struct a4l_device *dev, int chipset)
+{
+	/* See P. 3.5 of the Register-Level Programming manual.  The
+	   CounterSwap bit has to be set on the second chip, otherwise
+	   it will try to use the same pins as the first chip.
+	 */
+
+	if (chipset)
+		ni_660x_write_register(dev,
+				       chipset,
+				       CounterSwap, ClockConfigRegister);
+	else
+		ni_660x_write_register(dev,
+				       chipset, 0, ClockConfigRegister);
+}
+
+static void ni_660x_handle_gpct_interrupt(struct a4l_device *dev,
+					  struct a4l_subdevice *s)
+{
+	struct a4l_buffer *buf = s->buf;
+
+	a4l_ni_tio_handle_interrupt(subdev_priv->counter, dev);
+	/* Cancel the acquisition on end-of-acquisition or error,
+	   otherwise simply notify the buffer layer. */
+	if (test_bit(A4L_BUF_EOA_NR, &buf->flags) ||
+	    test_bit(A4L_BUF_ERROR_NR, &buf->flags))
+		ni_660x_cancel(s);
+	else
+		a4l_buf_evt(s, 0);
+}
+
+static int ni_660x_interrupt(unsigned int irq, void *d)
+{
+	struct a4l_device *dev = d;
+	struct a4l_subdevice *s;
+	unsigned long flags;
+
+	/* Ignore interrupts received while the device is not (yet) attached */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags))
+		return -ENOENT;
+
+	/* Lock to avoid race with comedi_poll */
+	rtdm_lock_get_irqsave(&private(dev)->interrupt_lock, flags);
+	smp_mb();
+
+	list_for_each_entry(s, &dev->subdvsq, list)
+		ni_660x_handle_gpct_interrupt(dev, s);
+
+	rtdm_lock_put_irqrestore(&private(dev)->interrupt_lock, flags);
+	return 0;
+}
+
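+/* Allocate one DMA descriptor ring per counter (n_chips x counters_per_chip). */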
+static int ni_660x_alloc_mite_rings(struct a4l_device *dev)
+{
+	unsigned int i;
+	unsigned int j;
+
+	for (i = 0; i < board(dev)->n_chips; ++i) {
+		for (j = 0; j < counters_per_chip; ++j) {
+			private(dev)->mite_rings[i][j] =
+				mite_alloc_ring(private(dev)->mite);
+			if (private(dev)->mite_rings[i][j] == NULL)
+				return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+static void ni_660x_free_mite_rings(struct a4l_device *dev)
+{
+	unsigned int i;
+	unsigned int j;
+
+	for (i = 0; i < board(dev)->n_chips; ++i)
+		for (j = 0; j < counters_per_chip; ++j)
+			mite_free_ring(private(dev)->mite_rings[i][j]);
+}
+
+
+static int __init driver_ni_660x_init_module(void)
+{
+	return a4l_register_drv (&ni_660x_drv);
+}
+
+static void __exit driver_ni_660x_cleanup_module(void)
+{
+	a4l_unregister_drv (&ni_660x_drv);
+}
+
+module_init(driver_ni_660x_init_module);
+module_exit(driver_ni_660x_cleanup_module);
+
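+/* Attach: locate the MITE interface matching the requested bus/slot,
+   map it, register the subdevices (unused, DIO, one per counter) and
+   install the shared interrupt handler. */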
+static int ni_660x_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	struct a4l_subdevice *s;
+	int ret;
+	int err;
+	int bus, slot;
+	unsigned i;
+	int nsubdev = 0;
+	unsigned global_interrupt_config_bits;
+	struct mite_struct *mitedev;
+	struct ni_660x_board* boardptr = NULL;
+
+	ret = 0;
+	bus = slot = 0;
+	mitedev = NULL;
+	nsubdev = 0;
+
+	if(arg->opts == NULL || arg->opts_size == 0)
+		bus = slot = 0;
+	else {
+		bus = arg->opts_size >= sizeof(unsigned long) ?
+			((unsigned long *)arg->opts)[0] : 0;
+		slot = arg->opts_size >= sizeof(unsigned long) * 2 ?
+			((unsigned long *)arg->opts)[1] : 0;
+	}
+
+	for (i = 0; ( i < n_ni_660x_boards ) && ( mitedev == NULL ); i++) {
+		mitedev  = a4l_mite_find_device(bus, slot,
+						ni_660x_boards[i].dev_id);
+		boardptr = (struct ni_660x_board*) &ni_660x_boards[i];
+	}
+
+
+	if(mitedev == NULL) {
+		a4l_info(dev, "mite device not found\n");
+		return -ENOENT;
+	}
+
+	a4l_info(dev, "Board found (name=%s), continue initialization ...",
+		 boardptr->name);
+
+	private(dev)->mite      = mitedev;
+	private(dev)->board_ptr = boardptr;
+
+	rtdm_lock_init(&private(dev)->mite_channel_lock);
+	rtdm_lock_init(&private(dev)->interrupt_lock);
+	rtdm_lock_init(&private(dev)->soft_reg_copy_lock);
+	for (i = 0; i < NUM_PFI_CHANNELS; ++i) {
+		private(dev)->pfi_output_selects[i] = pfi_output_select_counter;
+	}
+
+	ret = a4l_mite_setup(private(dev)->mite, 1);
+	if (ret < 0) {
+		a4l_err(dev, "%s: error setting up mite\n", __FUNCTION__);
+		return ret;
+	}
+
+	ret = ni_660x_alloc_mite_rings(dev);
+	if (ret < 0) {
+		a4l_err(dev, "%s: error setting up mite rings\n", __FUNCTION__);
+		return ret;
+	}
+
+	/* Setup first subdevice */
+	s = a4l_alloc_subd(sizeof(struct ni_660x_subd_priv), NULL);
+	if (s == NULL)
+		return -ENOMEM;
+
+	s->flags = A4L_SUBD_UNUSED;
+
+	err = a4l_add_subd(dev, s);
+	if (err != nsubdev) {
+		a4l_info(dev, "cannot add first subdevice, returns %d, expect %d\n", err, i);
+		return err;
+	}
+
+	nsubdev++;
+
+	/* Setup second subdevice */
+	s = a4l_alloc_subd(sizeof(struct ni_660x_subd_priv), NULL);
+	if (s == NULL) {
+		a4l_info(dev, "cannot allocate second subdevice\n");
+		return -ENOMEM;
+	}
+
+	s->flags          = A4L_SUBD_DIO;
+	s->flags         |= A4L_SUBD_CMD;
+	s->chan_desc      = &chandesc_ni660x;
+	s->rng_desc       = &range_digital;
+	s->insn_bits      = ni_660x_dio_insn_bits;
+	s->insn_config    = ni_660x_dio_insn_config;
+	s->dev            = dev;
+	subdev_priv->io_bits = 0;
+	ni_660x_write_register(dev, 0, 0, STCDIOControl);
+
+	err = a4l_add_subd(dev, s);
+	if (err != nsubdev)
+		return err;
+
+	nsubdev++;
+
+	private(dev)->counter_dev =
+		a4l_ni_gpct_device_construct(dev,
+					     &ni_gpct_write_register,
+					     &ni_gpct_read_register,
+					     ni_gpct_variant_660x,
+					     ni_660x_num_counters (dev));
+	if (private(dev)->counter_dev == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < ni_660x_num_counters(dev); ++i) {
+		/* TODO: check why there are kmalloc here... and in pcimio */
+		private(dev)->counter_dev->counters[i] =
+			kmalloc(sizeof(struct ni_gpct), GFP_KERNEL);
+		private(dev)->counter_dev->counters[i]->counter_dev =
+			private(dev)->counter_dev;
+		rtdm_lock_init(&(private(dev)->counter_dev->counters[i]->lock));
+	}
+
+	for (i = 0; i < NI_660X_MAX_NUM_COUNTERS; ++i) {
+		if (i < ni_660x_num_counters(dev)) {
+			/* Setup other subdevice */
+			s = a4l_alloc_subd(sizeof(struct ni_660x_subd_priv), NULL);
+
+			if (s == NULL)
+				return -ENOMEM;
+
+			s->flags             = A4L_SUBD_COUNTER;
+			s->chan_desc         = rtdm_malloc(sizeof(struct a4l_channels_desc));
+			if (s->chan_desc == NULL)
+				return -ENOMEM;
+			s->chan_desc->length = 3;
+			s->insn_read         = ni_660x_GPCT_rinsn;
+			s->insn_write        = ni_660x_GPCT_winsn;
+			s->insn_config       = ni_660x_GPCT_insn_config;
+			s->do_cmd            = &ni_660x_cmd;
+			s->do_cmdtest        = &ni_660x_cmdtest;
+			s->cancel            = &ni_660x_cancel;
+
+			subdev_priv->counter = private(dev)->counter_dev->counters[i];
+
+			private(dev)->counter_dev->counters[i]->chip_index =
+				i / counters_per_chip;
+			private(dev)->counter_dev->counters[i]->counter_index =
+				i % counters_per_chip;
+		} else {
+			s = a4l_alloc_subd(sizeof(struct ni_660x_subd_priv), NULL);
+			if (s == NULL)
+				return -ENOMEM;
+			s->flags = A4L_SUBD_UNUSED;
+		}
+
+		err = a4l_add_subd(dev, s);
+
+		if (err != nsubdev)
+			return err;
+
+		nsubdev++;
+	}
+
+	for (i = 0; i < board(dev)->n_chips; ++i)
+		init_tio_chip(dev, i);
+
+	for (i = 0; i < ni_660x_num_counters(dev); ++i)
+		a4l_ni_tio_init_counter(private(dev)->counter_dev->counters[i]);
+
+	for (i = 0; i < NUM_PFI_CHANNELS; ++i) {
+		if (i < min_counter_pfi_chan)
+			ni_660x_set_pfi_routing(dev, i, pfi_output_select_do);
+		else
+			ni_660x_set_pfi_routing(dev, i,
+						pfi_output_select_counter);
+		ni_660x_select_pfi_output(dev, i, pfi_output_select_high_Z);
+	}
+
+
+	/* To be safe, set counterswap bits on tio chips after all the
+	   counter outputs have been set to high impedance mode */
+
+	for (i = 0; i < board(dev)->n_chips; ++i)
+		set_tio_counterswap(dev, i);
+
+	ret = a4l_request_irq(dev,
+			      mite_irq(private(dev)->mite),
+			      ni_660x_interrupt, RTDM_IRQTYPE_SHARED, dev);
+
+	if (ret < 0) {
+		a4l_err(dev, "%s: IRQ not available\n", __FUNCTION__);
+		return ret;
+	}
+
+	global_interrupt_config_bits = Global_Int_Enable_Bit;
+	if (board(dev)->n_chips > 1)
+		global_interrupt_config_bits |= Cascade_Int_Enable_Bit;
+
+	ni_660x_write_register(dev, 0, global_interrupt_config_bits,
+			       GlobalInterruptConfigRegister);
+
+	a4l_info(dev, "attach succeed, ready to be used\n");
+
+	return 0;
+}
+
+static int ni_660x_detach(struct a4l_device *dev)
+{
+	int i;
+
+	a4l_info(dev, "begin to detach the driver ...");
+
+	/* Free irq */
+	if (a4l_get_irq(dev) != A4L_IRQ_UNUSED)
+		a4l_free_irq(dev, a4l_get_irq(dev));
+
+	if (dev->priv) {
+
+		if (private(dev)->counter_dev) {
+
+			for (i = 0; i < ni_660x_num_counters(dev); ++i)
+				if ((private(dev)->counter_dev->counters[i]) != NULL)
+					kfree (private(dev)->counter_dev->counters[i]);
+
+			a4l_ni_gpct_device_destroy(private(dev)->counter_dev);
+		}
+
+		if (private(dev)->mite) {
+			ni_660x_free_mite_rings(dev);
+			a4l_mite_unsetup(private(dev)->mite);
+		}
+	}
+
+	a4l_info(dev, "driver detached !\n");
+
+	return 0;
+}
+
+static int ni_660x_GPCT_rinsn(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn)
+{
+	return a4l_ni_tio_rinsn(subdev_priv->counter, insn);
+}
+
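+/* Reset the per-chip DMA routing (all channels unselected) and clear
+   every IO config register. */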
+static void init_tio_chip(struct a4l_device *dev, int chipset)
+{
+	unsigned int i;
+
+	/*  Init dma configuration register */
+	private(dev)->dma_configuration_soft_copies[chipset] = 0;
+	for (i = 0; i < MAX_DMA_CHANNEL; ++i) {
+		private(dev)->dma_configuration_soft_copies[chipset] |=
+		    dma_select_bits(i, dma_selection_none) & dma_select_mask(i);
+	}
+
+	ni_660x_write_register(dev, chipset,
+			       private(dev)->
+			       dma_configuration_soft_copies[chipset],
+			       DMAConfigRegister);
+
+	for (i = 0; i < NUM_PFI_CHANNELS; ++i)
+		ni_660x_write_register(dev, chipset, 0, IOConfigReg(i));
+}
+
+static int ni_660x_GPCT_insn_config(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn)
+{
+	return a4l_ni_tio_insn_config (subdev_priv->counter, insn);
+}
+
+static int ni_660x_GPCT_winsn(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn)
+{
+	return a4l_ni_tio_winsn(subdev_priv->counter, insn);
+}
+
+static int ni_660x_dio_insn_bits(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn)
+{
+	unsigned int* data = (unsigned int*) insn->data;
+	unsigned int base_bitfield_channel = CR_CHAN(insn->chan_desc);
+
+	/*  Check if we have to write some bits */
+	if (data[0]) {
+		subdev_priv->state &= ~(data[0] << base_bitfield_channel);
+		subdev_priv->state |= (data[0] & data[1]) << base_bitfield_channel;
+		/* Write out the new digital output lines */
+		ni_660x_write_register(s->dev, 0, subdev_priv->state, DIO32Output);
+	}
+
+	/* On return, data[1] contains the value of the digital input
+	   and output lines. */
+	data[1] = ni_660x_read_register(s->dev, 0, DIO32Input) >>
+		base_bitfield_channel;
+
+	return 0;
+}
+
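+/* Drive a PFI line from the requested source.  On dual-chip boards the
+   outputs of counters 4-7 live on the second chip, so the other chip's
+   output is parked in high impedance first. */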
+static void ni_660x_select_pfi_output(struct a4l_device *dev,
+				      unsigned pfi_channel,
+				      unsigned output_select)
+{
+	static const unsigned counter_4_7_first_pfi = 8;
+	static const unsigned counter_4_7_last_pfi = 23;
+	unsigned active_chipset = 0;
+	unsigned idle_chipset = 0;
+	unsigned active_bits;
+	unsigned idle_bits;
+
+	if (board(dev)->n_chips > 1) {
+		if (output_select == pfi_output_select_counter &&
+		    pfi_channel >= counter_4_7_first_pfi &&
+		    pfi_channel <= counter_4_7_last_pfi) {
+			active_chipset = 1;
+			idle_chipset = 0;
+		} else {
+			active_chipset = 0;
+			idle_chipset = 1;
+		}
+	}
+
+	if (idle_chipset != active_chipset) {
+
+		idle_bits = ni_660x_read_register(dev, idle_chipset,
+						 IOConfigReg(pfi_channel));
+		idle_bits &= ~pfi_output_select_mask(pfi_channel);
+		idle_bits |=
+		    pfi_output_select_bits(pfi_channel,
+					   pfi_output_select_high_Z);
+		ni_660x_write_register(dev, idle_chipset, idle_bits,
+				       IOConfigReg(pfi_channel));
+	}
+
+	active_bits =
+	    ni_660x_read_register(dev, active_chipset,
+				  IOConfigReg(pfi_channel));
+	active_bits &= ~pfi_output_select_mask(pfi_channel);
+	active_bits |= pfi_output_select_bits(pfi_channel, output_select);
+	ni_660x_write_register(dev, active_chipset, active_bits,
+			       IOConfigReg(pfi_channel));
+}
+
+static int ni_660x_set_pfi_routing(struct a4l_device *dev, unsigned chan,
+				   unsigned source)
+{
+	BUG_ON(chan >= NUM_PFI_CHANNELS);
+
+	if (source > num_pfi_output_selects)
+		return -EINVAL;
+	if (source == pfi_output_select_high_Z)
+		return -EINVAL;
+	if (chan < min_counter_pfi_chan) {
+		if (source == pfi_output_select_counter)
+			return -EINVAL;
+	} else if (chan > max_dio_pfi_chan) {
+		if (source == pfi_output_select_do)
+			return -EINVAL;
+	}
+
+	private(dev)->pfi_output_selects[chan] = source;
+	if (private(dev)->pfi_direction_bits & (((uint64_t) 1) << chan))
+		ni_660x_select_pfi_output(dev, chan,
+					  private(dev)->
+					  pfi_output_selects[chan]);
+	return 0;
+}
+
+static unsigned ni_660x_get_pfi_routing(struct a4l_device *dev,
+					unsigned chan)
+{
+	BUG_ON(chan >= NUM_PFI_CHANNELS);
+	return private(dev)->pfi_output_selects[chan];
+}
+
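+/* Program the input filter selection of a PFI line in its IO config register. */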
+static void ni660x_config_filter(struct a4l_device *dev,
+				 unsigned pfi_channel,
+				 int filter)
+{
+	unsigned int bits;
+
+	bits = ni_660x_read_register(dev, 0, IOConfigReg(pfi_channel));
+	bits &= ~pfi_input_select_mask(pfi_channel);
+	bits |= pfi_input_select_bits(pfi_channel, filter);
+	ni_660x_write_register(dev, 0, bits, IOConfigReg(pfi_channel));
+}
+
+static int ni_660x_dio_insn_config(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn)
+{
+	unsigned int* data = insn->data;
+	int chan = CR_CHAN(insn->chan_desc);
+	struct a4l_device* dev = s->dev;
+
+	if (data == NULL)
+		return -EINVAL;
+
+	/* The input or output configuration of each digital line is
+	 * configured by a special insn_config instruction.  chanspec
+	 * contains the channel to be changed, and data[0] contains the
+	 * value COMEDI_INPUT or COMEDI_OUTPUT. */
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		private(dev)->pfi_direction_bits |= ((uint64_t) 1) << chan;
+		ni_660x_select_pfi_output(dev, chan,
+					  private(dev)->
+					  pfi_output_selects[chan]);
+		break;
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		private(dev)->pfi_direction_bits &= ~(((uint64_t) 1) << chan);
+		ni_660x_select_pfi_output(dev, chan, pfi_output_select_high_Z);
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] =
+		    (private(dev)->pfi_direction_bits &
+		     (((uint64_t) 1) << chan)) ? A4L_OUTPUT : A4L_INPUT;
+		return 0;
+	case A4L_INSN_CONFIG_SET_ROUTING:
+		return ni_660x_set_pfi_routing(dev, chan, data[1]);
+	case A4L_INSN_CONFIG_GET_ROUTING:
+		data[1] = ni_660x_get_pfi_routing(dev, chan);
+		break;
+	case A4L_INSN_CONFIG_FILTER:
+		ni660x_config_filter(dev, chan, data[1]);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+MODULE_DESCRIPTION("Analogy driver for NI660x series cards");
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/analogy/national_instruments/ni_stc.h	2022-03-21 12:58:31.100872277 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Register descriptions for NI DAQ-STC chip
+ *
+ * Copyright (C) 1998-9 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this code; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * References:
+ * 340934b.pdf  DAQ-STC reference manual
+ *
+ */
+#ifndef __ANALOGY_NI_STC_H__
+#define __ANALOGY_NI_STC_H__
+
+#include "ni_tio.h"
+
+#define _bit15		0x8000
+#define _bit14		0x4000
+#define _bit13		0x2000
+#define _bit12		0x1000
+#define _bit11		0x0800
+#define _bit10		0x0400
+#define _bit9		0x0200
+#define _bit8		0x0100
+#define _bit7		0x0080
+#define _bit6		0x0040
+#define _bit5		0x0020
+#define _bit4		0x0010
+#define _bit3		0x0008
+#define _bit2		0x0004
+#define _bit1		0x0002
+#define _bit0		0x0001
+
+#define NUM_PFI_OUTPUT_SELECT_REGS 6
+
+/* Registers in the National Instruments DAQ-STC chip */
+
+#define Interrupt_A_Ack_Register	2
+#define G0_Gate_Interrupt_Ack			_bit15
+#define G0_TC_Interrupt_Ack			_bit14
+#define AI_Error_Interrupt_Ack			_bit13
+#define AI_STOP_Interrupt_Ack			_bit12
+#define AI_START_Interrupt_Ack			_bit11
+#define AI_START2_Interrupt_Ack			_bit10
+#define AI_START1_Interrupt_Ack			_bit9
+#define AI_SC_TC_Interrupt_Ack			_bit8
+#define AI_SC_TC_Error_Confirm			_bit7
+#define G0_TC_Error_Confirm			_bit6
+#define G0_Gate_Error_Confirm			_bit5
+
+#define AI_Status_1_Register		2
+#define Interrupt_A_St				_bit15
+#define AI_FIFO_Full_St				_bit14
+#define AI_FIFO_Half_Full_St			_bit13
+#define AI_FIFO_Empty_St			_bit12
+#define AI_Overrun_St				_bit11
+#define AI_Overflow_St				_bit10
+#define AI_SC_TC_Error_St			_bit9
+#define AI_START2_St				_bit8
+#define AI_START1_St				_bit7
+#define AI_SC_TC_St				_bit6
+#define AI_START_St				_bit5
+#define AI_STOP_St				_bit4
+#define G0_TC_St				_bit3
+#define G0_Gate_Interrupt_St			_bit2
+#define AI_FIFO_Request_St			_bit1
+#define Pass_Thru_0_Interrupt_St		_bit0
+
+#define AI_Status_2_Register		5
+
+#define Interrupt_B_Ack_Register	3
+#define G1_Gate_Error_Confirm			_bit1
+#define G1_TC_Error_Confirm			_bit2
+#define AO_BC_TC_Trigger_Error_Confirm		_bit3
+#define AO_BC_TC_Error_Confirm			_bit4
+#define AO_UI2_TC_Error_Confrim			_bit5
+#define AO_UI2_TC_Interrupt_Ack			_bit6
+#define AO_UC_TC_Interrupt_Ack			_bit7
+#define AO_BC_TC_Interrupt_Ack			_bit8
+#define AO_START1_Interrupt_Ack			_bit9
+#define AO_UPDATE_Interrupt_Ack			_bit10
+#define AO_START_Interrupt_Ack			_bit11
+#define AO_STOP_Interrupt_Ack			_bit12
+#define AO_Error_Interrupt_Ack			_bit13
+#define G1_TC_Interrupt_Ack			_bit14
+#define G1_Gate_Interrupt_Ack			_bit15
+
+#define AO_Status_1_Register		3
+#define Interrupt_B_St				_bit15
+#define AO_FIFO_Full_St				_bit14
+#define AO_FIFO_Half_Full_St			_bit13
+#define AO_FIFO_Empty_St			_bit12
+#define AO_BC_TC_Error_St			_bit11
+#define AO_START_St				_bit10
+#define AO_Overrun_St				_bit9
+#define AO_START1_St				_bit8
+#define AO_BC_TC_St				_bit7
+#define AO_UC_TC_St				_bit6
+#define AO_UPDATE_St				_bit5
+#define AO_UI2_TC_St				_bit4
+#define G1_TC_St				_bit3
+#define G1_Gate_Interrupt_St			_bit2
+#define AO_FIFO_Request_St			_bit1
+#define Pass_Thru_1_Interrupt_St		_bit0
+
+
+#define AI_Command_2_Register		4
+#define AI_End_On_SC_TC				_bit15
+#define AI_End_On_End_Of_Scan			_bit14
+#define AI_START1_Disable			_bit11
+#define AI_SC_Save_Trace			_bit10
+#define AI_SI_Switch_Load_On_SC_TC		_bit9
+#define AI_SI_Switch_Load_On_STOP		_bit8
+#define AI_SI_Switch_Load_On_TC			_bit7
+#define AI_SC_Switch_Load_On_TC			_bit4
+#define AI_STOP_Pulse				_bit3
+#define AI_START_Pulse				_bit2
+#define AI_START2_Pulse				_bit1
+#define AI_START1_Pulse				_bit0
+
+#define AO_Command_2_Register		5
+#define AO_End_On_BC_TC(x)			(((x) & 0x3) << 14)
+#define AO_Start_Stop_Gate_Enable		_bit13
+#define AO_UC_Save_Trace			_bit12
+#define AO_BC_Gate_Enable			_bit11
+#define AO_BC_Save_Trace			_bit10
+#define AO_UI_Switch_Load_On_BC_TC		_bit9
+#define AO_UI_Switch_Load_On_Stop		_bit8
+#define AO_UI_Switch_Load_On_TC			_bit7
+#define AO_UC_Switch_Load_On_BC_TC		_bit6
+#define AO_UC_Switch_Load_On_TC			_bit5
+#define AO_BC_Switch_Load_On_TC			_bit4
+#define AO_Mute_B				_bit3
+#define AO_Mute_A				_bit2
+#define AO_UPDATE2_Pulse			_bit1
+#define AO_START1_Pulse				_bit0
+
+#define AO_Status_2_Register		6
+
+#define DIO_Parallel_Input_Register	7
+
+#define AI_Command_1_Register		8
+#define AI_Analog_Trigger_Reset			_bit14
+#define AI_Disarm				_bit13
+#define AI_SI2_Arm				_bit12
+#define AI_SI2_Load				_bit11
+#define AI_SI_Arm				_bit10
+#define AI_SI_Load				_bit9
+#define AI_DIV_Arm				_bit8
+#define AI_DIV_Load				_bit7
+#define AI_SC_Arm				_bit6
+#define AI_SC_Load				_bit5
+#define AI_SCAN_IN_PROG_Pulse			_bit4
+#define AI_EXTMUX_CLK_Pulse			_bit3
+#define AI_LOCALMUX_CLK_Pulse			_bit2
+#define AI_SC_TC_Pulse				_bit1
+#define AI_CONVERT_Pulse			_bit0
+
+#define AO_Command_1_Register		9
+#define AO_Analog_Trigger_Reset			_bit15
+#define AO_START_Pulse				_bit14
+#define AO_Disarm				_bit13
+#define AO_UI2_Arm_Disarm			_bit12
+#define AO_UI2_Load				_bit11
+#define AO_UI_Arm				_bit10
+#define AO_UI_Load				_bit9
+#define AO_UC_Arm				_bit8
+#define AO_UC_Load				_bit7
+#define AO_BC_Arm				_bit6
+#define AO_BC_Load				_bit5
+#define AO_DAC1_Update_Mode			_bit4
+#define AO_LDAC1_Source_Select			_bit3
+#define AO_DAC0_Update_Mode			_bit2
+#define AO_LDAC0_Source_Select			_bit1
+#define AO_UPDATE_Pulse				_bit0
+
+
+#define DIO_Output_Register		10
+#define DIO_Parallel_Data_Out(a)                ((a)&0xff)
+#define DIO_Parallel_Data_Mask                  0xff
+#define DIO_SDOUT                               _bit0
+#define DIO_SDIN                                _bit4
+#define DIO_Serial_Data_Out(a)                  (((a)&0xff)<<8)
+#define DIO_Serial_Data_Mask                    0xff00
+
+#define DIO_Control_Register		11
+#define DIO_Software_Serial_Control             _bit11
+#define DIO_HW_Serial_Timebase                  _bit10
+#define DIO_HW_Serial_Enable                    _bit9
+#define DIO_HW_Serial_Start                     _bit8
+#define DIO_Pins_Dir(a)                         ((a)&0xff)
+#define DIO_Pins_Dir_Mask                       0xff
+
+#define AI_Mode_1_Register		12
+#define AI_CONVERT_Source_Select(a)		(((a) & 0x1f) << 11)
+#define AI_SI_Source_select(a)			(((a) & 0x1f) << 6)
+#define AI_CONVERT_Source_Polarity		_bit5
+#define AI_SI_Source_Polarity		_bit4
+#define AI_Start_Stop				_bit3
+#define AI_Mode_1_Reserved			_bit2
+#define AI_Continuous				_bit1
+#define AI_Trigger_Once				_bit0
+
+#define AI_Mode_2_Register		13
+#define AI_SC_Gate_Enable			_bit15
+#define AI_Start_Stop_Gate_Enable		_bit14
+#define AI_Pre_Trigger				_bit13
+#define AI_External_MUX_Present			_bit12
+#define AI_SI2_Initial_Load_Source		_bit9
+#define AI_SI2_Reload_Mode			_bit8
+#define AI_SI_Initial_Load_Source		_bit7
+#define AI_SI_Reload_Mode(a)			(((a) & 0x7)<<4)
+#define AI_SI_Write_Switch			_bit3
+#define AI_SC_Initial_Load_Source		_bit2
+#define AI_SC_Reload_Mode			_bit1
+#define AI_SC_Write_Switch			_bit0
+
+#define AI_SI_Load_A_Registers		14
+#define AI_SI_Load_B_Registers		16
+#define AI_SC_Load_A_Registers		18
+#define AI_SC_Load_B_Registers		20
+#define AI_SI_Save_Registers		64
+#define AI_SC_Save_Registers		66
+
+#define AI_SI2_Load_A_Register		23
+#define AI_SI2_Load_B_Register		25
+
+#define Joint_Status_1_Register         27
+#define DIO_Serial_IO_In_Progress_St            _bit12
+
+#define DIO_Serial_Input_Register       28
+#define Joint_Status_2_Register         29
+#define AO_TMRDACWRs_In_Progress_St		_bit5
+
+#define AO_Mode_1_Register		38
+#define AO_UPDATE_Source_Select(x)		(((x)&0x1f)<<11)
+#define AO_UI_Source_Select(x)			(((x)&0x1f)<<6)
+#define AO_Multiple_Channels			_bit5
+#define AO_UPDATE_Source_Polarity		_bit4
+#define AO_UI_Source_Polarity			_bit3
+#define AO_UC_Switch_Load_Every_TC		_bit2
+#define AO_Continuous				_bit1
+#define AO_Trigger_Once				_bit0
+
+#define AO_Mode_2_Register		39
+#define AO_FIFO_Mode_Mask			(0x3 << 14)
+#define AO_FIFO_Mode_HF_to_F			(3<<14)
+#define AO_FIFO_Mode_F				(2<<14)
+#define AO_FIFO_Mode_HF				(1<<14)
+#define AO_FIFO_Mode_E				(0<<14)
+#define AO_FIFO_Retransmit_Enable		_bit13
+#define AO_START1_Disable			_bit12
+#define AO_UC_Initial_Load_Source		_bit11
+#define AO_UC_Write_Switch			_bit10
+#define AO_UI2_Initial_Load_Source		_bit9
+#define AO_UI2_Reload_Mode			_bit8
+#define AO_UI_Initial_Load_Source		_bit7
+#define AO_UI_Reload_Mode(x)			(((x) & 0x7) << 4)
+#define AO_UI_Write_Switch			_bit3
+#define AO_BC_Initial_Load_Source		_bit2
+#define AO_BC_Reload_Mode			_bit1
+#define AO_BC_Write_Switch			_bit0
+
+#define AO_UI_Load_A_Register		40
+#define AO_UI_Load_A_Register_High	40
+#define AO_UI_Load_A_Register_Low	41
+#define AO_UI_Load_B_Register		42
+#define AO_UI_Save_Registers		16
+#define AO_BC_Load_A_Register		44
+#define AO_BC_Load_A_Register_High	44
+#define AO_BC_Load_A_Register_Low	45
+#define AO_BC_Load_B_Register		46
+#define AO_BC_Load_B_Register_High	46
+#define AO_BC_Load_B_Register_Low	47
+#define AO_BC_Save_Registers		18
+#define AO_UC_Load_A_Register		48
+#define AO_UC_Load_A_Register_High	48
+#define AO_UC_Load_A_Register_Low	49
+#define AO_UC_Load_B_Register		50
+#define AO_UC_Save_Registers		20
+
+#define Clock_and_FOUT_Register		56
+#define FOUT_Enable				_bit15
+#define FOUT_Timebase_Select			_bit14
+#define DIO_Serial_Out_Divide_By_2		_bit13
+#define Slow_Internal_Time_Divide_By_2		_bit12
+#define Slow_Internal_Timebase			_bit11
+#define G_Source_Divide_By_2			_bit10
+#define Clock_To_Board_Divide_By_2		_bit9
+#define Clock_To_Board				_bit8
+#define AI_Output_Divide_By_2			_bit7
+#define AI_Source_Divide_By_2			_bit6
+#define AO_Output_Divide_By_2			_bit5
+#define AO_Source_Divide_By_2			_bit4
+#define FOUT_Divider_mask			0xf
+#define FOUT_Divider(x)				(((x) & 0xf) << 0)
+
+#define IO_Bidirection_Pin_Register	57
+#define	RTSI_Trig_Direction_Register	58
+#define	Drive_RTSI_Clock_Bit			0x1
+#define	Use_RTSI_Clock_Bit			0x2
+
+static inline unsigned int RTSI_Output_Bit(unsigned channel, int is_mseries)
+{
+	unsigned max_channel;
+	unsigned base_bit_shift;
+	if(is_mseries)
+	{
+		base_bit_shift = 8;
+		max_channel = 7;
+	}else
+	{
+		base_bit_shift = 9;
+		max_channel = 6;
+	}
+	if(channel > max_channel)
+	{
+		rtdm_printk("%s: bug, invalid RTSI_channel=%i\n",
+			    __FUNCTION__, channel);
+		return 0;
+	}
+	return 1 << (base_bit_shift + channel);
+}
+
+#define Interrupt_Control_Register	59
+#define Interrupt_B_Enable			_bit15
+#define Interrupt_B_Output_Select(x)		((x)<<12)
+#define Interrupt_A_Enable			_bit11
+#define Interrupt_A_Output_Select(x)		((x)<<8)
+#define Pass_Thru_0_Interrupt_Polarity		_bit3
+#define Pass_Thru_1_Interrupt_Polarity		_bit2
+#define Interrupt_Output_On_3_Pins		_bit1
+#define Interrupt_Output_Polarity		_bit0
+
+#define AI_Output_Control_Register	60
+#define AI_START_Output_Select			_bit10
+#define AI_SCAN_IN_PROG_Output_Select(x)	(((x) & 0x3) << 8)
+#define AI_EXTMUX_CLK_Output_Select(x)		(((x) & 0x3) << 6)
+#define AI_LOCALMUX_CLK_Output_Select(x)	((x)<<4)
+#define AI_SC_TC_Output_Select(x)		((x)<<2)
+#define AI_CONVERT_Output_High_Z		0
+#define AI_CONVERT_Output_Ground		1
+#define AI_CONVERT_Output_Enable_Low		2
+#define AI_CONVERT_Output_Enable_High		3
+#define AI_CONVERT_Output_Select(x)		((x) & 0x3)
+
+#define AI_START_STOP_Select_Register	62
+#define AI_START_Polarity			_bit15
+#define AI_STOP_Polarity			_bit14
+#define AI_STOP_Sync				_bit13
+#define AI_STOP_Edge				_bit12
+#define AI_STOP_Select(a)			(((a) & 0x1f)<<7)
+#define AI_START_Sync				_bit6
+#define AI_START_Edge				_bit5
+#define AI_START_Select(a)			((a) & 0x1f)
+
+#define AI_Trigger_Select_Register	63
+#define AI_START1_Polarity			_bit15
+#define AI_START2_Polarity			_bit14
+#define AI_START2_Sync				_bit13
+#define AI_START2_Edge				_bit12
+#define AI_START2_Select(a)			(((a) & 0x1f) << 7)
+#define AI_START1_Sync				_bit6
+#define AI_START1_Edge				_bit5
+#define AI_START1_Select(a)			((a) & 0x1f)
+
+#define AI_DIV_Load_A_Register	64
+
+#define AO_Start_Select_Register	66
+#define AO_UI2_Software_Gate			_bit15
+#define AO_UI2_External_Gate_Polarity		_bit14
+#define AO_START_Polarity			_bit13
+#define AO_AOFREQ_Enable			_bit12
+#define AO_UI2_External_Gate_Select(a)		(((a) & 0x1f) << 7)
+#define AO_START_Sync				_bit6
+#define AO_START_Edge				_bit5
+#define AO_START_Select(a)			((a) & 0x1f)
+
+#define AO_Trigger_Select_Register	67
+#define AO_UI2_External_Gate_Enable		_bit15
+#define AO_Delayed_START1			_bit14
+#define AO_START1_Polarity			_bit13
+#define AO_UI2_Source_Polarity			_bit12
+#define AO_UI2_Source_Select(x)			(((x)&0x1f)<<7)
+#define AO_START1_Sync				_bit6
+#define AO_START1_Edge				_bit5
+#define AO_START1_Select(x)			(((x)&0x1f)<<0)
+
+#define AO_Mode_3_Register		70
+#define AO_UI2_Switch_Load_Next_TC		_bit13
+#define AO_UC_Switch_Load_Every_BC_TC		_bit12
+#define AO_Trigger_Length			_bit11
+#define AO_Stop_On_Overrun_Error		_bit5
+#define AO_Stop_On_BC_TC_Trigger_Error		_bit4
+#define AO_Stop_On_BC_TC_Error			_bit3
+#define AO_Not_An_UPDATE			_bit2
+#define AO_Software_Gate			_bit1
+#define AO_Last_Gate_Disable			_bit0	/* M Series only */
+
+#define Joint_Reset_Register		72
+#define Software_Reset				_bit11
+#define AO_Configuration_End			_bit9
+#define AI_Configuration_End			_bit8
+#define AO_Configuration_Start			_bit5
+#define AI_Configuration_Start			_bit4
+#define G1_Reset				_bit3
+#define G0_Reset				_bit2
+#define AO_Reset				_bit1
+#define AI_Reset				_bit0
+
+#define Interrupt_A_Enable_Register	73
+#define Pass_Thru_0_Interrupt_Enable		_bit9
+#define G0_Gate_Interrupt_Enable		_bit8
+#define AI_FIFO_Interrupt_Enable		_bit7
+#define G0_TC_Interrupt_Enable			_bit6
+#define AI_Error_Interrupt_Enable		_bit5
+#define AI_STOP_Interrupt_Enable		_bit4
+#define AI_START_Interrupt_Enable		_bit3
+#define AI_START2_Interrupt_Enable		_bit2
+#define AI_START1_Interrupt_Enable		_bit1
+#define AI_SC_TC_Interrupt_Enable		_bit0
+
+#define Interrupt_B_Enable_Register	75
+#define Pass_Thru_1_Interrupt_Enable		_bit11
+#define G1_Gate_Interrupt_Enable		_bit10
+#define G1_TC_Interrupt_Enable			_bit9
+#define AO_FIFO_Interrupt_Enable		_bit8
+#define AO_UI2_TC_Interrupt_Enable		_bit7
+#define AO_UC_TC_Interrupt_Enable		_bit6
+#define AO_Error_Interrupt_Enable		_bit5
+#define AO_STOP_Interrupt_Enable		_bit4
+#define AO_START_Interrupt_Enable		_bit3
+#define AO_UPDATE_Interrupt_Enable		_bit2
+#define AO_START1_Interrupt_Enable		_bit1
+#define AO_BC_TC_Interrupt_Enable		_bit0
+
+#define Second_IRQ_A_Enable_Register	74
+#define AI_SC_TC_Second_Irq_Enable		_bit0
+#define AI_START1_Second_Irq_Enable		_bit1
+#define AI_START2_Second_Irq_Enable		_bit2
+#define AI_START_Second_Irq_Enable		_bit3
+#define AI_STOP_Second_Irq_Enable		_bit4
+#define AI_Error_Second_Irq_Enable		_bit5
+#define G0_TC_Second_Irq_Enable			_bit6
+#define AI_FIFO_Second_Irq_Enable		_bit7
+#define G0_Gate_Second_Irq_Enable		_bit8
+#define Pass_Thru_0_Second_Irq_Enable		_bit9
+
+#define Second_IRQ_B_Enable_Register	76
+#define AO_BC_TC_Second_Irq_Enable		_bit0
+#define AO_START1_Second_Irq_Enable		_bit1
+#define AO_UPDATE_Second_Irq_Enable		_bit2
+#define AO_START_Second_Irq_Enable		_bit3
+#define AO_STOP_Second_Irq_Enable		_bit4
+#define AO_Error_Second_Irq_Enable		_bit5
+#define AO_UC_TC_Second_Irq_Enable		_bit6
+#define AO_UI2_TC_Second_Irq_Enable		_bit7
+#define AO_FIFO_Second_Irq_Enable		_bit8
+#define G1_TC_Second_Irq_Enable			_bit9
+#define G1_Gate_Second_Irq_Enable		_bit10
+#define Pass_Thru_1_Second_Irq_Enable		_bit11
+
+#define AI_Personal_Register		77
+#define AI_SHIFTIN_Pulse_Width			_bit15
+#define AI_EOC_Polarity				_bit14
+#define AI_SOC_Polarity				_bit13
+#define AI_SHIFTIN_Polarity			_bit12
+#define AI_CONVERT_Pulse_Timebase		_bit11
+#define AI_CONVERT_Pulse_Width			_bit10
+#define AI_CONVERT_Original_Pulse		_bit9
+#define AI_FIFO_Flags_Polarity			_bit8
+#define AI_Overrun_Mode				_bit7
+#define AI_EXTMUX_CLK_Pulse_Width		_bit6
+#define AI_LOCALMUX_CLK_Pulse_Width		_bit5
+#define AI_AIFREQ_Polarity			_bit4
+
+#define AO_Personal_Register		78
+#define AO_Interval_Buffer_Mode			_bit3
+#define AO_BC_Source_Select			_bit4
+#define AO_UPDATE_Pulse_Width			_bit5
+#define AO_UPDATE_Pulse_Timebase		_bit6
+#define AO_UPDATE_Original_Pulse		_bit7
+#define AO_DMA_PIO_Control			_bit8 /* M Series: reserved */
+#define AO_AOFREQ_Polarity			_bit9 /* M Series: reserved */
+#define AO_FIFO_Enable				_bit10
+#define AO_FIFO_Flags_Polarity			_bit11 /* M Series: reserved */
+#define AO_TMRDACWR_Pulse_Width			_bit12
+#define AO_Fast_CPU				_bit13 /* M Series: reserved */
+#define AO_Number_Of_DAC_Packages		_bit14 /* 1 for "single" mode,
+							  0 for "dual" */
+#define AO_Multiple_DACS_Per_Package		_bit15 /* M Series only */
+
+#define	RTSI_Trig_A_Output_Register	79
+
+#define	RTSI_Trig_B_Output_Register	80
+#define RTSI_Sub_Selection_1_Bit		_bit15 /* not for M Series */
+#define RTSI_Trig_Output_Bits(x, y)		(((y) & 0xf) << (((x) % 4) * 4))
+#define RTSI_Trig_Output_Mask(x)		(0xf << (((x) % 4) * 4))
+#define RTSI_Trig_Output_Source(x, y)		(((y) >> (((x) % 4) * 4)) & 0xf)
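+
+/*
+ * Worked example (illustrative only): routing source 3 to RTSI line 5
+ * with RTSI_Trig_Output_Bits(5, 3) evaluates to (3 & 0xf) << ((5 % 4) * 4),
+ * i.e. 0x30, which belongs in RTSI_Trig_B_Output_Register.
+ */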
+
+#define	RTSI_Board_Register		81
+#define Write_Strobe_0_Register		82
+#define Write_Strobe_1_Register		83
+#define Write_Strobe_2_Register		84
+#define Write_Strobe_3_Register		85
+
+#define AO_Output_Control_Register	86
+#define AO_External_Gate_Enable			_bit15
+#define AO_External_Gate_Select(x)		(((x)&0x1f)<<10)
+#define AO_Number_Of_Channels(x)		(((x)&0xf)<<6)
+#define AO_UPDATE2_Output_Select(x)		(((x)&0x3)<<4)
+#define AO_External_Gate_Polarity		_bit3
+#define AO_UPDATE2_Output_Toggle		_bit2
+#define AO_Update_Output_High_Z			0
+#define AO_Update_Output_Ground			1
+#define AO_Update_Output_Enable_Low		2
+#define AO_Update_Output_Enable_High		3
+#define AO_UPDATE_Output_Select(x)		((x) & 0x3)
+
+#define AI_Mode_3_Register		87
+#define AI_Trigger_Length			_bit15
+#define AI_Delay_START				_bit14
+#define AI_Software_Gate			_bit13
+#define AI_SI_Special_Trigger_Delay		_bit12
+#define AI_SI2_Source_Select			_bit11
+#define AI_Delayed_START2			_bit10
+#define AI_Delayed_START1			_bit9
+#define AI_External_Gate_Mode			_bit8
+#define AI_FIFO_Mode_HF_to_E			(3<<6)
+#define AI_FIFO_Mode_F				(2<<6)
+#define AI_FIFO_Mode_HF				(1<<6)
+#define AI_FIFO_Mode_NE				(0<<6)
+#define AI_External_Gate_Polarity		_bit5
+#define AI_External_Gate_Select(a)		((a) & 0x1f)
+
+#define G_Autoincrement_Register(a)	(68+(a))
+#define G_Command_Register(a)		(6+(a))
+#define G_HW_Save_Register(a)		(8+(a)*2)
+#define G_HW_Save_Register_High(a)	(8+(a)*2)
+#define G_HW_Save_Register_Low(a)	(9+(a)*2)
+#define G_Input_Select_Register(a)	(36+(a))
+#define G_Load_A_Register(a)		(28+(a)*4)
+#define G_Load_A_Register_High(a)	(28+(a)*4)
+#define G_Load_A_Register_Low(a)	(29+(a)*4)
+#define G_Load_B_Register(a)		(30+(a)*4)
+#define G_Load_B_Register_High(a)	(30+(a)*4)
+#define G_Load_B_Register_Low(a)	(31+(a)*4)
+#define G_Mode_Register(a)		(26+(a))
+#define G_Save_Register(a)		(12+(a)*2)
+#define G_Save_Register_High(a)		(12+(a)*2)
+#define G_Save_Register_Low(a)		(13+(a)*2)
+#define G_Status_Register		4
+#define Analog_Trigger_Etc_Register	61
+
+/* command register */
+#define G_Disarm_Copy			_bit15		/* strobe */
+#define G_Save_Trace_Copy		_bit14
+#define G_Arm_Copy			_bit13		/* strobe */
+#define G_Bank_Switch_Start		_bit10		/* strobe */
+#define G_Little_Big_Endian		_bit9
+#define G_Synchronized_Gate		_bit8
+#define G_Write_Switch			_bit7
+#define G_Up_Down(a)			(((a)&0x03)<<5)
+#define G_Disarm			_bit4		/* strobe */
+#define G_Analog_Trigger_Reset		_bit3		/* strobe */
+#define G_Save_Trace			_bit1
+#define G_Arm				_bit0		/* strobe */
+
+/* channel agnostic names for the command register #defines */
+#define G_Bank_Switch_Enable		_bit12
+#define G_Bank_Switch_Mode		_bit11
+#define G_Load				_bit2		/* strobe */
+
+/* input select register */
+#define G_Gate_Select(a)		(((a)&0x1f)<<7)
+#define G_Source_Select(a)		(((a)&0x1f)<<2)
+#define G_Write_Acknowledges_Irq	_bit1
+#define G_Read_Acknowledges_Irq		_bit0
+
+/* same input select register, but with channel agnostic names */
+#define G_Source_Polarity		_bit15
+#define G_Output_Polarity		_bit14
+#define G_OR_Gate			_bit13
+#define G_Gate_Select_Load_Source	_bit12
+
+/* mode register */
+#define G_Loading_On_TC			_bit12
+#define G_Output_Mode(a)		(((a)&0x03)<<8)
+#define G_Trigger_Mode_For_Edge_Gate(a)	(((a)&0x03)<<3)
+#define G_Gating_Mode(a)		(((a)&0x03)<<0)
+
+/* same input mode register, but with channel agnostic names */
+#define G_Load_Source_Select		_bit7
+#define G_Reload_Source_Switching	_bit15
+#define G_Loading_On_Gate		_bit14
+#define G_Gate_Polarity		_bit13
+
+#define G_Counting_Once(a)		(((a)&0x03)<<10)
+#define G_Stop_Mode(a)			(((a)&0x03)<<5)
+#define G_Gate_On_Both_Edges		_bit2
+
+/* G_Status_Register */
+#define G1_Gate_Error_St		_bit15
+#define G0_Gate_Error_St		_bit14
+#define G1_TC_Error_St			_bit13
+#define G0_TC_Error_St			_bit12
+#define G1_No_Load_Between_Gates_St	_bit11
+#define G0_No_Load_Between_Gates_St	_bit10
+#define G1_Armed_St			_bit9
+#define G0_Armed_St			_bit8
+#define G1_Stale_Data_St		_bit7
+#define G0_Stale_Data_St		_bit6
+#define G1_Next_Load_Source_St		_bit5
+#define G0_Next_Load_Source_St		_bit4
+#define G1_Counting_St			_bit3
+#define G0_Counting_St			_bit2
+#define G1_Save_St			_bit1
+#define G0_Save_St			_bit0
+
+/* general purpose counter timer */
+#define G_Autoincrement(a)              ((a)<<0)
+
+/*Analog_Trigger_Etc_Register*/
+#define Analog_Trigger_Mode(x) ((x) & 0x7)
+#define Analog_Trigger_Enable _bit3
+#define Analog_Trigger_Drive _bit4
+#define GPFO_1_Output_Select		_bit7
+#define GPFO_0_Output_Select(a)		((a)<<11)
+#define GPFO_0_Output_Enable		_bit14
+#define GPFO_1_Output_Enable		_bit15
+
+/* Additional windowed registers unique to E series */
+
+/* 16 bit registers shadowed from DAQ-STC */
+#define Window_Address			0x00
+#define Window_Data			0x02
+
+#define Configuration_Memory_Clear	82
+#define ADC_FIFO_Clear			83
+#define DAC_FIFO_Clear			84
+
+/* i/o port offsets */
+
+/* 8 bit registers */
+#define XXX_Status			0x01
+#define PROMOUT					_bit0
+#define AI_FIFO_LOWER_NOT_EMPTY			_bit3
+
+#define Serial_Command			0x0d
+#define Misc_Command			0x0f
+#define Port_A				0x19
+#define Port_B				0x1b
+#define Port_C				0x1d
+#define Configuration			0x1f
+#define Strobes				0x01
+#define Channel_A_Mode			0x03
+#define Channel_B_Mode			0x05
+#define Channel_C_Mode			0x07
+#define AI_AO_Select			0x09
+#define AI_DMA_Select_Shift		0
+#define AI_DMA_Select_Mask		0xf
+#define AO_DMA_Select_Shift		4
+#define AO_DMA_Select_Mask		(0xf << AO_DMA_Select_Shift)
+
+#define G0_G1_Select			0x0b
+
+static inline unsigned ni_stc_dma_channel_select_bitfield(unsigned channel)
+{
+	if(channel < 4) return 1 << channel;
+	if(channel == 4) return 0x3;
+	if(channel == 5) return 0x5;
+	BUG();
+	return 0;
+}
+static inline unsigned GPCT_DMA_Select_Bits(unsigned gpct_index, unsigned mite_channel)
+{
+	BUG_ON(gpct_index > 1);
+	return ni_stc_dma_channel_select_bitfield(mite_channel) << (4 * gpct_index);
+}
+static inline unsigned GPCT_DMA_Select_Mask(unsigned gpct_index)
+{
+	BUG_ON(gpct_index > 1);
+	return 0xf << (4 * gpct_index);
+}
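+
+/*
+ * Worked example (illustrative only): routing MITE DMA channel 2 to
+ * counter G1 gives GPCT_DMA_Select_Bits(1, 2) == (1 << 2) << 4 == 0x40,
+ * with GPCT_DMA_Select_Mask(1) == 0xf0 as the affected field of the
+ * G0_G1_Select register.
+ */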
+
+/* 16 bit registers */
+
+#define Configuration_Memory_Low	0x10
+#define AI_DITHER				_bit9
+#define AI_LAST_CHANNEL				_bit15
+
+#define Configuration_Memory_High	0x12
+#define AI_AC_COUPLE				_bit11
+#define AI_DIFFERENTIAL				_bit12
+#define AI_COMMON				_bit13
+#define AI_GROUND				(_bit12|_bit13)
+#define AI_CONFIG_CHANNEL(x)			((x) & 0x3f)
+
+#define ADC_FIFO_Data_Register		0x1c
+
+#define AO_Configuration		0x16
+#define AO_Bipolar		_bit0
+#define AO_Deglitch		_bit1
+#define AO_Ext_Ref		_bit2
+#define AO_Ground_Ref		_bit3
+#define AO_Channel(x)		((x) << 8)
+
+#define DAC_FIFO_Data			0x1e
+#define DAC0_Direct_Data		0x18
+#define DAC1_Direct_Data		0x1a
+
+/* 611x registers (these boards differ from the e-series) */
+
+#define Magic_611x			0x19 /* w8 (new) */
+#define Calibration_Channel_Select_611x	0x1a /* w16 (new) */
+#define ADC_FIFO_Data_611x		0x1c /* r32 (incompatible) */
+#define AI_FIFO_Offset_Load_611x	0x05 /* r8 (new) */
+#define DAC_FIFO_Data_611x		0x14 /* w32 (incompatible) */
+#define Cal_Gain_Select_611x		0x05 /* w8 (new) */
+
+#define AO_Window_Address_611x		0x18
+#define AO_Window_Data_611x		0x1e
+
+/* 6143 registers */
+#define Magic_6143			0x19 /* w8 */
+#define G0G1_DMA_Select_6143		0x0B /* w8 */
+#define PipelineDelay_6143		0x1f /* w8 */
+#define EOC_Set_6143			0x1D /* w8 */
+#define AIDMA_Select_6143		0x09 /* w8 */
+#define AIFIFO_Data_6143		0x8C /* w32 */
+#define AIFIFO_Flag_6143		0x84 /* w32 */
+#define AIFIFO_Control_6143		0x88 /* w32 */
+#define AIFIFO_Status_6143		0x88 /* w32 */
+#define AIFIFO_DMAThreshold_6143	0x90 /* w32 */
+#define AIFIFO_Words_Available_6143	0x94 /* w32 */
+
+#define Calibration_Channel_6143	0x42 /* w16 */
+#define Calibration_LowTime_6143	0x20 /* w16 */
+#define Calibration_HighTime_6143	0x22 /* w16 */
+#define Relay_Counter_Load_Val__6143	0x4C /* w32 */
+#define Signature_6143			0x50 /* w32 */
+#define Release_Date_6143		0x54 /* w32 */
+#define Release_Oldest_Date_6143	0x58 /* w32 */
+
+#define Calibration_Channel_6143_RelayOn	0x8000	/* Calibration relay switch On */
+#define Calibration_Channel_6143_RelayOff	0x4000	/* Calibration relay switch Off */
+#define Calibration_Channel_Gnd_Gnd	0x00	/* Offset Calibration */
+#define Calibration_Channel_2v5_Gnd	0x02	/* 2.5V Reference */
+#define Calibration_Channel_Pwm_Gnd	0x05	/* +/- 5V Self Cal */
+#define Calibration_Channel_2v5_Pwm	0x0a	/* PWM Calibration */
+#define Calibration_Channel_Pwm_Pwm	0x0d	/* CMRR */
+#define Calibration_Channel_Gnd_Pwm	0x0e	/* PWM Calibration */
+
+/* 671x, 611x registers */
+
+/* 671xi 611x windowed ao registers */
+#define AO_Immediate_671x			0x11 /* W 16 */
+#define AO_Timed_611x				0x10 /* W 16 */
+#define AO_FIFO_Offset_Load_611x		0x13 /* W32 */
+#define AO_Later_Single_Point_Updates		0x14 /* W 16 */
+#define AO_Waveform_Generation_611x		0x15 /* W 16 */
+#define AO_Misc_611x				0x16 /* W 16 */
+#define AO_Calibration_Channel_Select_67xx	0x17 /* W 16 */
+#define AO_Configuration_2_67xx			0x18 /* W 16 */
+#define CAL_ADC_Command_67xx			0x19 /* W 8 */
+#define CAL_ADC_Status_67xx			0x1a /* R 8 */
+#define CAL_ADC_Data_67xx			0x1b /* R 16 */
+#define CAL_ADC_Config_Data_High_Word_67xx	0x1c /* RW 16 */
+#define CAL_ADC_Config_Data_Low_Word_67xx	0x1d /* RW 16 */
+
+static inline unsigned int DACx_Direct_Data_671x(int channel)
+{
+	return channel;
+}
+
+#define CLEAR_WG				_bit0
+
+#define CSCFG_CAL_CONTROL_MASK			0x7
+#define CSCFG_SELF_CAL_OFFSET			0x1
+#define CSCFG_SELF_CAL_GAIN			0x2
+#define CSCFG_SELF_CAL_OFFSET_GAIN		0x3
+#define CSCFG_SYSTEM_CAL_OFFSET			0x5
+#define CSCFG_SYSTEM_CAL_GAIN			0x6
+#define CSCFG_DONE				(1 << 3)
+#define CSCFG_POWER_SAVE_SELECT			(1 << 4)
+#define CSCFG_PORT_MODE				(1 << 5)
+#define CSCFG_RESET_VALID			(1 << 6)
+#define CSCFG_RESET				(1 << 7)
+#define CSCFG_UNIPOLAR				(1 << 12)
+#define CSCFG_WORD_RATE_2180_CYCLES		(0x0 << 13)
+#define CSCFG_WORD_RATE_1092_CYCLES		(0x1 << 13)
+#define CSCFG_WORD_RATE_532_CYCLES		(0x2 << 13)
+#define CSCFG_WORD_RATE_388_CYCLES		(0x3 << 13)
+#define CSCFG_WORD_RATE_324_CYCLES		(0x4 << 13)
+#define CSCFG_WORD_RATE_17444_CYCLES		(0x5 << 13)
+#define CSCFG_WORD_RATE_8724_CYCLES		(0x6 << 13)
+#define CSCFG_WORD_RATE_4364_CYCLES		(0x7 << 13)
+#define CSCFG_WORD_RATE_MASK			(0x7 << 13)
+#define CSCFG_LOW_POWER				(1 << 16)
+
+#define CS5529_CONFIG_DOUT(x)			(1 << (18 + (x)))
+#define CS5529_CONFIG_AOUT(x)			(1 << (22 + (x)))
+
+/* cs5529 command bits */
+#define CSCMD_POWER_SAVE			_bit0
+#define CSCMD_REGISTER_SELECT_MASK		0xe
+#define CSCMD_OFFSET_REGISTER			0x0
+#define CSCMD_GAIN_REGISTER			_bit1
+#define CSCMD_CONFIG_REGISTER			_bit2
+#define CSCMD_READ				_bit4
+#define CSCMD_CONTINUOUS_CONVERSIONS		_bit5
+#define CSCMD_SINGLE_CONVERSION			_bit6
+#define CSCMD_COMMAND				_bit7
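+
+/*
+ * Illustrative example (an assumption, not taken verbatim from any
+ * driver): starting a single conversion on the CS5529 calibration ADC
+ * would combine (CSCMD_COMMAND | CSCMD_SINGLE_CONVERSION), i.e. 0xc0,
+ * written to CAL_ADC_Command_67xx.
+ */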
+
+/* cs5529 status bits */
+#define CSS_ADC_BUSY				_bit0
+#define CSS_OSC_DETECT				_bit1 /* indicates adc error */
+#define CSS_OVERRANGE				_bit3
+
+#define SerDacLd(x)			(0x08<<(x))
+
+/*
+ * The definitions below are specific to the NI E Series drivers, but
+ * they are kept in this header for convenience.
+ */
+
+enum
+{
+	ai_gain_16 = 0,
+	ai_gain_8,
+	ai_gain_14,
+	ai_gain_4,
+	ai_gain_611x,
+	ai_gain_622x,
+	ai_gain_628x,
+	ai_gain_6143
+};
+enum caldac_enum
+{
+	caldac_none=0,
+	mb88341,
+	dac8800,
+	dac8043,
+	ad8522,
+	ad8804,
+	ad8842,
+	ad8804_debug
+};
+enum ni_reg_type
+{
+	ni_reg_normal = 0x0,
+	ni_reg_611x = 0x1,
+	ni_reg_6711 = 0x2,
+	ni_reg_6713 = 0x4,
+	ni_reg_67xx_mask = 0x6,
+	ni_reg_6xxx_mask = 0x7,
+	ni_reg_622x = 0x8,
+	ni_reg_625x = 0x10,
+	ni_reg_628x = 0x18,
+	ni_reg_m_series_mask = 0x18,
+	ni_reg_6143 = 0x20
+};
+
+/* M Series registers offsets */
+#define M_Offset_CDIO_DMA_Select		0x7 /* write */
+#define M_Offset_SCXI_Status			0x7 /* read */
+#define M_Offset_AI_AO_Select			0x9 /* write, same offset as e-series */
+#define M_Offset_SCXI_Serial_Data_In		0x9 /* read */
+#define M_Offset_G0_G1_Select			0xb /* write, same offset as e-series */
+#define M_Offset_Misc_Command			0xf
+#define M_Offset_SCXI_Serial_Data_Out		0x11
+#define M_Offset_SCXI_Control			0x13
+#define M_Offset_SCXI_Output_Enable		0x15
+#define M_Offset_AI_FIFO_Data			0x1c
+#define M_Offset_Static_Digital_Output		0x24 /* write */
+#define M_Offset_Static_Digital_Input		0x24 /* read */
+#define M_Offset_DIO_Direction			0x28
+#define M_Offset_Cal_PWM			0x40
+#define M_Offset_AI_Config_FIFO_Data		0x5e
+#define M_Offset_Interrupt_C_Enable		0x88 /* write */
+#define M_Offset_Interrupt_C_Status		0x88 /* read */
+#define M_Offset_Analog_Trigger_Control		0x8c
+#define M_Offset_AO_Serial_Interrupt_Enable	0xa0
+#define M_Offset_AO_Serial_Interrupt_Ack	0xa1 /* write */
+#define M_Offset_AO_Serial_Interrupt_Status	0xa1 /* read */
+#define M_Offset_AO_Calibration			0xa3
+#define M_Offset_AO_FIFO_Data			0xa4
+#define M_Offset_PFI_Filter			0xb0
+#define M_Offset_RTSI_Filter			0xb4
+#define M_Offset_SCXI_Legacy_Compatibility	0xbc
+#define M_Offset_Interrupt_A_Ack		0x104 /* write */
+#define M_Offset_AI_Status_1			0x104 /* read */
+#define M_Offset_Interrupt_B_Ack		0x106 /* write */
+#define M_Offset_AO_Status_1			0x106 /* read */
+#define M_Offset_AI_Command_2			0x108 /* write */
+#define M_Offset_G01_Status			0x108 /* read */
+#define M_Offset_AO_Command_2			0x10a
+#define M_Offset_AO_Status_2			0x10c /* read */
+#define M_Offset_G0_Command			0x10c /* write */
+#define M_Offset_G1_Command			0x10e /* write */
+#define M_Offset_G0_HW_Save			0x110
+#define M_Offset_G0_HW_Save_High		0x110
+#define M_Offset_AI_Command_1			0x110
+#define M_Offset_G0_HW_Save_Low			0x112
+#define M_Offset_AO_Command_1			0x112
+#define M_Offset_G1_HW_Save			0x114
+#define M_Offset_G1_HW_Save_High		0x114
+#define M_Offset_G1_HW_Save_Low			0x116
+#define M_Offset_AI_Mode_1			0x118
+#define M_Offset_G0_Save			0x118
+#define M_Offset_G0_Save_High			0x118
+#define M_Offset_AI_Mode_2			0x11a
+#define M_Offset_G0_Save_Low			0x11a
+#define M_Offset_AI_SI_Load_A			0x11c
+#define M_Offset_G1_Save			0x11c
+#define M_Offset_G1_Save_High			0x11c
+#define M_Offset_G1_Save_Low			0x11e
+#define M_Offset_AI_SI_Load_B			0x120 /* write */
+#define M_Offset_AO_UI_Save			0x120 /* read */
+#define M_Offset_AI_SC_Load_A			0x124 /* write */
+#define M_Offset_AO_BC_Save			0x124 /* read */
+#define M_Offset_AI_SC_Load_B			0x128 /* write */
+#define M_Offset_AO_UC_Save			0x128 /* read */
+#define M_Offset_AI_SI2_Load_A			0x12c
+#define M_Offset_AI_SI2_Load_B			0x130
+#define M_Offset_G0_Mode			0x134
+#define M_Offset_G1_Mode			0x136 /* write */
+#define M_Offset_Joint_Status_1			0x136 /* read */
+#define M_Offset_G0_Load_A			0x138
+#define M_Offset_Joint_Status_2			0x13a
+#define M_Offset_G0_Load_B			0x13c
+#define M_Offset_G1_Load_A			0x140
+#define M_Offset_G1_Load_B			0x144
+#define M_Offset_G0_Input_Select		0x148
+#define M_Offset_G1_Input_Select		0x14a
+#define M_Offset_AO_Mode_1			0x14c
+#define M_Offset_AO_Mode_2			0x14e
+#define M_Offset_AO_UI_Load_A			0x150
+#define M_Offset_AO_UI_Load_B			0x154
+#define M_Offset_AO_BC_Load_A			0x158
+#define M_Offset_AO_BC_Load_B			0x15c
+#define M_Offset_AO_UC_Load_A			0x160
+#define M_Offset_AO_UC_Load_B			0x164
+#define M_Offset_Clock_and_FOUT			0x170
+#define M_Offset_IO_Bidirection_Pin		0x172
+#define M_Offset_RTSI_Trig_Direction		0x174
+#define M_Offset_Interrupt_Control		0x176
+#define M_Offset_AI_Output_Control		0x178
+#define M_Offset_Analog_Trigger_Etc		0x17a
+#define M_Offset_AI_START_STOP_Select		0x17c
+#define M_Offset_AI_Trigger_Select		0x17e
+#define M_Offset_AI_SI_Save			0x180 /* read */
+#define M_Offset_AI_DIV_Load_A			0x180 /* write */
+#define M_Offset_AI_SC_Save			0x184 /* read */
+#define M_Offset_AO_Start_Select		0x184 /* write */
+#define M_Offset_AO_Trigger_Select		0x186
+#define M_Offset_AO_Mode_3			0x18c
+#define M_Offset_G0_Autoincrement		0x188
+#define M_Offset_G1_Autoincrement		0x18a
+#define M_Offset_Joint_Reset			0x190
+#define M_Offset_Interrupt_A_Enable		0x192
+#define M_Offset_Interrupt_B_Enable		0x196
+#define M_Offset_AI_Personal			0x19a
+#define M_Offset_AO_Personal			0x19c
+#define M_Offset_RTSI_Trig_A_Output		0x19e
+#define M_Offset_RTSI_Trig_B_Output		0x1a0
+#define M_Offset_RTSI_Shared_MUX		0x1a2
+#define M_Offset_AO_Output_Control		0x1ac
+#define M_Offset_AI_Mode_3			0x1ae
+#define M_Offset_Configuration_Memory_Clear	0x1a4
+#define M_Offset_AI_FIFO_Clear			0x1a6
+#define M_Offset_AO_FIFO_Clear			0x1a8
+#define M_Offset_G0_Counting_Mode		0x1b0
+#define M_Offset_G1_Counting_Mode		0x1b2
+#define M_Offset_G0_Second_Gate			0x1b4
+#define M_Offset_G1_Second_Gate			0x1b6
+#define M_Offset_G0_DMA_Config			0x1b8 /* write */
+#define M_Offset_G0_DMA_Status			0x1b8 /* read */
+#define M_Offset_G1_DMA_Config			0x1ba /* write */
+#define M_Offset_G1_DMA_Status			0x1ba /* read */
+#define M_Offset_G0_MSeries_ABZ			0x1c0
+#define M_Offset_G1_MSeries_ABZ			0x1c2
+#define M_Offset_Clock_and_Fout2		0x1c4
+#define M_Offset_PLL_Control			0x1c6
+#define M_Offset_PLL_Status			0x1c8
+#define M_Offset_PFI_Output_Select_1		0x1d0
+#define M_Offset_PFI_Output_Select_2		0x1d2
+#define M_Offset_PFI_Output_Select_3		0x1d4
+#define M_Offset_PFI_Output_Select_4		0x1d6
+#define M_Offset_PFI_Output_Select_5		0x1d8
+#define M_Offset_PFI_Output_Select_6		0x1da
+#define M_Offset_PFI_DI				0x1dc
+#define M_Offset_PFI_DO				0x1de
+#define M_Offset_AI_Config_FIFO_Bypass		0x218
+#define M_Offset_SCXI_DIO_Enable		0x21c
+#define M_Offset_CDI_FIFO_Data			0x220 /* read */
+#define M_Offset_CDO_FIFO_Data			0x220 /* write */
+#define M_Offset_CDIO_Status			0x224 /* read */
+#define M_Offset_CDIO_Command			0x224 /* write */
+#define M_Offset_CDI_Mode			0x228
+#define M_Offset_CDO_Mode			0x22c
+#define M_Offset_CDI_Mask_Enable		0x230
+#define M_Offset_CDO_Mask_Enable		0x234
+#define M_Offset_AO_Waveform_Order(x)		(0xc2 + 0x4 * (x))
+#define M_Offset_AO_Config_Bank(x)		(0xc3 + 0x4 * (x))
+#define M_Offset_DAC_Direct_Data(x)		(0xc0 + 0x4 * (x))
+#define M_Offset_Gen_PWM(x)			(0x44 + 0x2 * (x))
+
+static inline int M_Offset_Static_AI_Control(int i)
+{
+	int offset[] =
+	{
+		0x64,
+		0x261,
+		0x262,
+		0x263,
+	};
+	if(((unsigned)i) >= sizeof(offset) / sizeof(offset[0]))
+	{
+		rtdm_printk("%s: invalid channel=%i\n", __FUNCTION__, i);
+		return offset[0];
+	}
+	return offset[i];
+}
+static inline int M_Offset_AO_Reference_Attenuation(int channel)
+{
+	int offset[] =
+	{
+		0x264,
+		0x265,
+		0x266,
+		0x267
+	};
+	if(((unsigned)channel) >= sizeof(offset) / sizeof(offset[0]))
+	{
+		rtdm_printk("%s: invalid channel=%i\n", __FUNCTION__, channel);
+		return offset[0];
+	}
+	return offset[channel];
+}
+static inline unsigned M_Offset_PFI_Output_Select(unsigned n)
+{
+	if(n < 1 || n > NUM_PFI_OUTPUT_SELECT_REGS)
+	{
+		rtdm_printk("%s: invalid pfi output select register=%i\n", __FUNCTION__, n);
+		return M_Offset_PFI_Output_Select_1;
+	}
+	return M_Offset_PFI_Output_Select_1 + (n - 1) * 2;
+}
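+
+/*
+ * Worked example (illustrative only): M_Offset_PFI_Output_Select(3)
+ * returns 0x1d0 + (3 - 1) * 2 == 0x1d4, i.e. M_Offset_PFI_Output_Select_3.
+ */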
+
+#define MSeries_AI_Config_Channel_Type_Mask			(0x7 << 6)
+#define MSeries_AI_Config_Channel_Type_Calibration_Bits		0x0
+#define MSeries_AI_Config_Channel_Type_Differential_Bits	(0x1 << 6)
+#define MSeries_AI_Config_Channel_Type_Common_Ref_Bits		(0x2 << 6)
+#define MSeries_AI_Config_Channel_Type_Ground_Ref_Bits		(0x3 << 6)
+#define MSeries_AI_Config_Channel_Type_Aux_Bits			(0x5 << 6)
+#define MSeries_AI_Config_Channel_Type_Ghost_Bits		(0x7 << 6)
+#define MSeries_AI_Config_Polarity_Bit				0x1000 /* 0 for 2's complement encoding */
+#define MSeries_AI_Config_Dither_Bit				0x2000
+#define MSeries_AI_Config_Last_Channel_Bit			0x4000
+#define MSeries_AI_Config_Channel_Bits(x)			((x) & 0xf)
+#define MSeries_AI_Config_Gain_Bits(x)				(((x) & 0x7) << 9)
+
+static inline
+unsigned int MSeries_AI_Config_Bank_Bits(unsigned int reg_type,
+					 unsigned int channel)
+{
+	unsigned int bits = channel & 0x30;
+	if (reg_type == ni_reg_622x) {
+		if (channel & 0x40)
+			bits |= 0x400;
+	}
+	return bits;
+}
+
+#define MSeries_PLL_In_Source_Select_RTSI0_Bits			0xb
+#define MSeries_PLL_In_Source_Select_Star_Trigger_Bits		0x14
+#define MSeries_PLL_In_Source_Select_RTSI7_Bits			0x1b
+#define MSeries_PLL_In_Source_Select_PXI_Clock10		0x1d
+#define MSeries_PLL_In_Source_Select_Mask			0x1f
+#define MSeries_Timebase1_Select_Bit				0x20 /* use PLL for timebase 1 */
+#define MSeries_Timebase3_Select_Bit				0x40 /* use PLL for timebase 3 */
+/* Use 10 MHz instead of 20 MHz for the RTSI clock frequency.  Appears
+   to have no effect, at least on the PXI-6281, which always uses a
+   20 MHz RTSI clock frequency. */
+#define MSeries_RTSI_10MHz_Bit					0x80
+
+static inline
+unsigned int MSeries_PLL_In_Source_Select_RTSI_Bits(unsigned int RTSI_channel)
+{
+	if(RTSI_channel > 7)
+	{
+		rtdm_printk("%s: bug, invalid RTSI_channel=%i\n", __FUNCTION__, RTSI_channel);
+		return 0;
+	}
+	if(RTSI_channel == 7) return MSeries_PLL_In_Source_Select_RTSI7_Bits;
+	else return MSeries_PLL_In_Source_Select_RTSI0_Bits + RTSI_channel;
+}
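+
+/*
+ * Worked example (illustrative only): for RTSI line 2 the function
+ * returns MSeries_PLL_In_Source_Select_RTSI0_Bits + 2 == 0xd; only
+ * RTSI line 7 uses the separate 0x1b encoding.
+ */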
+
+#define MSeries_PLL_Enable_Bit					0x1000
+#define MSeries_PLL_VCO_Mode_200_325MHz_Bits			0x0
+#define MSeries_PLL_VCO_Mode_175_225MHz_Bits			0x2000
+#define MSeries_PLL_VCO_Mode_100_225MHz_Bits			0x4000
+#define MSeries_PLL_VCO_Mode_75_150MHz_Bits			0x6000
+
+static inline
+unsigned int MSeries_PLL_Divisor_Bits(unsigned int divisor)
+{
+	static const unsigned int max_divisor = 0x10;
+	if(divisor < 1 || divisor > max_divisor)
+	{
+		rtdm_printk("%s: bug, invalid divisor=%i\n", __FUNCTION__, divisor);
+		return 0;
+	}
+	return (divisor & 0xf) << 8;
+}
+static inline
+unsigned int MSeries_PLL_Multiplier_Bits(unsigned int multiplier)
+{
+	static const unsigned int max_multiplier = 0x100;
+	if(multiplier < 1 || multiplier > max_multiplier)
+	{
+		rtdm_printk("%s: bug, invalid multiplier=%i\n", __FUNCTION__, multiplier);
+		return 0;
+	}
+	return multiplier & 0xff;
+}
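+
+/*
+ * Worked example (illustrative only): MSeries_PLL_Divisor_Bits(2)
+ * evaluates to (2 & 0xf) << 8 == 0x200 and MSeries_PLL_Multiplier_Bits(8)
+ * to 0x8; such values would typically be OR-ed together with
+ * MSeries_PLL_Enable_Bit before writing the PLL control register
+ * (M_Offset_PLL_Control).
+ */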
+
+#define MSeries_PLL_Locked_Bit				0x1
+
+#define MSeries_AI_Bypass_Channel_Mask			0x7
+#define MSeries_AI_Bypass_Bank_Mask			0x78
+#define MSeries_AI_Bypass_Cal_Sel_Pos_Mask		0x380
+#define MSeries_AI_Bypass_Cal_Sel_Neg_Mask		0x1c00
+#define MSeries_AI_Bypass_Mode_Mux_Mask			0x6000
+#define MSeries_AO_Bypass_AO_Cal_Sel_Mask		0x38000
+#define MSeries_AI_Bypass_Gain_Mask			0x1c0000
+#define MSeries_AI_Bypass_Dither_Bit			0x200000
+#define MSeries_AI_Bypass_Polarity_Bit			0x400000 /* 0 for 2's complement encoding */
+#define MSeries_AI_Bypass_Config_FIFO_Bit		0x80000000
+#define MSeries_AI_Bypass_Cal_Sel_Pos_Bits(x)		(((x) << 7) & \
+							 MSeries_AI_Bypass_Cal_Sel_Pos_Mask)
+#define MSeries_AI_Bypass_Cal_Sel_Neg_Bits(x)		(((x) << 10) & \
+							 MSeries_AI_Bypass_Cal_Sel_Neg_Mask)
+#define MSeries_AI_Bypass_Gain_Bits(x)			(((x) << 18) & \
+							 MSeries_AI_Bypass_Gain_Mask)
+
+#define MSeries_AO_DAC_Offset_Select_Mask		0x7
+#define MSeries_AO_DAC_Offset_0V_Bits			0x0
+#define MSeries_AO_DAC_Offset_5V_Bits			0x1
+#define MSeries_AO_DAC_Reference_Mask			0x38
+#define MSeries_AO_DAC_Reference_10V_Internal_Bits	0x0
+#define MSeries_AO_DAC_Reference_5V_Internal_Bits	0x8
+#define MSeries_AO_Update_Timed_Bit			0x40
+#define MSeries_AO_Bipolar_Bit				0x80 /* turns on 2's complement encoding */
+
+#define MSeries_Attenuate_x5_Bit			0x1
+
+#define MSeries_Cal_PWM_High_Time_Bits(x)		(((x) << 16) & 0xffff0000)
+#define MSeries_Cal_PWM_Low_Time_Bits(x)		((x) & 0xffff)
+
+#define MSeries_PFI_Output_Select_Mask(x)		(0x1f << (((x) % 3) * 5))
+#define MSeries_PFI_Output_Select_Bits(x, y)		(((y) & 0x1f) << (((x) % 3) * 5))
+/* inverse of MSeries_PFI_Output_Select_Bits() */
+#define MSeries_PFI_Output_Select_Source(x, y)		(((y) >> (((x) % 3) * 5)) & 0x1f)
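+
+/*
+ * Worked example (illustrative only): routing source 0x12 to PFI4 with
+ * MSeries_PFI_Output_Select_Bits(4, 0x12) gives (0x12 & 0x1f) << ((4 % 3) * 5),
+ * i.e. 0x240, in the second PFI output select register (PFI3..PFI5).
+ */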
+
+#define Gi_DMA_BankSW_Error_Bit				0x10
+#define Gi_DMA_Reset_Bit				0x8
+#define Gi_DMA_Int_Enable_Bit				0x4
+#define Gi_DMA_Write_Bit				0x2
+#define Gi_DMA_Enable_Bit				0x1
+
+#define MSeries_PFI_Filter_Select_Mask(x)		(0x3 << ((x) * 2))
+#define MSeries_PFI_Filter_Select_Bits(x, y)		(((y) << ((x) * 2)) & \
+							 MSeries_PFI_Filter_Select_Mask(x))
+
+/* CDIO DMA select bits */
+#define CDI_DMA_Select_Shift	0
+#define CDI_DMA_Select_Mask	0xf
+#define CDO_DMA_Select_Shift	4
+#define CDO_DMA_Select_Mask	(0xf << CDO_DMA_Select_Shift)
+
+/* CDIO status bits */
+#define CDO_FIFO_Empty_Bit	0x1
+#define CDO_FIFO_Full_Bit	0x2
+#define CDO_FIFO_Request_Bit	0x4
+#define CDO_Overrun_Bit		0x8
+#define CDO_Underflow_Bit	0x10
+#define CDI_FIFO_Empty_Bit	0x10000
+#define CDI_FIFO_Full_Bit	0x20000
+#define CDI_FIFO_Request_Bit	0x40000
+#define CDI_Overrun_Bit		0x80000
+#define CDI_Overflow_Bit	0x100000
+
+/* CDIO command bits */
+#define CDO_Disarm_Bit					0x1
+#define CDO_Arm_Bit					0x2
+#define CDI_Disarm_Bit					0x4
+#define CDI_Arm_Bit					0x8
+#define CDO_Reset_Bit					0x10
+#define CDI_Reset_Bit					0x20
+#define CDO_Error_Interrupt_Enable_Set_Bit		0x40
+#define CDO_Error_Interrupt_Enable_Clear_Bit		0x80
+#define CDI_Error_Interrupt_Enable_Set_Bit		0x100
+#define CDI_Error_Interrupt_Enable_Clear_Bit		0x200
+#define CDO_FIFO_Request_Interrupt_Enable_Set_Bit	0x400
+#define CDO_FIFO_Request_Interrupt_Enable_Clear_Bit	0x800
+#define CDI_FIFO_Request_Interrupt_Enable_Set_Bit	0x1000
+#define CDI_FIFO_Request_Interrupt_Enable_Clear_Bit	0x2000
+#define CDO_Error_Interrupt_Confirm_Bit			0x4000
+#define CDI_Error_Interrupt_Confirm_Bit			0x8000
+#define CDO_Empty_FIFO_Interrupt_Enable_Set_Bit		0x10000
+#define CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit	0x20000
+#define CDO_SW_Update_Bit				0x80000
+#define CDI_SW_Update_Bit				0x100000
+
+/* CDIO mode bits */
+#define CDI_Sample_Source_Select_Mask	0x3f
+#define CDI_Halt_On_Error_Bit		0x200
+/* sample clock on falling edge */
+#define CDI_Polarity_Bit		0x400
+/* set for half full mode, clear for not empty mode */
+#define CDI_FIFO_Mode_Bit		0x800
+/* data lanes specify which dio channels map to byte or word accesses
+   to the dio fifos */
+#define CDI_Data_Lane_Mask		0x3000
+#define CDI_Data_Lane_0_15_Bits		0x0
+#define CDI_Data_Lane_16_31_Bits	0x1000
+#define CDI_Data_Lane_0_7_Bits		0x0
+#define CDI_Data_Lane_8_15_Bits		0x1000
+#define CDI_Data_Lane_16_23_Bits	0x2000
+#define CDI_Data_Lane_24_31_Bits	0x3000
+
+/* CDO mode bits */
+#define CDO_Sample_Source_Select_Mask	0x3f
+#define CDO_Retransmit_Bit		0x100
+#define CDO_Halt_On_Error_Bit		0x200
+/* sample clock on falling edge */
+#define CDO_Polarity_Bit		0x400
+/* set for half full mode, clear for not full mode */
+#define CDO_FIFO_Mode_Bit		0x800
+/* data lanes specify which dio channels map to byte or word accesses
+   to the dio fifos */
+#define CDO_Data_Lane_Mask		0x3000
+#define CDO_Data_Lane_0_15_Bits		0x0
+#define CDO_Data_Lane_16_31_Bits	0x1000
+#define CDO_Data_Lane_0_7_Bits		0x0
+#define CDO_Data_Lane_8_15_Bits		0x1000
+#define CDO_Data_Lane_16_23_Bits	0x2000
+#define CDO_Data_Lane_24_31_Bits	0x3000
+
+/* Interrupt C bits */
+#define Interrupt_Group_C_Enable_Bit	0x1
+#define Interrupt_Group_C_Status_Bit	0x1
+
+#define M_SERIES_EEPROM_SIZE 1024
+
+typedef struct ni_board_struct{
+	unsigned short device_id;
+	int isapnp_id;
+	char *name;
+
+	int n_adchan;
+	int adbits;
+
+	int ai_fifo_depth;
+	unsigned int alwaysdither : 1;
+	int gainlkup;
+	int ai_speed;
+
+	int n_aochan;
+	int aobits;
+	struct a4l_rngdesc *ao_range_table;
+	int ao_fifo_depth;
+
+	unsigned ao_speed;
+
+	unsigned num_p0_dio_channels;
+
+	int reg_type;
+	unsigned int ao_unipolar : 1;
+	unsigned int has_8255 : 1;
+	unsigned int has_analog_trig : 1;
+
+	enum caldac_enum caldac[3];
+} ni_board;
+
+#define n_ni_boards  (sizeof(ni_boards)/sizeof(ni_board))
+
+#define MAX_N_CALDACS 34
+#define MAX_N_AO_CHAN 8
+#define NUM_GPCT 2
+
+#define NI_PRIVATE_COMMON					\
+	uint16_t (*stc_readw)(struct a4l_device *dev, int register);	\
+	uint32_t (*stc_readl)(struct a4l_device *dev, int register);	\
+	void (*stc_writew)(struct a4l_device *dev, uint16_t value, int register);	\
+	void (*stc_writel)(struct a4l_device *dev, uint32_t value, int register);	\
+	\
+	int dio_state;						\
+	int pfi_state;						\
+	int io_bits;						\
+	unsigned short dio_output;				\
+	unsigned short dio_control;				\
+	int ao0p,ao1p;						\
+	int lastchan;						\
+	int last_do;						\
+	int rt_irq;						\
+	int irq_polarity;					\
+	int irq_pin;						\
+	int aimode;						\
+	int ai_continuous;					\
+	int blocksize;						\
+	int n_left;						\
+	unsigned int ai_calib_source;				\
+	unsigned int ai_calib_source_enabled;			\
+	rtdm_lock_t window_lock; \
+	rtdm_lock_t soft_reg_copy_lock; \
+	rtdm_lock_t mite_channel_lock; \
+								\
+	int changain_state;					\
+	unsigned int changain_spec;				\
+								\
+	unsigned int caldac_maxdata_list[MAX_N_CALDACS];	\
+	unsigned short ao[MAX_N_AO_CHAN];					\
+	unsigned short caldacs[MAX_N_CALDACS];				\
+								\
+	unsigned short ai_cmd2;	\
+								\
+	unsigned short ao_conf[MAX_N_AO_CHAN];				\
+	unsigned short ao_mode1;				\
+	unsigned short ao_mode2;				\
+	unsigned short ao_mode3;				\
+	unsigned short ao_cmd1;					\
+	unsigned short ao_cmd2;					\
+	unsigned short ao_cmd3;					\
+	unsigned short ao_trigger_select;			\
+								\
+	struct ni_gpct_device *counter_dev;	\
+	unsigned short an_trig_etc_reg;				\
+								\
+	unsigned ai_offset[512];				\
+								\
+	unsigned long serial_interval_ns;                       \
+	unsigned char serial_hw_mode;                           \
+	unsigned short clock_and_fout;				\
+	unsigned short clock_and_fout2;				\
+								\
+	unsigned short int_a_enable_reg;			\
+	unsigned short int_b_enable_reg;			\
+	unsigned short io_bidirection_pin_reg;			\
+	unsigned short rtsi_trig_direction_reg;			\
+	unsigned short rtsi_trig_a_output_reg; \
+	unsigned short rtsi_trig_b_output_reg; \
+	unsigned short pfi_output_select_reg[NUM_PFI_OUTPUT_SELECT_REGS]; \
+	unsigned short ai_ao_select_reg; \
+	unsigned short g0_g1_select_reg; \
+	unsigned short cdio_dma_select_reg; \
+	\
+	unsigned clock_ns; \
+	unsigned clock_source; \
+	\
+	unsigned short atrig_mode;				\
+	unsigned short atrig_high;				\
+	unsigned short atrig_low;				\
+	\
+	unsigned short pwm_up_count;	\
+	unsigned short pwm_down_count;	\
+	\
+	sampl_t ai_fifo_buffer[0x2000];				\
+	uint8_t eeprom_buffer[M_SERIES_EEPROM_SIZE]; \
+	\
+	struct mite_struct *mite; \
+	struct mite_channel *ai_mite_chan; \
+	struct mite_channel *ao_mite_chan;\
+	struct mite_channel *cdo_mite_chan;\
+	struct mite_dma_descriptor_ring *ai_mite_ring; \
+	struct mite_dma_descriptor_ring *ao_mite_ring; \
+	struct mite_dma_descriptor_ring *cdo_mite_ring; \
+	struct mite_dma_descriptor_ring *gpct_mite_ring[NUM_GPCT]; \
+	subd_8255_t subd_8255
+
+
+typedef struct {
+	ni_board *board_ptr;
+	NI_PRIVATE_COMMON;
+} ni_private;
+
+#define devpriv ((ni_private *)dev->priv)
+#define boardtype (*(ni_board *)devpriv->board_ptr)
+
+/* How we access registers */
+
+#define ni_writel(a,b)	(writel((a), devpriv->mite->daq_io_addr + (b)))
+#define ni_readl(a)	(readl(devpriv->mite->daq_io_addr + (a)))
+#define ni_writew(a,b)	(writew((a), devpriv->mite->daq_io_addr + (b)))
+#define ni_readw(a)	(readw(devpriv->mite->daq_io_addr + (a)))
+#define ni_writeb(a,b)	(writeb((a), devpriv->mite->daq_io_addr + (b)))
+#define ni_readb(a)	(readb(devpriv->mite->daq_io_addr + (a)))
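+
+/*
+ * Usage sketch (illustrative, assuming an M Series board where the
+ * DAQ-STC registers are directly memory-mapped through the MITE BAR):
+ *
+ *	ni_writew(devpriv->clock_and_fout, M_Offset_Clock_and_FOUT);
+ *
+ * E Series boards instead reach the DAQ-STC registers through the
+ * Window_Address/Window_Data pair.
+ */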
+
+/* INSN_CONFIG_SET_CLOCK_SRC argument for NI cards */
+#define NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC	0 /* 10 MHz */
+#define NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC	1 /* 100 KHz */
+
+#endif /* __ANALOGY_NI_STC_H__ */
+++ linux-patched/drivers/xenomai/analogy/national_instruments/Makefile	2022-03-21 12:58:31.093872345 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/tio_common.c	1970-01-01 01:00:00.000000000 +0100
+
+ccflags-y += -I$(srctree)/drivers/xenomai/analogy
+
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) += analogy_ni_mite.o
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_TIO) += analogy_ni_tio.o
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_MIO) += analogy_ni_mio.o
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_PCIMIO) += analogy_ni_pcimio.o
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_670x) += analogy_ni_670x.o
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_660x) += analogy_ni_660x.o
+
+analogy_ni_mite-y := mite.o
+analogy_ni_tio-y := tio_common.o
+analogy_ni_mio-y := mio_common.o
+analogy_ni_pcimio-y := pcimio.o
+analogy_ni_670x-y := ni_670x.o
+analogy_ni_660x-y := ni_660x.o
+++ linux-patched/drivers/xenomai/analogy/national_instruments/tio_common.c	2022-03-21 12:58:31.086872413 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/ni_tio.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Hardware driver for NI general purpose counter
+ * Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Description: National Instruments general purpose counters
+ * This module is not used directly by end-users.  Rather, it is used
+ * by other drivers (for example ni_660x and ni_pcimio) to provide
+ * support for NI's general purpose counters.  It was originally based
+ * on the counter code from ni_660x.c and ni_mio_common.c.
+ *
+ * Author:
+ * J.P. Mellor <jpmellor@rose-hulman.edu>
+ * Herman.Bruyninckx@mech.kuleuven.ac.be
+ * Wim.Meeussen@mech.kuleuven.ac.be,
+ * Klaas.Gadeyne@mech.kuleuven.ac.be,
+ * Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * References:
+ * DAQ 660x Register-Level Programmer Manual  (NI 370505A-01)
+ * DAQ 6601/6602 User Manual (NI 322137B-01)
+ * 340934b.pdf  DAQ-STC reference manual
+ *
+ * TODO:
+ * - Support use of both banks X and Y
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <rtdm/analogy/device.h>
+
+#include "ni_tio.h"
+#include "ni_mio.h"
+
+static inline void write_register(struct ni_gpct *counter,
+				  unsigned int bits, enum ni_gpct_register reg)
+{
+	BUG_ON(reg >= NITIO_Num_Registers);
+	counter->counter_dev->write_register(counter, bits, reg);
+}
+
+static inline unsigned int read_register(struct ni_gpct *counter,
+				     enum ni_gpct_register reg)
+{
+	BUG_ON(reg >= NITIO_Num_Registers);
+	return counter->counter_dev->read_register(counter, reg);
+}
+
+struct ni_gpct_device *a4l_ni_gpct_device_construct(struct a4l_device * dev,
+	void (*write_register) (struct ni_gpct * counter, unsigned int bits,
+		enum ni_gpct_register reg),
+	unsigned int (*read_register) (struct ni_gpct * counter,
+		enum ni_gpct_register reg), enum ni_gpct_variant variant,
+	unsigned int num_counters)
+{
+	struct ni_gpct_device *counter_dev =
+		kmalloc(sizeof(struct ni_gpct_device), GFP_KERNEL);
+	if (counter_dev == NULL)
+		return NULL;
+
+	memset(counter_dev, 0, sizeof(struct ni_gpct_device));
+
+	counter_dev->dev = dev;
+	counter_dev->write_register = write_register;
+	counter_dev->read_register = read_register;
+	counter_dev->variant = variant;
+	rtdm_lock_init(&counter_dev->regs_lock);
+	BUG_ON(num_counters == 0);
+
+	counter_dev->counters =
+		kmalloc(sizeof(struct ni_gpct *) * num_counters, GFP_KERNEL);
+
+	if (counter_dev->counters == NULL) {
+		 kfree(counter_dev);
+		return NULL;
+	}
+
+	memset(counter_dev->counters, 0, sizeof(struct ni_gpct *) * num_counters);
+
+	counter_dev->num_counters = num_counters;
+	return counter_dev;
+}
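+
+/*
+ * Usage sketch (illustrative only; the callback names are hypothetical):
+ * a board driver typically allocates its counters once at attach time,
+ * e.g.
+ *
+ *	counter_dev = a4l_ni_gpct_device_construct(dev,
+ *		my_gpct_write, my_gpct_read,
+ *		ni_gpct_variant_m_series, 2);
+ *	if (counter_dev == NULL)
+ *		return -ENOMEM;
+ */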
+
+void a4l_ni_gpct_device_destroy(struct ni_gpct_device *counter_dev)
+{
+	if (counter_dev->counters == NULL)
+		return;
+	kfree(counter_dev->counters);
+	kfree(counter_dev);
+}
+
+static
+int ni_tio_counting_mode_registers_present(const struct ni_gpct_device *counter_dev)
+{
+	switch (counter_dev->variant) {
+	case ni_gpct_variant_e_series:
+		return 0;
+		break;
+	case ni_gpct_variant_m_series:
+	case ni_gpct_variant_660x:
+		return 1;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static
+int ni_tio_second_gate_registers_present(const struct ni_gpct_device *counter_dev)
+{
+	switch (counter_dev->variant) {
+	case ni_gpct_variant_e_series:
+		return 0;
+		break;
+	case ni_gpct_variant_m_series:
+	case ni_gpct_variant_660x:
+		return 1;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline
+void ni_tio_set_bits_transient(struct ni_gpct *counter,
+			       enum ni_gpct_register register_index,
+			       unsigned int bit_mask,
+			       unsigned int bit_values,
+			       unsigned transient_bit_values)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	unsigned long flags;
+
+	BUG_ON(register_index >= NITIO_Num_Registers);
+	rtdm_lock_get_irqsave(&counter_dev->regs_lock, flags);
+	counter_dev->regs[register_index] &= ~bit_mask;
+	counter_dev->regs[register_index] |= (bit_values & bit_mask);
+	write_register(counter,
+		       counter_dev->regs[register_index] | transient_bit_values,
+		       register_index);
+	mmiowb();
+	rtdm_lock_put_irqrestore(&counter_dev->regs_lock, flags);
+}
+
+/* ni_tio_set_bits() safely writes to registers whose bits may be
+   modified in interrupt context, or whose software copy may be read
+   in interrupt context. */
+static inline void ni_tio_set_bits(struct ni_gpct *counter,
+				   enum ni_gpct_register register_index,
+				   unsigned int bit_mask,
+				   unsigned int bit_values)
+{
+	ni_tio_set_bits_transient(counter,
+				  register_index,
+				  bit_mask, bit_values, 0x0);
+}
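+
+/*
+ * Usage sketch (illustrative only): selecting level gating through the
+ * software register copy would look like
+ *
+ *	ni_tio_set_bits(counter,
+ *		NITIO_Gi_Mode_Reg(counter->counter_index),
+ *		Gi_Gating_Mode_Mask, Gi_Level_Gating_Bits);
+ */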
+
+/* ni_tio_get_soft_copy() safely reads the software copy of a register
+   whose bits might be modified in interrupt context, or whose software
+   copy might need to be read in interrupt context. */
+static inline
+unsigned int ni_tio_get_soft_copy(const struct ni_gpct *counter,
+				  enum ni_gpct_register register_index)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	unsigned long flags;
+	unsigned value;
+
+	BUG_ON(register_index >= NITIO_Num_Registers);
+	rtdm_lock_get_irqsave(&counter_dev->regs_lock, flags);
+	value = counter_dev->regs[register_index];
+	rtdm_lock_put_irqrestore(&counter_dev->regs_lock, flags);
+	return value;
+}
+
+static void ni_tio_reset_count_and_disarm(struct ni_gpct *counter)
+{
+	write_register(counter, Gi_Reset_Bit(counter->counter_index),
+		       NITIO_Gxx_Joint_Reset_Reg(counter->counter_index));
+}
+
+void a4l_ni_tio_init_counter(struct ni_gpct *counter)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+
+	ni_tio_reset_count_and_disarm(counter);
+	/* Initialize counter registers */
+	counter_dev->regs[NITIO_Gi_Autoincrement_Reg(counter->counter_index)] =
+		0x0;
+	write_register(counter,
+		counter_dev->regs[NITIO_Gi_Autoincrement_Reg(counter->
+				counter_index)],
+		NITIO_Gi_Autoincrement_Reg(counter->counter_index));
+	ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index),
+		~0, Gi_Synchronize_Gate_Bit);
+	ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index), ~0,
+		0);
+	counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)] = 0x0;
+	write_register(counter,
+		counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)],
+		NITIO_Gi_LoadA_Reg(counter->counter_index));
+	counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)] = 0x0;
+	write_register(counter,
+		counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)],
+		NITIO_Gi_LoadB_Reg(counter->counter_index));
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Input_Select_Reg(counter->counter_index), ~0, 0);
+	if (ni_tio_counting_mode_registers_present(counter_dev)) {
+		ni_tio_set_bits(counter,
+			NITIO_Gi_Counting_Mode_Reg(counter->counter_index), ~0,
+			0);
+	}
+	if (ni_tio_second_gate_registers_present(counter_dev)) {
+		counter_dev->regs[NITIO_Gi_Second_Gate_Reg(counter->
+				counter_index)] = 0x0;
+		write_register(counter,
+			counter_dev->regs[NITIO_Gi_Second_Gate_Reg(counter->
+					counter_index)],
+			NITIO_Gi_Second_Gate_Reg(counter->counter_index));
+	}
+	ni_tio_set_bits(counter,
+		NITIO_Gi_DMA_Config_Reg(counter->counter_index), ~0, 0x0);
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index), ~0, 0x0);
+}
+
+static lsampl_t ni_tio_counter_status(struct ni_gpct *counter)
+{
+	lsampl_t status = 0;
+	unsigned int bits;
+
+	bits = read_register(counter,NITIO_Gxx_Status_Reg(counter->counter_index));
+	if (bits & Gi_Armed_Bit(counter->counter_index)) {
+		status |= A4L_COUNTER_ARMED;
+		if (bits & Gi_Counting_Bit(counter->counter_index))
+			status |= A4L_COUNTER_COUNTING;
+	}
+	return status;
+}
+
+static
+uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter,
+				unsigned int generic_clock_source);
+static
+unsigned int ni_tio_generic_clock_src_select(const struct ni_gpct *counter);
+
+static void ni_tio_set_sync_mode(struct ni_gpct *counter, int force_alt_sync)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned counting_mode_reg =
+		NITIO_Gi_Counting_Mode_Reg(counter->counter_index);
+	static const uint64_t min_normal_sync_period_ps = 25000;
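+	/*
+	 * 25000 ps == 25 ns, i.e. the alternate synchronization mode
+	 * below is selected for source clocks faster than 40 MHz.
+	 */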
+	const uint64_t clock_period_ps = ni_tio_clock_period_ps(counter,
+		ni_tio_generic_clock_src_select(counter));
+
+	if (ni_tio_counting_mode_registers_present(counter_dev) == 0)
+		return;
+
+	switch (ni_tio_get_soft_copy(counter,
+			counting_mode_reg) & Gi_Counting_Mode_Mask) {
+	case Gi_Counting_Mode_QuadratureX1_Bits:
+	case Gi_Counting_Mode_QuadratureX2_Bits:
+	case Gi_Counting_Mode_QuadratureX4_Bits:
+	case Gi_Counting_Mode_Sync_Source_Bits:
+		force_alt_sync = 1;
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * It is not clear what we should do if clock_period is unknown,
+	 * so we do not use the alternate sync bit in that case, but let
+	 * the caller decide via the force_alt_sync parameter.
+	 */
+	if (force_alt_sync ||
+		(clock_period_ps
+			&& clock_period_ps < min_normal_sync_period_ps)) {
+		ni_tio_set_bits(counter, counting_mode_reg,
+			Gi_Alternate_Sync_Bit(counter_dev->variant),
+			Gi_Alternate_Sync_Bit(counter_dev->variant));
+	} else {
+		ni_tio_set_bits(counter, counting_mode_reg,
+			Gi_Alternate_Sync_Bit(counter_dev->variant), 0x0);
+	}
+}
+
+static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned int mode)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	unsigned mode_reg_mask;
+	unsigned mode_reg_values;
+	unsigned input_select_bits = 0;
+
+	/* these bits map directly on to the mode register */
+	static const unsigned mode_reg_direct_mask =
+		NI_GPCT_GATE_ON_BOTH_EDGES_BIT | NI_GPCT_EDGE_GATE_MODE_MASK |
+		NI_GPCT_STOP_MODE_MASK | NI_GPCT_OUTPUT_MODE_MASK |
+		NI_GPCT_HARDWARE_DISARM_MASK | NI_GPCT_LOADING_ON_TC_BIT |
+		NI_GPCT_LOADING_ON_GATE_BIT | NI_GPCT_LOAD_B_SELECT_BIT;
+
+	mode_reg_mask = mode_reg_direct_mask | Gi_Reload_Source_Switching_Bit;
+	mode_reg_values = mode & mode_reg_direct_mask;
+	switch (mode & NI_GPCT_RELOAD_SOURCE_MASK) {
+	case NI_GPCT_RELOAD_SOURCE_FIXED_BITS:
+		break;
+	case NI_GPCT_RELOAD_SOURCE_SWITCHING_BITS:
+		mode_reg_values |= Gi_Reload_Source_Switching_Bit;
+		break;
+	case NI_GPCT_RELOAD_SOURCE_GATE_SELECT_BITS:
+		input_select_bits |= Gi_Gate_Select_Load_Source_Bit;
+		mode_reg_mask |= Gi_Gating_Mode_Mask;
+		mode_reg_values |= Gi_Level_Gating_Bits;
+		break;
+	default:
+		break;
+	}
+	ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index),
+		mode_reg_mask, mode_reg_values);
+
+	if (ni_tio_counting_mode_registers_present(counter_dev)) {
+		unsigned counting_mode_bits = 0;
+		counting_mode_bits |=
+			(mode >> NI_GPCT_COUNTING_MODE_SHIFT) &
+			Gi_Counting_Mode_Mask;
+		counting_mode_bits |=
+			((mode >> NI_GPCT_INDEX_PHASE_BITSHIFT) <<
+			Gi_Index_Phase_Bitshift) & Gi_Index_Phase_Mask;
+		if (mode & NI_GPCT_INDEX_ENABLE_BIT) {
+			counting_mode_bits |= Gi_Index_Mode_Bit;
+		}
+		ni_tio_set_bits(counter,
+			NITIO_Gi_Counting_Mode_Reg(counter->counter_index),
+			Gi_Counting_Mode_Mask | Gi_Index_Phase_Mask |
+			Gi_Index_Mode_Bit, counting_mode_bits);
+		ni_tio_set_sync_mode(counter, 0);
+	}
+
+	ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index),
+		Gi_Up_Down_Mask,
+		(mode >> NI_GPCT_COUNTING_DIRECTION_SHIFT) << Gi_Up_Down_Shift);
+
+	if (mode & NI_GPCT_OR_GATE_BIT) {
+		input_select_bits |= Gi_Or_Gate_Bit;
+	}
+	if (mode & NI_GPCT_INVERT_OUTPUT_BIT) {
+		input_select_bits |= Gi_Output_Polarity_Bit;
+	}
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Input_Select_Reg(counter->counter_index),
+		Gi_Gate_Select_Load_Source_Bit | Gi_Or_Gate_Bit |
+		Gi_Output_Polarity_Bit, input_select_bits);
+
+	return 0;
+}
+
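+/* Arm or disarm the counter. Immediate arming uses the command
+   register transient bits; hardware start triggers are programmed
+   through the HW arm enable/select fields of the counting mode
+   register when that register is present. */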
+static int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned int start_trigger)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+
+	unsigned int command_transient_bits = 0;
+
+	if (arm) {
+		switch (start_trigger) {
+		case NI_GPCT_ARM_IMMEDIATE:
+			command_transient_bits |= Gi_Arm_Bit;
+			break;
+		case NI_GPCT_ARM_PAIRED_IMMEDIATE:
+			command_transient_bits |= Gi_Arm_Bit | Gi_Arm_Copy_Bit;
+			break;
+		default:
+			break;
+		}
+		if (ni_tio_counting_mode_registers_present(counter_dev)) {
+			unsigned counting_mode_bits = 0;
+
+			switch (start_trigger) {
+			case NI_GPCT_ARM_IMMEDIATE:
+			case NI_GPCT_ARM_PAIRED_IMMEDIATE:
+				break;
+			default:
+				if (start_trigger & NI_GPCT_ARM_UNKNOWN) {
+					/* Pass the least significant
+					   bits through so we can work
+					   out which hardware arm
+					   select was requested. */
+					unsigned hw_arm_select_bits =
+						(start_trigger <<
+						Gi_HW_Arm_Select_Shift) &
+						Gi_HW_Arm_Select_Mask
+						(counter_dev->variant);
+
+					counting_mode_bits |=
+						Gi_HW_Arm_Enable_Bit |
+						hw_arm_select_bits;
+				} else {
+					return -EINVAL;
+				}
+				break;
+			}
+			ni_tio_set_bits(counter,
+				NITIO_Gi_Counting_Mode_Reg(counter->
+					counter_index),
+				Gi_HW_Arm_Select_Mask(counter_dev->
+					variant) | Gi_HW_Arm_Enable_Bit,
+				counting_mode_bits);
+		}
+	} else {
+		command_transient_bits |= Gi_Disarm_Bit;
+	}
+	ni_tio_set_bits_transient(counter,
+		NITIO_Gi_Command_Reg(counter->counter_index), 0, 0,
+		command_transient_bits);
+	return 0;
+}
+
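+/* Translate a generic clock source selection into the 660x-specific
+   source select code of the input select register. */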
+static unsigned int ni_660x_source_select_bits(lsampl_t clock_source)
+{
+	unsigned int ni_660x_clock;
+	unsigned int i;
+	const unsigned int clock_select_bits =
+		clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
+
+	switch (clock_select_bits) {
+	case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Timebase_1_Clock;
+		break;
+	case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Timebase_2_Clock;
+		break;
+	case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Timebase_3_Clock;
+		break;
+	case NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Logic_Low_Clock;
+		break;
+	case NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Source_Pin_i_Clock;
+		break;
+	case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Next_Gate_Clock;
+		break;
+	case NI_GPCT_NEXT_TC_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Next_TC_Clock;
+		break;
+	default:
+		for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) {
+			if (clock_select_bits == NI_GPCT_RTSI_CLOCK_SRC_BITS(i)) {
+				ni_660x_clock = NI_660x_RTSI_Clock(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_660x_max_source_pin; ++i) {
+			if (clock_select_bits ==
+				NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(i)) {
+				ni_660x_clock = NI_660x_Source_Pin_Clock(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_source_pin)
+			break;
+		ni_660x_clock = 0;
+		BUG();
+		break;
+	}
+	return Gi_Source_Select_Bits(ni_660x_clock);
+}
+
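+/* Translate a generic clock source selection into the e-series/
+   m-series source select code of the input select register. */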
+static unsigned int ni_m_series_source_select_bits(lsampl_t clock_source)
+{
+	unsigned int ni_m_series_clock;
+	unsigned int i;
+	const unsigned int clock_select_bits =
+		clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
+	switch (clock_select_bits) {
+	case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Timebase_1_Clock;
+		break;
+	case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Timebase_2_Clock;
+		break;
+	case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Timebase_3_Clock;
+		break;
+	case NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Logic_Low_Clock;
+		break;
+	case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Next_Gate_Clock;
+		break;
+	case NI_GPCT_NEXT_TC_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Next_TC_Clock;
+		break;
+	case NI_GPCT_PXI10_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_PXI10_Clock;
+		break;
+	case NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_PXI_Star_Trigger_Clock;
+		break;
+	case NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Analog_Trigger_Out_Clock;
+		break;
+	default:
+		for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) {
+			if (clock_select_bits == NI_GPCT_RTSI_CLOCK_SRC_BITS(i)) {
+				ni_m_series_clock = NI_M_Series_RTSI_Clock(i);
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) {
+			if (clock_select_bits == NI_GPCT_PFI_CLOCK_SRC_BITS(i)) {
+				ni_m_series_clock = NI_M_Series_PFI_Clock(i);
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_pfi_channel)
+			break;
+		__a4l_err("invalid clock source 0x%lx\n",
+			     (unsigned long)clock_source);
+		BUG();
+		ni_m_series_clock = 0;
+		break;
+	}
+	return Gi_Source_Select_Bits(ni_m_series_clock);
+}
+
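+/* On the m-series, the Gi_Source_Subselect bit of the second gate
+   register distinguishes the analog trigger output and PXI star
+   trigger clocks from timebase 3 and the next-gate clock. */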
+static void ni_tio_set_source_subselect(struct ni_gpct *counter,
+					lsampl_t clock_source)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned second_gate_reg =
+		NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+
+	if (counter_dev->variant != ni_gpct_variant_m_series)
+		return;
+	switch (clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) {
+		/* Gi_Source_Subselect is zero */
+	case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS:
+	case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
+		counter_dev->regs[second_gate_reg] &= ~Gi_Source_Subselect_Bit;
+		break;
+		/* Gi_Source_Subselect is one */
+	case NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS:
+	case NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS:
+		counter_dev->regs[second_gate_reg] |= Gi_Source_Subselect_Bit;
+		break;
+		/* Gi_Source_Subselect doesn't matter */
+	default:
+		return;
+		break;
+	}
+	write_register(counter, counter_dev->regs[second_gate_reg],
+		second_gate_reg);
+}
+
+static int ni_tio_set_clock_src(struct ni_gpct *counter,
+				lsampl_t clock_source, lsampl_t period_ns)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	unsigned input_select_bits = 0;
+	static const uint64_t pico_per_nano = 1000;
+
+	/* FIXME: validate clock source */
+	switch (counter_dev->variant) {
+	case ni_gpct_variant_660x:
+		input_select_bits |= ni_660x_source_select_bits(clock_source);
+		break;
+	case ni_gpct_variant_e_series:
+	case ni_gpct_variant_m_series:
+		input_select_bits |=
+			ni_m_series_source_select_bits(clock_source);
+		break;
+	default:
+		BUG();
+		break;
+	}
+	if (clock_source & NI_GPCT_INVERT_CLOCK_SRC_BIT)
+		input_select_bits |= Gi_Source_Polarity_Bit;
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Input_Select_Reg(counter->counter_index),
+		Gi_Source_Select_Mask | Gi_Source_Polarity_Bit,
+		input_select_bits);
+	ni_tio_set_source_subselect(counter, clock_source);
+	if (ni_tio_counting_mode_registers_present(counter_dev)) {
+		const unsigned prescaling_mode =
+			clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK;
+		unsigned counting_mode_bits = 0;
+
+		switch (prescaling_mode) {
+		case NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS:
+			break;
+		case NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS:
+			counting_mode_bits |=
+				Gi_Prescale_X2_Bit(counter_dev->variant);
+			break;
+		case NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS:
+			counting_mode_bits |=
+				Gi_Prescale_X8_Bit(counter_dev->variant);
+			break;
+		default:
+			return -EINVAL;
+			break;
+		}
+		ni_tio_set_bits(counter,
+			NITIO_Gi_Counting_Mode_Reg(counter->counter_index),
+			Gi_Prescale_X2_Bit(counter_dev->
+				variant) | Gi_Prescale_X8_Bit(counter_dev->
+				variant), counting_mode_bits);
+	}
+	counter->clock_period_ps = pico_per_nano * period_ns;
+	ni_tio_set_sync_mode(counter, 0);
+	return 0;
+}
+
+static unsigned int ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned counting_mode_bits = ni_tio_get_soft_copy(counter,
+		NITIO_Gi_Counting_Mode_Reg(counter->counter_index));
+	unsigned int bits = 0;
+
+	if (ni_tio_get_soft_copy(counter,
+			NITIO_Gi_Input_Select_Reg(counter->
+				counter_index)) & Gi_Source_Polarity_Bit)
+		bits |= NI_GPCT_INVERT_CLOCK_SRC_BIT;
+	if (counting_mode_bits & Gi_Prescale_X2_Bit(counter_dev->variant))
+		bits |= NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS;
+	if (counting_mode_bits & Gi_Prescale_X8_Bit(counter_dev->variant))
+		bits |= NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS;
+	return bits;
+}
+
+static unsigned int ni_m_series_clock_src_select(const struct ni_gpct *counter)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int second_gate_reg =
+		NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+	unsigned int i, clock_source = 0;
+
+	const unsigned int input_select = (ni_tio_get_soft_copy(counter,
+			NITIO_Gi_Input_Select_Reg(counter->
+				counter_index)) & Gi_Source_Select_Mask) >>
+		Gi_Source_Select_Shift;
+
+	switch (input_select) {
+	case NI_M_Series_Timebase_1_Clock:
+		clock_source = NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS;
+		break;
+	case NI_M_Series_Timebase_2_Clock:
+		clock_source = NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS;
+		break;
+	case NI_M_Series_Timebase_3_Clock:
+		if (counter_dev->
+			regs[second_gate_reg] & Gi_Source_Subselect_Bit)
+			clock_source =
+				NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS;
+		else
+			clock_source = NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS;
+		break;
+	case NI_M_Series_Logic_Low_Clock:
+		clock_source = NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS;
+		break;
+	case NI_M_Series_Next_Gate_Clock:
+		if (counter_dev->
+			regs[second_gate_reg] & Gi_Source_Subselect_Bit)
+			clock_source = NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS;
+		else
+			clock_source = NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS;
+		break;
+	case NI_M_Series_PXI10_Clock:
+		clock_source = NI_GPCT_PXI10_CLOCK_SRC_BITS;
+		break;
+	case NI_M_Series_Next_TC_Clock:
+		clock_source = NI_GPCT_NEXT_TC_CLOCK_SRC_BITS;
+		break;
+	default:
+		for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) {
+			if (input_select == NI_M_Series_RTSI_Clock(i)) {
+				clock_source = NI_GPCT_RTSI_CLOCK_SRC_BITS(i);
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) {
+			if (input_select == NI_M_Series_PFI_Clock(i)) {
+				clock_source = NI_GPCT_PFI_CLOCK_SRC_BITS(i);
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_pfi_channel)
+			break;
+		BUG();
+		break;
+	}
+	clock_source |= ni_tio_clock_src_modifiers(counter);
+	return clock_source;
+}
+
+static unsigned int ni_660x_clock_src_select(const struct ni_gpct *counter)
+{
+	unsigned int i, clock_source = 0;
+	const unsigned input_select = (ni_tio_get_soft_copy(counter,
+			NITIO_Gi_Input_Select_Reg(counter->
+				counter_index)) & Gi_Source_Select_Mask) >>
+		Gi_Source_Select_Shift;
+
+	switch (input_select) {
+	case NI_660x_Timebase_1_Clock:
+		clock_source = NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS;
+		break;
+	case NI_660x_Timebase_2_Clock:
+		clock_source = NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS;
+		break;
+	case NI_660x_Timebase_3_Clock:
+		clock_source = NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS;
+		break;
+	case NI_660x_Logic_Low_Clock:
+		clock_source = NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS;
+		break;
+	case NI_660x_Source_Pin_i_Clock:
+		clock_source = NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS;
+		break;
+	case NI_660x_Next_Gate_Clock:
+		clock_source = NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS;
+		break;
+	case NI_660x_Next_TC_Clock:
+		clock_source = NI_GPCT_NEXT_TC_CLOCK_SRC_BITS;
+		break;
+	default:
+		for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) {
+			if (input_select == NI_660x_RTSI_Clock(i)) {
+				clock_source = NI_GPCT_RTSI_CLOCK_SRC_BITS(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_660x_max_source_pin; ++i) {
+			if (input_select == NI_660x_Source_Pin_Clock(i)) {
+				clock_source =
+					NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_source_pin)
+			break;
+		BUG();
+		break;
+	}
+	clock_source |= ni_tio_clock_src_modifiers(counter);
+	return clock_source;
+}
+
+static unsigned int ni_tio_generic_clock_src_select(const struct ni_gpct *counter)
+{
+	switch (counter->counter_dev->variant) {
+	case ni_gpct_variant_e_series:
+	case ni_gpct_variant_m_series:
+		return ni_m_series_clock_src_select(counter);
+		break;
+	case ni_gpct_variant_660x:
+		return ni_660x_clock_src_select(counter);
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
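+/* Return the source clock period in picoseconds: 50 ns (20 MHz) for
+   timebase 1, 10 us (100 kHz) for timebase 2, 12.5 ns (80 MHz) for
+   timebase 3 and 100 ns (10 MHz) for PXI10, scaled by the prescaler.
+   Other sources use the period supplied by the user. */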
+static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter,
+				       unsigned int generic_clock_source)
+{
+	uint64_t clock_period_ps;
+
+	switch (generic_clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) {
+	case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
+		clock_period_ps = 50000;
+		break;
+	case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS:
+		clock_period_ps = 10000000;
+		break;
+	case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
+		clock_period_ps = 12500;
+		break;
+	case NI_GPCT_PXI10_CLOCK_SRC_BITS:
+		clock_period_ps = 100000;
+		break;
+	default:
+		/* Clock period is specified by user with prescaling
+		   already taken into account. */
+		return counter->clock_period_ps;
+		break;
+	}
+
+	switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) {
+	case NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS:
+		break;
+	case NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS:
+		clock_period_ps *= 2;
+		break;
+	case NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS:
+		clock_period_ps *= 8;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return clock_period_ps;
+}
+
+static void ni_tio_get_clock_src(struct ni_gpct *counter,
+				 unsigned int *clock_source,
+				 unsigned int *period_ns)
+{
+	static const unsigned int pico_per_nano = 1000;
+	uint64_t temp64;
+
+	*clock_source = ni_tio_generic_clock_src_select(counter);
+	temp64 = ni_tio_clock_period_ps(counter, *clock_source);
+	do_div(temp64, pico_per_nano);
+	*period_ns = temp64;
+}
+
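+/* Map the CR_INVERT and CR_EDGE flags of the gate source onto the
+   gate polarity and rising-edge/level gating mode bits. */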
+static void ni_tio_set_first_gate_modifiers(struct ni_gpct *counter,
+					    lsampl_t gate_source)
+{
+	const unsigned int mode_mask = Gi_Gate_Polarity_Bit | Gi_Gating_Mode_Mask;
+	unsigned int mode_values = 0;
+
+	if (gate_source & CR_INVERT) {
+		mode_values |= Gi_Gate_Polarity_Bit;
+	}
+	if (gate_source & CR_EDGE) {
+		mode_values |= Gi_Rising_Edge_Gating_Bits;
+	} else {
+		mode_values |= Gi_Level_Gating_Bits;
+	}
+	ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index),
+		mode_mask, mode_values);
+}
+
+static int ni_660x_set_first_gate(struct ni_gpct *counter, lsampl_t gate_source)
+{
+	const unsigned int selected_gate = CR_CHAN(gate_source);
+	/* Bits of selected_gate that may be meaningful to
+	   input select register */
+	const unsigned int selected_gate_mask = 0x1f;
+	unsigned ni_660x_gate_select;
+	unsigned i;
+
+	switch (selected_gate) {
+	case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
+		ni_660x_gate_select = NI_660x_Next_SRC_Gate_Select;
+		break;
+	case NI_GPCT_NEXT_OUT_GATE_SELECT:
+	case NI_GPCT_LOGIC_LOW_GATE_SELECT:
+	case NI_GPCT_SOURCE_PIN_i_GATE_SELECT:
+	case NI_GPCT_GATE_PIN_i_GATE_SELECT:
+		ni_660x_gate_select = selected_gate & selected_gate_mask;
+		break;
+	default:
+		for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) {
+			if (selected_gate == NI_GPCT_RTSI_GATE_SELECT(i)) {
+				ni_660x_gate_select =
+					selected_gate & selected_gate_mask;
+				break;
+			}
+		}
+		if (i <= ni_660x_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_660x_max_gate_pin; ++i) {
+			if (selected_gate == NI_GPCT_GATE_PIN_GATE_SELECT(i)) {
+				ni_660x_gate_select =
+					selected_gate & selected_gate_mask;
+				break;
+			}
+		}
+		if (i <= ni_660x_max_gate_pin)
+			break;
+		return -EINVAL;
+		break;
+	}
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Input_Select_Reg(counter->counter_index),
+		Gi_Gate_Select_Mask, Gi_Gate_Select_Bits(ni_660x_gate_select));
+	return 0;
+}
+
+static int ni_m_series_set_first_gate(struct ni_gpct *counter,
+				      lsampl_t gate_source)
+{
+	const unsigned int selected_gate = CR_CHAN(gate_source);
+	/* bits of selected_gate that may be meaningful to input select register */
+	const unsigned int selected_gate_mask = 0x1f;
+	unsigned int i, ni_m_series_gate_select;
+
+	switch (selected_gate) {
+	case NI_GPCT_TIMESTAMP_MUX_GATE_SELECT:
+	case NI_GPCT_AI_START2_GATE_SELECT:
+	case NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT:
+	case NI_GPCT_NEXT_OUT_GATE_SELECT:
+	case NI_GPCT_AI_START1_GATE_SELECT:
+	case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
+	case NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT:
+	case NI_GPCT_LOGIC_LOW_GATE_SELECT:
+		ni_m_series_gate_select = selected_gate & selected_gate_mask;
+		break;
+	default:
+		for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) {
+			if (selected_gate == NI_GPCT_RTSI_GATE_SELECT(i)) {
+				ni_m_series_gate_select =
+					selected_gate & selected_gate_mask;
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) {
+			if (selected_gate == NI_GPCT_PFI_GATE_SELECT(i)) {
+				ni_m_series_gate_select =
+					selected_gate & selected_gate_mask;
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_pfi_channel)
+			break;
+		return -EINVAL;
+		break;
+	}
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Input_Select_Reg(counter->counter_index),
+		Gi_Gate_Select_Mask,
+		Gi_Gate_Select_Bits(ni_m_series_gate_select));
+	return 0;
+}
+
+static int ni_660x_set_second_gate(struct ni_gpct *counter,
+				   lsampl_t gate_source)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int second_gate_reg =
+		NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+	const unsigned int selected_second_gate = CR_CHAN(gate_source);
+	/* bits of second_gate that may be meaningful to second gate register */
+	static const unsigned int selected_second_gate_mask = 0x1f;
+	unsigned int i, ni_660x_second_gate_select;
+
+	switch (selected_second_gate) {
+	case NI_GPCT_SOURCE_PIN_i_GATE_SELECT:
+	case NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT:
+	case NI_GPCT_SELECTED_GATE_GATE_SELECT:
+	case NI_GPCT_NEXT_OUT_GATE_SELECT:
+	case NI_GPCT_LOGIC_LOW_GATE_SELECT:
+		ni_660x_second_gate_select =
+			selected_second_gate & selected_second_gate_mask;
+		break;
+	case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
+		ni_660x_second_gate_select =
+			NI_660x_Next_SRC_Second_Gate_Select;
+		break;
+	default:
+		for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) {
+			if (selected_second_gate == NI_GPCT_RTSI_GATE_SELECT(i)) {
+				ni_660x_second_gate_select =
+					selected_second_gate &
+					selected_second_gate_mask;
+				break;
+			}
+		}
+		if (i <= ni_660x_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_660x_max_up_down_pin; ++i) {
+			if (selected_second_gate ==
+				NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i)) {
+				ni_660x_second_gate_select =
+					selected_second_gate &
+					selected_second_gate_mask;
+				break;
+			}
+		}
+		if (i <= ni_660x_max_up_down_pin)
+			break;
+		return -EINVAL;
+		break;
+	}
+	counter_dev->regs[second_gate_reg] |= Gi_Second_Gate_Mode_Bit;
+	counter_dev->regs[second_gate_reg] &= ~Gi_Second_Gate_Select_Mask;
+	counter_dev->regs[second_gate_reg] |=
+		Gi_Second_Gate_Select_Bits(ni_660x_second_gate_select);
+	write_register(counter, counter_dev->regs[second_gate_reg],
+		second_gate_reg);
+	return 0;
+}
+
+static int ni_m_series_set_second_gate(struct ni_gpct *counter,
+				       lsampl_t gate_source)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int second_gate_reg =
+		NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+	const unsigned int selected_second_gate = CR_CHAN(gate_source);
+	/* Bits of second_gate that may be meaningful to second gate register */
+	static const unsigned int selected_second_gate_mask = 0x1f;
+	unsigned int ni_m_series_second_gate_select;
+
+	/* FIXME: We don't know what the m-series second gate codes
+	   are, so we'll just pass the bits through for now. */
+	switch (selected_second_gate) {
+	default:
+		ni_m_series_second_gate_select =
+			selected_second_gate & selected_second_gate_mask;
+		break;
+	}
+	counter_dev->regs[second_gate_reg] |= Gi_Second_Gate_Mode_Bit;
+	counter_dev->regs[second_gate_reg] &= ~Gi_Second_Gate_Select_Mask;
+	counter_dev->regs[second_gate_reg] |=
+		Gi_Second_Gate_Select_Bits(ni_m_series_second_gate_select);
+	write_register(counter, counter_dev->regs[second_gate_reg],
+		second_gate_reg);
+	return 0;
+}
+
+static int ni_tio_set_gate_src(struct ni_gpct *counter,
+			       unsigned int gate_index, lsampl_t gate_source)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int second_gate_reg =
+		NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+
+	switch (gate_index) {
+	case 0:
+		if (CR_CHAN(gate_source) == NI_GPCT_DISABLED_GATE_SELECT) {
+			ni_tio_set_bits(counter,
+				NITIO_Gi_Mode_Reg(counter->counter_index),
+				Gi_Gating_Mode_Mask, Gi_Gating_Disabled_Bits);
+			return 0;
+		}
+		ni_tio_set_first_gate_modifiers(counter, gate_source);
+		switch (counter_dev->variant) {
+		case ni_gpct_variant_e_series:
+		case ni_gpct_variant_m_series:
+			return ni_m_series_set_first_gate(counter, gate_source);
+			break;
+		case ni_gpct_variant_660x:
+			return ni_660x_set_first_gate(counter, gate_source);
+			break;
+		default:
+			BUG();
+			break;
+		}
+		break;
+	case 1:
+		if (ni_tio_second_gate_registers_present(counter_dev) == 0)
+			return -EINVAL;
+		if (CR_CHAN(gate_source) == NI_GPCT_DISABLED_GATE_SELECT) {
+			counter_dev->regs[second_gate_reg] &=
+				~Gi_Second_Gate_Mode_Bit;
+			write_register(counter,
+				counter_dev->regs[second_gate_reg],
+				second_gate_reg);
+			return 0;
+		}
+		if (gate_source & CR_INVERT) {
+			counter_dev->regs[second_gate_reg] |=
+				Gi_Second_Gate_Polarity_Bit;
+		} else {
+			counter_dev->regs[second_gate_reg] &=
+				~Gi_Second_Gate_Polarity_Bit;
+		}
+		switch (counter_dev->variant) {
+		case ni_gpct_variant_m_series:
+			return ni_m_series_set_second_gate(counter,
+				gate_source);
+			break;
+		case ni_gpct_variant_660x:
+			return ni_660x_set_second_gate(counter, gate_source);
+			break;
+		default:
+			BUG();
+			break;
+		}
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+	return 0;
+}
+
+static int ni_tio_set_other_src(struct ni_gpct *counter,
+				unsigned int index, unsigned int source)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+
+	if (counter_dev->variant == ni_gpct_variant_m_series) {
+		unsigned int abz_reg, shift, mask;
+
+		abz_reg = NITIO_Gi_ABZ_Reg(counter->counter_index);
+		switch (index) {
+		case NI_GPCT_SOURCE_ENCODER_A:
+			shift = 10;
+			break;
+		case NI_GPCT_SOURCE_ENCODER_B:
+			shift = 5;
+			break;
+		case NI_GPCT_SOURCE_ENCODER_Z:
+			shift = 0;
+			break;
+		default:
+			return -EINVAL;
+			break;
+		}
+		mask = 0x1f << shift;
+		if (source > 0x1f) {
+			/* Disable gate */
+			source = 0x1f;
+		}
+		counter_dev->regs[abz_reg] &= ~mask;
+		counter_dev->regs[abz_reg] |= (source << shift) & mask;
+		write_register(counter, counter_dev->regs[abz_reg], abz_reg);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static unsigned int ni_660x_first_gate_to_generic_gate_source(unsigned int ni_660x_gate_select)
+{
+	unsigned int i;
+
+	switch (ni_660x_gate_select) {
+	case NI_660x_Source_Pin_i_Gate_Select:
+		return NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
+		break;
+	case NI_660x_Gate_Pin_i_Gate_Select:
+		return NI_GPCT_GATE_PIN_i_GATE_SELECT;
+		break;
+	case NI_660x_Next_SRC_Gate_Select:
+		return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+		break;
+	case NI_660x_Next_Out_Gate_Select:
+		return NI_GPCT_NEXT_OUT_GATE_SELECT;
+		break;
+	case NI_660x_Logic_Low_Gate_Select:
+		return NI_GPCT_LOGIC_LOW_GATE_SELECT;
+		break;
+	default:
+		for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) {
+			if (ni_660x_gate_select == NI_660x_RTSI_Gate_Select(i)) {
+				return NI_GPCT_RTSI_GATE_SELECT(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_660x_max_gate_pin; ++i) {
+			if (ni_660x_gate_select ==
+				NI_660x_Gate_Pin_Gate_Select(i)) {
+				return NI_GPCT_GATE_PIN_GATE_SELECT(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_gate_pin)
+			break;
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static unsigned int ni_m_series_first_gate_to_generic_gate_source(unsigned int
+	ni_m_series_gate_select)
+{
+	unsigned int i;
+
+	switch (ni_m_series_gate_select) {
+	case NI_M_Series_Timestamp_Mux_Gate_Select:
+		return NI_GPCT_TIMESTAMP_MUX_GATE_SELECT;
+		break;
+	case NI_M_Series_AI_START2_Gate_Select:
+		return NI_GPCT_AI_START2_GATE_SELECT;
+		break;
+	case NI_M_Series_PXI_Star_Trigger_Gate_Select:
+		return NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT;
+		break;
+	case NI_M_Series_Next_Out_Gate_Select:
+		return NI_GPCT_NEXT_OUT_GATE_SELECT;
+		break;
+	case NI_M_Series_AI_START1_Gate_Select:
+		return NI_GPCT_AI_START1_GATE_SELECT;
+		break;
+	case NI_M_Series_Next_SRC_Gate_Select:
+		return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+		break;
+	case NI_M_Series_Analog_Trigger_Out_Gate_Select:
+		return NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT;
+		break;
+	case NI_M_Series_Logic_Low_Gate_Select:
+		return NI_GPCT_LOGIC_LOW_GATE_SELECT;
+		break;
+	default:
+		for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) {
+			if (ni_m_series_gate_select ==
+				NI_M_Series_RTSI_Gate_Select(i)) {
+				return NI_GPCT_RTSI_GATE_SELECT(i);
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) {
+			if (ni_m_series_gate_select ==
+				NI_M_Series_PFI_Gate_Select(i)) {
+				return NI_GPCT_PFI_GATE_SELECT(i);
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_pfi_channel)
+			break;
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static unsigned int ni_660x_second_gate_to_generic_gate_source(unsigned int
+	ni_660x_gate_select)
+{
+	unsigned int i;
+
+	switch (ni_660x_gate_select) {
+	case NI_660x_Source_Pin_i_Second_Gate_Select:
+		return NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
+		break;
+	case NI_660x_Up_Down_Pin_i_Second_Gate_Select:
+		return NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT;
+		break;
+	case NI_660x_Next_SRC_Second_Gate_Select:
+		return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+		break;
+	case NI_660x_Next_Out_Second_Gate_Select:
+		return NI_GPCT_NEXT_OUT_GATE_SELECT;
+		break;
+	case NI_660x_Selected_Gate_Second_Gate_Select:
+		return NI_GPCT_SELECTED_GATE_GATE_SELECT;
+		break;
+	case NI_660x_Logic_Low_Second_Gate_Select:
+		return NI_GPCT_LOGIC_LOW_GATE_SELECT;
+		break;
+	default:
+		for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) {
+			if (ni_660x_gate_select ==
+				NI_660x_RTSI_Second_Gate_Select(i)) {
+				return NI_GPCT_RTSI_GATE_SELECT(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_660x_max_up_down_pin; ++i) {
+			if (ni_660x_gate_select ==
+				NI_660x_Up_Down_Pin_Second_Gate_Select(i)) {
+				return NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_up_down_pin)
+			break;
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static unsigned int ni_m_series_second_gate_to_generic_gate_source(unsigned int
+	ni_m_series_gate_select)
+{
+	/* FIXME: the second gate sources for the m series are
+	   undocumented, so we just return the raw bits for now. */
+	switch (ni_m_series_gate_select) {
+	default:
+		return ni_m_series_gate_select;
+		break;
+	}
+	return 0;
+}
+
+static int ni_tio_get_gate_src(struct ni_gpct *counter,
+			       unsigned int gate_index,
+			       unsigned int *gate_source)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int mode_bits = ni_tio_get_soft_copy(counter,
+		NITIO_Gi_Mode_Reg(counter->counter_index));
+	const unsigned int second_gate_reg =
+		NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+	unsigned int gate_select_bits;
+
+	switch (gate_index) {
+	case 0:
+		if ((mode_bits & Gi_Gating_Mode_Mask) ==
+			Gi_Gating_Disabled_Bits) {
+			*gate_source = NI_GPCT_DISABLED_GATE_SELECT;
+			return 0;
+		} else {
+			gate_select_bits =
+				(ni_tio_get_soft_copy(counter,
+					NITIO_Gi_Input_Select_Reg(counter->
+						counter_index)) &
+				Gi_Gate_Select_Mask) >> Gi_Gate_Select_Shift;
+		}
+		switch (counter_dev->variant) {
+		case ni_gpct_variant_e_series:
+		case ni_gpct_variant_m_series:
+			*gate_source =
+				ni_m_series_first_gate_to_generic_gate_source
+				(gate_select_bits);
+			break;
+		case ni_gpct_variant_660x:
+			*gate_source =
+				ni_660x_first_gate_to_generic_gate_source
+				(gate_select_bits);
+			break;
+		default:
+			BUG();
+			break;
+		}
+		if (mode_bits & Gi_Gate_Polarity_Bit) {
+			*gate_source |= CR_INVERT;
+		}
+		if ((mode_bits & Gi_Gating_Mode_Mask) != Gi_Level_Gating_Bits) {
+			*gate_source |= CR_EDGE;
+		}
+		break;
+	case 1:
+		if ((mode_bits & Gi_Gating_Mode_Mask) == Gi_Gating_Disabled_Bits
+			|| (counter_dev->
+				regs[second_gate_reg] & Gi_Second_Gate_Mode_Bit)
+			== 0) {
+			*gate_source = NI_GPCT_DISABLED_GATE_SELECT;
+			return 0;
+		} else {
+			gate_select_bits =
+				(counter_dev->
+				regs[second_gate_reg] &
+				Gi_Second_Gate_Select_Mask) >>
+				Gi_Second_Gate_Select_Shift;
+		}
+		switch (counter_dev->variant) {
+		case ni_gpct_variant_e_series:
+		case ni_gpct_variant_m_series:
+			*gate_source =
+				ni_m_series_second_gate_to_generic_gate_source
+				(gate_select_bits);
+			break;
+		case ni_gpct_variant_660x:
+			*gate_source =
+				ni_660x_second_gate_to_generic_gate_source
+				(gate_select_bits);
+			break;
+		default:
+			BUG();
+			break;
+		}
+		if (counter_dev->
+			regs[second_gate_reg] & Gi_Second_Gate_Polarity_Bit) {
+			*gate_source |= CR_INVERT;
+		}
+		/* Second gate can't have edge/level mode set independently */
+		if ((mode_bits & Gi_Gating_Mode_Mask) != Gi_Level_Gating_Bits) {
+			*gate_source |= CR_EDGE;
+		}
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+	return 0;
+}
+
+int a4l_ni_tio_insn_config(struct ni_gpct *counter, struct a4l_kernel_instruction *insn)
+{
+	unsigned int *data = (unsigned int *)insn->data;
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_SET_COUNTER_MODE:
+		return ni_tio_set_counter_mode(counter, data[1]);
+		break;
+	case A4L_INSN_CONFIG_ARM:
+		return ni_tio_arm(counter, 1, data[1]);
+		break;
+	case A4L_INSN_CONFIG_DISARM:
+		ni_tio_arm(counter, 0, 0);
+		return 0;
+		break;
+	case A4L_INSN_CONFIG_GET_COUNTER_STATUS:
+		data[1] = ni_tio_counter_status(counter);
+		data[2] = counter_status_mask;
+		return 0;
+		break;
+	case A4L_INSN_CONFIG_SET_CLOCK_SRC:
+		return ni_tio_set_clock_src(counter, data[1], data[2]);
+		break;
+	case A4L_INSN_CONFIG_GET_CLOCK_SRC:
+		ni_tio_get_clock_src(counter, &data[1], &data[2]);
+		return 0;
+		break;
+	case A4L_INSN_CONFIG_SET_GATE_SRC:
+		return ni_tio_set_gate_src(counter, data[1], data[2]);
+		break;
+	case A4L_INSN_CONFIG_GET_GATE_SRC:
+		return ni_tio_get_gate_src(counter, data[1], &data[2]);
+		break;
+	case A4L_INSN_CONFIG_SET_OTHER_SRC:
+		return ni_tio_set_other_src(counter, data[1], data[2]);
+		break;
+	case A4L_INSN_CONFIG_RESET:
+		ni_tio_reset_count_and_disarm(counter);
+		return 0;
+		break;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
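+/* Read instruction handler: channel 0 latches and returns the
+   current count through the SW save register, channels 1 and 2
+   return the soft copies of the load A/B registers. */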
+int a4l_ni_tio_rinsn(struct ni_gpct *counter, struct a4l_kernel_instruction *insn)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int channel = CR_CHAN(insn->chan_desc);
+	unsigned int first_read;
+	unsigned int second_read;
+	unsigned int correct_read;
+
+	uint32_t *data = (uint32_t *)insn->data;
+
+	if (insn->data_size != sizeof(uint32_t))
+		return -EINVAL;
+
+	switch (channel) {
+	case 0:
+		ni_tio_set_bits(counter,
+			NITIO_Gi_Command_Reg(counter->counter_index),
+			Gi_Save_Trace_Bit, 0);
+		ni_tio_set_bits(counter,
+			NITIO_Gi_Command_Reg(counter->counter_index),
+			Gi_Save_Trace_Bit, Gi_Save_Trace_Bit);
+		/* The count doesn't get latched until the next clock
+		   edge, so it is possible the count may change (once)
+		   while we are reading.  Since the read of the
+		   SW_Save_Reg isn't atomic (apparently even when it's a
+		   32 bit register according to 660x docs), we need to
+		   read twice and make sure the reading hasn't changed.
+		   If it has, a third read will be correct since the
+		   count value will definitely have latched by then. */
+		first_read =
+			read_register(counter,
+			NITIO_Gi_SW_Save_Reg(counter->counter_index));
+		second_read =
+			read_register(counter,
+			NITIO_Gi_SW_Save_Reg(counter->counter_index));
+		if (first_read != second_read)
+			correct_read =
+				read_register(counter,
+				NITIO_Gi_SW_Save_Reg(counter->counter_index));
+		else
+			correct_read = first_read;
+		data[0] = correct_read;
+		return 0;
+		break;
+	case 1:
+		data[0] = counter_dev->regs
+			[NITIO_Gi_LoadA_Reg(counter->counter_index)];
+		break;
+	case 2:
+		data[0] = counter_dev->regs
+			[NITIO_Gi_LoadB_Reg(counter->counter_index)];
+		break;
+	}
+
+	return 0;
+}
+
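+/* Return the load register (A or B) the counter will use for its
+   next load, as reported by the joint status register. */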
+static unsigned int ni_tio_next_load_register(struct ni_gpct *counter)
+{
+	const unsigned int bits = read_register(counter,
+		NITIO_Gxx_Status_Reg(counter->counter_index));
+
+	if (bits & Gi_Next_Load_Source_Bit(counter->counter_index)) {
+		return NITIO_Gi_LoadB_Reg(counter->counter_index);
+	} else {
+		return NITIO_Gi_LoadA_Reg(counter->counter_index);
+	}
+}
+
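+/* Write instruction handler: channel 0 loads an immediate count
+   through whichever load register is currently selected, channels 1
+   and 2 update the load A/B registers. */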
+int a4l_ni_tio_winsn(struct ni_gpct *counter, struct a4l_kernel_instruction *insn)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int channel = CR_CHAN(insn->chan_desc);
+	unsigned int load_reg;
+
+	uint32_t *data = (uint32_t *)insn->data;
+
+	if (insn->data_size != sizeof(uint32_t))
+		return -EINVAL;
+
+	switch (channel) {
+	case 0:
+		/* Unsafe if counter is armed.  Should probably check
+		   status and return -EBUSY if armed. */
+		/* Don't disturb load source select, just use
+		   whichever load register is already selected. */
+		load_reg = ni_tio_next_load_register(counter);
+		write_register(counter, data[0], load_reg);
+		ni_tio_set_bits_transient(counter,
+			NITIO_Gi_Command_Reg(counter->counter_index), 0, 0,
+			Gi_Load_Bit);
+		/* Restore the load register to whatever the user last
+		   set it to. */
+		write_register(counter, counter_dev->regs[load_reg], load_reg);
+		break;
+	case 1:
+		counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)] =
+			data[0];
+		write_register(counter, data[0],
+			NITIO_Gi_LoadA_Reg(counter->counter_index));
+		break;
+	case 2:
+		counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)] =
+			data[0];
+		write_register(counter, data[0],
+			NITIO_Gi_LoadB_Reg(counter->counter_index));
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+
+	return 0;
+}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
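+/* Enable or disable DMA servicing of the counter: program the
+   read/write interrupt acknowledge bits of the input select register
+   and, on m-series/660x, the enable/interrupt/direction bits of the
+   DMA configuration register. */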
+static void ni_tio_configure_dma(struct ni_gpct *counter,
+				 short enable, short read_not_write)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	unsigned int input_select_bits = 0;
+
+	if (enable) {
+		if (read_not_write) {
+			input_select_bits |= Gi_Read_Acknowledges_Irq;
+		} else {
+			input_select_bits |= Gi_Write_Acknowledges_Irq;
+		}
+	}
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Input_Select_Reg(counter->counter_index),
+		Gi_Read_Acknowledges_Irq | Gi_Write_Acknowledges_Irq,
+		input_select_bits);
+	switch (counter_dev->variant) {
+	case ni_gpct_variant_e_series:
+		break;
+	case ni_gpct_variant_m_series:
+	case ni_gpct_variant_660x:
+		{
+			unsigned gi_dma_config_bits = 0;
+
+			if (enable) {
+				gi_dma_config_bits |= Gi_DMA_Enable_Bit;
+				gi_dma_config_bits |= Gi_DMA_Int_Bit;
+			}
+			if (read_not_write == 0) {
+				gi_dma_config_bits |= Gi_DMA_Write_Bit;
+			}
+			ni_tio_set_bits(counter,
+				NITIO_Gi_DMA_Config_Reg(counter->counter_index),
+				Gi_DMA_Enable_Bit | Gi_DMA_Int_Bit |
+				Gi_DMA_Write_Bit, gi_dma_config_bits);
+		}
+		break;
+	}
+}
+
+/* TODO: a4l_ni_tio_input_inttrig is left unused because the trigger
+   callback cannot be changed at run time */
+int a4l_ni_tio_input_inttrig(struct ni_gpct *counter, lsampl_t trignum)
+{
+	unsigned long flags;
+	int retval = 0;
+
+	BUG_ON(counter == NULL);
+	if (trignum != 0)
+		return -EINVAL;
+
+	rtdm_lock_get_irqsave(&counter->lock, flags);
+	if (counter->mite_chan)
+		a4l_mite_dma_arm(counter->mite_chan);
+	else
+		retval = -EIO;
+	rtdm_lock_put_irqrestore(&counter->lock, flags);
+	if (retval < 0)
+		return retval;
+	retval = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE);
+
+	/* TODO: disable the trigger until a command is recorded.
+	   Nulling the trigger at the beginning prevents the AO start
+	   trigger from executing more than once per command (and from
+	   doing things like trying to allocate the AO DMA channel
+	   multiple times). */
+
+	return retval;
+}
+
+static int ni_tio_input_cmd(struct ni_gpct *counter, struct a4l_cmd_desc *cmd)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	int retval = 0;
+
+	counter->mite_chan->dir = A4L_INPUT;
+	switch (counter_dev->variant) {
+	case ni_gpct_variant_m_series:
+	case ni_gpct_variant_660x:
+		a4l_mite_prep_dma(counter->mite_chan, 32, 32);
+		break;
+	case ni_gpct_variant_e_series:
+		a4l_mite_prep_dma(counter->mite_chan, 16, 32);
+		break;
+	default:
+		BUG();
+		break;
+	}
+	ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index),
+		Gi_Save_Trace_Bit, 0);
+	ni_tio_configure_dma(counter, 1, 1);
+	switch (cmd->start_src) {
+	case TRIG_NOW:
+		a4l_mite_dma_arm(counter->mite_chan);
+		retval = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE);
+		break;
+	case TRIG_INT:
+		break;
+	case TRIG_EXT:
+		a4l_mite_dma_arm(counter->mite_chan);
+		retval = ni_tio_arm(counter, 1, cmd->start_arg);
+		break;
+	case TRIG_OTHER:
+		a4l_mite_dma_arm(counter->mite_chan);
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return retval;
+}
+
+static int ni_tio_output_cmd(struct ni_gpct *counter, struct a4l_cmd_desc *cmd)
+{
+	__a4l_err("ni_tio: output commands not yet implemented.\n");
+	return -ENOTSUPP;
+}
+
+static int ni_tio_cmd_setup(struct ni_gpct *counter, struct a4l_cmd_desc *cmd)
+{
+	int retval = 0, set_gate_source = 0;
+	unsigned int gate_source;
+
+	if (cmd->scan_begin_src == TRIG_EXT) {
+		set_gate_source = 1;
+		gate_source = cmd->scan_begin_arg;
+	} else if (cmd->convert_src == TRIG_EXT) {
+		set_gate_source = 1;
+		gate_source = cmd->convert_arg;
+	}
+	if (set_gate_source) {
+		retval = ni_tio_set_gate_src(counter, 0, gate_source);
+	}
+	if (cmd->flags & TRIG_WAKE_EOS) {
+		ni_tio_set_bits(counter,
+			NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index),
+			Gi_Gate_Interrupt_Enable_Bit(counter->counter_index),
+			Gi_Gate_Interrupt_Enable_Bit(counter->counter_index));
+	}
+	return retval;
+}
+
+int a4l_ni_tio_cmd(struct ni_gpct *counter, struct a4l_cmd_desc *cmd)
+{
+	int retval = 0;
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&counter->lock, flags);
+	if (counter->mite_chan == NULL) {
+		__a4l_err("a4l_ni_tio_cmd: commands only supported with DMA."
+			     " Interrupt-driven commands not yet implemented.\n");
+		retval = -EIO;
+	} else {
+		retval = ni_tio_cmd_setup(counter, cmd);
+		if (retval == 0) {
+			if (cmd->flags & A4L_CMD_WRITE) {
+				retval = ni_tio_output_cmd(counter, cmd);
+			} else {
+				retval = ni_tio_input_cmd(counter, cmd);
+			}
+		}
+	}
+	rtdm_lock_put_irqrestore(&counter->lock, flags);
+	return retval;
+}
+
+struct a4l_cmd_desc a4l_ni_tio_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW | TRIG_INT | TRIG_OTHER | TRIG_EXT,
+	.scan_begin_src = TRIG_FOLLOW | TRIG_EXT | TRIG_OTHER,
+	.convert_src = TRIG_NOW | TRIG_EXT | TRIG_OTHER,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_NONE,
+};
+
+int a4l_ni_tio_cmdtest(struct ni_gpct *counter, struct a4l_cmd_desc *cmd)
+{
+	/* Make sure trigger sources are trivially valid */
+
+	if ((cmd->start_src & TRIG_EXT) != 0 &&
+	    ni_tio_counting_mode_registers_present(counter->counter_dev) == 0)
+		return -EINVAL;
+
+	/* Make sure trigger sources are mutually compatible */
+
+	if (cmd->convert_src != TRIG_NOW && cmd->scan_begin_src != TRIG_FOLLOW)
+		return -EINVAL;
+
+	/* Make sure arguments are trivially compatible */
+
+	if (cmd->start_src != TRIG_EXT) {
+		if (cmd->start_arg != 0) {
+			return -EINVAL;
+		}
+	}
+
+	if (cmd->scan_begin_src != TRIG_EXT) {
+		if (cmd->scan_begin_arg) {
+			return -EINVAL;
+		}
+	}
+
+	if (cmd->convert_src != TRIG_EXT) {
+		if (cmd->convert_arg) {
+			return -EINVAL;
+		}
+	}
+
+	if (cmd->scan_end_arg != cmd->nb_chan) {
+		return -EINVAL;
+	}
+
+	if (cmd->stop_src == TRIG_NONE) {
+		if (cmd->stop_arg != 0) {
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int a4l_ni_tio_cancel(struct ni_gpct *counter)
+{
+	unsigned long flags;
+
+	ni_tio_arm(counter, 0, 0);
+	rtdm_lock_get_irqsave(&counter->lock, flags);
+	if (counter->mite_chan) {
+		a4l_mite_dma_disarm(counter->mite_chan);
+	}
+	rtdm_lock_put_irqrestore(&counter->lock, flags);
+	ni_tio_configure_dma(counter, 0, 0);
+
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index),
+		Gi_Gate_Interrupt_Enable_Bit(counter->counter_index), 0x0);
+	return 0;
+}
+
+/*  During buffered input counter operation for e-series, the gate
+   interrupt is acked automatically by the dma controller, due to the
+   Gi_Read/Write_Acknowledges_IRQ bits in the input select
+   register. */
+static int should_ack_gate(struct ni_gpct *counter)
+{
+	unsigned long flags;
+	int retval = 0;
+
+	switch (counter->counter_dev->variant) {
+	case ni_gpct_variant_m_series:
+	case ni_gpct_variant_660x:
+		/* Not sure whether the 660x really supports gate
+		   interrupts (the bits are not listed in the
+		   register-level manual). */
+		return 1;
+		break;
+	case ni_gpct_variant_e_series:
+		rtdm_lock_get_irqsave(&counter->lock, flags);
+		{
+			if (counter->mite_chan == NULL ||
+				counter->mite_chan->dir != A4L_INPUT ||
+				(a4l_mite_done(counter->mite_chan))) {
+				retval = 1;
+			}
+		}
+		rtdm_lock_put_irqrestore(&counter->lock, flags);
+		break;
+	}
+	return retval;
+}
+
+void a4l_ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
+				    int *gate_error,
+				    int *tc_error,
+				    int *perm_stale_data, int *stale_data)
+{
+	const unsigned short gxx_status = read_register(counter,
+		NITIO_Gxx_Status_Reg(counter->counter_index));
+	const unsigned short gi_status = read_register(counter,
+		NITIO_Gi_Status_Reg(counter->counter_index));
+	unsigned ack = 0;
+
+	if (gate_error)
+		*gate_error = 0;
+	if (tc_error)
+		*tc_error = 0;
+	if (perm_stale_data)
+		*perm_stale_data = 0;
+	if (stale_data)
+		*stale_data = 0;
+
+	if (gxx_status & Gi_Gate_Error_Bit(counter->counter_index)) {
+		ack |= Gi_Gate_Error_Confirm_Bit(counter->counter_index);
+		if (gate_error) {
+			/* The 660x doesn't support automatic
+			   acknowledgement of the gate interrupt via
+			   DMA read/write, and reports bogus gate
+			   errors. */
+			if (counter->counter_dev->variant !=
+				ni_gpct_variant_660x) {
+				*gate_error = 1;
+			}
+		}
+	}
+	if (gxx_status & Gi_TC_Error_Bit(counter->counter_index)) {
+		ack |= Gi_TC_Error_Confirm_Bit(counter->counter_index);
+		if (tc_error)
+			*tc_error = 1;
+	}
+	if (gi_status & Gi_TC_Bit) {
+		ack |= Gi_TC_Interrupt_Ack_Bit;
+	}
+	if (gi_status & Gi_Gate_Interrupt_Bit) {
+		if (should_ack_gate(counter))
+			ack |= Gi_Gate_Interrupt_Ack_Bit;
+	}
+	if (ack)
+		write_register(counter, ack,
+			NITIO_Gi_Interrupt_Acknowledge_Reg(counter->
+				counter_index));
+	if (ni_tio_get_soft_copy(counter,
+			NITIO_Gi_Mode_Reg(counter->
+				counter_index)) & Gi_Loading_On_Gate_Bit) {
+		if (gxx_status & Gi_Stale_Data_Bit(counter->counter_index)) {
+			if (stale_data)
+				*stale_data = 1;
+		}
+		if (read_register(counter,
+				NITIO_Gxx_Joint_Status2_Reg(counter->
+					counter_index)) &
+			Gi_Permanent_Stale_Bit(counter->counter_index)) {
+			__a4l_err("%s: Gi_Permanent_Stale_Data detected.\n",
+				    __FUNCTION__);
+			if (perm_stale_data)
+				*perm_stale_data = 1;
+		}
+	}
+}
+
+/* TODO: to be adapted after a4l_buf_evt review */
+void a4l_ni_tio_handle_interrupt(struct ni_gpct *counter, struct a4l_device *dev)
+{
+	unsigned gpct_mite_status;
+	unsigned long flags;
+	int gate_error;
+	int tc_error;
+	int perm_stale_data;
+	struct a4l_subdevice *subd =
+		a4l_get_subd(dev, NI_GPCT_SUBDEV(counter->counter_index));
+
+	a4l_ni_tio_acknowledge_and_confirm(counter, &gate_error, &tc_error,
+		&perm_stale_data, NULL);
+	if (gate_error) {
+		__a4l_err("%s: Gi_Gate_Error detected.\n", __FUNCTION__);
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+	if (perm_stale_data) {
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+	switch (counter->counter_dev->variant) {
+	case ni_gpct_variant_m_series:
+	case ni_gpct_variant_660x:
+		if (read_register(counter,
+				  NITIO_Gi_DMA_Status_Reg(counter->counter_index))
+		    & Gi_DRQ_Error_Bit) {
+			__a4l_err("%s: Gi_DRQ_Error detected.\n", __FUNCTION__);
+			a4l_buf_evt(subd, A4L_BUF_ERROR);
+		}
+		break;
+	case ni_gpct_variant_e_series:
+		break;
+	}
+	rtdm_lock_get_irqsave(&counter->lock, flags);
+	if (counter->mite_chan == NULL) {
+		rtdm_lock_put_irqrestore(&counter->lock, flags);
+		return;
+	}
+	gpct_mite_status = a4l_mite_get_status(counter->mite_chan);
+	if (gpct_mite_status & CHSR_LINKC) {
+		writel(CHOR_CLRLC,
+			counter->mite_chan->mite->mite_io_addr +
+			MITE_CHOR(counter->mite_chan->channel));
+	}
+	a4l_mite_sync_input_dma(counter->mite_chan, subd);
+	rtdm_lock_put_irqrestore(&counter->lock, flags);
+}
+
+void a4l_ni_tio_set_mite_channel(struct ni_gpct *counter,
+			     struct mite_channel *mite_chan)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&counter->lock, flags);
+	counter->mite_chan = mite_chan;
+	rtdm_lock_put_irqrestore(&counter->lock, flags);
+}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+static int __init ni_tio_init_module(void)
+{
+	return 0;
+}
+
+static void __exit ni_tio_cleanup_module(void)
+{
+}
+
+MODULE_DESCRIPTION("Analogy support for NI general-purpose counters");
+MODULE_LICENSE("GPL");
+
+module_init(ni_tio_init_module);
+module_exit(ni_tio_cleanup_module);
+
+EXPORT_SYMBOL_GPL(a4l_ni_tio_rinsn);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_winsn);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_insn_config);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_init_counter);
+EXPORT_SYMBOL_GPL(a4l_ni_gpct_device_construct);
+EXPORT_SYMBOL_GPL(a4l_ni_gpct_device_destroy);
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+EXPORT_SYMBOL_GPL(a4l_ni_tio_input_inttrig);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_cmd);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_cmd_mask);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_cmdtest);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_cancel);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_handle_interrupt);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_set_mite_channel);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_acknowledge_and_confirm);
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+++ linux-patched/drivers/xenomai/analogy/national_instruments/ni_tio.h	2022-03-21 12:58:31.078872491 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Hardware driver for NI general purpose counter
+ * Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef __ANALOGY_NI_TIO_H__
+#define __ANALOGY_NI_TIO_H__
+
+#include <rtdm/analogy/device.h>
+
+#ifdef CONFIG_PCI
+#include "mite.h"
+#endif
+
+enum ni_gpct_register {
+	NITIO_G0_Autoincrement_Reg,
+	NITIO_G1_Autoincrement_Reg,
+	NITIO_G2_Autoincrement_Reg,
+	NITIO_G3_Autoincrement_Reg,
+	NITIO_G0_Command_Reg,
+	NITIO_G1_Command_Reg,
+	NITIO_G2_Command_Reg,
+	NITIO_G3_Command_Reg,
+	NITIO_G0_HW_Save_Reg,
+	NITIO_G1_HW_Save_Reg,
+	NITIO_G2_HW_Save_Reg,
+	NITIO_G3_HW_Save_Reg,
+	NITIO_G0_SW_Save_Reg,
+	NITIO_G1_SW_Save_Reg,
+	NITIO_G2_SW_Save_Reg,
+	NITIO_G3_SW_Save_Reg,
+	NITIO_G0_Mode_Reg,
+	NITIO_G1_Mode_Reg,
+	NITIO_G2_Mode_Reg,
+	NITIO_G3_Mode_Reg,
+	NITIO_G0_LoadA_Reg,
+	NITIO_G1_LoadA_Reg,
+	NITIO_G2_LoadA_Reg,
+	NITIO_G3_LoadA_Reg,
+	NITIO_G0_LoadB_Reg,
+	NITIO_G1_LoadB_Reg,
+	NITIO_G2_LoadB_Reg,
+	NITIO_G3_LoadB_Reg,
+	NITIO_G0_Input_Select_Reg,
+	NITIO_G1_Input_Select_Reg,
+	NITIO_G2_Input_Select_Reg,
+	NITIO_G3_Input_Select_Reg,
+	NITIO_G0_Counting_Mode_Reg,
+	NITIO_G1_Counting_Mode_Reg,
+	NITIO_G2_Counting_Mode_Reg,
+	NITIO_G3_Counting_Mode_Reg,
+	NITIO_G0_Second_Gate_Reg,
+	NITIO_G1_Second_Gate_Reg,
+	NITIO_G2_Second_Gate_Reg,
+	NITIO_G3_Second_Gate_Reg,
+	NITIO_G01_Status_Reg,
+	NITIO_G23_Status_Reg,
+	NITIO_G01_Joint_Reset_Reg,
+	NITIO_G23_Joint_Reset_Reg,
+	NITIO_G01_Joint_Status1_Reg,
+	NITIO_G23_Joint_Status1_Reg,
+	NITIO_G01_Joint_Status2_Reg,
+	NITIO_G23_Joint_Status2_Reg,
+	NITIO_G0_DMA_Config_Reg,
+	NITIO_G1_DMA_Config_Reg,
+	NITIO_G2_DMA_Config_Reg,
+	NITIO_G3_DMA_Config_Reg,
+	NITIO_G0_DMA_Status_Reg,
+	NITIO_G1_DMA_Status_Reg,
+	NITIO_G2_DMA_Status_Reg,
+	NITIO_G3_DMA_Status_Reg,
+	NITIO_G0_ABZ_Reg,
+	NITIO_G1_ABZ_Reg,
+	NITIO_G0_Interrupt_Acknowledge_Reg,
+	NITIO_G1_Interrupt_Acknowledge_Reg,
+	NITIO_G2_Interrupt_Acknowledge_Reg,
+	NITIO_G3_Interrupt_Acknowledge_Reg,
+	NITIO_G0_Status_Reg,
+	NITIO_G1_Status_Reg,
+	NITIO_G2_Status_Reg,
+	NITIO_G3_Status_Reg,
+	NITIO_G0_Interrupt_Enable_Reg,
+	NITIO_G1_Interrupt_Enable_Reg,
+	NITIO_G2_Interrupt_Enable_Reg,
+	NITIO_G3_Interrupt_Enable_Reg,
+	NITIO_Num_Registers,
+};
+
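+/* The helpers below map a counter index onto the corresponding
+   per-counter (Gi) or shared (Gxx) register enum value. */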
+static inline enum ni_gpct_register NITIO_Gi_Autoincrement_Reg(unsigned
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Autoincrement_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Autoincrement_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Autoincrement_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Autoincrement_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Command_Reg(unsigned counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Command_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Command_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Command_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Command_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Counting_Mode_Reg(unsigned
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Counting_Mode_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Counting_Mode_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Counting_Mode_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Counting_Mode_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Input_Select_Reg(unsigned
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Input_Select_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Input_Select_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Input_Select_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Input_Select_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gxx_Joint_Reset_Reg(unsigned
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+	case 1:
+		return NITIO_G01_Joint_Reset_Reg;
+		break;
+	case 2:
+	case 3:
+		return NITIO_G23_Joint_Reset_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gxx_Joint_Status1_Reg(unsigned
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+	case 1:
+		return NITIO_G01_Joint_Status1_Reg;
+		break;
+	case 2:
+	case 3:
+		return NITIO_G23_Joint_Status1_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gxx_Joint_Status2_Reg(unsigned
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+	case 1:
+		return NITIO_G01_Joint_Status2_Reg;
+		break;
+	case 2:
+	case 3:
+		return NITIO_G23_Joint_Status2_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gxx_Status_Reg(unsigned counter_index)
+{
+	switch (counter_index) {
+	case 0:
+	case 1:
+		return NITIO_G01_Status_Reg;
+		break;
+	case 2:
+	case 3:
+		return NITIO_G23_Status_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_LoadA_Reg(unsigned counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_LoadA_Reg;
+		break;
+	case 1:
+		return NITIO_G1_LoadA_Reg;
+		break;
+	case 2:
+		return NITIO_G2_LoadA_Reg;
+		break;
+	case 3:
+		return NITIO_G3_LoadA_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_LoadB_Reg(unsigned counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_LoadB_Reg;
+		break;
+	case 1:
+		return NITIO_G1_LoadB_Reg;
+		break;
+	case 2:
+		return NITIO_G2_LoadB_Reg;
+		break;
+	case 3:
+		return NITIO_G3_LoadB_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Mode_Reg(unsigned counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Mode_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Mode_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Mode_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Mode_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_SW_Save_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_SW_Save_Reg;
+		break;
+	case 1:
+		return NITIO_G1_SW_Save_Reg;
+		break;
+	case 2:
+		return NITIO_G2_SW_Save_Reg;
+		break;
+	case 3:
+		return NITIO_G3_SW_Save_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Second_Gate_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Second_Gate_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Second_Gate_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Second_Gate_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Second_Gate_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_DMA_Config_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_DMA_Config_Reg;
+		break;
+	case 1:
+		return NITIO_G1_DMA_Config_Reg;
+		break;
+	case 2:
+		return NITIO_G2_DMA_Config_Reg;
+		break;
+	case 3:
+		return NITIO_G3_DMA_Config_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_DMA_Status_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_DMA_Status_Reg;
+		break;
+	case 1:
+		return NITIO_G1_DMA_Status_Reg;
+		break;
+	case 2:
+		return NITIO_G2_DMA_Status_Reg;
+		break;
+	case 3:
+		return NITIO_G3_DMA_Status_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_ABZ_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_ABZ_Reg;
+		break;
+	case 1:
+		return NITIO_G1_ABZ_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Interrupt_Acknowledge_Reg(int
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Interrupt_Acknowledge_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Interrupt_Acknowledge_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Interrupt_Acknowledge_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Interrupt_Acknowledge_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Status_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Status_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Status_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Status_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Status_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Interrupt_Enable_Reg(int
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Interrupt_Enable_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Interrupt_Enable_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Interrupt_Enable_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Interrupt_Enable_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+enum ni_gpct_variant {
+	ni_gpct_variant_e_series,
+	ni_gpct_variant_m_series,
+	ni_gpct_variant_660x
+};
+
+struct ni_gpct {
+	struct ni_gpct_device *counter_dev;
+	unsigned counter_index;
+	unsigned chip_index;
+	uint64_t clock_period_ps; /* clock period in picoseconds */
+	struct mite_channel *mite_chan;
+	rtdm_lock_t lock;
+};
+
+struct ni_gpct_device {
+	struct a4l_device *dev;
+	void (*write_register)(struct ni_gpct * counter,
+				unsigned int bits, enum ni_gpct_register reg);
+	unsigned (*read_register)(struct ni_gpct * counter,
+				   enum ni_gpct_register reg);
+	enum ni_gpct_variant variant;
+	struct ni_gpct **counters;
+	unsigned num_counters;
+	unsigned regs[NITIO_Num_Registers];
+	rtdm_lock_t regs_lock;
+};
+
+#define Gi_Auto_Increment_Mask		0xff
+#define Gi_Up_Down_Shift		5
+
+#define Gi_Arm_Bit			0x1
+#define Gi_Save_Trace_Bit		0x2
+#define Gi_Load_Bit			0x4
+#define Gi_Disarm_Bit			0x10
+#define Gi_Up_Down_Mask			(0x3 << Gi_Up_Down_Shift)
+#define Gi_Always_Down_Bits		(0x0 << Gi_Up_Down_Shift)
+#define Gi_Always_Up_Bits		(0x1 << Gi_Up_Down_Shift)
+#define Gi_Up_Down_Hardware_IO_Bits	(0x2 << Gi_Up_Down_Shift)
+#define Gi_Up_Down_Hardware_Gate_Bits	(0x3 << Gi_Up_Down_Shift)
+#define Gi_Write_Switch_Bit		0x80
+#define Gi_Synchronize_Gate_Bit		0x100
+#define Gi_Little_Big_Endian_Bit	0x200
+#define Gi_Bank_Switch_Start_Bit	0x400
+#define Gi_Bank_Switch_Mode_Bit		0x800
+#define Gi_Bank_Switch_Enable_Bit	0x1000
+#define Gi_Arm_Copy_Bit			0x2000
+#define Gi_Save_Trace_Copy_Bit		0x4000
+#define Gi_Disarm_Copy_Bit		0x8000
+
+#define Gi_Index_Phase_Bitshift	5
+#define Gi_HW_Arm_Select_Shift		8
+
+#define Gi_Counting_Mode_Mask		0x7
+#define Gi_Counting_Mode_Normal_Bits	0x0
+#define Gi_Counting_Mode_QuadratureX1_Bits 0x1
+#define Gi_Counting_Mode_QuadratureX2_Bits 0x2
+#define Gi_Counting_Mode_QuadratureX4_Bits 0x3
+#define Gi_Counting_Mode_Two_Pulse_Bits	0x4
+#define Gi_Counting_Mode_Sync_Source_Bits 0x6
+#define Gi_Index_Mode_Bit		0x10
+#define Gi_Index_Phase_Mask		(0x3 << Gi_Index_Phase_Bitshift)
+#define Gi_Index_Phase_LowA_LowB	(0x0 << Gi_Index_Phase_Bitshift)
+#define Gi_Index_Phase_LowA_HighB	(0x1 << Gi_Index_Phase_Bitshift)
+#define Gi_Index_Phase_HighA_LowB	(0x2 << Gi_Index_Phase_Bitshift)
+#define Gi_Index_Phase_HighA_HighB	(0x3 << Gi_Index_Phase_Bitshift)
+
+/* From m-series example code,
+   not documented in 660x register level manual */
+#define Gi_HW_Arm_Enable_Bit		0x80
+/* From m-series example code,
+   not documented in 660x register level manual */
+#define Gi_660x_HW_Arm_Select_Mask	(0x7 << Gi_HW_Arm_Select_Shift)
+#define Gi_660x_Prescale_X8_Bit		0x1000
+#define Gi_M_Series_Prescale_X8_Bit	0x2000
+#define Gi_M_Series_HW_Arm_Select_Mask	(0x1f << Gi_HW_Arm_Select_Shift)
+/* Must be set for clocks over 40MHz,
+   which includes synchronous counting and quadrature modes */
+#define Gi_660x_Alternate_Sync_Bit	0x2000
+#define Gi_M_Series_Alternate_Sync_Bit	0x4000
+/* From m-series example code,
+   not documented in 660x register level manual */
+#define Gi_660x_Prescale_X2_Bit		0x4000
+#define Gi_M_Series_Prescale_X2_Bit	0x8000
+
+static inline unsigned int Gi_Alternate_Sync_Bit(enum ni_gpct_variant variant)
+{
+	switch (variant) {
+	case ni_gpct_variant_e_series:
+		return 0;
+		break;
+	case ni_gpct_variant_m_series:
+		return Gi_M_Series_Alternate_Sync_Bit;
+		break;
+	case ni_gpct_variant_660x:
+		return Gi_660x_Alternate_Sync_Bit;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline unsigned int Gi_Prescale_X2_Bit(enum ni_gpct_variant variant)
+{
+	switch (variant) {
+	case ni_gpct_variant_e_series:
+		return 0;
+		break;
+	case ni_gpct_variant_m_series:
+		return Gi_M_Series_Prescale_X2_Bit;
+		break;
+	case ni_gpct_variant_660x:
+		return Gi_660x_Prescale_X2_Bit;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline unsigned int Gi_Prescale_X8_Bit(enum ni_gpct_variant variant)
+{
+	switch (variant) {
+	case ni_gpct_variant_e_series:
+		return 0;
+		break;
+	case ni_gpct_variant_m_series:
+		return Gi_M_Series_Prescale_X8_Bit;
+		break;
+	case ni_gpct_variant_660x:
+		return Gi_660x_Prescale_X8_Bit;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline unsigned int Gi_HW_Arm_Select_Mask(enum ni_gpct_variant variant)
+{
+	switch (variant) {
+	case ni_gpct_variant_e_series:
+		return 0;
+		break;
+	case ni_gpct_variant_m_series:
+		return Gi_M_Series_HW_Arm_Select_Mask;
+		break;
+	case ni_gpct_variant_660x:
+		return Gi_660x_HW_Arm_Select_Mask;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+#define NI_660x_Timebase_1_Clock	0x0 /* 20MHz */
+#define NI_660x_Source_Pin_i_Clock	0x1
+#define NI_660x_Next_Gate_Clock		0xa
+#define NI_660x_Timebase_2_Clock	0x12 /* 100KHz */
+#define NI_660x_Next_TC_Clock		0x13
+#define NI_660x_Timebase_3_Clock	0x1e /* 80MHz */
+#define NI_660x_Logic_Low_Clock		0x1f
+
+#define ni_660x_max_rtsi_channel	6
+#define ni_660x_max_source_pin		7
+
+static inline unsigned int NI_660x_RTSI_Clock(unsigned int n)
+{
+	BUG_ON(n > ni_660x_max_rtsi_channel);
+	return (0xb + n);
+}
+
+static inline unsigned int NI_660x_Source_Pin_Clock(unsigned int n)
+{
+	BUG_ON(n > ni_660x_max_source_pin);
+	return (0x2 + n);
+}
+
+/* Clock sources for ni e and m series boards,
+   get bits with Gi_Source_Select_Bits() */
+#define NI_M_Series_Timebase_1_Clock	0x0 /* 20MHz */
+#define NI_M_Series_Timebase_2_Clock	0x12 /* 100KHz */
+#define NI_M_Series_Next_TC_Clock	0x13
+#define NI_M_Series_Next_Gate_Clock	0x14 /* when Gi_Src_SubSelect = 0 */
+#define NI_M_Series_PXI_Star_Trigger_Clock 0x14 /* when Gi_Src_SubSelect = 1 */
+#define NI_M_Series_PXI10_Clock		0x1d
+#define NI_M_Series_Timebase_3_Clock	0x1e /* 80MHz, when Gi_Src_SubSelect = 0 */
+#define NI_M_Series_Analog_Trigger_Out_Clock 0x1e /* when Gi_Src_SubSelect = 1 */
+#define NI_M_Series_Logic_Low_Clock	0x1f
+
+#define ni_m_series_max_pfi_channel	15
+#define ni_m_series_max_rtsi_channel	7
+
+static inline unsigned int NI_M_Series_PFI_Clock(unsigned int n)
+{
+	BUG_ON(n > ni_m_series_max_pfi_channel);
+	if (n < 10)
+		return 1 + n;
+	else
+		return 0xb + n;
+}
+
+static inline unsigned int NI_M_Series_RTSI_Clock(unsigned int n)
+{
+	BUG_ON(n > ni_m_series_max_rtsi_channel);
+	if (n == 7)
+		return 0x1b;
+	else
+		return 0xb + n;
+}
+
+#define NI_660x_Source_Pin_i_Gate_Select 0x0
+#define NI_660x_Gate_Pin_i_Gate_Select	0x1
+#define NI_660x_Next_SRC_Gate_Select	0xa
+#define NI_660x_Next_Out_Gate_Select	0x14
+#define NI_660x_Logic_Low_Gate_Select	0x1f
+#define ni_660x_max_gate_pin 7
+
+static inline unsigned int NI_660x_Gate_Pin_Gate_Select(unsigned int n)
+{
+	BUG_ON(n > ni_660x_max_gate_pin);
+	return 0x2 + n;
+}
+
+static inline unsigned int NI_660x_RTSI_Gate_Select(unsigned int n)
+{
+	BUG_ON(n > ni_660x_max_rtsi_channel);
+	return 0xb + n;
+}
+
+
+#define NI_M_Series_Timestamp_Mux_Gate_Select	0x0
+#define NI_M_Series_AI_START2_Gate_Select	0x12
+#define NI_M_Series_PXI_Star_Trigger_Gate_Select 0x13
+#define NI_M_Series_Next_Out_Gate_Select	0x14
+#define NI_M_Series_AI_START1_Gate_Select	0x1c
+#define NI_M_Series_Next_SRC_Gate_Select	0x1d
+#define NI_M_Series_Analog_Trigger_Out_Gate_Select 0x1e
+#define NI_M_Series_Logic_Low_Gate_Select	0x1f
+
+static inline unsigned int NI_M_Series_RTSI_Gate_Select(unsigned int n)
+{
+	BUG_ON(n > ni_m_series_max_rtsi_channel);
+	if (n == 7)
+		return 0x1b;
+	return 0xb + n;
+}
+
+static inline unsigned int NI_M_Series_PFI_Gate_Select(unsigned int n)
+{
+	BUG_ON(n > ni_m_series_max_pfi_channel);
+	if (n < 10)
+		return 1 + n;
+	return 0xb + n;
+}
+
+
+#define Gi_Source_Select_Shift 2
+#define Gi_Gate_Select_Shift 7
+
+#define Gi_Read_Acknowledges_Irq	0x1 /* not present on 660x */
+#define Gi_Write_Acknowledges_Irq	0x2 /* not present on 660x */
+#define Gi_Source_Select_Mask		0x7c
+#define Gi_Gate_Select_Mask		(0x1f << Gi_Gate_Select_Shift)
+#define Gi_Gate_Select_Load_Source_Bit	0x1000
+#define Gi_Or_Gate_Bit			0x2000
+#define Gi_Output_Polarity_Bit		0x4000 /* set to invert */
+#define Gi_Source_Polarity_Bit		0x8000 /* set to invert */
+
+#define Gi_Source_Select_Bits(x) (((x) << Gi_Source_Select_Shift) & \
+				  Gi_Source_Select_Mask)
+#define Gi_Gate_Select_Bits(x) (((x) << Gi_Gate_Select_Shift) & \
+				Gi_Gate_Select_Mask)
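+
+/*
+ * Illustrative sketch (editor's note, not part of the original patch): the two
+ * macros above combine a clock selector and a gate selector into the value
+ * programmed into a counter's input select register. The register accessor
+ * named below is assumed to be declared earlier in this header; only the
+ * helpers and constants shown here are taken from the surrounding code.
+ *
+ *	unsigned int bits = Gi_Source_Select_Bits(NI_M_Series_Timebase_1_Clock) |
+ *		Gi_Gate_Select_Bits(NI_M_Series_Next_Out_Gate_Select);
+ *	counter->counter_dev->write_register(counter, bits,
+ *		NITIO_Gi_Input_Select_Reg(counter->counter_index));
+ */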
+
+#define Gi_Gating_Mode_Mask		0x3
+#define Gi_Gating_Disabled_Bits		0x0
+#define Gi_Level_Gating_Bits		0x1
+#define Gi_Rising_Edge_Gating_Bits	0x2
+#define Gi_Falling_Edge_Gating_Bits	0x3
+#define Gi_Gate_On_Both_Edges_Bit	0x4 /* used in conjunction with
+					       rising edge gating mode */
+#define Gi_Trigger_Mode_for_Edge_Gate_Mask 0x18
+#define Gi_Edge_Gate_Starts_Stops_Bits	0x0
+#define Gi_Edge_Gate_Stops_Starts_Bits	0x8
+#define Gi_Edge_Gate_Starts_Bits	0x10
+#define Gi_Edge_Gate_No_Starts_or_Stops_Bits 0x18
+#define Gi_Stop_Mode_Mask		0x60
+#define Gi_Stop_on_Gate_Bits		0x00
+#define Gi_Stop_on_Gate_or_TC_Bits	0x20
+#define Gi_Stop_on_Gate_or_Second_TC_Bits 0x40
+#define Gi_Load_Source_Select_Bit	0x80
+#define Gi_Output_Mode_Mask		0x300
+#define Gi_Output_TC_Pulse_Bits		0x100
+#define Gi_Output_TC_Toggle_Bits	0x200
+#define Gi_Output_TC_or_Gate_Toggle_Bits 0x300
+#define Gi_Counting_Once_Mask		0xc00
+#define Gi_No_Hardware_Disarm_Bits	0x000
+#define Gi_Disarm_at_TC_Bits		0x400
+#define Gi_Disarm_at_Gate_Bits		0x800
+#define Gi_Disarm_at_TC_or_Gate_Bits	0xc00
+#define Gi_Loading_On_TC_Bit		0x1000
+#define Gi_Gate_Polarity_Bit		0x2000
+#define Gi_Loading_On_Gate_Bit		0x4000
+#define Gi_Reload_Source_Switching_Bit	0x8000
+
+#define NI_660x_Source_Pin_i_Second_Gate_Select		0x0
+#define NI_660x_Up_Down_Pin_i_Second_Gate_Select	0x1
+#define NI_660x_Next_SRC_Second_Gate_Select		0xa
+#define NI_660x_Next_Out_Second_Gate_Select		0x14
+#define NI_660x_Selected_Gate_Second_Gate_Select	0x1e
+#define NI_660x_Logic_Low_Second_Gate_Select		0x1f
+
+#define ni_660x_max_up_down_pin		7
+
+static inline
+unsigned int NI_660x_Up_Down_Pin_Second_Gate_Select(unsigned int n)
+{
+	BUG_ON(n > ni_660x_max_up_down_pin);
+	return 0x2 + n;
+}
+static inline
+unsigned int NI_660x_RTSI_Second_Gate_Select(unsigned int n)
+{
+	BUG_ON(n > ni_660x_max_rtsi_channel);
+	return 0xb + n;
+}
+
+#define Gi_Second_Gate_Select_Shift	7
+
+/*FIXME: m-series has a second gate subselect bit */
+/*FIXME: m-series second gate sources are undocumented (by NI)*/
+#define Gi_Second_Gate_Mode_Bit		0x1
+#define Gi_Second_Gate_Select_Mask	(0x1f << Gi_Second_Gate_Select_Shift)
+#define Gi_Second_Gate_Polarity_Bit	0x2000
+#define Gi_Second_Gate_Subselect_Bit	0x4000 /* m-series only */
+#define Gi_Source_Subselect_Bit		0x8000 /* m-series only */
+
+static inline
+unsigned int Gi_Second_Gate_Select_Bits(unsigned int second_gate_select)
+{
+	return (second_gate_select << Gi_Second_Gate_Select_Shift) &
+		Gi_Second_Gate_Select_Mask;
+}
+
+#define G0_Save_Bit		0x1
+#define G1_Save_Bit		0x2
+#define G0_Counting_Bit		0x4
+#define G1_Counting_Bit		0x8
+#define G0_Next_Load_Source_Bit	0x10
+#define G1_Next_Load_Source_Bit	0x20
+#define G0_Stale_Data_Bit	0x40
+#define G1_Stale_Data_Bit	0x80
+#define G0_Armed_Bit		0x100
+#define G1_Armed_Bit		0x200
+#define G0_No_Load_Between_Gates_Bit 0x400
+#define G1_No_Load_Between_Gates_Bit 0x800
+#define G0_TC_Error_Bit		0x1000
+#define G1_TC_Error_Bit		0x2000
+#define G0_Gate_Error_Bit	0x4000
+#define G1_Gate_Error_Bit	0x8000
+
+static inline unsigned int Gi_Counting_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_Counting_Bit;
+	return G0_Counting_Bit;
+}
+
+static inline unsigned int Gi_Armed_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_Armed_Bit;
+	return G0_Armed_Bit;
+}
+
+static inline unsigned int Gi_Next_Load_Source_Bit(unsigned counter_index)
+{
+	if (counter_index % 2)
+		return G1_Next_Load_Source_Bit;
+	return G0_Next_Load_Source_Bit;
+}
+
+static inline unsigned int Gi_Stale_Data_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_Stale_Data_Bit;
+	return G0_Stale_Data_Bit;
+}
+
+static inline unsigned int Gi_TC_Error_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_TC_Error_Bit;
+	return G0_TC_Error_Bit;
+}
+
+static inline unsigned int Gi_Gate_Error_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_Gate_Error_Bit;
+	return G0_Gate_Error_Bit;
+}
+
+/* Joint reset register bits */
+static inline unsigned Gi_Reset_Bit(unsigned int counter_index)
+{
+	return 0x1 << (2 + (counter_index % 2));
+}
+
+#define G0_Output_Bit		0x1
+#define G1_Output_Bit		0x2
+#define G0_HW_Save_Bit		0x1000
+#define G1_HW_Save_Bit		0x2000
+#define G0_Permanent_Stale_Bit	0x4000
+#define G1_Permanent_Stale_Bit	0x8000
+
+static inline unsigned int Gi_Permanent_Stale_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_Permanent_Stale_Bit;
+	return G0_Permanent_Stale_Bit;
+}
+
+#define Gi_DMA_Enable_Bit	0x1
+#define Gi_DMA_Write_Bit	0x2
+#define Gi_DMA_Int_Bit		0x4
+
+#define Gi_DMA_Readbank_Bit	0x2000
+#define Gi_DRQ_Error_Bit	0x4000
+#define Gi_DRQ_Status_Bit	0x8000
+
+#define G0_Gate_Error_Confirm_Bit	0x20
+#define G0_TC_Error_Confirm_Bit		0x40
+
+#define G1_Gate_Error_Confirm_Bit	0x2
+#define G1_TC_Error_Confirm_Bit		0x4
+
+static inline unsigned int Gi_Gate_Error_Confirm_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_Gate_Error_Confirm_Bit;
+	return G0_Gate_Error_Confirm_Bit;
+}
+
+static inline unsigned int Gi_TC_Error_Confirm_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_TC_Error_Confirm_Bit;
+	return G0_TC_Error_Confirm_Bit;
+}
+
+/* Bits that are the same in G0/G2 and G1/G3 interrupt acknowledge registers */
+#define Gi_TC_Interrupt_Ack_Bit		0x4000
+#define Gi_Gate_Interrupt_Ack_Bit	0x8000
+
+#define Gi_Gate_Interrupt_Bit	0x4
+#define Gi_TC_Bit		0x8
+#define Gi_Interrupt_Bit	0x8000
+
+#define G0_TC_Interrupt_Enable_Bit	0x40
+#define G0_Gate_Interrupt_Enable_Bit	0x100
+
+#define G1_TC_Interrupt_Enable_Bit	0x200
+#define G1_Gate_Interrupt_Enable_Bit	0x400
+
+static inline unsigned int Gi_Gate_Interrupt_Enable_Bit(unsigned int counter_index)
+{
+	unsigned int bit;
+
+	if (counter_index % 2) {
+		bit = G1_Gate_Interrupt_Enable_Bit;
+	} else {
+		bit = G0_Gate_Interrupt_Enable_Bit;
+	}
+	return bit;
+}
+
+#define counter_status_mask (A4L_COUNTER_ARMED | A4L_COUNTER_COUNTING)
+
+#define NI_USUAL_PFI_SELECT(x)	(((x) < 10) ? (0x1 + (x)) : (0xb + (x)))
+#define NI_USUAL_RTSI_SELECT(x)	(((x) < 7) ? (0xb + (x)) : (0x1b + (x)))
+
+/* Mode bits for NI general-purpose counters, set with
+   INSN_CONFIG_SET_COUNTER_MODE */
+#define NI_GPCT_COUNTING_MODE_SHIFT		16
+#define NI_GPCT_INDEX_PHASE_BITSHIFT		20
+#define NI_GPCT_COUNTING_DIRECTION_SHIFT	24
+
+#define NI_GPCT_GATE_ON_BOTH_EDGES_BIT		0x4
+#define NI_GPCT_EDGE_GATE_MODE_MASK		0x18
+#define NI_GPCT_EDGE_GATE_STARTS_STOPS_BITS	0x0
+#define NI_GPCT_EDGE_GATE_STOPS_STARTS_BITS	0x8
+#define NI_GPCT_EDGE_GATE_STARTS_BITS		0x10
+#define NI_GPCT_EDGE_GATE_NO_STARTS_NO_STOPS_BITS 0x18
+#define NI_GPCT_STOP_MODE_MASK			0x60
+#define NI_GPCT_STOP_ON_GATE_BITS		0x00
+#define NI_GPCT_STOP_ON_GATE_OR_TC_BITS		0x20
+#define NI_GPCT_STOP_ON_GATE_OR_SECOND_TC_BITS	0x40
+#define NI_GPCT_LOAD_B_SELECT_BIT		0x80
+#define NI_GPCT_OUTPUT_MODE_MASK		0x300
+#define NI_GPCT_OUTPUT_TC_PULSE_BITS		0x100
+#define NI_GPCT_OUTPUT_TC_TOGGLE_BITS		0x200
+#define NI_GPCT_OUTPUT_TC_OR_GATE_TOGGLE_BITS	0x300
+#define NI_GPCT_HARDWARE_DISARM_MASK		0xc00
+#define NI_GPCT_NO_HARDWARE_DISARM_BITS		0x000
+#define NI_GPCT_DISARM_AT_TC_BITS		0x400
+#define NI_GPCT_DISARM_AT_GATE_BITS		0x800
+#define NI_GPCT_DISARM_AT_TC_OR_GATE_BITS	0xc00
+#define NI_GPCT_LOADING_ON_TC_BIT		0x1000
+#define NI_GPCT_LOADING_ON_GATE_BIT		0x4000
+#define NI_GPCT_COUNTING_MODE_MASK		(0x7 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_COUNTING_MODE_NORMAL_BITS	(0x0 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_COUNTING_MODE_QUADRATURE_X1_BITS (0x1 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_COUNTING_MODE_QUADRATURE_X2_BITS (0x2 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_COUNTING_MODE_QUADRATURE_X4_BITS (0x3 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_COUNTING_MODE_TWO_PULSE_BITS	(0x4 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_COUNTING_MODE_SYNC_SOURCE_BITS	(0x6 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_INDEX_PHASE_MASK		(0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT)
+#define NI_GPCT_INDEX_PHASE_LOW_A_LOW_B_BITS	(0x0 << NI_GPCT_INDEX_PHASE_BITSHIFT)
+#define NI_GPCT_INDEX_PHASE_LOW_A_HIGH_B_BITS	(0x1 << NI_GPCT_INDEX_PHASE_BITSHIFT)
+#define NI_GPCT_INDEX_PHASE_HIGH_A_LOW_B_BITS	(0x2 << NI_GPCT_INDEX_PHASE_BITSHIFT)
+#define NI_GPCT_INDEX_PHASE_HIGH_A_HIGH_B_BITS	(0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT)
+#define NI_GPCT_INDEX_ENABLE_BIT		0x400000
+#define NI_GPCT_COUNTING_DIRECTION_MASK		(0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT)
+#define NI_GPCT_COUNTING_DIRECTION_DOWN_BITS	(0x0 << NI_GPCT_COUNTING_DIRECTION_SHIFT)
+#define NI_GPCT_COUNTING_DIRECTION_UP_BITS	(0x1 << NI_GPCT_COUNTING_DIRECTION_SHIFT)
+#define NI_GPCT_COUNTING_DIRECTION_HW_UP_DOWN_BITS (0x2 << NI_GPCT_COUNTING_DIRECTION_SHIFT)
+#define NI_GPCT_COUNTING_DIRECTION_HW_GATE_BITS (0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT)
+#define NI_GPCT_RELOAD_SOURCE_MASK		0xc000000
+#define NI_GPCT_RELOAD_SOURCE_FIXED_BITS	0x0
+#define NI_GPCT_RELOAD_SOURCE_SWITCHING_BITS	0x4000000
+#define NI_GPCT_RELOAD_SOURCE_GATE_SELECT_BITS	0x8000000
+#define NI_GPCT_OR_GATE_BIT			0x10000000
+#define NI_GPCT_INVERT_OUTPUT_BIT		0x20000000
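+
+/*
+ * Illustrative sketch (editor's note, not part of the original patch): a
+ * counter mode word built from the INSN_CONFIG_SET_COUNTER_MODE bits above,
+ * selecting X4 quadrature decoding with hardware-controlled up/down counting.
+ * How the word is passed down (the config instruction plumbing) is assumed;
+ * only the bit definitions above are used.
+ *
+ *	unsigned int mode = NI_GPCT_COUNTING_MODE_QUADRATURE_X4_BITS |
+ *		NI_GPCT_COUNTING_DIRECTION_HW_UP_DOWN_BITS |
+ *		NI_GPCT_INDEX_ENABLE_BIT |
+ *		NI_GPCT_INDEX_PHASE_LOW_A_LOW_B_BITS |
+ *		NI_GPCT_OUTPUT_TC_TOGGLE_BITS;
+ */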
+
+/* Bits for setting a clock source with INSN_CONFIG_SET_CLOCK_SRC when
+   using NI general-purpose counters. */
+#define NI_GPCT_CLOCK_SRC_SELECT_MASK		0x3f
+#define NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS	0x0
+#define NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS	0x1
+#define NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS	0x2
+#define NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS	0x3
+#define NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS	0x4
+#define NI_GPCT_NEXT_TC_CLOCK_SRC_BITS		0x5
+#define NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS	0x6 /* NI 660x-specific */
+#define NI_GPCT_PXI10_CLOCK_SRC_BITS		0x7
+#define NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS	0x8
+#define NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS 0x9
+#define NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK	0x30000000
+#define NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS	0x0
+#define NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS	0x10000000 /* divide source by 2 */
+#define NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS	0x20000000 /* divide source by 8 */
+#define NI_GPCT_INVERT_CLOCK_SRC_BIT		0x80000000
+#define NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(x)	(0x10 + x)
+#define NI_GPCT_RTSI_CLOCK_SRC_BITS(x)		(0x18 + x)
+#define NI_GPCT_PFI_CLOCK_SRC_BITS(x)		(0x20 + x)
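+
+/*
+ * Illustrative sketch (editor's note, not part of the original patch): a clock
+ * source word for INSN_CONFIG_SET_CLOCK_SRC selecting timebase 1 (20MHz)
+ * prescaled by 8, i.e. an effective 2.5MHz source with a 400ns period. Only
+ * the definitions above are used; the instruction plumbing is assumed.
+ *
+ *	unsigned int clk_src = NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS |
+ *		NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS;
+ */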
+
+/* Possibilities for setting a gate source with
+   INSN_CONFIG_SET_GATE_SRC when using NI general-purpose counters.
+   May be bitwise-or'd with CR_EDGE or CR_INVERT. */
+/* M-series gates */
+#define NI_GPCT_TIMESTAMP_MUX_GATE_SELECT	0x0
+#define NI_GPCT_AI_START2_GATE_SELECT		0x12
+#define NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT	0x13
+#define NI_GPCT_NEXT_OUT_GATE_SELECT		0x14
+#define NI_GPCT_AI_START1_GATE_SELECT		0x1c
+#define NI_GPCT_NEXT_SOURCE_GATE_SELECT		0x1d
+#define NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT	0x1e
+#define NI_GPCT_LOGIC_LOW_GATE_SELECT		0x1f
+/* More gates for 660x */
+#define NI_GPCT_SOURCE_PIN_i_GATE_SELECT	0x100
+#define NI_GPCT_GATE_PIN_i_GATE_SELECT		0x101
+/* More gates for 660x "second gate" */
+#define NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT	0x201
+#define NI_GPCT_SELECTED_GATE_GATE_SELECT	0x21e
+/* M-series "second gate" sources are unknown, we should add them here
+   with an offset of 0x300 when known. */
+#define NI_GPCT_DISABLED_GATE_SELECT		0x8000
+#define NI_GPCT_GATE_PIN_GATE_SELECT(x)	(0x102 + x)
+#define NI_GPCT_RTSI_GATE_SELECT(x)		NI_USUAL_RTSI_SELECT(x)
+#define NI_GPCT_PFI_GATE_SELECT(x)		NI_USUAL_PFI_SELECT(x)
+#define NI_GPCT_UP_DOWN_PIN_GATE_SELECT(x)	(0x202 + x)
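+
+/*
+ * Illustrative sketch (editor's note, not part of the original patch): a gate
+ * source for INSN_CONFIG_SET_GATE_SRC taking the gate from PFI3, triggering on
+ * the falling edge. CR_EDGE and CR_INVERT are the flags mentioned above; they
+ * are defined elsewhere, so this is only an assumed usage pattern.
+ *
+ *	unsigned int gate_src = NI_GPCT_PFI_GATE_SELECT(3) | CR_EDGE | CR_INVERT;
+ */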
+
+/* Possibilities for setting a source with INSN_CONFIG_SET_OTHER_SRC
+   when using NI general-purpose counters. */
+#define NI_GPCT_SOURCE_ENCODER_A 0
+#define NI_GPCT_SOURCE_ENCODER_B 1
+#define NI_GPCT_SOURCE_ENCODER_Z 2
+/* M-series gates */
+/* Still unknown, probably only need NI_GPCT_PFI_OTHER_SELECT */
+#define NI_GPCT_DISABLED_OTHER_SELECT	0x8000
+#define NI_GPCT_PFI_OTHER_SELECT(x) NI_USUAL_PFI_SELECT(x)
+
+/* Start sources for ni general-purpose counters for use with
+   INSN_CONFIG_ARM */
+#define NI_GPCT_ARM_IMMEDIATE		0x0
+/* Start both the counter and the adjacent paired counter
+   simultaneously */
+#define NI_GPCT_ARM_PAIRED_IMMEDIATE	0x1
+/* NI doesn't document bits for selecting hardware arm triggers.  If
+   the NI_GPCT_ARM_UNKNOWN bit is set, we will pass the least significant
+   bits (3 bits for 660x or 5 bits for m-series) through to the
+   hardware. This will at least allow someone to figure out what the bits
+   do later. */
+#define NI_GPCT_ARM_UNKNOWN		0x1000
+
+/* Digital filtering options for ni 660x for use with
+   INSN_CONFIG_FILTER. */
+#define NI_GPCT_FILTER_OFF		0x0
+#define NI_GPCT_FILTER_TIMEBASE_3_SYNC	0x1
+#define NI_GPCT_FILTER_100x_TIMEBASE_1	0x2
+#define NI_GPCT_FILTER_20x_TIMEBASE_1	0x3
+#define NI_GPCT_FILTER_10x_TIMEBASE_1	0x4
+#define NI_GPCT_FILTER_2x_TIMEBASE_1	0x5
+#define NI_GPCT_FILTER_2x_TIMEBASE_3	0x6
+
+/* Master clock sources for ni mio boards and
+   INSN_CONFIG_SET_CLOCK_SRC */
+#define NI_MIO_INTERNAL_CLOCK		0
+#define NI_MIO_RTSI_CLOCK		1
+/* Doesn't work for m-series, use NI_MIO_PLL_RTSI_CLOCK();
+   the NI_MIO_PLL_* sources are m-series only */
+#define NI_MIO_PLL_PXI_STAR_TRIGGER_CLOCK 2
+#define NI_MIO_PLL_PXI10_CLOCK		3
+#define NI_MIO_PLL_RTSI0_CLOCK		4
+
+#define NI_MIO_PLL_RTSI_CLOCK(x) (NI_MIO_PLL_RTSI0_CLOCK + (x))
+
+/* Signals which can be routed to an NI RTSI pin with
+   INSN_CONFIG_SET_ROUTING. The numbers assigned are not arbitrary, they
+   correspond to the bits required to program the board. */
+#define NI_RTSI_OUTPUT_ADR_START1	0
+#define NI_RTSI_OUTPUT_ADR_START2	1
+#define NI_RTSI_OUTPUT_SCLKG		2
+#define NI_RTSI_OUTPUT_DACUPDN		3
+#define NI_RTSI_OUTPUT_DA_START1	4
+#define NI_RTSI_OUTPUT_G_SRC0		5
+#define NI_RTSI_OUTPUT_G_GATE0		6
+#define NI_RTSI_OUTPUT_RGOUT0		7
+#define NI_RTSI_OUTPUT_RTSI_BRD_0	8
+/* Pre-m-series always have RTSI clock on line 7 */
+#define NI_RTSI_OUTPUT_RTSI_OSC		12
+
+#define NI_RTSI_OUTPUT_RTSI_BRD(x) (NI_RTSI_OUTPUT_RTSI_BRD_0 + (x))
+
+
+int a4l_ni_tio_rinsn(struct ni_gpct *counter, struct a4l_kernel_instruction *insn);
+int a4l_ni_tio_winsn(struct ni_gpct *counter, struct a4l_kernel_instruction *insn);
+int a4l_ni_tio_insn_config(struct ni_gpct *counter, struct a4l_kernel_instruction *insn);
+void a4l_ni_tio_init_counter(struct ni_gpct *counter);
+
+struct ni_gpct_device *a4l_ni_gpct_device_construct(struct a4l_device * dev,
+	void (*write_register) (struct ni_gpct * counter, unsigned int bits,
+		enum ni_gpct_register reg),
+	unsigned int (*read_register) (struct ni_gpct * counter,
+		enum ni_gpct_register reg), enum ni_gpct_variant variant,
+	unsigned int num_counters);
+void a4l_ni_gpct_device_destroy(struct ni_gpct_device *counter_dev);
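+
+/*
+ * Illustrative sketch (editor's note, not part of the original patch): how a
+ * board driver might instantiate the counter device with the constructor
+ * declared above. The I/O callbacks (my_gpct_write/my_gpct_read) and the
+ * counter count are hypothetical, board-specific choices.
+ *
+ *	counter_dev = a4l_ni_gpct_device_construct(dev,
+ *		my_gpct_write, my_gpct_read,
+ *		ni_gpct_variant_m_series, 2);
+ *	if (counter_dev == NULL)
+ *		return -ENOMEM;
+ */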
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+extern struct a4l_cmd_desc a4l_ni_tio_cmd_mask;
+
+int a4l_ni_tio_input_inttrig(struct ni_gpct *counter, lsampl_t trignum);
+int a4l_ni_tio_cmd(struct ni_gpct *counter, struct a4l_cmd_desc *cmd);
+int a4l_ni_tio_cmdtest(struct ni_gpct *counter, struct a4l_cmd_desc *cmd);
+int a4l_ni_tio_cancel(struct ni_gpct *counter);
+
+void a4l_ni_tio_handle_interrupt(struct ni_gpct *counter, struct a4l_device *dev);
+void a4l_ni_tio_set_mite_channel(struct ni_gpct *counter,
+			     struct mite_channel *mite_chan);
+void a4l_ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
+				    int *gate_error,
+				    int *tc_error,
+				    int *perm_stale_data, int *stale_data);
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+#endif /* !__ANALOGY_NI_TIO_H__ */
+++ linux-patched/drivers/xenomai/analogy/Makefile	2022-03-21 12:58:31.071872560 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/subdevice.c	1970-01-01 01:00:00.000000000 +0100
+
+ccflags-y += -I$(srctree)/drivers/xenomai/analogy
+
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY) += xeno_analogy.o testing/ intel/ national_instruments/ sensoray/
+
+xeno_analogy-y := \
+	buffer.o \
+	command.o \
+	device.o \
+	driver.o \
+	driver_facilities.o \
+	instruction.o \
+	rtdm_helpers.o \
+	subdevice.o \
+	transfer.o \
+	rtdm_interface.o
+++ linux-patched/drivers/xenomai/analogy/subdevice.c	2022-03-21 12:58:31.064872628 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/driver.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, subdevice, channel and range related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/mman.h>
+#include <asm/io.h>
+#include <asm/errno.h>
+#include <rtdm/analogy/device.h>
+
+/* --- Common ranges declarations --- */
+
+struct a4l_rngtab rng_bipolar10 = { 1, {
+		RANGE_V(-10, 10),
+	}};
+struct a4l_rngdesc a4l_range_bipolar10 = RNG_GLOBAL(rng_bipolar10);
+
+struct a4l_rngtab rng_bipolar5 = { 1, {
+		RANGE_V(-5, 5),
+	}};
+struct a4l_rngdesc a4l_range_bipolar5 = RNG_GLOBAL(rng_bipolar5);
+
+struct a4l_rngtab rng_unipolar10 = { 1, {
+		RANGE_V(0, 10),
+	}};
+struct a4l_rngdesc a4l_range_unipolar10 = RNG_GLOBAL(rng_unipolar10);
+
+struct a4l_rngtab rng_unipolar5 = { 1, {
+		RANGE_V(0, 5),
+	}};
+struct a4l_rngdesc a4l_range_unipolar5 = RNG_GLOBAL(rng_unipolar5);
+
+struct a4l_rngtab rng_unknown = { 1, {
+		RANGE(0, 1),
+	}};
+struct a4l_rngdesc a4l_range_unknown = RNG_GLOBAL(rng_unknown);
+
+struct a4l_rngtab rng_fake = { 0, {
+		RANGE(0, 0),
+	}};
+struct a4l_rngdesc a4l_range_fake = RNG_GLOBAL(rng_fake);
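+
+/* Illustrative sketch (editor's note, not part of this file): a driver
+   needing a range set not covered by the common tables above can declare
+   its own the same way, e.g. a single bipolar 2V range shared by all
+   channels (hypothetical names):
+
+	struct a4l_rngtab my_rng_bipolar2 = { 1, {
+			RANGE_V(-2, 2),
+		}};
+	struct a4l_rngdesc my_range_bipolar2 = RNG_GLOBAL(my_rng_bipolar2);
+*/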
+
+/* --- Basic channel / range management functions --- */
+
+struct a4l_channel *a4l_get_chfeat(struct a4l_subdevice *sb, int idx)
+{
+	int i = (sb->chan_desc->mode != A4L_CHAN_GLOBAL_CHANDESC) ? idx : 0;
+	return &(sb->chan_desc->chans[i]);
+}
+
+struct a4l_range *a4l_get_rngfeat(struct a4l_subdevice *sb, int chidx, int rngidx)
+{
+	int i = (sb->rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ? chidx : 0;
+	return &(sb->rng_desc->rngtabs[i]->rngs[rngidx]);
+}
+
+int a4l_check_chanlist(struct a4l_subdevice *subd,
+		       unsigned char nb_chan, unsigned int *chans)
+{
+	int i, j;
+
+	if (nb_chan > subd->chan_desc->length)
+		return -EINVAL;
+
+	for (i = 0; i < nb_chan; i++) {
+		j = (subd->chan_desc->mode != A4L_CHAN_GLOBAL_CHANDESC) ? i : 0;
+
+		if (CR_CHAN(chans[i]) >= subd->chan_desc->length) {
+			__a4l_err("a4l_check_chanlist: "
+				  "chan idx out of range (%u>=%lu)\n",
+				  CR_CHAN(chans[i]), subd->chan_desc->length);
+			return -EINVAL;
+		}
+		if (CR_AREF(chans[i]) != 0 &&
+		    (CR_AREF(chans[i]) & subd->chan_desc->chans[j].flags) == 0)
+		{
+			__a4l_err("a4l_check_chanlist: "
+				  "bad channel type\n");
+			return -EINVAL;
+		}
+	}
+
+	if (subd->rng_desc == NULL)
+		return 0;
+
+	for (i = 0; i < nb_chan; i++) {
+		j = (subd->rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ? i : 0;
+
+		if (CR_RNG(chans[i]) > subd->rng_desc->rngtabs[j]->length) {
+			__a4l_err("a4l_check_chanlist: "
+				  "rng idx out of range (%u>=%u)\n",
+				  CR_RNG(chans[i]),
+				  subd->rng_desc->rngtabs[j]->length);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* --- Upper layer functions --- */
+
+struct a4l_subdevice * a4l_alloc_subd(int sizeof_priv,
+			    void (*setup)(struct a4l_subdevice *))
+{
+	struct a4l_subdevice *subd;
+
+	subd = rtdm_malloc(sizeof(struct a4l_subdevice) + sizeof_priv);
+
+	if(subd != NULL) {
+		memset(subd, 0 , sizeof(struct a4l_subdevice) + sizeof_priv);
+		if(setup != NULL)
+			setup(subd);
+	}
+
+	return subd;
+}
+
+int a4l_add_subd(struct a4l_device * dev, struct a4l_subdevice * subd)
+{
+	struct list_head *this;
+	int i = 0;
+
+	/* Basic checking */
+	if (dev == NULL || subd == NULL)
+		return -EINVAL;
+
+	list_add_tail(&subd->list, &dev->subdvsq);
+
+	subd->dev = dev;
+
+	list_for_each(this, &dev->subdvsq) {
+		i++;
+	}
+
+	subd->idx = --i;
+
+	return i;
+}
+
+struct a4l_subdevice *a4l_get_subd(struct a4l_device *dev, int idx)
+{
+	int i = 0;
+	struct a4l_subdevice *subd = NULL;
+	struct list_head *this;
+
+	/* This function is not optimized as we do not go through the
+	   transfer structure */
+
+	list_for_each(this, &dev->subdvsq) {
+		if(idx == i++)
+			subd = list_entry(this, struct a4l_subdevice, list);
+	}
+
+	return subd;
+}
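+
+/* Illustrative sketch (editor's note, not part of this file): typical use of
+   a4l_alloc_subd() and a4l_add_subd() from a driver attach() routine. The
+   setup callback, the A4L_SUBD_AI flag and the descriptor/private structure
+   names are assumptions borrowed from the usual Analogy driver pattern.
+
+	static void my_ai_setup(struct a4l_subdevice *subd)
+	{
+		subd->flags = A4L_SUBD_AI;
+		subd->chan_desc = &my_ai_chan_desc;
+		subd->rng_desc = &a4l_range_bipolar10;
+	}
+
+	subd = a4l_alloc_subd(sizeof(struct my_ai_priv), my_ai_setup);
+	if (subd == NULL)
+		return -ENOMEM;
+	a4l_add_subd(dev, subd);
+*/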
+
+/* --- IOCTL / FOPS functions --- */
+
+int a4l_ioctl_subdinfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	int i, ret = 0;
+	a4l_sbinfo_t *subd_info;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_subdinfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	subd_info = rtdm_malloc(dev->transfer.nb_subd *
+				sizeof(a4l_sbinfo_t));
+	if (subd_info == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < dev->transfer.nb_subd; i++) {
+		subd_info[i].flags = dev->transfer.subds[i]->flags;
+		subd_info[i].status = dev->transfer.subds[i]->status;
+		subd_info[i].nb_chan =
+			(dev->transfer.subds[i]->chan_desc != NULL) ?
+			dev->transfer.subds[i]->chan_desc->length : 0;
+	}
+
+	if (rtdm_safe_copy_to_user(fd,
+				   arg,
+				   subd_info, dev->transfer.nb_subd *
+				   sizeof(a4l_sbinfo_t)) != 0)
+		ret = -EFAULT;
+
+	rtdm_free(subd_info);
+
+	return ret;
+
+}
+
+int a4l_ioctl_nbchaninfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	a4l_chinfo_arg_t inarg;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_nbchaninfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &inarg, arg,
+				     sizeof(a4l_chinfo_arg_t)) != 0)
+		return -EFAULT;
+
+	if (inarg.idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_ioctl_nbchaninfo: subdevice index "
+			  "out of range\n");
+		return -EINVAL;
+	}
+
+	if(dev->transfer.subds[inarg.idx_subd]->chan_desc == NULL)
+		inarg.info = (void *)0;
+	else
+		inarg.info = (void *)(unsigned long)
+			dev->transfer.subds[inarg.idx_subd]->chan_desc->length;
+
+	if (rtdm_safe_copy_to_user(fd,
+				   arg,
+				   &inarg, sizeof(a4l_chinfo_arg_t)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+int a4l_ioctl_chaninfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int i, ret = 0;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	a4l_chinfo_t *chan_info;
+	a4l_chinfo_arg_t inarg;
+	struct a4l_channels_desc *chan_desc;
+	struct a4l_rngdesc *rng_desc;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_chaninfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &inarg, arg,
+				     sizeof(a4l_chinfo_arg_t)) != 0)
+		return -EFAULT;
+
+	if (inarg.idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_ioctl_chaninfo: bad subdevice index\n");
+		return -EINVAL;
+	}
+
+	chan_desc = dev->transfer.subds[inarg.idx_subd]->chan_desc;
+	rng_desc = dev->transfer.subds[inarg.idx_subd]->rng_desc;
+
+	if (chan_desc == NULL) {
+		__a4l_err("a4l_ioctl_chaninfo: no channel descriptor "
+			  "for subdevice %d\n", inarg.idx_subd);
+		return -EINVAL;
+	}
+
+	if(rng_desc == NULL)
+		rng_desc = &a4l_range_fake;
+
+	chan_info = rtdm_malloc(chan_desc->length * sizeof(a4l_chinfo_t));
+	if (chan_info == NULL)
+		return -ENOMEM;
+
+	/* If the channel descriptor is global, the fields are filled
+	   with the same instance of channel descriptor */
+	for (i = 0; i < chan_desc->length; i++) {
+		int j =
+			(chan_desc->mode != A4L_CHAN_GLOBAL_CHANDESC) ? i : 0;
+		int k = (rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ? i : 0;
+
+		chan_info[i].chan_flags = chan_desc->chans[j].flags;
+		chan_info[i].nb_bits = chan_desc->chans[j].nb_bits;
+		chan_info[i].nb_rng = rng_desc->rngtabs[k]->length;
+
+		if (chan_desc->mode == A4L_CHAN_GLOBAL_CHANDESC)
+			chan_info[i].chan_flags |= A4L_CHAN_GLOBAL;
+	}
+
+	if (rtdm_safe_copy_to_user(fd,
+				   inarg.info,
+				   chan_info,
+				   chan_desc->length *
+				   sizeof(a4l_chinfo_t)) != 0)
+		ret = -EFAULT;
+
+	rtdm_free(chan_info);
+
+	return ret;
+}
+
+int a4l_ioctl_nbrnginfo(struct a4l_device_context * cxt, void *arg)
+{
+	int i;
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	a4l_rnginfo_arg_t inarg;
+	struct a4l_rngdesc *rng_desc;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_nbrnginfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &inarg,
+				     arg, sizeof(a4l_rnginfo_arg_t)) != 0)
+		return -EFAULT;
+
+	if (inarg.idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_ioctl_nbrnginfo: bad subdevice index\n");
+		return -EINVAL;
+	}
+
+	if (dev->transfer.subds[inarg.idx_subd]->chan_desc == NULL) {
+		__a4l_err("a4l_ioctl_nbrnginfo: no channel descriptor "
+			  "for subdevice %d\n", inarg.idx_subd);
+		return -EINVAL;
+	}
+
+	if (inarg.idx_chan >=
+	    dev->transfer.subds[inarg.idx_subd]->chan_desc->length) {
+		__a4l_err("a4l_ioctl_nbrnginfo: bad channel index\n");
+		return -EINVAL;
+	}
+
+	rng_desc = dev->transfer.subds[inarg.idx_subd]->rng_desc;
+	if (rng_desc != NULL) {
+		i = (rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ?
+			inarg.idx_chan : 0;
+		inarg.info = (void *)(unsigned long)
+			rng_desc->rngtabs[i]->length;
+	} else
+		inarg.info = (void *)0;
+
+
+	if (rtdm_safe_copy_to_user(fd,
+				   arg,
+				   &inarg, sizeof(a4l_rnginfo_arg_t)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+int a4l_ioctl_rnginfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int i, ret = 0;
+	unsigned int tmp;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_rngdesc *rng_desc;
+	a4l_rnginfo_t *rng_info;
+	a4l_rnginfo_arg_t inarg;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_rnginfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &inarg,
+				     arg, sizeof(a4l_rnginfo_arg_t)) != 0)
+		return -EFAULT;
+
+	if (inarg.idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_ioctl_rnginfo: bad subdevice index\n");
+		return -EINVAL;
+	}
+
+	if (dev->transfer.subds[inarg.idx_subd]->chan_desc == NULL) {
+		__a4l_err("a4l_ioctl_rnginfo: no channel descriptor "
+			  "for subdevice %d\n", inarg.idx_subd);
+		return -EINVAL;
+	}
+
+	if (inarg.idx_chan >=
+	    dev->transfer.subds[inarg.idx_subd]->chan_desc->length) {
+		__a4l_err("a4l_ioctl_rnginfo: bad channel index\n");
+		return -EINVAL;
+	}
+
+	rng_desc = dev->transfer.subds[inarg.idx_subd]->rng_desc;
+	if (rng_desc == NULL) {
+		__a4l_err("a4l_ioctl_rnginfo: no range descriptor "
+			  "for channel %d\n", inarg.idx_chan);
+		return -EINVAL;
+	}
+
+	/* If the range descriptor is global,
+	   we take the first instance */
+	tmp = (rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ?
+		inarg.idx_chan : 0;
+
+	rng_info = rtdm_malloc(rng_desc->rngtabs[tmp]->length *
+			       sizeof(a4l_rnginfo_t));
+	if (rng_info == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < rng_desc->rngtabs[tmp]->length; i++) {
+		rng_info[i].min = rng_desc->rngtabs[tmp]->rngs[i].min;
+		rng_info[i].max = rng_desc->rngtabs[tmp]->rngs[i].max;
+		rng_info[i].flags = rng_desc->rngtabs[tmp]->rngs[i].flags;
+
+		if (rng_desc->mode == A4L_RNG_GLOBAL_RNGDESC)
+			rng_info[i].flags |= A4L_RNG_GLOBAL;
+	}
+
+	if (rtdm_safe_copy_to_user(fd,
+				   inarg.info,
+				   rng_info,
+				   rng_desc->rngtabs[tmp]->length *
+				   sizeof(a4l_rnginfo_t)) != 0)
+		ret = -EFAULT;
+
+	rtdm_free(rng_info);
+
+	return ret;
+}
+++ linux-patched/drivers/xenomai/analogy/driver.c	2022-03-21 12:58:31.057872696 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/device.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, driver related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <rtdm/analogy/device.h>
+
+#include "proc.h"
+
+static LIST_HEAD(a4l_drvs);
+
+/* --- Driver list management functions --- */
+
+int a4l_lct_drv(char *pin, struct a4l_driver ** pio)
+{
+	struct list_head *this;
+	int ret = -EINVAL;
+
+	__a4l_dbg(1, core_dbg, "name=%s\n", pin);
+
+	/* Goes through the linked list so as to find
+	   a driver instance with the same name */
+	list_for_each(this, &a4l_drvs) {
+		struct a4l_driver *drv = list_entry(this, struct a4l_driver, list);
+
+		if (strcmp(drv->board_name, pin) == 0) {
+			/* The argument pio can be NULL
+			   if there is no need to retrieve the pointer */
+			if (pio != NULL)
+				*pio = drv;
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+int a4l_register_drv(struct a4l_driver * drv)
+{
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	__a4l_dbg(1, core_dbg, "board name=%s\n", drv->board_name);
+
+	if (a4l_lct_drv(drv->board_name, NULL) != 0) {
+		list_add(&drv->list, &a4l_drvs);
+		return 0;
+	} else
+		return -EINVAL;
+}
+
+int a4l_unregister_drv(struct a4l_driver * drv)
+{
+	__a4l_dbg(1, core_dbg, "board name=%s\n", drv->board_name);
+
+	if (a4l_lct_drv(drv->board_name, NULL) == 0) {
+		/* Here, we assume the argument points to a real
+		   driver struct (not a blank structure with only
+		   the name field properly set) */
+		list_del(&drv->list);
+		return 0;
+	} else
+		return -EINVAL;
+}
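+
+/* Illustrative sketch (editor's note, not part of this file): how a board
+   driver module typically plugs into the registration calls above. The
+   attach/detach callbacks and structure names are hypothetical; the fields
+   shown are the ones this core actually dereferences.
+
+	static struct a4l_driver my_drv = {
+		.owner = THIS_MODULE,
+		.board_name = "my_board",
+		.driver_name = "my_driver",
+		.attach = my_attach,
+		.detach = my_detach,
+		.privdata_size = sizeof(struct my_priv),
+	};
+
+	static int __init my_init(void)
+	{
+		return a4l_register_drv(&my_drv);
+	}
+
+	static void __exit my_exit(void)
+	{
+		a4l_unregister_drv(&my_drv);
+	}
+*/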
+
+#ifdef CONFIG_PROC_FS
+
+/* --- Driver list proc section --- */
+
+int a4l_rdproc_drvs(struct seq_file *p, void *data)
+{
+	int i = 0;
+	struct list_head *this;
+
+	seq_printf(p, "--  Analogy drivers --\n\n");
+
+	seq_printf(p, "| idx | board name \n");
+
+	list_for_each(this, &a4l_drvs) {
+		struct a4l_driver *drv = list_entry(this, struct a4l_driver, list);
+		seq_printf(p, "|  %02d | %s \n", i++, drv->board_name);
+	}
+	return 0;
+}
+
+#endif /* CONFIG_PROC_FS */
+++ linux-patched/drivers/xenomai/analogy/device.c	2022-03-21 12:58:31.049872774 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/proc.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, device related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <rtdm/analogy/device.h>
+
+#include "proc.h"
+
+static struct a4l_device a4l_devs[A4L_NB_DEVICES];
+
+/* --- Device tab management functions --- */
+
+void a4l_init_devs(void)
+{
+	int i;
+	memset(a4l_devs, 0, A4L_NB_DEVICES * sizeof(struct a4l_device));
+	for (i = 0; i < A4L_NB_DEVICES; i++) {
+		rtdm_lock_init(&a4l_devs[i].lock);
+		a4l_devs[i].transfer.irq_desc.irq = A4L_IRQ_UNUSED;
+	}
+}
+
+int a4l_check_cleanup_devs(void)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < A4L_NB_DEVICES && ret == 0; i++)
+		if (test_bit(A4L_DEV_ATTACHED_NR, &a4l_devs[i].flags))
+			ret = -EBUSY;
+
+	return ret;
+}
+
+void a4l_set_dev(struct a4l_device_context *cxt)
+{
+	/* Retrieve the minor index */
+	const int minor = a4l_get_minor(cxt);
+	/* Fill the dev fields accordingly */
+	cxt->dev = &(a4l_devs[minor]);
+}
+
+/* --- Device tab proc section --- */
+
+#ifdef CONFIG_PROC_FS
+
+int a4l_rdproc_devs(struct seq_file *p, void *data)
+{
+	int i;
+
+	seq_printf(p, "--  Analogy devices --\n\n");
+	seq_printf(p, "| idx | status | driver\n");
+
+	for (i = 0; i < A4L_NB_DEVICES; i++) {
+		char *status, *name;
+
+		/* Gets the device's state */
+		if (a4l_devs[i].flags == 0) {
+			status = "Unused";
+			name = "No driver";
+		} else if (test_bit(A4L_DEV_ATTACHED_NR, &a4l_devs[i].flags)) {
+			status = "Linked";
+			name = a4l_devs[i].driver->driver_name;
+		} else {
+			status = "Broken";
+			name = "Unknown";
+		}
+
+		seq_printf(p, "|  %02d | %s | %s\n", i, status, name);
+	}
+	return 0;
+}
+
+static int a4l_proc_transfer_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, a4l_rdproc_transfer, PDE_DATA(inode));
+}
+
+static const DEFINE_PROC_OPS(a4l_proc_transfer_ops,
+			a4l_proc_transfer_open,
+			single_release,
+			seq_read,
+			NULL);
+
+int a4l_proc_attach(struct a4l_device_context * cxt)
+{
+	int ret = 0;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct proc_dir_entry *entry;
+	char *entry_name;
+
+	/* Allocate the buffer for the file name */
+	entry_name = rtdm_malloc(A4L_NAMELEN + 4);
+	if (entry_name == NULL) {
+		__a4l_err("a4l_proc_attach: failed to allocate buffer\n");
+		return -ENOMEM;
+	}
+
+	/* Create the proc file name */
+	ksformat(entry_name, A4L_NAMELEN + 4, "%02d-%s",
+		 a4l_get_minor(cxt), dev->driver->board_name);
+
+	/* Create the proc entry */
+	entry = proc_create_data(entry_name, 0444, a4l_proc_root,
+				 &a4l_proc_transfer_ops, &dev->transfer);
+	if (entry == NULL) {
+		__a4l_err("a4l_proc_attach: "
+			  "failed to create /proc/analogy/%s\n",
+			  entry_name);
+		ret = -ENOMEM;
+	}
+
+	rtdm_free(entry_name);
+
+	return ret;
+}
+
+void a4l_proc_detach(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	char *entry_name;
+
+	entry_name = rtdm_malloc(A4L_NAMELEN + 4);
+	if (entry_name == NULL) {
+		__a4l_err("a4l_proc_detach: "
+			  "failed to allocate filename buffer\n");
+		return;
+	}
+
+	ksformat(entry_name, A4L_NAMELEN + 4, "%02d-%s",
+		 a4l_get_minor(cxt), dev->driver->board_name);
+
+	remove_proc_entry(entry_name, a4l_proc_root);
+
+	rtdm_free(entry_name);
+}
+
+#else /* !CONFIG_PROC_FS */
+
+int a4l_proc_attach(struct a4l_device_context * cxt)
+{
+	return 0;
+}
+
+void a4l_proc_detach(struct a4l_device_context * cxt)
+{
+}
+
+#endif /* CONFIG_PROC_FS */
+
+/* --- Attach / detach section --- */
+
+int a4l_fill_lnkdesc(struct a4l_device_context * cxt,
+		     a4l_lnkdesc_t * link_arg, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int ret;
+	char *tmpname = NULL;
+	void *tmpopts = NULL;
+
+	ret = rtdm_safe_copy_from_user(fd,
+				       link_arg, arg, sizeof(a4l_lnkdesc_t));
+	if (ret != 0) {
+		__a4l_err("a4l_fill_lnkdesc: "
+			  "call1(copy_from_user) failed\n");
+		goto out_get_lnkdesc;
+	}
+
+	if (link_arg->bname_size != 0 && link_arg->bname != NULL) {
+		tmpname = rtdm_malloc(link_arg->bname_size + 1);
+		if (tmpname == NULL) {
+			__a4l_err("a4l_fill_lnkdesc: "
+				  "call1(alloc) failed\n");
+			ret = -ENOMEM;
+			goto out_get_lnkdesc;
+		}
+		tmpname[link_arg->bname_size] = 0;
+
+		ret = rtdm_safe_copy_from_user(fd,
+					       tmpname,
+					       link_arg->bname,
+					       link_arg->bname_size);
+		if (ret != 0) {
+			__a4l_err("a4l_fill_lnkdesc: "
+				  "call2(copy_from_user) failed\n");
+			goto out_get_lnkdesc;
+		}
+	} else {
+		__a4l_err("a4l_fill_lnkdesc: board name missing\n");
+		ret = -EINVAL;
+		goto out_get_lnkdesc;
+	}
+
+	if (link_arg->opts_size != 0 && link_arg->opts != NULL) {
+		tmpopts = rtdm_malloc(link_arg->opts_size);
+
+		if (tmpopts == NULL) {
+			__a4l_err("a4l_fill_lnkdesc: "
+				  "call2(alloc) failed\n");
+			ret = -ENOMEM;
+			goto out_get_lnkdesc;
+		}
+
+		ret = rtdm_safe_copy_from_user(fd,
+					       tmpopts,
+					       link_arg->opts,
+					       link_arg->opts_size);
+		if (ret != 0) {
+			__a4l_err("a4l_fill_lnkdesc: "
+				  "call3(copy_from_user) failed\n");
+			goto out_get_lnkdesc;
+		}
+	}
+
+	link_arg->bname = tmpname;
+	link_arg->opts = tmpopts;
+
+      out_get_lnkdesc:
+
+	if (tmpname == NULL) {
+		link_arg->bname = NULL;
+		link_arg->bname_size = 0;
+	}
+
+	if (tmpopts == NULL) {
+		link_arg->opts = NULL;
+		link_arg->opts_size = 0;
+	}
+
+	return ret;
+}
+
+void a4l_free_lnkdesc(struct a4l_device_context * cxt, a4l_lnkdesc_t * link_arg)
+{
+	if (link_arg->bname != NULL)
+		rtdm_free(link_arg->bname);
+
+	if (link_arg->opts != NULL)
+		rtdm_free(link_arg->opts);
+}
+
+int a4l_assign_driver(struct a4l_device_context * cxt,
+			 struct a4l_driver * drv, a4l_lnkdesc_t * link_arg)
+{
+	int ret = 0;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+
+	dev->driver = drv;
+	INIT_LIST_HEAD(&dev->subdvsq);
+
+	if (drv->privdata_size == 0)
+		__a4l_dbg(1, core_dbg, " warning! "
+				       "the field priv will not be usable\n");
+	else {
+		dev->priv = rtdm_malloc(drv->privdata_size);
+		if (dev->priv == NULL) {
+			__a4l_err("a4l_assign_driver: "
+				  "call(alloc) failed\n");
+			ret = -ENOMEM;
+			goto out_assign_driver;
+		}
+
+		/* Initialize the private data even if it is not our
+		   role (the driver should do it); that may prevent
+		   hard-to-find bugs */
+		memset(dev->priv, 0, drv->privdata_size);
+	}
+
+	if ((ret = drv->attach(dev, link_arg)) != 0)
+		__a4l_err("a4l_assign_driver: "
+			  "call(drv->attach) failed (ret=%d)\n",
+		     ret);
+
+out_assign_driver:
+
+	/* Increments module's count */
+	if (ret == 0 && (!try_module_get(drv->owner))) {
+		__a4l_err("a4l_assign_driver: "
+			  "driver's owner field wrongly set\n");
+		ret = -ENODEV;
+	}
+
+	if (ret != 0 && dev->priv != NULL) {
+		rtdm_free(dev->priv);
+		dev->driver = NULL;
+	}
+
+	return ret;
+}
+
+int a4l_release_driver(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_subdevice *subd, *tmp;
+	int ret = 0;
+
+	if ((ret = dev->driver->detach(dev)) != 0)
+		goto out_release_driver;
+
+	module_put(dev->driver->owner);
+
+	/* In case, the driver developer did not free the subdevices */
+	if (!list_empty(&dev->subdvsq))
+		list_for_each_entry_safe(subd, tmp, &dev->subdvsq, list) {
+			list_del(&subd->list);
+			rtdm_free(subd);
+		}
+
+	/* Free the private field */
+	if (dev->priv)
+		rtdm_free(dev->priv);
+
+	dev->driver = NULL;
+
+out_release_driver:
+	return ret;
+}
+
+int a4l_device_attach(struct a4l_device_context * cxt, void *arg)
+{
+	int ret = 0;
+	a4l_lnkdesc_t link_arg;
+	struct a4l_driver *drv = NULL;
+
+	if ((ret = a4l_fill_lnkdesc(cxt, &link_arg, arg)) != 0)
+		goto out_attach;
+
+	if ((ret = a4l_lct_drv(link_arg.bname, &drv)) != 0) {
+		__a4l_err("a4l_device_attach: "
+			  "cannot find board name %s\n", link_arg.bname);
+		goto out_attach;
+	}
+
+	if ((ret = a4l_assign_driver(cxt, drv, &link_arg)) != 0)
+		goto out_attach;
+
+      out_attach:
+	a4l_free_lnkdesc(cxt, &link_arg);
+	return ret;
+}
+
+int a4l_device_detach(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev = a4l_get_dev(cxt);
+
+	if (dev->driver == NULL) {
+		__a4l_err("a4l_device_detach: "
+			  "incoherent state, driver not reachable\n");
+		return -ENXIO;
+	}
+
+	return a4l_release_driver(cxt);
+}
+
+/* --- IOCTL / FOPS functions --- */
+
+int a4l_ioctl_devcfg(struct a4l_device_context * cxt, void *arg)
+{
+	int ret = 0;
+
+	if (rtdm_in_rt_context())
+		return -ENOSYS;
+
+	if (arg == NULL) {
+		/* Basic checking */
+		if (!test_bit(A4L_DEV_ATTACHED_NR, &(a4l_get_dev(cxt)->flags))) {
+			__a4l_err("a4l_ioctl_devcfg: "
+				  "free device, no driver to detach\n");
+			return -EINVAL;
+		}
+		/* Pre-cleanup of the transfer structure, we ensure
+		   that nothing is busy */
+		if ((ret = a4l_precleanup_transfer(cxt)) != 0)
+			return ret;
+		/* Remove the related proc file */
+		a4l_proc_detach(cxt);
+		/* Free the device and the driver from each other */
+		if ((ret = a4l_device_detach(cxt)) == 0)
+			clear_bit(A4L_DEV_ATTACHED_NR,
+				  &(a4l_get_dev(cxt)->flags));
+		/* Free the transfer structure and its related data */
+		if ((ret = a4l_cleanup_transfer(cxt)) != 0)
+			return ret;
+	} else {
+		/* Basic checking */
+		if (test_bit
+		    (A4L_DEV_ATTACHED_NR, &(a4l_get_dev(cxt)->flags))) {
+			__a4l_err("a4l_ioctl_devcfg: "
+				  "device already linked, cannot attach another driver\n");
+			return -EINVAL;
+		}
+		/* Pre-initialization of the transfer structure */
+		a4l_presetup_transfer(cxt);
+		/* Link the device with the driver */
+		if ((ret = a4l_device_attach(cxt, arg)) != 0)
+			return ret;
+		/* Create the transfer structure and
+		   the related proc file */
+		if ((ret = a4l_setup_transfer(cxt)) != 0 ||
+		    (ret = a4l_proc_attach(cxt)) != 0)
+			a4l_device_detach(cxt);
+		else
+			set_bit(A4L_DEV_ATTACHED_NR,
+				&(a4l_get_dev(cxt)->flags));
+	}
+
+	return ret;
+}
+
+int a4l_ioctl_devinfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	a4l_dvinfo_t info;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+
+	memset(&info, 0, sizeof(a4l_dvinfo_t));
+
+	if (test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		int len = (strlen(dev->driver->board_name) > A4L_NAMELEN) ?
+		    A4L_NAMELEN : strlen(dev->driver->board_name);
+
+		memcpy(info.board_name, dev->driver->board_name, len);
+
+		len = (strlen(dev->driver->driver_name) > A4L_NAMELEN) ?
+		    A4L_NAMELEN : strlen(dev->driver->driver_name);
+
+		memcpy(info.driver_name, dev->driver->driver_name, len);
+
+		info.nb_subd = dev->transfer.nb_subd;
+		/* TODO: for API compatibility, find the first read
+		   subdevice and the first write subdevice */
+	}
+
+	if (rtdm_safe_copy_to_user(fd,
+				   arg, &info, sizeof(a4l_dvinfo_t)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+++ linux-patched/drivers/xenomai/analogy/proc.h	2022-03-21 12:58:31.042872842 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/command.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, procfs related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __ANALOGY_PROC_H__
+#define __ANALOGY_PROC_H__
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_PROC_FS
+extern struct proc_dir_entry *a4l_proc_root;
+#endif /* CONFIG_PROC_FS */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ANALOGY_PROC_H__ */
+++ linux-patched/drivers/xenomai/analogy/command.c	2022-03-21 12:58:31.035872910 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/intel/parport.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, command related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/mman.h>
+#include <asm/io.h>
+#include <asm/errno.h>
+#include <rtdm/analogy/device.h>
+
+/* --- Command descriptor management functions --- */
+int a4l_fill_cmddesc(struct a4l_device_context *cxt, struct a4l_cmd_desc *desc,
+		     unsigned int **chan_descs, void *arg)
+{
+	unsigned int *tmpchans = NULL;
+	int ret = 0;
+
+	ret = rtdm_safe_copy_from_user(rtdm_private_to_fd(cxt),
+				       desc, arg, sizeof(struct a4l_cmd_desc));
+	if (ret != 0)
+		goto out_cmddesc;
+
+
+	if (desc->nb_chan == 0) {
+		ret = -EINVAL;
+		goto out_cmddesc;
+	}
+
+	tmpchans = rtdm_malloc(desc->nb_chan * sizeof(unsigned int));
+	if (tmpchans == NULL) {
+		ret = -ENOMEM;
+		goto out_cmddesc;
+	}
+
+	ret = rtdm_safe_copy_from_user(rtdm_private_to_fd(cxt),
+				       tmpchans,
+				       desc->chan_descs,
+				       desc->nb_chan * sizeof(unsigned int));
+	if (ret != 0) {
+		__a4l_err("%s: invalid arguments\n", __FUNCTION__);
+		goto out_cmddesc;
+	}
+
+	*chan_descs = desc->chan_descs;
+	desc->chan_descs = tmpchans;
+
+	__a4l_dbg(1, core_dbg, "desc dump: \n");
+	__a4l_dbg(1, core_dbg, "\t->idx_subd=%u\n", desc->idx_subd);
+	__a4l_dbg(1, core_dbg, "\t->flags=%lu\n", desc->flags);
+	__a4l_dbg(1, core_dbg, "\t->nb_chan=%u\n", desc->nb_chan);
+	__a4l_dbg(1, core_dbg, "\t->chan_descs=0x%x\n", *desc->chan_descs);
+	__a4l_dbg(1, core_dbg, "\t->data_len=%u\n", desc->data_len);
+	__a4l_dbg(1, core_dbg, "\t->pdata=0x%p\n", desc->data);
+
+	out_cmddesc:
+
+	if (ret != 0) {
+		__a4l_err("a4l_fill_cmddesc: %d \n", ret);
+		if (tmpchans != NULL)
+			rtdm_free(tmpchans);
+		desc->chan_descs = NULL;
+	}
+
+	return ret;
+}
+
+void a4l_free_cmddesc(struct a4l_cmd_desc * desc)
+{
+	if (desc->chan_descs != NULL)
+		rtdm_free(desc->chan_descs);
+}
+
+int a4l_check_cmddesc(struct a4l_device_context * cxt, struct a4l_cmd_desc * desc)
+{
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_subdevice *subd;
+
+	if (desc->idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_check_cmddesc: "
+			  "subdevice index out of range (idx=%u)\n",
+			  desc->idx_subd);
+		return -EINVAL;
+	}
+
+	subd = dev->transfer.subds[desc->idx_subd];
+
+	if ((subd->flags & A4L_SUBD_TYPES) == A4L_SUBD_UNUSED) {
+		__a4l_err("a4l_check_cmddesc: "
+			  "subdevice type incoherent\n");
+		return -EIO;
+	}
+
+	if (!(subd->flags & A4L_SUBD_CMD)) {
+		__a4l_err("a4l_check_cmddesc: operation not supported, "
+			  "synchronous only subdevice\n");
+		return -EIO;
+	}
+
+	if (test_bit(A4L_SUBD_BUSY, &subd->status)) {
+		__a4l_err("a4l_check_cmddesc: subdevice busy\n");
+		return -EBUSY;
+	}
+
+	return a4l_check_chanlist(dev->transfer.subds[desc->idx_subd],
+				  desc->nb_chan, desc->chan_descs);
+}
+
+/* --- Command checking functions --- */
+
+int a4l_check_generic_cmdcnt(struct a4l_cmd_desc * desc)
+{
+	unsigned int tmp1, tmp2;
+
+	/* Makes sure trigger sources are trivially valid */
+	tmp1 =
+	desc->start_src & ~(TRIG_NOW | TRIG_INT | TRIG_EXT | TRIG_FOLLOW);
+	tmp2 = desc->start_src & (TRIG_NOW | TRIG_INT | TRIG_EXT | TRIG_FOLLOW);
+	if (tmp1 != 0 || tmp2 == 0) {
+		__a4l_err("a4l_check_cmddesc: start_src, weird trigger\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->scan_begin_src & ~(TRIG_TIMER | TRIG_EXT | TRIG_FOLLOW);
+	tmp2 = desc->scan_begin_src & (TRIG_TIMER | TRIG_EXT | TRIG_FOLLOW);
+	if (tmp1 != 0 || tmp2 == 0) {
+		__a4l_err("a4l_check_cmddesc: scan_begin_src, weird trigger\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->convert_src & ~(TRIG_TIMER | TRIG_EXT | TRIG_NOW);
+	tmp2 = desc->convert_src & (TRIG_TIMER | TRIG_EXT | TRIG_NOW);
+	if (tmp1 != 0 || tmp2 == 0) {
+		__a4l_err("a4l_check_cmddesc: convert_src, weird trigger\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->scan_end_src & ~(TRIG_COUNT);
+	if (tmp1 != 0) {
+		__a4l_err("a4l_check_cmddesc: scan_end_src, weird trigger\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->stop_src & ~(TRIG_COUNT | TRIG_NONE);
+	tmp2 = desc->stop_src & (TRIG_COUNT | TRIG_NONE);
+	if (tmp1 != 0 || tmp2 == 0) {
+		__a4l_err("a4l_check_cmddesc: stop_src, weird trigger\n");
+		return -EINVAL;
+	}
+
+	/* Makes sure only one trigger source is set for each stage */
+	if (desc->start_src != TRIG_NOW &&
+	    desc->start_src != TRIG_INT &&
+	    desc->start_src != TRIG_EXT && desc->start_src != TRIG_FOLLOW) {
+		__a4l_err("a4l_check_cmddesc: start_src, "
+			  "only one trigger should be set\n");
+		return -EINVAL;
+	}
+
+	if (desc->scan_begin_src != TRIG_TIMER &&
+	    desc->scan_begin_src != TRIG_EXT &&
+	    desc->scan_begin_src != TRIG_FOLLOW) {
+		__a4l_err("a4l_check_cmddesc: scan_begin_src, "
+			  "only one trigger should be set\n");
+		return -EINVAL;
+	}
+
+	if (desc->convert_src != TRIG_TIMER &&
+	    desc->convert_src != TRIG_EXT && desc->convert_src != TRIG_NOW) {
+		__a4l_err("a4l_check_cmddesc: convert_src, "
+			  "only one trigger should be set\n");
+		return -EINVAL;
+	}
+
+	if (desc->stop_src != TRIG_COUNT && desc->stop_src != TRIG_NONE) {
+		__a4l_err("a4l_check_cmddesc: stop_src, "
+			  "only one trigger should be set\n");
+		return -EINVAL;
+	}
+
+	/* Makes sure arguments are trivially compatible */
+	tmp1 = desc->start_src & (TRIG_NOW | TRIG_FOLLOW | TRIG_INT);
+	tmp2 = desc->start_arg;
+	if (tmp1 != 0 && tmp2 != 0) {
+		__a4l_err("a4l_check_cmddesc: no start_arg expected\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->scan_begin_src & TRIG_FOLLOW;
+	tmp2 = desc->scan_begin_arg;
+	if (tmp1 != 0 && tmp2 != 0) {
+		__a4l_err("a4l_check_cmddesc: no scan_begin_arg expected\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->convert_src & TRIG_NOW;
+	tmp2 = desc->convert_arg;
+	if (tmp1 != 0 && tmp2 != 0) {
+		__a4l_err("a4l_check_cmddesc: no convert_arg expected\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->stop_src & TRIG_NONE;
+	tmp2 = desc->stop_arg;
+	if (tmp1 != 0 && tmp2 != 0) {
+		__a4l_err("a4l_check_cmddesc: no stop_arg expected\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int a4l_check_specific_cmdcnt(struct a4l_device_context * cxt, struct a4l_cmd_desc * desc)
+{
+	unsigned int tmp1, tmp2;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_cmd_desc *cmd_mask = dev->transfer.subds[desc->idx_subd]->cmd_mask;
+
+	if (cmd_mask == NULL)
+		return 0;
+
+	if (cmd_mask->start_src != 0) {
+		tmp1 = desc->start_src & ~(cmd_mask->start_src);
+		tmp2 = desc->start_src & (cmd_mask->start_src);
+		if (tmp1 != 0 || tmp2 == 0) {
+			__a4l_err("a4l_check_cmddesc: start_src, "
+				  "trigger unsupported\n");
+			return -EINVAL;
+		}
+	}
+
+	if (cmd_mask->scan_begin_src != 0) {
+		tmp1 = desc->scan_begin_src & ~(cmd_mask->scan_begin_src);
+		tmp2 = desc->scan_begin_src & (cmd_mask->scan_begin_src);
+		if (tmp1 != 0 || tmp2 == 0) {
+			__a4l_err("a4l_check_cmddesc: scan_begin_src, "
+				  "trigger unsupported\n");
+			return -EINVAL;
+		}
+	}
+
+	if (cmd_mask->convert_src != 0) {
+		tmp1 = desc->convert_src & ~(cmd_mask->convert_src);
+		tmp2 = desc->convert_src & (cmd_mask->convert_src);
+		if (tmp1 != 0 || tmp2 == 0) {
+			__a4l_err("a4l_check_cmddesc: convert_src, "
+				  "trigger unsupported\n");
+			return -EINVAL;
+		}
+	}
+
+	if (cmd_mask->scan_end_src != 0) {
+		tmp1 = desc->scan_end_src & ~(cmd_mask->scan_end_src);
+		if (tmp1 != 0) {
+			__a4l_err("a4l_check_cmddesc: scan_end_src, "
+				  "trigger unsupported\n");
+			return -EINVAL;
+		}
+	}
+
+	if (cmd_mask->stop_src != 0) {
+		tmp1 = desc->stop_src & ~(cmd_mask->stop_src);
+		tmp2 = desc->stop_src & (cmd_mask->stop_src);
+		if (tmp1 != 0 || tmp2 == 0) {
+			__a4l_err("a4l_check_cmddesc: stop_src, "
+				  "trigger unsupported\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* --- IOCTL / FOPS function --- */
+
+int a4l_ioctl_cmd(struct a4l_device_context * ctx, void *arg)
+{
+	int ret = 0, simul_flag = 0;
+	struct a4l_cmd_desc *cmd_desc = NULL;
+	struct a4l_device *dev = a4l_get_dev(ctx);
+	unsigned int *chan_descs, *tmp;
+	struct a4l_subdevice *subd;
+
+	/* The command launching cannot be done in real-time because
+	   of some possible buffer allocations in the drivers */
+	if (rtdm_in_rt_context())
+		return -ENOSYS;
+
+	/* Basically check the device */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_cmd: cannot command "
+			  "an unattached device\n");
+		return -EINVAL;
+	}
+
+	/* Allocates the command */
+	cmd_desc = (struct a4l_cmd_desc *) rtdm_malloc(sizeof(struct a4l_cmd_desc));
+	if (cmd_desc == NULL)
+		return -ENOMEM;
+	memset(cmd_desc, 0, sizeof(struct a4l_cmd_desc));
+
+	/* Gets the command */
+	ret = a4l_fill_cmddesc(ctx, cmd_desc, &chan_descs, arg);
+	if (ret != 0)
+		goto out_ioctl_cmd;
+
+	/* Checks the command */
+	ret = a4l_check_cmddesc(ctx, cmd_desc);
+	if (ret != 0)
+		goto out_ioctl_cmd;
+
+	ret = a4l_check_generic_cmdcnt(cmd_desc);
+	if (ret != 0)
+		goto out_ioctl_cmd;
+
+	ret = a4l_check_specific_cmdcnt(ctx, cmd_desc);
+	if (ret != 0)
+		goto out_ioctl_cmd;
+
+	__a4l_dbg(1, core_dbg,"1st cmd checks passed\n");
+	subd = dev->transfer.subds[cmd_desc->idx_subd];
+
+	/* Tests the command with the cmdtest function */
+	if (cmd_desc->flags & A4L_CMD_SIMUL) {
+		simul_flag = 1;
+
+		if (!subd->do_cmdtest) {
+			__a4l_err("a4l_ioctl_cmd: driver's cmd_test NULL\n");
+			ret = -EINVAL;
+			goto out_ioctl_cmd;
+		}
+
+		ret = subd->do_cmdtest(subd, cmd_desc);
+		if (ret != 0) {
+			__a4l_err("a4l_ioctl_cmd: driver's cmd_test failed\n");
+			goto out_ioctl_cmd;
+		}
+		__a4l_dbg(1, core_dbg, "driver's cmd checks passed\n");
+		goto out_ioctl_cmd;
+	}
+
+
+	/* Gets the transfer system ready */
+	ret = a4l_setup_buffer(ctx, cmd_desc);
+	if (ret < 0)
+		goto out_ioctl_cmd;
+
+	/* Finally, launches the command */
+	ret = subd->do_cmd(subd, cmd_desc);
+
+	if (ret != 0) {
+		a4l_cancel_buffer(ctx);
+		goto out_ioctl_cmd;
+	}
+
+	out_ioctl_cmd:
+
+	if (simul_flag) {
+		/* copy the kernel based descriptor */
+		tmp = cmd_desc->chan_descs;
+		/* return the user based descriptor */
+		cmd_desc->chan_descs = chan_descs;
+		rtdm_safe_copy_to_user(rtdm_private_to_fd(ctx), arg, cmd_desc,
+				       sizeof(struct a4l_cmd_desc));
+		/* make sure we release the memory associated with the kernel copy */
+		cmd_desc->chan_descs = tmp;
+
+	}
+
+	if (ret != 0 || simul_flag == 1) {
+		a4l_free_cmddesc(cmd_desc);
+		rtdm_free(cmd_desc);
+	}
+
+	return ret;
+}
+++ linux-patched/drivers/xenomai/analogy/intel/parport.c	2022-03-21 12:58:31.027872989 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/intel/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy driver for standard parallel port
+ * Copyright (C) 1998,2001 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+   A cheap and easy way to get a few more digital I/O lines.  Steal
+   additional parallel ports from old computers or your neighbors'
+   computers.
+
+   Attach options list:
+   0: I/O port base for the parallel port.
+   1: IRQ
+
+   Parallel Port Lines:
+
+   pin     subdev  chan    aka
+   ---     ------  ----    ---
+   1       2       0       strobe
+   2       0       0       data 0
+   3       0       1       data 1
+   4       0       2       data 2
+   5       0       3       data 3
+   6       0       4       data 4
+   7       0       5       data 5
+   8       0       6       data 6
+   9       0       7       data 7
+   10      1       3       acknowledge
+   11      1       4       busy
+   12      1       2       output
+   13      1       1       printer selected
+   14      2       1       auto LF
+   15      1       0       error
+   16      2       2       init
+   17      2       3       select printer
+   18-25   ground
+
+   Notes:
+
+   Subdevice 0 is digital I/O, subdevice 1 is digital input, and
+   subdevice 2 is digital output.  Unlike other Analogy devices,
+   subdevice 0 defaults to output.
+
+   Pins 13 and 14 are inverted once by Analogy and once by the
+   hardware, thus cancelling the effect.
+
+   Pin 1 is a strobe, thus acts like one.  There's no way in software
+   to change this, at least on a standard parallel port.
+
+   Subdevice 3 pretends to be a digital input subdevice, but it always
+   returns 0 when read.  However, if you run a command with
+   scan_begin_src=TRIG_EXT, it uses pin 10 as an external triggering
+   pin, which can be used to wake up tasks.
+
+   See http://www.beyondlogic.org/ or
+   http://www.linux-magazin.de/ausgabe/1999/10/IO/io.html for more
+   information. An attach-options sketch follows this comment block.
+*/
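+
+/*
+ * Attach options sketch (illustrative only; "lnkdsc" is a placeholder
+ * for the a4l_lnkdesc_t handed to the attach procedure): the options
+ * listed above reach dev_parport_attach() as an array of unsigned
+ * longs, e.g.
+ *
+ *   unsigned long opts[2] = { 0x378, 7 };	(I/O base, then IRQ)
+ *   lnkdsc.opts = opts;
+ *   lnkdsc.opts_size = sizeof(opts);
+ *
+ * When no option is given, the driver falls back to DEFAULT_ADDRESS
+ * and DEFAULT_IRQ.
+ */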
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>		/* For inb/outb */
+#include <rtdm/analogy/device.h>
+
+#define PARPORT_SIZE 3
+
+#define PARPORT_A 0
+#define PARPORT_B 1
+#define PARPORT_C 2
+
+#define DEFAULT_ADDRESS 0x378
+#define DEFAULT_IRQ 7
+
+typedef struct parport_subd_priv {
+	unsigned long io_bits;
+} parport_spriv_t;
+
+typedef struct parport_priv {
+	unsigned long io_base;
+	unsigned int a_data;
+	unsigned int c_data;
+	int enable_irq;
+} parport_priv_t;
+
+#define devpriv ((parport_priv_t *)(dev->priv))
+
+static int parport_insn_a(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	if (data[0]) {
+		devpriv->a_data &= ~data[0];
+		devpriv->a_data |= (data[0] & data[1]);
+
+		outb(devpriv->a_data, devpriv->io_base + PARPORT_A);
+	}
+
+	data[1] = inb(devpriv->io_base + PARPORT_A);
+
+	return 0;
+}
+
+static int parport_insn_config_a(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	parport_spriv_t *spriv = (parport_spriv_t *)subd->priv;
+	unsigned int *data = (unsigned int *)insn->data;
+
+	/* No need to check the channel descriptor; the input / output
+	   setting is global for all channels */
+
+	switch (data[0]) {
+
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		spriv->io_bits = 0xff;
+		devpriv->c_data &= ~(1 << 5);
+		break;
+
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		spriv->io_bits = 0;
+		devpriv->c_data |= (1 << 5);
+		break;
+
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (spriv->io_bits == 0xff) ?
+			A4L_OUTPUT: A4L_INPUT;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	outb(devpriv->c_data, devpriv->io_base + PARPORT_C);
+
+	return 0;
+}
+
+static int parport_insn_b(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	if (data[0]) {
+		/* should writes be ignored? */
+	}
+
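+	/* The status lines live in bits 3..7 of the port, hence the shift */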
+	data[1] = (inb(devpriv->io_base + PARPORT_B) >> 3);
+
+	return 0;
+}
+
+static int parport_insn_c(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	data[0] &= 0x0f;
+	if (data[0]) {
+		devpriv->c_data &= ~data[0];
+		devpriv->c_data |= (data[0] & data[1]);
+
+		outb(devpriv->c_data, devpriv->io_base + PARPORT_C);
+	}
+
+	data[1] = devpriv->c_data & 0xf;
+
+	return 2;
+}
+
+static int parport_intr_insn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	uint8_t *data = (uint8_t *)insn->data;
+
+	if (insn->data_size < sizeof(uint8_t))
+		return -EINVAL;
+
+	data[1] = 0;
+	return 0;
+}
+
+static struct a4l_cmd_desc parport_intr_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW,
+	.scan_begin_src = TRIG_EXT,
+	.convert_src = TRIG_FOLLOW,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_NONE,
+};
+
+static int parport_intr_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc * cmd)
+{
+
+	if (cmd->start_arg != 0) {
+		return -EINVAL;
+	}
+	if (cmd->scan_begin_arg != 0) {
+		return -EINVAL;
+	}
+	if (cmd->convert_arg != 0) {
+		return -EINVAL;
+	}
+	if (cmd->scan_end_arg != 1) {
+		return -EINVAL;
+	}
+	if (cmd->stop_arg != 0) {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int parport_intr_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+
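+	/* Bit 4 of the control register enables the interrupt (ACK line) */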
+	devpriv->c_data |= 0x10;
+	outb(devpriv->c_data, devpriv->io_base + PARPORT_C);
+
+	devpriv->enable_irq = 1;
+
+	return 0;
+}
+
+static void parport_intr_cancel(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	a4l_info(dev, "cancel in progress\n");
+
+	devpriv->c_data &= ~0x10;
+	outb(devpriv->c_data, devpriv->io_base + PARPORT_C);
+
+	devpriv->enable_irq = 0;
+}
+
+static int parport_interrupt(unsigned int irq, void *d)
+{
+	struct a4l_device *dev = d;
+	struct a4l_subdevice *subd = a4l_get_subd(dev, 3);
+
+	if (!devpriv->enable_irq) {
+		a4l_err(dev, "parport_interrupt: bogus irq, ignored\n");
+		return IRQ_NONE;
+	}
+
+	a4l_buf_put(subd, 0, sizeof(unsigned int));
+	a4l_buf_evt(subd, 0);
+
+	return 0;
+}
+
+
+/* --- Channels descriptor --- */
+
+static struct a4l_channels_desc parport_chan_desc_a = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 8,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+static struct a4l_channels_desc parport_chan_desc_b = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 5,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+static struct a4l_channels_desc parport_chan_desc_c = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 4,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+static struct a4l_channels_desc parport_chan_desc_intr = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 1,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+/* --- Subdevice initialization functions --- */
+
+static void setup_subd_a(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_DIO;
+	subd->chan_desc = &parport_chan_desc_a;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = parport_insn_a;
+	subd->insn_config = parport_insn_config_a;
+}
+
+static void setup_subd_b(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_DI;
+	subd->chan_desc = &parport_chan_desc_b;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = parport_insn_b;
+}
+
+static void setup_subd_c(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_DO;
+	subd->chan_desc = &parport_chan_desc_c;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = parport_insn_c;
+}
+
+static void setup_subd_intr(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_DI;
+	subd->chan_desc = &parport_chan_desc_intr;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = parport_intr_insn;
+	subd->cmd_mask = &parport_intr_cmd_mask;
+	subd->do_cmdtest = parport_intr_cmdtest;
+	subd->do_cmd = parport_intr_cmd;
+	subd->cancel = parport_intr_cancel;
+}
+
+static void (*setup_subds[3])(struct a4l_subdevice *) = {
+	setup_subd_a,
+	setup_subd_b,
+	setup_subd_c
+};
+
+static int dev_parport_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	int i, err = 0, irq = A4L_IRQ_UNUSED;
+	unsigned long io_base;
+
+	if(arg->opts == NULL || arg->opts_size < sizeof(unsigned long)) {
+
+		a4l_warn(dev,
+			 "dev_parport_attach: no attach options specified, "
+			 "taking default options (addr=0x%x, irq=%d)\n",
+			 DEFAULT_ADDRESS, DEFAULT_IRQ);
+
+		io_base = DEFAULT_ADDRESS;
+		irq = DEFAULT_IRQ;
+	} else {
+
+		io_base = ((unsigned long *)arg->opts)[0];
+
+		if (arg->opts_size >= 2 * sizeof(unsigned long))
+			irq = (int) ((unsigned long *)arg->opts)[1];
+	}
+
+	if (!request_region(io_base, PARPORT_SIZE, "analogy_parport")) {
+		a4l_err(dev, "dev_parport_attach: I/O port conflict");
+		return -EIO;
+	}
+
+	a4l_info(dev, "address = 0x%lx\n", io_base);
+
+	for (i = 0; i < 3; i++) {
+
+		struct a4l_subdevice *subd = a4l_alloc_subd(sizeof(parport_spriv_t),
+						  setup_subds[i]);
+		if (subd == NULL)
+			return -ENOMEM;
+
+		err = a4l_add_subd(dev, subd);
+		if (err != i)
+			return err;
+	}
+
+	if (irq != A4L_IRQ_UNUSED) {
+
+		struct a4l_subdevice *subd;
+
+		a4l_info(dev, "irq = %d\n", irq);
+
+		err = a4l_request_irq(dev, irq, parport_interrupt, 0, dev);
+		if (err < 0) {
+			a4l_err(dev, "dev_parport_attach: irq not available\n");
+			return err;
+		}
+
+		subd = a4l_alloc_subd(0, setup_subd_intr);
+		if (subd == NULL)
+			return -ENOMEM;
+
+		err = a4l_add_subd(dev, subd);
+		if (err < 0)
+			return err;
+	}
+
+	devpriv->io_base = io_base;
+
+	devpriv->a_data = 0;
+	outb(devpriv->a_data, devpriv->io_base + PARPORT_A);
+
+	devpriv->c_data = 0;
+	outb(devpriv->c_data, devpriv->io_base + PARPORT_C);
+
+	return 0;
+}
+
+static int dev_parport_detach(struct a4l_device *dev)
+{
+	int err = 0;
+
+	if (devpriv->io_base != 0)
+		release_region(devpriv->io_base, PARPORT_SIZE);
+
+	if (a4l_get_irq(dev) != A4L_IRQ_UNUSED) {
+		a4l_free_irq(dev, a4l_get_irq(dev));
+	}
+
+
+	return err;
+}
+
+static struct a4l_driver drv_parport = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_parport",
+	.driver_name = "parport",
+	.attach = dev_parport_attach,
+	.detach = dev_parport_detach,
+	.privdata_size = sizeof(parport_priv_t),
+};
+
+static int __init drv_parport_init(void)
+{
+	return a4l_register_drv(&drv_parport);
+}
+
+static void __exit drv_parport_cleanup(void)
+{
+	a4l_unregister_drv(&drv_parport);
+}
+
+MODULE_DESCRIPTION("Analogy driver for standard parallel port");
+MODULE_LICENSE("GPL");
+
+module_init(drv_parport_init);
+module_exit(drv_parport_cleanup);
+++ linux-patched/drivers/xenomai/analogy/intel/Kconfig	2022-03-21 12:58:31.020873057 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/intel/8255.c	1970-01-01 01:00:00.000000000 +0100
+
+config XENO_DRIVERS_ANALOGY_8255
+	depends on XENO_DRIVERS_ANALOGY
+	tristate "8255 driver"
+	default n
+
+config XENO_DRIVERS_ANALOGY_PARPORT
+	depends on XENO_DRIVERS_ANALOGY && X86
+	tristate "Standard parallel port driver"
+	default n
+++ linux-patched/drivers/xenomai/analogy/intel/8255.c	2022-03-21 12:58:31.012873135 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/intel/8255.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy subdevice driver for 8255 chip
+ * Copyright (C) 1999 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <rtdm/analogy/device.h>
+
+#include "8255.h"
+
+#define CALLBACK_ARG		(((subd_8255_t *)subd->priv)->cb_arg)
+#define CALLBACK_FUNC		(((subd_8255_t *)subd->priv)->cb_func)
+
+/* Channels descriptor */
+static struct a4l_channels_desc chandesc_8255 = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 24,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, sizeof(sampl_t)},
+	},
+};
+
+/* Command options mask */
+static struct a4l_cmd_desc cmd_mask_8255 = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW,
+	.scan_begin_src = TRIG_EXT,
+	.convert_src = TRIG_FOLLOW,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_NONE,
+};
+
+void a4l_subdev_8255_interrupt(struct a4l_subdevice *subd)
+{
+	sampl_t d;
+
+	/* Retrieve the sample... */
+	d = CALLBACK_FUNC(0, _8255_DATA, 0, CALLBACK_ARG);
+	d |= (CALLBACK_FUNC(0, _8255_DATA + 1, 0, CALLBACK_ARG) << 8);
+
+	/* ...and send it */
+	a4l_buf_put(subd, &d, sizeof(sampl_t));
+
+	a4l_buf_evt(subd, 0);
+}
+EXPORT_SYMBOL_GPL(a4l_subdev_8255_interrupt);
+
+static int subdev_8255_cb(int dir, int port, int data, unsigned long arg)
+{
+	unsigned long iobase = arg;
+
+	if (dir) {
+		outb(data, iobase + port);
+		return 0;
+	} else {
+		return inb(iobase + port);
+	}
+}
+
+static void do_config(struct a4l_subdevice *subd)
+{
+	int config;
+	subd_8255_t *subd_8255 = (subd_8255_t *)subd->priv;
+
+	config = CR_CW;
+	/* 1 in io_bits indicates output, 1 in config indicates input */
+	if (!(subd_8255->io_bits & 0x0000ff))
+		config |= CR_A_IO;
+	if (!(subd_8255->io_bits & 0x00ff00))
+		config |= CR_B_IO;
+	if (!(subd_8255->io_bits & 0x0f0000))
+		config |= CR_C_LO_IO;
+	if (!(subd_8255->io_bits & 0xf00000))
+		config |= CR_C_HI_IO;
+	CALLBACK_FUNC(1, _8255_CR, config, CALLBACK_ARG);
+}
+
+int subd_8255_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	/* FIXME */
+	return 0;
+}
+
+int subd_8255_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	if (cmd->start_arg != 0) {
+		cmd->start_arg = 0;
+		return -EINVAL;
+	}
+	if (cmd->scan_begin_arg != 0) {
+		cmd->scan_begin_arg = 0;
+		return -EINVAL;
+	}
+	if (cmd->convert_arg != 0) {
+		cmd->convert_arg = 0;
+		return -EINVAL;
+	}
+	if (cmd->scan_end_arg != 1) {
+		cmd->scan_end_arg = 1;
+		return -EINVAL;
+	}
+	if (cmd->stop_arg != 0) {
+		cmd->stop_arg = 0;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void subd_8255_cancel(struct a4l_subdevice *subd)
+{
+	/* FIXME */
+}
+
+int subd_8255_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	subd_8255_t *subd_8255 = (subd_8255_t *)subd->priv;
+	uint32_t *data = (uint32_t *)insn->data;
+
+	if (data[0]) {
+
+		subd_8255->status &= ~data[0];
+		subd_8255->status |= (data[0] & data[1]);
+
+		if (data[0] & 0xff)
+			CALLBACK_FUNC(1, _8255_DATA,
+				      subd_8255->status & 0xff, CALLBACK_ARG);
+		if (data[0] & 0xff00)
+			CALLBACK_FUNC(1, _8255_DATA + 1,
+				      (subd_8255->status >> 8) & 0xff,
+				      CALLBACK_ARG);
+		if (data[0] & 0xff0000)
+			CALLBACK_FUNC(1, _8255_DATA + 2,
+				      (subd_8255->status >> 16) & 0xff,
+				      CALLBACK_ARG);
+	}
+
+	data[1] = CALLBACK_FUNC(0, _8255_DATA, 0, CALLBACK_ARG);
+	data[1] |= (CALLBACK_FUNC(0, _8255_DATA + 1, 0, CALLBACK_ARG) << 8);
+	data[1] |= (CALLBACK_FUNC(0, _8255_DATA + 2, 0, CALLBACK_ARG) << 16);
+
+	return 0;
+}
+
+int subd_8255_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	unsigned int mask;
+	unsigned int bits;
+	subd_8255_t *subd_8255 = (subd_8255_t *)subd->priv;
+	unsigned int *data = (unsigned int *)insn->data;
+
+	mask = 1 << CR_CHAN(insn->chan_desc);
+
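+	/* Select the byte group (port A, B, C low or C high) containing the channel */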
+	if (mask & 0x0000ff) {
+		bits = 0x0000ff;
+	} else if (mask & 0x00ff00) {
+		bits = 0x00ff00;
+	} else if (mask & 0x0f0000) {
+		bits = 0x0f0000;
+	} else {
+		bits = 0xf00000;
+	}
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		subd_8255->io_bits &= ~bits;
+		break;
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		subd_8255->io_bits |= bits;
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (subd_8255->io_bits & bits) ?
+			A4L_OUTPUT : A4L_INPUT;
+		return 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	do_config(subd);
+
+	return 0;
+}
+
+void a4l_subdev_8255_init(struct a4l_subdevice *subd)
+{
+	subd_8255_t *subd_8255 = (subd_8255_t *)subd->priv;
+	/* Initializes the subdevice structure */
+	memset(subd, 0, sizeof(struct a4l_subdevice));
+
+	/* Subdevice filling part */
+
+	subd->flags = A4L_SUBD_DIO;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->chan_desc = &chandesc_8255;
+	subd->insn_bits = subd_8255_insn_bits;
+	subd->insn_config = subd_8255_insn_config;
+
+	if(subd_8255->have_irq) {
+		subd->cmd_mask = &cmd_mask_8255;
+		subd->do_cmdtest = subd_8255_cmdtest;
+		subd->do_cmd = subd_8255_cmd;
+		subd->cancel = subd_8255_cancel;
+	}
+
+	/* 8255 setting part */
+
+	if(CALLBACK_FUNC == NULL)
+		CALLBACK_FUNC = subdev_8255_cb;
+
+	do_config(subd);
+}
+EXPORT_SYMBOL_GPL(a4l_subdev_8255_init);
+
+/*
+
+  Start of the 8255 standalone device
+
+*/
+
+static int dev_8255_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	unsigned long *addrs;
+	int i, err = 0;
+
+	if(arg->opts == NULL || arg->opts_size == 0) {
+		a4l_err(dev,
+			"dev_8255_attach: unable to detect any 8255 chip, "
+			"chips addresses must be passed as attach arguments\n");
+		return -EINVAL;
+	}
+
+	addrs = (unsigned long*) arg->opts;
+
+	for(i = 0; i < (arg->opts_size / sizeof(unsigned long)); i++) {
+		struct a4l_subdevice * subd;
+		subd_8255_t *subd_8255;
+
+		subd = a4l_alloc_subd(sizeof(subd_8255_t), NULL);
+		if(subd == NULL) {
+			a4l_err(dev,
+				"dev_8255_attach: "
+				"unable to allocate subdevice\n");
+			/* There is no need to free previously
+			   allocated structure(s), the analogy layer will
+			   do it for us */
+			err = -ENOMEM;
+			goto out_attach;
+		}
+
+		memset(subd, 0, sizeof(struct a4l_subdevice));
+		memset(subd->priv, 0, sizeof(subd_8255_t));
+
+		subd_8255 = (subd_8255_t *)subd->priv;
+
+		if(request_region(addrs[i], _8255_SIZE, "Analogy 8255") == 0) {
+			subd->flags = A4L_SUBD_UNUSED;
+			a4l_warn(dev,
+				 "dev_8255_attach: "
+				 "I/O port conflict at 0x%lx\n", addrs[i]);
+		}
+		else {
+			subd_8255->cb_arg = addrs[i];
+			a4l_subdev_8255_init(subd);
+		}
+
+		err = a4l_add_subd(dev, subd);
+		if(err < 0) {
+			a4l_err(dev,
+				"dev_8255_attach: "
+				"a4l_add_subd() failed (err=%d)\n", err);
+			goto out_attach;
+		}
+	}
+
+out_attach:
+	return err;
+}
+
+static int dev_8255_detach(struct a4l_device *dev)
+{
+	struct a4l_subdevice *subd;
+	int i = 0;
+
+	while((subd = a4l_get_subd(dev, i++)) != NULL) {
+		subd_8255_t *subd_8255 = (subd_8255_t *) subd->priv;
+		if(subd_8255 != NULL && subd_8255->cb_arg != 0)
+			release_region(subd_8255->cb_arg, _8255_SIZE);
+	}
+
+	return 0;
+}
+
+static struct a4l_driver drv_8255 = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_8255",
+	.driver_name = "8255",
+	.attach = dev_8255_attach,
+	.detach = dev_8255_detach,
+	.privdata_size = 0,
+};
+
+static int __init drv_8255_init(void)
+{
+	return a4l_register_drv(&drv_8255);
+}
+
+static void __exit drv_8255_cleanup(void)
+{
+	a4l_unregister_drv(&drv_8255);
+}
+MODULE_DESCRIPTION("Analogy driver for 8255 chip");
+MODULE_LICENSE("GPL");
+
+module_init(drv_8255_init);
+module_exit(drv_8255_cleanup);
+++ linux-patched/drivers/xenomai/analogy/intel/8255.h	2022-03-21 12:58:31.005873203 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/intel/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Hardware driver for 8255 chip
+ * @note Copyright (C) 1999 David A. Schleef <ds@schleef.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef __ANALOGY_8255_H__
+#define __ANALOGY_8255_H__
+
+#include <rtdm/analogy/device.h>
+
+typedef int (*a4l_8255_cb_t)(int, int, int, unsigned long);
+
+typedef struct subd_8255_struct {
+	unsigned long cb_arg;
+	a4l_8255_cb_t cb_func;
+	unsigned int status;
+	int have_irq;
+	int io_bits;
+} subd_8255_t;
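+
+/*
+ * Usage sketch (illustrative; it mirrors the standalone driver found
+ * in 8255.c): a board driver allocates a subdevice with room for
+ * subd_8255_t, fills cb_arg (and cb_func when the chip is not reached
+ * through plain inb()/outb()), then calls a4l_subdev_8255_init():
+ *
+ *   struct a4l_subdevice *subd = a4l_alloc_subd(sizeof(subd_8255_t), NULL);
+ *   subd_8255_t *s = (subd_8255_t *)subd->priv;
+ *
+ *   s->cb_arg = iobase;	(the default callback uses inb()/outb())
+ *   a4l_subdev_8255_init(subd);
+ *   a4l_add_subd(dev, subd);
+ */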
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_8255) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_8255_MODULE))
+
+#define _8255_SIZE 4
+
+#define _8255_DATA 0
+#define _8255_CR 3
+
+#define CR_C_LO_IO	0x01
+#define CR_B_IO		0x02
+#define CR_B_MODE	0x04
+#define CR_C_HI_IO	0x08
+#define CR_A_IO		0x10
+#define CR_A_MODE(a)	((a)<<5)
+#define CR_CW		0x80
+
+void a4l_subdev_8255_init(struct a4l_subdevice *subd);
+void a4l_subdev_8255_interrupt(struct a4l_subdevice *subd);
+
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_8255 */
+
+#define a4l_subdev_8255_init(x)		do { } while(0)
+#define a4l_subdev_8255_interrupt(x)	do { } while(0)
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_8255 */
+
+#endif /* !__ANALOGY_8255_H__ */
+++ linux-patched/drivers/xenomai/analogy/intel/Makefile	2022-03-21 12:58:30.997873281 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/instruction.c	1970-01-01 01:00:00.000000000 +0100
+
+ccflags-y += -I$(srctree)/drivers/xenomai/analogy
+
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_8255) += analogy_8255.o
+
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_PARPORT) += analogy_parport.o
+
+analogy_8255-y := 8255.o
+
+analogy_parport-y := parport.o
+++ linux-patched/drivers/xenomai/analogy/instruction.c	2022-03-21 12:58:30.990873349 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/driver_facilities.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, instruction related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/ioport.h>
+#include <linux/mman.h>
+#include <asm/div64.h>
+#include <asm/io.h>
+#include <asm/errno.h>
+#include <rtdm/analogy/device.h>
+
+int a4l_do_insn_gettime(struct a4l_kernel_instruction * dsc)
+{
+	nanosecs_abs_t ns;
+	uint32_t ns2;
+
+	unsigned int *data = (unsigned int *)dsc->data;
+
+	/* Basic checks */
+	if (dsc->data_size != 2 * sizeof(unsigned int)) {
+		__a4l_err("a4l_do_insn_gettime: data size should be 2\n");
+		return -EINVAL;
+	}
+
+	/* Get a timestamp */
+	ns = a4l_get_time();
+
+	/* Perform the conversion */
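+	/* do_div() stores the quotient (seconds) back into ns and
+	   returns the remainder in nanoseconds */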
+	ns2 = do_div(ns, 1000000000);
+	data[0] = (unsigned int) ns;
+	data[1] = (unsigned int) ns2 / 1000;
+
+	return 0;
+}
+
+int a4l_do_insn_wait(struct a4l_kernel_instruction * dsc)
+{
+	unsigned int us;
+	unsigned int *data = (unsigned int *)dsc->data;
+
+	/* Basic checks */
+	if (dsc->data_size != sizeof(unsigned int)) {
+		__a4l_err("a4l_do_insn_wait: data size should be 1\n");
+		return -EINVAL;
+	}
+
+	if (data[0] > A4L_INSN_WAIT_MAX) {
+		__a4l_err("a4l_do_insn_wait: wait duration is out of range\n");
+		return -EINVAL;
+	}
+
+	/* As we use (a4l_)udelay, we have to convert the delay into
+	   microseconds */
+	us = data[0] / 1000;
+
+	/* The delay is rounded up to at least 1 microsecond */
+	if (us == 0)
+		us = 1;
+
+	/* Performs the busy waiting */
+	a4l_udelay(us);
+
+	return 0;
+}
+
+int a4l_do_insn_trig(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc)
+{
+	struct a4l_subdevice *subd;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	unsigned int trignum;
+	unsigned int *data = (unsigned int*)dsc->data;
+
+	/* Basic checks */
+	if (dsc->data_size > 1) {
+		__a4l_err("a4l_do_insn_trig: data size should not be > 1\n");
+		return -EINVAL;
+	}
+
+	trignum = (dsc->data_size == sizeof(unsigned int)) ? data[0] : 0;
+
+	if (dsc->idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_do_insn_trig: "
+			  "subdevice index is out of range\n");
+		return -EINVAL;
+	}
+
+	subd = dev->transfer.subds[dsc->idx_subd];
+
+	/* Checks that the selected subdevice supports triggering */
+	if ((subd->flags & A4L_SUBD_CMD) == 0 || subd->trigger == NULL) {
+		__a4l_err("a4l_do_insn_trig: subdevice does not support "
+			  "triggering or asynchronous acquisition\n");
+		return -EINVAL;
+	}
+
+	/* Performs the trigger */
+	return subd->trigger(subd, trignum);
+}
+
+int a4l_fill_insndsc(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int ret = 0;
+	void *tmp_data = NULL;
+
+	ret = rtdm_safe_copy_from_user(fd,
+				       dsc, arg, sizeof(a4l_insn_t));
+	if (ret != 0)
+		goto out_insndsc;
+
+	if (dsc->data_size != 0 && dsc->data == NULL) {
+		__a4l_err("a4l_fill_insndsc: no data pointer specified\n");
+		ret = -EINVAL;
+		goto out_insndsc;
+	}
+
+	if (dsc->data_size != 0 && dsc->data != NULL) {
+		tmp_data = rtdm_malloc(dsc->data_size);
+		if (tmp_data == NULL) {
+			ret = -ENOMEM;
+			goto out_insndsc;
+		}
+
+		if ((dsc->type & A4L_INSN_MASK_WRITE) != 0) {
+			ret = rtdm_safe_copy_from_user(fd,
+						       tmp_data, dsc->data,
+						       dsc->data_size);
+			if (ret < 0)
+				goto out_insndsc;
+		}
+	}
+
+	dsc->__udata = dsc->data;
+	dsc->data = tmp_data;
+
+out_insndsc:
+
+	if (ret != 0 && tmp_data != NULL)
+		rtdm_free(tmp_data);
+
+	return ret;
+}
+
+int a4l_free_insndsc(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int ret = 0;
+
+	if ((dsc->type & A4L_INSN_MASK_READ) != 0)
+		ret = rtdm_safe_copy_to_user(fd,
+					     dsc->__udata,
+					     dsc->data, dsc->data_size);
+
+	if (dsc->data != NULL)
+		rtdm_free(dsc->data);
+
+	return ret;
+}
+
+int a4l_do_special_insn(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc)
+{
+	int ret = 0;
+
+	switch (dsc->type) {
+	case A4L_INSN_GTOD:
+		ret = a4l_do_insn_gettime(dsc);
+		break;
+	case A4L_INSN_WAIT:
+		ret = a4l_do_insn_wait(dsc);
+		break;
+	case A4L_INSN_INTTRIG:
+		ret = a4l_do_insn_trig(cxt, dsc);
+		break;
+	default:
+		__a4l_err("a4l_do_special_insn: "
+			  "incoherent instruction code\n");
+		return -EINVAL;
+	}
+
+	if (ret < 0)
+		__a4l_err("a4l_do_special_insn: "
+			  "execution of the instruction failed (err=%d)\n",
+			  ret);
+
+	return ret;
+}
+
+int a4l_do_insn(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc)
+{
+	int ret = 0;
+	struct a4l_subdevice *subd;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	int (*hdlr) (struct a4l_subdevice *, struct a4l_kernel_instruction *) = NULL;
+
+	/* Checks the subdevice index */
+	if (dsc->idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_do_insn: "
+			  "subdevice index out of range (idx=%d)\n",
+			  dsc->idx_subd);
+		return -EINVAL;
+	}
+
+	/* Retrieves a pointer to the proper subdevice */
+	subd = dev->transfer.subds[dsc->idx_subd];
+
+	/* Checks the subdevice's characteristics */
+	if ((subd->flags & A4L_SUBD_TYPES) == A4L_SUBD_UNUSED) {
+		__a4l_err("a4l_do_insn: wrong subdevice selected\n");
+		return -EINVAL;
+	}
+
+	/* Checks the channel descriptor */
+	if ((subd->flags & A4L_SUBD_TYPES) != A4L_SUBD_CALIB) {
+		ret = a4l_check_chanlist(dev->transfer.subds[dsc->idx_subd],
+					 1, &dsc->chan_desc);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* Choose the proper handler, we can check the pointer because
+	   the subdevice was memset to 0 at allocation time */
+	switch (dsc->type) {
+	case A4L_INSN_READ:
+		hdlr = subd->insn_read;
+		break;
+	case A4L_INSN_WRITE:
+		hdlr = subd->insn_write;
+		break;
+	case A4L_INSN_BITS:
+		hdlr = subd->insn_bits;
+		break;
+	case A4L_INSN_CONFIG:
+		hdlr = subd->insn_config;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	/* We check the instruction type */
+	if (ret < 0)
+		return ret;
+
+	/* We check whether a handler is available */
+	if (hdlr == NULL)
+		return -ENOSYS;
+
+	/* Prevents the subdevice from being used during
+	   the following operations */
+	if (test_and_set_bit(A4L_SUBD_BUSY_NR, &subd->status)) {
+		ret = -EBUSY;
+		goto out_do_insn;
+	}
+
+	/* Lets the driver-specific code perform the instruction */
+	ret = hdlr(subd, dsc);
+
+	if (ret < 0)
+		__a4l_err("a4l_do_insn: "
+			  "execution of the instruction failed (err=%d)\n",
+			  ret);
+
+out_do_insn:
+
+	/* Releases the subdevice from its reserved state */
+	clear_bit(A4L_SUBD_BUSY_NR, &subd->status);
+
+	return ret;
+}
+
+int a4l_ioctl_insn(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int ret = 0;
+	struct a4l_kernel_instruction insn;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+
+	if (!rtdm_in_rt_context() && rtdm_rt_capable(fd))
+		return -ENOSYS;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_insn: unattached device\n");
+		return -EINVAL;
+	}
+
+	/* Recovers the instruction descriptor */
+	ret = a4l_fill_insndsc(cxt, &insn, arg);
+	if (ret != 0)
+		goto err_ioctl_insn;
+
+	/* Performs the instruction */
+	if ((insn.type & A4L_INSN_MASK_SPECIAL) != 0)
+		ret = a4l_do_special_insn(cxt, &insn);
+	else
+		ret = a4l_do_insn(cxt, &insn);
+
+	if (ret < 0)
+		goto err_ioctl_insn;
+
+	/* Frees the used memory and sends back some
+	   data, if need be */
+	ret = a4l_free_insndsc(cxt, &insn);
+
+	return ret;
+
+err_ioctl_insn:
+	a4l_free_insndsc(cxt, &insn);
+	return ret;
+}
+
+int a4l_fill_ilstdsc(struct a4l_device_context * cxt, struct a4l_kernel_instruction_list * dsc, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int i, ret = 0;
+
+	dsc->insns = NULL;
+
+	/* Recovers the structure from user space */
+	ret = rtdm_safe_copy_from_user(fd,
+				       dsc, arg, sizeof(a4l_insnlst_t));
+	if (ret < 0)
+		return ret;
+
+	/* Some basic checking */
+	if (dsc->count == 0) {
+		__a4l_err("a4l_fill_ilstdsc: instruction list's count is 0\n");
+		return -EINVAL;
+	}
+
+	/* Keeps the user pointer in an opaque field */
+	dsc->__uinsns = (a4l_insn_t *)dsc->insns;
+
+	dsc->insns = rtdm_malloc(dsc->count * sizeof(struct a4l_kernel_instruction));
+	if (dsc->insns == NULL)
+		return -ENOMEM;
+
+	/* Recovers the instructions, one by one. This part is not
+	   optimized */
+	for (i = 0; i < dsc->count && ret == 0; i++)
+		ret = a4l_fill_insndsc(cxt,
+				       &(dsc->insns[i]),
+				       &(dsc->__uinsns[i]));
+
+	/* In case of error, frees the allocated memory */
+	if (ret < 0 && dsc->insns != NULL)
+		rtdm_free(dsc->insns);
+
+	return ret;
+}
+
+int a4l_free_ilstdsc(struct a4l_device_context * cxt, struct a4l_kernel_instruction_list * dsc)
+{
+	int i, ret = 0;
+
+	if (dsc->insns != NULL) {
+
+		for (i = 0; i < dsc->count && ret == 0; i++)
+			ret = a4l_free_insndsc(cxt, &(dsc->insns[i]));
+
+		while (i < dsc->count) {
+			a4l_free_insndsc(cxt, &(dsc->insns[i]));
+			i++;
+		}
+
+		rtdm_free(dsc->insns);
+	}
+
+	return ret;
+}
+
+/* This function is not optimized in terms of memory footprint and
+   CPU load; however, the whole analogy instruction system was not
+   designed with performance as a primary concern */
+int a4l_ioctl_insnlist(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int i, ret = 0;
+	struct a4l_kernel_instruction_list ilst;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+
+	if (!rtdm_in_rt_context() && rtdm_rt_capable(fd))
+		return -ENOSYS;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_insnlist: unattached device\n");
+		return -EINVAL;
+	}
+
+	if ((ret = a4l_fill_ilstdsc(cxt, &ilst, arg)) < 0)
+		return ret;
+
+	/* Performs the instructions */
+	for (i = 0; i < ilst.count && ret == 0; i++) {
+		if ((ilst.insns[i].type & A4L_INSN_MASK_SPECIAL) != 0)
+			ret = a4l_do_special_insn(cxt, &ilst.insns[i]);
+		else
+			ret = a4l_do_insn(cxt, &ilst.insns[i]);
+	}
+
+	if (ret < 0)
+		goto err_ioctl_ilst;
+
+	return a4l_free_ilstdsc(cxt, &ilst);
+
+err_ioctl_ilst:
+	a4l_free_ilstdsc(cxt, &ilst);
+	return ret;
+}
+++ linux-patched/drivers/xenomai/analogy/driver_facilities.c	2022-03-21 12:58:30.983873418 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/rtdm_helpers.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, driver facilities
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <rtdm/analogy/device.h>
+
+/**
+ * @ingroup cobalt
+ * @defgroup analogy Analogy framework
+ * An RTDM-based interface for implementing DAQ card drivers
+ */
+
+/**
+ * @ingroup analogy
+ * @defgroup analogy_driver_facilities Driver API
+ * Programming interface provided to DAQ card drivers
+ */
+
+/* --- Driver section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_driver Driver management services
+ *
+ * Analogy driver registration / unregistration
+ *
+ * In a common Linux char driver, the developer has to register a fops
+ * structure filled with callbacks for read / write / mmap / ioctl
+ * operations.
+ *
+ * Analogy drivers do not have to implement read / write / mmap /
+ * ioctl functions; these operations are provided by the Analogy
+ * generic layer, which already manages the transfers between user
+ * space and kernel space. Analogy drivers work with commands and
+ * instructions, which are more specialized kinds of read / write
+ * operations. Instead of registering a fops structure, an Analogy
+ * driver registers an a4l_driver structure (a minimal registration
+ * sketch is given below).
+ *
+ * @{
+ */
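+
+/*
+ * Minimal registration sketch (illustrative; it mirrors the in-tree
+ * parport driver, all "foo" names are placeholders):
+ *
+ *   static struct a4l_driver foo_drv = {
+ *           .owner = THIS_MODULE,
+ *           .board_name = "analogy_foo",
+ *           .driver_name = "foo",
+ *           .attach = foo_attach,
+ *           .detach = foo_detach,
+ *           .privdata_size = sizeof(struct foo_priv),
+ *   };
+ *
+ *   static int __init foo_init(void)
+ *   {
+ *           return a4l_register_drv(&foo_drv);
+ *   }
+ *
+ *   static void __exit foo_exit(void)
+ *   {
+ *           a4l_unregister_drv(&foo_drv);
+ *   }
+ *
+ *   module_init(foo_init);
+ *   module_exit(foo_exit);
+ */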
+
+/**
+ * @brief Register an Analogy driver
+ *
+ * After initialising a driver structure, the driver must be
+ * registered so that devices can be attached to it.
+ *
+ * @param[in] drv Driver descriptor structure
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_register_drv(struct a4l_driver * drv);
+EXPORT_SYMBOL_GPL(a4l_register_drv);
+
+/**
+ * @brief Unregister an Analogy driver
+ *
+ * This function removes the driver descriptor from the Analogy driver
+ * list. The driver cannot be attached anymore.
+ *
+ * @param[in] drv Driver descriptor structure
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_unregister_drv(struct a4l_driver * drv);
+EXPORT_SYMBOL_GPL(a4l_unregister_drv);
+
+/** @} */
+
+/* --- Subdevice section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_subdevice Subdevice management services
+ *
+ * Subdevice declaration in a driver
+ *
+ * The subdevice structure is the most complex one in the Analogy
+ * driver layer. It contains some description fields to fill and some
+ * callbacks to declare.
+ *
+ * The description fields are:
+ * - flags: to define the subdevice type and its capabilities;
+ * - chan_desc: to describe the channels which compose the subdevice;
+ * - rng_desc: to declare the usable ranges;
+ *
+ * The callback functions are:
+ * - do_cmd() and do_cmdtest(): to perform asynchronous acquisitions
+ *   driven by commands;
+ * - cancel(): to abort a running asynchronous acquisition;
+ * - munge(): to modify the data freshly acquired during an
+ *   asynchronous transfer. Warning: using this feature can
+ *   significantly reduce performance; a complex munge operation
+ *   raises the CPU load and, if the acquisition device is DMA
+ *   capable, the resulting cache misses and replacements negate the
+ *   benefits of the DMA controller;
+ * - trigger(): optionally, to launch an asynchronous acquisition;
+ * - insn_read(), insn_write(), insn_bits(), insn_config(): to perform
+ *   synchronous acquisition operations.
+ *
+ * Once the subdevice is filled, it must be added to the device by
+ * means of a4l_add_subd() (a short setup sketch is given below).
+ *
+ * @{
+ */
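+
+/*
+ * Setup sketch (illustrative; patterned on the parport driver earlier
+ * in this patch, the "my_*" names are placeholders):
+ *
+ *   static void setup_subd_dio(struct a4l_subdevice *subd)
+ *   {
+ *           subd->flags = A4L_SUBD_DIO;
+ *           subd->chan_desc = &my_chan_desc;
+ *           subd->rng_desc = &range_digital;
+ *           subd->insn_bits = my_insn_bits;
+ *   }
+ *
+ *   Then, in the attach() callback:
+ *
+ *   subd = a4l_alloc_subd(sizeof(struct my_priv), setup_subd_dio);
+ *   if (subd == NULL)
+ *           return -ENOMEM;
+ *   err = a4l_add_subd(dev, subd);
+ *   if (err < 0)
+ *           return err;
+ */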
+
+EXPORT_SYMBOL_GPL(a4l_range_bipolar10);
+EXPORT_SYMBOL_GPL(a4l_range_bipolar5);
+EXPORT_SYMBOL_GPL(a4l_range_unipolar10);
+EXPORT_SYMBOL_GPL(a4l_range_unipolar5);
+EXPORT_SYMBOL_GPL(a4l_range_unknown);
+EXPORT_SYMBOL_GPL(a4l_range_fake);
+
+/**
+ * @brief Allocate a subdevice descriptor
+ *
+ * This is a helper function for obtaining a properly initialized
+ * subdevice descriptor.
+ *
+ * @param[in] sizeof_priv Size of the subdevice's private data
+ * @param[in] setup Setup function to be called after the allocation
+ *
+ * @return the allocated subdevice descriptor, or NULL if the
+ * allocation failed.
+ *
+ */
+struct a4l_subdevice * a4l_alloc_subd(int sizeof_priv,
+				  void (*setup)(struct a4l_subdevice *));
+EXPORT_SYMBOL_GPL(a4l_alloc_subd);
+
+/**
+ * @brief Add a subdevice to the driver descriptor
+ *
+ * Once the driver descriptor structure is initialized, the function
+ * a4l_add_subd() must be used so to add some subdevices to the
+ * driver.
+ *
+ * @param[in] dev Device descriptor structure
+ * @param[in] subd Subdevice descriptor structure
+ *
+ * @return the index with which the subdevice has been registered, in
+ * case of error a negative error code is returned.
+ *
+ */
+int a4l_add_subd(struct a4l_device *dev, struct a4l_subdevice *subd);
+EXPORT_SYMBOL_GPL(a4l_add_subd);
+
+/**
+ * @brief Get a pointer to the subdevice descriptor referenced by its
+ * registration index
+ *
+ * This function is scarcely useful as all the driver callbacks get
+ * the related subdevice descriptor as their first argument.
+ * This function is not optimized: it walks a linked list to find the
+ * proper pointer. Consequently, it must not be used in real-time
+ * context but only at initialization / cleanup time (attach / detach).
+ *
+ * @param[in] dev Device descriptor structure
+ * @param[in] idx Subdevice index
+ *
+ * @return the subdevice descriptor on success, NULL otherwise.
+ *
+ */
+struct a4l_subdevice *a4l_get_subd(struct a4l_device *dev, int idx);
+EXPORT_SYMBOL_GPL(a4l_get_subd);
+
+/** @} */
+
+/* --- Buffer section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_buffer Buffer management services
+ *
+ * Buffer management services
+ *
+ * The buffer is the key component of the Analogy infrastructure. It
+ * manages transfers between the user-space and the Analogy drivers
+ * thanks to generic functions which are described hereafter. Thanks
+ * to the buffer subsystem, the driver developer does not have to care
+ * about the way the user program retrieves or sends data.
+ *
+ * To write a classical char driver, the developer has to fill a fops
+ * structure so as to provide transfer operations to the user program
+ * (read, write, ioctl and mmap if need be).
+ *
+ * The Analogy infrastructure manages the whole interface with the
+ * userspace; the common read, write, mmap, etc. callbacks are generic
+ * Analogy functions. These functions manage (and perform, if need be)
+ * transfers between user space and an asynchronous buffer thanks
+ * to lockless mechanisms.
+ *
+ * Consequently, the developer has to use the proper buffer functions
+ * in order to write / read acquired data into / from the asynchronous
+ * buffer.
+ *
+ * Here are listed the functions:
+ * - a4l_buf_prepare_(abs)put() and a4l_buf_commit_(abs)put()
+ * - a4l_buf_prepare_(abs)get() and a4l_buf_commit_(abs)get()
+ * - a4l_buf_put()
+ * - a4l_buf_get()
+ * - a4l_buf_evt().
+ *
+ * The function count might seem high; however, the developer only
+ * needs a few of them to write a driver. Having so many functions
+ * makes it possible to handle every transfer case:
+ * - If some DMA controller is available, there is no need to make the
+ *   driver copy the acquired data into the asynchronous buffer, the
+ *   DMA controller must directly trigger DMA shots into / from the
+ *   buffer. In that case, a function a4l_buf_prepare_*() must be used
+ *   so as to set up the DMA transfer and a function
+ *   a4l_buf_commit_*() has to be called to complete the transfer.
+ * - For DMA controllers which need to work with a global counter (the
+ *   transferred data count since the beginning of the acquisition),
+ *   the functions a4l_buf_*_abs_*() have been made available.
+ * - If no DMA controller is available, the driver has to perform the
+ *   copy between the hardware component and the asynchronous
+ *   buffer. In such cases, the functions a4l_buf_get() and
+ *   a4l_buf_put() are useful (a short usage sketch follows this
+ *   comment block).
+ *
+ * @{
+ */
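+
+/*
+ * Non-DMA usage sketch (illustrative; it follows the 8255 interrupt
+ * handler earlier in this patch): when the device signals that a
+ * sample is available, the driver copies it into the Analogy buffer
+ * and notifies the core:
+ *
+ *   sampl_t d;
+ *
+ *   d = read_sample_from_hw();	(placeholder for the device access)
+ *   err = a4l_buf_put(subd, &d, sizeof(d));
+ *   if (err == 0)
+ *           a4l_buf_evt(subd, 0);
+ */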
+
+/**
+ * @brief Update the absolute count of data sent from the device to
+ * the buffer since the start of the acquisition and after the next
+ * DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However, some
+ * pointers still have to be updated so as to monitor the transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count to be transferred during the next
+ * DMA shot plus the data count which has been copied since the start
+ * of the acquisition
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_prepare_absput(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_prepare_absput);
+
+/**
+ * @brief Set the absolute count of data which was sent from the
+ * device to the buffer since the start of the acquisition and until
+ * the last DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count transferred to the buffer during
+ * the last DMA shot plus the data count which has been sent /
+ * retrieved since the beginning of the acquisition
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_commit_absput(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_commit_absput);
+
+/**
+ * @brief Set the count of data which is to be sent to the buffer at
+ * the next DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count to be transferred
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_prepare_put(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_prepare_put);
+
+/**
+ * @brief Set the count of data sent to the buffer during the last
+ * completed DMA shots
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The amount of data transferred
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_commit_put(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_commit_put);
+
+/**
+ * @brief Copy some data from the device driver to the buffer
+ *
+ * The function a4l_buf_put() must copy data coming from some
+ * acquisition device to the Analogy buffer. This ring-buffer is an
+ * intermediate area between the device driver and the user-space
+ * program, which is supposed to recover the acquired data.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] bufdata The data buffer to copy into the Analogy buffer
+ * @param[in] count The amount of data to copy
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_put(struct a4l_subdevice *subd, void *bufdata, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_put);
+
+/**
+ * @brief Update the absolute count of data sent from the buffer to
+ * the device since the start of the acquisition and after the next
+ * DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count to be transferred during the next
+ * DMA shot plus the data count which have been copied since the start
+ * of the acquisition
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_prepare_absget(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_prepare_absget);
+
+/**
+ * @brief Set the absolute count of data which was sent from the
+ * buffer to the device since the start of the acquisition and until
+ * the last DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count transferred to the device during
+ * the last DMA shot plus the data count which has been sent since
+ * the beginning of the acquisition
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_commit_absget(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_commit_absget);
+
+/**
+ * @brief Set the count of data which is to be sent from the buffer to
+ * the device at the next DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count to be transferred
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_prepare_get(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_prepare_get);
+
+/**
+ * @brief Set the count of data sent from the buffer to the device
+ * during the last completed DMA shots
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The amount of data transferred
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_commit_get(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_commit_get);
+
+/**
+ * @brief Copy some data from the buffer to the device driver
+ *
+ * The function a4l_buf_get() must copy data coming from the Analogy
+ * buffer to some acquisition device. This ring-buffer is an
+ * intermediate area between the device driver and the user-space
+ * program, which is supposed to provide the data to send to the
+ * device.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[out] bufdata The data buffer to fill with data from the Analogy buffer
+ * @param[in] count The amount of data to copy
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
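+ * A minimal sketch of an output path (waveform generation), assuming
+ * subd is the output subdevice at hand and mydrv_write_fifo() is a
+ * hypothetical hardware helper not provided by this API:
+ *
+ * @code
+ * static void mydrv_refill_fifo(struct a4l_subdevice *subd)
+ * {
+ *	uint16_t samples[16];
+ *
+ *	// Pull the next chunk of data provided by user space...
+ *	if (a4l_buf_get(subd, samples, sizeof(samples)) == 0) {
+ *		mydrv_write_fifo(subd, samples, sizeof(samples));
+ *		// ...and wake up any process waiting to write more
+ *		a4l_buf_evt(subd, 0);
+ *	}
+ * }
+ * @endcode
+ *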
+ */
+int a4l_buf_get(struct a4l_subdevice *subd, void *bufdata, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_get);
+
+/**
+ * @brief Signal some event(s) to a user-space program involved in
+ * some read / write operation
+ *
+ * The function a4l_buf_evt() is useful in many cases:
+ * - To wake up a process waiting for some data to read.
+ * - To wake up a process waiting for some data to write.
+ * - To notify the user process that an error has occurred during the
+ *   acquisition.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] evts Some specific event to notify:
+ * - A4L_BUF_ERROR to indicate that some error has occurred during the
+ *   transfer
+ * - A4L_BUF_EOA to indicate that the acquisition is complete (this
+ *   event is set automatically; it should not be used).
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
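+ * A short sketch of the two typical cases, assuming subd is the
+ * subdevice at hand and mydrv_fifo_overrun() is a hypothetical
+ * hardware status check:
+ *
+ * @code
+ * // After feeding or draining the buffer, wake up the waiting
+ * // process without flagging any particular event
+ * a4l_buf_evt(subd, 0);
+ *
+ * // On a hardware fault, report the failure to the user process
+ * if (mydrv_fifo_overrun(subd))
+ *	a4l_buf_evt(subd, A4L_BUF_ERROR);
+ * @endcode
+ *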
+ */
+int a4l_buf_evt(struct a4l_subdevice *subd, unsigned long evts);
+EXPORT_SYMBOL_GPL(a4l_buf_evt);
+
+/**
+ * @brief Get the data amount available in the Analogy buffer
+ *
+ * @param[in] subd Subdevice descriptor structure
+ *
+ * @return the amount of data available in the Analogy buffer.
+ *
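+ * A short sketch, e.g. on an output subdevice, checking that enough
+ * data is pending before programming a DMA shot; MYDRV_SHOT_SIZE and
+ * mydrv_start_shot() are hypothetical driver-side names:
+ *
+ * @code
+ * if (a4l_buf_count(subd) >= MYDRV_SHOT_SIZE)
+ *	mydrv_start_shot(subd, MYDRV_SHOT_SIZE);
+ * @endcode
+ *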
+ */
+unsigned long a4l_buf_count(struct a4l_subdevice *subd);
+EXPORT_SYMBOL_GPL(a4l_buf_count);
+
+#ifdef DOXYGEN_CPP		/* Only used for doxygen doc generation */
+
+/**
+ * @brief Get the current Analogy command descriptor
+ *
+ * @param[in] subd Subdevice descriptor structure
+ *
+ * @return the command descriptor.
+ *
+ */
+struct a4l_cmd_desc *a4l_get_cmd(struct a4l_subdevice * subd);
+
+#endif /* DOXYGEN_CPP */
+
+/**
+ * @brief Get the channel index according to its type
+ *
+ * @param[in] subd Subdevice descriptor structure
+ *
+ * @return the channel index.
+ *
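+ * A short sketch combining a4l_get_cmd() and a4l_get_chan() to map
+ * the returned index back to the corresponding channel descriptor of
+ * the running command (error paths omitted, subd being the subdevice
+ * at hand):
+ *
+ * @code
+ * struct a4l_cmd_desc *cmd = a4l_get_cmd(subd);
+ * int chan = a4l_get_chan(subd);
+ * unsigned int chan_desc = 0;
+ *
+ * if (cmd != NULL && chan >= 0)
+ *	chan_desc = cmd->chan_descs[chan];
+ * @endcode
+ *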
+ */
+int a4l_get_chan(struct a4l_subdevice *subd);
+EXPORT_SYMBOL_GPL(a4l_get_chan);
+
+/** @} */
+
+/* --- IRQ handling section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_irq Interrupt management services
+ * @{
+ */
+
+/**
+ * @brief Get the interrupt number in use for a specific device
+ *
+ * @param[in] dev Device descriptor structure
+ *
+ * @return the line number used or A4L_IRQ_UNUSED if no interrupt
+ * is registered.
+ *
+ */
+unsigned int a4l_get_irq(struct a4l_device * dev);
+EXPORT_SYMBOL_GPL(a4l_get_irq);
+
+/**
+ * @brief Register an interrupt handler for a specific device
+ *
+ * @param[in] dev Device descriptor structure
+ * @param[in] irq Line number of the addressed IRQ
+ * @param[in] handler Interrupt handler
+ * @param[in] flags Registration flags:
+ * - RTDM_IRQTYPE_SHARED: enable IRQ-sharing with other drivers
+ *   (Warning: real-time drivers and non-real-time drivers cannot
+ *   share an interrupt line).
+ * - RTDM_IRQTYPE_EDGE: mark IRQ as edge-triggered (Warning: this flag
+ *   is meaningless in RTDM-less context).
+ * - A4L_IRQ_DISABLED: keep IRQ disabled when calling the action
+ *   handler (Warning: this flag is ignored in RTDM-enabled
+ *   configuration).
+ * @param[in] cookie Pointer to be passed to the interrupt handler on
+ * invocation
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
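+ * A minimal sketch of an attach routine registering its handler,
+ * assuming the a4l_irq_hdlr_t prototype is
+ * int (*)(unsigned int irq, void *cookie); the irq number is supposed
+ * to come from the driver's own probing logic:
+ *
+ * @code
+ * static int mydrv_irq_handler(unsigned int irq, void *cookie)
+ * {
+ *	struct a4l_device *dev = cookie;
+ *
+ *	// Acknowledge the hardware and feed the buffer here
+ *	return 0;
+ * }
+ *
+ * // In the driver attach callback:
+ * ret = a4l_request_irq(dev, irq, mydrv_irq_handler, 0, dev);
+ * if (ret < 0)
+ *	return ret;
+ * @endcode
+ *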
+ */
+int a4l_request_irq(struct a4l_device * dev,
+		       unsigned int irq,
+		       a4l_irq_hdlr_t handler,
+		       unsigned long flags, void *cookie);
+EXPORT_SYMBOL_GPL(a4l_request_irq);
+
+/**
+ * @brief Release an interrupt handler for a specific device
+ *
+ * @param[in] dev Device descriptor structure
+ * @param[in] irq Line number of the addressed IRQ
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_free_irq(struct a4l_device * dev, unsigned int irq);
+EXPORT_SYMBOL_GPL(a4l_free_irq);
+
+/** @} */
+
+/* --- Misc section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_misc Misc services
+ * @{
+ */
+
+/**
+ * @brief Get the absolute time in nanoseconds
+ *
+ * @return the absolute time expressed in nanoseconds
+ *
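+ * A one-line sketch, e.g. to timestamp a hardware trigger:
+ *
+ * @code
+ * unsigned long long trigger_ns = a4l_get_time();
+ * @endcode
+ *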
+ */
+unsigned long long a4l_get_time(void);
+EXPORT_SYMBOL_GPL(a4l_get_time);
+
+/** @} */
+++ linux-patched/drivers/xenomai/analogy/rtdm_helpers.c	2022-03-21 12:58:30.975873496 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/buffer.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, RTDM helpers
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <asm/atomic.h>
+
+#include <rtdm/analogy/rtdm_helpers.h>
+
+/* --- Time section --- */
+
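+/* a4l_clkofs records the offset between the Linux wall clock and the
+   RTDM clock, sampled once at init time; a4l_get_time() adds it back
+   to rtdm_clock_read() so that drivers get wall-clock based
+   timestamps while only reading the RTDM clock at acquisition time */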
+static nanosecs_abs_t a4l_clkofs;
+
+void a4l_init_time(void)
+{
+	nanosecs_abs_t t1, t2;
+	t1 = rtdm_clock_read();
+	t2 = ktime_to_ns(ktime_get_real());
+	a4l_clkofs = t2 - t1;
+}
+
+nanosecs_abs_t a4l_get_time(void)
+{
+	return a4l_clkofs + rtdm_clock_read();
+}
+
+/* --- IRQ section --- */
+
+static int a4l_handle_irq(rtdm_irq_t *irq_handle)
+{
+	struct a4l_irq_descriptor *dsc =
+		rtdm_irq_get_arg(irq_handle, struct a4l_irq_descriptor);
+
+	if (dsc->handler((unsigned int)irq_handle->irq, dsc->cookie) == 0)
+		return RTDM_IRQ_HANDLED;
+	else
+		return RTDM_IRQ_NONE;
+}
+
+int __a4l_request_irq(struct a4l_irq_descriptor *dsc,
+		      unsigned int irq,
+		      a4l_irq_hdlr_t handler,
+		      unsigned long flags, void *cookie)
+{
+	/* Fills the IRQ descriptor */
+	dsc->handler = handler;
+	dsc->cookie = cookie;
+	dsc->irq = irq;
+
+	/* Registers the RT IRQ handler */
+	return rtdm_irq_request(&dsc->rtdm_desc,
+				(int)irq,
+				a4l_handle_irq, flags, "Analogy device", dsc);
+}
+
+int __a4l_free_irq(struct a4l_irq_descriptor * dsc)
+{
+	return rtdm_irq_free(&dsc->rtdm_desc);
+}
+
+/* --- Synchronization section --- */
+
+static void a4l_nrt_sync_handler(rtdm_nrtsig_t *nrt_sig, void *arg)
+{
+	struct a4l_sync *snc = (struct a4l_sync *) arg;
+	wake_up_interruptible(&snc->wq);
+}
+
+int a4l_init_sync(struct a4l_sync *snc)
+{
+	int ret = 0;
+
+	/* Initializes the flags field */
+	snc->status = 0;
+
+	/* If the process is NRT, we need a wait queue structure */
+	init_waitqueue_head(&snc->wq);
+
+	/* Initializes the RTDM event */
+	rtdm_event_init(&snc->rtdm_evt, 0);
+
+	/* Initializes the gateway to NRT context */
+	rtdm_nrtsig_init(&snc->nrt_sig, a4l_nrt_sync_handler, snc);
+
+	return ret;
+}
+
+void a4l_cleanup_sync(struct a4l_sync *snc)
+{
+	rtdm_nrtsig_destroy(&snc->nrt_sig);
+	rtdm_event_destroy(&snc->rtdm_evt);
+}
+
+int a4l_wait_sync(struct a4l_sync *snc, int rt)
+{
+	int ret = 0;
+
+	if (test_bit(__EVT_PDING, &snc->status))
+		goto out_wait;
+
+	if (rt != 0) {
+		/* If the calling process is in primary mode,
+		   we can use RTDM API ... */
+		set_bit(__RT_WAITER, &snc->status);
+		ret = rtdm_event_wait(&snc->rtdm_evt);
+	} else {
+		/* ... else if the process is NRT,
+		   the Linux wait queue system is used */
+		set_bit(__NRT_WAITER, &snc->status);
+		ret = wait_event_interruptible(snc->wq,
+					       test_bit(__EVT_PDING,
+							&snc->status));
+	}
+
+out_wait:
+
+	clear_bit(__EVT_PDING, &snc->status);
+
+	return ret;
+}
+
+int a4l_timedwait_sync(struct a4l_sync * snc,
+		       int rt, unsigned long long ns_timeout)
+{
+	int ret = 0;
+	unsigned long timeout;
+
+	if (test_bit(__EVT_PDING, &snc->status))
+		goto out_wait;
+
+	if (rt != 0) {
+		/* If the calling process is in primary mode,
+		   we can use RTDM API ... */
+		set_bit(__RT_WAITER, &snc->status);
+		ret = rtdm_event_timedwait(&snc->rtdm_evt, ns_timeout, NULL);
+	} else {
+		/* ... else if the process is NRT,
+		   the Linux wait queue system is used */
+
+		do_div(ns_timeout, 1000);
+		timeout = ns_timeout;
+
+		/* We consider that the Linux kernel cannot tick at a
+		   frequency higher than 1 MHz; if the timeout value is
+		   lower than 1 us, we round it up to 1 us */
+		timeout = (timeout == 0) ? 1 : usecs_to_jiffies(timeout);
+
+		set_bit(__NRT_WAITER, &snc->status);
+
+		ret = wait_event_interruptible_timeout(snc->wq,
+						       test_bit(__EVT_PDING,
+								&snc->status),
+						       timeout);
+	}
+
+out_wait:
+
+	clear_bit(__EVT_PDING, &snc->status);
+
+	return ret;
+}
+
+void a4l_flush_sync(struct a4l_sync * snc)
+{
+	/* Clear the status bitfield */
+	snc->status = 0;
+
+	/* Flush the RTDM event */
+	rtdm_event_clear(&snc->rtdm_evt);
+}
+
+void a4l_signal_sync(struct a4l_sync * snc)
+{
+	int hit = 0;
+
+	set_bit(__EVT_PDING, &snc->status);
+
+	/* a4l_signal_sync() may not be called from the waiter's
+	   context; so, the status flags record the waiter's mode and
+	   the proper event signaling function is called accordingly */
+	if (test_and_clear_bit(__RT_WAITER, &snc->status)) {
+		rtdm_event_signal(&snc->rtdm_evt);
+		hit++;
+	}
+
+	if (test_and_clear_bit(__NRT_WAITER, &snc->status)) {
+		rtdm_nrtsig_pend(&snc->nrt_sig);
+		hit++;
+	}
+
+	if (hit == 0) {
+		/* At first signaling, we may not know the proper way
+		   to send the event */
+		rtdm_event_signal(&snc->rtdm_evt);
+		rtdm_nrtsig_pend(&snc->nrt_sig);
+	}
+}
+++ linux-patched/drivers/xenomai/analogy/buffer.c	2022-03-21 12:58:30.968873564 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/rtdm_interface.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, buffer related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mman.h>
+#include <linux/vmalloc.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+#include <rtdm/analogy/device.h>
+
+/* --- Initialization functions (init, alloc, free) --- */
+
+/* The buffer characteristics are very close to Comedi's: the buffer is
+   allocated with vmalloc() and the physical addresses of the pages which
+   compose the virtual buffer are held in a table */
+
+void a4l_free_buffer(struct a4l_buffer * buf_desc)
+{
+	__a4l_dbg(1, core_dbg, "buf=%p buf->buf=%p\n", buf_desc, buf_desc->buf);
+
+	if (buf_desc->pg_list != NULL) {
+		rtdm_free(buf_desc->pg_list);
+		buf_desc->pg_list = NULL;
+	}
+
+	if (buf_desc->buf != NULL) {
+		char *vaddr, *vabase = buf_desc->buf;
+		for (vaddr = vabase; vaddr < vabase + buf_desc->size;
+		     vaddr += PAGE_SIZE)
+			ClearPageReserved(vmalloc_to_page(vaddr));
+		vfree(buf_desc->buf);
+		buf_desc->buf = NULL;
+	}
+}
+
+int a4l_alloc_buffer(struct a4l_buffer *buf_desc, int buf_size)
+{
+	int ret = 0;
+	char *vaddr, *vabase;
+
+	buf_desc->size = buf_size;
+	buf_desc->size = PAGE_ALIGN(buf_desc->size);
+
+	buf_desc->buf = vmalloc_32(buf_desc->size);
+	if (buf_desc->buf == NULL) {
+		ret = -ENOMEM;
+		goto out_virt_contig_alloc;
+	}
+
+	vabase = buf_desc->buf;
+
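+	/* Mark each page as reserved so that the area can later be
+	   safely mapped to user space (cf. a4l_ioctl_mmap()); the
+	   matching ClearPageReserved() calls are performed in
+	   a4l_free_buffer() */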
+	for (vaddr = vabase; vaddr < vabase + buf_desc->size;
+	     vaddr += PAGE_SIZE)
+		SetPageReserved(vmalloc_to_page(vaddr));
+
+	buf_desc->pg_list = rtdm_malloc(((buf_desc->size) >> PAGE_SHIFT) *
+					sizeof(unsigned long));
+	if (buf_desc->pg_list == NULL) {
+		ret = -ENOMEM;
+		goto out_virt_contig_alloc;
+	}
+
+	for (vaddr = vabase; vaddr < vabase + buf_desc->size;
+	     vaddr += PAGE_SIZE)
+		buf_desc->pg_list[(vaddr - vabase) >> PAGE_SHIFT] =
+			(unsigned long) page_to_phys(vmalloc_to_page(vaddr));
+
+	__a4l_dbg(1, core_dbg, "buf=%p buf->buf=%p\n", buf_desc, buf_desc->buf);
+
+out_virt_contig_alloc:
+	if (ret != 0)
+		a4l_free_buffer(buf_desc);
+
+	return ret;
+}
+
+static void a4l_reinit_buffer(struct a4l_buffer *buf_desc)
+{
+	/* No command to process yet */
+	buf_desc->cur_cmd = NULL;
+
+	/* No more (or not yet) linked with a subdevice */
+	buf_desc->subd = NULL;
+
+	/* Initializes counts and flags */
+	buf_desc->end_count = 0;
+	buf_desc->prd_count = 0;
+	buf_desc->cns_count = 0;
+	buf_desc->tmp_count = 0;
+	buf_desc->mng_count = 0;
+
+	/* Flush pending events */
+	buf_desc->flags = 0;
+	a4l_flush_sync(&buf_desc->sync);
+}
+
+void a4l_init_buffer(struct a4l_buffer *buf_desc)
+{
+	memset(buf_desc, 0, sizeof(struct a4l_buffer));
+	a4l_init_sync(&buf_desc->sync);
+	a4l_reinit_buffer(buf_desc);
+}
+
+void a4l_cleanup_buffer(struct a4l_buffer *buf_desc)
+{
+	a4l_cleanup_sync(&buf_desc->sync);
+}
+
+int a4l_setup_buffer(struct a4l_device_context *cxt, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_buffer *buf_desc = cxt->buffer;
+	int i;
+
+	/* Retrieve the related subdevice */
+	buf_desc->subd = a4l_get_subd(cxt->dev, cmd->idx_subd);
+	if (buf_desc->subd == NULL) {
+		__a4l_err("a4l_setup_buffer: subdevice index "
+			  "out of range (%d)\n", cmd->idx_subd);
+		return -EINVAL;
+	}
+
+	if (test_and_set_bit(A4L_SUBD_BUSY_NR, &buf_desc->subd->status)) {
+		__a4l_err("a4l_setup_buffer: subdevice %d already busy\n",
+			  cmd->idx_subd);
+		return -EBUSY;
+	}
+
+	/* Checks if the transfer system has to work in bulk mode */
+	if (cmd->flags & A4L_CMD_BULK)
+		set_bit(A4L_BUF_BULK_NR, &buf_desc->flags);
+
+	/* Sets the working command */
+	buf_desc->cur_cmd = cmd;
+
+	/* Link the subdevice with the context's buffer */
+	buf_desc->subd->buf = buf_desc;
+
+	/* Computes the count to reach, if need be */
+	if (cmd->stop_src == TRIG_COUNT) {
+		for (i = 0; i < cmd->nb_chan; i++) {
+			struct a4l_channel *chft;
+			chft = a4l_get_chfeat(buf_desc->subd,
+					      CR_CHAN(cmd->chan_descs[i]));
+			buf_desc->end_count += chft->nb_bits / 8;
+		}
+		buf_desc->end_count *= cmd->stop_arg;
+	}
+
+	__a4l_dbg(1, core_dbg, "end_count=%lu\n", buf_desc->end_count);
+
+	return 0;
+}
+
+void a4l_cancel_buffer(struct a4l_device_context *cxt)
+{
+	struct a4l_buffer *buf_desc = cxt->buffer;
+	struct a4l_subdevice *subd = buf_desc->subd;
+
+	if (!subd || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return;
+
+	/* If a "cancel" function is registered, call it
+	   (Note: this function is called before having checked
+	   if a command is under progress; we consider that
+	   the "cancel" function can be used as as to (re)initialize
+	   some component) */
+	if (subd->cancel != NULL)
+		subd->cancel(subd);
+
+	if (buf_desc->cur_cmd != NULL) {
+		a4l_free_cmddesc(buf_desc->cur_cmd);
+		rtdm_free(buf_desc->cur_cmd);
+		buf_desc->cur_cmd = NULL;
+	}
+
+	a4l_reinit_buffer(buf_desc);
+
+	clear_bit(A4L_SUBD_BUSY_NR, &subd->status);
+	subd->buf = NULL;
+}
+
+/* --- Munge related function --- */
+
+int a4l_get_chan(struct a4l_subdevice *subd)
+{
+	int i, j, tmp_count, tmp_size = 0;
+	struct a4l_cmd_desc *cmd;
+
+	cmd = a4l_get_cmd(subd);
+	if (!cmd)
+		return -EINVAL;
+
+	/* There is no need to check the channel idx,
+	   it has already been checked at command test time */
+
+	/* We assume channels can have different sizes;
+	   so, we have to compute the global size of the channels
+	   in this command... */
+	for (i = 0; i < cmd->nb_chan; i++) {
+		j = (subd->chan_desc->mode != A4L_CHAN_GLOBAL_CHANDESC) ?
+			CR_CHAN(cmd->chan_descs[i]) : 0;
+		tmp_size += subd->chan_desc->chans[j].nb_bits;
+	}
+
+	/* Translation bits -> bytes */
+	tmp_size /= 8;
+
+	tmp_count = subd->buf->mng_count % tmp_size;
+
+	/* Translation bytes -> bits */
+	tmp_count *= 8;
+
+	/* ...and find the channel the last munged sample
+	   was related with */
+	for (i = 0; tmp_count > 0 && i < cmd->nb_chan; i++) {
+		j = (subd->chan_desc->mode != A4L_CHAN_GLOBAL_CHANDESC) ?
+			CR_CHAN(cmd->chan_descs[i]) : 0;
+		tmp_count -= subd->chan_desc->chans[j].nb_bits;
+	}
+
+	if (tmp_count == 0)
+		return i;
+	else
+		return -EINVAL;
+}
+
+/* --- Transfer / copy functions --- */
+
+/* The following functions are explained in the Doxygen section
+   "Buffer management services" in driver_facilities.c */
+
+int a4l_buf_prepare_absput(struct a4l_subdevice *subd, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_input(subd))
+		return -EINVAL;
+
+	return __pre_abs_put(buf, count);
+}
+
+
+int a4l_buf_commit_absput(struct a4l_subdevice *subd, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_input(subd))
+		return -EINVAL;
+
+	return __abs_put(buf, count);
+}
+
+int a4l_buf_prepare_put(struct a4l_subdevice *subd, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_input(subd))
+		return -EINVAL;
+
+	return __pre_put(buf, count);
+}
+
+int a4l_buf_commit_put(struct a4l_subdevice *subd, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_input(subd))
+		return -EINVAL;
+
+	return __put(buf, count);
+}
+
+int a4l_buf_put(struct a4l_subdevice *subd, void *bufdata, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+	int err;
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_input(subd))
+		return -EINVAL;
+
+	if (__count_to_put(buf) < count)
+		return -EAGAIN;
+
+	err = __produce(NULL, buf, bufdata, count);
+	if (err < 0)
+		return err;
+
+	err = __put(buf, count);
+
+	return err;
+}
+
+int a4l_buf_prepare_absget(struct a4l_subdevice *subd, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_output(subd))
+		return -EINVAL;
+
+	return __pre_abs_get(buf, count);
+}
+
+int a4l_buf_commit_absget(struct a4l_subdevice *subd, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_output(subd))
+		return -EINVAL;
+
+	return __abs_get(buf, count);
+}
+
+int a4l_buf_prepare_get(struct a4l_subdevice *subd, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_output(subd))
+		return -EINVAL;
+
+	return __pre_get(buf, count);
+}
+
+int a4l_buf_commit_get(struct a4l_subdevice *subd, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+
+	/* Basic checks */
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_output(subd))
+		return -EINVAL;
+
+	return __get(buf, count);
+}
+
+int a4l_buf_get(struct a4l_subdevice *subd, void *bufdata, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+	int err;
+
+	/* Basic checks */
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_output(subd))
+		return -EINVAL;
+
+	if (__count_to_get(buf) < count)
+		return -EAGAIN;
+
+	/* Update the counter */
+	err = __consume(NULL, buf, bufdata, count);
+	if (err < 0)
+		return err;
+
+	/* Perform the transfer */
+	err = __get(buf, count);
+
+	return err;
+}
+
+int a4l_buf_evt(struct a4l_subdevice *subd, unsigned long evts)
+{
+	struct a4l_buffer *buf = subd->buf;
+	int tmp;
+	unsigned long wake = 0, count = ULONG_MAX;
+
+	/* Warning: there may be a race condition here: the cancel
+	   function is called from the user side while a4l_buf_evt() and
+	   all the a4l_buf_...() functions are called from the kernel
+	   side. Nonetheless, handling such race conditions is up to the
+	   driver, not the framework */
+
+	/* Basic checking */
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	/* Here we save the data count available for the user side */
+	if (evts == 0) {
+		count = a4l_subd_is_input(subd) ?
+			__count_to_get(buf) : __count_to_put(buf);
+		wake = __count_to_end(buf) < buf->wake_count ?
+			__count_to_end(buf) : buf->wake_count;
+	} else {
+		/* Even if it is a little more complex, atomic
+		   operations are used so as to prevent any kind of
+		   corner case */
+		while ((tmp = ffs(evts) - 1) != -1) {
+			set_bit(tmp, &buf->flags);
+			clear_bit(tmp, &evts);
+		}
+	}
+
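+	/* If some events were passed in, count keeps its ULONG_MAX
+	   default value and wake remains 0, so the test below always
+	   triggers the notification */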
+	if (count >= wake)
+		/* Notify the user-space side */
+		a4l_signal_sync(&buf->sync);
+
+	return 0;
+}
+
+unsigned long a4l_buf_count(struct a4l_subdevice *subd)
+{
+	struct a4l_buffer *buf = subd->buf;
+	unsigned long ret = 0;
+
+	/* Basic checking */
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (a4l_subd_is_input(subd))
+		ret = __count_to_put(buf);
+	else if (a4l_subd_is_output(subd))
+		ret = __count_to_get(buf);
+
+	return ret;
+}
+
+/* --- Mmap functions --- */
+
+void a4l_map(struct vm_area_struct *area)
+{
+	unsigned long *status = (unsigned long *)area->vm_private_data;
+	set_bit(A4L_BUF_MAP_NR, status);
+}
+
+void a4l_unmap(struct vm_area_struct *area)
+{
+	unsigned long *status = (unsigned long *)area->vm_private_data;
+	clear_bit(A4L_BUF_MAP_NR, status);
+}
+
+static struct vm_operations_struct a4l_vm_ops = {
+	.open = a4l_map,
+	.close = a4l_unmap,
+};
+
+int a4l_ioctl_mmap(struct a4l_device_context *cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	a4l_mmap_t map_cfg;
+	struct a4l_device *dev;
+	struct a4l_buffer *buf;
+	int ret;
+
+	/* The mmap operation cannot be performed in a
+	   real-time context */
+	if (rtdm_in_rt_context()) {
+		return -ENOSYS;
+	}
+
+	dev = a4l_get_dev(cxt);
+	buf = cxt->buffer;
+
+	/* Basic checks */
+
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_mmap: cannot mmap on "
+			  "an unattached device\n");
+		return -EINVAL;
+	}
+
+	if (test_bit(A4L_BUF_MAP_NR, &buf->flags)) {
+		__a4l_err("a4l_ioctl_mmap: buffer already mapped\n");
+		return -EBUSY;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &map_cfg, arg, sizeof(a4l_mmap_t)) != 0)
+		return -EFAULT;
+
+	/* Check the size to be mapped */
+	if ((map_cfg.size & ~(PAGE_MASK)) != 0 || map_cfg.size > buf->size)
+		return -EFAULT;
+
+	/* All the magic is here */
+	ret = rtdm_mmap_to_user(fd,
+				buf->buf,
+				map_cfg.size,
+				PROT_READ | PROT_WRITE,
+				&map_cfg.ptr, &a4l_vm_ops, &buf->flags);
+
+	if (ret < 0) {
+		__a4l_err("a4l_ioctl_mmap: internal error, "
+			  "rtdm_mmap_to_user failed (err=%d)\n", ret);
+		return ret;
+	}
+
+	return rtdm_safe_copy_to_user(fd,
+				      arg, &map_cfg, sizeof(a4l_mmap_t));
+}
+
+/* --- IOCTL / FOPS functions --- */
+
+int a4l_ioctl_cancel(struct a4l_device_context * cxt, void *arg)
+{
+	unsigned int idx_subd = (unsigned long)arg;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_subdevice *subd;
+
+	/* Basically check the device */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_cancel: operation not supported on "
+			  "an unattached device\n");
+		return -EINVAL;
+	}
+
+	if (cxt->buffer->subd == NULL) {
+		__a4l_err("a4l_ioctl_cancel: "
+			  "no acquisition to cancel on this context\n");
+		return -EINVAL;
+	}
+
+	if (idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_ioctl_cancel: bad subdevice index\n");
+		return -EINVAL;
+	}
+
+	subd = dev->transfer.subds[idx_subd];
+
+	if (subd != cxt->buffer->subd) {
+		__a4l_err("a4l_ioctl_cancel: "
+			  "current context works on another subdevice "
+			  "(%d!=%d)\n", cxt->buffer->subd->idx, subd->idx);
+		return -EINVAL;
+	}
+
+	a4l_cancel_buffer(cxt);
+	return 0;
+}
+
+/* The ioctl BUFCFG is only useful for changing the size of the
+   asynchronous buffer.
+   (BUFCFG = freeing of the current buffer + allocation of a new one) */
+
+int a4l_ioctl_bufcfg(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_buffer *buf = cxt->buffer;
+	struct a4l_subdevice *subd = buf->subd;
+	a4l_bufcfg_t buf_cfg;
+
+	/* As the Linux API is used to allocate the virtual buffer,
+	   the calling process must not be in primary mode */
+	if (rtdm_in_rt_context()) {
+		return -ENOSYS;
+	}
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_bufcfg: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &buf_cfg,
+				     arg, sizeof(a4l_bufcfg_t)) != 0)
+		return -EFAULT;
+
+	if (buf_cfg.buf_size > A4L_BUF_MAXSIZE) {
+		__a4l_err("a4l_ioctl_bufcfg: buffer size too big (<=16MB)\n");
+		return -EINVAL;
+	}
+
+	if (buf_cfg.idx_subd == A4L_BUF_DEFMAGIC) {
+		cxt->dev->transfer.default_bufsize = buf_cfg.buf_size;
+		return 0;
+	}
+
+	if (subd && test_bit(A4L_SUBD_BUSY_NR, &subd->status)) {
+		__a4l_err("a4l_ioctl_bufcfg: acquisition in progress\n");
+		return -EBUSY;
+	}
+
+	if (test_bit(A4L_BUF_MAP, &buf->flags)) {
+		__a4l_err("a4l_ioctl_bufcfg: please unmap before "
+			  "configuring buffer\n");
+		return -EPERM;
+	}
+
+	/* Free the buffer... */
+	a4l_free_buffer(buf);
+
+	/* ...to reallocate it */
+	return a4l_alloc_buffer(buf, buf_cfg.buf_size);
+}
+
+/* The ioctl BUFCFG2 allows the user space process to define the
+   minimal amount of data which should trigger a wake-up. If the ABI
+   could be broken, this facility would be handled by the original
+   BUFCFG ioctl. At the next major release, this ioctl will vanish. */
+
+int a4l_ioctl_bufcfg2(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_buffer *buf = cxt->buffer;
+	a4l_bufcfg2_t buf_cfg;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_bufcfg2: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &buf_cfg,
+				     arg, sizeof(a4l_bufcfg2_t)) != 0)
+		return -EFAULT;
+
+	if (buf_cfg.wake_count > buf->size) {
+		__a4l_err("a4l_ioctl_bufcfg2: "
+			  "wake-up threshold too big (> buffer size: %lu)\n",
+			  buf->size);
+		return -EINVAL;
+	}
+
+	buf->wake_count = buf_cfg.wake_count;
+
+	return 0;
+}
+
+/* The BUFINFO ioctl provides two basic roles:
+   - tell the user app the size of the asynchronous buffer
+   - display the read/write counters (how many bytes to read/write) */
+
+int a4l_ioctl_bufinfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_buffer *buf = cxt->buffer;
+	struct a4l_subdevice *subd = buf->subd;
+	a4l_bufinfo_t info;
+
+	unsigned long tmp_cnt;
+	int ret;
+
+	if (!rtdm_in_rt_context() && rtdm_rt_capable(fd))
+		return -ENOSYS;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_bufinfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &info, arg, sizeof(a4l_bufinfo_t)) != 0)
+		return -EFAULT;
+
+
+	/* If no transfer is occurring, simply return the buffer
+	   information; otherwise, make the transfer progress */
+	if (!subd || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) {
+		info.rw_count = 0;
+		goto a4l_ioctl_bufinfo_out;
+	}
+
+	ret = __handle_event(buf);
+
+	if (a4l_subd_is_input(subd)) {
+
+		/* Updates consume count if rw_count is not null */
+		if (info.rw_count != 0)
+			buf->cns_count += info.rw_count;
+
+		/* Retrieves the data amount to read */
+		tmp_cnt = info.rw_count = __count_to_get(buf);
+
+		__a4l_dbg(1, core_dbg, "count to read=%lu\n", tmp_cnt);
+
+		if ((ret < 0 && ret != -ENOENT) ||
+		    (ret == -ENOENT && tmp_cnt == 0)) {
+			a4l_cancel_buffer(cxt);
+			return ret;
+		}
+	} else if (a4l_subd_is_output(subd)) {
+
+		if (ret < 0) {
+			a4l_cancel_buffer(cxt);
+			if (info.rw_count != 0)
+				return ret;
+		}
+
+		/* If rw_count is not null,
+		   there is something to write / munge  */
+		if (info.rw_count != 0 && info.rw_count <= __count_to_put(buf)) {
+
+			/* Updates the production pointer */
+			buf->prd_count += info.rw_count;
+
+			/* Sets the munge count */
+			tmp_cnt = info.rw_count;
+		} else
+			tmp_cnt = 0;
+
+		/* Retrieves the data amount which is writable */
+		info.rw_count = __count_to_put(buf);
+
+		__a4l_dbg(1, core_dbg, " count to write=%lu\n", info.rw_count);
+
+	} else {
+		__a4l_err("a4l_ioctl_bufinfo: inappropriate subdevice\n");
+		return -EINVAL;
+	}
+
+	/* Performs the munge if need be */
+	if (subd->munge != NULL) {
+
+		/* Call the munge callback */
+		__munge(subd, subd->munge, buf, tmp_cnt);
+
+		/* Updates munge count */
+		buf->mng_count += tmp_cnt;
+	}
+
+a4l_ioctl_bufinfo_out:
+
+	/* Sets the buffer size */
+	info.buf_size = buf->size;
+
+	/* Sends the structure back to user space */
+	if (rtdm_safe_copy_to_user(fd,
+				   arg, &info, sizeof(a4l_bufinfo_t)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+/* The ioctl BUFINFO2 tells the user application the minimal amount of
+   data which should trigger a wake-up. If the ABI could be broken, this
+   facility would be handled by the original BUFINFO ioctl. At the next
+   major release, this ioctl will vanish. */
+
+int a4l_ioctl_bufinfo2(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_buffer *buf = cxt->buffer;
+	a4l_bufcfg2_t buf_cfg;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_bufcfg2: unattached device\n");
+		return -EINVAL;
+	}
+
+	buf_cfg.wake_count = buf->wake_count;
+
+	if (rtdm_safe_copy_to_user(fd,
+				   arg, &buf_cfg, sizeof(a4l_bufcfg2_t)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+/* The function a4l_read_buffer can be considered as the kernel entry
+   point of the RTDM syscall read. This syscall is supposed to be used
+   only during asynchronous acquisitions */
+ssize_t a4l_read_buffer(struct a4l_device_context * cxt, void *bufdata, size_t nbytes)
+{
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_buffer *buf = cxt->buffer;
+	struct a4l_subdevice *subd = buf->subd;
+	ssize_t count = 0;
+
+	/* Basic checks */
+
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_read: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (!subd || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) {
+		__a4l_err("a4l_read: idle subdevice on this context\n");
+		return -ENOENT;
+	}
+
+	if (!a4l_subd_is_input(subd)) {
+		__a4l_err("a4l_read: operation requires an input subdevice \n");
+		return -EINVAL;
+	}
+
+	while (count < nbytes) {
+
+		unsigned long tmp_cnt;
+
+		/* Check the events */
+		int ret = __handle_event(buf);
+
+		__dump_buffer_counters(buf);
+
+		/* Compute the data amount to copy */
+		tmp_cnt = __count_to_get(buf);
+
+		/* Check tmp_cnt count is not higher than
+		   the global count to read */
+		if (tmp_cnt > nbytes - count)
+			tmp_cnt = nbytes - count;
+
+		/* We check whether there is an error */
+		if (ret < 0 && ret != -ENOENT) {
+			__a4l_err("a4l_read: failed to handle event %d \n", ret);
+			a4l_cancel_buffer(cxt);
+			count = ret;
+			goto out_a4l_read;
+		}
+
+		/* We check whether the acquisition is over */
+		if (ret == -ENOENT && tmp_cnt == 0) {
+			__a4l_info("a4l_read: acquisition done - all data "
+				   "requested by the client was delivered \n");
+			a4l_cancel_buffer(cxt);
+			count = 0;
+			goto out_a4l_read;
+		}
+
+		if (tmp_cnt > 0) {
+
+			/* Performs the munge if need be */
+			if (subd->munge != NULL) {
+				__munge(subd, subd->munge, buf, tmp_cnt);
+
+				/* Updates munge count */
+				buf->mng_count += tmp_cnt;
+			}
+
+			/* Performs the copy */
+			ret = __consume(cxt, buf, bufdata + count, tmp_cnt);
+
+			if (ret < 0) {
+				count = ret;
+				goto out_a4l_read;
+			}
+
+			/* Updates consume count */
+			buf->cns_count += tmp_cnt;
+			a4l_dbg(1, core_dbg, dev, "buf->cns_cnt=%ld \n", buf->cns_count);
+
+			/* Updates the return value */
+			count += tmp_cnt;
+
+			/* If the driver does not work in bulk mode,
+			   we must leave this function */
+			if (!test_bit(A4L_BUF_BULK, &buf->flags))
+				goto out_a4l_read;
+		}
+		else {
+			/* If the acquisition is not over, we must not
+			   leave the function without having read at least one byte */
+			ret = a4l_wait_sync(&(buf->sync), rtdm_in_rt_context());
+			if (ret < 0) {
+				if (ret == -ERESTARTSYS)
+					ret = -EINTR;
+				count = ret;
+				goto out_a4l_read;
+			}
+		}
+	}
+
+out_a4l_read:
+
+	return count;
+}
+
+/* The function a4l_write_buffer can be considered as the kernel entry
+   point of the RTDM syscall write. This syscall is supposed to be
+   used only during asynchronous acquisitions */
+ssize_t a4l_write_buffer(struct a4l_device_context *cxt, const void *bufdata, size_t nbytes)
+{
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_buffer *buf = cxt->buffer;
+	struct a4l_subdevice *subd = buf->subd;
+	ssize_t count = 0;
+
+	/* Basic checks */
+
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_write: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (!subd || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) {
+		__a4l_err("a4l_write: idle subdevice on this context\n");
+		return -ENOENT;
+	}
+
+	if (!a4l_subd_is_output(subd)) {
+		__a4l_err("a4l_write: operation requires an output subdevice \n");
+		return -EINVAL;
+	}
+
+	while (count < nbytes) {
+
+		unsigned long tmp_cnt;
+
+		/* Check the events */
+		int ret = __handle_event(buf);
+
+		__dump_buffer_counters(buf);
+
+		/* Compute the data amount to copy */
+		tmp_cnt = __count_to_put(buf);
+
+		/* Check tmp_cnt count is not higher than
+		   the global count to write */
+		if (tmp_cnt > nbytes - count)
+			tmp_cnt = nbytes - count;
+
+		if (ret < 0) {
+			count = (ret == -ENOENT) ? -EINVAL : ret;
+			__a4l_err("a4l_write: failed to handle event %d \n", ret);
+			a4l_cancel_buffer(cxt);
+			goto out_a4l_write;
+		}
+
+		if (tmp_cnt > 0) {
+
+
+			/* Performs the copy */
+			ret = __produce(cxt,
+					buf, (void *)bufdata + count, tmp_cnt);
+			if (ret < 0) {
+				count = ret;
+				goto out_a4l_write;
+			}
+
+			/* Performs the munge if need be */
+			if (subd->munge != NULL) {
+				__munge(subd, subd->munge, buf, tmp_cnt);
+
+				/* Updates munge count */
+				buf->mng_count += tmp_cnt;
+			}
+
+			/* Updates produce count */
+			buf->prd_count += tmp_cnt;
+			a4l_dbg(1, core_dbg, dev , "buf->prd_cnt=%ld \n", buf->prd_count);
+
+			/* Updates the return value */
+			count += tmp_cnt;
+
+			/* If the driver does not work in bulk mode,
+			   we must leave this function */
+			if (!test_bit(A4L_BUF_BULK, &buf->flags))
+				goto out_a4l_write;
+		} else {
+			/* The buffer is full, we have to wait for a slot to free */
+			ret = a4l_wait_sync(&(buf->sync), rtdm_in_rt_context());
+			if (ret < 0) {
+				__a4l_err("a4l_write: failed to wait for free slot (%d)\n", ret);
+				if (ret == -ERESTARTSYS)
+					ret = -EINTR;
+				count = ret;
+				goto out_a4l_write;
+			}
+		}
+	}
+
+out_a4l_write:
+
+	return count;
+}
+
+int a4l_select(struct a4l_device_context *cxt,
+	       rtdm_selector_t *selector,
+	       enum rtdm_selecttype type, unsigned fd_index)
+{
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_buffer *buf = cxt->buffer;
+	struct a4l_subdevice *subd = buf->subd;
+
+	/* Basic checks */
+
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_select: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (!subd || !test_bit(A4L_SUBD_BUSY, &subd->status)) {
+		__a4l_err("a4l_select: idle subdevice on this context\n");
+		return -ENOENT;
+	}
+
+	/* Check the RTDM select type
+	   (RTDM_SELECTTYPE_EXCEPT is not supported) */
+
+	if(type != RTDM_SELECTTYPE_READ &&
+	   type != RTDM_SELECTTYPE_WRITE) {
+		__a4l_err("a4l_select: wrong select argument\n");
+		return -EINVAL;
+	}
+
+	if (type == RTDM_SELECTTYPE_READ && !a4l_subd_is_input(subd)) {
+		__a4l_err("a4l_select: current context "
+			  "does not work with an input subdevice\n");
+		return -EINVAL;
+	}
+
+	if (type == RTDM_SELECTTYPE_WRITE && !a4l_subd_is_output(subd)) {
+		__a4l_err("a4l_select: current context "
+			  "does not work with an output subdevice\n");
+		return -EINVAL;
+	}
+
+	/* Performs a bind on the Analogy synchronization element */
+	return a4l_select_sync(&(buf->sync), selector, type, fd_index);
+}
+
+int a4l_ioctl_poll(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int ret = 0;
+	unsigned long tmp_cnt = 0;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_buffer *buf = cxt->buffer;
+	struct a4l_subdevice *subd = buf->subd;
+	a4l_poll_t poll;
+
+	if (!rtdm_in_rt_context() && rtdm_rt_capable(fd))
+		return -ENOSYS;
+
+	/* Basic checking */
+
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_poll: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (!subd || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) {
+		__a4l_err("a4l_poll: idle subdevice on this context\n");
+		return -ENOENT;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &poll, arg, sizeof(a4l_poll_t)) != 0)
+		return -EFAULT;
+
+	/* Checks the buffer events */
+	a4l_flush_sync(&buf->sync);
+	ret = __handle_event(buf);
+
+	/* Retrieve the data amount to report
+	   according to the subdevice type */
+	if (a4l_subd_is_input(subd)) {
+
+		tmp_cnt = __count_to_get(buf);
+
+		/* Check if some error occurred */
+		if (ret < 0 && ret != -ENOENT) {
+			a4l_cancel_buffer(cxt);
+			return ret;
+		}
+
+		/* Check whether the acquisition is over */
+		if (ret == -ENOENT && tmp_cnt == 0) {
+			a4l_cancel_buffer(cxt);
+			return 0;
+		}
+	} else {
+
+		/* If some error was detected, cancel the transfer */
+		if (ret < 0) {
+			a4l_cancel_buffer(cxt);
+			return ret;
+		}
+
+		tmp_cnt = __count_to_put(buf);
+	}
+
+	if (poll.arg == A4L_NONBLOCK || tmp_cnt != 0)
+		goto out_poll;
+
+	if (poll.arg == A4L_INFINITE)
+		ret = a4l_wait_sync(&(buf->sync), rtdm_in_rt_context());
+	else {
+		unsigned long long ns = ((unsigned long long)poll.arg) *
+			((unsigned long long)NSEC_PER_MSEC);
+		ret = a4l_timedwait_sync(&(buf->sync), rtdm_in_rt_context(), ns);
+	}
+
+	if (ret == 0) {
+		/* Retrieves the count once more */
+		if (a4l_subd_is_input(dev->transfer.subds[poll.idx_subd]))
+			tmp_cnt = __count_to_get(buf);
+		else
+			tmp_cnt = __count_to_put(buf);
+	}
+	else
+		return ret;
+
+out_poll:
+
+	poll.arg = tmp_cnt;
+
+	ret = rtdm_safe_copy_to_user(fd,
+				     arg, &poll, sizeof(a4l_poll_t));
+
+	return ret;
+}
+++ linux-patched/drivers/xenomai/analogy/rtdm_interface.c	2022-03-21 12:58:30.960873642 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/sensoray/s526.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, user interface (open, read, write, ioctl, proc)
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include <rtdm/driver.h>
+#include <rtdm/analogy/device.h>
+
+MODULE_AUTHOR("Alexis Berlemont");
+MODULE_DESCRIPTION("Analogy core driver");
+MODULE_LICENSE("GPL");
+
+int (* const a4l_ioctl_functions[]) (struct a4l_device_context *, void *) = {
+	[_IOC_NR(A4L_DEVCFG)] = a4l_ioctl_devcfg,
+	[_IOC_NR(A4L_DEVINFO)] = a4l_ioctl_devinfo,
+	[_IOC_NR(A4L_SUBDINFO)] = a4l_ioctl_subdinfo,
+	[_IOC_NR(A4L_CHANINFO)] = a4l_ioctl_chaninfo,
+	[_IOC_NR(A4L_RNGINFO)] = a4l_ioctl_rnginfo,
+	[_IOC_NR(A4L_CMD)] = a4l_ioctl_cmd,
+	[_IOC_NR(A4L_CANCEL)] = a4l_ioctl_cancel,
+	[_IOC_NR(A4L_INSNLIST)] = a4l_ioctl_insnlist,
+	[_IOC_NR(A4L_INSN)] = a4l_ioctl_insn,
+	[_IOC_NR(A4L_BUFCFG)] = a4l_ioctl_bufcfg,
+	[_IOC_NR(A4L_BUFINFO)] = a4l_ioctl_bufinfo,
+	[_IOC_NR(A4L_POLL)] = a4l_ioctl_poll,
+	[_IOC_NR(A4L_MMAP)] = a4l_ioctl_mmap,
+	[_IOC_NR(A4L_NBCHANINFO)] = a4l_ioctl_nbchaninfo,
+	[_IOC_NR(A4L_NBRNGINFO)] = a4l_ioctl_nbrnginfo,
+	[_IOC_NR(A4L_BUFCFG2)] = a4l_ioctl_bufcfg2,
+	[_IOC_NR(A4L_BUFINFO2)] = a4l_ioctl_bufinfo2
+};
+
+#ifdef CONFIG_PROC_FS
+struct proc_dir_entry *a4l_proc_root;
+
+static int a4l_proc_devs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, a4l_rdproc_devs, NULL);
+}
+
+static const DEFINE_PROC_OPS(a4l_proc_devs_ops,
+			a4l_proc_devs_open,
+			single_release,
+			seq_read,
+			NULL);
+
+static int a4l_proc_drvs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, a4l_rdproc_drvs, NULL);
+}
+
+static const DEFINE_PROC_OPS(a4l_proc_drvs_ops,
+			a4l_proc_drvs_open,
+			single_release,
+			seq_read,
+			NULL);
+
+int a4l_init_proc(void)
+{
+	int ret = 0;
+	struct proc_dir_entry *entry;
+
+	/* Creates the global directory */
+	a4l_proc_root = proc_mkdir("analogy", NULL);
+	if (a4l_proc_root == NULL) {
+		__a4l_err("a4l_proc_init: "
+			  "failed to create /proc/analogy\n");
+		return -ENOMEM;
+	}
+
+	/* Creates the devices related file */
+	entry = proc_create("devices", 0444, a4l_proc_root,
+			    &a4l_proc_devs_ops);
+	if (entry == NULL) {
+		__a4l_err("a4l_proc_init: "
+			  "failed to create /proc/analogy/devices\n");
+		ret = -ENOMEM;
+		goto err_proc_init;
+	}
+
+	/* Creates the drivers related file */
+	entry = proc_create("drivers", 0444, a4l_proc_root,
+			    &a4l_proc_drvs_ops);
+	if (entry == NULL) {
+		__a4l_err("a4l_proc_init: "
+			  "failed to create /proc/analogy/drivers\n");
+		ret = -ENOMEM;
+		goto err_proc_init;
+	}
+
+	return 0;
+
+err_proc_init:
+	remove_proc_entry("devices", a4l_proc_root);
+	remove_proc_entry("analogy", NULL);
+	return ret;
+}
+
+void a4l_cleanup_proc(void)
+{
+	remove_proc_entry("drivers", a4l_proc_root);
+	remove_proc_entry("devices", a4l_proc_root);
+	remove_proc_entry("analogy", NULL);
+}
+
+#else /* !CONFIG_PROC_FS */
+
+#define a4l_init_proc() 0
+#define a4l_cleanup_proc()
+
+#endif /* CONFIG_PROC_FS */
+
+int a4l_open(struct rtdm_fd *fd, int flags)
+{
+	struct a4l_device_context *cxt = (struct a4l_device_context *)rtdm_fd_to_private(fd);
+
+	/* Get a pointer to the selected device (via the minor index) */
+	a4l_set_dev(cxt);
+
+	/* Initialize the buffer structure */
+	cxt->buffer = rtdm_malloc(sizeof(struct a4l_buffer));
+
+	a4l_init_buffer(cxt->buffer);
+	/* Allocate the asynchronous buffer
+	   NOTE: it might be worth allocating the buffer only on
+	   demand, especially if the system is short of memory */
+	if (cxt->dev->transfer.default_bufsize)
+		a4l_alloc_buffer(cxt->buffer,
+				 cxt->dev->transfer.default_bufsize);
+
+	__a4l_dbg(1, core_dbg, "cxt=%p cxt->buf=%p, cxt->buf->buf=%p\n",
+		cxt, cxt->buffer, cxt->buffer->buf);
+
+	return 0;
+}
+
+void a4l_close(struct rtdm_fd *fd)
+{
+	struct a4l_device_context *cxt = (struct a4l_device_context *)rtdm_fd_to_private(fd);
+
+	/* Cancel any ongoing asynchronous transfer */
+	a4l_cancel_buffer(cxt);
+
+	/* Free the buffer which was linked with this context and... */
+	a4l_free_buffer(cxt->buffer);
+
+	/* ...free the other buffer resources (sync) and... */
+	a4l_cleanup_buffer(cxt->buffer);
+
+	/* ...free the structure */
+	rtdm_free(cxt->buffer);
+}
+
+ssize_t a4l_read(struct rtdm_fd *fd, void *buf, size_t nbytes)
+{
+	struct a4l_device_context *cxt = (struct a4l_device_context *)rtdm_fd_to_private(fd);
+
+	/* Jump into the RT domain if possible */
+	if (!rtdm_in_rt_context() && rtdm_rt_capable(fd))
+		return -ENOSYS;
+
+	if (nbytes == 0)
+		return 0;
+
+	return a4l_read_buffer(cxt, buf, nbytes);
+}
+
+ssize_t a4l_write(struct rtdm_fd *fd, const void *buf, size_t nbytes)
+{
+	struct a4l_device_context *cxt = (struct a4l_device_context *)rtdm_fd_to_private(fd);
+
+	/* Jump into the RT domain if possible */
+	if (!rtdm_in_rt_context() && rtdm_rt_capable(fd))
+		return -ENOSYS;
+
+	if (nbytes == 0)
+		return 0;
+
+	return a4l_write_buffer(cxt, buf, nbytes);
+}
+
+int a4l_ioctl(struct rtdm_fd *fd, unsigned int request, void *arg)
+{
+	struct a4l_device_context *cxt = (struct a4l_device_context *)rtdm_fd_to_private(fd);
+
+	return a4l_ioctl_functions[_IOC_NR(request)] (cxt, arg);
+}
+
+int a4l_rt_select(struct rtdm_fd *fd,
+		  rtdm_selector_t *selector,
+		  enum rtdm_selecttype type, unsigned fd_index)
+{
+	struct a4l_device_context *cxt = (struct a4l_device_context *)rtdm_fd_to_private(fd);
+
+	return a4l_select(cxt, selector, type, fd_index);
+}
+
+static struct rtdm_driver analogy_driver = {
+	.profile_info =		RTDM_PROFILE_INFO(analogy,
+						  RTDM_CLASS_EXPERIMENTAL,
+						  RTDM_SUBCLASS_ANALOGY,
+						  0),
+	.device_flags =		RTDM_NAMED_DEVICE,
+	.device_count =		A4L_NB_DEVICES,
+	.context_size =		sizeof(struct a4l_device_context),
+	.ops = {
+		.open =		a4l_open,
+		.close =	a4l_close,
+		.ioctl_rt =	a4l_ioctl,
+		.read_rt =	a4l_read,
+		.write_rt =	a4l_write,
+		.ioctl_nrt =	a4l_ioctl,
+		.read_nrt =	a4l_read,
+		.write_nrt =	a4l_write,
+		.select =	a4l_rt_select,
+	},
+};
+
+static struct rtdm_device rtdm_devs[A4L_NB_DEVICES] = {
+	[0 ... A4L_NB_DEVICES - 1] = {
+		.driver = &analogy_driver,
+		.label = "analogy%d",
+	}
+};
+
+int a4l_register(void)
+{
+	int i, ret;
+
+	for (i = 0; i < A4L_NB_DEVICES; i++) {
+		ret = rtdm_dev_register(rtdm_devs + i);
+		if (ret)
+			goto fail;
+	}
+
+	return 0;
+fail:
+	while (i-- > 0)
+		rtdm_dev_unregister(rtdm_devs + i);
+
+	return ret;
+}
+
+void a4l_unregister(void)
+{
+	int i;
+	for (i = 0; i < A4L_NB_DEVICES; i++)
+		rtdm_dev_unregister(&(rtdm_devs[i]));
+}
+
+static int __init a4l_init(void)
+{
+	int ret;
+
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	/* Initializes the devices */
+	a4l_init_devs();
+
+	/* Initializes Analogy time management */
+	a4l_init_time();
+
+	/* Registers RTDM / fops interface */
+	ret = a4l_register();
+	if (ret != 0) {
+		a4l_unregister();
+		goto out_a4l_init;
+	}
+
+	/* Initializes Analogy proc layer */
+	ret = a4l_init_proc();
+
+out_a4l_init:
+	return ret;
+}
+
+static void __exit a4l_cleanup(void)
+{
+	/* Removes Analogy proc files */
+	a4l_cleanup_proc();
+
+	/* Unregisters RTDM / fops interface */
+	a4l_unregister();
+}
+
+module_init(a4l_init);
+module_exit(a4l_cleanup);
+++ linux-patched/drivers/xenomai/analogy/sensoray/s526.c	2022-03-21 12:58:30.953873710 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/sensoray/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy driver for Sensoray Model 526 board
+ *
+ * Copyright (C) 2009 Simon Boulay <simon.boulay@gmail.com>
+ *
+ * Derived from comedi:
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *               2006 Everett Wang <everett.wang@everteq.com>
+ *               2009 Ian Abbott <abbotti@mev.co.uk>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+/*
+ * Original code comes from comedi linux-next staging driver (2009.12.20)
+ * Board documentation: http://www.sensoray.com/products/526data.htm
+ * Everything should work as in comedi:
+ *   - Encoder works
+ *   - Analog input works
+ *   - Analog output works
+ *   - PWM output works
+ *   - Commands are not supported yet.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <asm/byteorder.h>
+#include <rtdm/analogy/device.h>
+
+/* Board description */
+#define S526_GPCT_CHANS	4
+#define S526_GPCT_BITS	24
+#define S526_AI_CHANS	10	/* 8 regular differential inputs
+				 * channel 8 is "reference 0" (+10V)
+				 * channel 9 is "reference 1" (0V) */
+#define S526_AI_BITS	16
+#define S526_AI_TIMEOUT 100
+#define S526_AO_CHANS	4
+#define S526_AO_BITS	16
+#define S526_DIO_CHANS	8
+#define S526_DIO_BITS	1
+
+/* Ports */
+#define S526_IOSIZE		0x40  /* 64 bytes */
+#define S526_DEFAULT_ADDRESS	0x2C0 /* Manufacturing default */
+
+/* Registers */
+#define REG_TCR 0x00
+#define REG_WDC 0x02
+#define REG_DAC 0x04
+#define REG_ADC 0x06
+#define REG_ADD 0x08
+#define REG_DIO 0x0A
+#define REG_IER 0x0C
+#define REG_ISR 0x0E
+#define REG_MSC 0x10
+#define REG_C0L 0x12
+#define REG_C0H 0x14
+#define REG_C0M 0x16
+#define REG_C0C 0x18
+#define REG_C1L 0x1A
+#define REG_C1H 0x1C
+#define REG_C1M 0x1E
+#define REG_C1C 0x20
+#define REG_C2L 0x22
+#define REG_C2H 0x24
+#define REG_C2M 0x26
+#define REG_C2C 0x28
+#define REG_C3L 0x2A
+#define REG_C3H 0x2C
+#define REG_C3M 0x2E
+#define REG_C3C 0x30
+#define REG_EED 0x32
+#define REG_EEC 0x34
+
+#define ISR_ADC_DONE 0x4
+
+struct counter_mode_register_t {
+#if defined (__LITTLE_ENDIAN_BITFIELD)
+	unsigned short coutSource:1;
+	unsigned short coutPolarity:1;
+	unsigned short autoLoadResetRcap:3;
+	unsigned short hwCtEnableSource:2;
+	unsigned short ctEnableCtrl:2;
+	unsigned short clockSource:2;
+	unsigned short countDir:1;
+	unsigned short countDirCtrl:1;
+	unsigned short outputRegLatchCtrl:1;
+	unsigned short preloadRegSel:1;
+	unsigned short reserved:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	unsigned short reserved:1;
+	unsigned short preloadRegSel:1;
+	unsigned short outputRegLatchCtrl:1;
+	unsigned short countDirCtrl:1;
+	unsigned short countDir:1;
+	unsigned short clockSource:2;
+	unsigned short ctEnableCtrl:2;
+	unsigned short hwCtEnableSource:2;
+	unsigned short autoLoadResetRcap:3;
+	unsigned short coutPolarity:1;
+	unsigned short coutSource:1;
+#else
+#error Unknown bit field order
+#endif
+};
+
+union cmReg {
+	struct counter_mode_register_t reg;
+	unsigned short value;
+};
+
+/* Application Classes for GPCT Subdevices */
+enum S526_GPCT_APP_CLASS {
+	CountingAndTimeMeasurement,
+	SinglePulseGeneration,
+	PulseTrainGeneration,
+	PositionMeasurement,
+	Miscellaneous
+};
+
+/* GPCT subdevices configuration */
+#define MAX_GPCT_CONFIG_DATA 6
+struct s526GPCTConfig {
+	enum S526_GPCT_APP_CLASS app;
+	int data[MAX_GPCT_CONFIG_DATA];
+};
+
+typedef struct s526_priv {
+	unsigned long io_base;
+} s526_priv_t;
+
+struct s526_subd_gpct_priv {
+	struct s526GPCTConfig config[4];
+};
+
+struct s526_subd_ai_priv {
+	uint16_t config;
+};
+
+struct s526_subd_ao_priv {
+	uint16_t readback[2];
+};
+
+struct s526_subd_dio_priv {
+	int io_bits;
+	unsigned int state;
+};
+
+#define devpriv ((s526_priv_t*)(dev->priv))
+
+#define ADDR_REG(reg) (devpriv->io_base + (reg))
+#define ADDR_CHAN_REG(reg, chan) (devpriv->io_base + (reg) + (chan) * 8)
+
+
+static int s526_gpct_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	struct s526_subd_gpct_priv *subdpriv =
+	    (struct s526_subd_gpct_priv *)subd->priv;
+	unsigned int *data = (unsigned int *)insn->data;
+	int subdev_channel = CR_CHAN(insn->chan_desc);
+	int i;
+	short value;
+	union cmReg cmReg;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"s526_gpct_insn_config: Configuring Channel %d\n",
+		subdev_channel);
+
+	for (i = 0; i < MAX_GPCT_CONFIG_DATA; i++) {
+		subdpriv->config[subdev_channel].data[i] = data[i];
+		a4l_dbg(1, drv_dbg, dev, "data[%d]=%x\n", i, data[i]);
+	}
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_GPCT_QUADRATURE_ENCODER:
+		/*
+		 * data[0]: Application Type
+		 * data[1]: Counter Mode Register Value
+		 * data[2]: Pre-load Register Value
+		 * data[3]: Counter Control Register
+		 */
+		a4l_dbg(1, drv_dbg, dev, "s526_gpct_insn_config: Configuring Encoder\n");
+		subdpriv->config[subdev_channel].app = PositionMeasurement;
+
+		/* Set Counter Mode Register */
+		cmReg.value = data[1] & 0xFFFF;
+
+		a4l_dbg(1, drv_dbg, dev, "Counter Mode register=%x\n", cmReg.value);
+		outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
+
+		/* Reset the counter if software preload is selected */
+		if (cmReg.reg.autoLoadResetRcap == 0) {
+			outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel)); /* Reset the counter */
+			/* outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel));	/\* Load the counter from PR0 *\/ */
+		}
+		break;
+
+	case A4L_INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR:
+		/*
+		 * data[0]: Application Type
+		 * data[1]: Counter Mode Register Value
+		 * data[2]: Pre-load Register 0 Value
+		 * data[3]: Pre-load Register 1 Value
+		 * data[4]: Counter Control Register
+		 */
+		a4l_dbg(1, drv_dbg, dev, "s526_gpct_insn_config: Configuring SPG\n");
+		subdpriv->config[subdev_channel].app = SinglePulseGeneration;
+
+		/* Set Counter Mode Register */
+		cmReg.value = (short)(data[1] & 0xFFFF);
+		cmReg.reg.preloadRegSel = 0; /* PR0 */
+		outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
+
+		/* Load the pre-load register 0 high word */
+		value = (short)((data[2] >> 16) & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel));
+
+		/* Load the pre-load register 0 low word */
+		value = (short)(data[2] & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel));
+
+		/* Set Counter Mode Register */
+		cmReg.value = (short)(data[1] & 0xFFFF);
+		cmReg.reg.preloadRegSel = 1; /* PR1 */
+		outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
+
+		/* Load the pre-load register 1 high word */
+		value = (short)((data[3] >> 16) & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel));
+
+		/* Load the pre-load register 1 low word */
+		value = (short)(data[3] & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel));
+
+		/* Write the Counter Control Register */
+		if (data[4] != 0) {
+			value = (short)(data[4] & 0xFFFF);
+			outw(value, ADDR_CHAN_REG(REG_C0C, subdev_channel));
+		}
+		break;
+
+	case A4L_INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR:
+		/*
+		 * data[0]: Application Type
+		 * data[1]: Counter Mode Register Value
+		 * data[2]: Pre-load Register 0 Value
+		 * data[3]: Pre-load Register 1 Value
+		 * data[4]: Counter Control Register
+		 */
+		a4l_dbg(1, drv_dbg, dev, "s526_gpct_insn_config: Configuring PTG\n");
+		subdpriv->config[subdev_channel].app = PulseTrainGeneration;
+
+		/* Set Counter Mode Register */
+		cmReg.value = (short)(data[1] & 0xFFFF);
+		cmReg.reg.preloadRegSel = 0; /* PR0 */
+		outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
+
+		/* Load the pre-load register 0 high word */
+		value = (short)((data[2] >> 16) & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel));
+
+		/* Load the pre-load register 0 low word */
+		value = (short)(data[2] & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel));
+
+		/* Set Counter Mode Register */
+		cmReg.value = (short)(data[1] & 0xFFFF);
+		cmReg.reg.preloadRegSel = 1; /* PR1 */
+		outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
+
+		/* Load the pre-load register 1 high word */
+		value = (short)((data[3] >> 16) & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel));
+
+		/* Load the pre-load register 1 low word */
+		value = (short)(data[3] & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel));
+
+		/* Write the Counter Control Register */
+		if (data[4] != 0) {
+			value = (short)(data[4] & 0xFFFF);
+			outw(value, ADDR_CHAN_REG(REG_C0C, subdev_channel));
+		}
+		break;
+
+	default:
+		a4l_err(dev, "s526_gpct_insn_config: unsupported GPCT_insn_config\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int s526_gpct_rinsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint32_t *data = (uint32_t *)insn->data;
+	int counter_channel = CR_CHAN(insn->chan_desc);
+	unsigned short datalow;
+	unsigned short datahigh;
+	int i;
+
+	if (insn->data_size <= 0) {
+		a4l_err(dev, "s526_gpct_rinsn: data size should be > 0\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < insn->data_size / sizeof(uint32_t); i++) {
+		datalow = inw(ADDR_CHAN_REG(REG_C0L, counter_channel));
+		datahigh = inw(ADDR_CHAN_REG(REG_C0H, counter_channel));
+		data[i] = (int)(datahigh & 0x00FF);
+		data[i] = (data[i] << 16) | (datalow & 0xFFFF);
+		a4l_dbg(1, drv_dbg, dev,
+			"s526_gpct_rinsn GPCT[%d]: %x(0x%04x, 0x%04x)\n",
+			counter_channel, data[i], datahigh, datalow);
+	}
+
+	return 0;
+}
+
+static int s526_gpct_winsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	struct s526_subd_gpct_priv *subdpriv =
+	    (struct s526_subd_gpct_priv *)subd->priv;
+	uint32_t *data = (uint32_t *)insn->data;
+	int subdev_channel = CR_CHAN(insn->chan_desc);
+	short value;
+	union cmReg cmReg;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"s526_gpct_winsn: GPCT_INSN_WRITE on channel %d\n",
+		subdev_channel);
+
+	cmReg.value = inw(ADDR_CHAN_REG(REG_C0M, subdev_channel));
+	a4l_dbg(1, drv_dbg, dev,
+		"s526_gpct_winsn: Counter Mode Register: %x\n", cmReg.value);
+
+	/* Check which counter application this channel is configured for */
+	switch (subdpriv->config[subdev_channel].app) {
+	case PositionMeasurement:
+		a4l_dbg(1, drv_dbg, dev, "s526_gpct_winsn: INSN_WRITE: PM\n");
+		outw(0xFFFF & ((*data) >> 16), ADDR_CHAN_REG(REG_C0H,
+							     subdev_channel));
+		outw(0xFFFF & (*data),
+		     ADDR_CHAN_REG(REG_C0L, subdev_channel));
+		break;
+
+	case SinglePulseGeneration:
+		a4l_dbg(1, drv_dbg, dev, "s526_gpct_winsn: INSN_WRITE: SPG\n");
+		outw(0xFFFF & ((*data) >> 16), ADDR_CHAN_REG(REG_C0H,
+							     subdev_channel));
+		outw(0xFFFF & (*data),
+		     ADDR_CHAN_REG(REG_C0L, subdev_channel));
+		break;
+
+	case PulseTrainGeneration:
+		/*
+		 * data[0] contains the PULSE_WIDTH
+		 * data[1] contains the PULSE_PERIOD
+		 * @pre PULSE_PERIOD > PULSE_WIDTH > 0
+		 * The above periods must be expressed as a multiple of the
+		 * pulse frequency on the selected source
+		 */
+		a4l_dbg(1, drv_dbg, dev, "s526_gpct_winsn: INSN_WRITE: PTG\n");
+		if ((data[1] > data[0]) && (data[0] > 0)) {
+			(subdpriv->config[subdev_channel]).data[0] = data[0];
+			(subdpriv->config[subdev_channel]).data[1] = data[1];
+		} else {
+			a4l_err(dev,
+				"s526_gpct_winsn: INSN_WRITE: PTG: Problem with Pulse params -> %u %u\n",
+				data[0], data[1]);
+			return -EINVAL;
+		}
+
+		value = (short)((*data >> 16) & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel));
+		value = (short)(*data & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel));
+		break;
+	default:		/* Impossible */
+		a4l_err(dev,
+			"s526_gpct_winsn: INSN_WRITE: Functionality %d not implemented yet\n",
+			 subdpriv->config[subdev_channel].app);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int s526_ai_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	struct s526_subd_ai_priv *subdpriv =
+	    (struct s526_subd_ai_priv *)subd->priv;
+	unsigned int *data = (unsigned int *)insn->data;
+
+	if (insn->data_size < sizeof(unsigned int))
+		return -EINVAL;
+
+	/* data[0] : channels to enable, set in the relevant bits.
+	 * data[1] : delay
+	 */
+	/* COMMENT: abbotti 2008-07-24: I don't know why you'd want to
+	 * enable channels here.  The channel should be enabled in the
+	 * INSN_READ handler. */
+
+	/* Enable ADC interrupt */
+	outw(ISR_ADC_DONE, ADDR_REG(REG_IER));
+	a4l_dbg(1, drv_dbg, dev,
+		"s526_ai_insn_config: ADC current value: 0x%04x\n",
+		inw(ADDR_REG(REG_ADC)));
+
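+	/*
+	 * The conversion value assembled below: bit 15 enables the delay,
+	 * bits 14..5 carry the channel enable mask from data[0], bit 0 is
+	 * the ADC start bit.
+	 */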
+	subdpriv->config = (data[0] & 0x3FF) << 5;
+	if (data[1] > 0)
+		subdpriv->config |= 0x8000; /* set the delay */
+
+	subdpriv->config |= 0x0001; /* ADC start bit. */
+
+	return 0;
+}
+
+static int s526_ai_rinsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	struct s526_subd_ai_priv *subdpriv =
+	    (struct s526_subd_ai_priv *)subd->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+	int n, i;
+	int chan = CR_CHAN(insn->chan_desc);
+	uint16_t value;
+	uint16_t d;
+	uint16_t status;
+
+	/* Set configured delay, enable channel for this channel only,
+	 * select "ADC read" channel, set "ADC start" bit. */
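+	/* For example, chan == 2 with the delay bit set in the stored
+	 * config yields 0x8000 | 0x0080 | 0x0004 | 0x0001 == 0x8085. */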
+	value = (subdpriv->config & 0x8000) |
+	    ((1 << 5) << chan) | (chan << 1) | 0x0001;
+
+	/* convert n samples */
+	for (n = 0; n < insn->data_size / sizeof(uint16_t); n++) {
+		/* trigger conversion */
+		outw(value, ADDR_REG(REG_ADC));
+		a4l_dbg(1, drv_dbg, dev, "s526_ai_rinsn: Wrote 0x%04x to ADC\n",
+			value);
+
+		/* wait for conversion to end */
+		for (i = 0; i < S526_AI_TIMEOUT; i++) {
+			status = inw(ADDR_REG(REG_ISR));
+			if (status & ISR_ADC_DONE) {
+				outw(ISR_ADC_DONE, ADDR_REG(REG_ISR));
+				break;
+			}
+		}
+		if (i == S526_AI_TIMEOUT) {
+			a4l_warn(dev, "s526_ai_rinsn: ADC(0x%04x) timeout\n",
+				 inw(ADDR_REG(REG_ISR)));
+			return -ETIMEDOUT;
+		}
+
+		/* read data */
+		d = inw(ADDR_REG(REG_ADD));
+		a4l_dbg(1, drv_dbg, dev, "s526_ai_rinsn: AI[%d]=0x%04x\n",
+			n, (uint16_t)(d & 0xFFFF));
+
+		/* munge data */
+		data[n] = d ^ 0x8000;
+	}
+
+	return 0;
+}
+
+static int s526_ao_winsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	struct s526_subd_ao_priv *subdpriv =
+	    (struct s526_subd_ao_priv *)subd->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+	int i;
+	int chan = CR_CHAN(insn->chan_desc);
+	uint16_t val;
+
+	val = chan << 1;
+	outw(val, ADDR_REG(REG_DAC));
+
+	for (i = 0; i < insn->data_size / sizeof(uint16_t); i++) {
+		outw(data[i], ADDR_REG(REG_ADD)); /* write the data to preload register */
+		subdpriv->readback[chan] = data[i];
+		outw(val + 1, ADDR_REG(REG_DAC)); /* starts the D/A conversion. */
+	}
+
+	return 0;
+}
+
+static int s526_ao_rinsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct s526_subd_ao_priv *subdpriv =
+		(struct s526_subd_ao_priv *)subd->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+	int i;
+	int chan = CR_CHAN(insn->chan_desc);
+
+	for (i = 0; i < insn->data_size / sizeof(uint16_t); i++)
+		data[i] = subdpriv->readback[chan];
+
+	return 0;
+}
+
+static int s526_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	struct s526_subd_dio_priv *subdpriv =
+	    (struct s526_subd_dio_priv *)subd->priv;
+	unsigned int *data = (unsigned int *)insn->data;
+	int chan = CR_CHAN(insn->chan_desc);
+	int group, mask;
+
+	group = chan >> 2;
+	mask = 0xF << (group << 2);
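+	/* Channels 0-3 belong to group 0 (mask 0x0F), channels 4-7 to
+	 * group 1 (mask 0xF0). */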
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		subdpriv->state |= 1 << (group + 10); /* bits 10/11 set the
+						       * mode of groups 0/1 */
+		subdpriv->io_bits |= mask;
+		break;
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		subdpriv->state &= ~(1 << (group + 10)); /* 1 is output, 0 is
+							  * input. */
+		subdpriv->io_bits &= ~mask;
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] =
+		    (subdpriv->io_bits & mask) ? A4L_OUTPUT : A4L_INPUT;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	outw(subdpriv->state, ADDR_REG(REG_DIO));
+
+	return 0;
+}
+
+static int s526_dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	struct s526_subd_dio_priv *subdpriv =
+		(struct s526_subd_dio_priv *)subd->priv;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	if (insn->data_size != 2 * sizeof(uint8_t))
+		return -EINVAL;
+
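+	/* data[0] is the mask of channels to update, data[1] holds their
+	 * new values; channels outside the mask are left untouched. */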
+	if (data[0]) {
+		subdpriv->state &= ~(data[0]);
+		subdpriv->state |= data[0] & data[1];
+
+		outw(subdpriv->state, ADDR_REG(REG_DIO));
+	}
+
+	data[1] = inw(ADDR_REG(REG_DIO)) & 0xFF; /* low 8 bits are the data */
+
+	return 0;
+}
+
+/* --- Channels descriptor --- */
+
+static struct a4l_channels_desc s526_chan_desc_gpct = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = S526_GPCT_CHANS,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, S526_GPCT_BITS},
+	},
+};
+
+static struct a4l_channels_desc s526_chan_desc_ai = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = S526_AI_CHANS,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, S526_AI_BITS},
+	},
+};
+
+static struct a4l_channels_desc s526_chan_desc_ao = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = S526_AO_CHANS,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, S526_AO_BITS},
+	},
+};
+
+static struct a4l_channels_desc s526_chan_desc_dio = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = S526_DIO_CHANS,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, S526_DIO_BITS},
+	},
+};
+
+/* --- Subdevice initialization functions --- */
+
+/* General purpose counter/timer (gpct) */
+static void setup_subd_gpct(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_COUNTER;
+	subd->chan_desc = &s526_chan_desc_gpct;
+	subd->insn_read = s526_gpct_rinsn;
+	subd->insn_config = s526_gpct_insn_config;
+	subd->insn_write = s526_gpct_winsn;
+}
+
+/* Analog input subdevice */
+static void setup_subd_ai(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_AI;
+	subd->chan_desc = &s526_chan_desc_ai;
+	subd->rng_desc = &a4l_range_bipolar10;
+	subd->insn_read = s526_ai_rinsn;
+	subd->insn_config = s526_ai_insn_config;
+}
+
+/* Analog output subdevice */
+static void setup_subd_ao(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_AO;
+	subd->chan_desc = &s526_chan_desc_ao;
+	subd->rng_desc = &a4l_range_bipolar10;
+	subd->insn_write = s526_ao_winsn;
+	subd->insn_read = s526_ao_rinsn;
+}
+
+/* Digital i/o subdevice */
+static void setup_subd_dio(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_DIO;
+	subd->chan_desc = &s526_chan_desc_dio;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = s526_dio_insn_bits;
+	subd->insn_config = s526_dio_insn_config;
+}
+
+struct setup_subd {
+	void (*setup_func) (struct a4l_subdevice *);
+	int sizeof_priv;
+};
+
+static struct setup_subd setup_subds[4] = {
+	{
+		.setup_func = setup_subd_gpct,
+		.sizeof_priv = sizeof(struct s526_subd_gpct_priv),
+	},
+	{
+		.setup_func = setup_subd_ai,
+		.sizeof_priv = sizeof(struct s526_subd_ai_priv),
+	},
+	{
+		.setup_func = setup_subd_ao,
+		.sizeof_priv = sizeof(struct s526_subd_ao_priv),
+	},
+	{
+		.setup_func = setup_subd_dio,
+		.sizeof_priv = sizeof(struct s526_subd_dio_priv),
+	},
+};
+
+static int dev_s526_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	int io_base;
+	int i;
+	int err = 0;
+
+	if (arg->opts == NULL || arg->opts_size < sizeof(unsigned long)) {
+		a4l_warn(dev,
+			 "dev_s526_attach: no attach options specified; "
+			 "using defaults: addr=0x%x\n",
+			 S526_DEFAULT_ADDRESS);
+		io_base = S526_DEFAULT_ADDRESS;
+	} else {
+		io_base = ((unsigned long *)arg->opts)[0];
+	}
+
+	if (!request_region(io_base, S526_IOSIZE, "s526")) {
+		a4l_err(dev, "dev_s526_attach: I/O port conflict\n");
+		return -EIO;
+	}
+
+	/* Allocate the subdevice structures. */
+	for (i = 0; i < 4; i++) {
+		struct a4l_subdevice *subd = a4l_alloc_subd(setup_subds[i].sizeof_priv,
+						  setup_subds[i].setup_func);
+
+		if (subd == NULL) {
+			release_region(io_base, S526_IOSIZE);
+			return -ENOMEM;
+		}
+
+		err = a4l_add_subd(dev, subd);
+		if (err != i) {
+			release_region(io_base, S526_IOSIZE);
+			return err;
+		}
+	}
+
+	devpriv->io_base = io_base;
+
+	a4l_info(dev, " attached (address = 0x%x)\n", io_base);
+
+	return 0;
+}
+
+static int dev_s526_detach(struct a4l_device *dev)
+{
+	int err = 0;
+
+	if (devpriv->io_base != 0)
+		release_region(devpriv->io_base, S526_IOSIZE);
+
+	return err;
+}
+
+static struct a4l_driver drv_s526 = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_s526",
+	.driver_name = "s526",
+	.attach = dev_s526_attach,
+	.detach = dev_s526_detach,
+	.privdata_size = sizeof(s526_priv_t),
+};
+
+static int __init drv_s526_init(void)
+{
+	return a4l_register_drv(&drv_s526);
+}
+
+static void __exit drv_s526_cleanup(void)
+{
+	a4l_unregister_drv(&drv_s526);
+}
+
+MODULE_DESCRIPTION("Analogy driver for Sensoray Model 526 board.");
+MODULE_LICENSE("GPL");
+
+module_init(drv_s526_init);
+module_exit(drv_s526_cleanup);
+++ linux-patched/drivers/xenomai/analogy/sensoray/Kconfig	2022-03-21 12:58:30.946873778 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:2 @
--- linux/drivers/xenomai/analogy/sensoray/Makefile	1970-01-01 01:00:00.000000000 +0100
+
+config XENO_DRIVERS_ANALOGY_S526
+	depends on XENO_DRIVERS_ANALOGY
+	tristate "Sensoray Model 526 driver"
+	default n
+++ linux-patched/drivers/xenomai/analogy/sensoray/Makefile	2022-03-21 12:58:30.938873857 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:3 @
--- linux/drivers/xenomai/autotune/Kconfig	1970-01-01 01:00:00.000000000 +0100
+
+ccflags-y += -I$(srctree)/drivers/xenomai/analogy
+
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_S526) += analogy_s526.o
+
+analogy_s526-y := s526.o
+++ linux-patched/drivers/xenomai/autotune/Kconfig	2022-03-21 12:58:30.931873925 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:1 @
--- linux/drivers/xenomai/autotune/Makefile	1970-01-01 01:00:00.000000000 +0100
+
+config XENO_DRIVERS_AUTOTUNE
+	tristate
+++ linux-patched/drivers/xenomai/autotune/Makefile	2022-03-21 12:58:30.923874003 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:1 @
--- linux/drivers/xenomai/autotune/autotune.c	1970-01-01 01:00:00.000000000 +0100
+
+obj-$(CONFIG_XENO_DRIVERS_AUTOTUNE) += xeno_autotune.o
+
+xeno_autotune-y := autotune.o
+++ linux-patched/drivers/xenomai/autotune/autotune.c	2022-03-21 12:58:30.916874071 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/atomic.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/sort.h>
+#include <cobalt/kernel/arith.h>
+#include <rtdm/driver.h>
+#include <rtdm/autotune.h>
+
+MODULE_DESCRIPTION("Xenomai/cobalt core clock autotuner");
+MODULE_AUTHOR("Philippe Gerum <rpm@xenomai.org>");
+MODULE_LICENSE("GPL");
+
+/* Auto-tuning services for the Cobalt core clock. */
+
+#define SAMPLING_TIME	500000000UL
+#define ADJUSTMENT_STEP 500
+#define WARMUP_STEPS	10
+#define AUTOTUNE_STEPS  40
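+
+/*
+ * Rough outline of the procedure implemented in tune_gravity(): each
+ * step samples timer latencies over a SAMPLING_TIME window; after
+ * WARMUP_STEPS warm-up rounds, the clock gravity is raised by roughly
+ * ADJUSTMENT_STEP ns per round (at most AUTOTUNE_STEPS rounds), a score
+ * is recorded per round, and the filters below keep the smallest
+ * gravity among the best-scoring rounds.
+ */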
+
+#define progress(__tuner, __fmt, __args...)				\
+	do {								\
+		if (!(__tuner)->quiet)					\
+			printk(XENO_INFO "autotune(%s) " __fmt "\n",	\
+			       (__tuner)->name, ##__args);		\
+	} while (0)
+
+struct tuning_score {
+	int pmean;
+	int stddev;
+	int minlat;
+	unsigned int step;
+	unsigned int gravity;
+};
+
+struct tuner_state {
+	xnticks_t ideal;
+	xnticks_t step;
+	int min_lat;
+	int max_lat;
+	int prev_mean;
+	long long prev_sqs;
+	long long cur_sqs;
+	unsigned int sum;
+	unsigned int cur_samples;
+	unsigned int max_samples;
+};
+
+struct gravity_tuner {
+	const char *name;
+	unsigned int (*get_gravity)(struct gravity_tuner *tuner);
+	void (*set_gravity)(struct gravity_tuner *tuner, unsigned int gravity);
+	unsigned int (*adjust_gravity)(struct gravity_tuner *tuner, int adjust);
+	int (*init_tuner)(struct gravity_tuner *tuner);
+	int (*start_tuner)(struct gravity_tuner *tuner, xnticks_t start_time,
+			   xnticks_t interval);
+	void (*destroy_tuner)(struct gravity_tuner *tuner);
+	struct tuner_state state;
+	rtdm_event_t done;
+	int status;
+	int quiet;
+	struct tuning_score scores[AUTOTUNE_STEPS];
+	int nscores;
+	atomic_t refcount;
+};
+
+struct irq_gravity_tuner {
+	rtdm_timer_t timer;
+	struct gravity_tuner tuner;
+};
+
+struct kthread_gravity_tuner {
+	rtdm_task_t task;
+	rtdm_event_t barrier;
+	xnticks_t start_time;
+	xnticks_t interval;
+	struct gravity_tuner tuner;
+};
+
+struct uthread_gravity_tuner {
+	rtdm_timer_t timer;
+	rtdm_event_t pulse;
+	struct gravity_tuner tuner;
+};
+
+struct autotune_context {
+	struct gravity_tuner *tuner;
+	struct autotune_setup setup;
+	rtdm_lock_t tuner_lock;
+};
+
+static inline void init_tuner(struct gravity_tuner *tuner)
+{
+	rtdm_event_init(&tuner->done, 0);
+	tuner->status = 0;
+	atomic_set(&tuner->refcount, 0);
+}
+
+static inline void destroy_tuner(struct gravity_tuner *tuner)
+{
+	rtdm_event_destroy(&tuner->done);
+}
+
+static inline void done_sampling(struct gravity_tuner *tuner,
+				 int status)
+{
+	tuner->status = status;
+	rtdm_event_signal(&tuner->done);
+}
+
+static int add_sample(struct gravity_tuner *tuner, xnticks_t timestamp)
+{
+	struct tuner_state *state;
+	int n, delta, cur_mean;
+
+	state = &tuner->state;
+
+	delta = (int)(timestamp - state->ideal);
+	if (delta < state->min_lat)
+		state->min_lat = delta;
+	if (delta > state->max_lat)
+		state->max_lat = delta;
+	if (delta < 0)
+		delta = 0;
+
+	state->sum += delta;
+	state->ideal += state->step;
+	n = ++state->cur_samples;
+
+	/*
+	 * Knuth citing Welford in TAOCP (Vol 2), single-pass
+	 * computation of variance using a recurrence relation.
+	 */
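+	/*
+	 * I.e. with M(1) = x(1) and S(1) = 0:
+	 *   M(k) = M(k-1) + (x(k) - M(k-1)) / k
+	 *   S(k) = S(k-1) + (x(k) - M(k-1)) * (x(k) - M(k))
+	 * so the variance estimate is S(n) / (n - 1), which is what
+	 * build_score() derives from cur_sqs.
+	 */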
+	if (n == 1)
+		state->prev_mean = delta;
+	else {
+		cur_mean = state->prev_mean + (delta - state->prev_mean) / n;
+		state->cur_sqs = state->prev_sqs + (delta - state->prev_mean)
+			* (delta - cur_mean);
+		state->prev_mean = cur_mean;
+		state->prev_sqs = state->cur_sqs;
+	}
+
+	if (n >= state->max_samples) {
+		done_sampling(tuner, 0);
+		return 1;	/* Finished. */
+	}
+
+	return 0;	/* Keep going. */
+}
+
+static void timer_handler(rtdm_timer_t *timer)
+{
+	struct irq_gravity_tuner *irq_tuner;
+	xnticks_t now;
+
+	irq_tuner = container_of(timer, struct irq_gravity_tuner, timer);
+	now = xnclock_read_raw(&nkclock);
+
+	if (add_sample(&irq_tuner->tuner, now))
+		rtdm_timer_stop_in_handler(timer);
+}
+
+static int init_irq_tuner(struct gravity_tuner *tuner)
+{
+	struct irq_gravity_tuner *irq_tuner;
+	int ret;
+
+	irq_tuner = container_of(tuner, struct irq_gravity_tuner, tuner);
+	ret = rtdm_timer_init(&irq_tuner->timer, timer_handler, "autotune");
+	if (ret)
+		return ret;
+
+	init_tuner(tuner);
+
+	return 0;
+}
+
+static void destroy_irq_tuner(struct gravity_tuner *tuner)
+{
+	struct irq_gravity_tuner *irq_tuner;
+
+	irq_tuner = container_of(tuner, struct irq_gravity_tuner, tuner);
+	rtdm_timer_destroy(&irq_tuner->timer);
+	destroy_tuner(tuner);
+}
+
+static unsigned int get_irq_gravity(struct gravity_tuner *tuner)
+{
+	return nkclock.gravity.irq;
+}
+
+static void set_irq_gravity(struct gravity_tuner *tuner, unsigned int gravity)
+{
+	nkclock.gravity.irq = gravity;
+}
+
+static unsigned int adjust_irq_gravity(struct gravity_tuner *tuner, int adjust)
+{
+	return nkclock.gravity.irq += adjust;
+}
+
+static int start_irq_tuner(struct gravity_tuner *tuner,
+			   xnticks_t start_time, xnticks_t interval)
+{
+	struct irq_gravity_tuner *irq_tuner;
+
+	irq_tuner = container_of(tuner, struct irq_gravity_tuner, tuner);
+
+	return rtdm_timer_start(&irq_tuner->timer, start_time,
+				interval, RTDM_TIMERMODE_ABSOLUTE);
+}
+
+struct irq_gravity_tuner irq_tuner = {
+	.tuner = {
+		.name = "irqhand",
+		.init_tuner = init_irq_tuner,
+		.destroy_tuner = destroy_irq_tuner,
+		.get_gravity = get_irq_gravity,
+		.set_gravity = set_irq_gravity,
+		.adjust_gravity = adjust_irq_gravity,
+		.start_tuner = start_irq_tuner,
+	},
+};
+
+void task_handler(void *arg)
+{
+	struct kthread_gravity_tuner *k_tuner = arg;
+	xnticks_t now;
+	int ret = 0;
+
+	for (;;) {
+		if (rtdm_task_should_stop())
+			break;
+
+		ret = rtdm_event_wait(&k_tuner->barrier);
+		if (ret)
+			break;
+
+		ret = rtdm_task_set_period(&k_tuner->task, k_tuner->start_time,
+					   k_tuner->interval);
+		if (ret)
+			break;
+
+		for (;;) {
+			ret = rtdm_task_wait_period(NULL);
+			if (ret && ret != -ETIMEDOUT)
+				goto out;
+
+			now = xnclock_read_raw(&nkclock);
+			if (add_sample(&k_tuner->tuner, now)) {
+				rtdm_task_set_period(&k_tuner->task, 0, 0);
+				break;
+			}
+		}
+	}
+out:
+	done_sampling(&k_tuner->tuner, ret);
+	rtdm_task_destroy(&k_tuner->task);
+}
+
+static int init_kthread_tuner(struct gravity_tuner *tuner)
+{
+	struct kthread_gravity_tuner *k_tuner;
+
+	init_tuner(tuner);
+	k_tuner = container_of(tuner, struct kthread_gravity_tuner, tuner);
+	rtdm_event_init(&k_tuner->barrier, 0);
+
+	return rtdm_task_init(&k_tuner->task, "autotune",
+			      task_handler, k_tuner,
+			      RTDM_TASK_HIGHEST_PRIORITY, 0);
+}
+
+static void destroy_kthread_tuner(struct gravity_tuner *tuner)
+{
+	struct kthread_gravity_tuner *k_tuner;
+
+	k_tuner = container_of(tuner, struct kthread_gravity_tuner, tuner);
+	rtdm_task_destroy(&k_tuner->task);
+	rtdm_event_destroy(&k_tuner->barrier);
+}
+
+static unsigned int get_kthread_gravity(struct gravity_tuner *tuner)
+{
+	return nkclock.gravity.kernel;
+}
+
+static void set_kthread_gravity(struct gravity_tuner *tuner, unsigned int gravity)
+{
+	nkclock.gravity.kernel = gravity;
+}
+
+static unsigned int adjust_kthread_gravity(struct gravity_tuner *tuner, int adjust)
+{
+	return nkclock.gravity.kernel += adjust;
+}
+
+static int start_kthread_tuner(struct gravity_tuner *tuner,
+			       xnticks_t start_time, xnticks_t interval)
+{
+	struct kthread_gravity_tuner *k_tuner;
+
+	k_tuner = container_of(tuner, struct kthread_gravity_tuner, tuner);
+
+	k_tuner->start_time = start_time;
+	k_tuner->interval = interval;
+	rtdm_event_signal(&k_tuner->barrier);
+
+	return 0;
+}
+
+struct kthread_gravity_tuner kthread_tuner = {
+	.tuner = {
+		.name = "kthread",
+		.init_tuner = init_kthread_tuner,
+		.destroy_tuner = destroy_kthread_tuner,
+		.get_gravity = get_kthread_gravity,
+		.set_gravity = set_kthread_gravity,
+		.adjust_gravity = adjust_kthread_gravity,
+		.start_tuner = start_kthread_tuner,
+	},
+};
+
+static void pulse_handler(rtdm_timer_t *timer)
+{
+	struct uthread_gravity_tuner *u_tuner;
+
+	u_tuner = container_of(timer, struct uthread_gravity_tuner, timer);
+	rtdm_event_signal(&u_tuner->pulse);
+}
+
+static int init_uthread_tuner(struct gravity_tuner *tuner)
+{
+	struct uthread_gravity_tuner *u_tuner;
+	int ret;
+
+	u_tuner = container_of(tuner, struct uthread_gravity_tuner, tuner);
+	ret = rtdm_timer_init(&u_tuner->timer, pulse_handler, "autotune");
+	if (ret)
+		return ret;
+
+	xntimer_set_gravity(&u_tuner->timer, XNTIMER_UGRAVITY); /* gasp... */
+	rtdm_event_init(&u_tuner->pulse, 0);
+	init_tuner(tuner);
+
+	return 0;
+}
+
+static void destroy_uthread_tuner(struct gravity_tuner *tuner)
+{
+	struct uthread_gravity_tuner *u_tuner;
+
+	u_tuner = container_of(tuner, struct uthread_gravity_tuner, tuner);
+	rtdm_timer_destroy(&u_tuner->timer);
+	rtdm_event_destroy(&u_tuner->pulse);
+}
+
+static unsigned int get_uthread_gravity(struct gravity_tuner *tuner)
+{
+	return nkclock.gravity.user;
+}
+
+static void set_uthread_gravity(struct gravity_tuner *tuner, unsigned int gravity)
+{
+	nkclock.gravity.user = gravity;
+}
+
+static unsigned int adjust_uthread_gravity(struct gravity_tuner *tuner, int adjust)
+{
+	return nkclock.gravity.user += adjust;
+}
+
+static int start_uthread_tuner(struct gravity_tuner *tuner,
+			       xnticks_t start_time, xnticks_t interval)
+{
+	struct uthread_gravity_tuner *u_tuner;
+
+	u_tuner = container_of(tuner, struct uthread_gravity_tuner, tuner);
+
+	return rtdm_timer_start(&u_tuner->timer, start_time,
+				interval, RTDM_TIMERMODE_ABSOLUTE);
+}
+
+static int add_uthread_sample(struct gravity_tuner *tuner,
+			      nanosecs_abs_t user_timestamp)
+{
+	struct uthread_gravity_tuner *u_tuner;
+	int ret;
+
+	u_tuner = container_of(tuner, struct uthread_gravity_tuner, tuner);
+
+	if (user_timestamp &&
+	    add_sample(tuner, xnclock_ns_to_ticks(&nkclock, user_timestamp))) {
+		rtdm_timer_stop(&u_tuner->timer);
+		/* Tell the caller to park until next round. */
+		ret = -EPIPE;
+	} else
+		ret = rtdm_event_wait(&u_tuner->pulse);
+
+	return ret;
+}
+
+struct uthread_gravity_tuner uthread_tuner = {
+	.tuner = {
+		.name = "uthread",
+		.init_tuner = init_uthread_tuner,
+		.destroy_tuner = destroy_uthread_tuner,
+		.get_gravity = get_uthread_gravity,
+		.set_gravity = set_uthread_gravity,
+		.adjust_gravity = adjust_uthread_gravity,
+		.start_tuner = start_uthread_tuner,
+	},
+};
+
+static inline void build_score(struct gravity_tuner *tuner, int step)
+{
+	struct tuner_state *state = &tuner->state;
+	unsigned int variance, n;
+
+	n = state->cur_samples;
+	tuner->scores[step].pmean = state->sum / n;
+	variance = n > 1 ? xnarch_llimd(state->cur_sqs, 1, n - 1) : 0;
+	tuner->scores[step].stddev = int_sqrt(variance);
+	tuner->scores[step].minlat = state->min_lat;
+	tuner->scores[step].gravity = tuner->get_gravity(tuner);
+	tuner->scores[step].step = step;
+	tuner->nscores++;
+}
+
+static int cmp_score_mean(const void *c, const void *r)
+{
+	const struct tuning_score *sc = c, *sr = r;
+	return sc->pmean - sr->pmean;
+}
+
+static int cmp_score_stddev(const void *c, const void *r)
+{
+	const struct tuning_score *sc = c, *sr = r;
+	return sc->stddev - sr->stddev;
+}
+
+static int cmp_score_minlat(const void *c, const void *r)
+{
+	const struct tuning_score *sc = c, *sr = r;
+	return sc->minlat - sr->minlat;
+}
+
+static int cmp_score_gravity(const void *c, const void *r)
+{
+	const struct tuning_score *sc = c, *sr = r;
+	return sc->gravity - sr->gravity;
+}
+
+static int filter_mean(struct gravity_tuner *tuner)
+{
+	sort(tuner->scores, tuner->nscores, sizeof(struct tuning_score),
+	     cmp_score_mean, NULL);
+
+	/* Top half of the best pondered means. */
+
+	return (tuner->nscores + 1) / 2;
+}
+
+static int filter_stddev(struct gravity_tuner *tuner)
+{
+	sort(tuner->scores, tuner->nscores, sizeof(struct tuning_score),
+	     cmp_score_stddev, NULL);
+
+	/* Top half of the best standard deviations. */
+
+	return (tuner->nscores + 1) / 2;
+}
+
+static int filter_minlat(struct gravity_tuner *tuner)
+{
+	sort(tuner->scores, tuner->nscores, sizeof(struct tuning_score),
+	     cmp_score_minlat, NULL);
+
+	/* Top half of the minimum latencies. */
+
+	return (tuner->nscores + 1) / 2;
+}
+
+static int filter_gravity(struct gravity_tuner *tuner)
+{
+	sort(tuner->scores, tuner->nscores, sizeof(struct tuning_score),
+	     cmp_score_gravity, NULL);
+
+	/* Smallest gravity required among the shortest latencies. */
+
+	return tuner->nscores;
+}
+
+static void dump_scores(struct gravity_tuner *tuner)
+{
+	int n;
+
+	if (tuner->quiet)
+		return;
+
+	for (n = 0; n < tuner->nscores; n++)
+		printk(KERN_INFO
+		       ".. S%.2d pmean=%Ld stddev=%Lu minlat=%Lu gravity=%Lu\n",
+		       tuner->scores[n].step,
+		       xnclock_ticks_to_ns(&nkclock, tuner->scores[n].pmean),
+		       xnclock_ticks_to_ns(&nkclock, tuner->scores[n].stddev),
+		       xnclock_ticks_to_ns(&nkclock, tuner->scores[n].minlat),
+		       xnclock_ticks_to_ns(&nkclock, tuner->scores[n].gravity));
+}
+
+static inline void filter_score(struct gravity_tuner *tuner,
+				int (*filter)(struct gravity_tuner *tuner))
+{
+	tuner->nscores = filter(tuner);
+	dump_scores(tuner);
+}
+
+static int tune_gravity(struct gravity_tuner *tuner, int period)
+{
+	struct tuner_state *state = &tuner->state;
+	int ret, step, gravity_limit, adjust;
+	unsigned int orig_gravity;
+
+	state->step = xnclock_ns_to_ticks(&nkclock, period);
+	state->max_samples = SAMPLING_TIME / (period ?: 1);
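+	/* E.g. a 100 us period gives 500000000 / 100000 = 5000 samples
+	 * per tuning step. */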
+	orig_gravity = tuner->get_gravity(tuner);
+	tuner->set_gravity(tuner, 0);
+	tuner->nscores = 0;
+	/* Gravity adjustment step */
+	adjust = xnclock_ns_to_ticks(&nkclock, ADJUSTMENT_STEP) ?: 1;
+	gravity_limit = 0;
+	progress(tuner, "warming up...");
+
+	for (step = 0; step < WARMUP_STEPS + AUTOTUNE_STEPS; step++) {
+		state->ideal = xnclock_read_raw(&nkclock) + state->step * WARMUP_STEPS;
+		state->min_lat = xnclock_ns_to_ticks(&nkclock, SAMPLING_TIME);
+		state->max_lat = 0;
+		state->prev_mean = 0;
+		state->prev_sqs = 0;
+		state->cur_sqs = 0;
+		state->sum = 0;
+		state->cur_samples = 0;
+
+		ret = tuner->start_tuner(tuner,
+					 xnclock_ticks_to_ns(&nkclock, state->ideal),
+					 period);
+		if (ret)
+			goto fail;
+
+		/* Tuner stops when posting. */
+		ret = rtdm_event_wait(&tuner->done);
+		if (ret)
+			goto fail;
+
+		ret = tuner->status;
+		if (ret)
+			goto fail;
+
+		if (step < WARMUP_STEPS) {
+			if (state->min_lat > gravity_limit) {
+				gravity_limit = state->min_lat;
+				progress(tuner, "gravity limit set to %Lu ns (%d)",
+					 xnclock_ticks_to_ns(&nkclock, gravity_limit), state->min_lat);
+			}
+			continue;
+		}
+
+		/*
+		 * We should not be early by more than the gravity
+		 * value minus one tick, to account for the rounding
+		 * error involved when the timer frequency is lower
+		 * than 1e9 / ADJUSTMENT_STEP.
+		 */
+		if (state->min_lat < 0) {
+			if (tuner->get_gravity(tuner) < -state->min_lat - 1) {
+				printk(XENO_WARNING
+				       "autotune(%s) failed with early shot (%Ld ns)\n",
+				       tuner->name,
+				       xnclock_ticks_to_ns(&nkclock,
+						   -(tuner->get_gravity(tuner) +
+						     state->min_lat)));
+				ret = -EAGAIN;
+				goto fail;
+			}
+			break;
+		}
+
+		if (((step - WARMUP_STEPS) % 5) == 0)
+			progress(tuner, "calibrating... (slice %d)",
+				 (step - WARMUP_STEPS) / 5 + 1);
+
+		build_score(tuner, step - WARMUP_STEPS);
+
+		/*
+		 * Anticipating by more than the minimum latency
+		 * detected at warmup would make no sense: cap the
+		 * gravity we may try.
+		 */
+		if (tuner->adjust_gravity(tuner, adjust) > gravity_limit) {
+			progress(tuner, "beyond gravity limit at %Lu ns",
+				 xnclock_ticks_to_ns(&nkclock,
+						     tuner->get_gravity(tuner)));
+			break;
+		}
+	}
+
+	progress(tuner, "calibration scores");
+	dump_scores(tuner);
+	progress(tuner, "pondered mean filter");
+	filter_score(tuner, filter_mean);
+	progress(tuner, "standard deviation filter");
+	filter_score(tuner, filter_stddev);
+	progress(tuner, "minimum latency filter");
+	filter_score(tuner, filter_minlat);
+	progress(tuner, "gravity filter");
+	filter_score(tuner, filter_gravity);
+	tuner->set_gravity(tuner, tuner->scores[0].gravity);
+
+	return 0;
+fail:
+	tuner->set_gravity(tuner, orig_gravity);
+
+	return ret;
+}
+
+static int autotune_ioctl_nrt(struct rtdm_fd *fd, unsigned int request, void *arg)
+{
+	struct autotune_context *context;
+	struct autotune_setup setup;
+	struct gravity_tuner *tuner, *old_tuner;
+	rtdm_lockctx_t lock_ctx;
+	int ret;
+
+	switch (request) {
+	case AUTOTUNE_RTIOC_RESET:
+		xnclock_reset_gravity(&nkclock);
+		return 0;
+	case AUTOTUNE_RTIOC_IRQ:
+		tuner = &irq_tuner.tuner;
+		break;
+	case AUTOTUNE_RTIOC_KERN:
+		tuner = &kthread_tuner.tuner;
+		break;
+	case AUTOTUNE_RTIOC_USER:
+		tuner = &uthread_tuner.tuner;
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	ret = rtdm_copy_from_user(fd, &setup, arg, sizeof(setup));
+	if (ret)
+		return ret;
+
+	ret = tuner->init_tuner(tuner);
+	if (ret)
+		return ret;
+
+	context = rtdm_fd_to_private(fd);
+
+	rtdm_lock_get_irqsave(&context->tuner_lock, lock_ctx);
+
+	old_tuner = context->tuner;
+	if (old_tuner && atomic_read(&old_tuner->refcount) > 0) {
+		rtdm_lock_put_irqrestore(&context->tuner_lock, lock_ctx);
+		tuner->destroy_tuner(tuner);
+		return -EBUSY;
+	}
+
+	context->tuner = tuner;
+	context->setup = setup;
+
+	rtdm_lock_put_irqrestore(&context->tuner_lock, lock_ctx);
+
+	if (old_tuner)
+		old_tuner->destroy_tuner(old_tuner);
+
+	if (setup.quiet <= 1)
+		printk(XENO_INFO "autotune(%s) started\n", tuner->name);
+
+	return ret;
+}
+
+static int autotune_ioctl_rt(struct rtdm_fd *fd, unsigned int request, void *arg)
+{
+	struct autotune_context *context;
+	struct gravity_tuner *tuner;
+	rtdm_lockctx_t lock_ctx;
+	__u64 timestamp;
+	__u32 gravity;
+	int ret;
+
+	context = rtdm_fd_to_private(fd);
+
+	rtdm_lock_get_irqsave(&context->tuner_lock, lock_ctx);
+
+	tuner = context->tuner;
+	if (tuner)
+		atomic_inc(&tuner->refcount);
+
+	rtdm_lock_put_irqrestore(&context->tuner_lock, lock_ctx);
+
+	if (tuner == NULL)
+		return -ENOSYS;
+
+	switch (request) {
+	case AUTOTUNE_RTIOC_RUN:
+		tuner->quiet = context->setup.quiet;
+		ret = tune_gravity(tuner, context->setup.period);
+		if (ret)
+			break;
+		gravity = xnclock_ticks_to_ns(&nkclock,
+					      tuner->get_gravity(tuner));
+		ret = rtdm_safe_copy_to_user(fd, arg, &gravity,
+					     sizeof(gravity));
+		break;
+	case AUTOTUNE_RTIOC_PULSE:
+		if (tuner != &uthread_tuner.tuner) {
+			ret = -EINVAL;
+			break;
+		}
+		ret = rtdm_safe_copy_from_user(fd, &timestamp, arg,
+					       sizeof(timestamp));
+		if (ret)
+			break;
+		ret = add_uthread_sample(tuner, timestamp);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	atomic_dec(&tuner->refcount);
+
+	return ret;
+}
+
+static int autotune_open(struct rtdm_fd *fd, int oflags)
+{
+	struct autotune_context *context;
+
+	context = rtdm_fd_to_private(fd);
+	context->tuner = NULL;
+	rtdm_lock_init(&context->tuner_lock);
+
+	return 0;
+}
+
+static void autotune_close(struct rtdm_fd *fd)
+{
+	struct autotune_context *context;
+	struct gravity_tuner *tuner;
+
+	context = rtdm_fd_to_private(fd);
+	tuner = context->tuner;
+	if (tuner) {
+		if (context->setup.quiet <= 1)
+			printk(XENO_INFO "autotune finished [%Lui/%Luk/%Luu]\n",
+			       xnclock_ticks_to_ns(&nkclock,
+						   xnclock_get_gravity(&nkclock, irq)),
+			       xnclock_ticks_to_ns(&nkclock,
+						   xnclock_get_gravity(&nkclock, kernel)),
+			       xnclock_ticks_to_ns(&nkclock,
+						   xnclock_get_gravity(&nkclock, user)));
+		tuner->destroy_tuner(tuner);
+	}
+}
+
+static struct rtdm_driver autotune_driver = {
+	.profile_info		=	RTDM_PROFILE_INFO(autotune,
+							  RTDM_CLASS_AUTOTUNE,
+							  RTDM_SUBCLASS_AUTOTUNE,
+							  0),
+	.device_flags		=	RTDM_NAMED_DEVICE|RTDM_EXCLUSIVE,
+	.device_count		=	1,
+	.context_size		=	sizeof(struct autotune_context),
+	.ops = {
+		.open		=	autotune_open,
+		.ioctl_rt	=	autotune_ioctl_rt,
+		.ioctl_nrt	=	autotune_ioctl_nrt,
+		.close		=	autotune_close,
+	},
+};
+
+static struct rtdm_device device = {
+	.driver = &autotune_driver,
+	.label = "autotune",
+};
+
+static int __init autotune_init(void)
+{
+	return rtdm_dev_register(&device);
+}
+
+static void __exit autotune_exit(void)
+{
+	rtdm_dev_unregister(&device);
+}
+
+module_init(autotune_init);
+module_exit(autotune_exit);
+++ linux-patched/drivers/xenomai/net/Kconfig	2022-03-21 12:58:30.909874139 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/Makefile	1970-01-01 01:00:00.000000000 +0100
+menu "RTnet"
+
+config XENO_DRIVERS_NET
+    depends on m
+    select NET
+    tristate "RTnet, TCP/IP socket interface"
+
+if XENO_DRIVERS_NET
+
+config XENO_DRIVERS_RTNET_CHECKED
+    bool "Internal Bug Checks"
+    default n
+    help
+    Switch on if you face crashes when RTnet is running or if you suspect
+    any other RTnet-related issues. This feature will add a few sanity
+    checks at critical points that will produce warnings on the kernel
+    console in case certain internal bugs are detected.
+
+source "drivers/xenomai/net/stack/Kconfig"
+source "drivers/xenomai/net/drivers/Kconfig"
+source "drivers/xenomai/net/addons/Kconfig"
+
+endif
+
+endmenu
+++ linux-patched/drivers/xenomai/net/Makefile	2022-03-21 12:58:30.901874217 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:1 @
--- linux/drivers/xenomai/net/stack/socket.c	1970-01-01 01:00:00.000000000 +0100
+obj-$(CONFIG_XENO_DRIVERS_NET) += stack/ drivers/ addons/
+++ linux-patched/drivers/xenomai/net/stack/socket.c	2022-03-21 12:58:30.894874286 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtdev_mgr.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  stack/socket.c - sockets implementation for rtnet
+ *
+ *  Copyright (C) 1999       Lineo, Inc
+ *                1999, 2002 David A. Schleef <ds@schleef.org>
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/err.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <asm/bitops.h>
+
+#include <rtdm/net.h>
+#include <rtnet_internal.h>
+#include <rtnet_iovec.h>
+#include <rtnet_socket.h>
+#include <ipv4/protocol.h>
+
+#define SKB_POOL_CLOSED 0
+
+static unsigned int socket_rtskbs = DEFAULT_SOCKET_RTSKBS;
+module_param(socket_rtskbs, uint, 0444);
+MODULE_PARM_DESC(socket_rtskbs,
+		 "Default number of realtime socket buffers in socket pools");
+
+/************************************************************************
+ *  internal socket functions                                           *
+ ************************************************************************/
+
+int rt_bare_socket_init(struct rtdm_fd *fd, unsigned short protocol,
+			unsigned int priority, unsigned int pool_size)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	int err;
+
+	err = rtskb_pool_init(&sock->skb_pool, pool_size, NULL, fd);
+	if (err < 0)
+		return err;
+
+	sock->protocol = protocol;
+	sock->priority = priority;
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(rt_bare_socket_init);
+
+/***
+ *  rt_socket_init - initialises a new socket structure
+ */
+int rt_socket_init(struct rtdm_fd *fd, unsigned short protocol)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	unsigned int pool_size;
+
+	sock->flags = 0;
+	sock->callback_func = NULL;
+
+	rtskb_queue_init(&sock->incoming);
+
+	sock->timeout = 0;
+
+	rtdm_lock_init(&sock->param_lock);
+	rtdm_sem_init(&sock->pending_sem, 0);
+
+	pool_size = rt_bare_socket_init(fd, protocol,
+					RTSKB_PRIO_VALUE(SOCK_DEF_PRIO,
+							 RTSKB_DEF_RT_CHANNEL),
+					socket_rtskbs);
+	sock->pool_size = pool_size;
+	mutex_init(&sock->pool_nrt_lock);
+
+	if (pool_size < socket_rtskbs) {
+		/* fix statistics */
+		if (pool_size == 0)
+			rtskb_pools--;
+
+		rt_socket_cleanup(fd);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt_socket_init);
+
+/***
+ *  rt_socket_cleanup - releases resources allocated for the socket
+ */
+void rt_socket_cleanup(struct rtdm_fd *fd)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+
+	rtdm_sem_destroy(&sock->pending_sem);
+
+	mutex_lock(&sock->pool_nrt_lock);
+
+	set_bit(SKB_POOL_CLOSED, &sock->flags);
+
+	if (sock->pool_size > 0)
+		rtskb_pool_release(&sock->skb_pool);
+
+	mutex_unlock(&sock->pool_nrt_lock);
+}
+EXPORT_SYMBOL_GPL(rt_socket_cleanup);
+
+/***
+ *  rt_socket_common_ioctl
+ */
+int rt_socket_common_ioctl(struct rtdm_fd *fd, int request, void __user *arg)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	int ret = 0;
+	struct rtnet_callback *callback;
+	const unsigned int *val;
+	unsigned int _val;
+	const nanosecs_rel_t *timeout;
+	nanosecs_rel_t _timeout;
+	rtdm_lockctx_t context;
+
+	switch (request) {
+	case RTNET_RTIOC_XMITPARAMS:
+		val = rtnet_get_arg(fd, &_val, arg, sizeof(_val));
+		if (IS_ERR(val))
+			return PTR_ERR(val);
+		sock->priority = *val;
+		break;
+
+	case RTNET_RTIOC_TIMEOUT:
+		timeout = rtnet_get_arg(fd, &_timeout, arg, sizeof(_timeout));
+		if (IS_ERR(timeout))
+			return PTR_ERR(timeout);
+		sock->timeout = *timeout;
+		break;
+
+	case RTNET_RTIOC_CALLBACK:
+		if (rtdm_fd_is_user(fd))
+			return -EACCES;
+
+		rtdm_lock_get_irqsave(&sock->param_lock, context);
+
+		callback = arg;
+		sock->callback_func = callback->func;
+		sock->callback_arg = callback->arg;
+
+		rtdm_lock_put_irqrestore(&sock->param_lock, context);
+		break;
+
+	case RTNET_RTIOC_EXTPOOL:
+		val = rtnet_get_arg(fd, &_val, arg, sizeof(_val));
+		if (IS_ERR(val))
+			return PTR_ERR(val);
+
+		if (rtdm_in_rt_context())
+			return -ENOSYS;
+
+		mutex_lock(&sock->pool_nrt_lock);
+
+		if (test_bit(SKB_POOL_CLOSED, &sock->flags)) {
+			mutex_unlock(&sock->pool_nrt_lock);
+			return -EBADF;
+		}
+		ret = rtskb_pool_extend(&sock->skb_pool, *val);
+		sock->pool_size += ret;
+
+		mutex_unlock(&sock->pool_nrt_lock);
+
+		if (ret == 0 && *val > 0)
+			ret = -ENOMEM;
+
+		break;
+
+	case RTNET_RTIOC_SHRPOOL:
+		val = rtnet_get_arg(fd, &_val, arg, sizeof(_val));
+		if (IS_ERR(val))
+			return PTR_ERR(val);
+
+		if (rtdm_in_rt_context())
+			return -ENOSYS;
+
+		mutex_lock(&sock->pool_nrt_lock);
+
+		ret = rtskb_pool_shrink(&sock->skb_pool, *val);
+		sock->pool_size -= ret;
+
+		mutex_unlock(&sock->pool_nrt_lock);
+
+		if (ret == 0 && *val > 0)
+			ret = -EBUSY;
+
+		break;
+
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rt_socket_common_ioctl);
+
+/***
+ *  rt_socket_if_ioctl
+ */
+int rt_socket_if_ioctl(struct rtdm_fd *fd, int request, void __user *arg)
+{
+	struct rtnet_device *rtdev;
+	struct ifreq _ifr, *ifr, *u_ifr;
+	struct sockaddr_in _sin;
+	struct ifconf _ifc, *ifc, *u_ifc;
+	int ret = 0, size = 0, i;
+	short flags;
+
+	if (request == SIOCGIFCONF) {
+		u_ifc = arg;
+		ifc = rtnet_get_arg(fd, &_ifc, u_ifc, sizeof(_ifc));
+		if (IS_ERR(ifc))
+			return PTR_ERR(ifc);
+
+		for (u_ifr = ifc->ifc_req, i = 1; i <= MAX_RT_DEVICES;
+		     i++, u_ifr++) {
+			rtdev = rtdev_get_by_index(i);
+			if (rtdev == NULL)
+				continue;
+
+			if ((rtdev->flags & IFF_UP) == 0) {
+				rtdev_dereference(rtdev);
+				continue;
+			}
+
+			size += sizeof(struct ifreq);
+			if (size > ifc->ifc_len) {
+				rtdev_dereference(rtdev);
+				size = ifc->ifc_len;
+				break;
+			}
+
+			ret = rtnet_put_arg(fd, u_ifr->ifr_name, rtdev->name,
+					    IFNAMSIZ);
+			if (ret == 0) {
+				memset(&_sin, 0, sizeof(_sin));
+				_sin.sin_family = AF_INET;
+				_sin.sin_addr.s_addr = rtdev->local_ip;
+				ret = rtnet_put_arg(fd, &u_ifr->ifr_addr, &_sin,
+						    sizeof(_sin));
+			}
+
+			rtdev_dereference(rtdev);
+			if (ret)
+				return ret;
+		}
+
+		return rtnet_put_arg(fd, &u_ifc->ifc_len, &size, sizeof(size));
+	}
+
+	u_ifr = arg;
+	ifr = rtnet_get_arg(fd, &_ifr, u_ifr, sizeof(_ifr));
+	if (IS_ERR(ifr))
+		return PTR_ERR(ifr);
+
+	if (request == SIOCGIFNAME) {
+		rtdev = rtdev_get_by_index(ifr->ifr_ifindex);
+		if (rtdev == NULL)
+			return -ENODEV;
+		ret = rtnet_put_arg(fd, u_ifr->ifr_name, rtdev->name, IFNAMSIZ);
+		goto out;
+	}
+
+	rtdev = rtdev_get_by_name(ifr->ifr_name);
+	if (rtdev == NULL)
+		return -ENODEV;
+
+	switch (request) {
+	case SIOCGIFINDEX:
+		ret = rtnet_put_arg(fd, &u_ifr->ifr_ifindex, &rtdev->ifindex,
+				    sizeof(u_ifr->ifr_ifindex));
+		break;
+
+	case SIOCGIFFLAGS:
+		flags = rtdev->flags;
+		if ((ifr->ifr_flags & IFF_UP) &&
+		    (rtdev->link_state &
+		     (RTNET_LINK_STATE_PRESENT | RTNET_LINK_STATE_NOCARRIER)) ==
+			    RTNET_LINK_STATE_PRESENT)
+			flags |= IFF_RUNNING;
+		ret = rtnet_put_arg(fd, &u_ifr->ifr_flags, &flags,
+				    sizeof(u_ifr->ifr_flags));
+		break;
+
+	case SIOCGIFHWADDR:
+		ret = rtnet_put_arg(fd, &u_ifr->ifr_hwaddr.sa_data,
+				    rtdev->dev_addr, rtdev->addr_len);
+		if (!ret)
+			ret = rtnet_put_arg(
+				fd, &u_ifr->ifr_hwaddr.sa_family, &rtdev->type,
+				sizeof(u_ifr->ifr_hwaddr.sa_family));
+		break;
+
+	case SIOCGIFADDR:
+		memset(&_sin, 0, sizeof(_sin));
+		_sin.sin_family = AF_INET;
+		_sin.sin_addr.s_addr = rtdev->local_ip;
+		ret = rtnet_put_arg(fd, &u_ifr->ifr_addr, &_sin, sizeof(_sin));
+		break;
+
+	case SIOCETHTOOL:
+		if (rtdev->do_ioctl != NULL) {
+			if (rtdm_in_rt_context())
+				return -ENOSYS;
+			ret = rtdev->do_ioctl(rtdev, ifr, request);
+		} else
+			ret = -EOPNOTSUPP;
+		break;
+
+	case SIOCDEVPRIVATE ... SIOCDEVPRIVATE + 15:
+		if (rtdev->do_ioctl != NULL)
+			ret = rtdev->do_ioctl(rtdev, ifr, request);
+		else
+			ret = -EOPNOTSUPP;
+		break;
+
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+out:
+	rtdev_dereference(rtdev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rt_socket_if_ioctl);
+
+int rt_socket_select_bind(struct rtdm_fd *fd, rtdm_selector_t *selector,
+			  enum rtdm_selecttype type, unsigned fd_index)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+
+	switch (type) {
+	case XNSELECT_READ:
+		return rtdm_sem_select(&sock->pending_sem, selector,
+				       XNSELECT_READ, fd_index);
+	default:
+		return -EBADF;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(rt_socket_select_bind);
+
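+/***
+ *  rtnet_get_arg - get a kernel-accessible copy of an ioctl argument
+ *
+ *  For user-space callers the argument is copied into @tmp, for kernel
+ *  callers @src is returned as-is. Returns an ERR_PTR() on copy failure.
+ */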
+void *rtnet_get_arg(struct rtdm_fd *fd, void *tmp, const void *src, size_t len)
+{
+	int ret;
+
+	if (!rtdm_fd_is_user(fd))
+		return (void *)src;
+
+	ret = rtdm_copy_from_user(fd, tmp, src, len);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return tmp;
+}
+EXPORT_SYMBOL_GPL(rtnet_get_arg);
+
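+/***
+ *  rtnet_put_arg - copy a result back to the ioctl caller
+ *
+ *  For user-space callers the data is copied out with rtdm_copy_to_user(),
+ *  for kernel callers a plain memcpy() is done unless @dst == @src.
+ */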
+int rtnet_put_arg(struct rtdm_fd *fd, void *dst, const void *src, size_t len)
+{
+	if (!rtdm_fd_is_user(fd)) {
+		if (dst != src)
+			memcpy(dst, src, len);
+		return 0;
+	}
+
+	return rtdm_copy_to_user(fd, dst, src, len);
+}
+EXPORT_SYMBOL_GPL(rtnet_put_arg);
+++ linux-patched/drivers/xenomai/net/stack/rtdev_mgr.c	2022-03-21 12:58:30.886874364 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/stack_mgr.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  stack/rtdev_mgr.c - device error manager
+ *
+ *  Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/netdevice.h>
+
+#include <rtdev.h>
+#include <rtdm/net.h>
+#include <rtnet_internal.h>
+
+/***
+ *  rtnetif_err_rx: will be called from the driver
+ *
+ *
+ *  @rtdev - the network-device
+ */
+void rtnetif_err_rx(struct rtnet_device *rtdev)
+{
+}
+
+/***
+ *  rtnetif_err_tx: will be called from the driver
+ *
+ *
+ *  @rtdev - the network-device
+ */
+void rtnetif_err_tx(struct rtnet_device *rtdev)
+{
+}
+
+/***
+ *  do_rtdev_task
+ */
+/*static void do_rtdev_task(int mgr_id)
+{
+    struct rtnet_msg msg;
+    struct rtnet_mgr *mgr = (struct rtnet_mgr *)mgr_id;
+
+    while (1) {
+        rt_mbx_receive(&(mgr->mbx), &msg, sizeof(struct rtnet_msg));
+        if (msg.rtdev) {
+            rt_printk("RTnet: error on rtdev %s\n", msg.rtdev->name);
+        }
+    }
+}*/
+
+/***
+ *  rt_rtdev_connect
+ */
+void rt_rtdev_connect(struct rtnet_device *rtdev, struct rtnet_mgr *mgr)
+{
+	/*    rtdev->rtdev_mbx=&(mgr->mbx);*/
+}
+
+/***
+ *  rt_rtdev_disconnect
+ */
+void rt_rtdev_disconnect(struct rtnet_device *rtdev)
+{
+	/*    rtdev->rtdev_mbx=NULL;*/
+}
+
+/***
+ *  rt_rtdev_mgr_start
+ */
+int rt_rtdev_mgr_start(struct rtnet_mgr *mgr)
+{
+	return /*(rt_task_resume(&(mgr->task)))*/ 0;
+}
+
+/***
+ *  rt_rtdev_mgr_stop
+ */
+int rt_rtdev_mgr_stop(struct rtnet_mgr *mgr)
+{
+	return /*(rt_task_suspend(&(mgr->task)))*/ 0;
+}
+
+/***
+ *  rt_rtdev_mgr_init
+ */
+int rt_rtdev_mgr_init(struct rtnet_mgr *mgr)
+{
+	int ret = 0;
+
+	/*    if ( (ret=rt_mbx_init (&(mgr->mbx), sizeof(struct rtnet_msg))) )
+        return ret;
+    if ( (ret=rt_task_init(&(mgr->task), &do_rtdev_task, (int)mgr, 4096, RTNET_RTDEV_PRIORITY, 0, 0)) )
+        return ret;
+    if ( (ret=rt_task_resume(&(mgr->task))) )
+        return ret;*/
+
+	return (ret);
+}
+
+/***
+ *  rt_rtdev_mgr_delete
+ */
+void rt_rtdev_mgr_delete(struct rtnet_mgr *mgr)
+{
+	/*    rt_task_delete(&(mgr->task));
+    rt_mbx_delete(&(mgr->mbx));*/
+}
+
+EXPORT_SYMBOL_GPL(rtnetif_err_rx);
+EXPORT_SYMBOL_GPL(rtnetif_err_tx);
+
+EXPORT_SYMBOL_GPL(rt_rtdev_connect);
+EXPORT_SYMBOL_GPL(rt_rtdev_disconnect);
+++ linux-patched/drivers/xenomai/net/stack/stack_mgr.c	2022-03-21 12:58:30.879874432 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  stack/stack_mgr.c - Stack-Manager
+ *
+ *  Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *  Copyright (C) 2003-2006 Jan Kiszka <jan.kiszka@web.de>
+ *  Copyright (C) 2006 Jorge Almeida <j-almeida@criticalsoftware.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/moduleparam.h>
+
+#include <rtdev.h>
+#include <rtnet_internal.h>
+#include <rtskb_fifo.h>
+#include <stack_mgr.h>
+
+static unsigned int stack_mgr_prio = RTNET_DEF_STACK_PRIORITY;
+module_param(stack_mgr_prio, uint, 0444);
+MODULE_PARM_DESC(stack_mgr_prio, "Priority of the stack manager task");
+
+#if (CONFIG_XENO_DRIVERS_NET_RX_FIFO_SIZE &                                    \
+     (CONFIG_XENO_DRIVERS_NET_RX_FIFO_SIZE - 1)) != 0
+#error CONFIG_XENO_DRIVERS_NET_RX_FIFO_SIZE must be power of 2!
+#endif
+static DECLARE_RTSKB_FIFO(rx, CONFIG_XENO_DRIVERS_NET_RX_FIFO_SIZE);
+
+struct list_head rt_packets[RTPACKET_HASH_TBL_SIZE];
+#ifdef CONFIG_XENO_DRIVERS_NET_ETH_P_ALL
+struct list_head rt_packets_all;
+#endif /* CONFIG_XENO_DRIVERS_NET_ETH_P_ALL */
+DEFINE_RTDM_LOCK(rt_packets_lock);
+
+/***
+ *  rtdev_add_pack:         add protocol (Layer 3)
+ *  @pt:                    the new protocol
+ */
+int __rtdev_add_pack(struct rtpacket_type *pt, struct module *module)
+{
+	int ret = 0;
+	rtdm_lockctx_t context;
+
+	INIT_LIST_HEAD(&pt->list_entry);
+	pt->refcount = 0;
+	if (pt->trylock == NULL)
+		pt->trylock = rtdev_lock_pack;
+	if (pt->unlock == NULL)
+		pt->unlock = rtdev_unlock_pack;
+	pt->owner = module;
+
+	rtdm_lock_get_irqsave(&rt_packets_lock, context);
+
+	if (pt->type == htons(ETH_P_ALL))
+#ifdef CONFIG_XENO_DRIVERS_NET_ETH_P_ALL
+		list_add_tail(&pt->list_entry, &rt_packets_all);
+#else /* !CONFIG_XENO_DRIVERS_NET_ETH_P_ALL */
+		ret = -EINVAL;
+#endif /* CONFIG_XENO_DRIVERS_NET_ETH_P_ALL */
+	else
+		list_add_tail(
+			&pt->list_entry,
+			&rt_packets[ntohs(pt->type) & RTPACKET_HASH_KEY_MASK]);
+
+	rtdm_lock_put_irqrestore(&rt_packets_lock, context);
+
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(__rtdev_add_pack);
+
+/***
+ *  rtdev_remove_pack:  remove protocol (Layer 3)
+ *  @pt:                protocol
+ */
+void rtdev_remove_pack(struct rtpacket_type *pt)
+{
+	rtdm_lockctx_t context;
+
+	RTNET_ASSERT(pt != NULL, return;);
+
+	rtdm_lock_get_irqsave(&rt_packets_lock, context);
+	list_del(&pt->list_entry);
+	rtdm_lock_put_irqrestore(&rt_packets_lock, context);
+}
+
+EXPORT_SYMBOL_GPL(rtdev_remove_pack);
+
+/***
+ *  rtnetif_rx: will be called from the driver interrupt handler
+ *  (IRQs disabled!) and sends a message to the rtdev-owned stack manager
+ *
+ *  @skb - the packet
+ */
+void rtnetif_rx(struct rtskb *skb)
+{
+	RTNET_ASSERT(skb != NULL, return;);
+	RTNET_ASSERT(skb->rtdev != NULL, return;);
+
+	if (unlikely(rtskb_fifo_insert_inirq(&rx.fifo, skb) < 0)) {
+		rtdm_printk("RTnet: dropping packet in %s()\n", __FUNCTION__);
+		kfree_rtskb(skb);
+	}
+}
+
+EXPORT_SYMBOL_GPL(rtnetif_rx);
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_DRV_LOOPBACK)
+#define __DELIVER_PREFIX
+#else /* !CONFIG_XENO_DRIVERS_NET_DRV_LOOPBACK */
+#define __DELIVER_PREFIX static inline
+#endif /* CONFIG_XENO_DRIVERS_NET_DRV_LOOPBACK */
+
+__DELIVER_PREFIX void rt_stack_deliver(struct rtskb *rtskb)
+{
+	unsigned short hash;
+	struct rtpacket_type *pt_entry;
+	rtdm_lockctx_t context;
+	struct rtnet_device *rtdev = rtskb->rtdev;
+	int err;
+	int eth_p_all_hit = 0;
+
+	rtcap_report_incoming(rtskb);
+
+	rtskb->nh.raw = rtskb->data;
+
+	rtdm_lock_get_irqsave(&rt_packets_lock, context);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_ETH_P_ALL
+	eth_p_all_hit = 0;
+	list_for_each_entry (pt_entry, &rt_packets_all, list_entry) {
+		if (!pt_entry->trylock(pt_entry))
+			continue;
+		rtdm_lock_put_irqrestore(&rt_packets_lock, context);
+
+		pt_entry->handler(rtskb, pt_entry);
+
+		rtdm_lock_get_irqsave(&rt_packets_lock, context);
+		pt_entry->unlock(pt_entry);
+		eth_p_all_hit = 1;
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_ETH_P_ALL */
+
+	hash = ntohs(rtskb->protocol) & RTPACKET_HASH_KEY_MASK;
+
+	list_for_each_entry (pt_entry, &rt_packets[hash], list_entry)
+		if (pt_entry->type == rtskb->protocol) {
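+			/*
+			 * trylock takes a reference on the packet type entry
+			 * so the global rt_packets_lock can be dropped while
+			 * its handler runs; unlock releases it afterwards.
+			 */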
+			if (!pt_entry->trylock(pt_entry))
+				continue;
+			rtdm_lock_put_irqrestore(&rt_packets_lock, context);
+
+			err = pt_entry->handler(rtskb, pt_entry);
+
+			rtdm_lock_get_irqsave(&rt_packets_lock, context);
+			pt_entry->unlock(pt_entry);
+
+			if (likely(!err)) {
+				rtdm_lock_put_irqrestore(&rt_packets_lock,
+							 context);
+				return;
+			}
+		}
+
+	rtdm_lock_put_irqrestore(&rt_packets_lock, context);
+
+	/* Don't warn if ETH_P_ALL listeners were present or when running in
+	 * promiscuous mode (RTcap). */
+	if (unlikely(!eth_p_all_hit && !(rtdev->flags & IFF_PROMISC)))
+		rtdm_printk("RTnet: no one cared for packet with layer 3 "
+			    "protocol type 0x%04x\n",
+			    ntohs(rtskb->protocol));
+
+	kfree_rtskb(rtskb);
+}
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_DRV_LOOPBACK)
+EXPORT_SYMBOL_GPL(rt_stack_deliver);
+#endif /* CONFIG_XENO_DRIVERS_NET_DRV_LOOPBACK */
+
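+/*
+ * Stack manager task: waits on the manager event and drains the central
+ * RX FIFO, handing each rtskb to rt_stack_deliver().
+ */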
+static void rt_stack_mgr_task(void *arg)
+{
+	rtdm_event_t *mgr_event = &((struct rtnet_mgr *)arg)->event;
+	struct rtskb *rtskb;
+
+	while (!rtdm_task_should_stop()) {
+		if (rtdm_event_wait(mgr_event) < 0)
+			break;
+
+		/* we are the only reader => no locking required */
+		while ((rtskb = __rtskb_fifo_remove(&rx.fifo)))
+			rt_stack_deliver(rtskb);
+	}
+}
+
+/***
+ *  rt_stack_connect
+ */
+void rt_stack_connect(struct rtnet_device *rtdev, struct rtnet_mgr *mgr)
+{
+	rtdev->stack_event = &mgr->event;
+}
+
+EXPORT_SYMBOL_GPL(rt_stack_connect);
+
+/***
+ *  rt_stack_disconnect
+ */
+void rt_stack_disconnect(struct rtnet_device *rtdev)
+{
+	rtdev->stack_event = NULL;
+}
+
+EXPORT_SYMBOL_GPL(rt_stack_disconnect);
+
+/***
+ *  rt_stack_mgr_init
+ */
+int rt_stack_mgr_init(struct rtnet_mgr *mgr)
+{
+	int i;
+
+	rtskb_fifo_init(&rx.fifo, CONFIG_XENO_DRIVERS_NET_RX_FIFO_SIZE);
+
+	for (i = 0; i < RTPACKET_HASH_TBL_SIZE; i++)
+		INIT_LIST_HEAD(&rt_packets[i]);
+#ifdef CONFIG_XENO_DRIVERS_NET_ETH_P_ALL
+	INIT_LIST_HEAD(&rt_packets_all);
+#endif /* CONFIG_XENO_DRIVERS_NET_ETH_P_ALL */
+
+	rtdm_event_init(&mgr->event, 0);
+
+	return rtdm_task_init(&mgr->task, "rtnet-stack", rt_stack_mgr_task, mgr,
+			      stack_mgr_prio, 0);
+}
+
+/***
+ *  rt_stack_mgr_delete
+ */
+void rt_stack_mgr_delete(struct rtnet_mgr *mgr)
+{
+	rtdm_event_destroy(&mgr->event);
+	rtdm_task_destroy(&mgr->task);
+}
+++ linux-patched/drivers/xenomai/net/stack/Kconfig	2022-03-21 12:58:30.872874500 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/ipv4/ip_fragment.c	1970-01-01 01:00:00.000000000 +0100
+menu "Protocol Stack"
+    depends on XENO_DRIVERS_NET
+
+comment "Stack parameters"
+
+config XENO_DRIVERS_NET_RX_FIFO_SIZE
+    int "Size of central RX-FIFO"
+    depends on XENO_DRIVERS_NET
+    default 32
+    help
+    Size of the FIFO between the NICs and the stack manager task. Must be
+    a power of two! Effectively, only CONFIG_XENO_DRIVERS_NET_RX_FIFO_SIZE-1
+    slots will be usable.
+
+config XENO_DRIVERS_NET_ETH_P_ALL
+    depends on XENO_DRIVERS_NET
+    bool "Support for ETH_P_ALL"
+    help
+    Enables core support for registering listeners on all layer 3
+    protocols (ETH_P_ALL). Internally this is currently realised by
+    clone-copying incoming frames for those listeners; future versions
+    will implement buffer sharing for efficiency reasons. Use with
+    care: every ETH_P_ALL listener adds noticeable overhead to the
+    reception path.
+
+config XENO_DRIVERS_NET_RTWLAN
+    depends on XENO_DRIVERS_NET
+    bool "Real-Time WLAN"
+    help
+    Enables core support for real-time wireless LAN. RT-WLAN is based
+    on low-level access to 802.11-compliant adapters and is currently
+    in an experimental stage.
+
+comment "Protocols"
+
+source "drivers/xenomai/net/stack/ipv4/Kconfig"
+source "drivers/xenomai/net/stack/packet/Kconfig"
+source "drivers/xenomai/net/stack/rtmac/Kconfig"
+source "drivers/xenomai/net/stack/rtcfg/Kconfig"
+
+endmenu
+++ linux-patched/drivers/xenomai/net/stack/ipv4/ip_fragment.c	2022-03-21 12:58:30.864874578 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/ipv4/protocol.c	1970-01-01 01:00:00.000000000 +0100
+/* ip_fragment.c
+ *
+ * Copyright (C) 2002      Ulrich Marx <marx@kammer.uni-hannover.de>
+ *               2003      Mathias Koehrer <mathias_koehrer@yahoo.de>
+ *               2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <net/checksum.h>
+#include <net/ip.h>
+
+#include <rtdev.h>
+#include <rtnet_internal.h>
+#include <rtnet_socket.h>
+
+#include <linux/ip.h>
+#include <linux/in.h>
+
+#include <ipv4/ip_fragment.h>
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+#include <ipv4/ip_input.h>
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_PROXY */
+
+/*
+ * This define sets the number of incoming fragmented IP messages that
+ * can be handled in parallel.
+ */
+#define COLLECTOR_COUNT 10
+
+struct ip_collector {
+	int in_use;
+	__u32 saddr;
+	__u32 daddr;
+	__u16 id;
+	__u8 protocol;
+
+	struct rtskb_queue frags;
+	struct rtsocket *sock;
+	unsigned int buf_size;
+};
+
+static struct ip_collector collector[COLLECTOR_COUNT];
+
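+/*
+ * Grab a free collector slot for a new fragment chain, keyed by the first
+ * fragment's IP header; the fragment is dropped if all slots are in use.
+ */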
+static void alloc_collector(struct rtskb *skb, struct rtsocket *sock)
+{
+	int i;
+	rtdm_lockctx_t context;
+	struct ip_collector *p_coll;
+	struct iphdr *iph = skb->nh.iph;
+
+	/*
+     * Find a free collector
+     *
+     * Note: We once used to clean up probably outdated chains, but the
+     * algorithm was not stable enough and could cause incorrect drops even
+     * under medium load. If we run in overload, we will lose data anyhow.
+     * What we should do in the future is to account collectors per socket or
+     * socket owner and set quotas.
+     * Garbage collection is now performed only on socket close.
+     */
+	for (i = 0; i < COLLECTOR_COUNT; i++) {
+		p_coll = &collector[i];
+		rtdm_lock_get_irqsave(&p_coll->frags.lock, context);
+
+		if (!p_coll->in_use) {
+			p_coll->in_use = 1;
+			p_coll->buf_size = skb->len;
+			p_coll->frags.first = skb;
+			p_coll->frags.last = skb;
+			p_coll->saddr = iph->saddr;
+			p_coll->daddr = iph->daddr;
+			p_coll->id = iph->id;
+			p_coll->protocol = iph->protocol;
+			p_coll->sock = sock;
+
+			rtdm_lock_put_irqrestore(&p_coll->frags.lock, context);
+
+			return;
+		}
+
+		rtdm_lock_put_irqrestore(&p_coll->frags.lock, context);
+	}
+
+	rtdm_printk("RTnet: IP fragmentation - no collector available\n");
+	kfree_rtskb(skb);
+}
+
+/*
+ * Add the fragment to the collector matching the IP header of the passed
+ * rtskb. Returns the complete rtskb chain once the last fragment has
+ * arrived, NULL otherwise.
+ */
+static struct rtskb *add_to_collector(struct rtskb *skb, unsigned int offset,
+				      int more_frags)
+{
+	int i, err;
+	rtdm_lockctx_t context;
+	struct ip_collector *p_coll;
+	struct iphdr *iph = skb->nh.iph;
+	struct rtskb *first_skb;
+
+	/* Search in existing collectors */
+	for (i = 0; i < COLLECTOR_COUNT; i++) {
+		p_coll = &collector[i];
+		rtdm_lock_get_irqsave(&p_coll->frags.lock, context);
+
+		if (p_coll->in_use && (iph->saddr == p_coll->saddr) &&
+		    (iph->daddr == p_coll->daddr) && (iph->id == p_coll->id) &&
+		    (iph->protocol == p_coll->protocol)) {
+			first_skb = p_coll->frags.first;
+
+			/* Acquire the rtskb at the expense of the protocol pool */
+			if (rtskb_acquire(skb, &p_coll->sock->skb_pool) != 0) {
+				/* We have to drop this fragment => clean up the whole chain */
+				p_coll->in_use = 0;
+
+				rtdm_lock_put_irqrestore(&p_coll->frags.lock,
+							 context);
+
+#ifdef FRAG_DBG
+				rtdm_printk(
+					"RTnet: Compensation pool empty - IP fragments "
+					"dropped (saddr:%x, daddr:%x)\n",
+					iph->saddr, iph->daddr);
+#endif
+
+				kfree_rtskb(first_skb);
+				kfree_rtskb(skb);
+				return NULL;
+			}
+
+			/* Optimized version of __rtskb_queue_tail */
+			skb->next = NULL;
+			p_coll->frags.last->next = skb;
+			p_coll->frags.last = skb;
+
+			/* Extend the chain */
+			first_skb->chain_end = skb;
+
+			/* Sanity check: unordered fragments are not allowed! */
+			if (offset != p_coll->buf_size) {
+				/* We have to drop this fragment => clean up the whole chain */
+				p_coll->in_use = 0;
+				skb = first_skb;
+
+				rtdm_lock_put_irqrestore(&p_coll->frags.lock,
+							 context);
+				break; /* leave the for loop */
+			}
+
+			p_coll->buf_size += skb->len;
+
+			if (!more_frags) {
+				p_coll->in_use = 0;
+
+				err = rt_socket_reference(p_coll->sock);
+
+				rtdm_lock_put_irqrestore(&p_coll->frags.lock,
+							 context);
+
+				if (err < 0) {
+					kfree_rtskb(first_skb);
+					return NULL;
+				}
+
+				return first_skb;
+			} else {
+				rtdm_lock_put_irqrestore(&p_coll->frags.lock,
+							 context);
+				return NULL;
+			}
+		}
+
+		rtdm_lock_put_irqrestore(&p_coll->frags.lock, context);
+	}
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+	if (rt_ip_fallback_handler) {
+		__rtskb_push(skb, iph->ihl * 4);
+		rt_ip_fallback_handler(skb);
+		return NULL;
+	}
+#endif
+
+#ifdef FRAG_DBG
+	rtdm_printk("RTnet: Unordered IP fragment (saddr:%x, daddr:%x)"
+		    " - dropped\n",
+		    iph->saddr, iph->daddr);
+#endif
+
+	kfree_rtskb(skb);
+	return NULL;
+}
+
+/*
+ * Cleans up all collectors referring to the specified socket.
+ * This is now the only kind of garbage collection we do.
+ */
+void rt_ip_frag_invalidate_socket(struct rtsocket *sock)
+{
+	int i;
+	rtdm_lockctx_t context;
+	struct ip_collector *p_coll;
+
+	for (i = 0; i < COLLECTOR_COUNT; i++) {
+		p_coll = &collector[i];
+		rtdm_lock_get_irqsave(&p_coll->frags.lock, context);
+
+		if ((p_coll->in_use) && (p_coll->sock == sock)) {
+			p_coll->in_use = 0;
+			kfree_rtskb(p_coll->frags.first);
+		}
+
+		rtdm_lock_put_irqrestore(&p_coll->frags.lock, context);
+	}
+}
+EXPORT_SYMBOL_GPL(rt_ip_frag_invalidate_socket);
+
+/*
+ * Cleans up all existing collectors
+ */
+static void cleanup_all_collectors(void)
+{
+	int i;
+	rtdm_lockctx_t context;
+	struct ip_collector *p_coll;
+
+	for (i = 0; i < COLLECTOR_COUNT; i++) {
+		p_coll = &collector[i];
+		rtdm_lock_get_irqsave(&p_coll->frags.lock, context);
+
+		if (p_coll->in_use) {
+			p_coll->in_use = 0;
+			kfree_rtskb(p_coll->frags.first);
+		}
+
+		rtdm_lock_put_irqrestore(&p_coll->frags.lock, context);
+	}
+}
+
+/*
+ * This function returns an rtskb that contains the complete, accumulated IP message.
+ * If not all fragments of the IP message have been received yet, it returns NULL.
+ * Note: the IP header must have already been pulled from the rtskb!
+ */
+struct rtskb *rt_ip_defrag(struct rtskb *skb, struct rtinet_protocol *ipprot)
+{
+	unsigned int more_frags;
+	unsigned int offset;
+	struct rtsocket *sock;
+	struct iphdr *iph = skb->nh.iph;
+	int ret;
+
+	/* Parse the IP header */
+	offset = ntohs(iph->frag_off);
+	more_frags = offset & IP_MF;
+	offset &= IP_OFFSET;
+	offset <<= 3; /* offset is in 8-byte chunks */
+
+	/* First fragment? */
+	if (offset == 0) {
+		/* Get the destination socket */
+		if ((sock = ipprot->dest_socket(skb)) == NULL) {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+			if (rt_ip_fallback_handler) {
+				__rtskb_push(skb, iph->ihl * 4);
+				rt_ip_fallback_handler(skb);
+				return NULL;
+			}
+#endif
+			/* Drop the rtskb */
+			kfree_rtskb(skb);
+			return NULL;
+		}
+
+		/* Acquire the rtskb, to unlock the device skb pool */
+		ret = rtskb_acquire(skb, &sock->skb_pool);
+
+		if (ret != 0) {
+			/* Drop the rtskb */
+			kfree_rtskb(skb);
+		} else {
+			/* Allocates a new collector */
+			alloc_collector(skb, sock);
+		}
+
+		/* Packet is queued or freed, socket can be released */
+		rt_socket_dereference(sock);
+
+		return NULL;
+	} else {
+		/* Add to an existing collector */
+		return add_to_collector(skb, offset, more_frags);
+	}
+}
+
+int __init rt_ip_fragment_init(void)
+{
+	int i;
+
+	/* Probably not needed (static variable...) */
+	memset(collector, 0, sizeof(collector));
+
+	for (i = 0; i < COLLECTOR_COUNT; i++)
+		rtdm_lock_init(&collector[i].frags.lock);
+
+	return 0;
+}
+
+void rt_ip_fragment_cleanup(void)
+{
+	cleanup_all_collectors();
+}
+++ linux-patched/drivers/xenomai/net/stack/ipv4/protocol.c	2022-03-21 12:58:30.857874646 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/ipv4/af_inet.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  ipv4/protocol.c
+ *
+ *  rtnet - real-time networking subsystem
+ *  Copyright (C) 1999, 2000 Zentropic Computing, LLC
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/socket.h>
+#include <linux/in.h>
+
+#include <rtnet_socket.h>
+#include <ipv4/protocol.h>
+
+struct rtinet_protocol *rt_inet_protocols[MAX_RT_INET_PROTOCOLS];
+
+/***
+ * rt_inet_add_protocol
+ */
+void rt_inet_add_protocol(struct rtinet_protocol *prot)
+{
+	unsigned char hash = rt_inet_hashkey(prot->protocol);
+
+	if (rt_inet_protocols[hash] == NULL)
+		rt_inet_protocols[hash] = prot;
+}
+EXPORT_SYMBOL_GPL(rt_inet_add_protocol);
+
+/***
+ * rt_inet_del_protocol
+ */
+void rt_inet_del_protocol(struct rtinet_protocol *prot)
+{
+	unsigned char hash = rt_inet_hashkey(prot->protocol);
+
+	if (prot == rt_inet_protocols[hash])
+		rt_inet_protocols[hash] = NULL;
+}
+EXPORT_SYMBOL_GPL(rt_inet_del_protocol);
+
+/***
+ * rt_inet_socket - initialize an Internet socket
+ * @sock: socket structure
+ * @protocol: protocol id
+ */
+int rt_inet_socket(struct rtdm_fd *fd, int protocol)
+{
+	struct rtinet_protocol *prot;
+
+	if (protocol == 0)
+		switch (rtdm_fd_to_context(fd)->device->driver->socket_type) {
+		case SOCK_DGRAM:
+			protocol = IPPROTO_UDP;
+			break;
+		case SOCK_STREAM:
+			protocol = IPPROTO_TCP;
+			break;
+		}
+
+	prot = rt_inet_protocols[rt_inet_hashkey(protocol)];
+
+	/* create the socket (call the socket creator) */
+	if ((prot != NULL) && (prot->protocol == protocol))
+		return prot->init_socket(fd);
+	else {
+		rtdm_printk("RTnet: protocol with id %d not found\n", protocol);
+
+		return -ENOPROTOOPT;
+	}
+}
+EXPORT_SYMBOL_GPL(rt_inet_socket);
+++ linux-patched/drivers/xenomai/net/stack/ipv4/af_inet.c	2022-03-21 12:58:30.850874714 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/ipv4/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  ipv4/af_inet.c
+ *
+ *  rtnet - real-time networking subsystem
+ *  Copyright (C) 1999, 2000 Zentropic Computing, LLC
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include <ipv4_chrdev.h>
+#include <rtnet_internal.h>
+#include <rtnet_rtpc.h>
+#include <ipv4/arp.h>
+#include <ipv4/icmp.h>
+#include <ipv4/ip_output.h>
+#include <ipv4/protocol.h>
+#include <ipv4/route.h>
+
+MODULE_LICENSE("GPL");
+
+struct route_solicit_params {
+	struct rtnet_device *rtdev;
+	__u32 ip_addr;
+};
+
+#ifdef CONFIG_XENO_OPT_VFILE
+struct xnvfile_directory ipv4_proc_root;
+EXPORT_SYMBOL_GPL(ipv4_proc_root);
+#endif
+
+static int route_solicit_handler(struct rt_proc_call *call)
+{
+	struct route_solicit_params *param;
+	struct rtnet_device *rtdev;
+
+	param = rtpc_get_priv(call, struct route_solicit_params);
+	rtdev = param->rtdev;
+
+	if ((rtdev->flags & IFF_UP) == 0)
+		return -ENODEV;
+
+	rt_arp_solicit(rtdev, param->ip_addr);
+
+	return 0;
+}
+
+static void cleanup_route_solicit(void *priv_data)
+{
+	struct route_solicit_params *param;
+
+	param = (struct route_solicit_params *)priv_data;
+	rtdev_dereference(param->rtdev);
+}
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_ICMP
+static int ping_handler(struct rt_proc_call *call)
+{
+	struct ipv4_cmd *cmd;
+	int err;
+
+	cmd = rtpc_get_priv(call, struct ipv4_cmd);
+
+	rt_icmp_queue_echo_request(call);
+
+	err = rt_icmp_send_echo(cmd->args.ping.ip_addr, cmd->args.ping.id,
+				cmd->args.ping.sequence,
+				cmd->args.ping.msg_size);
+	if (err < 0) {
+		rt_icmp_dequeue_echo_request(call);
+		return err;
+	}
+
+	return -CALL_PENDING;
+}
+
+static void ping_complete_handler(struct rt_proc_call *call, void *priv_data)
+{
+	struct ipv4_cmd *cmd;
+	struct ipv4_cmd *usr_cmd = (struct ipv4_cmd *)priv_data;
+
+	if (rtpc_get_result(call) < 0)
+		return;
+
+	cmd = rtpc_get_priv(call, struct ipv4_cmd);
+	usr_cmd->args.ping.ip_addr = cmd->args.ping.ip_addr;
+	usr_cmd->args.ping.rtt = cmd->args.ping.rtt;
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_ICMP */
+
+static int ipv4_ioctl(struct rtnet_device *rtdev, unsigned int request,
+		      unsigned long arg)
+{
+	struct ipv4_cmd cmd;
+	struct route_solicit_params params;
+	int ret;
+
+	ret = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+	if (ret != 0)
+		return -EFAULT;
+
+	switch (request) {
+	case IOC_RT_HOST_ROUTE_ADD:
+		if (mutex_lock_interruptible(&rtdev->nrt_lock))
+			return -ERESTARTSYS;
+
+		ret = rt_ip_route_add_host(cmd.args.addhost.ip_addr,
+					   cmd.args.addhost.dev_addr, rtdev);
+
+		mutex_unlock(&rtdev->nrt_lock);
+		break;
+
+	case IOC_RT_HOST_ROUTE_SOLICIT:
+		if (mutex_lock_interruptible(&rtdev->nrt_lock))
+			return -ERESTARTSYS;
+
+		if (!rtdev_reference(rtdev)) {
+			mutex_unlock(&rtdev->nrt_lock);
+			return -EIDRM;
+		}
+
+		params.rtdev = rtdev;
+		params.ip_addr = cmd.args.solicit.ip_addr;
+
+		/* We need the rtpc wrapping because rt_arp_solicit can block on a
+	     * real-time lock in the NIC's xmit routine. */
+		ret = rtpc_dispatch_call(route_solicit_handler, 0, &params,
+					 sizeof(params), NULL,
+					 cleanup_route_solicit);
+
+		mutex_unlock(&rtdev->nrt_lock);
+		break;
+
+	case IOC_RT_HOST_ROUTE_DELETE:
+	case IOC_RT_HOST_ROUTE_DELETE_DEV:
+		ret = rt_ip_route_del_host(cmd.args.delhost.ip_addr, rtdev);
+		break;
+
+	case IOC_RT_HOST_ROUTE_GET:
+	case IOC_RT_HOST_ROUTE_GET_DEV:
+		ret = rt_ip_route_get_host(cmd.args.gethost.ip_addr,
+					   cmd.head.if_name,
+					   cmd.args.gethost.dev_addr, rtdev);
+		if (ret >= 0) {
+			if (copy_to_user((void *)arg, &cmd, sizeof(cmd)) != 0)
+				ret = -EFAULT;
+		}
+		break;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+	case IOC_RT_NET_ROUTE_ADD:
+		ret = rt_ip_route_add_net(cmd.args.addnet.net_addr,
+					  cmd.args.addnet.net_mask,
+					  cmd.args.addnet.gw_addr);
+		break;
+
+	case IOC_RT_NET_ROUTE_DELETE:
+		ret = rt_ip_route_del_net(cmd.args.delnet.net_addr,
+					  cmd.args.delnet.net_mask);
+		break;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_ICMP
+	case IOC_RT_PING:
+		ret = rtpc_dispatch_call(ping_handler, cmd.args.ping.timeout,
+					 &cmd, sizeof(cmd),
+					 ping_complete_handler, NULL);
+		if (ret >= 0) {
+			if (copy_to_user((void *)arg, &cmd, sizeof(cmd)) != 0)
+				ret = -EFAULT;
+		}
+		if (ret < 0)
+			rt_icmp_cleanup_echo_requests();
+		break;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_ICMP */
+
+	default:
+		ret = -ENOTTY;
+	}
+
+	return ret;
+}
+
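+/*
+ * Convert a dotted-quad string into an IPv4 address in network byte
+ * order. Minimal parser: malformed input is not thoroughly validated.
+ */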
+unsigned long rt_inet_aton(const char *ip)
+{
+	int p, n, c;
+	union {
+		unsigned long l;
+		char c[4];
+	} u;
+	p = n = 0;
+	while ((c = *ip++)) {
+		if (c != '.') {
+			n = n * 10 + c - '0';
+		} else {
+			if (n > 0xFF) {
+				return 0;
+			}
+			u.c[p++] = n;
+			n = 0;
+		}
+	}
+	u.c[3] = n;
+	return u.l;
+}
+
+static void rt_ip_ifup(struct rtnet_device *rtdev,
+		       struct rtnet_core_cmd *up_cmd)
+{
+	struct rtnet_device *tmp;
+	int i;
+
+	rt_ip_route_del_all(rtdev); /* cleanup routing table */
+
+	if (up_cmd->args.up.ip_addr != 0xFFFFFFFF) {
+		rtdev->local_ip = up_cmd->args.up.ip_addr;
+		rtdev->broadcast_ip = up_cmd->args.up.broadcast_ip;
+	}
+
+	if (rtdev->local_ip != 0) {
+		if (rtdev->flags & IFF_LOOPBACK) {
+			for (i = 0; i < MAX_RT_DEVICES; i++)
+				if ((tmp = rtdev_get_by_index(i)) != NULL) {
+					rt_ip_route_add_host(tmp->local_ip,
+							     rtdev->dev_addr,
+							     rtdev);
+					rtdev_dereference(tmp);
+				}
+		} else if ((tmp = rtdev_get_loopback()) != NULL) {
+			rt_ip_route_add_host(rtdev->local_ip, tmp->dev_addr,
+					     tmp);
+			rtdev_dereference(tmp);
+		}
+
+		if (rtdev->flags & IFF_BROADCAST)
+			rt_ip_route_add_host(up_cmd->args.up.broadcast_ip,
+					     rtdev->broadcast, rtdev);
+	}
+}
+
+static void rt_ip_ifdown(struct rtnet_device *rtdev)
+{
+	rt_ip_route_del_all(rtdev);
+}
+
+static struct rtdev_event_hook rtdev_hook = { .unregister_device = rt_ip_ifdown,
+					      .ifup = rt_ip_ifup,
+					      .ifdown = rt_ip_ifdown };
+
+static struct rtnet_ioctls ipv4_ioctls = { .service_name = "IPv4",
+					   .ioctl_type = RTNET_IOC_TYPE_IPV4,
+					   .handler = ipv4_ioctl };
+
+static int __init rt_ipv4_proto_init(void)
+{
+	int i;
+	int result;
+
+	/* Network-Layer */
+	rt_ip_init();
+	rt_arp_init();
+
+	/* Transport-Layer */
+	for (i = 0; i < MAX_RT_INET_PROTOCOLS; i++)
+		rt_inet_protocols[i] = NULL;
+
+	rt_icmp_init();
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	result = xnvfile_init_dir("ipv4", &ipv4_proc_root, &rtnet_proc_root);
+	if (result < 0)
+		goto err1;
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	if ((result = rt_ip_routing_init()) < 0)
+		goto err2;
+	if ((result = rtnet_register_ioctls(&ipv4_ioctls)) < 0)
+		goto err3;
+
+	rtdev_add_event_hook(&rtdev_hook);
+
+	return 0;
+
+err3:
+	rt_ip_routing_release();
+
+err2:
+#ifdef CONFIG_XENO_OPT_VFILE
+	xnvfile_destroy_dir(&ipv4_proc_root);
+err1:
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	rt_icmp_release();
+	rt_arp_release();
+	rt_ip_release();
+
+	return result;
+}
+
+static void __exit rt_ipv4_proto_release(void)
+{
+	rt_ip_release();
+
+	rtdev_del_event_hook(&rtdev_hook);
+	rtnet_unregister_ioctls(&ipv4_ioctls);
+	rt_ip_routing_release();
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	xnvfile_destroy_dir(&ipv4_proc_root);
+#endif
+
+	/* Transport-Layer */
+	rt_icmp_release();
+
+	/* Network-Layer */
+	rt_arp_release();
+}
+
+module_init(rt_ipv4_proto_init);
+module_exit(rt_ipv4_proto_release);
+
+EXPORT_SYMBOL_GPL(rt_inet_aton);
+++ linux-patched/drivers/xenomai/net/stack/ipv4/Kconfig	2022-03-21 12:58:30.842874793 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/ipv4/route.c	1970-01-01 01:00:00.000000000 +0100
+config XENO_DRIVERS_NET_RTIPV4
+    depends on XENO_DRIVERS_NET
+    tristate "Real-Time IPv4"
+    default y
+    help
+    Enables the real-time capable IPv4 support of RTnet. The protocol is
+    implemented as a separate module. Supplementary tools (rtroute,
+    rtping) and examples are provided as well. Moreover, RTcfg will
+    include IPv4 support when this option is switched on.
+
+    For further information see also Documentation/README.routing and
+    Documentation/README.ipfragmentation.
+
+config XENO_DRIVERS_NET_RTIPV4_ICMP
+    bool "ICMP support"
+    depends on XENO_DRIVERS_NET_RTIPV4
+    default y
+    help
+    Enables ICMP support of the RTnet Real-Time IPv4 protocol.
+
+    When the RTnet-Proxy is enabled while this feature is disabled, ICMP
+    will be forwarded to the Linux network stack.
+
+config XENO_DRIVERS_NET_RTIPV4_HOST_ROUTES
+    int "Maximum host routing table entries"
+    depends on XENO_DRIVERS_NET_RTIPV4
+    default 32
+    help
+    Each IPv4-supporting interface and each remote host that is directly
+    reachable via some output interface requires a host routing table
+    entry. If you run larger networks with many hosts per subnet, you may
+    have to increase this limit. Must be a power of 2!
+
+config XENO_DRIVERS_NET_RTIPV4_NETROUTING
+    bool "IP Network Routing"
+    depends on XENO_DRIVERS_NET_RTIPV4
+    help
+    Enables routing across IPv4 real-time networks. You will only require
+    this feature in complex networks, while switching it off for flat,
+    single-segment networks reduces code size and the worst-case routing
+    decision delay.
+
+    See Documentation/README.routing for further information.
+
+config XENO_DRIVERS_NET_RTIPV4_NET_ROUTES
+    int "Maximum network routing table entries"
+    depends on XENO_DRIVERS_NET_RTIPV4_NETROUTING
+    default 16
+    help
+    Each route describing a target network reachable via a router
+    requires an entry in the network routing table. If you run very
+    complex realtime networks, you may have to increase this limit. Must
+    be a power of 2!
+
+config XENO_DRIVERS_NET_RTIPV4_ROUTER
+    bool "IP Router"
+    depends on XENO_DRIVERS_NET_RTIPV4
+    help
+    When switched on, the RTnet station will be able to forward IPv4
+    packets that are not directed to the station itself. Typically used in
+    combination with CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING.
+
+    See Documentation/README.routing for further information.
+
+config XENO_DRIVERS_NET_RTIPV4_DEBUG
+    bool "RTipv4 Debugging"
+    depends on XENO_DRIVERS_NET_RTIPV4
+    default n
+    help
+    Enables debug message output of the RTipv4 layer. Typically, you
+    may want to turn this on for tracing issues in packet delivery.
+
+source "drivers/xenomai/net/stack/ipv4/udp/Kconfig"
+source "drivers/xenomai/net/stack/ipv4/tcp/Kconfig"
+++ linux-patched/drivers/xenomai/net/stack/ipv4/route.c	2022-03-21 12:58:30.835874861 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/ipv4/Makefile	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  ipv4/route.c - real-time routing
+ *
+ *  Copyright (C) 2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  Rewritten version of the original route by David Schleef and Ulrich Marx
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/moduleparam.h>
+#include <net/ip.h>
+
+#include <rtnet_internal.h>
+#include <rtnet_port.h>
+#include <rtnet_chrdev.h>
+#include <ipv4/af_inet.h>
+#include <ipv4/route.h>
+
+/* FIXME: should also become some tunable parameter */
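+/* Forwarded packets are queued at the middle of the priority range on the
+ * default RT channel. */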
+#define ROUTER_FORWARD_PRIO                                                    \
+	RTSKB_PRIO_VALUE(QUEUE_MAX_PRIO +                                      \
+				 (QUEUE_MIN_PRIO - QUEUE_MAX_PRIO + 1) / 2,    \
+			 RTSKB_DEF_RT_CHANNEL)
+
+/* First-level routing: explicit host routes */
+struct host_route {
+	struct host_route *next;
+	struct dest_route dest_host;
+};
+
+/* Second-level routing: routes to other networks */
+struct net_route {
+	struct net_route *next;
+	u32 dest_net_ip;
+	u32 dest_net_mask;
+	u32 gw_ip;
+};
+
+#if (CONFIG_XENO_DRIVERS_NET_RTIPV4_HOST_ROUTES &                              \
+     (CONFIG_XENO_DRIVERS_NET_RTIPV4_HOST_ROUTES - 1))
+#error CONFIG_XENO_DRIVERS_NET_RTIPV4_HOST_ROUTES must be power of 2
+#endif
+#if CONFIG_XENO_DRIVERS_NET_RTIPV4_HOST_ROUTES < 256
+#define HOST_HASH_TBL_SIZE 64
+#else
+#define HOST_HASH_TBL_SIZE                                                     \
+	((CONFIG_XENO_DRIVERS_NET_RTIPV4_HOST_ROUTES / 256) * 64)
+#endif
+#define HOST_HASH_KEY_MASK (HOST_HASH_TBL_SIZE - 1)
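+/* e.g. the default of 32 host routes yields a 64-slot hash table;
+ * 1024 routes would yield 256 slots */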
+
+static struct host_route host_routes[CONFIG_XENO_DRIVERS_NET_RTIPV4_HOST_ROUTES];
+static struct host_route *free_host_route;
+static int allocated_host_routes;
+static struct host_route *host_hash_tbl[HOST_HASH_TBL_SIZE];
+static DEFINE_RTDM_LOCK(host_table_lock);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+#if (CONFIG_XENO_DRIVERS_NET_RTIPV4_NET_ROUTES &                               \
+     (CONFIG_XENO_DRIVERS_NET_RTIPV4_NET_ROUTES - 1))
+#error CONFIG_XENO_DRIVERS_NET_RTIPV4_NET_ROUTES must be power of 2
+#endif
+#if CONFIG_XENO_DRIVERS_NET_RTIPV4_NET_ROUTES < 256
+#define NET_HASH_TBL_SIZE 64
+#else
+#define NET_HASH_TBL_SIZE                                                      \
+	((CONFIG_XENO_DRIVERS_NET_RTIPV4_NET_ROUTES / 256) * 64)
+#endif
+#define NET_HASH_KEY_MASK (NET_HASH_TBL_SIZE - 1)
+#define NET_HASH_KEY_SHIFT 8
+
+static struct net_route net_routes[CONFIG_XENO_DRIVERS_NET_RTIPV4_NET_ROUTES];
+static struct net_route *free_net_route;
+static int allocated_net_routes;
+static struct net_route *net_hash_tbl[NET_HASH_TBL_SIZE + 1];
+static unsigned int net_hash_key_shift = NET_HASH_KEY_SHIFT;
+static DEFINE_RTDM_LOCK(net_table_lock);
+
+module_param(net_hash_key_shift, uint, 0444);
+MODULE_PARM_DESC(net_hash_key_shift, "destination right shift for "
+				     "network hash key (default: 8)");
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+/***
+ *  proc filesystem section
+ */
+#ifdef CONFIG_XENO_OPT_VFILE
+static int rtnet_ipv4_route_show(struct xnvfile_regular_iterator *it, void *d)
+{
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+	u32 mask;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+	xnvfile_printf(it,
+		       "Host routes allocated/total:\t%d/%d\n"
+		       "Host hash table size:\t\t%d\n",
+		       allocated_host_routes,
+		       CONFIG_XENO_DRIVERS_NET_RTIPV4_HOST_ROUTES,
+		       HOST_HASH_TBL_SIZE);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+	mask = NET_HASH_KEY_MASK << net_hash_key_shift;
+	xnvfile_printf(it,
+		       "Network routes allocated/total:\t%d/%d\n"
+		       "Network hash table size:\t%d\n"
+		       "Network hash key shift/mask:\t%d/%08X\n",
+		       allocated_net_routes,
+		       CONFIG_XENO_DRIVERS_NET_RTIPV4_NET_ROUTES,
+		       NET_HASH_TBL_SIZE, net_hash_key_shift, mask);
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_ROUTER
+	xnvfile_printf(it, "IP Router:\t\t\tyes\n");
+#else
+	xnvfile_printf(it, "IP Router:\t\t\tno\n");
+#endif
+
+	return 0;
+}
+
+static int rtnet_ipv4_module_lock(struct xnvfile *vfile)
+{
+	bool res = try_module_get(THIS_MODULE);
+	if (!res)
+		return -EIDRM;
+
+	return 0;
+}
+
+static void rtnet_ipv4_module_unlock(struct xnvfile *vfile)
+{
+	module_put(THIS_MODULE);
+}
+
+static struct xnvfile_lock_ops rtnet_ipv4_module_lock_ops = {
+	.get = rtnet_ipv4_module_lock,
+	.put = rtnet_ipv4_module_unlock,
+};
+
+static struct xnvfile_regular_ops rtnet_ipv4_route_vfile_ops = {
+	.show = rtnet_ipv4_route_show,
+};
+
+static struct xnvfile_regular rtnet_ipv4_route_vfile = {
+    .entry = {
+	.lockops = &rtnet_ipv4_module_lock_ops,
+    },
+    .ops = &rtnet_ipv4_route_vfile_ops,
+};
+
+static rtdm_lockctx_t rtnet_ipv4_host_route_lock_ctx;
+
+static int rtnet_ipv4_host_route_lock(struct xnvfile *vfile)
+{
+	rtdm_lock_get_irqsave(&host_table_lock, rtnet_ipv4_host_route_lock_ctx);
+	return 0;
+}
+
+static void rtnet_ipv4_host_route_unlock(struct xnvfile *vfile)
+{
+	rtdm_lock_put_irqrestore(&host_table_lock,
+				 rtnet_ipv4_host_route_lock_ctx);
+}
+
+static struct xnvfile_lock_ops rtnet_ipv4_host_route_lock_ops = {
+	.get = rtnet_ipv4_host_route_lock,
+	.put = rtnet_ipv4_host_route_unlock,
+};
+
+struct rtnet_ipv4_host_route_priv {
+	unsigned key;
+	struct host_route *entry_ptr;
+};
+
+struct rtnet_ipv4_host_route_data {
+	int key;
+	char name[IFNAMSIZ];
+	struct dest_route dest_host;
+};
+
+struct xnvfile_rev_tag host_route_tag;
+
+static void *rtnet_ipv4_host_route_begin(struct xnvfile_snapshot_iterator *it)
+{
+	struct rtnet_ipv4_host_route_priv *priv = xnvfile_iterator_priv(it);
+	struct rtnet_ipv4_host_route_data *data;
+	unsigned routes;
+	int err;
+
+	routes = allocated_host_routes;
+	if (!routes)
+		return VFILE_SEQ_EMPTY;
+
+	data = kmalloc(sizeof(*data) * routes, GFP_KERNEL);
+	if (data == NULL)
+		return NULL;
+
+	err = rtnet_ipv4_module_lock(NULL);
+	if (err < 0) {
+		kfree(data);
+		return VFILE_SEQ_EMPTY;
+	}
+
+	priv->key = -1;
+	priv->entry_ptr = NULL;
+	return data;
+}
+
+static void rtnet_ipv4_host_route_end(struct xnvfile_snapshot_iterator *it,
+				      void *buf)
+{
+	rtnet_ipv4_module_unlock(NULL);
+	kfree(buf);
+}
+
+static int rtnet_ipv4_host_route_next(struct xnvfile_snapshot_iterator *it,
+				      void *data)
+{
+	struct rtnet_ipv4_host_route_priv *priv = xnvfile_iterator_priv(it);
+	struct rtnet_ipv4_host_route_data *p = data;
+	struct rtnet_device *rtdev;
+
+	if (priv->entry_ptr == NULL) {
+		if (++priv->key >= HOST_HASH_TBL_SIZE)
+			return 0;
+
+		priv->entry_ptr = host_hash_tbl[priv->key];
+		if (priv->entry_ptr == NULL)
+			return VFILE_SEQ_SKIP;
+	}
+
+	rtdev = priv->entry_ptr->dest_host.rtdev;
+
+	if (!rtdev_reference(rtdev))
+		return -EIDRM;
+
+	memcpy(&p->name, rtdev->name, sizeof(p->name));
+
+	rtdev_dereference(rtdev);
+
+	p->key = priv->key;
+
+	memcpy(&p->dest_host, &priv->entry_ptr->dest_host,
+	       sizeof(p->dest_host));
+
+	priv->entry_ptr = priv->entry_ptr->next;
+
+	return 1;
+}
+
+static int rtnet_ipv4_host_route_show(struct xnvfile_snapshot_iterator *it,
+				      void *data)
+{
+	struct rtnet_ipv4_host_route_data *p = data;
+
+	if (p == NULL) {
+		xnvfile_printf(it, "Hash\tDestination\tHW Address\t\tDevice\n");
+		return 0;
+	}
+
+	xnvfile_printf(it,
+		       "%02X\t%u.%u.%u.%-3u\t"
+		       "%02X:%02X:%02X:%02X:%02X:%02X\t%s\n",
+		       p->key, NIPQUAD(p->dest_host.ip),
+		       p->dest_host.dev_addr[0], p->dest_host.dev_addr[1],
+		       p->dest_host.dev_addr[2], p->dest_host.dev_addr[3],
+		       p->dest_host.dev_addr[4], p->dest_host.dev_addr[5],
+		       p->name);
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops rtnet_ipv4_host_route_vfile_ops = {
+	.begin = rtnet_ipv4_host_route_begin,
+	.end = rtnet_ipv4_host_route_end,
+	.next = rtnet_ipv4_host_route_next,
+	.show = rtnet_ipv4_host_route_show,
+};
+
+static struct xnvfile_snapshot rtnet_ipv4_host_route_vfile = {
+    .entry = {
+	.lockops = &rtnet_ipv4_host_route_lock_ops,
+    },
+    .privsz = sizeof(struct rtnet_ipv4_host_route_priv),
+    .datasz = sizeof(struct rtnet_ipv4_host_route_data),
+    .tag = &host_route_tag,
+    .ops = &rtnet_ipv4_host_route_vfile_ops,
+};
+
+static struct xnvfile_link rtnet_ipv4_arp_vfile;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+static rtdm_lockctx_t rtnet_ipv4_net_route_lock_ctx;
+
+static int rtnet_ipv4_net_route_lock(struct xnvfile *vfile)
+{
+	rtdm_lock_get_irqsave(&net_table_lock, rtnet_ipv4_net_route_lock_ctx);
+	return 0;
+}
+
+static void rtnet_ipv4_net_route_unlock(struct xnvfile *vfile)
+{
+	rtdm_lock_put_irqrestore(&net_table_lock,
+				 rtnet_ipv4_net_route_lock_ctx);
+}
+
+static struct xnvfile_lock_ops rtnet_ipv4_net_route_lock_ops = {
+	.get = rtnet_ipv4_net_route_lock,
+	.put = rtnet_ipv4_net_route_unlock,
+};
+
+struct rtnet_ipv4_net_route_priv {
+	unsigned key;
+	struct net_route *entry_ptr;
+};
+
+struct rtnet_ipv4_net_route_data {
+	int key;
+	u32 dest_net_ip;
+	u32 dest_net_mask;
+	u32 gw_ip;
+};
+
+struct xnvfile_rev_tag net_route_tag;
+
+static void *rtnet_ipv4_net_route_begin(struct xnvfile_snapshot_iterator *it)
+{
+	struct rtnet_ipv4_net_route_priv *priv = xnvfile_iterator_priv(it);
+	struct rtnet_ipv4_net_route_data *data;
+	unsigned routes;
+	int err;
+
+	routes = allocated_net_routes;
+	if (!routes)
+		return VFILE_SEQ_EMPTY;
+
+	data = kmalloc(sizeof(*data) * routes, GFP_KERNEL);
+	if (data == NULL)
+		return NULL;
+
+	err = rtnet_ipv4_module_lock(NULL);
+	if (err < 0) {
+		kfree(data);
+		return VFILE_SEQ_EMPTY;
+	}
+
+	priv->key = -1;
+	priv->entry_ptr = NULL;
+	return data;
+}
+
+static void rtnet_ipv4_net_route_end(struct xnvfile_snapshot_iterator *it,
+				     void *buf)
+{
+	rtnet_ipv4_module_unlock(NULL);
+	kfree(buf);
+}
+
+static int rtnet_ipv4_net_route_next(struct xnvfile_snapshot_iterator *it,
+				     void *data)
+{
+	struct rtnet_ipv4_net_route_priv *priv = xnvfile_iterator_priv(it);
+	struct rtnet_ipv4_net_route_data *p = data;
+
+	if (priv->entry_ptr == NULL) {
+		if (++priv->key >= NET_HASH_TBL_SIZE + 1)
+			return 0;
+
+		priv->entry_ptr = net_hash_tbl[priv->key];
+		if (priv->entry_ptr == NULL)
+			return VFILE_SEQ_SKIP;
+	}
+
+	p->key = priv->key;
+	p->dest_net_ip = priv->entry_ptr->dest_net_ip;
+	p->dest_net_mask = priv->entry_ptr->dest_net_mask;
+	p->gw_ip = priv->entry_ptr->gw_ip;
+
+	priv->entry_ptr = priv->entry_ptr->next;
+
+	return 1;
+}
+
+static int rtnet_ipv4_net_route_show(struct xnvfile_snapshot_iterator *it,
+				     void *data)
+{
+	struct rtnet_ipv4_net_route_data *p = data;
+
+	if (p == NULL) {
+		xnvfile_printf(it, "Hash\tDestination\tMask\t\t\tGateway\n");
+		return 0;
+	}
+
+	if (p->key < NET_HASH_TBL_SIZE)
+		xnvfile_printf(it,
+			       "%02X\t%u.%u.%u.%-3u\t%u.%u.%u.%-3u"
+			       "\t\t%u.%u.%u.%-3u\n",
+			       p->key, NIPQUAD(p->dest_net_ip),
+			       NIPQUAD(p->dest_net_mask), NIPQUAD(p->gw_ip));
+	else
+		xnvfile_printf(it,
+			       "*\t%u.%u.%u.%-3u\t%u.%u.%u.%-3u\t\t"
+			       "%u.%u.%u.%-3u\n",
+			       NIPQUAD(p->dest_net_ip),
+			       NIPQUAD(p->dest_net_mask), NIPQUAD(p->gw_ip));
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops rtnet_ipv4_net_route_vfile_ops = {
+	.begin = rtnet_ipv4_net_route_begin,
+	.end = rtnet_ipv4_net_route_end,
+	.next = rtnet_ipv4_net_route_next,
+	.show = rtnet_ipv4_net_route_show,
+};
+
+static struct xnvfile_snapshot rtnet_ipv4_net_route_vfile = {
+    .entry = {
+	.lockops = &rtnet_ipv4_net_route_lock_ops,
+    },
+    .privsz = sizeof(struct rtnet_ipv4_net_route_priv),
+    .datasz = sizeof(struct rtnet_ipv4_net_route_data),
+    .tag = &net_route_tag,
+    .ops = &rtnet_ipv4_net_route_vfile_ops,
+};
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+static int __init rt_route_proc_register(void)
+{
+	int err;
+
+	err = xnvfile_init_regular("route", &rtnet_ipv4_route_vfile,
+				   &ipv4_proc_root);
+	if (err < 0)
+		goto err1;
+
+	err = xnvfile_init_snapshot("host_route", &rtnet_ipv4_host_route_vfile,
+				    &ipv4_proc_root);
+	if (err < 0)
+		goto err2;
+
+	/* create "arp" as an alias for "host_route" */
+	err = xnvfile_init_link("arp", "host_route", &rtnet_ipv4_arp_vfile,
+				&ipv4_proc_root);
+	if (err < 0)
+		goto err3;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+	err = xnvfile_init_snapshot("net_route", &rtnet_ipv4_net_route_vfile,
+				    &ipv4_proc_root);
+	if (err < 0)
+		goto err4;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+	return 0;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+err4:
+	xnvfile_destroy_link(&rtnet_ipv4_arp_vfile);
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+err3:
+	xnvfile_destroy_snapshot(&rtnet_ipv4_host_route_vfile);
+
+err2:
+	xnvfile_destroy_regular(&rtnet_ipv4_route_vfile);
+
+err1:
+	printk("RTnet: unable to initialize /proc entries (route)\n");
+	return err;
+}
+
+static void rt_route_proc_unregister(void)
+{
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+	xnvfile_destroy_snapshot(&rtnet_ipv4_net_route_vfile);
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+	xnvfile_destroy_link(&rtnet_ipv4_arp_vfile);
+	xnvfile_destroy_snapshot(&rtnet_ipv4_host_route_vfile);
+	xnvfile_destroy_regular(&rtnet_ipv4_route_vfile);
+}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+/***
+ *  rt_alloc_host_route - allocates new host route
+ */
+static inline struct host_route *rt_alloc_host_route(void)
+{
+	rtdm_lockctx_t context;
+	struct host_route *rt;
+
+	rtdm_lock_get_irqsave(&host_table_lock, context);
+
+	if ((rt = free_host_route) != NULL) {
+		free_host_route = rt->next;
+		allocated_host_routes++;
+	}
+
+	rtdm_lock_put_irqrestore(&host_table_lock, context);
+
+	return rt;
+}
+
+/***
+ *  rt_free_host_route - releases host route
+ *
+ *  Note: must be called with host_table_lock held
+ */
+static inline void rt_free_host_route(struct host_route *rt)
+{
+	rt->next = free_host_route;
+	free_host_route = rt;
+	allocated_host_routes--;
+}
+
+/***
+ *  rt_ip_route_add_host: add or update host route
+ */
+int rt_ip_route_add_host(u32 addr, unsigned char *dev_addr,
+			 struct rtnet_device *rtdev)
+{
+	rtdm_lockctx_t context;
+	struct host_route *new_route;
+	struct host_route *rt;
+	unsigned int key;
+	int ret = 0;
+
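+	/*
+	 * Only add routes while the interface is up, and serialize
+	 * concurrent additions per device via PRIV_FLAG_ADDING_ROUTE.
+	 */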
+	rtdm_lock_get_irqsave(&rtdev->rtdev_lock, context);
+
+	if ((!test_bit(PRIV_FLAG_UP, &rtdev->priv_flags) ||
+	     test_and_set_bit(PRIV_FLAG_ADDING_ROUTE, &rtdev->priv_flags))) {
+		rtdm_lock_put_irqrestore(&rtdev->rtdev_lock, context);
+		return -EBUSY;
+	}
+
+	rtdm_lock_put_irqrestore(&rtdev->rtdev_lock, context);
+
+	if ((new_route = rt_alloc_host_route()) != NULL) {
+		new_route->dest_host.ip = addr;
+		new_route->dest_host.rtdev = rtdev;
+		memcpy(new_route->dest_host.dev_addr, dev_addr,
+		       rtdev->addr_len);
+	}
+
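+	/* Host routes are hashed on the low-order bits of the address. */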
+	key = ntohl(addr) & HOST_HASH_KEY_MASK;
+
+	rtdm_lock_get_irqsave(&host_table_lock, context);
+
+	xnvfile_touch_tag(&host_route_tag);
+
+	rt = host_hash_tbl[key];
+	while (rt != NULL) {
+		if ((rt->dest_host.ip == addr) &&
+		    (rt->dest_host.rtdev->local_ip == rtdev->local_ip)) {
+			rt->dest_host.rtdev = rtdev;
+			memcpy(rt->dest_host.dev_addr, dev_addr,
+			       rtdev->addr_len);
+
+			if (new_route)
+				rt_free_host_route(new_route);
+
+			rtdm_lock_put_irqrestore(&host_table_lock, context);
+
+			goto out;
+		}
+
+		rt = rt->next;
+	}
+
+	if (new_route) {
+		new_route->next = host_hash_tbl[key];
+		host_hash_tbl[key] = new_route;
+
+		rtdm_lock_put_irqrestore(&host_table_lock, context);
+	} else {
+		rtdm_lock_put_irqrestore(&host_table_lock, context);
+
+		/*ERRMSG*/ rtdm_printk(
+			"RTnet: no more host routes available\n");
+		ret = -ENOBUFS;
+	}
+
+out:
+	clear_bit(PRIV_FLAG_ADDING_ROUTE, &rtdev->priv_flags);
+
+	return ret;
+}
+
+/***
+ *  rt_ip_route_del_host - deletes specified host route
+ */
+int rt_ip_route_del_host(u32 addr, struct rtnet_device *rtdev)
+{
+	rtdm_lockctx_t context;
+	struct host_route *rt;
+	struct host_route **last_ptr;
+	unsigned int key;
+
+	key = ntohl(addr) & HOST_HASH_KEY_MASK;
+	last_ptr = &host_hash_tbl[key];
+
+	rtdm_lock_get_irqsave(&host_table_lock, context);
+
+	rt = host_hash_tbl[key];
+	while (rt != NULL) {
+		if ((rt->dest_host.ip == addr) &&
+		    (!rtdev ||
+		     (rt->dest_host.rtdev->local_ip == rtdev->local_ip))) {
+			*last_ptr = rt->next;
+
+			rt_free_host_route(rt);
+
+			xnvfile_touch_tag(&host_route_tag);
+
+			rtdm_lock_put_irqrestore(&host_table_lock, context);
+
+			return 0;
+		}
+
+		last_ptr = &rt->next;
+		rt = rt->next;
+	}
+
+	rtdm_lock_put_irqrestore(&host_table_lock, context);
+
+	return -ENOENT;
+}
+
+/***
+ *  rt_ip_route_del_all - deletes all routes associated with a specified device
+ */
+void rt_ip_route_del_all(struct rtnet_device *rtdev)
+{
+	rtdm_lockctx_t context;
+	struct host_route *host_rt;
+	struct host_route **last_host_ptr;
+	unsigned int key;
+	u32 ip;
+
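+	/*
+	 * Rescan the bucket from the start after every removal, since the
+	 * lock is dropped in between and the chain may have changed.
+	 */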
+	for (key = 0; key < HOST_HASH_TBL_SIZE; key++) {
+	host_start_over:
+		last_host_ptr = &host_hash_tbl[key];
+
+		rtdm_lock_get_irqsave(&host_table_lock, context);
+
+		host_rt = host_hash_tbl[key];
+		while (host_rt != NULL) {
+			if (host_rt->dest_host.rtdev == rtdev) {
+				*last_host_ptr = host_rt->next;
+
+				rt_free_host_route(host_rt);
+
+				rtdm_lock_put_irqrestore(&host_table_lock,
+							 context);
+
+				goto host_start_over;
+			}
+
+			last_host_ptr = &host_rt->next;
+			host_rt = host_rt->next;
+		}
+
+		rtdm_lock_put_irqrestore(&host_table_lock, context);
+	}
+
+	if ((ip = rtdev->local_ip) != 0)
+		rt_ip_route_del_host(ip, rtdev);
+}
+
+/***
+ *  rt_ip_route_get_host - check if specified host route is resolved
+ */
+int rt_ip_route_get_host(u32 addr, char *if_name, unsigned char *dev_addr,
+			 struct rtnet_device *rtdev)
+{
+	rtdm_lockctx_t context;
+	struct host_route *rt;
+	unsigned int key;
+
+	key = ntohl(addr) & HOST_HASH_KEY_MASK;
+
+	rtdm_lock_get_irqsave(&host_table_lock, context);
+
+	rt = host_hash_tbl[key];
+	while (rt != NULL) {
+		if ((rt->dest_host.ip == addr) &&
+		    (!rtdev ||
+		     rt->dest_host.rtdev->local_ip == rtdev->local_ip)) {
+			memcpy(dev_addr, rt->dest_host.dev_addr,
+			       rt->dest_host.rtdev->addr_len);
+			strncpy(if_name, rt->dest_host.rtdev->name, IFNAMSIZ);
+
+			rtdm_lock_put_irqrestore(&host_table_lock, context);
+			return 0;
+		}
+
+		rt = rt->next;
+	}
+
+	rtdm_lock_put_irqrestore(&host_table_lock, context);
+
+	return -ENOENT;
+}
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+/***
+ *  rt_alloc_net_route - allocates new network route
+ */
+static inline struct net_route *rt_alloc_net_route(void)
+{
+	rtdm_lockctx_t context;
+	struct net_route *rt;
+
+	rtdm_lock_get_irqsave(&net_table_lock, context);
+
+	if ((rt = free_net_route) != NULL) {
+		free_net_route = rt->next;
+		allocated_net_routes++;
+	}
+
+	rtdm_lock_put_irqrestore(&net_table_lock, context);
+
+	return rt;
+}
+
+/***
+ *  rt_free_net_route - releases network route
+ *
+ *  Note: must be called with net_table_lock held
+ */
+static inline void rt_free_net_route(struct net_route *rt)
+{
+	rt->next = free_net_route;
+	free_net_route = rt;
+	allocated_net_routes--;
+}
+
+/***
+ *  rt_ip_route_add_net: add or update network route
+ */
+int rt_ip_route_add_net(u32 addr, u32 mask, u32 gw_addr)
+{
+	rtdm_lockctx_t context;
+	struct net_route *new_route;
+	struct net_route *rt;
+	struct net_route **last_ptr;
+	unsigned int key;
+	u32 shifted_mask;
+
+	addr &= mask;
+
+	if ((new_route = rt_alloc_net_route()) != NULL) {
+		new_route->dest_net_ip = addr;
+		new_route->dest_net_mask = mask;
+		new_route->gw_ip = gw_addr;
+	}
+
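+	/*
+	 * Networks whose mask does not cover all hash key bits cannot be
+	 * hashed unambiguously and go into the overflow slot at index
+	 * NET_HASH_TBL_SIZE.
+	 */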
+	shifted_mask = NET_HASH_KEY_MASK << net_hash_key_shift;
+	if ((mask & shifted_mask) == shifted_mask)
+		key = (ntohl(addr) >> net_hash_key_shift) & NET_HASH_KEY_MASK;
+	else
+		key = NET_HASH_TBL_SIZE;
+	last_ptr = &net_hash_tbl[key];
+
+	rtdm_lock_get_irqsave(&net_table_lock, context);
+
+	xnvfile_touch_tag(&net_route_tag);
+
+	rt = net_hash_tbl[key];
+	while (rt != NULL) {
+		if ((rt->dest_net_ip == addr) && (rt->dest_net_mask == mask)) {
+			rt->gw_ip = gw_addr;
+
+			if (new_route)
+				rt_free_net_route(new_route);
+
+			rtdm_lock_put_irqrestore(&net_table_lock, context);
+
+			return 0;
+		}
+
+		last_ptr = &rt->next;
+		rt = rt->next;
+	}
+
+	if (new_route) {
+		new_route->next = *last_ptr;
+		*last_ptr = new_route;
+
+		rtdm_lock_put_irqrestore(&net_table_lock, context);
+
+		return 0;
+	} else {
+		rtdm_lock_put_irqrestore(&net_table_lock, context);
+
+		/*ERRMSG*/ rtdm_printk(
+			"RTnet: no more network routes available\n");
+		return -ENOBUFS;
+	}
+}
+
+/***
+ *  rt_ip_route_del_net - deletes specified network route
+ */
+int rt_ip_route_del_net(u32 addr, u32 mask)
+{
+	rtdm_lockctx_t context;
+	struct net_route *rt;
+	struct net_route **last_ptr;
+	unsigned int key;
+	u32 shifted_mask;
+
+	addr &= mask;
+
+	shifted_mask = NET_HASH_KEY_MASK << net_hash_key_shift;
+	if ((mask & shifted_mask) == shifted_mask)
+		key = (ntohl(addr) >> net_hash_key_shift) & NET_HASH_KEY_MASK;
+	else
+		key = NET_HASH_TBL_SIZE;
+	last_ptr = &net_hash_tbl[key];
+
+	rtdm_lock_get_irqsave(&net_table_lock, context);
+
+	rt = net_hash_tbl[key];
+	while (rt != NULL) {
+		if ((rt->dest_net_ip == addr) && (rt->dest_net_mask == mask)) {
+			*last_ptr = rt->next;
+
+			rt_free_net_route(rt);
+
+			xnvfile_touch_tag(&net_route_tag);
+
+			rtdm_lock_put_irqrestore(&net_table_lock, context);
+
+			return 0;
+		}
+
+		last_ptr = &rt->next;
+		rt = rt->next;
+	}
+
+	rtdm_lock_put_irqrestore(&net_table_lock, context);
+
+	return -ENOENT;
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+/***
+ *  rt_ip_route_output - looks up output route
+ *
+ *  Note: increments refcount on returned rtdev in rt_buf
+ */
+int rt_ip_route_output(struct dest_route *rt_buf, u32 daddr, u32 saddr)
+{
+	rtdm_lockctx_t context;
+	struct host_route *host_rt;
+	unsigned int key;
+
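+	/*
+	 * With net routing enabled, daddr may be rewritten to a gateway
+	 * address below; DADDR keeps naming the original destination so
+	 * the returned route entry still carries it.
+	 */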
+#ifndef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+#define DADDR daddr
+#else
+#define DADDR real_daddr
+
+	struct net_route *net_rt;
+	int lookup_gw = 1;
+	u32 real_daddr = daddr;
+
+restart:
+#endif /* !CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+	key = ntohl(daddr) & HOST_HASH_KEY_MASK;
+
+	rtdm_lock_get_irqsave(&host_table_lock, context);
+
+	host_rt = host_hash_tbl[key];
+	if (likely(saddr == INADDR_ANY))
+		while (host_rt != NULL) {
+			if (host_rt->dest_host.ip == daddr) {
+			host_route_found:
+				if (!rtdev_reference(
+					    host_rt->dest_host.rtdev)) {
+					rtdm_lock_put_irqrestore(
+						&host_table_lock, context);
+					goto next;
+				}
+
+				memcpy(rt_buf->dev_addr,
+				       &host_rt->dest_host.dev_addr,
+				       sizeof(rt_buf->dev_addr));
+				rt_buf->rtdev = host_rt->dest_host.rtdev;
+
+				rtdm_lock_put_irqrestore(&host_table_lock,
+							 context);
+
+				rt_buf->ip = DADDR;
+
+				return 0;
+			}
+		next:
+			host_rt = host_rt->next;
+		}
+	else
+		while (host_rt != NULL) {
+			if ((host_rt->dest_host.ip == daddr) &&
+			    (host_rt->dest_host.rtdev->local_ip == saddr))
+				goto host_route_found;
+			host_rt = host_rt->next;
+		}
+
+	rtdm_lock_put_irqrestore(&host_table_lock, context);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+	if (lookup_gw) {
+		lookup_gw = 0;
+		key = (ntohl(daddr) >> net_hash_key_shift) & NET_HASH_KEY_MASK;
+
+		rtdm_lock_get_irqsave(&net_table_lock, context);
+
+		net_rt = net_hash_tbl[key];
+		while (net_rt != NULL) {
+			if (net_rt->dest_net_ip ==
+			    (daddr & net_rt->dest_net_mask)) {
+				daddr = net_rt->gw_ip;
+
+				rtdm_lock_put_irqrestore(&net_table_lock,
+							 context);
+
+				/* start over, now using the gateway ip as destination */
+				goto restart;
+			}
+
+			net_rt = net_rt->next;
+		}
+
+		rtdm_lock_put_irqrestore(&net_table_lock, context);
+
+		/* last try: no hash key */
+		rtdm_lock_get_irqsave(&net_table_lock, context);
+
+		net_rt = net_hash_tbl[NET_HASH_TBL_SIZE];
+		while (net_rt != NULL) {
+			if (net_rt->dest_net_ip ==
+			    (daddr & net_rt->dest_net_mask)) {
+				daddr = net_rt->gw_ip;
+
+				rtdm_lock_put_irqrestore(&net_table_lock,
+							 context);
+
+				/* start over, now using the gateway ip as destination */
+				goto restart;
+			}
+
+			net_rt = net_rt->next;
+		}
+
+		rtdm_lock_put_irqrestore(&net_table_lock, context);
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+	/*ERRMSG*/ rtdm_printk("RTnet: host %u.%u.%u.%u unreachable\n",
+			       NIPQUAD(daddr));
+	return -EHOSTUNREACH;
+}
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_ROUTER
+int rt_ip_route_forward(struct rtskb *rtskb, u32 daddr)
+{
+	struct rtnet_device *rtdev = rtskb->rtdev;
+	struct dest_route dest;
+
+	if (likely((daddr == rtdev->local_ip) ||
+		   (daddr == rtdev->broadcast_ip) ||
+		   (rtdev->flags & IFF_LOOPBACK)))
+		return 0;
+
+	if (rtskb_acquire(rtskb, &global_pool) != 0) {
+		/*ERRMSG*/ rtdm_printk(
+			"RTnet: router overloaded, dropping packet\n");
+		goto error;
+	}
+
+	if (rt_ip_route_output(&dest, daddr, INADDR_ANY) < 0) {
+		/*ERRMSG*/ rtdm_printk(
+			"RTnet: unable to forward packet from %u.%u.%u.%u\n",
+			NIPQUAD(rtskb->nh.iph->saddr));
+		goto error;
+	}
+
+	rtskb->rtdev = dest.rtdev;
+	rtskb->priority = ROUTER_FORWARD_PRIO;
+
+	if ((dest.rtdev->hard_header) &&
+	    (dest.rtdev->hard_header(rtskb, dest.rtdev, ETH_P_IP, dest.dev_addr,
+				     dest.rtdev->dev_addr, rtskb->len) < 0))
+		goto error;
+
+	rtdev_xmit(rtskb);
+
+	return 1;
+
+error:
+	kfree_rtskb(rtskb);
+	return 1;
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_ROUTER */
+
+/***
+ *  rt_ip_routing_init: initialize
+ */
+int __init rt_ip_routing_init(void)
+{
+	int i;
+
+	for (i = 0; i < CONFIG_XENO_DRIVERS_NET_RTIPV4_HOST_ROUTES - 2; i++)
+		host_routes[i].next = &host_routes[i + 1];
+	free_host_route = &host_routes[0];
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+	for (i = 0; i < CONFIG_XENO_DRIVERS_NET_RTIPV4_NET_ROUTES - 2; i++)
+		net_routes[i].next = &net_routes[i + 1];
+	free_net_route = &net_routes[0];
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	return rt_route_proc_register();
+#else /* !CONFIG_XENO_OPT_VFILE */
+	return 0;
+#endif /* CONFIG_XENO_OPT_VFILE */
+}
+
+/***
+ *  rt_ip_routing_release
+ */
+void rt_ip_routing_release(void)
+{
+#ifdef CONFIG_XENO_OPT_VFILE
+	rt_route_proc_unregister();
+#endif /* CONFIG_XENO_OPT_VFILE */
+}
+
+EXPORT_SYMBOL_GPL(rt_ip_route_add_host);
+EXPORT_SYMBOL_GPL(rt_ip_route_del_host);
+EXPORT_SYMBOL_GPL(rt_ip_route_del_all);
+EXPORT_SYMBOL_GPL(rt_ip_route_output);
+++ linux-patched/drivers/xenomai/net/stack/ipv4/Makefile	2022-03-21 12:58:30.827874939 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/ipv4/udp/Kconfig	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTIPV4) += rtipv4.o
+
+rtipv4-y := \
+	route.o \
+	protocol.o \
+	arp.o \
+	af_inet.o \
+	ip_input.o \
+	ip_sock.o \
+	ip_output.o \
+	ip_fragment.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTIPV4_UDP) += udp/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP) += tcp/
+
+rtipv4-$(CONFIG_XENO_DRIVERS_NET_RTIPV4_ICMP) += icmp.o
+++ linux-patched/drivers/xenomai/net/stack/ipv4/udp/Kconfig	2022-03-21 12:58:30.820875007 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:3 @
--- linux/drivers/xenomai/net/stack/ipv4/udp/udp.c	1970-01-01 01:00:00.000000000 +0100
+config XENO_DRIVERS_NET_RTIPV4_UDP
+    tristate "UDP support"
+    depends on XENO_DRIVERS_NET_RTIPV4
+    default y
+    help
+    Enables UDP support of the RTnet Real-Time IPv4 protocol.
+++ linux-patched/drivers/xenomai/net/stack/ipv4/udp/udp.c	2022-03-21 12:58:30.813875075 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/ipv4/udp/Makefile	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  ipv4/udp.c - UDP implementation for RTnet
+ *
+ *  Copyright (C) 1999, 2000 Zentropic Computing, LLC
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/err.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/list.h>
+
+#include <rtskb.h>
+#include <rtnet_internal.h>
+#include <rtnet_checksum.h>
+#include <rtnet_port.h>
+#include <rtnet_iovec.h>
+#include <rtnet_socket.h>
+#include <ipv4/ip_fragment.h>
+#include <ipv4/ip_output.h>
+#include <ipv4/ip_sock.h>
+#include <ipv4/protocol.h>
+#include <ipv4/route.h>
+#include <ipv4/udp.h>
+
+/***
+ *  This structure is used to register a UDP socket for reception. All
+ *  structures are kept in the port_registry array to increase the cache
+ *  locality during the critical port lookup in rt_udp_v4_lookup().
+ */
+struct udp_socket {
+	u16 sport; /* local port */
+	u32 saddr; /* local ip-addr */
+	struct rtsocket *sock;
+	struct hlist_node link;
+};
+
+/***
+ *  Automatic port number assignment
+ *
+ *  The automatic assignment of port numbers to unbound sockets is realised as
+ *  a simple addition of two values:
+ *   - the socket ID (lower 8 bits of the file descriptor), which is set during
+ *     initialisation and left unchanged afterwards
+ *   - the start value auto_port_start, which is a module parameter
+ *
+ *  auto_port_mask, also a module parameter, defines the range of port numbers
+ *  used for automatic assignment. Any number within this range is rejected
+ *  when passed to bind_rt(). See the illustrative sketch after the module
+ *  parameter declarations below.
+ */
+static unsigned int auto_port_start = 1024;
+static unsigned int auto_port_mask = ~(RT_UDP_SOCKETS - 1);
+static int free_ports = RT_UDP_SOCKETS;
+#define RT_PORT_BITMAP_WORDS                                                   \
+	((RT_UDP_SOCKETS + BITS_PER_LONG - 1) / BITS_PER_LONG)
+static unsigned long port_bitmap[RT_PORT_BITMAP_WORDS];
+static struct udp_socket port_registry[RT_UDP_SOCKETS];
+static DEFINE_RTDM_LOCK(udp_socket_base_lock);
+
+static struct hlist_head port_hash[RT_UDP_SOCKETS * 2];
+#define port_hash_mask (RT_UDP_SOCKETS * 2 - 1)
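+/*
+ * Illustrative note: with e.g. RT_UDP_SOCKETS == 64 the hash table has 128
+ * buckets and port_hash_mask == 0x7f, so the seven low-order bits of the
+ * (network byte order) port value select the bucket.
+ */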
+
+MODULE_LICENSE("GPL");
+
+module_param(auto_port_start, uint, 0444);
+module_param(auto_port_mask, uint, 0444);
+MODULE_PARM_DESC(auto_port_start, "Start of automatically assigned port range");
+MODULE_PARM_DESC(auto_port_mask,
+		 "Mask that defines port range for automatic assignment");
+
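+/*
+ * Minimal sketch, not part of the original driver: the range check applied
+ * by bind_rt() to refuse ports from the automatic assignment window. The
+ * helper name is hypothetical and only restates the rule documented above.
+ */
+static inline int example_port_is_auto_assigned(u16 port)
+{
+	/* ports whose masked value equals the start value are auto-assigned */
+	return (port & auto_port_mask) == auto_port_start;
+}
+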
+static inline struct udp_socket *port_hash_search(u32 saddr, u16 sport)
+{
+	unsigned bucket = sport & port_hash_mask;
+	struct udp_socket *sock;
+
+	hlist_for_each_entry (sock, &port_hash[bucket], link)
+		if (sock->sport == sport &&
+		    (saddr == INADDR_ANY || sock->saddr == saddr ||
+		     sock->saddr == INADDR_ANY))
+			return sock;
+
+	return NULL;
+}
+
+static inline int port_hash_insert(struct udp_socket *sock, u32 saddr,
+				   u16 sport)
+{
+	unsigned bucket;
+
+	if (port_hash_search(saddr, sport))
+		return -EADDRINUSE;
+
+	bucket = sport & port_hash_mask;
+	sock->saddr = saddr;
+	sock->sport = sport;
+	hlist_add_head(&sock->link, &port_hash[bucket]);
+	return 0;
+}
+
+static inline void port_hash_del(struct udp_socket *sock)
+{
+	hlist_del(&sock->link);
+}
+
+/***
+ *  rt_udp_v4_lookup
+ */
+static inline struct rtsocket *rt_udp_v4_lookup(u32 daddr, u16 dport)
+{
+	rtdm_lockctx_t context;
+	struct udp_socket *sock;
+
+	rtdm_lock_get_irqsave(&udp_socket_base_lock, context);
+	sock = port_hash_search(daddr, dport);
+	if (sock && rt_socket_reference(sock->sock) == 0) {
+		rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+
+		return sock->sock;
+	}
+
+	rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+
+	return NULL;
+}
+
+/***
+ *  rt_udp_bind - bind socket to local address
+ *  @fd:      RTDM file descriptor
+ *  @sock:    socket to bind
+ *  @addr:    local address
+ *  @addrlen: length of @addr
+ */
+int rt_udp_bind(struct rtdm_fd *fd, struct rtsocket *sock,
+		const struct sockaddr __user *addr, socklen_t addrlen)
+{
+	struct sockaddr_in _sin, *sin;
+	rtdm_lockctx_t context;
+	int index;
+	int err = 0;
+
+	if (addrlen < sizeof(struct sockaddr_in))
+		return -EINVAL;
+
+	sin = rtnet_get_arg(fd, &_sin, addr, sizeof(_sin));
+	if (IS_ERR(sin))
+		return PTR_ERR(sin);
+
+	if ((sin->sin_port & auto_port_mask) == auto_port_start)
+		return -EINVAL;
+
+	rtdm_lock_get_irqsave(&udp_socket_base_lock, context);
+
+	if ((index = sock->prot.inet.reg_index) < 0) {
+		/* socket is being closed */
+		err = -EBADF;
+		goto unlock_out;
+	}
+	if (sock->prot.inet.state != TCP_CLOSE) {
+		err = -EINVAL;
+		goto unlock_out;
+	}
+
+	port_hash_del(&port_registry[index]);
+	if (port_hash_insert(&port_registry[index], sin->sin_addr.s_addr,
+			     sin->sin_port ?: index + auto_port_start)) {
+		port_hash_insert(&port_registry[index],
+				 port_registry[index].saddr,
+				 port_registry[index].sport);
+		rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+		return -EADDRINUSE;
+	}
+
+	/* set the source-addr */
+	sock->prot.inet.saddr = port_registry[index].saddr;
+
+	/* set source port, if not set by user */
+	sock->prot.inet.sport = port_registry[index].sport;
+
+unlock_out:
+	rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+
+	return err;
+}
+
+/***
+ *  rt_udp_connect
+ */
+int rt_udp_connect(struct rtdm_fd *fd, struct rtsocket *sock,
+		   const struct sockaddr __user *serv_addr, socklen_t addrlen)
+{
+	struct sockaddr _sa, *sa;
+	struct sockaddr_in _sin, *sin;
+	rtdm_lockctx_t context;
+	int index;
+
+	if (addrlen < sizeof(struct sockaddr))
+		return -EINVAL;
+
+	sa = rtnet_get_arg(fd, &_sa, serv_addr, sizeof(_sa));
+	if (IS_ERR(sa))
+		return PTR_ERR(sa);
+
+	if (sa->sa_family == AF_UNSPEC) {
+		if ((index = sock->prot.inet.reg_index) < 0)
+			/* socket is being closed */
+			return -EBADF;
+
+		rtdm_lock_get_irqsave(&udp_socket_base_lock, context);
+
+		sock->prot.inet.saddr = INADDR_ANY;
+		/* Note: The following line differs from standard
+		   stacks, and we also don't remove the socket from
+		   the port list. Might get fixed in the future... */
+		sock->prot.inet.sport = index + auto_port_start;
+		sock->prot.inet.daddr = INADDR_ANY;
+		sock->prot.inet.dport = 0;
+		sock->prot.inet.state = TCP_CLOSE;
+
+		rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+	} else {
+		if (addrlen < sizeof(struct sockaddr_in))
+			return -EINVAL;
+
+		sin = rtnet_get_arg(fd, &_sin, serv_addr, sizeof(_sin));
+		if (IS_ERR(sin))
+			return PTR_ERR(sin);
+
+		if (sin->sin_family != AF_INET)
+			return -EINVAL;
+
+		rtdm_lock_get_irqsave(&udp_socket_base_lock, context);
+
+		if (sock->prot.inet.state != TCP_CLOSE) {
+			rtdm_lock_put_irqrestore(&udp_socket_base_lock,
+						 context);
+			return -EINVAL;
+		}
+
+		sock->prot.inet.state = TCP_ESTABLISHED;
+		sock->prot.inet.daddr = sin->sin_addr.s_addr;
+		sock->prot.inet.dport = sin->sin_port;
+
+		rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+	}
+
+	return 0;
+}
+
+/***
+ *  rt_udp_socket - create a new UDP socket
+ *  @fd: RTDM file descriptor
+ */
+int rt_udp_socket(struct rtdm_fd *fd)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	int ret;
+	int i;
+	int index;
+	rtdm_lockctx_t context;
+
+	if ((ret = rt_socket_init(fd, IPPROTO_UDP)) != 0)
+		return ret;
+
+	sock->prot.inet.saddr = INADDR_ANY;
+	sock->prot.inet.state = TCP_CLOSE;
+	sock->prot.inet.tos = 0;
+
+	rtdm_lock_get_irqsave(&udp_socket_base_lock, context);
+
+	/* enforce maximum number of UDP sockets */
+	if (free_ports == 0) {
+		rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+		rt_socket_cleanup(fd);
+		return -EAGAIN;
+	}
+	free_ports--;
+
+	/* find free auto-port in bitmap */
+	for (i = 0; i < RT_PORT_BITMAP_WORDS; i++)
+		if (port_bitmap[i] != (unsigned long)-1)
+			break;
+	index = ffz(port_bitmap[i]);
+	set_bit(index, &port_bitmap[i]);
+	index += i * BITS_PER_LONG;
+	sock->prot.inet.reg_index = index;
+	sock->prot.inet.sport = index + auto_port_start;
+
+	/* register UDP socket */
+	port_hash_insert(&port_registry[index], INADDR_ANY,
+			 sock->prot.inet.sport);
+	port_registry[index].sock = sock;
+
+	rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+
+	return 0;
+}
+
+/***
+ *  rt_udp_close
+ */
+void rt_udp_close(struct rtdm_fd *fd)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	struct rtskb *del;
+	int port;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&udp_socket_base_lock, context);
+
+	sock->prot.inet.state = TCP_CLOSE;
+
+	if (sock->prot.inet.reg_index >= 0) {
+		port = sock->prot.inet.reg_index;
+		clear_bit(port % BITS_PER_LONG,
+			  &port_bitmap[port / BITS_PER_LONG]);
+		port_hash_del(&port_registry[port]);
+
+		free_ports++;
+
+		sock->prot.inet.reg_index = -1;
+	}
+
+	rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+
+	/* cleanup already collected fragments */
+	rt_ip_frag_invalidate_socket(sock);
+
+	/* free packets in incoming queue */
+	while ((del = rtskb_dequeue(&sock->incoming)) != NULL)
+		kfree_rtskb(del);
+
+	rt_socket_cleanup(fd);
+}
+
+int rt_udp_ioctl(struct rtdm_fd *fd, unsigned int request, void __user *arg)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	const struct _rtdm_setsockaddr_args *setaddr;
+	struct _rtdm_setsockaddr_args _setaddr;
+
+	/* fast path for common socket IOCTLs */
+	if (_IOC_TYPE(request) == RTIOC_TYPE_NETWORK)
+		return rt_socket_common_ioctl(fd, request, arg);
+
+	switch (request) {
+	case _RTIOC_BIND:
+	case _RTIOC_CONNECT:
+		setaddr = rtnet_get_arg(fd, &_setaddr, arg, sizeof(_setaddr));
+		if (IS_ERR(setaddr))
+			return PTR_ERR(setaddr);
+		if (request == _RTIOC_BIND)
+			return rt_udp_bind(fd, sock, setaddr->addr,
+					   setaddr->addrlen);
+
+		return rt_udp_connect(fd, sock, setaddr->addr,
+				      setaddr->addrlen);
+
+	default:
+		return rt_ip_ioctl(fd, request, arg);
+	}
+}
+
+/***
+ *  rt_udp_recvmsg
+ */
+ssize_t rt_udp_recvmsg(struct rtdm_fd *fd, struct user_msghdr *msg,
+		       int msg_flags)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	size_t len;
+	struct rtskb *skb;
+	struct rtskb *first_skb;
+	size_t copied = 0;
+	size_t block_size;
+	size_t data_len;
+	struct udphdr *uh;
+	struct sockaddr_in sin;
+	nanosecs_rel_t timeout = sock->timeout;
+	int ret, flags;
+	socklen_t namelen;
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+
+	if (msg->msg_iovlen < 0)
+		return -EINVAL;
+
+	if (msg->msg_iovlen == 0)
+		return 0;
+
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	/* non-blocking receive? */
+	if (msg_flags & MSG_DONTWAIT)
+		timeout = -1;
+
+	ret = rtdm_sem_timeddown(&sock->pending_sem, timeout, NULL);
+	if (unlikely(ret < 0))
+		switch (ret) {
+		default:
+			ret = -EBADF; /* socket has been closed */
+			fallthrough;
+		case -EWOULDBLOCK:
+		case -ETIMEDOUT:
+		case -EINTR:
+			rtdm_drop_iovec(iov, iov_fast);
+			return ret;
+		}
+
+	skb = rtskb_dequeue_chain(&sock->incoming);
+	RTNET_ASSERT(skb != NULL, return -EFAULT;);
+	uh = skb->h.uh;
+	first_skb = skb;
+
+	/* copy the address if required. */
+	if (msg->msg_name) {
+		memset(&sin, 0, sizeof(sin));
+		sin.sin_family = AF_INET;
+		sin.sin_port = uh->source;
+		sin.sin_addr.s_addr = skb->nh.iph->saddr;
+
+		if (msg->msg_namelen < 0) {
+			ret = -EINVAL;
+			goto fail;
+		}
+		namelen = min(sizeof(sin), (size_t)msg->msg_namelen);
+
+		ret = rtnet_put_arg(fd, msg->msg_name, &sin, namelen);
+		if (ret)
+			goto fail;
+
+		msg->msg_namelen = sizeof(sin);
+	}
+
+	data_len = ntohs(uh->len) - sizeof(struct udphdr);
+
+	/* remove the UDP header */
+	__rtskb_pull(skb, sizeof(struct udphdr));
+
+	flags = msg->msg_flags & ~MSG_TRUNC;
+	len = rtdm_get_iov_flatlen(iov, msg->msg_iovlen);
+
+	/* iterate over all IP fragments */
+	do {
+		rtskb_trim(skb, data_len);
+
+		block_size = skb->len;
+		copied += block_size;
+		data_len -= block_size;
+
+		/* The data must not be longer than the available buffer size */
+		if (copied > len) {
+			block_size -= copied - len;
+			copied = len;
+			flags |= MSG_TRUNC;
+		}
+
+		/* copy the data */
+		ret = rtnet_write_to_iov(fd, iov, msg->msg_iovlen, skb->data,
+					 block_size);
+		if (ret)
+			goto fail;
+
+		/* next fragment */
+		skb = skb->next;
+	} while (skb && !(flags & MSG_TRUNC));
+
+	/* did we copy all bytes? */
+	if (data_len > 0)
+		flags |= MSG_TRUNC;
+
+	msg->msg_flags = flags;
+out:
+	if ((msg_flags & MSG_PEEK) == 0)
+		kfree_rtskb(first_skb);
+	else {
+		__rtskb_push(first_skb, sizeof(struct udphdr));
+		rtskb_queue_head(&sock->incoming, first_skb);
+		rtdm_sem_up(&sock->pending_sem);
+	}
+	rtdm_drop_iovec(iov, iov_fast);
+
+	return copied;
+fail:
+	copied = ret;
+	goto out;
+}
+
+/***
+ *  struct udpfakehdr
+ */
+struct udpfakehdr {
+	struct udphdr uh;
+	u32 daddr;
+	u32 saddr;
+	struct rtdm_fd *fd;
+	struct iovec *iov;
+	int iovlen;
+	u32 wcheck;
+};
+
+/***
+ *  rt_udp_getfrag - copy user data into a fragment, building the UDP header
+ *                   and checksum for the first one
+ */
+static int rt_udp_getfrag(const void *p, unsigned char *to, unsigned int offset,
+			  unsigned int fraglen)
+{
+	struct udpfakehdr *ufh = (struct udpfakehdr *)p;
+	int ret;
+
+	// We should optimize this function a bit (copy+csum...)!
+	if (offset) {
+		ret = rtnet_read_from_iov(ufh->fd, ufh->iov, ufh->iovlen, to,
+					  fraglen);
+		return ret < 0 ? ret : 0;
+	}
+
+	ret = rtnet_read_from_iov(ufh->fd, ufh->iov, ufh->iovlen,
+				  to + sizeof(struct udphdr),
+				  fraglen - sizeof(struct udphdr));
+	if (ret < 0)
+		return ret;
+
+	/* Checksum of the complete data part of the UDP message: */
+	ufh->wcheck =
+		rtnet_csum(to + sizeof(struct udphdr),
+			   fraglen - sizeof(struct udphdr), ufh->wcheck);
+
+	/* Checksum of the udp header: */
+	ufh->wcheck = rtnet_csum((unsigned char *)ufh, sizeof(struct udphdr),
+				 ufh->wcheck);
+
+	ufh->uh.check =
+		csum_tcpudp_magic(ufh->saddr, ufh->daddr, ntohs(ufh->uh.len),
+				  IPPROTO_UDP, ufh->wcheck);
+
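+	/*
+	 * RFC 768: a transmitted checksum of zero means "no checksum", so a
+	 * computed value of zero is sent as all ones instead (see below).
+	 */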
+	if (ufh->uh.check == 0)
+		ufh->uh.check = -1;
+
+	memcpy(to, ufh, sizeof(struct udphdr));
+
+	return 0;
+}
+
+/***
+ *  rt_udp_sendmsg
+ */
+ssize_t rt_udp_sendmsg(struct rtdm_fd *fd, const struct user_msghdr *msg,
+		       int msg_flags)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	size_t len;
+	int ulen;
+	struct sockaddr_in _sin, *sin;
+	struct udpfakehdr ufh;
+	struct dest_route rt;
+	u32 saddr;
+	u32 daddr;
+	u16 dport;
+	int err;
+	rtdm_lockctx_t context;
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+
+	if (msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
+		return -EOPNOTSUPP;
+
+	if (msg_flags & ~(MSG_DONTROUTE | MSG_DONTWAIT))
+		return -EINVAL;
+
+	if (msg->msg_iovlen < 0)
+		return -EINVAL;
+
+	if (msg->msg_iovlen == 0)
+		return 0;
+
+	err = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (err)
+		return err;
+
+	len = rtdm_get_iov_flatlen(iov, msg->msg_iovlen);
+	if ((len < 0) ||
+	    (len > 0xFFFF - sizeof(struct iphdr) - sizeof(struct udphdr))) {
+		err = -EMSGSIZE;
+		goto out;
+	}
+
+	ulen = len + sizeof(struct udphdr);
+
+	if (msg->msg_name && msg->msg_namelen == sizeof(*sin)) {
+		sin = rtnet_get_arg(fd, &_sin, msg->msg_name, sizeof(_sin));
+		if (IS_ERR(sin)) {
+			err = PTR_ERR(sin);
+			goto out;
+		}
+
+		if (sin->sin_family != AF_INET &&
+		    sin->sin_family != AF_UNSPEC) {
+			err = -EINVAL;
+			goto out;
+		}
+
+		daddr = sin->sin_addr.s_addr;
+		dport = sin->sin_port;
+		rtdm_lock_get_irqsave(&udp_socket_base_lock, context);
+	} else {
+		rtdm_lock_get_irqsave(&udp_socket_base_lock, context);
+
+		if (sock->prot.inet.state != TCP_ESTABLISHED) {
+			rtdm_lock_put_irqrestore(&udp_socket_base_lock,
+						 context);
+			err = -ENOTCONN;
+			goto out;
+		}
+
+		daddr = sock->prot.inet.daddr;
+		dport = sock->prot.inet.dport;
+	}
+
+	saddr = sock->prot.inet.saddr;
+	ufh.uh.source = sock->prot.inet.sport;
+
+	rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+
+	if ((daddr | dport) == 0) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* get output route */
+	err = rt_ip_route_output(&rt, daddr, saddr);
+	if (err)
+		goto out;
+
+	/* we found a route, remember the routing dest-addr could be the netmask */
+	ufh.saddr = saddr != INADDR_ANY ? saddr : rt.rtdev->local_ip;
+	ufh.daddr = daddr;
+	ufh.uh.dest = dport;
+	ufh.uh.len = htons(ulen);
+	ufh.uh.check = 0;
+	ufh.fd = fd;
+	ufh.iov = iov;
+	ufh.iovlen = msg->msg_iovlen;
+	ufh.wcheck = 0;
+
+	err = rt_ip_build_xmit(sock, rt_udp_getfrag, &ufh, ulen, &rt,
+			       msg_flags);
+
+	/* Drop the reference obtained in rt_ip_route_output() */
+	rtdev_dereference(rt.rtdev);
+out:
+	rtdm_drop_iovec(iov, iov_fast);
+
+	return err ?: len;
+}
+
+/***
+ *  rt_udp_check
+ */
+static inline unsigned short rt_udp_check(struct udphdr *uh, int len,
+					  unsigned long saddr,
+					  unsigned long daddr,
+					  unsigned long base)
+{
+	return (csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base));
+}
+
+struct rtsocket *rt_udp_dest_socket(struct rtskb *skb)
+{
+	struct udphdr *uh = skb->h.uh;
+	unsigned short ulen = ntohs(uh->len);
+	u32 saddr = skb->nh.iph->saddr;
+	u32 daddr = skb->nh.iph->daddr;
+	struct rtnet_device *rtdev = skb->rtdev;
+
+	if (uh->check == 0)
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	/* ip_summed (yet) never equals CHECKSUM_PARTIAL
+    else
+        if (skb->ip_summed == CHECKSUM_PARTIAL) {
+            skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+            if ( !rt_udp_check(uh, ulen, saddr, daddr, skb->csum) )
+                return NULL;
+
+            skb->ip_summed = CHECKSUM_NONE;
+        }*/
+
+	if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+		skb->csum =
+			csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
+
+	/* patch broadcast daddr */
+	if (daddr == rtdev->broadcast_ip)
+		daddr = rtdev->local_ip;
+
+	/* find the destination socket */
+	skb->sk = rt_udp_v4_lookup(daddr, uh->dest);
+
+	return skb->sk;
+}
+
+/***
+ *  rt_udp_rcv
+ */
+void rt_udp_rcv(struct rtskb *skb)
+{
+	struct rtsocket *sock = skb->sk;
+	void (*callback_func)(struct rtdm_fd *, void *);
+	void *callback_arg;
+	rtdm_lockctx_t context;
+
+	rtskb_queue_tail(&sock->incoming, skb);
+	rtdm_sem_up(&sock->pending_sem);
+
+	rtdm_lock_get_irqsave(&sock->param_lock, context);
+	callback_func = sock->callback_func;
+	callback_arg = sock->callback_arg;
+	rtdm_lock_put_irqrestore(&sock->param_lock, context);
+
+	if (callback_func)
+		callback_func(rt_socket_fd(sock), callback_arg);
+}
+
+/***
+ *  rt_udp_rcv_err
+ */
+void rt_udp_rcv_err(struct rtskb *skb)
+{
+	rtdm_printk("RTnet: rt_udp_rcv err\n");
+}
+
+/***
+ *  UDP-Initialisation
+ */
+static struct rtinet_protocol udp_protocol = { .protocol = IPPROTO_UDP,
+					       .dest_socket =
+						       &rt_udp_dest_socket,
+					       .rcv_handler = &rt_udp_rcv,
+					       .err_handler = &rt_udp_rcv_err,
+					       .init_socket = &rt_udp_socket };
+
+static struct rtdm_driver udp_driver = {
+    .profile_info =     RTDM_PROFILE_INFO(udp,
+                                        RTDM_CLASS_NETWORK,
+                                        RTDM_SUBCLASS_RTNET,
+                                        RTNET_RTDM_VER),
+    .device_flags =     RTDM_PROTOCOL_DEVICE,
+    .device_count =	1,
+    .context_size =     sizeof(struct rtsocket),
+
+    .protocol_family =  PF_INET,
+    .socket_type =      SOCK_DGRAM,
+
+    /* default is UDP */
+    .ops = {
+        .socket =       rt_inet_socket,
+        .close =        rt_udp_close,
+        .ioctl_rt =     rt_udp_ioctl,
+        .ioctl_nrt =    rt_udp_ioctl,
+        .recvmsg_rt =   rt_udp_recvmsg,
+        .sendmsg_rt =   rt_udp_sendmsg,
+        .select =       rt_socket_select_bind,
+    },
+};
+
+static struct rtdm_device udp_device = {
+	.driver = &udp_driver,
+	.label = "udp",
+};
+
+/***
+ *  rt_udp_init
+ */
+static int __init rt_udp_init(void)
+{
+	int i, err;
+
+	if ((auto_port_start < 0) ||
+	    (auto_port_start >= 0x10000 - RT_UDP_SOCKETS))
+		auto_port_start = 1024;
+	auto_port_start = htons(auto_port_start & (auto_port_mask & 0xFFFF));
+	auto_port_mask = htons(auto_port_mask | 0xFFFF0000);
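+	/*
+	 * Both values are kept in network byte order from here on so they can
+	 * be compared directly with the port fields of struct sockaddr_in.
+	 */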
+
+	rt_inet_add_protocol(&udp_protocol);
+
+	for (i = 0; i < ARRAY_SIZE(port_hash); i++)
+		INIT_HLIST_HEAD(&port_hash[i]);
+
+	err = rtdm_dev_register(&udp_device);
+	if (err)
+		rt_inet_del_protocol(&udp_protocol);
+	return err;
+}
+
+/***
+ *  rt_udp_release
+ */
+static void __exit rt_udp_release(void)
+{
+	rtdm_dev_unregister(&udp_device);
+	rt_inet_del_protocol(&udp_protocol);
+}
+
+module_init(rt_udp_init);
+module_exit(rt_udp_release);
+++ linux-patched/drivers/xenomai/net/stack/ipv4/udp/Makefile	2022-03-21 12:58:30.805875153 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:2 @
--- linux/drivers/xenomai/net/stack/ipv4/ip_output.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTIPV4_UDP) += rtudp.o
+
+rtudp-y := udp.o
+++ linux-patched/drivers/xenomai/net/stack/ipv4/ip_output.c	2022-03-21 12:58:30.798875222 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/ipv4/icmp.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  ipv4/ip_output.c - prepare outgoing IP packets
+ *
+ *  Copyright (C) 2002      Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/ip.h>
+#include <net/checksum.h>
+#include <net/ip.h>
+
+#include <rtnet_socket.h>
+#include <stack_mgr.h>
+#include <ipv4/ip_fragment.h>
+#include <ipv4/ip_input.h>
+#include <ipv4/route.h>
+
+static DEFINE_RTDM_LOCK(rt_ip_id_lock);
+static u16 rt_ip_id_count = 0;
+
+/***
+ *  Slow path for fragmented packets
+ */
+int rt_ip_build_xmit_slow(struct rtsocket *sk,
+			  int getfrag(const void *, char *, unsigned int,
+				      unsigned int),
+			  const void *frag, unsigned length,
+			  struct dest_route *rt, int msg_flags,
+			  unsigned int mtu, unsigned int prio)
+{
+	int err, next_err;
+	struct rtskb *skb;
+	struct rtskb *next_skb;
+	struct iphdr *iph;
+	struct rtnet_device *rtdev = rt->rtdev;
+	unsigned int fragdatalen;
+	unsigned int offset = 0;
+	u16 msg_rt_ip_id;
+	rtdm_lockctx_t context;
+	unsigned int rtskb_size;
+	int hh_len = (rtdev->hard_header_len + 15) & ~15;
+
+#define FRAGHEADERLEN sizeof(struct iphdr)
+
+	fragdatalen = ((mtu - FRAGHEADERLEN) & ~7);
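+	/*
+	 * Worked example (assuming a 1500 byte MTU and the 20 byte header):
+	 * (1500 - 20) & ~7 = 1480, i.e. every non-final fragment carries 1480
+	 * payload bytes, a multiple of 8 as required by the 13-bit fragment
+	 * offset field.
+	 */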
+
+	/* Store id in local variable */
+	rtdm_lock_get_irqsave(&rt_ip_id_lock, context);
+	msg_rt_ip_id = rt_ip_id_count++;
+	rtdm_lock_put_irqrestore(&rt_ip_id_lock, context);
+
+	rtskb_size = mtu + hh_len + 15;
+
+	/* TODO: delay the previous skb until ALL errors which may occur
+	   during the next skb setup have been caught */
+
+	/* Preallocate first rtskb */
+	skb = alloc_rtskb(rtskb_size, &sk->skb_pool);
+	if (skb == NULL)
+		return -ENOBUFS;
+
+	for (offset = 0; offset < length; offset += fragdatalen) {
+		int fraglen; /* length of this fragment, including the IP header */
+		__u16 frag_off = offset >> 3;
+
+		next_err = 0;
+		if (offset >= length - fragdatalen) {
+			/* last fragment */
+			fraglen = FRAGHEADERLEN + length - offset;
+			next_skb = NULL;
+		} else {
+			fraglen = FRAGHEADERLEN + fragdatalen;
+			frag_off |= IP_MF;
+
+			next_skb = alloc_rtskb(rtskb_size, &sk->skb_pool);
+			if (next_skb == NULL) {
+				frag_off &= ~IP_MF; /* cut the chain */
+				next_err = -ENOBUFS;
+			}
+		}
+
+		rtskb_reserve(skb, hh_len);
+
+		skb->rtdev = rtdev;
+		skb->nh.iph = iph = (struct iphdr *)rtskb_put(skb, fraglen);
+		skb->priority = prio;
+
+		iph->version = 4;
+		iph->ihl = 5; /* 20 byte header - no options */
+		iph->tos = sk->prot.inet.tos;
+		iph->tot_len = htons(fraglen);
+		iph->id = htons(msg_rt_ip_id);
+		iph->frag_off = htons(frag_off);
+		iph->ttl = 255;
+		iph->protocol = sk->protocol;
+		iph->saddr = rtdev->local_ip;
+		iph->daddr = rt->ip;
+		iph->check = 0; /* required! */
+		iph->check = ip_fast_csum((unsigned char *)iph, 5 /*iph->ihl*/);
+
+		if ((err = getfrag(frag, ((char *)iph) + 5 /*iph->ihl*/ * 4,
+				   offset, fraglen - FRAGHEADERLEN)))
+			goto error;
+
+		if (rtdev->hard_header) {
+			err = rtdev->hard_header(skb, rtdev, ETH_P_IP,
+						 rt->dev_addr, rtdev->dev_addr,
+						 skb->len);
+			if (err < 0)
+				goto error;
+		}
+
+		err = rtdev_xmit(skb);
+
+		skb = next_skb;
+
+		if (err != 0) {
+			err = -EAGAIN;
+			goto error;
+		}
+
+		if (next_err != 0)
+			return next_err;
+	}
+	return 0;
+
+error:
+	if (skb != NULL) {
+		kfree_rtskb(skb);
+
+		if (next_skb != NULL)
+			kfree_rtskb(next_skb);
+	}
+	return err;
+}
+
+/***
+ *  Fast path for unfragmented packets.
+ */
+int rt_ip_build_xmit(struct rtsocket *sk,
+		     int getfrag(const void *, char *, unsigned int,
+				 unsigned int),
+		     const void *frag, unsigned length, struct dest_route *rt,
+		     int msg_flags)
+{
+	int err = 0;
+	struct rtskb *skb;
+	struct iphdr *iph;
+	int hh_len;
+	u16 msg_rt_ip_id;
+	rtdm_lockctx_t context;
+	struct rtnet_device *rtdev = rt->rtdev;
+	unsigned int prio;
+	unsigned int mtu;
+
+	/* sk->priority may encode both priority and output channel. Make sure
+	   we use a consistent value, also for the MTU which is derived from
+	   the channel. */
+	prio = (volatile unsigned int)sk->priority;
+	mtu = rtdev->get_mtu(rtdev, prio);
+
+	/*
+     *  Try the simple case first. This leaves fragmented frames, and by choice
+     *  RAW frames within 20 bytes of the maximum size (rare), to the long path.
+     */
+	length += sizeof(struct iphdr);
+
+	if (length > mtu)
+		return rt_ip_build_xmit_slow(sk, getfrag, frag,
+					     length - sizeof(struct iphdr), rt,
+					     msg_flags, mtu, prio);
+
+	/* Store id in local variable */
+	rtdm_lock_get_irqsave(&rt_ip_id_lock, context);
+	msg_rt_ip_id = rt_ip_id_count++;
+	rtdm_lock_put_irqrestore(&rt_ip_id_lock, context);
+
+	hh_len = (rtdev->hard_header_len + 15) & ~15;
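+	/* round the link-layer header length up to a multiple of 16, e.g. the
+	   14 byte Ethernet header reserves 16 bytes */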
+
+	skb = alloc_rtskb(length + hh_len + 15, &sk->skb_pool);
+	if (skb == NULL)
+		return -ENOBUFS;
+
+	rtskb_reserve(skb, hh_len);
+
+	skb->rtdev = rtdev;
+	skb->nh.iph = iph = (struct iphdr *)rtskb_put(skb, length);
+	skb->priority = prio;
+
+	iph->version = 4;
+	iph->ihl = 5;
+	iph->tos = sk->prot.inet.tos;
+	iph->tot_len = htons(length);
+	iph->id = htons(msg_rt_ip_id);
+	iph->frag_off = htons(IP_DF);
+	iph->ttl = 255;
+	iph->protocol = sk->protocol;
+	iph->saddr = rtdev->local_ip;
+	iph->daddr = rt->ip;
+	iph->check = 0; /* required! */
+	iph->check = ip_fast_csum((unsigned char *)iph, 5 /*iph->ihl*/);
+
+	if ((err = getfrag(frag, ((char *)iph) + 5 /*iph->ihl*/ * 4, 0,
+			   length - 5 /*iph->ihl*/ * 4)))
+		goto error;
+
+	if (rtdev->hard_header) {
+		err = rtdev->hard_header(skb, rtdev, ETH_P_IP, rt->dev_addr,
+					 rtdev->dev_addr, skb->len);
+		if (err < 0)
+			goto error;
+	}
+
+	err = rtdev_xmit(skb);
+
+	if (err)
+		return -EAGAIN;
+	else
+		return 0;
+
+error:
+	kfree_rtskb(skb);
+	return err;
+}
+EXPORT_SYMBOL_GPL(rt_ip_build_xmit);
+
+/***
+ *  IP protocol layer initialiser
+ */
+static struct rtpacket_type ip_packet_type = { .type = __constant_htons(
+						       ETH_P_IP),
+					       .handler = &rt_ip_rcv };
+
+/***
+ *  rt_ip_init
+ */
+void __init rt_ip_init(void)
+{
+	rtdev_add_pack(&ip_packet_type);
+	rt_ip_fragment_init();
+}
+
+/***
+ *  rt_ip_release
+ */
+void rt_ip_release(void)
+{
+	rtdev_remove_pack(&ip_packet_type);
+	rt_ip_fragment_cleanup();
+}
+++ linux-patched/drivers/xenomai/net/stack/ipv4/icmp.c	2022-03-21 12:58:30.790875300 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/ipv4/ip_input.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  ipv4/icmp.c
+ *
+ *  rtnet - real-time networking subsystem
+ *  Copyright (C) 1999, 2000 Zentropic Computing, LLC
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2002       Vinay Sridhara <vinaysridhara@yahoo.com>
+ *                2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/icmp.h>
+#include <net/checksum.h>
+
+#include <rtskb.h>
+#include <rtnet_socket.h>
+#include <rtnet_checksum.h>
+#include <ipv4_chrdev.h>
+#include <ipv4/icmp.h>
+#include <ipv4/ip_fragment.h>
+#include <ipv4/ip_output.h>
+#include <ipv4/protocol.h>
+#include <ipv4/route.h>
+
+/***
+ * Structure for sending the icmp packets
+ */
+struct icmp_bxm {
+	unsigned int csum;
+	size_t head_len;
+	size_t data_len;
+	off_t offset;
+	struct {
+		struct icmphdr icmph;
+		nanosecs_abs_t timestamp;
+	} head;
+	union {
+		struct rtskb *skb;
+		void *buf;
+	} data;
+};
+
+struct rt_icmp_control {
+	void (*handler)(struct rtskb *skb);
+	short error; /* This ICMP is classed as an error message */
+};
+
+static DEFINE_RTDM_LOCK(echo_calls_lock);
+LIST_HEAD(echo_calls);
+
+static struct {
+	/*
+     * Scratch pad, provided so that rt_socket_dereference(&icmp_socket);
+     * remains legal.
+     */
+	struct rtdm_dev_context dummy;
+
+	/*
+     *  Socket for icmp replies
+     *  It is not part of the socket pool. It may furthermore be used
+     *  concurrently by multiple tasks because all fields are static except
+     *  skb_pool, but that one is spinlock protected.
+     */
+	struct rtsocket socket;
+} icmp_socket_container;
+
+#define icmp_fd (&icmp_socket_container.dummy.fd)
+#define icmp_socket ((struct rtsocket *)rtdm_fd_to_private(icmp_fd))
+
+void rt_icmp_queue_echo_request(struct rt_proc_call *call)
+{
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&echo_calls_lock, context);
+	list_add_tail(&call->list_entry, &echo_calls);
+	rtdm_lock_put_irqrestore(&echo_calls_lock, context);
+}
+
+void rt_icmp_dequeue_echo_request(struct rt_proc_call *call)
+{
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&echo_calls_lock, context);
+	list_del(&call->list_entry);
+	rtdm_lock_put_irqrestore(&echo_calls_lock, context);
+}
+
+void rt_icmp_cleanup_echo_requests(void)
+{
+	rtdm_lockctx_t context;
+	struct list_head *entry;
+	struct list_head *next;
+
+	rtdm_lock_get_irqsave(&echo_calls_lock, context);
+	entry = echo_calls.next;
+	INIT_LIST_HEAD(&echo_calls);
+	rtdm_lock_put_irqrestore(&echo_calls_lock, context);
+
+	while (entry != &echo_calls) {
+		next = entry->next;
+		rtpc_complete_call_nrt((struct rt_proc_call *)entry, -EINTR);
+		entry = next;
+	}
+
+	/* purge any pending ICMP fragments */
+	rt_ip_frag_invalidate_socket(icmp_socket);
+}
+
+/***
+ *  rt_icmp_discard - dummy function
+ */
+static void rt_icmp_discard(struct rtskb *skb)
+{
+}
+
+static int rt_icmp_glue_reply_bits(const void *p, unsigned char *to,
+				   unsigned int offset, unsigned int fraglen)
+{
+	struct icmp_bxm *icmp_param = (struct icmp_bxm *)p;
+	struct icmphdr *icmph;
+	unsigned long csum;
+
+	/* TODO: add support for fragmented ICMP packets */
+	if (offset != 0)
+		return -EMSGSIZE;
+
+	csum = rtnet_csum_copy((void *)&icmp_param->head, to,
+			       icmp_param->head_len,
+			       icmp_param->csum);
+
+	csum = rtskb_copy_and_csum_bits(icmp_param->data.skb,
+					icmp_param->offset,
+					to + icmp_param->head_len,
+					fraglen - icmp_param->head_len, csum);
+
+	icmph = (struct icmphdr *)to;
+
+	icmph->checksum = csum_fold(csum);
+
+	return 0;
+}
+
+/***
+ *  common reply function
+ */
+static void rt_icmp_send_reply(struct icmp_bxm *icmp_param, struct rtskb *skb)
+{
+	struct dest_route rt;
+	int err;
+
+	icmp_param->head.icmph.checksum = 0;
+	icmp_param->csum = 0;
+
+	/* route back to the source address via the incoming device */
+	if (rt_ip_route_output(&rt, skb->nh.iph->saddr, skb->rtdev->local_ip) !=
+	    0)
+		return;
+
+	rt_socket_reference(icmp_socket);
+	err = rt_ip_build_xmit(icmp_socket, rt_icmp_glue_reply_bits, icmp_param,
+			       sizeof(struct icmphdr) + icmp_param->data_len,
+			       &rt, MSG_DONTWAIT);
+	if (err)
+		rt_socket_dereference(icmp_socket);
+
+	rtdev_dereference(rt.rtdev);
+
+	RTNET_ASSERT(err == 0,
+		     rtdm_printk("RTnet: %s() error in xmit\n", __FUNCTION__););
+	(void)err;
+}
+
+/***
+ *  rt_icmp_echo_reply - handles echo replies to our previously sent requests
+ */
+static void rt_icmp_echo_reply(struct rtskb *skb)
+{
+	rtdm_lockctx_t context;
+	struct rt_proc_call *call;
+	struct ipv4_cmd *cmd;
+
+	rtdm_lock_get_irqsave(&echo_calls_lock, context);
+
+	if (!list_empty(&echo_calls)) {
+		call = (struct rt_proc_call *)echo_calls.next;
+		list_del(&call->list_entry);
+
+		rtdm_lock_put_irqrestore(&echo_calls_lock, context);
+	} else {
+		rtdm_lock_put_irqrestore(&echo_calls_lock, context);
+		return;
+	}
+
+	cmd = rtpc_get_priv(call, struct ipv4_cmd);
+
+	cmd->args.ping.ip_addr = skb->nh.iph->saddr;
+	cmd->args.ping.rtt = 0;
+
+	if ((skb->h.icmph->un.echo.id == cmd->args.ping.id) &&
+	    (ntohs(skb->h.icmph->un.echo.sequence) ==
+	     cmd->args.ping.sequence) &&
+	    skb->len == cmd->args.ping.msg_size) {
+		if (skb->len >= sizeof(nanosecs_abs_t))
+			cmd->args.ping.rtt = rtdm_clock_read() -
+					     *((nanosecs_abs_t *)skb->data);
+		rtpc_complete_call(call, sizeof(struct icmphdr) + skb->len);
+	} else
+		rtpc_complete_call(call, 0);
+}
+
+/***
+ *  rt_icmp_echo_request - handles echo requests sent by other stations
+ */
+static void rt_icmp_echo_request(struct rtskb *skb)
+{
+	struct icmp_bxm icmp_param;
+
+	icmp_param.head.icmph = *skb->h.icmph;
+	icmp_param.head.icmph.type = ICMP_ECHOREPLY;
+	icmp_param.data.skb = skb;
+	icmp_param.offset = 0;
+	icmp_param.data_len = skb->len;
+	icmp_param.head_len = sizeof(struct icmphdr);
+
+	rt_icmp_send_reply(&icmp_param, skb);
+
+	return;
+}
+
+static int rt_icmp_glue_request_bits(const void *p, unsigned char *to,
+				     unsigned int offset, unsigned int fraglen)
+{
+	struct icmp_bxm *icmp_param = (struct icmp_bxm *)p;
+	struct icmphdr *icmph;
+	unsigned long csum;
+
+	/* TODO: add support for fragmented ICMP packets */
+	RTNET_ASSERT(
+		offset == 0,
+		rtdm_printk("RTnet: %s() does not support fragmentation.\n",
+			    __FUNCTION__);
+		return -1;);
+
+	csum = rtnet_csum_copy((void *)&icmp_param->head, to,
+			       icmp_param->head_len,
+			       icmp_param->csum);
+
+	csum = rtnet_csum_copy(icmp_param->data.buf,
+			       to + icmp_param->head_len,
+			       fraglen - icmp_param->head_len, csum);
+
+	icmph = (struct icmphdr *)to;
+
+	icmph->checksum = csum_fold(csum);
+
+	return 0;
+}
+
+/***
+ *  common request function
+ */
+static int rt_icmp_send_request(u32 daddr, struct icmp_bxm *icmp_param)
+{
+	struct dest_route rt;
+	unsigned int size;
+	int err;
+
+	icmp_param->head.icmph.checksum = 0;
+	icmp_param->csum = 0;
+
+	if ((err = rt_ip_route_output(&rt, daddr, INADDR_ANY)) < 0)
+		return err;
+
+	/* TODO: add support for fragmented ICMP packets */
+	size = icmp_param->head_len + icmp_param->data_len;
+	if (size + 20 /* ip header */ >
+	    rt.rtdev->get_mtu(rt.rtdev, RT_ICMP_PRIO))
+		err = -EMSGSIZE;
+	else {
+		rt_socket_reference(icmp_socket);
+		err = rt_ip_build_xmit(icmp_socket, rt_icmp_glue_request_bits,
+				       icmp_param, size, &rt, MSG_DONTWAIT);
+		if (err)
+			rt_socket_dereference(icmp_socket);
+	}
+
+	rtdev_dereference(rt.rtdev);
+
+	return err;
+}
+
+/***
+ *  rt_icmp_send_echo - sends an echo request to the specified address
+ */
+int rt_icmp_send_echo(u32 daddr, u16 id, u16 sequence, size_t msg_size)
+{
+	struct icmp_bxm icmp_param;
+	unsigned char *pattern_buf;
+	off_t pos;
+	int ret;
+
+	/*
+	 * This only sets up a ping message; execution time is not critical,
+	 * so rtdm_malloc() is fine here.
+	 */
+	pattern_buf = rtdm_malloc(msg_size);
+	if (pattern_buf == NULL)
+		return -ENOMEM;
+
+	/* first purge any potentially pending ICMP fragments */
+	rt_ip_frag_invalidate_socket(icmp_socket);
+
+	icmp_param.head.icmph.type = ICMP_ECHO;
+	icmp_param.head.icmph.code = 0;
+	icmp_param.head.icmph.un.echo.id = id;
+	icmp_param.head.icmph.un.echo.sequence = htons(sequence);
+	icmp_param.offset = 0;
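+	/*
+	 * Payload layout: if the requested size allows it, the first 8 bytes
+	 * carry the transmit timestamp (used by rt_icmp_echo_reply() to
+	 * compute the RTT), the remainder is a simple 0x00..0xff pattern.
+	 */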
+
+	if (msg_size >= sizeof(nanosecs_abs_t)) {
+		icmp_param.head_len =
+			sizeof(struct icmphdr) + sizeof(nanosecs_abs_t);
+		icmp_param.data_len = msg_size - sizeof(nanosecs_abs_t);
+
+		for (pos = 0; pos < icmp_param.data_len; pos++)
+			pattern_buf[pos] = pos & 0xFF;
+
+		icmp_param.head.timestamp = rtdm_clock_read();
+	} else {
+		icmp_param.head_len = sizeof(struct icmphdr) + msg_size;
+		icmp_param.data_len = 0;
+
+		for (pos = 0; pos < msg_size; pos++)
+			pattern_buf[pos] = pos & 0xFF;
+	}
+	icmp_param.data.buf = pattern_buf;
+
+	ret = rt_icmp_send_request(daddr, &icmp_param);
+	rtdm_free(pattern_buf);
+
+	return ret;
+}
+
+/***
+ *  rt_icmp_socket
+ */
+int rt_icmp_socket(struct rtdm_fd *fd)
+{
+	/* we don't support user-created ICMP sockets */
+	return -ENOPROTOOPT;
+}
+
+static struct rt_icmp_control rt_icmp_pointers[NR_ICMP_TYPES + 1] = {
+	/* ECHO REPLY (0) */
+	{ rt_icmp_echo_reply, 0 },
+	{ rt_icmp_discard, 1 },
+	{ rt_icmp_discard, 1 },
+
+	/* DEST UNREACH (3) */
+	{ rt_icmp_discard, 1 },
+
+	/* SOURCE QUENCH (4) */
+	{ rt_icmp_discard, 1 },
+
+	/* REDIRECT (5) */
+	{ rt_icmp_discard, 1 },
+	{ rt_icmp_discard, 1 },
+	{ rt_icmp_discard, 1 },
+
+	/* ECHO (8) */
+	{ rt_icmp_echo_request, 0 },
+	{ rt_icmp_discard, 1 },
+	{ rt_icmp_discard, 1 },
+
+	/* TIME EXCEEDED (11) */
+	{ rt_icmp_discard, 1 },
+
+	/* PARAMETER PROBLEM (12) */
+	{ rt_icmp_discard, 1 },
+
+	/* TIMESTAMP (13) */
+	{ rt_icmp_discard, 0 },
+
+	/* TIMESTAMP REPLY (14) */
+	{ rt_icmp_discard, 0 },
+
+	/* INFO (15) */
+	{ rt_icmp_discard, 0 },
+
+	/* INFO REPLY (16) */
+	{ rt_icmp_discard, 0 },
+
+	/* ADDR MASK (17) */
+	{ rt_icmp_discard, 0 },
+
+	/* ADDR MASK REPLY (18) */
+	{ rt_icmp_discard, 0 }
+};
+
+/***
+ *  rt_icmp_dest_socket
+ */
+struct rtsocket *rt_icmp_dest_socket(struct rtskb *skb)
+{
+	rt_socket_reference(icmp_socket);
+	return icmp_socket;
+}
+
+/***
+ *  rt_icmp_rcv
+ */
+void rt_icmp_rcv(struct rtskb *skb)
+{
+	struct icmphdr *icmpHdr = skb->h.icmph;
+	unsigned int length = skb->len;
+
+	/* check header sanity and don't accept fragmented packets */
+	if ((length < sizeof(struct icmphdr)) || (skb->next != NULL)) {
+		rtdm_printk("RTnet: improper length in icmp packet\n");
+		goto cleanup;
+	}
+
+	if (ip_compute_csum((unsigned char *)icmpHdr, length)) {
+		rtdm_printk("RTnet: invalid checksum in icmp packet %d\n",
+			    length);
+		goto cleanup;
+	}
+
+	if (!rtskb_pull(skb, sizeof(struct icmphdr))) {
+		rtdm_printk("RTnet: pull failed %p\n", (skb->sk));
+		goto cleanup;
+	}
+
+	if (icmpHdr->type > NR_ICMP_TYPES) {
+		rtdm_printk("RTnet: invalid icmp type\n");
+		goto cleanup;
+	}
+
+	/* sane packet, process it */
+	rt_icmp_pointers[icmpHdr->type].handler(skb);
+
+cleanup:
+	kfree_rtskb(skb);
+}
+
+/***
+ *  rt_icmp_rcv_err
+ */
+void rt_icmp_rcv_err(struct rtskb *skb)
+{
+	rtdm_printk("RTnet: rt_icmp_rcv err\n");
+}
+
+/***
+ *  ICMP-Initialisation
+ */
+static struct rtinet_protocol icmp_protocol = { .protocol = IPPROTO_ICMP,
+						.dest_socket =
+							&rt_icmp_dest_socket,
+						.rcv_handler = &rt_icmp_rcv,
+						.err_handler = &rt_icmp_rcv_err,
+						.init_socket =
+							&rt_icmp_socket };
+
+/***
+ *  rt_icmp_init
+ */
+void __init rt_icmp_init(void)
+{
+	int skbs;
+
+	skbs = rt_bare_socket_init(icmp_fd, IPPROTO_ICMP, RT_ICMP_PRIO,
+				   ICMP_REPLY_POOL_SIZE);
+	BUG_ON(skbs < 0);
+	if (skbs < ICMP_REPLY_POOL_SIZE)
+		printk("RTnet: allocated only %d icmp rtskbs\n", skbs);
+
+	icmp_socket->prot.inet.tos = 0;
+	icmp_fd->refs = 1;
+
+	rt_inet_add_protocol(&icmp_protocol);
+}
+
+/***
+ *  rt_icmp_release
+ */
+void rt_icmp_release(void)
+{
+	rt_icmp_cleanup_echo_requests();
+	rt_inet_del_protocol(&icmp_protocol);
+	rt_bare_socket_cleanup(icmp_socket);
+}
+++ linux-patched/drivers/xenomai/net/stack/ipv4/ip_input.c	2022-03-21 12:58:30.783875368 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/ipv4/arp.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  ipv4/ip_input.c - process incoming IP packets
+ *
+ *  Copyright (C) 2002      Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <net/checksum.h>
+#include <net/ip.h>
+
+#include <rtskb.h>
+#include <rtnet_socket.h>
+#include <stack_mgr.h>
+#include <ipv4/ip_fragment.h>
+#include <ipv4/protocol.h>
+#include <ipv4/route.h>
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+#include <ipv4/ip_input.h>
+
+rt_ip_fallback_handler_t rt_ip_fallback_handler = NULL;
+EXPORT_SYMBOL_GPL(rt_ip_fallback_handler);
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_PROXY */
+
+/***
+ *  rt_ip_local_deliver
+ */
+static inline void rt_ip_local_deliver(struct rtskb *skb)
+{
+	struct iphdr *iph = skb->nh.iph;
+	unsigned short protocol = iph->protocol;
+	struct rtinet_protocol *ipprot;
+	struct rtsocket *sock;
+	int err;
+
+	ipprot = rt_inet_protocols[rt_inet_hashkey(protocol)];
+
+	/* Check whether we support this protocol */
+	if ((ipprot != NULL) && (ipprot->protocol == protocol)) {
+		__rtskb_pull(skb, iph->ihl * 4);
+
+		/* Point into the IP datagram, just past the header. */
+		skb->h.raw = skb->data;
+
+		/* Reassemble IP fragments */
+		if (iph->frag_off & htons(IP_MF | IP_OFFSET)) {
+			skb = rt_ip_defrag(skb, ipprot);
+			if (!skb)
+				return;
+
+			sock = skb->sk;
+		} else {
+			/* Get the destination socket */
+			if ((sock = ipprot->dest_socket(skb)) == NULL) {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+				if (rt_ip_fallback_handler) {
+					__rtskb_push(skb, iph->ihl * 4);
+					rt_ip_fallback_handler(skb);
+					return;
+				}
+#endif
+				kfree_rtskb(skb);
+				return;
+			}
+
+			/* Acquire the rtskb, to unlock the device skb pool */
+			err = rtskb_acquire(skb, &sock->skb_pool);
+
+			if (err) {
+				kfree_rtskb(skb);
+				rt_socket_dereference(sock);
+				return;
+			}
+		}
+
+		/* Deliver the packet to the next layer */
+		ipprot->rcv_handler(skb);
+
+		/* Packet is queued, socket can be released */
+		rt_socket_dereference(sock);
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+	} else if (rt_ip_fallback_handler) {
+		/* If a fallback handler for IP protocol has been installed,
+         * call it. */
+		rt_ip_fallback_handler(skb);
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_PROXY */
+	} else {
+		if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4_DEBUG))
+			rtdm_printk("RTnet: no protocol found\n");
+		kfree_rtskb(skb);
+	}
+}
+
+/***
+ *  rt_ip_rcv
+ */
+int rt_ip_rcv(struct rtskb *skb, struct rtpacket_type *pt)
+{
+	struct iphdr *iph;
+	__u32 len;
+
+	/* When the interface is in promiscuous mode, drop frames that are not
+	 * addressed to this host instead of trying to analyse them.
+	 */
+	if (skb->pkt_type == PACKET_OTHERHOST)
+		goto drop;
+
+	iph = skb->nh.iph;
+
+	/*
+     *  RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails the checksum.
+     *
+     *  Is the datagram acceptable?
+     *
+     *  1.  Length at least the size of an ip header
+     *  2.  Version of 4
+     *  3.  Checksums correctly. [Speed optimisation for later, skip loopback checksums]
+     *  4.  Doesn't have a bogus length
+     */
+	if (iph->ihl < 5 || iph->version != 4)
+		goto drop;
+
+	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
+		goto drop;
+
+	len = ntohs(iph->tot_len);
+	if ((skb->len < len) || (len < ((__u32)iph->ihl << 2)))
+		goto drop;
+
+	rtskb_trim(skb, len);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_ROUTER
+	if (rt_ip_route_forward(skb, iph->daddr))
+		return 0;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_ROUTER */
+
+	rt_ip_local_deliver(skb);
+	return 0;
+
+drop:
+	kfree_rtskb(skb);
+	return 0;
+}
+++ linux-patched/drivers/xenomai/net/stack/ipv4/arp.c	2022-03-21 12:58:30.776875436 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/ipv4/ip_sock.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  ipv4/arp.c - Address Resolution Protocol for RTnet
+ *
+ *  Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <rtdev.h>
+#include <stack_mgr.h>
+#include <ipv4/arp.h>
+
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+#include <ipv4/ip_input.h>
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP */
+
+/***
+ *  rt_arp_send: Create and send an ARP packet. If dest_hw == NULL,
+ *               we create a broadcast message.
+ */
+void rt_arp_send(int type, int ptype, u32 dest_ip, struct rtnet_device *rtdev,
+		 u32 src_ip, unsigned char *dest_hw, unsigned char *src_hw,
+		 unsigned char *target_hw)
+{
+	struct rtskb *skb;
+	struct arphdr *arp;
+	unsigned char *arp_ptr;
+
+	if (rtdev->flags & IFF_NOARP)
+		return;
+
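+	/*
+	 * Sizing note: for Ethernet (addr_len == 6) the ARP payload is
+	 * sizeof(struct arphdr) + 2 * (6 + 4) = 28 bytes; hard_header_len + 15
+	 * additionally leaves room for an aligned link-layer header.
+	 */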
+	if (!(skb = alloc_rtskb(sizeof(struct arphdr) +
+					2 * (rtdev->addr_len + 4) +
+					rtdev->hard_header_len + 15,
+				&global_pool)))
+		return;
+
+	rtskb_reserve(skb, (rtdev->hard_header_len + 15) & ~15);
+
+	skb->nh.raw = skb->data;
+	arp = (struct arphdr *)rtskb_put(
+		skb, sizeof(struct arphdr) + 2 * (rtdev->addr_len + 4));
+
+	skb->rtdev = rtdev;
+	skb->protocol = __constant_htons(ETH_P_ARP);
+	skb->priority = RT_ARP_SKB_PRIO;
+	if (src_hw == NULL)
+		src_hw = rtdev->dev_addr;
+	if (dest_hw == NULL)
+		dest_hw = rtdev->broadcast;
+
+	/*
+     *  Fill the device header for the ARP frame
+     */
+	if (rtdev->hard_header &&
+	    (rtdev->hard_header(skb, rtdev, ptype, dest_hw, src_hw, skb->len) <
+	     0))
+		goto out;
+
+	arp->ar_hrd = htons(rtdev->type);
+	arp->ar_pro = __constant_htons(ETH_P_IP);
+	arp->ar_hln = rtdev->addr_len;
+	arp->ar_pln = 4;
+	arp->ar_op = htons(type);
+
+	arp_ptr = (unsigned char *)(arp + 1);
+
+	memcpy(arp_ptr, src_hw, rtdev->addr_len);
+	arp_ptr += rtdev->addr_len;
+
+	memcpy(arp_ptr, &src_ip, 4);
+	arp_ptr += 4;
+
+	if (target_hw != NULL)
+		memcpy(arp_ptr, target_hw, rtdev->addr_len);
+	else
+		memset(arp_ptr, 0, rtdev->addr_len);
+	arp_ptr += rtdev->addr_len;
+
+	memcpy(arp_ptr, &dest_ip, 4);
+
+	/* send the frame */
+	rtdev_xmit(skb);
+
+	return;
+
+out:
+	kfree_rtskb(skb);
+}
+
+/***
+ *  rt_arp_rcv: Receive an ARP request or reply from the device layer.
+ */
+int rt_arp_rcv(struct rtskb *skb, struct rtpacket_type *pt)
+{
+	struct rtnet_device *rtdev = skb->rtdev;
+	struct arphdr *arp = skb->nh.arph;
+	unsigned char *arp_ptr = (unsigned char *)(arp + 1);
+	unsigned char *sha;
+	u32 sip, tip;
+	u16 dev_type = rtdev->type;
+
+	/*
+     *  The hardware length of the packet should match the hardware length
+     *  of the device.  Similarly, the hardware types should match.  The
+     *  device should be ARP-able.  Also, if pln is not 4, then the lookup
+     *  is not from an IP number.  We can't currently handle this, so toss
+     *  it.
+     */
+	if ((arp->ar_hln != rtdev->addr_len) || (rtdev->flags & IFF_NOARP) ||
+	    (skb->pkt_type == PACKET_OTHERHOST) ||
+	    (skb->pkt_type == PACKET_LOOPBACK) || (arp->ar_pln != 4))
+		goto out;
+
+	switch (dev_type) {
+	default:
+		if ((arp->ar_pro != __constant_htons(ETH_P_IP)) &&
+		    (htons(dev_type) != arp->ar_hrd))
+			goto out;
+		break;
+	case ARPHRD_ETHER:
+		/*
+	     * ETHERNET devices will accept ARP hardware types of either
+	     * 1 (Ethernet) or 6 (IEEE 802.2).
+	     */
+		if ((arp->ar_hrd != __constant_htons(ARPHRD_ETHER)) &&
+		    (arp->ar_hrd != __constant_htons(ARPHRD_IEEE802))) {
+			goto out;
+		}
+		if (arp->ar_pro != __constant_htons(ETH_P_IP)) {
+			goto out;
+		}
+		break;
+	}
+
+	/* Understand only these message types */
+	if ((arp->ar_op != __constant_htons(ARPOP_REPLY)) &&
+	    (arp->ar_op != __constant_htons(ARPOP_REQUEST)))
+		goto out;
+
+	/*
+     *  Extract fields
+     */
+	sha = arp_ptr;
+	arp_ptr += rtdev->addr_len;
+	memcpy(&sip, arp_ptr, 4);
+
+	arp_ptr += 4;
+	arp_ptr += rtdev->addr_len;
+	memcpy(&tip, arp_ptr, 4);
+
+	/* process only requests/replies directed to us */
+	if (tip == rtdev->local_ip) {
+		rt_ip_route_add_host(sip, sha, rtdev);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+		if (!rt_ip_fallback_handler)
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP */
+			if (arp->ar_op == __constant_htons(ARPOP_REQUEST)) {
+				rt_arp_send(ARPOP_REPLY, ETH_P_ARP, sip, rtdev,
+					    tip, sha, rtdev->dev_addr, sha);
+				goto out1;
+			}
+	}
+
+out:
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+	if (rt_ip_fallback_handler) {
+		rt_ip_fallback_handler(skb);
+		return 0;
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP */
+out1:
+	kfree_rtskb(skb);
+	return 0;
+}
+
+static struct rtpacket_type arp_packet_type = {
+	type: __constant_htons(ETH_P_ARP),
+	handler: &rt_arp_rcv
+};
+
+/***
+ *  rt_arp_init
+ */
+void __init rt_arp_init(void)
+{
+	rtdev_add_pack(&arp_packet_type);
+}
+
+/***
+ *  rt_arp_release
+ */
+void rt_arp_release(void)
+{
+	rtdev_remove_pack(&arp_packet_type);
+}
+++ linux-patched/drivers/xenomai/net/stack/ipv4/ip_sock.c	2022-03-21 12:58:30.768875514 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  ipv4/ip_sock.c
+ *
+ *  Copyright (C) 2003       Hans-Peter Bock <hpbock@avaapgh.de>
+ *                2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *                2019       Sebastian Smolorz <sebastian.smolorz@gmx.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+
+#include <rtnet_socket.h>
+
+int rt_ip_setsockopt(struct rtdm_fd *fd, struct rtsocket *s, int level,
+		     int optname, const void __user *optval, socklen_t optlen)
+{
+	int err = 0;
+	unsigned int _tos, *tos;
+
+	if (level != SOL_IP)
+		return -ENOPROTOOPT;
+
+	if (optlen < sizeof(unsigned int))
+		return -EINVAL;
+
+	switch (optname) {
+	case IP_TOS:
+		tos = rtnet_get_arg(fd, &_tos, optval, sizeof(_tos));
+		if (IS_ERR(tos))
+			return PTR_ERR(tos);
+		else
+			s->prot.inet.tos = *tos;
+		break;
+
+	default:
+		err = -ENOPROTOOPT;
+		break;
+	}
+
+	return err;
+}
+
+int rt_ip_getsockopt(struct rtdm_fd *fd, struct rtsocket *s, int level,
+		     int optname, void __user *optval, socklen_t __user *optlen)
+{
+	int err = 0;
+	unsigned int tos;
+	socklen_t _len, *len;
+
+	len = rtnet_get_arg(fd, &_len, optlen, sizeof(_len));
+	if (IS_ERR(len))
+		return PTR_ERR(len);
+
+	if (*len < sizeof(unsigned int))
+		return -EINVAL;
+
+	switch (optname) {
+	case IP_TOS:
+		tos = s->prot.inet.tos;
+		err = rtnet_put_arg(fd, optval, &tos, sizeof(tos));
+		if (!err) {
+			*len = sizeof(unsigned int);
+			err = rtnet_put_arg(fd, optlen, len, sizeof(socklen_t));
+		}
+		break;
+
+	default:
+		err = -ENOPROTOOPT;
+		break;
+	}
+
+	return err;
+}
+
+int rt_ip_getsockname(struct rtdm_fd *fd, struct rtsocket *s,
+		      struct sockaddr __user *addr, socklen_t __user *addrlen)
+{
+	struct sockaddr_in _sin;
+	socklen_t *len, _len;
+	int ret;
+
+	len = rtnet_get_arg(fd, &_len, addrlen, sizeof(_len));
+	if (IS_ERR(len))
+		return PTR_ERR(len);
+
+	if (*len < sizeof(struct sockaddr_in))
+		return -EINVAL;
+
+	_sin.sin_family = AF_INET;
+	_sin.sin_addr.s_addr = s->prot.inet.saddr;
+	_sin.sin_port = s->prot.inet.sport;
+	memset(&_sin.sin_zero, 0, sizeof(_sin.sin_zero));
+	ret = rtnet_put_arg(fd, addr, &_sin, sizeof(_sin));
+	if (ret)
+		return ret;
+
+	*len = sizeof(struct sockaddr_in);
+	ret = rtnet_put_arg(fd, addrlen, len, sizeof(socklen_t));
+
+	return ret;
+}
+
+int rt_ip_getpeername(struct rtdm_fd *fd, struct rtsocket *s,
+		      struct sockaddr __user *addr, socklen_t __user *addrlen)
+{
+	struct sockaddr_in _sin;
+	socklen_t *len, _len;
+	int ret;
+
+	len = rtnet_get_arg(fd, &_len, addrlen, sizeof(_len));
+	if (IS_ERR(len))
+		return PTR_ERR(len);
+
+	if (*len < sizeof(struct sockaddr_in))
+		return -EINVAL;
+
+	_sin.sin_family = AF_INET;
+	_sin.sin_addr.s_addr = s->prot.inet.daddr;
+	_sin.sin_port = s->prot.inet.dport;
+	memset(&_sin.sin_zero, 0, sizeof(_sin.sin_zero));
+	ret = rtnet_put_arg(fd, addr, &_sin, sizeof(_sin));
+	if (ret)
+		return ret;
+
+	*len = sizeof(struct sockaddr_in);
+	ret = rtnet_put_arg(fd, addrlen, len, sizeof(socklen_t));
+
+	return ret;
+}
+
+int rt_ip_ioctl(struct rtdm_fd *fd, int request, void __user *arg)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	struct _rtdm_getsockaddr_args _getaddr, *getaddr;
+	struct _rtdm_getsockopt_args _getopt, *getopt;
+	struct _rtdm_setsockopt_args _setopt, *setopt;
+
+	switch (request) {
+	case _RTIOC_SETSOCKOPT:
+		setopt = rtnet_get_arg(fd, &_setopt, arg, sizeof(_setopt));
+		if (IS_ERR(setopt))
+			return PTR_ERR(setopt);
+
+		return rt_ip_setsockopt(fd, sock, setopt->level,
+					setopt->optname, setopt->optval,
+					setopt->optlen);
+
+	case _RTIOC_GETSOCKOPT:
+		getopt = rtnet_get_arg(fd, &_getopt, arg, sizeof(_getopt));
+		if (IS_ERR(getopt))
+			return PTR_ERR(getopt);
+
+		return rt_ip_getsockopt(fd, sock, getopt->level,
+					getopt->optname, getopt->optval,
+					getopt->optlen);
+
+	case _RTIOC_GETSOCKNAME:
+		getaddr = rtnet_get_arg(fd, &_getaddr, arg, sizeof(_getaddr));
+		if (IS_ERR(getaddr))
+			return PTR_ERR(getaddr);
+
+		return rt_ip_getsockname(fd, sock, getaddr->addr,
+					 getaddr->addrlen);
+
+	case _RTIOC_GETPEERNAME:
+		getaddr = rtnet_get_arg(fd, &_getaddr, arg, sizeof(_getaddr));
+		if (IS_ERR(getaddr))
+			return PTR_ERR(getaddr);
+
+		return rt_ip_getpeername(fd, sock, getaddr->addr,
+					 getaddr->addrlen);
+
+	default:
+		return rt_socket_if_ioctl(fd, request, arg);
+	}
+}
+EXPORT_SYMBOL_GPL(rt_ip_ioctl);
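+/*
+ * Editorial sketch (not part of the driver): from an application built
+ * against Xenomai's libcobalt, the ordinary setsockopt()/getsockopt()
+ * calls on an RTnet socket are expected to end up in the IP_TOS handlers
+ * above via rt_ip_ioctl(). This is only a rough userspace illustration;
+ * the descriptor handling around it and the IPTOS_LOWDELAY value are
+ * placeholders, not mandated by the driver.
+ */
+#if 0 /* userspace usage sketch, kept out of the build */
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <stdio.h>
+
+static int set_and_check_tos(int fd)
+{
+	unsigned int tos = IPTOS_LOWDELAY;	/* placeholder TOS value */
+	socklen_t len = sizeof(tos);
+
+	/* level IPPROTO_IP (== SOL_IP); handled by rt_ip_setsockopt() above */
+	if (setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)) < 0)
+		return -1;
+
+	/* handled by rt_ip_getsockopt() above */
+	tos = 0;
+	if (getsockopt(fd, IPPROTO_IP, IP_TOS, &tos, &len) < 0)
+		return -1;
+
+	printf("IP_TOS is now %u\n", tos);
+	return 0;
+}
+#endif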
+++ linux-patched/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.h	2022-03-21 12:58:30.761875582 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  ipv4/tcp/timerwheel.h - timerwheel interface for RTnet
+ *
+ *  Copyright (C) 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License, version 2, as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __TIMERWHEEL_H_
+#define __TIMERWHEEL_H_
+
+#include <linux/list.h>
+#include <rtdm/net.h>
+
+#define TIMERWHEEL_TIMER_UNUSED -1
+
+typedef void (*timerwheel_timer_handler)(void *);
+
+struct timerwheel_timer {
+	struct list_head link;
+	timerwheel_timer_handler handler;
+	void *data;
+	int slot;
+	volatile int refcount; /* only written by wheel task */
+};
+
+static inline void timerwheel_init_timer(struct timerwheel_timer *timer,
+					 timerwheel_timer_handler handler,
+					 void *data)
+{
+	timer->slot = TIMERWHEEL_TIMER_UNUSED;
+	timer->handler = handler;
+	timer->data = data;
+	timer->refcount = 0;
+}
+
+/* the data passed must remain valid until the timer has fired */
+int timerwheel_add_timer(struct timerwheel_timer *timer,
+			 nanosecs_rel_t expires);
+
+int timerwheel_remove_timer(struct timerwheel_timer *timer);
+
+void timerwheel_remove_timer_sync(struct timerwheel_timer *timer);
+
+int timerwheel_init(nanosecs_rel_t timeout, unsigned int granularity);
+
+void timerwheel_cleanup(void);
+
+#endif
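+/*
+ * Editorial sketch (not part of the driver): a minimal in-kernel user of
+ * the interface declared above. The wheel is a single module-wide
+ * instance, so timerwheel_init()/timerwheel_cleanup() bracket its whole
+ * lifetime; struct my_object and my_timeout_handler are hypothetical
+ * names, and the numeric values are illustrative only.
+ */
+#if 0 /* usage sketch, kept out of the build */
+struct my_object {
+	struct timerwheel_timer timer;
+	/* ... payload ... */
+};
+
+static void my_timeout_handler(void *data)
+{
+	struct my_object *obj = data;
+
+	/* runs in the context of the wheel's pivot task */
+	(void)obj;
+}
+
+static int my_setup(struct my_object *obj)
+{
+	int err;
+
+	/* 100 ms horizon, 2^20 ns (~1 ms) per slot */
+	err = timerwheel_init(100000000ull, 20);
+	if (err)
+		return err;
+
+	timerwheel_init_timer(&obj->timer, my_timeout_handler, obj);
+
+	/* arm: fire roughly 10 ms from now */
+	return timerwheel_add_timer(&obj->timer, 10000000ull);
+}
+
+static void my_teardown(struct my_object *obj)
+{
+	/* waits until a possibly running handler has finished */
+	timerwheel_remove_timer_sync(&obj->timer);
+	timerwheel_cleanup();
+}
+#endif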
+++ linux-patched/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.c	2022-03-21 12:58:30.753875661 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/ipv4/tcp/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  ipv4/tcp/timerwheel.c - timerwheel implementation for RTnet
+ *
+ *  Copyright (C) 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License, version 2, as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include <rtdm/driver.h>
+#include "timerwheel.h"
+
+static struct {
+	/* timer pivot task */
+	rtdm_task_t pivot_task;
+
+	/* time length for one full rotation of the timerwheel */
+	nanosecs_rel_t timeout;
+
+	/* number of wheel slots, covering timeouts up to wheel.timeout */
+	unsigned int slots;
+
+	/* length of one wheel slot (2^interval_base nanoseconds) */
+	nanosecs_rel_t interval;
+
+	/* log2 of the slot length in nanoseconds */
+	unsigned int interval_base;
+
+	/* timerwheel array */
+	struct list_head *ring;
+
+	/* timerwheel slot counter */
+	unsigned int current_slot;
+
+	/* timerwheel current slot lock */
+	rtdm_lock_t slot_lock;
+} wheel;
+
+static struct timerwheel_timer *timerwheel_get_from_current_slot(void)
+{
+	struct timerwheel_timer *timer = NULL;
+	struct list_head *slot_list;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&wheel.slot_lock, context);
+
+	slot_list = &wheel.ring[wheel.current_slot];
+
+	if (!list_empty(slot_list)) {
+		timer = list_first_entry(slot_list, struct timerwheel_timer,
+					 link);
+		list_del(&timer->link);
+		timer->slot = TIMERWHEEL_TIMER_UNUSED;
+		timer->refcount++;
+	}
+
+	rtdm_lock_put_irqrestore(&wheel.slot_lock, context);
+
+	return timer;
+}
+
+int timerwheel_add_timer(struct timerwheel_timer *timer, nanosecs_rel_t expires)
+{
+	rtdm_lockctx_t context;
+	int slot;
+
+	slot = expires >> wheel.interval_base;
+
+	if (slot >= wheel.slots)
+		return -EINVAL;
+
+	rtdm_lock_get_irqsave(&wheel.slot_lock, context);
+
+	/* cancel timer if it's still running */
+	if (timer->slot >= 0)
+		list_del(&timer->link);
+
+	slot = slot + wheel.current_slot;
+	if (slot >= wheel.slots)
+		slot = slot - wheel.slots;
+
+	list_add_tail(&timer->link, &wheel.ring[slot]);
+	timer->slot = slot;
+
+	rtdm_lock_put_irqrestore(&wheel.slot_lock, context);
+
+	return 0;
+}
+
+static int timerwheel_sleep(void)
+{
+	int ret;
+
+	ret = rtdm_task_sleep(wheel.interval);
+	if (ret < 0)
+		return ret;
+
+	wheel.current_slot++;
+	if (wheel.current_slot == wheel.slots)
+		wheel.current_slot = 0;
+
+	return 0;
+}
+
+static void timerwheel_pivot(void *arg)
+{
+	struct timerwheel_timer *timer;
+	int ret;
+
+	while (1) {
+		ret = timerwheel_sleep();
+		if (ret < 0) {
+			rtdm_printk(
+				"timerwheel: timerwheel_pivot interrupted %d\n",
+				-ret);
+			break;
+		}
+
+		while ((timer = timerwheel_get_from_current_slot())) {
+			timer->handler(timer->data);
+
+			smp_mb();
+			timer->refcount--;
+		}
+	}
+}
+
+int timerwheel_remove_timer(struct timerwheel_timer *timer)
+{
+	rtdm_lockctx_t context;
+	int ret;
+
+	rtdm_lock_get_irqsave(&wheel.slot_lock, context);
+
+	if (timer->slot >= 0) {
+		list_del(&timer->link);
+		timer->slot = TIMERWHEEL_TIMER_UNUSED;
+		ret = 0;
+	} else
+		ret = -ENOENT;
+
+	rtdm_lock_put_irqrestore(&wheel.slot_lock, context);
+
+	return ret;
+}
+
+void timerwheel_remove_timer_sync(struct timerwheel_timer *timer)
+{
+	u64 interval_ms = wheel.interval;
+
+	do_div(interval_ms, 1000000);
+
+	timerwheel_remove_timer(timer);
+
+	while (timer->refcount > 0)
+		msleep(interval_ms);
+}
+
+/*
+  timeout     - maximum expiration timeout supported for queued timers
+  granularity - log2 of the length of one wheel tick in nanoseconds
+                (one slot spans 2^granularity ns)
+
+  (a short worked example follows the function body)
+*/
+int __init timerwheel_init(nanosecs_rel_t timeout, unsigned int granularity)
+{
+	int i;
+	int err;
+
+	/* reject slot granularities finer than 2^10 ns (~1 us) */
+	if (granularity < 10)
+		return -EINVAL;
+
+	wheel.timeout = timeout;
+	wheel.interval_base = granularity;
+	wheel.slots = (timeout >> granularity) + 1;
+	wheel.interval = (1 << granularity);
+	wheel.current_slot = 0;
+
+	wheel.ring =
+		kmalloc(sizeof(struct list_head) * wheel.slots, GFP_KERNEL);
+	if (!wheel.ring)
+		return -ENOMEM;
+
+	for (i = 0; i < wheel.slots; i++)
+		INIT_LIST_HEAD(&wheel.ring[i]);
+
+	rtdm_lock_init(&wheel.slot_lock);
+
+	err = rtdm_task_init(&wheel.pivot_task, "rttcp timerwheel",
+			     timerwheel_pivot, NULL, 1, 0);
+	if (err) {
+		printk("timerwheel: error on pivot task initialization: %d\n",
+		       err);
+		kfree(wheel.ring);
+	}
+
+	return err;
+}
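+
+/*
+ * Editorial note: a worked example of the arithmetic above, with purely
+ * illustrative parameter values. For timeout = 100 ms and granularity = 20
+ * (one slot = 2^20 ns, about 1.05 ms) the wheel ends up with
+ * (100000000 >> 20) + 1 = 96 slots, and timerwheel_add_timer() maps a
+ * relative expiry to a slot offset of expires >> granularity.
+ */
+#if 0 /* illustrative arithmetic only, kept out of the build */
+	nanosecs_rel_t timeout = 100000000ull;			/* 100 ms horizon */
+	unsigned int granularity = 20;				/* 2^20 ns per slot */
+	unsigned int slots = (timeout >> granularity) + 1;	/* 96 slots */
+	nanosecs_rel_t interval = 1 << granularity;		/* 1048576 ns per tick */
+	unsigned int offset = 10000000ull >> granularity;	/* 10 ms -> 9 slots ahead */
+#endif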
+
+void timerwheel_cleanup(void)
+{
+	rtdm_task_destroy(&wheel.pivot_task);
+	kfree(wheel.ring);
+}
+++ linux-patched/drivers/xenomai/net/stack/ipv4/tcp/Kconfig	2022-03-21 12:58:30.746875729 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/ipv4/tcp/Makefile	1970-01-01 01:00:00.000000000 +0100
+config XENO_DRIVERS_NET_RTIPV4_TCP
+    tristate "TCP support"
+    depends on XENO_DRIVERS_NET_RTIPV4
+    help
+    Enables TCP support of the RTnet Real-Time IPv4 protocol.
+
+    When RTnet IPv4 is enabled but this feature is disabled, TCP traffic
+    will be forwarded to the Linux network stack.
+
+config XENO_DRIVERS_NET_RTIPV4_TCP_ERROR_INJECTION
+    bool "TCP error injection"
+    depends on XENO_DRIVERS_NET_RTIPV4_TCP
+    help
+    Enables error injection for incoming TCP packets. This can be used
+    to test both protocol and application behavior under error
+    conditions. The per-socket error rate is 0 by default and can be
+    tuned during runtime via the error_rate and multi_error module
+    parameters.
+++ linux-patched/drivers/xenomai/net/stack/ipv4/tcp/Makefile	2022-03-21 12:58:30.739875797 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/ipv4/tcp/tcp.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP) += rttcp.o
+
+rttcp-y := \
+	tcp.o \
+	timerwheel.o
+++ linux-patched/drivers/xenomai/net/stack/ipv4/tcp/tcp.c	2022-03-21 12:58:30.731875875 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/corectl.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  ipv4/tcp/tcp.c - TCP implementation for RTnet
+ *
+ *  Copyright (C) 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License, version 2, as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <net/tcp_states.h>
+#include <net/tcp.h>
+
+#include <rtdm/driver.h>
+#include <rtnet_rtpc.h>
+#include <rtskb.h>
+#include <rtdev.h>
+#include <rtnet_port.h>
+#include <rtnet_checksum.h>
+#include <ipv4/tcp.h>
+#include <ipv4/ip_sock.h>
+#include <ipv4/ip_output.h>
+#include <ipv4/ip_fragment.h>
+#include <ipv4/route.h>
+#include <ipv4/af_inet.h>
+#include "timerwheel.h"
+
+static unsigned int close_timeout = 1000;
+module_param(close_timeout, uint, 0664);
+MODULE_PARM_DESC(close_timeout,
+		 "max time (ms) to wait during close for FIN-ACK handshake to complete, default 1000");
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP_ERROR_INJECTION
+
+static unsigned int error_rate;
+module_param(error_rate, uint, 0664);
+MODULE_PARM_DESC(error_rate, "simulate packet loss after every n packets");
+
+static unsigned int multi_error = 1;
+module_param(multi_error, uint, 0664);
+MODULE_PARM_DESC(multi_error, "on simulated error, drop n packets in a row");
+
+static unsigned int counter_start = 1234;
+module_param(counter_start, uint, 0664);
+MODULE_PARM_DESC(counter_start, "start value of per-socket packet counter "
+				"(used for error injection)");
+
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP_ERROR_INJECTION */
+
+struct tcp_sync {
+	u32 seq;
+	u32 ack_seq;
+
+	/* Local window size sent to peer  */
+	u16 window;
+	/* Last received destination peer window size */
+	u16 dst_window;
+};
+
+/*
+  connection timeout
+*/
+/* 1 second */
+static const nanosecs_rel_t rt_tcp_connection_timeout = 1000000000ull;
+
+/* retransmission timerwheel timeout */
+static const u64 rt_tcp_retransmit_timeout = 100000000ull;
+
+/*
+  keepalive constants
+*/
+/* 75 seconds */
+static const u64 rt_tcp_keepalive_intvl = 75000000000ull;
+/* 9 probes to send */
+static const u8 rt_tcp_keepalive_probes = 9;
+/* 2 hours */
+static const u64 rt_tcp_keepalive_timeout = 7200000000000ull;
+
+/*
+  retransmission timeout
+*/
+/* 50 milliseconds */
+static const nanosecs_rel_t rt_tcp_retransmission_timeout = 50000000ull;
+/*
+  maximum allowed number of retransmissions
+*/
+static const unsigned int max_retransmits = 3;
+
+struct tcp_keepalive {
+	u8 enabled;
+	u32 probes;
+	rtdm_timer_t timer;
+};
+
+/***
+ *  This structure is used to register a TCP socket for reception. All
+ *  structures are kept in the port_registry array to increase the cache
+ *  locality during the critical port lookup in rt_tcp_v4_lookup().
+ */
+
+/* if dport & daddr are zeroes, it means a listening socket */
+/* otherwise this is a data structure, which describes a connection */
+
+/* NB: sock->prot.inet.saddr & sock->prot.inet.sport values are not used */
+struct tcp_socket {
+	struct rtsocket sock; /* set up by rt_socket_init() implicitly */
+	u16 sport; /* local port */
+	u32 saddr; /* local ip-addr */
+	u16 dport; /* destination port */
+	u32 daddr; /* destination ip-addr */
+
+	u8 tcp_state; /* tcp connection state */
+
+	u8 is_binding; /* if set, tcp socket is in port binding progress */
+	u8 is_bound; /* if set, tcp socket is already port bound */
+	u8 is_valid; /* if set, read() and write() can process */
+	u8 is_accepting; /* if set, accept() is in progress */
+	u8 is_accepted; /* if set, accept() is already called */
+	u8 is_closed; /* close() call for resource deallocation follows */
+
+	rtdm_event_t send_evt; /* write request is permissible */
+	rtdm_event_t conn_evt; /* connection event */
+
+	struct dest_route rt;
+	struct tcp_sync sync;
+	struct tcp_keepalive keepalive;
+	rtdm_lock_t socket_lock;
+
+	struct hlist_node link;
+
+	nanosecs_rel_t sk_sndtimeo;
+
+	/* retransmission routine data */
+	u32 nacked_first;
+	unsigned int timer_state;
+	struct rtskb_queue retransmit_queue;
+	struct timerwheel_timer timer;
+
+	struct completion fin_handshake;
+	rtdm_nrtsig_t close_sig;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP_ERROR_INJECTION
+	unsigned int packet_counter;
+	unsigned int error_rate;
+	unsigned int multi_error;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP_ERROR_INJECTION */
+};
+
+struct rt_tcp_dispatched_packet_send_cmd {
+	__be32 flags; /* packet flags value */
+	struct tcp_socket *ts;
+};
+
+/***
+ *  Automatic port number assignment
+ *
+ *  The automatic assignment of port numbers to unbound sockets is realised as
+ *  a simple addition of two values:
+ *   - the socket index, allocated from port_bitmap during socket creation
+ *     and left unchanged afterwards
+ *   - the start value tcp_auto_port_start which is a module parameter
+ *
+ *  tcp_auto_port_mask, also a module parameter, is used to define the range of
+ *  port numbers which are used for automatic assignment. Any number within
+ *  this range will be rejected when passed to bind_rt().
+ *  (A short worked example follows this comment.)
+ */
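+/*
+ * Editorial sketch: a worked example of the assignment described above,
+ * assuming the default tcp_auto_port_start of 1024 and an illustrative
+ * RT_TCP_SOCKETS of 64 (so tcp_auto_port_mask = ~(64 - 1)). The variable
+ * names here are local to the example only.
+ */
+#if 0 /* illustrative arithmetic only, kept out of the build */
+	u32 start = 1024;		/* tcp_auto_port_start */
+	u32 mask  = ~(64u - 1);		/* tcp_auto_port_mask = 0xffffffc0 */
+	u32 index = 5;			/* free slot found in port_bitmap */
+	u32 sport = index + start;	/* auto-assigned local port 1029 */
+
+	/*
+	 * bind_rt() refuses any explicitly requested port p for which
+	 * (p & mask) == start, i.e. the whole auto range 1024..1087
+	 * stays reserved for automatic assignment.
+	 */
+#endif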
+
+MODULE_LICENSE("GPL");
+
+static struct {
+	struct rtdm_dev_context dummy;
+	struct tcp_socket rst_socket;
+} rst_socket_container;
+
+#define rst_fd (&rst_socket_container.dummy.fd)
+#define rst_socket (*(struct tcp_socket *)rtdm_fd_to_private(rst_fd))
+
+static u32 tcp_auto_port_start = 1024;
+static u32 tcp_auto_port_mask = ~(RT_TCP_SOCKETS - 1);
+static u32 free_ports = RT_TCP_SOCKETS;
+#define RT_PORT_BITMAP_WORDS                                                   \
+	((RT_TCP_SOCKETS + BITS_PER_LONG - 1) / BITS_PER_LONG)
+static unsigned long port_bitmap[RT_PORT_BITMAP_WORDS];
+
+static struct tcp_socket *port_registry[RT_TCP_SOCKETS];
+static DEFINE_RTDM_LOCK(tcp_socket_base_lock);
+
+static struct hlist_head port_hash[RT_TCP_SOCKETS * 2];
+#define port_hash_mask (RT_TCP_SOCKETS * 2 - 1)
+
+module_param(tcp_auto_port_start, uint, 0444);
+module_param(tcp_auto_port_mask, uint, 0444);
+MODULE_PARM_DESC(tcp_auto_port_start, "Start of automatically assigned "
+				      "port range for TCP");
+MODULE_PARM_DESC(tcp_auto_port_mask, "Mask that defines port range for TCP "
+				     "for automatic assignment");
+
+static inline struct tcp_socket *port_hash_search(u32 saddr, u16 sport)
+{
+	u32 bucket = sport & port_hash_mask;
+	struct tcp_socket *ts;
+
+	hlist_for_each_entry (ts, &port_hash[bucket], link)
+		if (ts->sport == sport &&
+		    (saddr == INADDR_ANY || ts->saddr == saddr ||
+		     ts->saddr == INADDR_ANY))
+			return ts;
+
+	return NULL;
+}
+
+static int port_hash_insert(struct tcp_socket *ts, u32 saddr, u16 sport)
+{
+	u32 bucket;
+
+	if (port_hash_search(saddr, sport))
+		return -EADDRINUSE;
+
+	bucket = sport & port_hash_mask;
+	ts->saddr = saddr;
+	ts->sport = sport;
+	ts->daddr = 0;
+	ts->dport = 0;
+
+	hlist_add_head(&ts->link, &port_hash[bucket]);
+
+	return 0;
+}
+
+static inline void port_hash_del(struct tcp_socket *ts)
+{
+	hlist_del(&ts->link);
+}
+
+/***
+ *  rt_tcp_v4_lookup
+ */
+static struct rtsocket *rt_tcp_v4_lookup(u32 daddr, u16 dport)
+{
+	rtdm_lockctx_t context;
+	struct tcp_socket *ts;
+	int ret;
+
+	rtdm_lock_get_irqsave(&tcp_socket_base_lock, context);
+	ts = port_hash_search(daddr, dport);
+
+	if (ts != NULL) {
+		ret = rt_socket_reference(&ts->sock);
+		if (ret == 0 || (ret == -EIDRM && ts->is_closed)) {
+			rtdm_lock_put_irqrestore(&tcp_socket_base_lock,
+						 context);
+
+			return &ts->sock;
+		}
+	}
+
+	rtdm_lock_put_irqrestore(&tcp_socket_base_lock, context);
+
+	return NULL;
+}
+
+/* test seq1 <= seq2 */
+static inline int rt_tcp_before(__u32 seq1, __u32 seq2)
+{
+	return (__s32)(seq1 - seq2) <= 0;
+}
+
+/* test seq1 >= seq2 */
+static inline int rt_tcp_after(__u32 seq1, __u32 seq2)
+{
+	return (__s32)(seq2 - seq1) <= 0;
+}
+
+static inline u32 rt_tcp_compute_ack_seq(struct tcphdr *th, u32 len)
+{
+	u32 ack_seq = ntohl(th->seq) + len;
+
+	if (unlikely(th->syn || th->fin))
+		ack_seq++;
+
+	return ack_seq;
+}
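+
+/*
+ * Editorial sketch: the two comparison helpers above implement serial-number
+ * (RFC 1982 style) arithmetic, so they keep working across the 32-bit
+ * sequence wrap. The values below are illustrative only.
+ */
+#if 0 /* illustrative check, kept out of the build */
+	u32 a = 0xfffffff0u;	/* sequence shortly before the wrap */
+	u32 b = 0x00000010u;	/* sequence shortly after the wrap  */
+
+	/* (__s32)(a - b) == -32 < 0, so a is still "before" b */
+	WARN_ON(!rt_tcp_before(a, b));
+	WARN_ON(!rt_tcp_after(b, a));
+
+	/* a pure ACK consumes no sequence space, SYN and FIN consume one,
+	   see rt_tcp_compute_ack_seq() above */
+#endif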
+
+static void rt_tcp_keepalive_start(struct tcp_socket *ts)
+{
+	if (ts->tcp_state == TCP_ESTABLISHED) {
+		rtdm_timer_start(&ts->keepalive.timer, rt_tcp_keepalive_timeout,
+				 0, RTDM_TIMERMODE_RELATIVE);
+	}
+}
+
+static void rt_tcp_keepalive_stop(struct tcp_socket *ts)
+{
+	if (ts->tcp_state == TCP_ESTABLISHED) {
+		rtdm_timer_stop(&ts->keepalive.timer);
+	}
+}
+
+#ifdef YET_UNUSED
+static void rt_tcp_keepalive_timer(rtdm_timer_t *timer);
+
+static void rt_tcp_keepalive_enable(struct tcp_socket *ts)
+{
+	rtdm_lockctx_t context;
+	struct tcp_keepalive *keepalive;
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	keepalive = &ts->keepalive;
+
+	if (keepalive->enabled) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return;
+	}
+
+	keepalive->probes = rt_tcp_keepalive_probes;
+
+	rtdm_timer_init(&keepalive->timer, rt_tcp_keepalive_timer,
+			"RT TCP keepalive timer");
+
+	rt_tcp_keepalive_start(ts);
+
+	keepalive->enabled = 1;
+
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+}
+#endif
+
+static void rt_tcp_keepalive_disable(struct tcp_socket *ts)
+{
+	struct tcp_keepalive *keepalive;
+
+	keepalive = &ts->keepalive;
+
+	if (!keepalive->enabled) {
+		return;
+	}
+
+	rt_tcp_keepalive_stop(ts);
+	rtdm_timer_destroy(&keepalive->timer);
+
+	keepalive->enabled = 0;
+}
+
+static void rt_tcp_keepalive_feed(struct tcp_socket *ts)
+{
+	rtdm_lockctx_t context;
+	struct tcp_keepalive *keepalive;
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	keepalive = &ts->keepalive;
+
+	if (ts->tcp_state == TCP_ESTABLISHED && ts->keepalive.enabled) {
+		keepalive->probes = rt_tcp_keepalive_probes;
+
+		/* Restart keepalive timer */
+		rtdm_timer_stop(&keepalive->timer);
+		rtdm_timer_start(&keepalive->timer, rt_tcp_keepalive_timeout, 0,
+				 RTDM_TIMERMODE_RELATIVE);
+
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+	} else {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+	}
+}
+
+static int rt_tcp_socket_invalidate(struct tcp_socket *ts, u8 to_state)
+{
+	int signal = ts->is_valid;
+
+	ts->tcp_state = to_state;
+
+	/*
+      multiple invalidation could happen without fuss,
+      see rt_tcp_close(), rt_tcp_rcv(), timeout expiration etc.
+    */
+	if (ts->is_valid) {
+		ts->is_valid = 0;
+
+		if (ts->keepalive.enabled) {
+			rt_tcp_keepalive_stop(ts);
+		}
+	}
+
+	return signal;
+}
+
+static void rt_tcp_socket_invalidate_signal(struct tcp_socket *ts)
+{
+	/* awake all readers and writers destroying events */
+	rtdm_sem_destroy(&ts->sock.pending_sem);
+	rtdm_event_destroy(&ts->send_evt);
+}
+
+static void rt_tcp_socket_validate(struct tcp_socket *ts)
+{
+	ts->tcp_state = TCP_ESTABLISHED;
+
+	ts->is_valid = 1;
+
+	if (ts->keepalive.enabled) {
+		rt_tcp_keepalive_start(ts);
+	}
+
+	rtdm_event_init(&ts->send_evt, 0);
+}
+
+/***
+ *  rt_tcp_retransmit_handler - timerwheel handler to process a retransmission
+ *  @data: pointer to a rttcp socket structure
+ */
+static void rt_tcp_retransmit_handler(void *data)
+{
+	struct tcp_socket *ts = (struct tcp_socket *)data;
+	struct rtskb *skb;
+	rtdm_lockctx_t context;
+	int signal;
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	if (unlikely(rtskb_queue_empty(&ts->retransmit_queue))) {
+		/* handled, but retransmission queue is empty */
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		rtdm_printk("rttcp: bug in RT TCP retransmission routine\n");
+		return;
+	}
+
+	if (ts->tcp_state == TCP_CLOSE) {
+		/* socket is already closed */
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return;
+	}
+
+	if (ts->timer_state) {
+		/* more tries */
+		ts->timer_state--;
+		timerwheel_add_timer(&ts->timer, rt_tcp_retransmission_timeout);
+
+		/* warning, rtskb_clone is under lock */
+		skb = rtskb_clone(ts->retransmit_queue.first,
+				  &ts->sock.skb_pool);
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+		/* BUG, window changes are not respected */
+		if (unlikely(rtdev_xmit(skb) != 0)) {
+			kfree_rtskb(skb);
+			rtdm_printk(
+				"rttcp: packet retransmission from timer failed\n");
+		}
+	} else {
+		ts->timer_state = max_retransmits;
+
+		/* report about connection lost */
+		signal = rt_tcp_socket_invalidate(ts, TCP_CLOSE);
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+		if (signal)
+			rt_tcp_socket_invalidate_signal(ts);
+
+		/* retransmission queue will be cleaned up in rt_tcp_socket_destruct */
+		rtdm_printk("rttcp: connection is lost by NACK timeout\n");
+	}
+}
+
+/***
+ *  rt_tcp_retransmit_ack - remove skbs from retransmission queue on ACK
+ *  @ts: rttcp socket
+ *  @ack_seq: received ACK sequence value
+ */
+static void rt_tcp_retransmit_ack(struct tcp_socket *ts, u32 ack_seq)
+{
+	struct rtskb *skb;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	/*
+      ACK, but retransmission queue is empty
+      This could happen on repeated ACKs
+    */
+	if (rtskb_queue_empty(&ts->retransmit_queue)) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return;
+	}
+
+	/*
+      Check ts->nacked_first value firstly to ensure that
+      skb for retransmission is present in the queue, otherwise
+      retransmission queue will be drained completely
+    */
+	if (!rt_tcp_before(ts->nacked_first, ack_seq)) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return;
+	}
+
+	if (timerwheel_remove_timer(&ts->timer) != 0) {
+		/* already timed out */
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return;
+	}
+
+dequeue_loop:
+	if (ts->tcp_state == TCP_CLOSE) {
+		/* bail out: whoever is closing the socket may drain
+	   the retransmission queue concurrently */
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return;
+	}
+
+	if ((skb = __rtskb_dequeue(&ts->retransmit_queue)) == NULL) {
+		ts->timer_state = max_retransmits;
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return;
+	}
+
+	if (rt_tcp_before(ts->nacked_first, ack_seq)) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		kfree_rtskb(skb);
+		rtdm_lock_get_irqsave(&ts->socket_lock, context);
+		goto dequeue_loop;
+	}
+
+	/* Put NACKed skb back to queue */
+	/* BUG, need to respect half-acknowledged packets */
+	ts->nacked_first = ntohl(skb->h.th->seq) + 1;
+
+	__rtskb_queue_head(&ts->retransmit_queue, skb);
+
+	/* Have more packages in retransmission queue, restart the timer */
+	timerwheel_add_timer(&ts->timer, rt_tcp_retransmission_timeout);
+
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+}
+
+/***
+ *  rt_tcp_retransmit_send - enqueue a skb to retransmission queue (not locked)
+ *  @ts: rttcp socket
+ *  @skb: a copied skb for enqueueing
+ */
+static void rt_tcp_retransmit_send(struct tcp_socket *ts, struct rtskb *skb)
+{
+	if (rtskb_queue_empty(&ts->retransmit_queue)) {
+		/* retransmission queue is empty */
+		ts->nacked_first = ntohl(skb->h.th->seq) + 1;
+
+		__rtskb_queue_tail(&ts->retransmit_queue, skb);
+
+		timerwheel_add_timer(&ts->timer, rt_tcp_retransmission_timeout);
+	} else {
+		/* retransmission queue is not empty */
+		__rtskb_queue_tail(&ts->retransmit_queue, skb);
+	}
+}
+
+static int rt_ip_build_frame(struct rtskb *skb, struct rtsocket *sk,
+			     struct dest_route *rt, struct iphdr *iph)
+{
+	int ret;
+	struct rtnet_device *rtdev = rt->rtdev;
+
+	RTNET_ASSERT(rtdev->hard_header, return -EBADF;);
+
+	if (!rtdev_reference(rt->rtdev))
+		return -EIDRM;
+
+	iph->ihl = 5; /* 20 byte header only - no TCP options */
+
+	skb->nh.iph = iph;
+
+	iph->version = 4;
+	iph->tos = sk->prot.inet.tos;
+	iph->tot_len = htons(skb->len); /* length of IP header and IP payload */
+	iph->id = htons(0x00); /* zero IP frame id */
+	iph->frag_off = htons(IP_DF); /* and no more frames */
+	iph->ttl = 255;
+	iph->protocol = sk->protocol;
+	iph->saddr = rtdev->local_ip;
+	iph->daddr = rt->ip;
+	iph->check = 0; /* required to compute correct checksum */
+	iph->check = ip_fast_csum((u8 *)iph, 5 /*iph->ihl*/);
+
+	ret = rtdev->hard_header(skb, rtdev, ETH_P_IP, rt->dev_addr,
+				 rtdev->dev_addr, skb->len);
+	rtdev_dereference(rt->rtdev);
+
+	if (ret != rtdev->hard_header_len) {
+		rtdm_printk("rttcp: rt_ip_build_frame: error on lower level\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void rt_tcp_build_header(struct tcp_socket *ts, struct rtskb *skb,
+				__be32 flags, u8 is_keepalive)
+{
+	u32 wcheck;
+	u8 tcphdrlen = 20;
+	u8 iphdrlen = 20;
+	struct tcphdr *th;
+
+	th = skb->h.th;
+	th->source = ts->sport;
+	th->dest = ts->dport;
+
+	th->seq = htonl(ts->sync.seq);
+
+	if (unlikely(is_keepalive))
+		th->seq--;
+
+	tcp_flag_word(th) = flags;
+	th->ack_seq = htonl(ts->sync.ack_seq);
+	th->window = htons(ts->sync.window);
+
+	th->doff = tcphdrlen >> 2; /* No options for now */
+	th->res1 = 0;
+	th->check = 0;
+	th->urg_ptr = 0;
+
+	/* compute checksum */
+	wcheck = rtnet_csum(th, tcphdrlen, 0);
+
+	if (skb->len - tcphdrlen - iphdrlen) {
+		wcheck = rtnet_csum(skb->data + tcphdrlen + iphdrlen,
+				      skb->len - tcphdrlen - iphdrlen, wcheck);
+	}
+
+	th->check =
+		tcp_v4_check(skb->len - iphdrlen, ts->saddr, ts->daddr, wcheck);
+}
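+
+/*
+ * Editorial sketch: rt_tcp_build_header() sums the TCP header and payload
+ * with rtnet_csum() and folds the IPv4 pseudo-header in via tcp_v4_check().
+ * The fragment below only illustrates the underlying RFC 1071 one's
+ * complement arithmetic on a self-contained buffer; it is not the driver's
+ * actual byte handling, and all names in it are local to the example.
+ */
+#if 0 /* illustrative checksum arithmetic, kept out of the build */
+static u16 example_csum_fold(u32 sum)
+{
+	while (sum >> 16)
+		sum = (sum & 0xffff) + (sum >> 16);
+	return (u16)~sum;
+}
+
+/* sum 16-bit big-endian words, as they appear on the wire */
+static u32 example_csum_words(const u8 *p, size_t len, u32 sum)
+{
+	while (len > 1) {
+		sum += ((u32)p[0] << 8) | p[1];
+		p += 2;
+		len -= 2;
+	}
+	if (len)
+		sum += (u32)p[0] << 8;
+	return sum;
+}
+
+/*
+ * pseudo[12] = saddr(4) | daddr(4) | zero(1) | proto(1) | tcp_len(2),
+ * all in network byte order; seg/len cover the TCP header plus payload
+ * with the checksum field set to zero beforehand.
+ */
+static u16 example_tcp_csum(const u8 pseudo[12], const u8 *seg, size_t len)
+{
+	u32 sum = example_csum_words(pseudo, 12, 0);
+
+	return example_csum_fold(example_csum_words(seg, len, sum));
+}
+#endif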
+
+static int rt_tcp_segment(struct dest_route *rt, struct tcp_socket *ts,
+			  __be32 flags, u32 data_len, u8 *data_ptr,
+			  u8 is_keepalive)
+{
+	struct tcphdr *th;
+	struct rtsocket *sk = &ts->sock;
+	struct rtnet_device *rtdev = rt->rtdev;
+	struct rtskb *skb;
+	struct iphdr *iph;
+	struct rtskb *cloned_skb;
+	rtdm_lockctx_t context;
+
+	int ret;
+
+	u32 hh_len = (rtdev->hard_header_len + 15) & ~15;
+	u32 prio = (volatile unsigned int)sk->priority;
+	u32 mtu = rtdev->get_mtu(rtdev, prio);
+
+	u8 *data = NULL;
+
+	if ((skb = alloc_rtskb(mtu + hh_len + 15, &sk->skb_pool)) == NULL) {
+		rtdm_printk(
+			"rttcp: no more elements in skb_pool for allocation\n");
+		return -ENOBUFS;
+	}
+
+	/* rtskb_reserve(skb, hh_len + 20); */
+	rtskb_reserve(skb, hh_len);
+
+	iph = (struct iphdr *)rtskb_put(skb, 20); /* length of IP header */
+	skb->nh.iph = iph;
+
+	th = (struct tcphdr *)rtskb_put(skb, 20); /* length of TCP header */
+	skb->h.th = th;
+
+	/* clamp the payload to the local phy MTU */
+	if (data_len > mtu)
+		data_len = mtu;
+
+	if (data_len) {
+		/* reserve room for the TCP payload and copy it in */
+		data = (u8 *)rtskb_put(skb, data_len);
+		memcpy(data, data_ptr, data_len);
+	}
+
+	skb->rtdev = rtdev;
+	skb->priority = prio;
+
+	/* do not validate the socket connection on xmit;
+       this should be done at the upper level */
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+	rt_tcp_build_header(ts, skb, flags, is_keepalive);
+
+	if ((ret = rt_ip_build_frame(skb, sk, rt, iph)) != 0) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		goto error;
+	}
+
+	/* add rtskb entry to the socket retransmission queue */
+	if (ts->tcp_state != TCP_CLOSE &&
+	    ((flags & (TCP_FLAG_SYN | TCP_FLAG_FIN)) || data_len)) {
+		/* rtskb_clone below is called under the lock; this is a
+	   compromise, because for now there is no rtskb copy by reference */
+		cloned_skb = rtskb_clone(skb, &ts->sock.skb_pool);
+		if (!cloned_skb) {
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			rtdm_printk("rttcp: can't clone skb\n");
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		rt_tcp_retransmit_send(ts, cloned_skb);
+	}
+
+	/* update sync here, before the packet actually leaves;
+       this avoids racing with a fast ACK response */
+	if (flags & (TCP_FLAG_FIN | TCP_FLAG_SYN))
+		ts->sync.seq++;
+
+	ts->sync.seq += data_len;
+	ts->sync.dst_window -= data_len;
+
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	/* ignore return value from rtdev_xmit */
+	/* the packet was enqueued and on error will be retransmitted later */
+	/* on a persistent error the retransmission timeout will eventually
+       declare the connection lost and close it */
+	rtdev_xmit(skb);
+
+	return data_len;
+
+error:
+	kfree_rtskb(skb);
+	return ret;
+}
+
+static int rt_tcp_send(struct tcp_socket *ts, __be32 flags)
+{
+	struct dest_route rt;
+	int ret;
+
+	/*
+     * We may not have a route yet during setup. But once it is set, it stays
+     * until the socket died.
+     */
+	if (likely(ts->rt.rtdev)) {
+		ret = rt_tcp_segment(&ts->rt, ts, flags, 0, NULL, 0);
+	} else {
+		ret = rt_ip_route_output(&rt, ts->daddr, ts->saddr);
+		if (ret == 0) {
+			ret = rt_tcp_segment(&rt, ts, flags, 0, NULL, 0);
+			rtdev_dereference(rt.rtdev);
+		}
+	}
+	if (ret < 0)
+		rtdm_printk("rttcp: can't send a packet: err %d\n", -ret);
+	return ret;
+}
+
+#ifdef YET_UNUSED
+static void rt_tcp_keepalive_timer(rtdm_timer_t *timer)
+{
+	rtdm_lockctx_t context;
+	struct tcp_keepalive *keepalive =
+		container_of(timer, struct tcp_keepalive, timer);
+
+	struct tcp_socket *ts =
+		container_of(keepalive, struct tcp_socket, keepalive);
+	int signal = 0;
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	if (keepalive->probes) {
+		/* Send a probe */
+		if (rt_tcp_segment(&ts->rt, ts, 0, 0, NULL, 1) < 0) {
+			/* data receiving and sending is not possible anymore */
+			signal = rt_tcp_socket_invalidate(ts, TCP_TIME_WAIT);
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		}
+
+		keepalive->probes--;
+		rtdm_timer_start_in_handler(&keepalive->timer,
+					    rt_tcp_keepalive_intvl, 0,
+					    RTDM_TIMERMODE_RELATIVE);
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+	} else {
+		/* data receiving and sending is not possible anymore */
+
+		signal = rt_tcp_socket_invalidate(ts, TCP_TIME_WAIT);
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+	}
+
+	if (signal)
+		rt_tcp_socket_invalidate_signal(ts);
+}
+#endif
+
+static inline u32 rt_tcp_initial_seq(void)
+{
+	uint64_t clock_val = rtdm_clock_read_monotonic();
+	return (u32)(clock_val ^ (clock_val >> 32));
+}
+
+/***
+ *  rt_tcp_dest_socket
+ */
+static struct rtsocket *rt_tcp_dest_socket(struct rtskb *skb)
+{
+	struct tcphdr *th = skb->h.th;
+
+	u32 saddr = skb->nh.iph->saddr;
+	u32 daddr = skb->nh.iph->daddr;
+	u32 sport = th->source;
+	u32 dport = th->dest;
+
+	u32 data_len;
+
+	if (tcp_v4_check(skb->len, saddr, daddr,
+			 rtnet_csum(skb->data, skb->len, 0))) {
+		rtdm_printk("rttcp: invalid TCP packet checksum, dropped\n");
+		return NULL; /* Invalid checksum, drop the packet */
+	}
+
+	/* find the destination socket */
+	if ((skb->sk = rt_tcp_v4_lookup(daddr, dport)) == NULL) {
+		/*
+	  rtdm_printk("Not found addr:0x%08x, port: 0x%04x\n", daddr, dport);
+	*/
+		if (!th->rst) {
+			/* No listening socket found, send RST|ACK */
+			rst_socket.saddr = daddr;
+			rst_socket.daddr = saddr;
+			rst_socket.sport = dport;
+			rst_socket.dport = sport;
+
+			data_len = skb->len - (th->doff << 2);
+
+			rst_socket.sync.seq = 0;
+			rst_socket.sync.ack_seq =
+				rt_tcp_compute_ack_seq(th, data_len);
+
+			if (rt_ip_route_output(&rst_socket.rt, daddr, saddr) ==
+			    0) {
+				rt_socket_reference(&rst_socket.sock);
+				rt_tcp_send(&rst_socket,
+					    TCP_FLAG_ACK | TCP_FLAG_RST);
+				rtdev_dereference(rst_socket.rt.rtdev);
+			}
+		}
+	}
+
+	return skb->sk;
+}
+
+static void rt_tcp_window_update(struct tcp_socket *ts, u16 window)
+{
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	if (ts->sync.dst_window) {
+		ts->sync.dst_window = window;
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		if (!window) {
+			/* clear send event status */
+			rtdm_event_clear(&ts->send_evt);
+		}
+	} else {
+		if (window) {
+			ts->sync.dst_window = window;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			/* set send event status */
+			rtdm_event_signal(&ts->send_evt);
+		} else {
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		}
+	}
+}
+
+/***
+ *  rt_tcp_rcv
+ */
+static void rt_tcp_rcv(struct rtskb *skb)
+{
+	rtdm_lockctx_t context;
+	struct tcp_socket *ts;
+	struct tcphdr *th = skb->h.th;
+	unsigned int data_len = skb->len - (th->doff << 2);
+	u32 seq = ntohl(th->seq);
+	int signal;
+
+	ts = container_of(skb->sk, struct tcp_socket, sock);
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP_ERROR_INJECTION
+	if (ts->error_rate > 0) {
+		if ((ts->packet_counter++ % ts->error_rate) < ts->multi_error) {
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			goto drop;
+		}
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP_ERROR_INJECTION */
+
+	/* Check for daddr/dport correspondence to values stored in
+       selected socket from hash */
+	if (ts->tcp_state != TCP_LISTEN && (ts->daddr != skb->nh.iph->saddr ||
+					    ts->dport != skb->h.th->source)) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		goto drop;
+	}
+
+	/* Check if it is a keepalive probe */
+	if (ts->sync.ack_seq == (seq + 1) && ts->tcp_state == TCP_ESTABLISHED) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		rt_tcp_send(ts, TCP_FLAG_ACK);
+		goto feed;
+	}
+
+	if (ts->tcp_state == TCP_SYN_SENT) {
+		ts->sync.ack_seq = rt_tcp_compute_ack_seq(th, data_len);
+
+		if (th->syn && th->ack) {
+			rt_tcp_socket_validate(ts);
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			rtdm_event_signal(&ts->conn_evt);
+			/* Send ACK */
+			rt_tcp_send(ts, TCP_FLAG_ACK);
+			goto feed;
+		}
+
+		ts->tcp_state = TCP_CLOSE;
+		ts->sync.seq = ntohl(th->ack_seq);
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+		/* Send RST|ACK */
+		rtdm_event_signal(&ts->conn_evt);
+		rt_tcp_send(ts, TCP_FLAG_RST | TCP_FLAG_ACK);
+		goto drop;
+	}
+
+	/* Check for SEQ correspondence to determine the connection relevance */
+
+	/* OR-list of conditions to be satisfied:
+     *
+     * th->ack && rt_tcp_after(ts->nacked_first, ntohl(th->ack_seq))
+     * th->ack && th->rst && ...
+     * th->syn && (ts->tcp_state == TCP_LISTEN ||
+		   ts->tcp_state == TCP_SYN_SENT)
+     * rt_tcp_after(seq, ts->sync.ack_seq) &&
+	   rt_tcp_before(seq, ts->sync.ack_seq + ts->sync.window)
+     */
+
+	if ((rt_tcp_after(seq, ts->sync.ack_seq) &&
+	     rt_tcp_before(seq, ts->sync.ack_seq + ts->sync.window)) ||
+	    th->rst ||
+	    (th->syn &&
+	     (ts->tcp_state == TCP_LISTEN || ts->tcp_state == TCP_SYN_SENT))) {
+		/* everything is ok */
+	} else if (rt_tcp_after(seq, ts->sync.ack_seq - data_len)) {
+		/* retransmission of data we already acked */
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		rt_tcp_send(ts, TCP_FLAG_ACK);
+		goto drop;
+	} else {
+		/* drop forward ack */
+		if (th->ack &&
+		    /* but reset ack from old connection */
+		    ts->tcp_state == TCP_ESTABLISHED) {
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			rtdm_printk(
+				"rttcp: dropped inappropriate ACK packet %u\n",
+				ts->sync.ack_seq);
+			goto drop;
+		}
+
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		rtdm_printk("rttcp: sequence number is not in window, "
+			    "dropped (failed: %u <= %u <= %u)\n",
+			    ts->sync.ack_seq, seq,
+			    ts->sync.ack_seq + ts->sync.window);
+
+		/* That's a forced RST for a lost connection */
+		rst_socket.saddr = skb->nh.iph->daddr;
+		rst_socket.daddr = skb->nh.iph->saddr;
+		rst_socket.sport = th->dest;
+		rst_socket.dport = th->source;
+
+		rst_socket.sync.seq = ntohl(th->ack_seq);
+		rst_socket.sync.ack_seq = rt_tcp_compute_ack_seq(th, data_len);
+
+		if (rt_ip_route_output(&rst_socket.rt, rst_socket.daddr,
+				       rst_socket.saddr) == 0) {
+			rt_socket_reference(&rst_socket.sock);
+			rt_tcp_send(&rst_socket, TCP_FLAG_RST | TCP_FLAG_ACK);
+			rtdev_dereference(rst_socket.rt.rtdev);
+		}
+		goto drop;
+	}
+
+	if (th->rst) {
+		if (ts->tcp_state == TCP_SYN_RECV) {
+			ts->tcp_state = TCP_LISTEN;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			goto drop;
+		} else {
+			/* Drop our half-open connection, peer obviously went away. */
+			signal = rt_tcp_socket_invalidate(ts, TCP_CLOSE);
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+			if (signal)
+				rt_tcp_socket_invalidate_signal(ts);
+
+			goto drop;
+		}
+	}
+
+	ts->sync.ack_seq = rt_tcp_compute_ack_seq(th, data_len);
+
+	if (th->fin) {
+		if (ts->tcp_state == TCP_ESTABLISHED) {
+			/* Send ACK */
+			signal = rt_tcp_socket_invalidate(ts, TCP_CLOSE_WAIT);
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+			if (signal)
+				rt_tcp_socket_invalidate_signal(ts);
+
+			rt_tcp_send(ts, TCP_FLAG_ACK);
+			goto feed;
+		} else if ((ts->tcp_state == TCP_FIN_WAIT1 && th->ack) ||
+			   ts->tcp_state == TCP_FIN_WAIT2) {
+			/* Send ACK */
+			ts->tcp_state = TCP_TIME_WAIT;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			rt_tcp_send(ts, TCP_FLAG_ACK);
+			/* data receiving is not possible anymore */
+			rtdm_sem_destroy(&ts->sock.pending_sem);
+			rtdm_nrtsig_pend(&ts->close_sig);
+			goto feed;
+		} else if (ts->tcp_state == TCP_FIN_WAIT1) {
+			/* Send ACK */
+			ts->tcp_state = TCP_CLOSING;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			rt_tcp_send(ts, TCP_FLAG_ACK);
+			/* data receiving is not possible anymore */
+			rtdm_sem_destroy(&ts->sock.pending_sem);
+			goto feed;
+		} else {
+			/* just drop it */
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			goto drop;
+		}
+	}
+
+	if (th->syn) {
+		/* Need to differentiate LISTEN socket from ESTABLISHED one */
+		/* Both of them have the same sport/saddr, but different dport/daddr */
+		/* dport is unknown if it is the first connection of n */
+
+		if (ts->tcp_state == TCP_LISTEN) {
+			/* Need to store ts->seq while sending SYN earlier */
+			/* The socket shall be in TCP_LISTEN state */
+
+			/* safe to update ts->saddr here due to a single task for
+	       rt_tcp_rcv() and rt_tcp_dest_socket() callers */
+			ts->saddr = skb->nh.iph->daddr;
+
+			ts->daddr = skb->nh.iph->saddr;
+			ts->dport = th->source;
+			ts->sync.seq = rt_tcp_initial_seq();
+			ts->sync.window = 4096;
+			ts->tcp_state = TCP_SYN_RECV;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+			/* Send SYN|ACK */
+			rt_tcp_send(ts, TCP_FLAG_SYN | TCP_FLAG_ACK);
+			goto drop;
+		}
+
+		/* Send RST|ACK */
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		rt_tcp_send(ts, TCP_FLAG_RST | TCP_FLAG_ACK);
+		goto drop;
+	}
+
+	/* ACK received without SYN, FIN or RST flags */
+	if (th->ack) {
+		/* Check ack sequence */
+		if (rt_tcp_before(ts->sync.seq + 1, ntohl(th->ack_seq))) {
+			rtdm_printk("rttcp: unexpected ACK %u %u %u\n",
+				    ts->sync.seq, ts->nacked_first,
+				    ntohl(th->ack_seq));
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			goto drop;
+		}
+
+		if (ts->tcp_state == TCP_LAST_ACK) {
+			/* close connection and free socket data */
+			ts->tcp_state = TCP_CLOSE;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			/* socket destruction will be done on close() */
+			rtdm_nrtsig_pend(&ts->close_sig);
+			goto drop;
+		} else if (ts->tcp_state == TCP_FIN_WAIT1) {
+			ts->tcp_state = TCP_FIN_WAIT2;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			goto feed;
+		} else if (ts->tcp_state == TCP_SYN_RECV) {
+			rt_tcp_socket_validate(ts);
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			rtdm_event_signal(&ts->conn_evt);
+			goto feed;
+		} else if (ts->tcp_state == TCP_CLOSING) {
+			ts->tcp_state = TCP_TIME_WAIT;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			/* socket destruction will be done on close() */
+			rtdm_nrtsig_pend(&ts->close_sig);
+			goto feed;
+		}
+	}
+
+	if (ts->tcp_state != TCP_ESTABLISHED) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		goto drop;
+	}
+
+	if (data_len == 0) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		goto feed;
+	}
+
+	/* Send ACK */
+	ts->sync.window -= data_len;
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+	rt_tcp_send(ts, TCP_FLAG_ACK);
+
+	rtskb_queue_tail(&skb->sk->incoming, skb);
+	rtdm_sem_up(&ts->sock.pending_sem);
+
+	/* inform retransmission subsystem about arrived ack */
+	if (th->ack) {
+		rt_tcp_retransmit_ack(ts, ntohl(th->ack_seq));
+	}
+
+	rt_tcp_keepalive_feed(ts);
+	rt_tcp_window_update(ts, ntohs(th->window));
+
+	return;
+
+feed:
+	/* inform retransmission subsystem about arrived ack */
+	if (th->ack) {
+		rt_tcp_retransmit_ack(ts, ntohl(th->ack_seq));
+	}
+
+	rt_tcp_keepalive_feed(ts);
+	rt_tcp_window_update(ts, ntohs(th->window));
+
+drop:
+	kfree_rtskb(skb);
+	return;
+}
+
+/***
+ *  rt_tcp_rcv_err
+ */
+static void rt_tcp_rcv_err(struct rtskb *skb)
+{
+	rtdm_printk("rttcp: rt_tcp_rcv err\n");
+}
+
+static int rt_tcp_window_send(struct tcp_socket *ts, u32 data_len, u8 *data_ptr)
+{
+	u32 dst_window = ts->sync.dst_window;
+	int ret;
+
+	if (data_len > dst_window)
+		data_len = dst_window;
+
+	if ((ret = rt_tcp_segment(&ts->rt, ts, TCP_FLAG_ACK, data_len, data_ptr,
+				  0)) < 0) {
+		rtdm_printk("rttcp: can't send a packet: err %d\n", -ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static void rt_tcp_close_signal_handler(rtdm_nrtsig_t *nrtsig, void *arg)
+{
+	complete_all((struct completion *)arg);
+}
+
+static int rt_tcp_socket_create(struct tcp_socket *ts)
+{
+	rtdm_lockctx_t context;
+	int i;
+	int index;
+	struct rtsocket *sock = &ts->sock;
+
+	sock->prot.inet.saddr = INADDR_ANY;
+	sock->prot.inet.state = TCP_CLOSE;
+	sock->prot.inet.tos = 0;
+	/*
+      rtdm_printk("rttcp: rt_tcp_socket_create 0x%p\n", ts);
+    */
+	rtdm_lock_init(&ts->socket_lock);
+
+	ts->rt.rtdev = NULL;
+
+	ts->tcp_state = TCP_CLOSE;
+
+	ts->is_accepting = 0;
+	ts->is_accepted = 0;
+	ts->is_binding = 0;
+	ts->is_bound = 0;
+	ts->is_valid = 0;
+	ts->is_closed = 0;
+
+	ts->sk_sndtimeo = RTDM_TIMEOUT_INFINITE;
+
+	rtdm_event_init(&ts->conn_evt, 0);
+
+	ts->keepalive.enabled = 0;
+
+	ts->timer_state = max_retransmits;
+	timerwheel_init_timer(&ts->timer, rt_tcp_retransmit_handler, ts);
+	rtskb_queue_init(&ts->retransmit_queue);
+
+	init_completion(&ts->fin_handshake);
+	rtdm_nrtsig_init(&ts->close_sig, rt_tcp_close_signal_handler,
+			 &ts->fin_handshake);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP_ERROR_INJECTION
+	ts->packet_counter = counter_start;
+	ts->error_rate = error_rate;
+	ts->multi_error = multi_error;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP_ERROR_INJECTION */
+
+	rtdm_lock_get_irqsave(&tcp_socket_base_lock, context);
+
+	/* enforce maximum number of TCP sockets */
+	if (free_ports == 0) {
+		rtdm_lock_put_irqrestore(&tcp_socket_base_lock, context);
+		rtdm_nrtsig_destroy(&ts->close_sig);
+		return -EAGAIN;
+	}
+	free_ports--;
+
+	/* find free auto-port in bitmap */
+	for (i = 0; i < RT_PORT_BITMAP_WORDS; i++)
+		if (port_bitmap[i] != (unsigned long)-1)
+			break;
+	index = ffz(port_bitmap[i]);
+	set_bit(index, &port_bitmap[i]);
+	index += i * BITS_PER_LONG;
+	sock->prot.inet.reg_index = index;
+	sock->prot.inet.sport = index + tcp_auto_port_start;
+
+	/* register TCP socket */
+	port_registry[index] = ts;
+	port_hash_insert(ts, INADDR_ANY, sock->prot.inet.sport);
+
+	rtdm_lock_put_irqrestore(&tcp_socket_base_lock, context);
+
+	return 0;
+}
+
+/***
+ *  rt_tcp_socket - create a new TCP-Socket
+ *  @s: socket
+ */
+static int rt_tcp_socket(struct rtdm_fd *fd)
+{
+	struct tcp_socket *ts = rtdm_fd_to_private(fd);
+	int ret;
+
+	if ((ret = rt_socket_init(fd, IPPROTO_TCP)) != 0)
+		return ret;
+
+	if ((ret = rt_tcp_socket_create(ts)) != 0)
+		rt_socket_cleanup(fd);
+
+	return ret;
+}
+
+static int rt_tcp_dispatched_packet_send(struct rt_proc_call *call)
+{
+	int ret;
+	struct rt_tcp_dispatched_packet_send_cmd *cmd;
+
+	cmd = rtpc_get_priv(call, struct rt_tcp_dispatched_packet_send_cmd);
+	ret = rt_tcp_send(cmd->ts, cmd->flags);
+
+	return ret;
+}
+
+/***
+ *  rt_tcp_socket_destruct
+ *  this function requires non realtime context
+ */
+static void rt_tcp_socket_destruct(struct tcp_socket *ts)
+{
+	rtdm_lockctx_t context;
+	struct rtskb *skb;
+	int index;
+	int signal;
+	struct rtsocket *sock = &ts->sock;
+
+	/*
+      rtdm_printk("rttcp: rt_tcp_socket_destruct 0x%p\n", ts);
+    */
+
+	rtdm_lock_get_irqsave(&tcp_socket_base_lock, context);
+	if (sock->prot.inet.reg_index >= 0) {
+		index = sock->prot.inet.reg_index;
+
+		clear_bit(index % BITS_PER_LONG,
+			  &port_bitmap[index / BITS_PER_LONG]);
+		port_hash_del(port_registry[index]);
+		free_ports++;
+		sock->prot.inet.reg_index = -1;
+	}
+	rtdm_lock_put_irqrestore(&tcp_socket_base_lock, context);
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	signal = rt_tcp_socket_invalidate(ts, TCP_CLOSE);
+
+	rt_tcp_keepalive_disable(ts);
+
+	sock->prot.inet.state = TCP_CLOSE;
+
+	/* dereference rtdev */
+	if (ts->rt.rtdev != NULL) {
+		rtdev_dereference(ts->rt.rtdev);
+		ts->rt.rtdev = NULL;
+	}
+
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	if (signal)
+		rt_tcp_socket_invalidate_signal(ts);
+
+	rtdm_event_destroy(&ts->conn_evt);
+
+	rtdm_nrtsig_destroy(&ts->close_sig);
+
+	/* cleanup already collected fragments */
+	rt_ip_frag_invalidate_socket(sock);
+
+	/* free packets in incoming queue */
+	while ((skb = rtskb_dequeue(&sock->incoming)) != NULL)
+		kfree_rtskb(skb);
+
+	/* ensure that the timer is no longer running */
+	timerwheel_remove_timer_sync(&ts->timer);
+
+	/* free packets in retransmission queue */
+	while ((skb = __rtskb_dequeue(&ts->retransmit_queue)) != NULL)
+		kfree_rtskb(skb);
+}
+
+/***
+ *  rt_tcp_close
+ */
+static void rt_tcp_close(struct rtdm_fd *fd)
+{
+	struct tcp_socket *ts = rtdm_fd_to_private(fd);
+	struct rt_tcp_dispatched_packet_send_cmd send_cmd;
+	rtdm_lockctx_t context;
+	int signal = 0;
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	ts->is_closed = 1;
+
+	if (ts->tcp_state == TCP_ESTABLISHED || ts->tcp_state == TCP_SYN_RECV) {
+		/* close() from ESTABLISHED */
+		send_cmd.ts = ts;
+		send_cmd.flags = TCP_FLAG_FIN | TCP_FLAG_ACK;
+		signal = rt_tcp_socket_invalidate(ts, TCP_FIN_WAIT1);
+
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+		rtpc_dispatch_call(rt_tcp_dispatched_packet_send, 0, &send_cmd,
+				   sizeof(send_cmd), NULL, NULL);
+		/* result is ignored */
+
+		/* Give the peer some time to reply to our FIN.
+		   Since it is not relevant what exactly causes the wait
+		   function to return, its result is ignored. */
+		wait_for_completion_interruptible_timeout(&ts->fin_handshake,
+					      msecs_to_jiffies(close_timeout));
+	} else if (ts->tcp_state == TCP_CLOSE_WAIT) {
+		/* Send FIN in CLOSE_WAIT */
+		send_cmd.ts = ts;
+		send_cmd.flags = TCP_FLAG_FIN | TCP_FLAG_ACK;
+		signal = rt_tcp_socket_invalidate(ts, TCP_LAST_ACK);
+
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+		rtpc_dispatch_call(rt_tcp_dispatched_packet_send, 0, &send_cmd,
+				   sizeof(send_cmd), NULL, NULL);
+		/* result is ignored */
+
+		/* Give the peer some time to reply to our FIN.
+		   Since it is not relevant what exactly causes the wait
+		   function to return, its result is ignored. */
+		wait_for_completion_interruptible_timeout(&ts->fin_handshake,
+					      msecs_to_jiffies(close_timeout));
+	} else {
+		/*
+	  Either rt_tcp_socket_validate() has never been called, so the
+	  socket state is TCP_SYN_SENT or TCP_LISTEN, or the socket is
+	  already in one of the closing states, so rt_tcp_socket_invalidate()
+	  has been called before and this is merely the first close().
+	*/
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+	}
+
+	if (signal)
+		rt_tcp_socket_invalidate_signal(ts);
+
+	rt_tcp_socket_destruct(ts);
+
+	rt_socket_cleanup(fd);
+}
+
+/***
+ *  rt_tcp_bind - bind socket to local address
+ *  @s:     socket
+ *  @addr:  local address
+ */
+static int rt_tcp_bind(struct rtdm_fd *fd, struct tcp_socket *ts,
+		       const struct sockaddr __user *addr, socklen_t addrlen)
+{
+	struct sockaddr_in *usin, _usin;
+	rtdm_lockctx_t context;
+	int index;
+	int bound = 0;
+	int ret = 0;
+
+	usin = rtnet_get_arg(fd, &_usin, addr, sizeof(_usin));
+	if (IS_ERR(usin))
+		return PTR_ERR(usin);
+
+	if ((addrlen < (int)sizeof(struct sockaddr_in)) ||
+	    ((usin->sin_port & tcp_auto_port_mask) == tcp_auto_port_start))
+		return -EINVAL;
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+	if (ts->tcp_state != TCP_CLOSE || ts->is_bound || ts->is_binding) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return -EINVAL;
+	}
+
+	ts->is_binding = 1;
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	rtdm_lock_get_irqsave(&tcp_socket_base_lock, context);
+
+	if ((index = ts->sock.prot.inet.reg_index) < 0) {
+		/* socket is destroyed */
+		ret = -EBADF;
+		goto unlock_out;
+	}
+
+	port_hash_del(ts);
+	if (port_hash_insert(ts, usin->sin_addr.s_addr,
+			     usin->sin_port ?: index + tcp_auto_port_start)) {
+		port_hash_insert(ts, ts->saddr, ts->sport);
+
+		ret = -EADDRINUSE;
+		goto unlock_out;
+	}
+
+	bound = 1;
+
+unlock_out:
+	rtdm_lock_put_irqrestore(&tcp_socket_base_lock, context);
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+	ts->is_bound = bound;
+	ts->is_binding = 0;
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	return ret;
+}
+
+/***
+ *  rt_tcp_connect
+ */
+static int rt_tcp_connect(struct rtdm_fd *fd, struct tcp_socket *ts,
+			  const struct sockaddr __user *serv_addr,
+			  socklen_t addrlen)
+{
+	struct sockaddr_in *usin, _usin;
+	struct dest_route rt;
+	rtdm_lockctx_t context;
+	int ret;
+
+	if (addrlen < (int)sizeof(struct sockaddr_in))
+		return -EINVAL;
+
+	usin = rtnet_get_arg(fd, &_usin, serv_addr, sizeof(_usin));
+	if (IS_ERR(usin))
+		return PTR_ERR(usin);
+
+	if (usin->sin_family != AF_INET)
+		return -EAFNOSUPPORT;
+
+	ret = rt_ip_route_output(&rt, usin->sin_addr.s_addr, ts->saddr);
+	if (ret < 0) {
+		/* no route to host */
+		return -ENETUNREACH;
+	}
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	if (ts->is_closed) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		ret = -EBADF;
+		goto err_deref;
+	}
+
+	if (ts->tcp_state != TCP_CLOSE || ts->is_binding) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		ret = -EINVAL;
+		goto err_deref;
+	}
+
+	if (ts->rt.rtdev == NULL)
+		memcpy(&ts->rt, &rt, sizeof(rt));
+	else
+		rtdev_dereference(rt.rtdev);
+
+	ts->saddr = rt.rtdev->local_ip;
+
+	ts->daddr = usin->sin_addr.s_addr;
+	ts->dport = usin->sin_port;
+
+	ts->sync.seq = rt_tcp_initial_seq();
+	ts->sync.ack_seq = 0;
+	ts->sync.window = 4096;
+	ts->sync.dst_window = 0;
+
+	ts->tcp_state = TCP_SYN_SENT;
+
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	/* Complete three-way handshake */
+	ret = rt_tcp_send(ts, TCP_FLAG_SYN);
+	if (ret < 0) {
+		rtdm_printk("rttcp: can't send SYN\n");
+		return ret;
+	}
+
+	ret = rtdm_event_timedwait(&ts->conn_evt, rt_tcp_connection_timeout,
+				   NULL);
+	if (unlikely(ret < 0))
+		switch (ret) {
+		case -EWOULDBLOCK:
+		case -ETIMEDOUT:
+		case -EINTR:
+			return ret;
+
+		default:
+			return -EBADF;
+		}
+
+	if (ts->tcp_state == TCP_SYN_SENT) {
+		/* received conn_evt, but connection is not established */
+		return -ECONNREFUSED;
+	}
+
+	return ret;
+
+err_deref:
+	rtdev_dereference(rt.rtdev);
+
+	return ret;
+}
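+
+/*
+ * Editorial sketch (not part of the driver): the client-side view of the
+ * call above from userspace. It assumes the program is built against
+ * Xenomai's libcobalt so that the standard socket calls are routed to
+ * RTnet; the address, port and payload are placeholders.
+ */
+#if 0 /* userspace usage sketch, kept out of the build */
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <string.h>
+#include <unistd.h>
+
+static int rt_tcp_client_example(void)
+{
+	struct sockaddr_in peer;
+	int fd, ret;
+
+	fd = socket(AF_INET, SOCK_STREAM, 0);
+	if (fd < 0)
+		return fd;
+
+	memset(&peer, 0, sizeof(peer));
+	peer.sin_family = AF_INET;
+	peer.sin_port = htons(12345);				/* placeholder */
+	peer.sin_addr.s_addr = inet_addr("192.168.0.2");	/* placeholder */
+
+	/* ends up in rt_tcp_connect(): send SYN, wait on conn_evt */
+	ret = connect(fd, (struct sockaddr *)&peer, sizeof(peer));
+	if (ret == 0)
+		ret = send(fd, "ping", 4, 0);
+
+	close(fd);	/* rt_tcp_close(): FIN handshake, bounded by close_timeout */
+	return ret;
+}
+#endif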
+
+/***
+ *  rt_tcp_listen
+ */
+static int rt_tcp_listen(struct tcp_socket *ts, unsigned long backlog)
+{
+	int ret;
+	rtdm_lockctx_t context;
+
+	/* Ignore backlog value, maximum number of queued connections is 1 */
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+	if (ts->is_closed) {
+		ret = -EBADF;
+		goto unlock_out;
+	}
+
+	if (ts->tcp_state != TCP_CLOSE || ts->is_binding) {
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+
+	ts->tcp_state = TCP_LISTEN;
+	ret = 0;
+
+unlock_out:
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	return ret;
+}
+
+/***
+ *  rt_tcp_accept
+ */
+static int rt_tcp_accept(struct rtdm_fd *fd, struct tcp_socket *ts,
+			 struct sockaddr *addr, socklen_t __user *addrlen)
+{
+	/* Return the peer's sockaddr together with a valid file descriptor
+       already set up by rt_socket_init(), so it can be used for
+       read/write afterwards */
+
+	int ret;
+	socklen_t *uaddrlen, _uaddrlen;
+	struct sockaddr_in sin;
+	nanosecs_rel_t timeout = ts->sock.timeout;
+	rtdm_lockctx_t context;
+	struct dest_route rt;
+
+	uaddrlen = rtnet_get_arg(fd, &_uaddrlen, addrlen, sizeof(_uaddrlen));
+	if (IS_ERR(uaddrlen))
+		return PTR_ERR(uaddrlen);
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+	if (ts->is_accepting || ts->is_accepted) {
+		/* socket is already accepted or is accepting a connection right now */
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return -EALREADY;
+	}
+
+	if (ts->tcp_state != TCP_LISTEN ||
+	    *uaddrlen < sizeof(struct sockaddr_in)) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return -EINVAL;
+	}
+
+	ts->is_accepting = 1;
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	ret = rtdm_event_timedwait(&ts->conn_evt, timeout, NULL);
+
+	if (unlikely(ret < 0))
+		switch (ret) {
+		case -ETIMEDOUT:
+		case -EINTR:
+			goto err;
+
+		default:
+			ret = -EBADF;
+			goto err;
+		}
+
+	/* accept() reported about connection establishment */
+	ret = rt_ip_route_output(&rt, ts->daddr, ts->saddr);
+	if (ret < 0) {
+		/* strange, no route to host, keep status quo */
+		ret = -EPROTO;
+		goto err;
+	}
+
+	if (addr) {
+		sin.sin_family = AF_INET;
+		sin.sin_port = ts->dport;
+		sin.sin_addr.s_addr = ts->daddr;
+		ret = rtnet_put_arg(fd, addr, &sin, sizeof(sin));
+		if (ret) {
+			rtdev_dereference(rt.rtdev);
+			ret = -EFAULT;
+			goto err;
+		}
+	}
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	if (ts->tcp_state != TCP_ESTABLISHED) {
+		/* protocol error */
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		rtdev_dereference(rt.rtdev);
+		ret = -EPROTO;
+		goto err;
+	}
+
+	if (ts->rt.rtdev == NULL)
+		memcpy(&ts->rt, &rt, sizeof(rt));
+	else
+		rtdev_dereference(rt.rtdev);
+
+	ts->is_accepted = 1;
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	ret = rtdm_fd_ufd(rt_socket_fd(&ts->sock));
+
+err:
+	/* it is not critical to leave this unlocked
+       due to single entry nature of accept() */
+	ts->is_accepting = 0;
+
+	return ret;
+}
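+
+/*
+ * Editorial sketch (not part of the driver): the matching server-side view
+ * from userspace, under the same libcobalt assumption as the client sketch
+ * above. Note that the implementation above effectively supports a single
+ * pending connection per listening socket (the backlog value is ignored)
+ * and appears to hand the listening descriptor itself back from accept();
+ * the port and buffer size below are placeholders.
+ */
+#if 0 /* userspace usage sketch, kept out of the build */
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <string.h>
+#include <unistd.h>
+
+static int rt_tcp_server_example(void)
+{
+	struct sockaddr_in local, peer;
+	socklen_t peerlen = sizeof(peer);
+	char buf[64];
+	int fd, conn;
+
+	fd = socket(AF_INET, SOCK_STREAM, 0);
+	if (fd < 0)
+		return fd;
+
+	memset(&local, 0, sizeof(local));
+	local.sin_family = AF_INET;
+	local.sin_port = htons(12345);		/* placeholder */
+	local.sin_addr.s_addr = htonl(INADDR_ANY);
+
+	if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0 ||
+	    listen(fd, 1) < 0) {
+		close(fd);
+		return -1;
+	}
+
+	conn = accept(fd, (struct sockaddr *)&peer, &peerlen);
+	if (conn >= 0)
+		recv(conn, buf, sizeof(buf), 0);
+
+	close(fd);
+	return 0;
+}
+#endif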
+
+/***
+ *  rt_tcp_shutdown
+ */
+static int rt_tcp_shutdown(struct tcp_socket *ts, unsigned long how)
+{
+	return -EOPNOTSUPP;
+}
+
+/***
+ *  rt_tcp_setsockopt
+ */
+static int rt_tcp_setsockopt(struct rtdm_fd *fd, struct tcp_socket *ts,
+			     int level, int optname, const void *optval,
+			     socklen_t optlen)
+{
+	/* uint64_t val; */
+	struct __kernel_old_timeval tv;
+	rtdm_lockctx_t context;
+
+	switch (optname) {
+	case SO_KEEPALIVE:
+		if (optlen < sizeof(unsigned int))
+			return -EINVAL;
+
+		/* commented out, because current implementation transmits
+	       keepalive probes from interrupt context */
+		/*
+	    val = *(unsigned long*)optval;
+
+	    if (val)
+		rt_tcp_keepalive_enable(ts);
+	    else
+		rt_tcp_keepalive_disable(ts);
+	    */
+		return 0;
+
+	case SO_SNDTIMEO_OLD:
+		if (optlen < sizeof(tv))
+			return -EINVAL;
+		if (rtdm_copy_from_user(fd, &tv, optval, sizeof(tv)))
+			return -EFAULT;
+		if (tv.tv_usec < 0 || tv.tv_usec >= 1000000)
+			return -EDOM;
+
+		rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+		if (tv.tv_sec < 0) {
+			ts->sk_sndtimeo = RTDM_TIMEOUT_NONE;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			return 0;
+		}
+
+		ts->sk_sndtimeo = RTDM_TIMEOUT_INFINITE;
+		if (tv.tv_sec == 0 && tv.tv_usec == 0) {
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			return 0;
+		}
+
+		if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / 1000000000ull - 1))
+			ts->sk_sndtimeo =
+				(tv.tv_sec * 1000000 + tv.tv_usec) * 1000;
+
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+		return 0;
+
+	case SO_REUSEADDR:
+		/* not implemented yet */
+		return -EOPNOTSUPP;
+	}
+
+	return -ENOPROTOOPT;
+}
+
+/***
+ *  rt_tcp_getsockopt
+ */
+static int rt_tcp_getsockopt(struct rtdm_fd *fd, struct tcp_socket *ts,
+			     int level, int optname, void *optval,
+			     socklen_t *optlen)
+{
+	int ret = 0;
+
+	if (*optlen < sizeof(unsigned int))
+		return -EINVAL;
+
+	switch (optname) {
+	case SO_ERROR:
+		ret = 0; /* used in nonblocking connect(), extend later */
+		break;
+
+	default:
+		ret = -ENOPROTOOPT;
+		break;
+	}
+
+	return ret;
+}
+
+/***
+ *  rt_tcp_ioctl
+ */
+static int rt_tcp_ioctl(struct rtdm_fd *fd, unsigned int request,
+			void __user *arg)
+{
+	struct tcp_socket *ts = rtdm_fd_to_private(fd);
+	const struct _rtdm_setsockaddr_args *setaddr;
+	struct _rtdm_setsockaddr_args _setaddr;
+	const struct _rtdm_getsockaddr_args *getaddr;
+	struct _rtdm_getsockaddr_args _getaddr;
+	const struct _rtdm_getsockopt_args *getopt;
+	struct _rtdm_getsockopt_args _getopt;
+	const struct _rtdm_setsockopt_args *setopt;
+	struct _rtdm_setsockopt_args _setopt;
+	int in_rt;
+
+	/* fast path for common socket IOCTLs */
+	if (_IOC_TYPE(request) == RTIOC_TYPE_NETWORK)
+		return rt_socket_common_ioctl(fd, request, arg);
+
+	in_rt = rtdm_in_rt_context();
+
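+	/* blocking calls (connect/accept) must be issued from RT context */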
+	switch (request) {
+	case _RTIOC_BIND:
+		setaddr = rtnet_get_arg(fd, &_setaddr, arg, sizeof(_setaddr));
+		if (IS_ERR(setaddr))
+			return PTR_ERR(setaddr);
+		return rt_tcp_bind(fd, ts, setaddr->addr, setaddr->addrlen);
+	case _RTIOC_CONNECT:
+		if (!in_rt)
+			return -ENOSYS;
+		setaddr = rtnet_get_arg(fd, &_setaddr, arg, sizeof(_setaddr));
+		if (IS_ERR(setaddr))
+			return PTR_ERR(setaddr);
+		return rt_tcp_connect(fd, ts, setaddr->addr, setaddr->addrlen);
+
+	case _RTIOC_LISTEN:
+		return rt_tcp_listen(ts, (unsigned long)arg);
+
+	case _RTIOC_ACCEPT:
+		if (!in_rt)
+			return -ENOSYS;
+		getaddr = rtnet_get_arg(fd, &_getaddr, arg, sizeof(_getaddr));
+		if (IS_ERR(getaddr))
+			return PTR_ERR(getaddr);
+		return rt_tcp_accept(fd, ts, getaddr->addr, getaddr->addrlen);
+
+	case _RTIOC_SHUTDOWN:
+		return rt_tcp_shutdown(ts, (unsigned long)arg);
+
+	case _RTIOC_SETSOCKOPT:
+		setopt = rtnet_get_arg(fd, &_setopt, arg, sizeof(_setopt));
+		if (IS_ERR(setopt))
+			return PTR_ERR(setopt);
+
+		if (setopt->level != SOL_SOCKET)
+			break;
+
+		return rt_tcp_setsockopt(fd, ts, setopt->level, setopt->optname,
+					 setopt->optval, setopt->optlen);
+
+	case _RTIOC_GETSOCKOPT:
+		getopt = rtnet_get_arg(fd, &_getopt, arg, sizeof(_getopt));
+		if (IS_ERR(getopt))
+			return PTR_ERR(getopt);
+
+		if (getopt->level != SOL_SOCKET)
+			break;
+
+		return rt_tcp_getsockopt(fd, ts, getopt->level, getopt->optname,
+					 getopt->optval, getopt->optlen);
+	default:
+		break;
+	}
+
+	return rt_ip_ioctl(fd, request, arg);
+}
+
+/***
+ *  rt_tcp_read
+ */
+static ssize_t rt_tcp_read(struct rtdm_fd *fd, void *buf, size_t nbyte)
+{
+	struct tcp_socket *ts = rtdm_fd_to_private(fd);
+	struct rtsocket *sock = &ts->sock;
+
+	struct rtskb *skb;
+	struct rtskb *first_skb;
+	nanosecs_rel_t timeout = sock->timeout;
+	size_t data_len;
+	size_t th_len;
+	size_t copied = 0;
+	size_t block_size;
+	u8 *user_buf = buf;
+	int ret;
+	rtdm_lockctx_t context;
+
+	rtdm_toseq_t timeout_seq;
+
+	if (!rtdm_fd_is_user(fd)) {
+		return -EFAULT;
+	}
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	if (ts->is_closed) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return -EBADF;
+	}
+
+	if (!ts->is_valid) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return 0;
+	}
+
+	if (ts->tcp_state != TCP_ESTABLISHED &&
+	    ts->tcp_state != TCP_FIN_WAIT2) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return -EINVAL;
+	}
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	rtdm_toseq_init(&timeout_seq, timeout);
+
+	while (copied < nbyte) {
+		ret = rtdm_sem_timeddown(&ts->sock.pending_sem, timeout,
+					 &timeout_seq);
+
+		if (unlikely(ret < 0))
+			switch (ret) {
+			case -EWOULDBLOCK:
+			case -ETIMEDOUT:
+			case -EINTR:
+				return (copied ? copied : ret);
+
+			case -EIDRM: /* event is destroyed */
+				if (ts->is_closed)
+					return -EBADF;
+
+				return copied;
+
+			default:
+				if (ts->is_closed) {
+					return -EBADF;
+				}
+
+				return 0;
+			}
+
+		skb = rtskb_dequeue_chain(&sock->incoming);
+		RTNET_ASSERT(skb != NULL, return -EFAULT;);
+
+		th_len = (skb->h.th->doff) << 2;
+
+		data_len = skb->len - th_len;
+
+		__rtskb_pull(skb, th_len);
+
+		first_skb = skb;
+
+		/* iterate over all IP fragments */
+	iterate_fragments:
+		block_size = skb->len;
+		copied += block_size;
+		data_len -= block_size;
+
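+		/*
+		 * If this fragment carries more data than requested, copy
+		 * only the missing part, then push a minimal TCP header back
+		 * onto the chain and re-queue it so that the remaining bytes
+		 * can be picked up by a subsequent read.
+		 */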
+		if (copied > nbyte) {
+			block_size -= copied - nbyte;
+			copied = nbyte;
+
+			if (rtdm_copy_to_user(fd, user_buf, skb->data,
+					      block_size)) {
+				kfree_rtskb(first_skb); /* or store the data? */
+				return -EFAULT;
+			}
+			rtdm_lock_get_irqsave(&ts->socket_lock, context);
+			if (ts->sync.window) {
+				ts->sync.window += block_size;
+				rtdm_lock_put_irqrestore(&ts->socket_lock,
+							 context);
+			} else {
+				ts->sync.window = block_size;
+				rtdm_lock_put_irqrestore(&ts->socket_lock,
+							 context);
+				rt_tcp_send(ts,
+					    TCP_FLAG_ACK); /* window update */
+			}
+
+			__rtskb_pull(skb, block_size);
+			__rtskb_push(first_skb, sizeof(struct tcphdr));
+			first_skb->h.th->doff = 5;
+			rtskb_queue_head(&sock->incoming, first_skb);
+			rtdm_sem_up(&ts->sock.pending_sem);
+
+			return copied;
+		}
+
+		if (rtdm_copy_to_user(fd, user_buf, skb->data, block_size)) {
+			kfree_rtskb(first_skb); /* or store the data? */
+			return -EFAULT;
+		}
+		rtdm_lock_get_irqsave(&ts->socket_lock, context);
+		if (ts->sync.window) {
+			ts->sync.window += block_size;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		} else {
+			ts->sync.window = block_size;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			rt_tcp_send(ts, TCP_FLAG_ACK); /* window update */
+		}
+
+		if ((skb = skb->next) != NULL) {
+			user_buf += data_len;
+			goto iterate_fragments;
+		}
+
+		kfree_rtskb(first_skb);
+	}
+
+	return copied;
+}
+
+/***
+ *  rt_tcp_write
+ */
+static ssize_t rt_tcp_write(struct rtdm_fd *fd, const void __user *user_buf,
+			    size_t nbyte)
+{
+	struct tcp_socket *ts = rtdm_fd_to_private(fd);
+	uint32_t sent_len = 0;
+	rtdm_lockctx_t context;
+	int ret = 0;
+	nanosecs_rel_t sk_sndtimeo;
+	void *buf;
+
+	if (!rtdm_fd_is_user(fd)) {
+		return -EFAULT;
+	}
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	sk_sndtimeo = ts->sk_sndtimeo;
+
+	if (!ts->is_valid) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return -EPIPE;
+	}
+
+	if ((ts->daddr | ts->dport) == 0 || ts->tcp_state != TCP_ESTABLISHED) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return -EINVAL;
+	}
+
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	buf = xnmalloc(nbyte);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	ret = rtdm_copy_from_user(fd, buf, user_buf, nbyte);
+	if (ret) {
+		xnfree(buf);
+		return ret;
+	}
+
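+	/*
+	 * Send in chunks: wait on send_evt before each attempt, hand the
+	 * next portion to rt_tcp_window_send(), and re-signal the event as
+	 * long as the peer still advertises window space (sync.dst_window).
+	 */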
+	while (sent_len < nbyte) {
+		ret = rtdm_event_timedwait(&ts->send_evt, sk_sndtimeo, NULL);
+
+		if (unlikely(ret < 0))
+			switch (ret) {
+			case -EWOULDBLOCK:
+			case -ETIMEDOUT:
+			case -EINTR:
+				xnfree(buf);
+				return sent_len ?: ret;
+
+			case -EIDRM: /* event is destroyed */
+			default:
+				if (ts->is_closed) {
+					xnfree(buf);
+					return -EBADF;
+				}
+
+				xnfree(buf);
+				return sent_len ?: ret;
+			}
+
+		ret = rt_tcp_window_send(ts, nbyte - sent_len,
+					 ((u8 *)buf) + sent_len);
+
+		if (ret < 0) { /* TODO: verify the correctness of this branch */
+			rtdm_event_signal(&ts->send_evt);
+			break;
+		}
+
+		sent_len += ret;
+		if (ts->sync.dst_window)
+			rtdm_event_signal(&ts->send_evt);
+	}
+
+	xnfree(buf);
+	return (ret < 0 ? ret : sent_len);
+}
+
+/***
+ *  rt_tcp_recvmsg
+ */
+static ssize_t rt_tcp_recvmsg(struct rtdm_fd *fd, struct user_msghdr *msg,
+			      int msg_flags)
+{
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+	ssize_t ret;
+	size_t len;
+	void *buf;
+
+	if (msg_flags)
+		return -EOPNOTSUPP;
+
+	/* looping over all iovec entries remains to be implemented */
+	if (msg->msg_iovlen != 1)
+		return -EOPNOTSUPP;
+
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	len = iov[0].iov_len;
+	if (len > 0) {
+		buf = iov[0].iov_base;
+		ret = rt_tcp_read(fd, buf, len);
+	}
+
+	rtdm_drop_iovec(iov, iov_fast);
+
+	return ret;
+}
+
+/***
+ *  rt_tcp_sendmsg
+ */
+static ssize_t rt_tcp_sendmsg(struct rtdm_fd *fd, const struct user_msghdr *msg,
+			      int msg_flags)
+{
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+	ssize_t ret;
+	size_t len;
+
+	if (msg_flags)
+		return -EOPNOTSUPP;
+
+	/* looping over all iovec entries remains to be implemented */
+	if (msg->msg_iovlen != 1)
+		return -EOPNOTSUPP;
+
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	len = iov[0].iov_len;
+	if (len > 0)
+		ret = rt_tcp_write(fd, iov[0].iov_base, len);
+
+	rtdm_drop_iovec(iov, iov_fast);
+
+	return ret;
+}
+
+/***
+ *  rt_tcp_select
+ */
+static int rt_tcp_select(struct rtdm_fd *fd, rtdm_selector_t *selector,
+			 enum rtdm_selecttype type, unsigned fd_index)
+{
+	struct tcp_socket *ts = rtdm_fd_to_private(fd);
+
+	switch (type) {
+	case XNSELECT_READ:
+		return rtdm_sem_select(&ts->sock.pending_sem, selector,
+				       XNSELECT_READ, fd_index);
+	case XNSELECT_WRITE:
+		return rtdm_event_select(&ts->send_evt, selector,
+					 XNSELECT_WRITE, fd_index);
+	default:
+		return -EBADF;
+	}
+
+	return -EINVAL;
+}
+
+/***
+ *  TCP initialization
+ */
+static struct rtinet_protocol tcp_protocol = { .protocol = IPPROTO_TCP,
+					       .dest_socket =
+						       &rt_tcp_dest_socket,
+					       .rcv_handler = &rt_tcp_rcv,
+					       .err_handler = &rt_tcp_rcv_err,
+					       .init_socket = &rt_tcp_socket };
+
+static struct rtdm_driver tcp_driver = {
+    .profile_info =     RTDM_PROFILE_INFO(tcp,
+					RTDM_CLASS_NETWORK,
+					RTDM_SUBCLASS_RTNET,
+					RTNET_RTDM_VER),
+    .device_flags =     RTDM_PROTOCOL_DEVICE,
+    .device_count =	1,
+    .context_size =     sizeof(struct tcp_socket),
+
+    .protocol_family =  PF_INET,
+    .socket_type =      SOCK_STREAM,
+
+    .ops = {
+	.socket     =   rt_inet_socket,
+	.close      =   rt_tcp_close,
+	.ioctl_rt   =   rt_tcp_ioctl,
+	.ioctl_nrt  =   rt_tcp_ioctl,
+	.read_rt    =   rt_tcp_read,
+	.write_rt   =   rt_tcp_write,
+	.recvmsg_rt =   rt_tcp_recvmsg,
+	.sendmsg_rt =   rt_tcp_sendmsg,
+	.select     =   rt_tcp_select,
+    },
+};
+
+static struct rtdm_device tcp_device = {
+	.driver = &tcp_driver,
+	.label = "tcp",
+};
+
+#ifdef CONFIG_XENO_OPT_VFILE
+/***
+ *  rt_tcp_proc_read
+ */
+static inline char *rt_tcp_string_of_state(u8 state)
+{
+	switch (state) {
+	case TCP_ESTABLISHED:
+		return "ESTABLISHED";
+	case TCP_SYN_SENT:
+		return "SYN_SENT";
+	case TCP_SYN_RECV:
+		return "SYN_RECV";
+	case TCP_FIN_WAIT1:
+		return "FIN_WAIT1";
+	case TCP_FIN_WAIT2:
+		return "FIN_WAIT2";
+	case TCP_TIME_WAIT:
+		return "TIME_WAIT";
+	case TCP_CLOSE:
+		return "CLOSE";
+	case TCP_CLOSE_WAIT:
+		return "CLOSE_WAIT";
+	case TCP_LAST_ACK:
+		return "LAST_ACK";
+	case TCP_LISTEN:
+		return "LISTEN";
+	case TCP_CLOSING:
+		return "CLOSING";
+	default:
+		return "UNKNOWN";
+	}
+}
+
+static int rtnet_ipv4_tcp_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	rtdm_lockctx_t context;
+	struct tcp_socket *ts;
+	u32 saddr, daddr;
+	u16 sport = 0, dport = 0; /* set to 0 to silence compiler */
+	char sbuffer[24];
+	char dbuffer[24];
+	int state;
+	int index;
+
+	xnvfile_printf(it, "Hash    Local Address           "
+			   "Foreign Address         State\n");
+
+	for (index = 0; index < RT_TCP_SOCKETS; index++) {
+		rtdm_lock_get_irqsave(&tcp_socket_base_lock, context);
+
+		ts = port_registry[index];
+		state = ts ? ts->tcp_state : TCP_CLOSE;
+
+		if (ts && ts->tcp_state != TCP_CLOSE) {
+			saddr = ts->saddr;
+			sport = ts->sport;
+			daddr = ts->daddr;
+			dport = ts->dport;
+		}
+
+		rtdm_lock_put_irqrestore(&tcp_socket_base_lock, context);
+
+		if (state != TCP_CLOSE) {
+			snprintf(sbuffer, sizeof(sbuffer), "%u.%u.%u.%u:%u",
+				 NIPQUAD(saddr), ntohs(sport));
+			snprintf(dbuffer, sizeof(dbuffer), "%u.%u.%u.%u:%u",
+				 NIPQUAD(daddr), ntohs(dport));
+
+			xnvfile_printf(it, "%04X    %-23s %-23s %s\n",
+				       sport & port_hash_mask, sbuffer, dbuffer,
+				       rt_tcp_string_of_state(state));
+		}
+	}
+
+	return 0;
+}
+
+static struct xnvfile_regular_ops rtnet_ipv4_tcp_vfile_ops = {
+	.show = rtnet_ipv4_tcp_show,
+};
+
+static struct xnvfile_regular rtnet_ipv4_tcp_vfile = {
+	.ops = &rtnet_ipv4_tcp_vfile_ops,
+};
+
+/***
+ *  rt_tcp_proc_register
+ */
+static int __init rt_tcp_proc_register(void)
+{
+	return xnvfile_init_regular("tcp", &rtnet_ipv4_tcp_vfile,
+				    &ipv4_proc_root);
+}
+
+/***
+ *  rt_tcp_proc_unregister
+ */
+
+static void rt_tcp_proc_unregister(void)
+{
+	xnvfile_destroy_regular(&rtnet_ipv4_tcp_vfile);
+}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+/***
+ *  rt_tcp_init
+ */
+int __init rt_tcp_init(void)
+{
+	unsigned int skbs;
+	int i;
+	int ret;
+
+	if ((tcp_auto_port_start < 0) ||
+	    (tcp_auto_port_start >= 0x10000 - RT_TCP_SOCKETS))
+		tcp_auto_port_start = 1024;
+	tcp_auto_port_start =
+		htons(tcp_auto_port_start & (tcp_auto_port_mask & 0xFFFF));
+	tcp_auto_port_mask = htons(tcp_auto_port_mask | 0xFFFF0000);
+
+	for (i = 0; i < ARRAY_SIZE(port_hash); i++)
+		INIT_HLIST_HEAD(&port_hash[i]);
+
+	/* Perform essential initialization of the RST|ACK socket */
+	skbs = rt_bare_socket_init(rst_fd, IPPROTO_TCP, RT_TCP_RST_PRIO,
+				   RT_TCP_RST_POOL_SIZE);
+	if (skbs < RT_TCP_RST_POOL_SIZE)
+		printk("rttcp: allocated only %d RST|ACK rtskbs\n", skbs);
+	rst_socket.sock.prot.inet.tos = 0;
+	rst_fd->refs = 1;
+	rtdm_lock_init(&rst_socket.socket_lock);
+
+	/*
+     * 100 ms forwarding timer with 8.38 ms slots
+     */
+	ret = timerwheel_init(100000000ull, 23);
+	if (ret < 0) {
+		rtdm_printk("rttcp: can't initialize timerwheel task: %d\n",
+			    -ret);
+		goto out_1;
+	}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	if ((ret = rt_tcp_proc_register()) < 0) {
+		rtdm_printk("rttcp: can't initialize proc entry: %d\n", -ret);
+		goto out_2;
+	}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	rt_inet_add_protocol(&tcp_protocol);
+
+	ret = rtdm_dev_register(&tcp_device);
+	if (ret < 0) {
+		rtdm_printk("rttcp: can't register RT TCP: %d\n", -ret);
+		goto out_3;
+	}
+
+	return ret;
+
+out_3:
+	rt_inet_del_protocol(&tcp_protocol);
+#ifdef CONFIG_XENO_OPT_VFILE
+	rt_tcp_proc_unregister();
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+out_2:
+	timerwheel_cleanup();
+
+out_1:
+	rt_bare_socket_cleanup(&rst_socket.sock);
+
+	return ret;
+}
+
+/***
+ *  rt_tcp_release
+ */
+void __exit rt_tcp_release(void)
+{
+	rt_inet_del_protocol(&tcp_protocol);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	rt_tcp_proc_unregister();
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	timerwheel_cleanup();
+
+	rt_bare_socket_cleanup(&rst_socket.sock);
+
+	rtdm_dev_unregister(&tcp_device);
+}
+
+module_init(rt_tcp_init);
+module_exit(rt_tcp_release);
+++ linux-patched/drivers/xenomai/net/stack/corectl.c	2022-03-21 12:58:30.724875943 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtskb.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2016 Gilles Chanteperdrix <gch@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <asm/xenomai/syscall.h>
+#include <xenomai/posix/corectl.h>
+
+static int rtnet_corectl_call(struct notifier_block *self, unsigned long arg,
+			      void *cookie)
+{
+	struct cobalt_config_vector *vec = cookie;
+	int ret = 0;
+
+	if (arg != _CC_COBALT_GET_NET_CONFIG)
+		return NOTIFY_DONE;
+
+	if (vec->u_bufsz < sizeof(ret))
+		return notifier_from_errno(-EINVAL);
+
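+	/* report the compiled-in RTnet components as a feature bitmask */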
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET))
+		ret |= _CC_COBALT_NET;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ETH_P_ALL))
+		ret |= _CC_COBALT_NET_ETH_P_ALL;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4))
+		ret |= _CC_COBALT_NET_IPV4;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4_ICMP))
+		ret |= _CC_COBALT_NET_ICMP;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING))
+		ret |= _CC_COBALT_NET_NETROUTING;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4_ROUTE))
+		ret |= _CC_COBALT_NET_ROUTER;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4_UDP))
+		ret |= _CC_COBALT_NET_UDP;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTPACKET))
+		ret |= _CC_COBALT_NET_AF_PACKET;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_TDMA))
+		ret |= _CC_COBALT_NET_TDMA;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_NOMAC))
+		ret |= _CC_COBALT_NET_NOMAC;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTCFG))
+		ret |= _CC_COBALT_NET_CFG;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP))
+		ret |= _CC_COBALT_NET_CAP;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY))
+		ret |= _CC_COBALT_NET_PROXY;
+
+	ret = cobalt_copy_to_user(vec->u_buf, &ret, sizeof(ret));
+
+	return ret ? notifier_from_errno(-EFAULT) : NOTIFY_STOP;
+}
+
+static struct notifier_block rtnet_corectl_notifier = {
+	.notifier_call = rtnet_corectl_call,
+};
+
+void rtnet_corectl_register(void)
+{
+	cobalt_add_config_chain(&rtnet_corectl_notifier);
+}
+
+void rtnet_corectl_unregister(void)
+{
+	cobalt_remove_config_chain(&rtnet_corectl_notifier);
+}
+++ linux-patched/drivers/xenomai/net/stack/rtskb.c	2022-03-21 12:58:30.716876021 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtnet_chrdev.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  stack/rtskb.c - rtskb implementation for rtnet
+ *
+ *  Copyright (C) 2002      Ulrich Marx <marx@fet.uni-hannover.de>,
+ *  Copyright (C) 2003-2006 Jan Kiszka <jan.kiszka@web.de>
+ *  Copyright (C) 2006 Jorge Almeida <j-almeida@criticalsoftware.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <rtnet_checksum.h>
+
+#include <rtdev.h>
+#include <rtnet_internal.h>
+#include <rtskb.h>
+#include <rtnet_port.h>
+
+static unsigned int global_rtskbs = DEFAULT_GLOBAL_RTSKBS;
+module_param(global_rtskbs, uint, 0444);
+MODULE_PARM_DESC(global_rtskbs,
+		 "Number of realtime socket buffers in global pool");
+
+/* Linux slab pool for rtskbs */
+static struct kmem_cache *rtskb_slab_pool;
+
+/* pool of rtskbs for global use */
+struct rtskb_pool global_pool;
+EXPORT_SYMBOL_GPL(global_pool);
+
+/* pool statistics */
+unsigned int rtskb_pools = 0;
+unsigned int rtskb_pools_max = 0;
+unsigned int rtskb_amount = 0;
+unsigned int rtskb_amount_max = 0;
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
+/* RTcap interface */
+rtdm_lock_t rtcap_lock;
+EXPORT_SYMBOL_GPL(rtcap_lock);
+
+void (*rtcap_handler)(struct rtskb *skb) = NULL;
+EXPORT_SYMBOL_GPL(rtcap_handler);
+#endif
+
+/***
+ *  rtskb_copy_and_csum_bits
+ */
+unsigned int rtskb_copy_and_csum_bits(const struct rtskb *skb, int offset,
+				      u8 *to, int len, unsigned int csum)
+{
+	int copy;
+
+	/* Copy header. */
+	if ((copy = skb->len - offset) > 0) {
+		if (copy > len)
+			copy = len;
+		csum = rtnet_csum_copy(skb->data + offset, to, copy, csum);
+		if ((len -= copy) == 0)
+			return csum;
+		offset += copy;
+		to += copy;
+	}
+
+	RTNET_ASSERT(len == 0, );
+	return csum;
+}
+
+EXPORT_SYMBOL_GPL(rtskb_copy_and_csum_bits);
+
+/***
+ *  rtskb_copy_and_csum_dev
+ */
+void rtskb_copy_and_csum_dev(const struct rtskb *skb, u8 *to)
+{
+	unsigned int csum;
+	unsigned int csstart;
+
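+	/*
+	 * For CHECKSUM_PARTIAL buffers only the headers up to the transport
+	 * header are copied verbatim; the remainder is checksummed while
+	 * copying and the folded result is stored at csstart + skb->csum.
+	 */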
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		csstart = skb->h.raw - skb->data;
+
+		if (csstart > skb->len)
+			BUG();
+	} else
+		csstart = skb->len;
+
+	memcpy(to, skb->data, csstart);
+
+	csum = 0;
+	if (csstart != skb->len)
+		csum = rtskb_copy_and_csum_bits(skb, csstart, to + csstart,
+						skb->len - csstart, 0);
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		unsigned int csstuff = csstart + skb->csum;
+
+		*((unsigned short *)(to + csstuff)) = csum_fold(csum);
+	}
+}
+
+EXPORT_SYMBOL_GPL(rtskb_copy_and_csum_dev);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
+/**
+ *  rtskb_over_panic - private function
+ *  @skb: buffer
+ *  @sz: size
+ *  @here: address
+ *
+ *  Out of line support code for rtskb_put(). Not user callable.
+ */
+void rtskb_over_panic(struct rtskb *skb, int sz, void *here)
+{
+	rtdm_printk("RTnet: rtskb_put :over: %p:%d put:%d dev:%s\n", here,
+		    skb->len, sz, (skb->rtdev) ? skb->rtdev->name : "<NULL>");
+}
+
+EXPORT_SYMBOL_GPL(rtskb_over_panic);
+
+/**
+ *  rtskb_under_panic - private function
+ *  @skb: buffer
+ *  @sz: size
+ *  @here: address
+ *
+ *  Out of line support code for rtskb_push(). Not user callable.
+ */
+void rtskb_under_panic(struct rtskb *skb, int sz, void *here)
+{
+	rtdm_printk("RTnet: rtskb_push :under: %p:%d put:%d dev:%s\n", here,
+		    skb->len, sz, (skb->rtdev) ? skb->rtdev->name : "<NULL>");
+}
+
+EXPORT_SYMBOL_GPL(rtskb_under_panic);
+#endif /* CONFIG_XENO_DRIVERS_NET_CHECKED */
+
+static struct rtskb *__rtskb_pool_dequeue(struct rtskb_pool *pool)
+{
+	struct rtskb_queue *queue = &pool->queue;
+	struct rtskb *skb;
+
+	if (pool->lock_ops && !pool->lock_ops->trylock(pool->lock_cookie))
+		return NULL;
+	skb = __rtskb_dequeue(queue);
+	if (skb == NULL && pool->lock_ops)
+		pool->lock_ops->unlock(pool->lock_cookie);
+
+	return skb;
+}
+
+struct rtskb *rtskb_pool_dequeue(struct rtskb_pool *pool)
+{
+	struct rtskb_queue *queue = &pool->queue;
+	rtdm_lockctx_t context;
+	struct rtskb *skb;
+
+	rtdm_lock_get_irqsave(&queue->lock, context);
+	skb = __rtskb_pool_dequeue(pool);
+	rtdm_lock_put_irqrestore(&queue->lock, context);
+
+	return skb;
+}
+EXPORT_SYMBOL_GPL(rtskb_pool_dequeue);
+
+static void __rtskb_pool_queue_tail(struct rtskb_pool *pool, struct rtskb *skb)
+{
+	struct rtskb_queue *queue = &pool->queue;
+
+	__rtskb_queue_tail(queue, skb);
+	if (pool->lock_ops)
+		pool->lock_ops->unlock(pool->lock_cookie);
+}
+
+void rtskb_pool_queue_tail(struct rtskb_pool *pool, struct rtskb *skb)
+{
+	struct rtskb_queue *queue = &pool->queue;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&queue->lock, context);
+	__rtskb_pool_queue_tail(pool, skb);
+	rtdm_lock_put_irqrestore(&queue->lock, context);
+}
+EXPORT_SYMBOL_GPL(rtskb_pool_queue_tail);
+
+/***
+ *  alloc_rtskb - allocate an rtskb from a pool
+ *  @size: required buffer size (to check against maximum boundary)
+ *  @pool: pool to take the rtskb from
+ */
+struct rtskb *alloc_rtskb(unsigned int size, struct rtskb_pool *pool)
+{
+	struct rtskb *skb;
+
+	RTNET_ASSERT(size <= SKB_DATA_ALIGN(RTSKB_SIZE), return NULL;);
+
+	skb = rtskb_pool_dequeue(pool);
+	if (!skb)
+		return NULL;
+
+	/* Load the data pointers. */
+	skb->data = skb->buf_start;
+	skb->tail = skb->buf_start;
+	skb->end = skb->buf_start + size;
+
+	/* Set up other states */
+	skb->chain_end = skb;
+	skb->len = 0;
+	skb->pkt_type = PACKET_HOST;
+	skb->xmit_stamp = NULL;
+	skb->ip_summed = CHECKSUM_NONE;
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
+	skb->cap_flags = 0;
+#endif
+
+	return skb;
+}
+
+EXPORT_SYMBOL_GPL(alloc_rtskb);
+
+/***
+ *  kfree_rtskb
+ *  @skb    rtskb
+ */
+void kfree_rtskb(struct rtskb *skb)
+{
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
+	rtdm_lockctx_t context;
+	struct rtskb *comp_skb;
+	struct rtskb *next_skb;
+	struct rtskb *chain_end;
+#endif
+
+	RTNET_ASSERT(skb != NULL, return;);
+	RTNET_ASSERT(skb->pool != NULL, return;);
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
+	next_skb = skb;
+	chain_end = skb->chain_end;
+
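+	/*
+	 * Walk the complete fragment chain. Buffers shared with the capture
+	 * path (RTSKB_CAP_SHARED) are released by exchanging pools with
+	 * their companion capture skb; all others go straight back to their
+	 * own pool.
+	 */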
+	do {
+		skb = next_skb;
+		next_skb = skb->next;
+
+		rtdm_lock_get_irqsave(&rtcap_lock, context);
+
+		if (skb->cap_flags & RTSKB_CAP_SHARED) {
+			skb->cap_flags &= ~RTSKB_CAP_SHARED;
+
+			comp_skb = skb->cap_comp_skb;
+			skb->pool = xchg(&comp_skb->pool, skb->pool);
+
+			rtdm_lock_put_irqrestore(&rtcap_lock, context);
+
+			rtskb_pool_queue_tail(comp_skb->pool, comp_skb);
+		} else {
+			rtdm_lock_put_irqrestore(&rtcap_lock, context);
+
+			skb->chain_end = skb;
+			rtskb_pool_queue_tail(skb->pool, skb);
+		}
+
+	} while (chain_end != skb);
+
+#else /* CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP */
+
+	rtskb_pool_queue_tail(skb->pool, skb);
+
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP */
+}
+
+EXPORT_SYMBOL_GPL(kfree_rtskb);
+
+/***
+ *  rtskb_pool_init
+ *  @pool: pool to be initialized
+ *  @initial_size: number of rtskbs to allocate
+ *  return: number of actually allocated rtskbs
+ */
+unsigned int rtskb_pool_init(struct rtskb_pool *pool, unsigned int initial_size,
+			     const struct rtskb_pool_lock_ops *lock_ops,
+			     void *lock_cookie)
+{
+	unsigned int i;
+
+	rtskb_queue_init(&pool->queue);
+
+	i = rtskb_pool_extend(pool, initial_size);
+
+	rtskb_pools++;
+	if (rtskb_pools > rtskb_pools_max)
+		rtskb_pools_max = rtskb_pools;
+
+	pool->lock_ops = lock_ops;
+	pool->lock_cookie = lock_cookie;
+
+	return i;
+}
+
+EXPORT_SYMBOL_GPL(rtskb_pool_init);
+
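+/*
+ * Lock operations for module-owned pools: dequeuing a buffer pins the
+ * owning module via try_module_get(), returning the buffer drops that
+ * reference again via module_put().
+ */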
+static int rtskb_module_pool_trylock(void *cookie)
+{
+	int err = 1;
+	if (cookie)
+		err = try_module_get(cookie);
+	return err;
+}
+
+static void rtskb_module_pool_unlock(void *cookie)
+{
+	if (cookie)
+		module_put(cookie);
+}
+
+static const struct rtskb_pool_lock_ops rtskb_module_lock_ops = {
+	.trylock = rtskb_module_pool_trylock,
+	.unlock = rtskb_module_pool_unlock,
+};
+
+unsigned int __rtskb_module_pool_init(struct rtskb_pool *pool,
+				      unsigned int initial_size,
+				      struct module *module)
+{
+	return rtskb_pool_init(pool, initial_size, &rtskb_module_lock_ops,
+			       module);
+}
+EXPORT_SYMBOL_GPL(__rtskb_module_pool_init);
+
+/***
+ *  rtskb_pool_release
+ *  @pool: pool to release
+ */
+void rtskb_pool_release(struct rtskb_pool *pool)
+{
+	struct rtskb *skb;
+
+	while ((skb = rtskb_dequeue(&pool->queue)) != NULL) {
+		rtdev_unmap_rtskb(skb);
+		kmem_cache_free(rtskb_slab_pool, skb);
+		rtskb_amount--;
+	}
+
+	rtskb_pools--;
+}
+
+EXPORT_SYMBOL_GPL(rtskb_pool_release);
+
+unsigned int rtskb_pool_extend(struct rtskb_pool *pool, unsigned int add_rtskbs)
+{
+	unsigned int i;
+	struct rtskb *skb;
+
+	RTNET_ASSERT(pool != NULL, return -EINVAL;);
+
+	for (i = 0; i < add_rtskbs; i++) {
+		/* get rtskb from slab pool */
+		if (!(skb = kmem_cache_alloc(rtskb_slab_pool, GFP_KERNEL))) {
+			printk(KERN_ERR
+			       "RTnet: rtskb allocation from slab pool failed\n");
+			break;
+		}
+
+		/* zero out the whole rtskb management structure */
+		memset(skb, 0, sizeof(struct rtskb));
+
+		skb->chain_end = skb;
+		skb->pool = pool;
+		skb->buf_start =
+			((unsigned char *)skb) + ALIGN_RTSKB_STRUCT_LEN;
+#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
+		skb->buf_end = skb->buf_start + SKB_DATA_ALIGN(RTSKB_SIZE) - 1;
+#endif
+
+		if (rtdev_map_rtskb(skb) < 0) {
+			kmem_cache_free(rtskb_slab_pool, skb);
+			break;
+		}
+
+		rtskb_queue_tail(&pool->queue, skb);
+
+		rtskb_amount++;
+		if (rtskb_amount > rtskb_amount_max)
+			rtskb_amount_max = rtskb_amount;
+	}
+
+	return i;
+}
+
+unsigned int rtskb_pool_shrink(struct rtskb_pool *pool, unsigned int rem_rtskbs)
+{
+	unsigned int i;
+	struct rtskb *skb;
+
+	for (i = 0; i < rem_rtskbs; i++) {
+		if ((skb = rtskb_dequeue(&pool->queue)) == NULL)
+			break;
+
+		rtdev_unmap_rtskb(skb);
+		kmem_cache_free(rtskb_slab_pool, skb);
+		rtskb_amount--;
+	}
+
+	return i;
+}
+
+/* Note: acquires only the first skb of a chain! */
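+/*
+ * Exchange @rtskb against a compensation buffer taken from @comp_pool:
+ * the compensation skb is queued back to @rtskb's original pool, and
+ * @rtskb is accounted to @comp_pool from now on. Returns -ENOMEM if
+ * @comp_pool is empty.
+ */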
+int rtskb_acquire(struct rtskb *rtskb, struct rtskb_pool *comp_pool)
+{
+	struct rtskb *comp_rtskb;
+	struct rtskb_pool *release_pool;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&comp_pool->queue.lock, context);
+
+	comp_rtskb = __rtskb_pool_dequeue(comp_pool);
+	if (!comp_rtskb) {
+		rtdm_lock_put_irqrestore(&comp_pool->queue.lock, context);
+		return -ENOMEM;
+	}
+
+	rtdm_lock_put(&comp_pool->queue.lock);
+
+	comp_rtskb->chain_end = comp_rtskb;
+	comp_rtskb->pool = release_pool = rtskb->pool;
+
+	rtdm_lock_get(&release_pool->queue.lock);
+
+	__rtskb_pool_queue_tail(release_pool, comp_rtskb);
+
+	rtdm_lock_put_irqrestore(&release_pool->queue.lock, context);
+
+	rtskb->pool = comp_pool;
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(rtskb_acquire);
+
+/* clone rtskb to another, allocating the new rtskb from pool */
+struct rtskb *rtskb_clone(struct rtskb *rtskb, struct rtskb_pool *pool)
+{
+	struct rtskb *clone_rtskb;
+	unsigned int total_len;
+
+	clone_rtskb = alloc_rtskb(rtskb->end - rtskb->buf_start, pool);
+	if (clone_rtskb == NULL)
+		return NULL;
+
+	/*
+	 * Note: we don't clone
+	 *  - rtskb.sk
+	 *  - rtskb.xmit_stamp
+	 * until real use cases show up.
+	 */
+
+	clone_rtskb->priority = rtskb->priority;
+	clone_rtskb->rtdev = rtskb->rtdev;
+	clone_rtskb->time_stamp = rtskb->time_stamp;
+
+	clone_rtskb->mac.raw = clone_rtskb->buf_start;
+	clone_rtskb->nh.raw = clone_rtskb->buf_start;
+	clone_rtskb->h.raw = clone_rtskb->buf_start;
+
+	clone_rtskb->data += rtskb->data - rtskb->buf_start;
+	clone_rtskb->tail += rtskb->tail - rtskb->buf_start;
+	clone_rtskb->mac.raw += rtskb->mac.raw - rtskb->buf_start;
+	clone_rtskb->nh.raw += rtskb->nh.raw - rtskb->buf_start;
+	clone_rtskb->h.raw += rtskb->h.raw - rtskb->buf_start;
+
+	clone_rtskb->protocol = rtskb->protocol;
+	clone_rtskb->pkt_type = rtskb->pkt_type;
+
+	clone_rtskb->ip_summed = rtskb->ip_summed;
+	clone_rtskb->csum = rtskb->csum;
+
+	total_len = rtskb->len + rtskb->data - rtskb->mac.raw;
+	memcpy(clone_rtskb->mac.raw, rtskb->mac.raw, total_len);
+	clone_rtskb->len = rtskb->len;
+
+	return clone_rtskb;
+}
+
+EXPORT_SYMBOL_GPL(rtskb_clone);
+
+int rtskb_pools_init(void)
+{
+	rtskb_slab_pool = kmem_cache_create("rtskb_slab_pool",
+					    ALIGN_RTSKB_STRUCT_LEN +
+						    SKB_DATA_ALIGN(RTSKB_SIZE),
+					    0, SLAB_HWCACHE_ALIGN, NULL);
+	if (rtskb_slab_pool == NULL)
+		return -ENOMEM;
+
+	/* reset the statistics (cache is accounted separately) */
+	rtskb_pools = 0;
+	rtskb_pools_max = 0;
+	rtskb_amount = 0;
+	rtskb_amount_max = 0;
+
+	/* create the global rtskb pool */
+	if (rtskb_module_pool_init(&global_pool, global_rtskbs) < global_rtskbs)
+		goto err_out;
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
+	rtdm_lock_init(&rtcap_lock);
+#endif
+
+	return 0;
+
+err_out:
+	rtskb_pool_release(&global_pool);
+	kmem_cache_destroy(rtskb_slab_pool);
+
+	return -ENOMEM;
+}
+
+void rtskb_pools_release(void)
+{
+	rtskb_pool_release(&global_pool);
+	kmem_cache_destroy(rtskb_slab_pool);
+}
+++ linux-patched/drivers/xenomai/net/stack/rtnet_chrdev.c	2022-03-21 12:58:30.709876090 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/eth.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  stack/rtnet_chrdev.c - implements char device for management interface
+ *
+ *  Copyright (C) 1999       Lineo, Inc
+ *                1999, 2002 David A. Schleef <ds@schleef.org>
+ *                2002       Ulrich Marx <marx@fet.uni-hannover.de>
+ *                2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/kmod.h>
+#include <linux/miscdevice.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+
+#include <rtnet_chrdev.h>
+#include <rtnet_internal.h>
+#include <ipv4/route.h>
+
+static DEFINE_SPINLOCK(ioctl_handler_lock);
+static LIST_HEAD(ioctl_handlers);
+
+static long rtnet_ioctl(struct file *file, unsigned int request,
+			unsigned long arg)
+{
+	struct rtnet_ioctl_head head;
+	struct rtnet_device *rtdev = NULL;
+	struct rtnet_ioctls *ioctls;
+	struct list_head *entry;
+	int ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	ret = copy_from_user(&head, (void *)arg, sizeof(head));
+	if (ret != 0)
+		return -EFAULT;
+
+	spin_lock(&ioctl_handler_lock);
+
+	list_for_each (entry, &ioctl_handlers) {
+		ioctls = list_entry(entry, struct rtnet_ioctls, entry);
+
+		if (ioctls->ioctl_type == _IOC_TYPE(request)) {
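+			/*
+			 * Pin the handler before dropping the lock so that
+			 * it cannot be unregistered while its ioctl routine
+			 * is running.
+			 */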
+			atomic_inc(&ioctls->ref_count);
+
+			spin_unlock(&ioctl_handler_lock);
+
+			if ((_IOC_NR(request) & RTNET_IOC_NODEV_PARAM) == 0) {
+				rtdev = rtdev_get_by_name(head.if_name);
+				if (!rtdev) {
+					atomic_dec(&ioctls->ref_count);
+					return -ENODEV;
+				}
+			}
+
+			ret = ioctls->handler(rtdev, request, arg);
+
+			if (rtdev)
+				rtdev_dereference(rtdev);
+			atomic_dec(&ioctls->ref_count);
+
+			return ret;
+		}
+	}
+
+	spin_unlock(&ioctl_handler_lock);
+
+	return -ENOTTY;
+}
+
+static int rtnet_core_ioctl(struct rtnet_device *rtdev, unsigned int request,
+			    unsigned long arg)
+{
+	struct rtnet_core_cmd cmd;
+	int ret;
+
+	ret = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+	if (ret != 0)
+		return -EFAULT;
+
+	switch (request) {
+	case IOC_RT_IFUP:
+		ret = rtdev_up(rtdev, &cmd);
+		break;
+
+	case IOC_RT_IFDOWN:
+		ret = rtdev_down(rtdev);
+		break;
+
+	case IOC_RT_IFINFO:
+		if (cmd.args.info.ifindex > 0)
+			rtdev = rtdev_get_by_index(cmd.args.info.ifindex);
+		else
+			rtdev = rtdev_get_by_name(cmd.head.if_name);
+		if (rtdev == NULL)
+			return -ENODEV;
+
+		if (mutex_lock_interruptible(&rtdev->nrt_lock)) {
+			rtdev_dereference(rtdev);
+			return -ERESTARTSYS;
+		}
+
+		memcpy(cmd.head.if_name, rtdev->name, IFNAMSIZ);
+		cmd.args.info.ifindex = rtdev->ifindex;
+		cmd.args.info.type = rtdev->type;
+		cmd.args.info.ip_addr = rtdev->local_ip;
+		cmd.args.info.broadcast_ip = rtdev->broadcast_ip;
+		cmd.args.info.mtu = rtdev->mtu;
+		cmd.args.info.flags = rtdev->flags;
+		if ((cmd.args.info.flags & IFF_UP) &&
+		    (rtdev->link_state &
+		     (RTNET_LINK_STATE_PRESENT | RTNET_LINK_STATE_NOCARRIER)) ==
+			    RTNET_LINK_STATE_PRESENT)
+			cmd.args.info.flags |= IFF_RUNNING;
+
+		memcpy(cmd.args.info.dev_addr, rtdev->dev_addr, MAX_ADDR_LEN);
+
+		mutex_unlock(&rtdev->nrt_lock);
+
+		rtdev_dereference(rtdev);
+
+		if (copy_to_user((void *)arg, &cmd, sizeof(cmd)) != 0)
+			return -EFAULT;
+		break;
+
+	default:
+		ret = -ENOTTY;
+	}
+
+	return ret;
+}
+
+int rtnet_register_ioctls(struct rtnet_ioctls *ioctls)
+{
+	struct list_head *entry;
+	struct rtnet_ioctls *registered_ioctls;
+
+	RTNET_ASSERT(ioctls->handler != NULL, return -EINVAL;);
+
+	spin_lock(&ioctl_handler_lock);
+
+	list_for_each (entry, &ioctl_handlers) {
+		registered_ioctls =
+			list_entry(entry, struct rtnet_ioctls, entry);
+		if (registered_ioctls->ioctl_type == ioctls->ioctl_type) {
+			spin_unlock(&ioctl_handler_lock);
+			return -EEXIST;
+		}
+	}
+
+	list_add_tail(&ioctls->entry, &ioctl_handlers);
+	atomic_set(&ioctls->ref_count, 0);
+
+	spin_unlock(&ioctl_handler_lock);
+
+	return 0;
+}
+
+void rtnet_unregister_ioctls(struct rtnet_ioctls *ioctls)
+{
+	spin_lock(&ioctl_handler_lock);
+
+	while (atomic_read(&ioctls->ref_count) != 0) {
+		spin_unlock(&ioctl_handler_lock);
+
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1 * HZ); /* wait a second */
+
+		spin_lock(&ioctl_handler_lock);
+	}
+
+	list_del(&ioctls->entry);
+
+	spin_unlock(&ioctl_handler_lock);
+}
+
+static struct file_operations rtnet_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = rtnet_ioctl,
+};
+
+static struct miscdevice rtnet_chr_misc_dev = {
+	.minor = RTNET_MINOR,
+	.name = "rtnet",
+	.fops = &rtnet_fops,
+};
+
+static struct rtnet_ioctls core_ioctls = { .service_name = "RTnet Core",
+					   .ioctl_type = RTNET_IOC_TYPE_CORE,
+					   .handler = rtnet_core_ioctl };
+
+/**
+ * rtnet_chrdev_init - register the RTnet management device and core ioctls
+ *
+ */
+int __init rtnet_chrdev_init(void)
+{
+	int err;
+
+	err = misc_register(&rtnet_chr_misc_dev);
+	if (err) {
+		printk("RTnet: unable to register rtnet management device/class "
+		       "(error %d)\n",
+		       err);
+		return err;
+	}
+
+	rtnet_register_ioctls(&core_ioctls);
+	return 0;
+}
+
+/**
+ * rtnet_chrdev_release - deregister the RTnet management device
+ *
+ */
+void rtnet_chrdev_release(void)
+{
+	misc_deregister(&rtnet_chr_misc_dev);
+}
+
+EXPORT_SYMBOL_GPL(rtnet_register_ioctls);
+EXPORT_SYMBOL_GPL(rtnet_unregister_ioctls);
+++ linux-patched/drivers/xenomai/net/stack/eth.c	2022-03-21 12:58:30.701876168 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtnet_rtpc.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  stack/eth.c - Ethernet-specific functions
+ *
+ *  Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <rtdev.h>
+#include <rtnet_internal.h>
+
+/*
+ *  Create the Ethernet MAC header for an arbitrary protocol layer
+ *
+ *  saddr=NULL  means use device source address
+ *  daddr=NULL  means leave destination address (e.g. unresolved ARP)
+ */
+int rt_eth_header(struct rtskb *skb, struct rtnet_device *rtdev,
+		  unsigned short type, void *daddr, void *saddr, unsigned len)
+{
+	struct ethhdr *eth = (struct ethhdr *)rtskb_push(skb, ETH_HLEN);
+
+	/*
+     *  Set rtskb mac field
+     */
+
+	skb->mac.ethernet = eth;
+
+	/*
+     *  Set the protocol type. For a packet of type ETH_P_802_3 we put the length
+     *  in here instead. It is up to the 802.2 layer to carry protocol information.
+     */
+
+	if (type != ETH_P_802_3)
+		eth->h_proto = htons(type);
+	else
+		eth->h_proto = htons(len);
+
+	/*
+     *  Set the source hardware address.
+     */
+
+	if (saddr)
+		memcpy(eth->h_source, saddr, rtdev->addr_len);
+	else
+		memcpy(eth->h_source, rtdev->dev_addr, rtdev->addr_len);
+
+	if (rtdev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
+		memset(eth->h_dest, 0, rtdev->addr_len);
+		return rtdev->hard_header_len;
+	}
+
+	if (daddr) {
+		memcpy(eth->h_dest, daddr, rtdev->addr_len);
+		return rtdev->hard_header_len;
+	}
+
+	return -rtdev->hard_header_len;
+}
+
+unsigned short rt_eth_type_trans(struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct ethhdr *eth;
+	unsigned char *rawp;
+
+	rtcap_mark_incoming(skb);
+
+	skb->mac.raw = skb->data;
+	rtskb_pull(skb, rtdev->hard_header_len);
+	eth = skb->mac.ethernet;
+
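+	/* group bit set in the destination => broadcast or multicast frame */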
+	if (*eth->h_dest & 1) {
+		if (memcmp(eth->h_dest, rtdev->broadcast, ETH_ALEN) == 0)
+			skb->pkt_type = PACKET_BROADCAST;
+		else
+			skb->pkt_type = PACKET_MULTICAST;
+	}
+
+	/*
+     *  This ALLMULTI check should be redundant by 1.4
+     *  so don't forget to remove it.
+     *
+     *  It seems you forgot to remove it. All silly devices
+     *  seem to set IFF_PROMISC.
+     */
+
+	else if (1 /*rtdev->flags&IFF_PROMISC*/) {
+		if (memcmp(eth->h_dest, rtdev->dev_addr, ETH_ALEN))
+			skb->pkt_type = PACKET_OTHERHOST;
+	}
+
+	if (ntohs(eth->h_proto) >= 1536)
+		return eth->h_proto;
+
+	rawp = skb->data;
+
+	/*
+     *  This is a magic hack to spot IPX packets. Older Novell breaks
+     *  the protocol design and runs IPX over 802.3 without an 802.2 LLC
+     *  layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
+     *  won't work for fault tolerant netware but does for the rest.
+     */
+	if (*(unsigned short *)rawp == 0xFFFF)
+		return htons(ETH_P_802_3);
+
+	/*
+     *  Real 802.2 LLC
+     */
+	return htons(ETH_P_802_2);
+}
+
+EXPORT_SYMBOL_GPL(rt_eth_header);
+EXPORT_SYMBOL_GPL(rt_eth_type_trans);
+++ linux-patched/drivers/xenomai/net/stack/rtnet_rtpc.c	2022-03-21 12:58:30.691876265 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/Makefile	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  stack/rtnet_rtpc.c
+ *
+ *  RTnet - real-time networking subsystem
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include <rtnet_rtpc.h>
+#include <rtdm/driver.h>
+
+static DEFINE_RTDM_LOCK(pending_calls_lock);
+static DEFINE_RTDM_LOCK(processed_calls_lock);
+static rtdm_event_t dispatch_event;
+static rtdm_task_t dispatch_task;
+static rtdm_nrtsig_t rtpc_nrt_signal;
+
+LIST_HEAD(pending_calls);
+LIST_HEAD(processed_calls);
+
+#ifndef __wait_event_interruptible_timeout
+#define __wait_event_interruptible_timeout(wq, condition, ret)                 \
+	do {                                                                   \
+		wait_queue_t __wait;                                           \
+		init_waitqueue_entry(&__wait, current);                        \
+                                                                               \
+		add_wait_queue(&wq, &__wait);                                  \
+		for (;;) {                                                     \
+			set_current_state(TASK_INTERRUPTIBLE);                 \
+			if (condition)                                         \
+				break;                                         \
+			if (!signal_pending(current)) {                        \
+				ret = schedule_timeout(ret);                   \
+				if (!ret)                                      \
+					break;                                 \
+				continue;                                      \
+			}                                                      \
+			ret = -ERESTARTSYS;                                    \
+			break;                                                 \
+		}                                                              \
+		current->state = TASK_RUNNING;                                 \
+		remove_wait_queue(&wq, &__wait);                               \
+	} while (0)
+#endif
+
+#ifndef wait_event_interruptible_timeout
+#define wait_event_interruptible_timeout(wq, condition, timeout)               \
+	({                                                                     \
+		long __ret = timeout;                                          \
+		if (!(condition))                                              \
+			__wait_event_interruptible_timeout(wq, condition,      \
+							   __ret);             \
+		__ret;                                                         \
+	})
+#endif
+
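+/*
+ * Queue @proc for execution by the real-time dispatcher task and wait
+ * (interruptibly) for its completion; @timeout is given in milliseconds,
+ * 0 means wait without a time limit. If the wait succeeds,
+ * @copy_back_handler (if any) copies results back into @priv_data and the
+ * call's own result is returned.
+ */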
+int rtnet_rtpc_dispatch_call(rtpc_proc proc, unsigned int timeout,
+			     void *priv_data, size_t priv_data_size,
+			     rtpc_copy_back_proc copy_back_handler,
+			     rtpc_cleanup_proc cleanup_handler)
+{
+	struct rt_proc_call *call;
+	rtdm_lockctx_t context;
+	int ret;
+
+	call = kmalloc(sizeof(struct rt_proc_call) + priv_data_size,
+		       GFP_KERNEL);
+	if (call == NULL)
+		return -ENOMEM;
+
+	memcpy(call->priv_data, priv_data, priv_data_size);
+
+	call->processed = 0;
+	call->proc = proc;
+	call->result = 0;
+	call->cleanup_handler = cleanup_handler;
+	atomic_set(&call->ref_count, 2); /* dispatcher + rt-procedure */
+	init_waitqueue_head(&call->call_wq);
+
+	rtdm_lock_get_irqsave(&pending_calls_lock, context);
+	list_add_tail(&call->list_entry, &pending_calls);
+	rtdm_lock_put_irqrestore(&pending_calls_lock, context);
+
+	rtdm_event_signal(&dispatch_event);
+
+	if (timeout > 0) {
+		ret = wait_event_interruptible_timeout(
+			call->call_wq, call->processed, (timeout * HZ) / 1000);
+		if (ret == 0)
+			ret = -ETIME;
+	} else
+		ret = wait_event_interruptible(call->call_wq, call->processed);
+
+	if (ret >= 0) {
+		if (copy_back_handler != NULL)
+			copy_back_handler(call, priv_data);
+		ret = call->result;
+	}
+
+	if (atomic_dec_and_test(&call->ref_count)) {
+		if (call->cleanup_handler != NULL)
+			call->cleanup_handler(&call->priv_data);
+		kfree(call);
+	}
+
+	return ret;
+}
+
+static inline struct rt_proc_call *rtpc_dequeue_pending_call(void)
+{
+	rtdm_lockctx_t context;
+	struct rt_proc_call *call = NULL;
+
+	rtdm_lock_get_irqsave(&pending_calls_lock, context);
+	if (!list_empty(&pending_calls)) {
+		call = (struct rt_proc_call *)pending_calls.next;
+		list_del(&call->list_entry);
+	}
+	rtdm_lock_put_irqrestore(&pending_calls_lock, context);
+
+	return call;
+}
+
+static inline void rtpc_queue_processed_call(struct rt_proc_call *call)
+{
+	rtdm_lockctx_t context;
+	bool trigger;
+
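+	/* pend the NRT signal only on the empty -> non-empty transition */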
+	rtdm_lock_get_irqsave(&processed_calls_lock, context);
+	trigger = list_empty(&processed_calls);
+	list_add_tail(&call->list_entry, &processed_calls);
+	rtdm_lock_put_irqrestore(&processed_calls_lock, context);
+
+	if (trigger)
+		rtdm_nrtsig_pend(&rtpc_nrt_signal);
+}
+
+static inline struct rt_proc_call *rtpc_dequeue_processed_call(void)
+{
+	rtdm_lockctx_t context;
+	struct rt_proc_call *call = NULL;
+
+	rtdm_lock_get_irqsave(&processed_calls_lock, context);
+	if (!list_empty(&processed_calls)) {
+		call = (struct rt_proc_call *)processed_calls.next;
+		list_del(&call->list_entry);
+	}
+	rtdm_lock_put_irqrestore(&processed_calls_lock, context);
+
+	return call;
+}
+
+static void rtpc_dispatch_handler(void *arg)
+{
+	struct rt_proc_call *call;
+	int ret;
+
+	while (!rtdm_task_should_stop()) {
+		if (rtdm_event_wait(&dispatch_event) < 0)
+			break;
+
+		while ((call = rtpc_dequeue_pending_call())) {
+			ret = call->proc(call);
+			if (ret != -CALL_PENDING)
+				rtpc_complete_call(call, ret);
+		}
+	}
+}
+
+static void rtpc_signal_handler(rtdm_nrtsig_t *nrt_sig, void *arg)
+{
+	struct rt_proc_call *call;
+
+	while ((call = rtpc_dequeue_processed_call()) != NULL) {
+		call->processed = 1;
+		wake_up(&call->call_wq);
+
+		if (atomic_dec_and_test(&call->ref_count)) {
+			if (call->cleanup_handler != NULL)
+				call->cleanup_handler(&call->priv_data);
+			kfree(call);
+		}
+	}
+}
+
+void rtnet_rtpc_complete_call(struct rt_proc_call *call, int result)
+{
+	call->result = result;
+	rtpc_queue_processed_call(call);
+}
+
+void rtnet_rtpc_complete_call_nrt(struct rt_proc_call *call, int result)
+{
+	RTNET_ASSERT(!rtdm_in_rt_context(),
+		     rtnet_rtpc_complete_call(call, result);
+		     return;);
+
+	call->processed = 1;
+	wake_up(&call->call_wq);
+
+	if (atomic_dec_and_test(&call->ref_count)) {
+		if (call->cleanup_handler != NULL)
+			call->cleanup_handler(&call->priv_data);
+		kfree(call);
+	}
+}
+
+int __init rtpc_init(void)
+{
+	int ret;
+
+	rtdm_nrtsig_init(&rtpc_nrt_signal, rtpc_signal_handler, NULL);
+
+	rtdm_event_init(&dispatch_event, 0);
+
+	ret = rtdm_task_init(&dispatch_task, "rtnet-rtpc",
+			     rtpc_dispatch_handler, 0,
+			     RTDM_TASK_LOWEST_PRIORITY, 0);
+	if (ret < 0) {
+		rtdm_event_destroy(&dispatch_event);
+		rtdm_nrtsig_destroy(&rtpc_nrt_signal);
+	}
+
+	return ret;
+}
+
+void rtpc_cleanup(void)
+{
+	rtdm_event_destroy(&dispatch_event);
+	rtdm_task_destroy(&dispatch_task);
+	rtdm_nrtsig_destroy(&rtpc_nrt_signal);
+}
+
+EXPORT_SYMBOL_GPL(rtnet_rtpc_dispatch_call);
+EXPORT_SYMBOL_GPL(rtnet_rtpc_complete_call);
+EXPORT_SYMBOL_GPL(rtnet_rtpc_complete_call_nrt);
+++ linux-patched/drivers/xenomai/net/stack/Makefile	2022-03-21 12:58:30.683876343 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/stack_mgr.h	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include -I$(srctree)/kernel/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTIPV4) += ipv4/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTPACKET) += packet/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTMAC) += rtmac/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTCFG) += rtcfg/
+
+obj-$(CONFIG_XENO_DRIVERS_NET) += rtnet.o
+
+rtnet-y :=  \
+	corectl.o \
+	iovec.o \
+	rtdev.o \
+	rtdev_mgr.o \
+	rtnet_chrdev.o \
+	rtnet_module.o \
+	rtnet_rtpc.o \
+	rtskb.o \
+	socket.o \
+	stack_mgr.o \
+	eth.o
+
+rtnet-$(CONFIG_XENO_DRIVERS_NET_RTWLAN) += rtwlan.o
+++ linux-patched/drivers/xenomai/net/stack/include/stack_mgr.h	2022-03-21 12:58:30.676876411 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtnet_socket.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  stack_mgr.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2002      Ulrich Marx <marx@fet.uni-hannover.de>
+ *                2003-2006 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __STACK_MGR_H_
+#define __STACK_MGR_H_
+
+#ifdef __KERNEL__
+
+#include <linux/list.h>
+
+#include <rtnet_internal.h>
+#include <rtdev.h>
+
+/***
+ * network layer protocol (layer 3)
+ */
+
+#define RTPACKET_HASH_TBL_SIZE 64
+#define RTPACKET_HASH_KEY_MASK (RTPACKET_HASH_TBL_SIZE - 1)
+
+struct rtpacket_type {
+	struct list_head list_entry;
+
+	unsigned short type;
+	short refcount;
+
+	int (*handler)(struct rtskb *, struct rtpacket_type *);
+	int (*err_handler)(struct rtskb *, struct rtnet_device *,
+			   struct rtpacket_type *);
+	bool (*trylock)(struct rtpacket_type *);
+	void (*unlock)(struct rtpacket_type *);
+
+	struct module *owner;
+};
+
+int __rtdev_add_pack(struct rtpacket_type *pt, struct module *module);
+#define rtdev_add_pack(pt) __rtdev_add_pack(pt, THIS_MODULE)
+
+void rtdev_remove_pack(struct rtpacket_type *pt);
+
+static inline bool rtdev_lock_pack(struct rtpacket_type *pt)
+{
+	return try_module_get(pt->owner);
+}
+
+static inline void rtdev_unlock_pack(struct rtpacket_type *pt)
+{
+	module_put(pt->owner);
+}
+
+void rt_stack_connect(struct rtnet_device *rtdev, struct rtnet_mgr *mgr);
+void rt_stack_disconnect(struct rtnet_device *rtdev);
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_DRV_LOOPBACK)
+void rt_stack_deliver(struct rtskb *rtskb);
+#endif /* CONFIG_XENO_DRIVERS_NET_DRV_LOOPBACK */
+
+int rt_stack_mgr_init(struct rtnet_mgr *mgr);
+void rt_stack_mgr_delete(struct rtnet_mgr *mgr);
+
+void rtnetif_rx(struct rtskb *skb);
+
+static inline void rtnetif_tx(struct rtnet_device *rtdev)
+{
+}
+
+static inline void rt_mark_stack_mgr(struct rtnet_device *rtdev)
+{
+	rtdm_event_signal(rtdev->stack_event);
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __STACK_MGR_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtnet_socket.h	2022-03-21 12:58:30.668876489 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtnet_iovec.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtnet_socket.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999       Lineo, Inc
+ *                1999, 2002 David A. Schleef <ds@schleef.org>
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_SOCKET_H_
+#define __RTNET_SOCKET_H_
+
+#include <asm/atomic.h>
+#include <linux/list.h>
+
+#include <rtdev.h>
+#include <rtdm/net.h>
+#include <rtdm/driver.h>
+#include <stack_mgr.h>
+
+struct rtsocket {
+	unsigned short protocol;
+
+	struct rtskb_pool skb_pool;
+	unsigned int pool_size;
+	struct mutex pool_nrt_lock;
+
+	struct rtskb_queue incoming;
+
+	rtdm_lock_t param_lock;
+
+	unsigned int priority;
+	nanosecs_rel_t timeout; /* receive timeout, 0 for infinite */
+
+	rtdm_sem_t pending_sem;
+
+	void (*callback_func)(struct rtdm_fd *, void *arg);
+	void *callback_arg;
+
+	unsigned long flags;
+
+	union {
+		/* IP specific */
+		struct {
+			u32 saddr; /* source ip-addr (bind) */
+			u32 daddr; /* destination ip-addr */
+			u16 sport; /* source port */
+			u16 dport; /* destination port */
+
+			int reg_index; /* index in port registry */
+			u8 tos;
+			u8 state;
+		} inet;
+
+		/* packet socket specific */
+		struct {
+			struct rtpacket_type packet_type;
+			int ifindex;
+		} packet;
+	} prot;
+};
+
+static inline struct rtdm_fd *rt_socket_fd(struct rtsocket *sock)
+{
+	return rtdm_private_to_fd(sock);
+}
+
+void *rtnet_get_arg(struct rtdm_fd *fd, void *tmp, const void *src, size_t len);
+
+int rtnet_put_arg(struct rtdm_fd *fd, void *dst, const void *src, size_t len);
+
+#define rt_socket_reference(sock) rtdm_fd_lock(rt_socket_fd(sock))
+#define rt_socket_dereference(sock) rtdm_fd_unlock(rt_socket_fd(sock))
+
+int rt_socket_init(struct rtdm_fd *fd, unsigned short protocol);
+
+void rt_socket_cleanup(struct rtdm_fd *fd);
+int rt_socket_common_ioctl(struct rtdm_fd *fd, int request, void __user *arg);
+int rt_socket_if_ioctl(struct rtdm_fd *fd, int request, void __user *arg);
+int rt_socket_select_bind(struct rtdm_fd *fd, rtdm_selector_t *selector,
+			  enum rtdm_selecttype type, unsigned fd_index);
+
+int rt_bare_socket_init(struct rtdm_fd *fd, unsigned short protocol,
+			unsigned int priority, unsigned int pool_size);
+
+static inline void rt_bare_socket_cleanup(struct rtsocket *sock)
+{
+	rtskb_pool_release(&sock->skb_pool);
+}
+
+#endif /* __RTNET_SOCKET_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtnet_iovec.h	2022-03-21 12:58:30.661876558 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtmac.h	1970-01-01 01:00:00.000000000 +0100
+/* rtnet_iovec.h
+ *
+ * RTnet - real-time networking subsystem
+ * Copyright (C) 1999,2000 Zentropic Computing, LLC
+ *               2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __RTNET_IOVEC_H_
+#define __RTNET_IOVEC_H_
+
+#ifdef __KERNEL__
+
+#include <linux/uio.h>
+
+struct user_msghdr;
+struct rtdm_fd;
+
+ssize_t rtnet_write_to_iov(struct rtdm_fd *fd, struct iovec *iov, int iovlen,
+			   const void *data, size_t len);
+
+ssize_t rtnet_read_from_iov(struct rtdm_fd *fd, struct iovec *iov, int iovlen,
+			    void *data, size_t len);
+#endif /* __KERNEL__ */
+
+#endif /* __RTNET_IOVEC_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtmac.h	2022-03-21 12:58:30.654876626 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/tdma_chrdev.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtmac.h
+ *
+ *  rtmac - real-time networking media access control subsystem
+ *  Copyright (C) 2004-2006 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *  As a special exception to the GNU General Public license, the RTnet
+ *  project allows you to use this header file in unmodified form to produce
+ *  application programs executing in user-space which use RTnet services by
+ *  normal system calls. The resulting executable will not be covered by the
+ *  GNU General Public License merely as a result of this header file use.
+ *  Instead, this header file use will be considered normal use of RTnet and
+ *  not a "derived work" in the sense of the GNU General Public License.
+ *
+ *  This exception does not apply when the application code is built as a
+ *  static or dynamically loadable portion of the Linux kernel nor does the
+ *  exception override other reasons justifying application of the GNU General
+ *  Public License.
+ *
+ *  This exception applies only to the code released by the RTnet project
+ *  under the name RTnet and bearing this exception notice. If you copy code
+ *  from other sources into a copy of RTnet, the exception does not apply to
+ *  the code that you add in this way.
+ *
+ */
+
+#ifndef __RTMAC_H_
+#define __RTMAC_H_
+
+#include <rtdm/rtdm.h>
+
+/* sub-classes: RTDM_CLASS_RTMAC */
+#define RTDM_SUBCLASS_TDMA 0
+#define RTDM_SUBCLASS_UNMANAGED 1
+
+#define RTIOC_TYPE_RTMAC RTDM_CLASS_RTMAC
+
+/* ** Common Cycle Event Types ** */
+/* standard event, wake up once per cycle */
+#define RTMAC_WAIT_ON_DEFAULT 0x00
+/* wake up on media access of the station, may trigger multiple times per
+   cycle */
+#define RTMAC_WAIT_ON_XMIT 0x01
+
+/* ** TDMA-specific Cycle Event Types ** */
+/* trigger on SYNC frame reception/transmission */
+#define TDMA_WAIT_ON_SYNC RTMAC_WAIT_ON_DEFAULT
+#define TDMA_WAIT_ON_SOF TDMA_WAIT_ON_SYNC /* legacy support */
+
+/* RTMAC_RTIOC_WAITONCYCLE_EX control and status data */
+struct rtmac_waitinfo {
+	/** Set to wait type before invoking the service */
+	unsigned int type;
+
+	/** Set to sizeof(struct rtmac_waitinfo) before invoking the service */
+	size_t size;
+
+	/** Counter of elementary cycles of the underlying RTmac discipline
+        (if applicable) */
+	unsigned long cycle_no;
+
+	/** Date (in local time) of the last elementary cycle start of the RTmac
+        discipline (if applicable) */
+	nanosecs_abs_t cycle_start;
+
+	/** Offset of the local clock to the global clock provided by the RTmac
+        discipline (if applicable): t_global = t_local + clock_offset */
+	nanosecs_rel_t clock_offset;
+};
+
+/* RTmac Discipline IOCTLs */
+#define RTMAC_RTIOC_TIMEOFFSET _IOR(RTIOC_TYPE_RTMAC, 0x00, int64_t)
+#define RTMAC_RTIOC_WAITONCYCLE _IOW(RTIOC_TYPE_RTMAC, 0x01, unsigned int)
+#define RTMAC_RTIOC_WAITONCYCLE_EX                                             \
+	_IOWR(RTIOC_TYPE_RTMAC, 0x02, struct rtmac_waitinfo)
+
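+/*
+ * Illustrative sketch only (not part of the ABI above): how an application
+ * might block until the next cycle via RTMAC_RTIOC_WAITONCYCLE_EX. The
+ * device path and the do_cyclic_work() helper are assumptions made for
+ * this example.
+ *
+ *	struct rtmac_waitinfo waitinfo = {
+ *		.type = TDMA_WAIT_ON_SYNC,
+ *		.size = sizeof(waitinfo),
+ *	};
+ *	int fd = open("/dev/rtdm/TDMA0", O_RDWR);	// assumed device name
+ *
+ *	if (fd >= 0 &&
+ *	    ioctl(fd, RTMAC_RTIOC_WAITONCYCLE_EX, &waitinfo) == 0)
+ *		do_cyclic_work(waitinfo.cycle_no, waitinfo.cycle_start);
+ */
+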
+#endif /* __RTMAC_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/tdma_chrdev.h	2022-03-21 12:58:30.646876704 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtdev_mgr.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/tdma_chrdev.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __TDMA_CHRDEV_H_
+#define __TDMA_CHRDEV_H_
+
+#ifndef __KERNEL__
+#include <inttypes.h>
+#endif
+
+#include <rtnet_chrdev.h>
+
+#define MIN_SLOT_SIZE 60
+
+struct tdma_config {
+	struct rtnet_ioctl_head head;
+
+	union {
+		struct {
+			__u64 cycle_period;
+			__u64 backup_sync_offset;
+			__u32 cal_rounds;
+			__u32 max_cal_requests;
+			__u32 max_slot_id;
+		} master;
+
+		struct {
+			__u32 cal_rounds;
+			__u32 max_slot_id;
+		} slave;
+
+		struct {
+			__s32 id;
+			__u32 period;
+			__u64 offset;
+			__u32 phasing;
+			__u32 size;
+			__s32 joint_slot;
+			__u32 cal_timeout;
+			__u64 *cal_results;
+		} set_slot;
+
+		struct {
+			__s32 id;
+		} remove_slot;
+
+		__u64 __padding[8];
+	} args;
+};
+
+#define TDMA_IOC_MASTER _IOW(RTNET_IOC_TYPE_RTMAC_TDMA, 0, struct tdma_config)
+#define TDMA_IOC_SLAVE _IOW(RTNET_IOC_TYPE_RTMAC_TDMA, 1, struct tdma_config)
+#define TDMA_IOC_CAL_RESULT_SIZE                                               \
+	_IOW(RTNET_IOC_TYPE_RTMAC_TDMA, 2, struct tdma_config)
+#define TDMA_IOC_SET_SLOT _IOW(RTNET_IOC_TYPE_RTMAC_TDMA, 3, struct tdma_config)
+#define TDMA_IOC_REMOVE_SLOT                                                   \
+	_IOW(RTNET_IOC_TYPE_RTMAC_TDMA, 4, struct tdma_config)
+#define TDMA_IOC_DETACH _IOW(RTNET_IOC_TYPE_RTMAC_TDMA, 5, struct tdma_config)
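+
+/*
+ * Illustrative sketch only, roughly mirroring what the tdmacfg tool does:
+ * fill the tdma_config union and submit it through the RTnet management
+ * ioctl interface. The rtnet_fd descriptor and all values are assumptions
+ * made for this example.
+ *
+ *	struct tdma_config cfg = { 0 };
+ *
+ *	cfg.args.master.cycle_period = 5000000;		// example value
+ *	cfg.args.master.max_cal_requests = 64;
+ *	cfg.args.master.max_slot_id = 7;
+ *	// ioctl(rtnet_fd, TDMA_IOC_MASTER, &cfg);
+ *
+ *	cfg.args.set_slot.id = 0;
+ *	cfg.args.set_slot.offset = 200000;		// example value
+ *	cfg.args.set_slot.size = MIN_SLOT_SIZE;
+ *	// ioctl(rtnet_fd, TDMA_IOC_SET_SLOT, &cfg);
+ */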
+
+#endif /* __TDMA_CHRDEV_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtdev_mgr.h	2022-03-21 12:58:30.639876772 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/ipv4/protocol.h	1970-01-01 01:00:00.000000000 +0100
+/* rtdev_mgr.h
+ *
+ * RTnet - real-time networking subsystem
+ * Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __RTDEV_MGR_H_
+#define __RTDEV_MGR_H_
+
+#ifdef __KERNEL__
+
+#include <rtnet_internal.h>
+
+extern void rtnetif_err_rx(struct rtnet_device *rtdev);
+extern void rtnetif_err_tx(struct rtnet_device *rtdev);
+
+extern void rt_rtdev_connect(struct rtnet_device *rtdev, struct rtnet_mgr *mgr);
+extern void rt_rtdev_disconnect(struct rtnet_device *rtdev);
+extern int rt_rtdev_mgr_init(struct rtnet_mgr *mgr);
+extern void rt_rtdev_mgr_delete(struct rtnet_mgr *mgr);
+extern int rt_rtdev_mgr_start(struct rtnet_mgr *mgr);
+extern int rt_rtdev_mgr_stop(struct rtnet_mgr *mgr);
+
+#endif /* __KERNEL__ */
+
+#endif /* __RTDEV_MGR_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/ipv4/protocol.h	2022-03-21 12:58:30.631876850 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/ipv4/ip_output.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/ipv4/protocol.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999, 2000 Zentropic Computing, LLC
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_PROTOCOL_H_
+#define __RTNET_PROTOCOL_H_
+
+#include <rtnet_socket.h>
+#include <rtskb.h>
+
+#define MAX_RT_INET_PROTOCOLS 32
+
+/***
+ * transport layer protocol
+ */
+struct rtinet_protocol {
+	char *name;
+	unsigned short protocol;
+
+	struct rtsocket *(*dest_socket)(struct rtskb *);
+	void (*rcv_handler)(struct rtskb *);
+	void (*err_handler)(struct rtskb *);
+	int (*init_socket)(struct rtdm_fd *);
+};
+
+extern struct rtinet_protocol *rt_inet_protocols[];
+
+#define rt_inet_hashkey(id) (id & (MAX_RT_INET_PROTOCOLS - 1))
+extern void rt_inet_add_protocol(struct rtinet_protocol *prot);
+extern void rt_inet_del_protocol(struct rtinet_protocol *prot);
+extern int rt_inet_socket(struct rtdm_fd *fd, int protocol);
+
+#endif /* __RTNET_PROTOCOL_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/ipv4/ip_output.h	2022-03-21 12:58:30.624876918 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/ipv4/ip_input.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/ipv4/ip_output.h - prepare outgoing IP packets
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999,2000 Zentropic Computing, LLC
+ *                2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_IP_OUTPUT_H_
+#define __RTNET_IP_OUTPUT_H_
+
+#include <linux/init.h>
+
+#include <rtdev.h>
+#include <ipv4/route.h>
+
+extern int rt_ip_build_xmit(struct rtsocket *sk,
+			    int getfrag(const void *, unsigned char *,
+					unsigned int, unsigned int),
+			    const void *frag, unsigned length,
+			    struct dest_route *rt, int flags);
+
+extern void __init rt_ip_init(void);
+extern void rt_ip_release(void);
+
+#endif /* __RTNET_IP_OUTPUT_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/ipv4/ip_input.h	2022-03-21 12:58:30.617876987 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/ipv4/af_inet.h	1970-01-01 01:00:00.000000000 +0100
+/* ipv4/ip_input.h
+ *
+ * RTnet - real-time networking subsystem
+ * Copyright (C) 1999,2000 Zentropic Computing, LLC
+ *               2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __RTNET_IP_INPUT_H_
+#define __RTNET_IP_INPUT_H_
+
+#include <rtskb.h>
+#include <stack_mgr.h>
+
+extern int rt_ip_rcv(struct rtskb *skb, struct rtpacket_type *pt);
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+typedef void (*rt_ip_fallback_handler_t)(struct rtskb *skb);
+
+/*
+ * This hook can be used to register a fallback handler for incoming
+ * IP packets. Typically this is done to move over to the standard Linux
+ * IP protocol (e.g. for handling TCP).
+ * Manipulating the fallback handler is expected to happen only when the
+ * RTnet interfaces are shut down (avoiding race conditions).
+ *
+ * Note that merging RT and non-RT traffic this way most likely breaks hard
+ * real-time constraints!
+ */
+extern rt_ip_fallback_handler_t rt_ip_fallback_handler;
+#endif
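+
+/*
+ * Minimal sketch (an assumption, not code from this tree) of how a proxy
+ * module could chain into the hook while the RTnet interfaces are down:
+ *
+ *	static rt_ip_fallback_handler_t saved_handler;
+ *
+ *	static void my_ip_fallback(struct rtskb *skb)
+ *	{
+ *		// hand the packet over to non-RT IP processing here
+ *	}
+ *
+ *	saved_handler = rt_ip_fallback_handler;
+ *	rt_ip_fallback_handler = my_ip_fallback;	// install
+ *	rt_ip_fallback_handler = saved_handler;		// restore on unload
+ */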
+
+#endif /* __RTNET_IP_INPUT_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/ipv4/af_inet.h	2022-03-21 12:58:30.609877064 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/ipv4/ip_fragment.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/ipv4/af_inet.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999, 2000 Zentropic Computing, LLC
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2004, 2005 Jan Kiszka <jan.kiszka@wev.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_AF_INET_H_
+#define __RTNET_AF_INET_H_
+
+#include <rtnet_internal.h>
+
+#ifdef CONFIG_XENO_OPT_VFILE
+extern struct xnvfile_directory ipv4_proc_root;
+#endif
+
+#endif /* __RTNET_AF_INET_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/ipv4/ip_fragment.h	2022-03-21 12:58:30.602877133 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/ipv4/ip_sock.h	1970-01-01 01:00:00.000000000 +0100
+/* ipv4/ip_fragment.h
+ *
+ * RTnet - real-time networking subsystem
+ * Copyright (C) 1999,2000 Zentropic Computing, LLC
+ *               2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __RTNET_IP_FRAGMENT_H_
+#define __RTNET_IP_FRAGMENT_H_
+
+#include <linux/init.h>
+
+#include <rtskb.h>
+#include <ipv4/protocol.h>
+
+extern struct rtskb *rt_ip_defrag(struct rtskb *skb,
+				  struct rtinet_protocol *ipprot);
+
+extern void rt_ip_frag_invalidate_socket(struct rtsocket *sock);
+
+extern int __init rt_ip_fragment_init(void);
+extern void rt_ip_fragment_cleanup(void);
+
+#endif /* __RTNET_IP_FRAGMENT_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/ipv4/ip_sock.h	2022-03-21 12:58:30.594877211 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/ipv4/route.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/ipv4/ip_sock.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_IP_SOCK_H_
+#define __RTNET_IP_SOCK_H_
+
+#include <rtnet_socket.h>
+
+extern int rt_ip_ioctl(struct rtdm_fd *fd, int request, void *arg);
+
+#endif /* __RTNET_IP_SOCK_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/ipv4/route.h	2022-03-21 12:58:30.587877279 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/ipv4/icmp.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/ipv4/route.h - real-time routing
+ *
+ *  Copyright (C) 2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  Rewritten version of the original route by David Schleef and Ulrich Marx
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_ROUTE_H_
+#define __RTNET_ROUTE_H_
+
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include <rtdev.h>
+
+struct dest_route {
+	u32 ip;
+	unsigned char dev_addr[MAX_ADDR_LEN];
+	struct rtnet_device *rtdev;
+};
+
+int rt_ip_route_add_host(u32 addr, unsigned char *dev_addr,
+			 struct rtnet_device *rtdev);
+void rt_ip_route_del_all(struct rtnet_device *rtdev);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+int rt_ip_route_add_net(u32 addr, u32 mask, u32 gw_addr);
+int rt_ip_route_del_net(u32 addr, u32 mask);
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_ROUTER
+int rt_ip_route_forward(struct rtskb *rtskb, u32 daddr);
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_ROUTER */
+
+int rt_ip_route_del_host(u32 addr, struct rtnet_device *rtdev);
+int rt_ip_route_get_host(u32 addr, char *if_name, unsigned char *dev_addr,
+			 struct rtnet_device *rtdev);
+int rt_ip_route_output(struct dest_route *rt_buf, u32 daddr, u32 saddr);
+
+int __init rt_ip_routing_init(void);
+void rt_ip_routing_release(void);
+
+#endif /* __RTNET_ROUTE_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/ipv4/icmp.h	2022-03-21 12:58:30.579877357 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/ipv4/udp.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  ipv4/icmp.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999, 2000 Zentropic Computing, LLC
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_ICMP_H_
+#define __RTNET_ICMP_H_
+
+#include <linux/init.h>
+
+#include <rtskb.h>
+#include <rtnet_rtpc.h>
+#include <ipv4/protocol.h>
+
+#define RT_ICMP_PRIO RTSKB_PRIO_VALUE(QUEUE_MIN_PRIO - 1, RTSKB_DEF_NRT_CHANNEL)
+
+#define ICMP_REPLY_POOL_SIZE 8
+
+void rt_icmp_queue_echo_request(struct rt_proc_call *call);
+void rt_icmp_dequeue_echo_request(struct rt_proc_call *call);
+void rt_icmp_cleanup_echo_requests(void);
+int rt_icmp_send_echo(u32 daddr, u16 id, u16 sequence, size_t msg_size);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_ICMP
+void __init rt_icmp_init(void);
+void rt_icmp_release(void);
+#else /* !CONFIG_XENO_DRIVERS_NET_RTIPV4_ICMP */
+#define rt_icmp_init()                                                         \
+	do {                                                                   \
+	} while (0)
+#define rt_icmp_release()                                                      \
+	do {                                                                   \
+	} while (0)
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_ICMP */
+
+#endif /* __RTNET_ICMP_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/ipv4/udp.h	2022-03-21 12:58:30.572877425 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/ipv4/arp.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/ipv4/udp.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999, 2000 Zentropic Computing, LLC
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_UDP_H_
+#define __RTNET_UDP_H_
+
+/* Maximum number of active udp sockets
+   Only increase with care (look-up delays!), must be power of 2 */
+#define RT_UDP_SOCKETS 64
+
+#endif /* __RTNET_UDP_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/ipv4/arp.h	2022-03-21 12:58:30.564877503 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/ipv4/tcp.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/ipv4/arp.h - Address Resolution Protocol for RTnet
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999,2000 Zentropic Computing, LLC
+ *                2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_ARP_H_
+#define __RTNET_ARP_H_
+
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include <ipv4/route.h>
+
+#define RT_ARP_SKB_PRIO                                                        \
+	RTSKB_PRIO_VALUE(QUEUE_MIN_PRIO - 1, RTSKB_DEF_NRT_CHANNEL)
+
+void rt_arp_send(int type, int ptype, u32 dest_ip, struct rtnet_device *rtdev,
+		 u32 src_ip, unsigned char *dest_hw, unsigned char *src_hw,
+		 unsigned char *target_hw);
+
+static inline void rt_arp_solicit(struct rtnet_device *rtdev, u32 target)
+{
+	rt_arp_send(ARPOP_REQUEST, ETH_P_ARP, target, rtdev, rtdev->local_ip,
+		    NULL, NULL, NULL);
+}
+
+void __init rt_arp_init(void);
+void rt_arp_release(void);
+
+#endif /* __RTNET_ARP_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/ipv4/tcp.h	2022-03-21 12:58:30.557877572 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/ipv4_chrdev.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/ipv4/tcp.h
+ *
+ *  Copyright (C) 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License, version 2, as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_TCP_H_
+#define __RTNET_TCP_H_
+
+#include <rtskb.h>
+#include <ipv4/protocol.h>
+
+/* Maximum number of active tcp sockets, must be power of 2 */
+#define RT_TCP_SOCKETS 32
+
+/* Maximum number of active tcp connections, must be power of 2 */
+#define RT_TCP_CONNECTIONS 64
+
+/* Maximum size of TCP input window */
+#define RT_TCP_WINDOW 4096
+
+/* Maximum number of retransmissions of invalid segments */
+#define RT_TCP_RETRANSMIT 3
+
+/* Number of milliseconds to wait for ACK */
+#define RT_TCP_WAIT_TIME 10
+
+/* Priority of RST|ACK replies (error condition => non-RT prio) */
+#define RT_TCP_RST_PRIO                                                        \
+	RTSKB_PRIO_VALUE(QUEUE_MIN_PRIO - 1, RTSKB_DEF_NRT_CHANNEL)
+
+/* rtskb pool for sending socket-less RST|ACK */
+#define RT_TCP_RST_POOL_SIZE 8
+
+#endif /* __RTNET_TCP_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/ipv4_chrdev.h	2022-03-21 12:58:30.550877640 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtnet_port.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/ipv4.h
+ *
+ *  Real-Time IP/UDP/ICMP stack
+ *
+ *  Copyright (C) 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __IPV4_H_
+#define __IPV4_H_
+
+#include <rtnet_chrdev.h>
+
+struct ipv4_cmd {
+	struct rtnet_ioctl_head head;
+
+	union {
+		/*** rtroute ***/
+		struct {
+			__u32 ip_addr;
+		} solicit;
+
+		struct {
+			__u8 dev_addr[DEV_ADDR_LEN];
+			__u32 ip_addr;
+		} gethost;
+
+		struct {
+			__u8 dev_addr[DEV_ADDR_LEN];
+			__u32 ip_addr;
+		} addhost;
+
+		struct {
+			__u32 ip_addr;
+		} delhost;
+
+		struct {
+			__u32 net_addr;
+			__u32 net_mask;
+			__u32 gw_addr;
+		} addnet;
+
+		struct {
+			__u32 net_addr;
+			__u32 net_mask;
+		} delnet;
+
+		/*** rtping ***/
+		struct {
+			__u32 ip_addr;
+			__u16 id;
+			__u16 sequence;
+			__u32 msg_size;
+			__u32 timeout;
+			__s64 rtt;
+		} ping;
+
+		__u64 __padding[8];
+	} args;
+};
+
+#define IOC_RT_HOST_ROUTE_ADD _IOW(RTNET_IOC_TYPE_IPV4, 0, struct ipv4_cmd)
+#define IOC_RT_HOST_ROUTE_SOLICIT _IOW(RTNET_IOC_TYPE_IPV4, 1, struct ipv4_cmd)
+#define IOC_RT_HOST_ROUTE_DELETE                                               \
+	_IOW(RTNET_IOC_TYPE_IPV4, 2 | RTNET_IOC_NODEV_PARAM, struct ipv4_cmd)
+#define IOC_RT_NET_ROUTE_ADD                                                   \
+	_IOW(RTNET_IOC_TYPE_IPV4, 3 | RTNET_IOC_NODEV_PARAM, struct ipv4_cmd)
+#define IOC_RT_NET_ROUTE_DELETE                                                \
+	_IOW(RTNET_IOC_TYPE_IPV4, 4 | RTNET_IOC_NODEV_PARAM, struct ipv4_cmd)
+#define IOC_RT_PING                                                            \
+	_IOWR(RTNET_IOC_TYPE_IPV4, 5 | RTNET_IOC_NODEV_PARAM, struct ipv4_cmd)
+#define IOC_RT_HOST_ROUTE_DELETE_DEV                                           \
+	_IOW(RTNET_IOC_TYPE_IPV4, 6, struct ipv4_cmd)
+#define IOC_RT_HOST_ROUTE_GET                                                  \
+	_IOWR(RTNET_IOC_TYPE_IPV4, 7 | RTNET_IOC_NODEV_PARAM, struct ipv4_cmd)
+#define IOC_RT_HOST_ROUTE_GET_DEV _IOWR(RTNET_IOC_TYPE_IPV4, 8, struct ipv4_cmd)
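+
+/*
+ * Illustrative sketch only, close to what the rtping tool does: send one
+ * echo request and read back the measured round-trip time. The rtnet_fd
+ * descriptor, the report_rtt() helper and all values are assumptions made
+ * for this example.
+ *
+ *	struct ipv4_cmd cmd = { 0 };
+ *
+ *	cmd.args.ping.ip_addr = inet_addr("10.0.0.2");
+ *	cmd.args.ping.id = 1;
+ *	cmd.args.ping.sequence = 1;
+ *	cmd.args.ping.msg_size = 56;
+ *	cmd.args.ping.timeout = 500;
+ *
+ *	if (ioctl(rtnet_fd, IOC_RT_PING, &cmd) == 0)
+ *		report_rtt(cmd.args.ping.rtt);	// rtt holds the measured time
+ */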
+
+#endif /* __IPV4_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtnet_port.h	2022-03-21 12:58:30.542877718 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtskb.h	1970-01-01 01:00:00.000000000 +0100
+/* include/rtnet_port.h
+ *
+ * RTnet - real-time networking subsystem
+ * Copyright (C) 2003      Wittawat Yamwong
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __RTNET_PORT_H_
+#define __RTNET_PORT_H_
+
+#ifdef __KERNEL__
+
+#include <linux/bitops.h>
+#include <linux/moduleparam.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/bitops.h>
+
+#include <rtdev.h>
+#include <rtdev_mgr.h>
+#include <rtdm/driver.h>
+#include <stack_mgr.h>
+#include <ethernet/eth.h>
+
+static inline void rtnetif_start_queue(struct rtnet_device *rtdev)
+{
+	clear_bit(__RTNET_LINK_STATE_XOFF, &rtdev->link_state);
+}
+
+static inline void rtnetif_wake_queue(struct rtnet_device *rtdev)
+{
+	if (test_and_clear_bit(__RTNET_LINK_STATE_XOFF, &rtdev->link_state))
+		/*TODO __netif_schedule(dev); */;
+}
+
+static inline void rtnetif_stop_queue(struct rtnet_device *rtdev)
+{
+	set_bit(__RTNET_LINK_STATE_XOFF, &rtdev->link_state);
+}
+
+static inline int rtnetif_queue_stopped(struct rtnet_device *rtdev)
+{
+	return test_bit(__RTNET_LINK_STATE_XOFF, &rtdev->link_state);
+}
+
+static inline int rtnetif_running(struct rtnet_device *rtdev)
+{
+	return test_bit(__RTNET_LINK_STATE_START, &rtdev->link_state);
+}
+
+static inline int rtnetif_device_present(struct rtnet_device *rtdev)
+{
+	return test_bit(__RTNET_LINK_STATE_PRESENT, &rtdev->link_state);
+}
+
+static inline void rtnetif_device_detach(struct rtnet_device *rtdev)
+{
+	if (test_and_clear_bit(__RTNET_LINK_STATE_PRESENT,
+			       &rtdev->link_state) &&
+	    rtnetif_running(rtdev)) {
+		rtnetif_stop_queue(rtdev);
+	}
+}
+
+static inline void rtnetif_device_attach(struct rtnet_device *rtdev)
+{
+	if (!test_and_set_bit(__RTNET_LINK_STATE_PRESENT, &rtdev->link_state) &&
+	    rtnetif_running(rtdev)) {
+		rtnetif_wake_queue(rtdev);
+		/* __netdev_watchdog_up(rtdev); */
+	}
+}
+
+static inline void rtnetif_carrier_on(struct rtnet_device *rtdev)
+{
+	clear_bit(__RTNET_LINK_STATE_NOCARRIER, &rtdev->link_state);
+	/*
+    if (netif_running(dev))
+	__netdev_watchdog_up(dev);
+    */
+}
+
+static inline void rtnetif_carrier_off(struct rtnet_device *rtdev)
+{
+	set_bit(__RTNET_LINK_STATE_NOCARRIER, &rtdev->link_state);
+}
+
+static inline int rtnetif_carrier_ok(struct rtnet_device *rtdev)
+{
+	return !test_bit(__RTNET_LINK_STATE_NOCARRIER, &rtdev->link_state);
+}
+
+#define NIPQUAD(addr)                                                          \
+	((unsigned char *)&addr)[0], ((unsigned char *)&addr)[1],              \
+		((unsigned char *)&addr)[2], ((unsigned char *)&addr)[3]
+#define NIPQUAD_FMT "%u.%u.%u.%u"
+
+#endif /* __KERNEL__ */
+
+#endif /* __RTNET_PORT_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtskb.h	2022-03-21 12:58:30.535877786 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/nomac_chrdev.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtskb.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2002      Ulrich Marx <marx@kammer.uni-hannover.de>,
+ *                2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTSKB_H_
+#define __RTSKB_H_
+
+#ifdef __KERNEL__
+
+#include <linux/skbuff.h>
+
+#include <rtdm/net.h>
+#include <rtnet_internal.h>
+
+/***
+
+rtskb Management - A Short Introduction
+---------------------------------------
+
+1. rtskbs (Real-Time Socket Buffers)
+
+A rtskb consists of a management structure (struct rtskb) and a fixed-sized
+(RTSKB_SIZE) data buffer. It is used to store network packets on their way from
+the API routines through the stack to the NICs or vice versa. rtskbs are
+allocated as one chunk of memory which contains both the management structure
+and the buffer memory itself.
+
+
+2. rtskb Queues
+
+A rtskb queue is described by struct rtskb_queue. A queue can contain an
+unlimited number of rtskbs in an ordered way. A rtskb can either be added to
+the head (rtskb_queue_head()) or the tail of a queue (rtskb_queue_tail()). When
+a rtskb is removed from a queue (rtskb_dequeue()), it is always taken from the
+head. Queues are normally spin lock protected unless the __variants of the
+queuing functions are used.
+
+
+3. Prioritized rtskb Queues
+
+A prioritized queue contains a number of normal rtskb queues within an array.
+The array index of a sub-queue corresponds to the priority of the rtskbs within
+this queue. For enqueuing a rtskb (rtskb_prio_queue_head()), its priority field
+is evaluated and the rtskb is then placed into the appropriate sub-queue. When
+dequeuing a rtskb, the first rtskb of the first non-empty sub-queue with the
+highest priority is returned. The current implementation supports 32 different
+priority levels, the lowest is defined by QUEUE_MIN_PRIO, the highest by
+QUEUE_MAX_PRIO.
+
+
+4. rtskb Pools
+
+As rtskbs must not be allocated by a normal memory manager during runtime,
+preallocated rtskbs are kept ready in several pools. Most packet producers
+(NICs, sockets, etc.) have their own pools in order to be independent of the
+load situation of other parts of the stack.
+
+When a pool is created (rtskb_pool_init()), the required rtskbs are allocated
+from a Linux slab cache. Pools can be extended (rtskb_pool_extend()) or
+shrunk (rtskb_pool_shrink()) during runtime. When shutting down the
+program/module, every pool has to be released (rtskb_pool_release()). All these
+operations must be executed within a non-real-time context.
+
+Pools are organized as normal rtskb queues (struct rtskb_queue). When a rtskb
+is allocated (alloc_rtskb()), it is actually dequeued from the pool's queue.
+When freeing a rtskb (kfree_rtskb()), the rtskb is enqueued to its owning pool.
+rtskbs can be exchanged between pools (rtskb_acquire()). In this case, the
+passed rtskb switches over from its owning pool to a given pool, but only if
+this pool can pass an empty rtskb from its own queue back.
+
+
+5. rtskb Chains
+
+To ease the defragmentation of larger IP packets, several rtskbs can form a
+chain. For these purposes, the first rtskb (and only the first!) provides a
+pointer to the last rtskb in the chain. When enqueuing the first rtskb of a
+chain, the whole chain is automatically placed into the destination queue. But
+to dequeue a complete chain, specialized calls are required (postfix: _chain).
+While chains also get freed en bloc (kfree_rtskb()) when passing the first
+rtskb, it is not possible to allocate a chain from a pool (alloc_rtskb()); a
+newly allocated rtskb is always reset to a "single rtskb chain". Furthermore,
+the acquisition of complete chains is NOT supported (rtskb_acquire()).
+
+
+6. Capturing Support (Optional)
+
+When incoming or outgoing packets are captured, the assigned rtskb needs to be
+shared between the stack, the driver, and the capturing service. In contrast to
+many other network stacks, RTnet does not create a new rtskb head that
+re-references the payload. Instead, additional fields at the end of the rtskb
+structure are used for sharing a rtskb with a capturing service. If the sharing
+bit (RTSKB_CAP_SHARED) in cap_flags is set, the rtskb will not be returned to
+the owning pool upon the call of kfree_rtskb. Instead this bit will be reset,
+and a compensation rtskb stored in cap_comp_skb will be returned to the owning
+pool. cap_start and cap_len can be used to mirror the dimension of the full
+packet. This is required because the data and len fields will be modified while
+walking through the stack. cap_next allows adding a rtskb to a separate queue
+which is independent of any queue described in 2.
+
+Certain setup tasks for capturing packets cannot become part of a capturing
+module, they have to be embedded into the stack. For this purpose, several
+inline functions are provided. rtcap_mark_incoming() is used to save the packet
+dimension right before it is modified by the stack. rtcap_report_incoming()
+calls the capturing handler, if present, in order to let it process the
+received rtskb (e.g. allocate compensation rtskb, mark original rtskb as
+shared, and enqueue it).
+
+Outgoing rtskbs have to be captured by adding a hook function to the chain of
+hard_start_xmit functions of a device. To measure the delay caused by RTmac
+between the request and the actual transmission, a time stamp can be taken using
+rtcap_mark_rtmac_enqueue(). This function is typically called by RTmac
+disciplines when they add a rtskb to their internal transmission queue. In such
+a case, the RTSKB_CAP_RTMAC_STAMP bit is set in cap_flags to indicate that the
+cap_rtmac_stamp field now contains valid data.
+
+ ***/
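+
+/*
+ * Minimal usage sketch for the pool/queue API below (illustration only;
+ * it assumes `pool` was set up beforehand with rtskb_pool_init(), which is
+ * declared elsewhere in this stack):
+ *
+ *	struct rtskb_queue rx_queue;
+ *	struct rtskb *skb;
+ *
+ *	rtskb_queue_init(&rx_queue);
+ *
+ *	skb = alloc_rtskb(RTSKB_SIZE, &pool);	// take one rtskb from the pool
+ *	if (skb) {
+ *		rtskb_queue_tail(&rx_queue, skb);	// producer side
+ *		skb = rtskb_dequeue(&rx_queue);		// consumer side
+ *		kfree_rtskb(skb);			// back to its owning pool
+ *	}
+ */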
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#endif
+
+#define RTSKB_CAP_SHARED 1 /* rtskb shared between stack and RTcap */
+#define RTSKB_CAP_RTMAC_STAMP 2 /* cap_rtmac_stamp is valid             */
+
+#define RTSKB_UNMAPPED 0
+
+struct rtskb_queue;
+struct rtsocket;
+struct rtnet_device;
+
+/***
+ *  rtskb - realtime socket buffer
+ */
+struct rtskb {
+	struct rtskb *next; /* used for queuing rtskbs */
+	struct rtskb *chain_end; /* marks the end of a rtskb chain starting
+				       with this very rtskb */
+
+	struct rtskb_pool *pool; /* owning pool */
+
+	unsigned int priority; /* bit 0..15: prio, 16..31: user-defined */
+
+	struct rtsocket *sk; /* assigned socket */
+	struct rtnet_device *rtdev; /* source or destination device */
+
+	nanosecs_abs_t time_stamp; /* arrival or transmission (RTcap) time */
+
+	/* patch address of the transmission time stamp, can be NULL
+     * calculation: *xmit_stamp = cpu_to_be64(time_in_ns + *xmit_stamp)
+     */
+	nanosecs_abs_t *xmit_stamp;
+
+	/* transport layer */
+	union {
+		struct tcphdr *th;
+		struct udphdr *uh;
+		struct icmphdr *icmph;
+		struct iphdr *ipihdr;
+		unsigned char *raw;
+	} h;
+
+	/* network layer */
+	union {
+		struct iphdr *iph;
+		struct arphdr *arph;
+		unsigned char *raw;
+	} nh;
+
+	/* link layer */
+	union {
+		struct ethhdr *ethernet;
+		unsigned char *raw;
+	} mac;
+
+	unsigned short protocol;
+	unsigned char pkt_type;
+
+	unsigned char ip_summed;
+	unsigned int csum;
+
+	unsigned char *data;
+	unsigned char *tail;
+	unsigned char *end;
+	unsigned int len;
+
+	dma_addr_t buf_dma_addr;
+
+	unsigned char *buf_start;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
+	unsigned char *buf_end;
+#endif
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
+	int cap_flags; /* see RTSKB_CAP_xxx                    */
+	struct rtskb *cap_comp_skb; /* compensation rtskb                */
+	struct rtskb *cap_next; /* used for capture queue               */
+	unsigned char *cap_start; /* start offset for capturing           */
+	unsigned int cap_len; /* capture length of this rtskb         */
+	nanosecs_abs_t cap_rtmac_stamp; /* RTmac enqueuing time            */
+#endif
+
+	struct list_head entry; /* for global rtskb list */
+};
+
+struct rtskb_queue {
+	struct rtskb *first;
+	struct rtskb *last;
+	rtdm_lock_t lock;
+};
+
+struct rtskb_pool_lock_ops {
+	int (*trylock)(void *cookie);
+	void (*unlock)(void *cookie);
+};
+
+struct rtskb_pool {
+	struct rtskb_queue queue;
+	const struct rtskb_pool_lock_ops *lock_ops;
+	void *lock_cookie;
+};
+
+#define QUEUE_MAX_PRIO 0
+#define QUEUE_MIN_PRIO 31
+
+struct rtskb_prio_queue {
+	rtdm_lock_t lock;
+	unsigned long usage; /* bit array encoding non-empty sub-queues */
+	struct rtskb_queue queue[QUEUE_MIN_PRIO + 1];
+};
+
+#define RTSKB_PRIO_MASK 0x0000FFFF /* bits  0..15: xmit prio    */
+#define RTSKB_CHANNEL_MASK 0xFFFF0000 /* bits 16..31: xmit channel */
+#define RTSKB_CHANNEL_SHIFT 16
+
+#define RTSKB_DEF_RT_CHANNEL SOCK_DEF_RT_CHANNEL
+#define RTSKB_DEF_NRT_CHANNEL SOCK_DEF_NRT_CHANNEL
+#define RTSKB_USER_CHANNEL SOCK_USER_CHANNEL
+
+/* Note: always keep SOCK_XMIT_PARAMS consistent with definitions above! */
+#define RTSKB_PRIO_VALUE SOCK_XMIT_PARAMS
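+
+/*
+ * Example (illustration only): encode a transmission priority together with
+ * the default non-RT channel, as done by RT_ICMP_PRIO and RT_ARP_SKB_PRIO:
+ *
+ *	skb->priority = RTSKB_PRIO_VALUE(QUEUE_MIN_PRIO - 1, RTSKB_DEF_NRT_CHANNEL);
+ */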
+
+/* default values for the module parameter */
+#define DEFAULT_GLOBAL_RTSKBS 0 /* default number of rtskb's in global pool */
+#define DEFAULT_DEVICE_RTSKBS                                                  \
+	16 /* default additional rtskbs per network adapter */
+#define DEFAULT_SOCKET_RTSKBS 16 /* default number of rtskb's in socket pools */
+
+#define ALIGN_RTSKB_STRUCT_LEN SKB_DATA_ALIGN(sizeof(struct rtskb))
+#define RTSKB_SIZE                  (2048 + NET_IP_ALIGN)    /* maximum needed by igb */
+
+extern unsigned int rtskb_pools; /* current number of rtskb pools      */
+extern unsigned int rtskb_pools_max; /* maximum number of rtskb pools      */
+extern unsigned int rtskb_amount; /* current number of allocated rtskbs */
+extern unsigned int rtskb_amount_max; /* maximum number of allocated rtskbs */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
+extern void rtskb_over_panic(struct rtskb *skb, int len, void *here);
+extern void rtskb_under_panic(struct rtskb *skb, int len, void *here);
+#endif
+
+extern struct rtskb *rtskb_pool_dequeue(struct rtskb_pool *pool);
+
+extern void rtskb_pool_queue_tail(struct rtskb_pool *pool, struct rtskb *skb);
+
+extern struct rtskb *alloc_rtskb(unsigned int size, struct rtskb_pool *pool);
+
+extern void kfree_rtskb(struct rtskb *skb);
+#define dev_kfree_rtskb(a) kfree_rtskb(a)
+
+static inline void rtskb_tx_timestamp(struct rtskb *skb)
+{
+	nanosecs_abs_t *ts = skb->xmit_stamp;
+
+	if (!ts)
+		return;
+
+	*ts = cpu_to_be64(rtdm_clock_read() + *ts);
+}
+
+/***
+ *  rtskb_queue_init - initialize the queue
+ *  @queue
+ */
+static inline void rtskb_queue_init(struct rtskb_queue *queue)
+{
+	rtdm_lock_init(&queue->lock);
+	queue->first = NULL;
+	queue->last = NULL;
+}
+
+/***
+ *  rtskb_prio_queue_init - initialize the prioritized queue
+ *  @prioqueue
+ */
+static inline void rtskb_prio_queue_init(struct rtskb_prio_queue *prioqueue)
+{
+	memset(prioqueue, 0, sizeof(struct rtskb_prio_queue));
+	rtdm_lock_init(&prioqueue->lock);
+}
+
+/***
+ *  rtskb_queue_empty
+ *  @queue
+ */
+static inline int rtskb_queue_empty(struct rtskb_queue *queue)
+{
+	return (queue->first == NULL);
+}
+
+/***
+ *  rtskb_prio_queue_empty
+ *  @prioqueue
+ */
+static inline int rtskb_prio_queue_empty(struct rtskb_prio_queue *prioqueue)
+{
+	return (prioqueue->usage == 0);
+}
+
+/***
+ *  __rtskb_queue_head - insert a buffer at the queue head (w/o locks)
+ *  @queue: queue to use
+ *  @skb: buffer to queue
+ */
+static inline void __rtskb_queue_head(struct rtskb_queue *queue,
+				      struct rtskb *skb)
+{
+	struct rtskb *chain_end = skb->chain_end;
+
+	chain_end->next = queue->first;
+
+	if (queue->first == NULL)
+		queue->last = chain_end;
+	queue->first = skb;
+}
+
+/***
+ *  rtskb_queue_head - insert a buffer at the queue head (lock protected)
+ *  @queue: queue to use
+ *  @skb: buffer to queue
+ */
+static inline void rtskb_queue_head(struct rtskb_queue *queue,
+				    struct rtskb *skb)
+{
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&queue->lock, context);
+	__rtskb_queue_head(queue, skb);
+	rtdm_lock_put_irqrestore(&queue->lock, context);
+}
+
+/***
+ *  __rtskb_prio_queue_head - insert a buffer at the prioritized queue head
+ *                            (w/o locks)
+ *  @prioqueue: queue to use
+ *  @skb: buffer to queue
+ */
+static inline void __rtskb_prio_queue_head(struct rtskb_prio_queue *prioqueue,
+					   struct rtskb *skb)
+{
+	unsigned int prio = skb->priority & RTSKB_PRIO_MASK;
+
+	RTNET_ASSERT(prio <= 31, prio = 31;);
+
+	__rtskb_queue_head(&prioqueue->queue[prio], skb);
+	__set_bit(prio, &prioqueue->usage);
+}
+
+/***
+ *  rtskb_prio_queue_head - insert a buffer at the prioritized queue head
+ *                          (lock protected)
+ *  @prioqueue: queue to use
+ *  @skb: buffer to queue
+ */
+static inline void rtskb_prio_queue_head(struct rtskb_prio_queue *prioqueue,
+					 struct rtskb *skb)
+{
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&prioqueue->lock, context);
+	__rtskb_prio_queue_head(prioqueue, skb);
+	rtdm_lock_put_irqrestore(&prioqueue->lock, context);
+}
+
+/***
+ *  __rtskb_queue_tail - insert a buffer at the queue tail (w/o locks)
+ *  @queue: queue to use
+ *  @skb: buffer to queue
+ */
+static inline void __rtskb_queue_tail(struct rtskb_queue *queue,
+				      struct rtskb *skb)
+{
+	struct rtskb *chain_end = skb->chain_end;
+
+	chain_end->next = NULL;
+
+	if (queue->first == NULL)
+		queue->first = skb;
+	else
+		queue->last->next = skb;
+	queue->last = chain_end;
+}
+
+/***
+ *  rtskb_queue_tail - insert a buffer at the queue tail (lock protected)
+ *  @queue: queue to use
+ *  @skb: buffer to queue
+ */
+static inline void rtskb_queue_tail(struct rtskb_queue *queue,
+				    struct rtskb *skb)
+{
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&queue->lock, context);
+	__rtskb_queue_tail(queue, skb);
+	rtdm_lock_put_irqrestore(&queue->lock, context);
+}
+
+/***
+ *  rtskb_queue_tail_check - variant of rtskb_queue_tail
+ *          returning true on empty->non empty transition.
+ *  @queue: queue to use
+ *  @skb: buffer to queue
+ */
+static inline bool rtskb_queue_tail_check(struct rtskb_queue *queue,
+					  struct rtskb *skb)
+{
+	rtdm_lockctx_t context;
+	bool ret;
+
+	rtdm_lock_get_irqsave(&queue->lock, context);
+	ret = queue->first == NULL;
+	__rtskb_queue_tail(queue, skb);
+	rtdm_lock_put_irqrestore(&queue->lock, context);
+
+	return ret;
+}
+
+/***
+ *  __rtskb_prio_queue_tail - insert a buffer at the prioritized queue tail
+ *                            (w/o locks)
+ *  @prioqueue: queue to use
+ *  @skb: buffer to queue
+ */
+static inline void __rtskb_prio_queue_tail(struct rtskb_prio_queue *prioqueue,
+					   struct rtskb *skb)
+{
+	unsigned int prio = skb->priority & RTSKB_PRIO_MASK;
+
+	RTNET_ASSERT(prio <= 31, prio = 31;);
+
+	__rtskb_queue_tail(&prioqueue->queue[prio], skb);
+	__set_bit(prio, &prioqueue->usage);
+}
+
+/***
+ *  rtskb_prio_queue_tail - insert a buffer at the prioritized queue tail
+ *                          (lock protected)
+ *  @prioqueue: queue to use
+ *  @skb: buffer to queue
+ */
+static inline void rtskb_prio_queue_tail(struct rtskb_prio_queue *prioqueue,
+					 struct rtskb *skb)
+{
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&prioqueue->lock, context);
+	__rtskb_prio_queue_tail(prioqueue, skb);
+	rtdm_lock_put_irqrestore(&prioqueue->lock, context);
+}
+
+/***
+ *  __rtskb_dequeue - remove from the head of the queue (w/o locks)
+ *  @queue: queue to remove from
+ */
+static inline struct rtskb *__rtskb_dequeue(struct rtskb_queue *queue)
+{
+	struct rtskb *result;
+
+	if ((result = queue->first) != NULL) {
+		queue->first = result->next;
+		result->next = NULL;
+	}
+
+	return result;
+}
+
+/***
+ *  rtskb_dequeue - remove from the head of the queue (lock protected)
+ *  @queue: queue to remove from
+ */
+static inline struct rtskb *rtskb_dequeue(struct rtskb_queue *queue)
+{
+	rtdm_lockctx_t context;
+	struct rtskb *result;
+
+	rtdm_lock_get_irqsave(&queue->lock, context);
+	result = __rtskb_dequeue(queue);
+	rtdm_lock_put_irqrestore(&queue->lock, context);
+
+	return result;
+}
+
+/***
+ *  __rtskb_prio_dequeue - remove from the head of the prioritized queue
+ *                         (w/o locks)
+ *  @prioqueue: queue to remove from
+ */
+static inline struct rtskb *
+__rtskb_prio_dequeue(struct rtskb_prio_queue *prioqueue)
+{
+	int prio;
+	struct rtskb *result = NULL;
+	struct rtskb_queue *sub_queue;
+
+	if (prioqueue->usage) {
+		prio = ffz(~prioqueue->usage);
+		sub_queue = &prioqueue->queue[prio];
+		result = __rtskb_dequeue(sub_queue);
+		if (rtskb_queue_empty(sub_queue))
+			__change_bit(prio, &prioqueue->usage);
+	}
+
+	return result;
+}
+
+/***
+ *  rtskb_prio_dequeue - remove from the head of the prioritized queue
+ *                       (lock protected)
+ *  @prioqueue: queue to remove from
+ */
+static inline struct rtskb *
+rtskb_prio_dequeue(struct rtskb_prio_queue *prioqueue)
+{
+	rtdm_lockctx_t context;
+	struct rtskb *result;
+
+	rtdm_lock_get_irqsave(&prioqueue->lock, context);
+	result = __rtskb_prio_dequeue(prioqueue);
+	rtdm_lock_put_irqrestore(&prioqueue->lock, context);
+
+	return result;
+}
+
+/***
+ *  __rtskb_dequeue_chain - remove a chain from the head of the queue
+ *                          (w/o locks)
+ *  @queue: queue to remove from
+ */
+static inline struct rtskb *__rtskb_dequeue_chain(struct rtskb_queue *queue)
+{
+	struct rtskb *result;
+	struct rtskb *chain_end;
+
+	if ((result = queue->first) != NULL) {
+		chain_end = result->chain_end;
+		queue->first = chain_end->next;
+		chain_end->next = NULL;
+	}
+
+	return result;
+}
+
+/***
+ *  rtskb_dequeue_chain - remove a chain from the head of the queue
+ *                        (lock protected)
+ *  @queue: queue to remove from
+ */
+static inline struct rtskb *rtskb_dequeue_chain(struct rtskb_queue *queue)
+{
+	rtdm_lockctx_t context;
+	struct rtskb *result;
+
+	rtdm_lock_get_irqsave(&queue->lock, context);
+	result = __rtskb_dequeue_chain(queue);
+	rtdm_lock_put_irqrestore(&queue->lock, context);
+
+	return result;
+}
+
+/***
+ *  rtskb_prio_dequeue_chain - remove a chain from the head of the
+ *                             prioritized queue
+ *  @prioqueue: queue to remove from
+ */
+static inline struct rtskb *
+rtskb_prio_dequeue_chain(struct rtskb_prio_queue *prioqueue)
+{
+	rtdm_lockctx_t context;
+	int prio;
+	struct rtskb *result = NULL;
+	struct rtskb_queue *sub_queue;
+
+	rtdm_lock_get_irqsave(&prioqueue->lock, context);
+	if (prioqueue->usage) {
+		prio = ffz(~prioqueue->usage);
+		sub_queue = &prioqueue->queue[prio];
+		result = __rtskb_dequeue_chain(sub_queue);
+		if (rtskb_queue_empty(sub_queue))
+			__change_bit(prio, &prioqueue->usage);
+	}
+	rtdm_lock_put_irqrestore(&prioqueue->lock, context);
+
+	return result;
+}
+
+/***
+ *  rtskb_queue_purge - clean the queue
+ *  @queue: queue to be purged
+ */
+static inline void rtskb_queue_purge(struct rtskb_queue *queue)
+{
+	struct rtskb *skb;
+	while ((skb = rtskb_dequeue(queue)) != NULL)
+		kfree_rtskb(skb);
+}
+
+static inline int rtskb_headlen(const struct rtskb *skb)
+{
+	return skb->len;
+}
+
+static inline void rtskb_reserve(struct rtskb *skb, unsigned int len)
+{
+	skb->data += len;
+	skb->tail += len;
+}
+
+static inline unsigned char *__rtskb_put(struct rtskb *skb, unsigned int len)
+{
+	unsigned char *tmp = skb->tail;
+
+	skb->tail += len;
+	skb->len += len;
+	return tmp;
+}
+
+#define rtskb_put(skb, length)                                                 \
+	({                                                                     \
+		struct rtskb *__rtskb = (skb);                                 \
+		unsigned int __len = (length);                                 \
+		unsigned char *tmp = __rtskb->tail;                            \
+                                                                               \
+		__rtskb->tail += __len;                                        \
+		__rtskb->len += __len;                                         \
+                                                                               \
+		RTNET_ASSERT(__rtskb->tail <= __rtskb->buf_end,                \
+			     rtskb_over_panic(__rtskb, __len,                  \
+					      current_text_addr()););          \
+                                                                               \
+		tmp;                                                           \
+	})
+
+static inline unsigned char *__rtskb_push(struct rtskb *skb, unsigned int len)
+{
+	skb->data -= len;
+	skb->len += len;
+	return skb->data;
+}
+
+#define rtskb_push(skb, length)                                                \
+	({                                                                     \
+		struct rtskb *__rtskb = (skb);                                 \
+		unsigned int __len = (length);                                 \
+                                                                               \
+		__rtskb->data -= __len;                                        \
+		__rtskb->len += __len;                                         \
+                                                                               \
+		RTNET_ASSERT(__rtskb->data >= __rtskb->buf_start,              \
+			     rtskb_under_panic(__rtskb, __len,                 \
+					       current_text_addr()););         \
+                                                                               \
+		__rtskb->data;                                                 \
+	})
+
+static inline unsigned char *__rtskb_pull(struct rtskb *skb, unsigned int len)
+{
+	RTNET_ASSERT(len <= skb->len, return NULL;);
+
+	skb->len -= len;
+
+	return skb->data += len;
+}
+
+static inline unsigned char *rtskb_pull(struct rtskb *skb, unsigned int len)
+{
+	if (len > skb->len)
+		return NULL;
+
+	skb->len -= len;
+
+	return skb->data += len;
+}
+
+static inline void rtskb_trim(struct rtskb *skb, unsigned int len)
+{
+	if (skb->len > len) {
+		skb->len = len;
+		skb->tail = skb->data + len;
+	}
+}
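+
+/*
+ * Illustrative usage sketch (not part of the original header): the helpers
+ * above mirror the classic sk_buff data-pointer API.  Given an rtskb obtained
+ * from an rtskb pool, a typical build sequence is:
+ *
+ *	rtskb_reserve(skb, hdr_space);                 leave headroom
+ *	memcpy(rtskb_put(skb, data_len), data, data_len);
+ *	hdr = rtskb_push(skb, sizeof(*hdr));           prepend a header
+ *
+ * and rtskb_pull()/rtskb_trim() undo this on the receive side.
+ */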
+
+static inline struct rtskb *rtskb_padto(struct rtskb *rtskb, unsigned int len)
+{
+	RTNET_ASSERT(len <= (unsigned int)(rtskb->buf_end + 1 - rtskb->data),
+		     return NULL;);
+
+	memset(rtskb->data + rtskb->len, 0, len - rtskb->len);
+
+	return rtskb;
+}
+
+static inline dma_addr_t rtskb_data_dma_addr(struct rtskb *rtskb,
+					     unsigned int offset)
+{
+	return rtskb->buf_dma_addr + rtskb->data - rtskb->buf_start + offset;
+}
+
+extern struct rtskb_pool global_pool;
+
+extern unsigned int rtskb_pool_init(struct rtskb_pool *pool,
+				    unsigned int initial_size,
+				    const struct rtskb_pool_lock_ops *lock_ops,
+				    void *lock_cookie);
+
+extern unsigned int __rtskb_module_pool_init(struct rtskb_pool *pool,
+					     unsigned int initial_size,
+					     struct module *module);
+
+#define rtskb_module_pool_init(pool, size)                                     \
+	__rtskb_module_pool_init(pool, size, THIS_MODULE)
+
+extern void rtskb_pool_release(struct rtskb_pool *pool);
+
+extern unsigned int rtskb_pool_extend(struct rtskb_pool *pool,
+				      unsigned int add_rtskbs);
+extern unsigned int rtskb_pool_shrink(struct rtskb_pool *pool,
+				      unsigned int rem_rtskbs);
+extern int rtskb_acquire(struct rtskb *rtskb, struct rtskb_pool *comp_pool);
+extern struct rtskb *rtskb_clone(struct rtskb *rtskb, struct rtskb_pool *pool);
+
+extern int rtskb_pools_init(void);
+extern void rtskb_pools_release(void);
+
+extern unsigned int rtskb_copy_and_csum_bits(const struct rtskb *skb,
+					     int offset, u8 *to, int len,
+					     unsigned int csum);
+extern void rtskb_copy_and_csum_dev(const struct rtskb *skb, u8 *to);
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
+
+extern rtdm_lock_t rtcap_lock;
+extern void (*rtcap_handler)(struct rtskb *skb);
+
+static inline void rtcap_mark_incoming(struct rtskb *skb)
+{
+	skb->cap_start = skb->data;
+	skb->cap_len = skb->len;
+}
+
+static inline void rtcap_report_incoming(struct rtskb *skb)
+{
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&rtcap_lock, context);
+	if (rtcap_handler != NULL)
+		rtcap_handler(skb);
+
+	rtdm_lock_put_irqrestore(&rtcap_lock, context);
+}
+
+static inline void rtcap_mark_rtmac_enqueue(struct rtskb *skb)
+{
+	/* rtskb start and length are probably not valid yet */
+	skb->cap_flags |= RTSKB_CAP_RTMAC_STAMP;
+	skb->cap_rtmac_stamp = rtdm_clock_read();
+}
+
+#else /* ifndef CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP */
+
+#define rtcap_mark_incoming(skb)
+#define rtcap_report_incoming(skb)
+#define rtcap_mark_rtmac_enqueue(skb)
+
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP */
+
+#endif /* __KERNEL__ */
+
+#endif /* __RTSKB_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/nomac_chrdev.h	2022-03-21 12:58:30.528877855 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtnet_internal.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/nomac_chrdev.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __NOMAC_CHRDEV_H_
+#define __NOMAC_CHRDEV_H_
+
+#include <rtnet_chrdev.h>
+
+struct nomac_config {
+	struct rtnet_ioctl_head head;
+};
+
+#define NOMAC_IOC_ATTACH                                                       \
+	_IOW(RTNET_IOC_TYPE_RTMAC_NOMAC, 0, struct nomac_config)
+#define NOMAC_IOC_DETACH                                                       \
+	_IOW(RTNET_IOC_TYPE_RTMAC_NOMAC, 1, struct nomac_config)
+
+#endif /* __NOMAC_CHRDEV_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtnet_internal.h	2022-03-21 12:58:30.520877933 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtskb_fifo.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtnet_internal.h - internal declarations
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999       Lineo, Inc
+ *                1999, 2002 David A. Schleef <ds@schleef.org>
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_INTERNAL_H_
+#define __RTNET_INTERNAL_H_
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <rtdm/driver.h>
+
+#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
+#define RTNET_ASSERT(expr, func)                                               \
+	if (!(expr)) {                                                         \
+		rtdm_printk("Assertion failed! %s:%s:%d %s\n", __FILE__,       \
+			    __FUNCTION__, __LINE__, (#expr));                  \
+		func                                                           \
+	}
+#else
+#define RTNET_ASSERT(expr, func)
+#endif /* CONFIG_XENO_DRIVERS_NET_CHECKED */
+
+/* some configurables */
+
+#define RTNET_DEF_STACK_PRIORITY                                               \
+	(RTDM_TASK_HIGHEST_PRIORITY + RTDM_TASK_LOWER_PRIORITY)
+/*#define RTNET_RTDEV_PRIORITY        5*/
+
+struct rtnet_device;
+
+/*struct rtnet_msg {
+    int                 msg_type;
+    struct rtnet_device *rtdev;
+};*/
+
+struct rtnet_mgr {
+	rtdm_task_t task;
+	/*    MBX     mbx;*/
+	rtdm_event_t event;
+};
+
+extern struct rtnet_mgr STACK_manager;
+extern struct rtnet_mgr RTDEV_manager;
+
+extern const char rtnet_rtdm_provider_name[];
+
+#ifdef CONFIG_XENO_OPT_VFILE
+extern struct xnvfile_directory rtnet_proc_root;
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+extern struct class *rtnet_class;
+
+#endif /* __RTNET_INTERNAL_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtskb_fifo.h	2022-03-21 12:58:30.513878001 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtwlan_io.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtskb_fifo.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTSKB_FIFO_H_
+#define __RTSKB_FIFO_H_
+
+#include <rtskb.h>
+
+struct rtskb_fifo {
+	unsigned long read_pos ____cacheline_aligned_in_smp;
+	rtdm_lock_t read_lock;
+	unsigned long size_mask;
+	unsigned long write_pos ____cacheline_aligned_in_smp;
+	rtdm_lock_t write_lock;
+	struct rtskb *buffer[0];
+};
+
+#define DECLARE_RTSKB_FIFO(name_prefix, size)                                  \
+	struct {                                                               \
+		struct rtskb_fifo fifo;                                        \
+		struct rtskb *__buffer[(size)];                                \
+	} name_prefix
+
+static inline int __rtskb_fifo_insert(struct rtskb_fifo *fifo,
+				      struct rtskb *rtskb)
+{
+	unsigned long pos = fifo->write_pos;
+	unsigned long new_pos = (pos + 1) & fifo->size_mask;
+
+	if (unlikely(new_pos == fifo->read_pos))
+		return -EAGAIN;
+
+	fifo->buffer[pos] = rtskb;
+
+	/* rtskb must have been written before write_pos update */
+	smp_wmb();
+
+	fifo->write_pos = new_pos;
+
+	return 0;
+}
+
+static inline int rtskb_fifo_insert(struct rtskb_fifo *fifo,
+				    struct rtskb *rtskb)
+{
+	rtdm_lockctx_t context;
+	int result;
+
+	rtdm_lock_get_irqsave(&fifo->write_lock, context);
+	result = __rtskb_fifo_insert(fifo, rtskb);
+	rtdm_lock_put_irqrestore(&fifo->write_lock, context);
+
+	return result;
+}
+
+static inline int rtskb_fifo_insert_inirq(struct rtskb_fifo *fifo,
+					  struct rtskb *rtskb)
+{
+	int result;
+
+	rtdm_lock_get(&fifo->write_lock);
+	result = __rtskb_fifo_insert(fifo, rtskb);
+	rtdm_lock_put(&fifo->write_lock);
+
+	return result;
+}
+
+static inline struct rtskb *__rtskb_fifo_remove(struct rtskb_fifo *fifo)
+{
+	unsigned long pos = fifo->read_pos;
+	struct rtskb *result;
+
+	/* check FIFO status first */
+	if (unlikely(pos == fifo->write_pos))
+		return NULL;
+
+	/* at least one rtskb is enqueued, so get the next one */
+	result = fifo->buffer[pos];
+
+	/* result must have been read before read_pos update */
+	smp_rmb();
+
+	fifo->read_pos = (pos + 1) & fifo->size_mask;
+
+	/* read_pos must have been written for a consistent fifo state on exit */
+	smp_wmb();
+
+	return result;
+}
+
+static inline struct rtskb *rtskb_fifo_remove(struct rtskb_fifo *fifo)
+{
+	rtdm_lockctx_t context;
+	struct rtskb *result;
+
+	rtdm_lock_get_irqsave(&fifo->read_lock, context);
+	result = __rtskb_fifo_remove(fifo);
+	rtdm_lock_put_irqrestore(&fifo->read_lock, context);
+
+	return result;
+}
+
+static inline struct rtskb *rtskb_fifo_remove_inirq(struct rtskb_fifo *fifo)
+{
+	struct rtskb *result;
+
+	rtdm_lock_get(&fifo->read_lock);
+	result = __rtskb_fifo_remove(fifo);
+	rtdm_lock_put(&fifo->read_lock);
+
+	return result;
+}
+
+/* for now inlined... */
+static inline void rtskb_fifo_init(struct rtskb_fifo *fifo, unsigned long size)
+{
+	fifo->read_pos = 0;
+	fifo->write_pos = 0;
+	fifo->size_mask = size - 1;
+	rtdm_lock_init(&fifo->read_lock);
+	rtdm_lock_init(&fifo->write_lock);
+}
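+
+/*
+ * Illustrative usage sketch (not part of the original header): positions are
+ * wrapped with size_mask = size - 1, so the size handed to rtskb_fifo_init()
+ * must be a power of two, and one slot stays unused to tell "full" from
+ * "empty":
+ *
+ *	static DECLARE_RTSKB_FIFO(rx_fifo, 32);
+ *
+ *	rtskb_fifo_init(&rx_fifo.fifo, 32);
+ *	if (rtskb_fifo_insert(&rx_fifo.fifo, skb) < 0)
+ *		kfree_rtskb(skb);	   FIFO full (-EAGAIN)
+ */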
+
+#endif /* __RTSKB_FIFO_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtwlan_io.h	2022-03-21 12:58:30.505878079 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtnet_checksum.h	1970-01-01 01:00:00.000000000 +0100
+/* rtwlan_io.h
+ *
+ * Copyright (C) 2006      Daniel Gregorek <dxg@gmx.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef RTWLAN_IO
+#define RTWLAN_IO
+
+#include <rtnet_chrdev.h>
+
+#define RTWLAN_TXMODE_RAW 0
+#define RTWLAN_TXMODE_ACK 1
+#define RTWLAN_TXMODE_MCAST 2
+
+#define ENORTWLANDEV 0xff08
+
+struct rtwlan_cmd {
+	struct rtnet_ioctl_head head;
+
+	union {
+		struct {
+			unsigned int bitrate;
+			unsigned int channel;
+			unsigned int retry;
+			unsigned int txpower;
+			unsigned int mode;
+			unsigned int autoresponder;
+			unsigned int dropbcast;
+			unsigned int dropmcast;
+			unsigned int bbpsens;
+		} set;
+
+		struct {
+			unsigned int address;
+			unsigned int value;
+		} reg;
+
+		struct {
+			int ifindex;
+			unsigned int flags;
+			unsigned int bitrate;
+			unsigned int channel;
+			unsigned int retry;
+			unsigned int txpower;
+			unsigned int bbpsens;
+			unsigned int mode;
+			unsigned int autoresponder;
+			unsigned int dropbcast;
+			unsigned int dropmcast;
+			unsigned int rx_packets;
+			unsigned int tx_packets;
+			unsigned int tx_retry;
+		} info;
+	} args;
+};
+
+#define RTNET_IOC_TYPE_RTWLAN 8
+
+#define IOC_RTWLAN_IFINFO                                                      \
+	_IOWR(RTNET_IOC_TYPE_RTWLAN, 0 | RTNET_IOC_NODEV_PARAM,                \
+	      struct rtwlan_cmd)
+
+#define IOC_RTWLAN_BITRATE _IOWR(RTNET_IOC_TYPE_RTWLAN, 1, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_CHANNEL _IOWR(RTNET_IOC_TYPE_RTWLAN, 2, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_TXPOWER _IOWR(RTNET_IOC_TYPE_RTWLAN, 3, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_RETRY _IOWR(RTNET_IOC_TYPE_RTWLAN, 4, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_TXMODE _IOWR(RTNET_IOC_TYPE_RTWLAN, 5, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_DROPBCAST _IOWR(RTNET_IOC_TYPE_RTWLAN, 6, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_DROPMCAST _IOWR(RTNET_IOC_TYPE_RTWLAN, 7, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_REGREAD _IOWR(RTNET_IOC_TYPE_RTWLAN, 8, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_REGWRITE _IOWR(RTNET_IOC_TYPE_RTWLAN, 9, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_BBPWRITE _IOWR(RTNET_IOC_TYPE_RTWLAN, 10, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_BBPREAD _IOWR(RTNET_IOC_TYPE_RTWLAN, 11, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_BBPSENS _IOWR(RTNET_IOC_TYPE_RTWLAN, 12, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_AUTORESP _IOWR(RTNET_IOC_TYPE_RTWLAN, 13, struct rtwlan_cmd)
+
+#endif
+++ linux-patched/drivers/xenomai/net/stack/include/rtnet_checksum.h	2022-03-21 12:58:30.498878147 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtwlan.h	1970-01-01 01:00:00.000000000 +0100
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __RTNET_CHECKSUM_H_
+#define __RTNET_CHECKSUM_H_
+
+#include <linux/string.h>
+#include <net/checksum.h>
+
+#define rtnet_csum(__buf, __len, __csum)				\
+	({								\
+		csum_partial(__buf, __len, (__force __wsum)__csum);	\
+	})
+
+#define rtnet_csum_copy(__src, __dst, __len, __csum)			\
+	({								\
+		memcpy(__dst, __src, __len);				\
+		csum_partial(__dst, __len, (__force __wsum)__csum);	\
+	})
+
+#endif /* !__RTNET_CHECKSUM_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtwlan.h	2022-03-21 12:58:30.491878215 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtdev.h	1970-01-01 01:00:00.000000000 +0100
+/* rtwlan.h
+ *
+ * This file is an RTnet adaptation of ieee80211/ieee80211.h used by the
+ * rt2x00-2.0.0-b3 sourceforge project
+ *
+ * Merged with mainline ieee80211.h in Aug 2004.  Original ieee802_11
+ * remains copyright by the original authors
+ *
+ * Portions of the merged code are based on Host AP (software wireless
+ * LAN access point) driver for Intersil Prism2/2.5/3.
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * Adaption to a generic IEEE 802.11 stack by James Ketrenos
+ * <jketreno@linux.intel.com>
+ * Copyright (c) 2004-2005, Intel Corporation
+ *
+ * Adaption to rtnet
+ * Copyright (c) 2006, Daniel Gregorek <dxg@gmx.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef RTWLAN_H
+#define RTWLAN_H
+
+#include <linux/if_ether.h> /* ETH_ALEN */
+#include <linux/kernel.h> /* ARRAY_SIZE */
+
+#include <rtskb.h>
+#include <rtwlan_io.h>
+
+#define IEEE80211_1ADDR_LEN 10
+#define IEEE80211_2ADDR_LEN 16
+#define IEEE80211_3ADDR_LEN 24
+#define IEEE80211_4ADDR_LEN 30
+#define IEEE80211_FCS_LEN 4
+#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+#define MIN_FRAG_THRESHOLD 256U
+#define MAX_FRAG_THRESHOLD 2346U
+
+/* Frame control field constants */
+#define IEEE80211_FCTL_VERS 0x0003
+#define IEEE80211_FCTL_FTYPE 0x000c
+#define IEEE80211_FCTL_STYPE 0x00f0
+#define IEEE80211_FCTL_TODS 0x0100
+#define IEEE80211_FCTL_FROMDS 0x0200
+#define IEEE80211_FCTL_MOREFRAGS 0x0400
+#define IEEE80211_FCTL_RETRY 0x0800
+#define IEEE80211_FCTL_PM 0x1000
+#define IEEE80211_FCTL_MOREDATA 0x2000
+#define IEEE80211_FCTL_PROTECTED 0x4000
+#define IEEE80211_FCTL_ORDER 0x8000
+
+#define IEEE80211_FTYPE_MGMT 0x0000
+#define IEEE80211_FTYPE_CTL 0x0004
+#define IEEE80211_FTYPE_DATA 0x0008
+
+/* management */
+#define IEEE80211_STYPE_ASSOC_REQ 0x0000
+#define IEEE80211_STYPE_ASSOC_RESP 0x0010
+#define IEEE80211_STYPE_REASSOC_REQ 0x0020
+#define IEEE80211_STYPE_REASSOC_RESP 0x0030
+#define IEEE80211_STYPE_PROBE_REQ 0x0040
+#define IEEE80211_STYPE_PROBE_RESP 0x0050
+#define IEEE80211_STYPE_BEACON 0x0080
+#define IEEE80211_STYPE_ATIM 0x0090
+#define IEEE80211_STYPE_DISASSOC 0x00A0
+#define IEEE80211_STYPE_AUTH 0x00B0
+#define IEEE80211_STYPE_DEAUTH 0x00C0
+#define IEEE80211_STYPE_ACTION 0x00D0
+
+/* control */
+#define IEEE80211_STYPE_PSPOLL 0x00A0
+#define IEEE80211_STYPE_RTS 0x00B0
+#define IEEE80211_STYPE_CTS 0x00C0
+#define IEEE80211_STYPE_ACK 0x00D0
+#define IEEE80211_STYPE_CFEND 0x00E0
+#define IEEE80211_STYPE_CFENDACK 0x00F0
+
+/* data */
+#define IEEE80211_STYPE_DATA 0x0000
+#define IEEE80211_STYPE_DATA_CFACK 0x0010
+#define IEEE80211_STYPE_DATA_CFPOLL 0x0020
+#define IEEE80211_STYPE_DATA_CFACKPOLL 0x0030
+#define IEEE80211_STYPE_NULLFUNC 0x0040
+#define IEEE80211_STYPE_CFACK 0x0050
+#define IEEE80211_STYPE_CFPOLL 0x0060
+#define IEEE80211_STYPE_CFACKPOLL 0x0070
+#define IEEE80211_STYPE_QOS_DATA 0x0080
+
+#define RTWLAN_SCTL_SEQ 0xFFF0
+
+#define WLAN_FC_GET_VERS(fc) ((fc)&IEEE80211_FCTL_VERS)
+#define WLAN_FC_GET_TYPE(fc) ((fc)&IEEE80211_FCTL_FTYPE)
+#define WLAN_FC_GET_STYPE(fc) ((fc)&IEEE80211_FCTL_STYPE)
+
+#define IEEE80211_DSSS_RATE_1MB 0x02
+#define IEEE80211_DSSS_RATE_2MB 0x04
+#define IEEE80211_DSSS_RATE_5MB 0x0B
+#define IEEE80211_DSSS_RATE_11MB 0x16
+#define IEEE80211_OFDM_RATE_6MB 0x0C
+#define IEEE80211_OFDM_RATE_9MB 0x12
+#define IEEE80211_OFDM_RATE_12MB 0x18
+#define IEEE80211_OFDM_RATE_18MB 0x24
+#define IEEE80211_OFDM_RATE_24MB 0x30
+#define IEEE80211_OFDM_RATE_36MB 0x48
+#define IEEE80211_OFDM_RATE_48MB 0x60
+#define IEEE80211_OFDM_RATE_54MB 0x6C
+#define IEEE80211_BASIC_RATE_MASK 0x80
+
+#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_ARG(x)                                                             \
+	((u8 *)(x))[0], ((u8 *)(x))[1], ((u8 *)(x))[2], ((u8 *)(x))[3],        \
+		((u8 *)(x))[4], ((u8 *)(x))[5]
+
+#ifdef CONFIG_RTWLAN_DEBUG
+#define RTWLAN_DEBUG_PRINTK(__message...)                                      \
+	do {                                                                   \
+		rtdm_printk(__message);                                        \
+	} while (0)
+#define RTWLAN_DEBUG(__message, __args...)                                     \
+	RTWLAN_DEBUG_PRINTK(KERN_DEBUG "rtwlan->%s: Debug - " __message,       \
+			    __FUNCTION__, ##__args);
+#else
+#define RTWLAN_DEBUG(__message...)                                             \
+	do {                                                                   \
+	} while (0)
+#endif
+
+struct rtwlan_stats {
+	unsigned long rx_packets; /* total packets received	*/
+	unsigned long tx_packets; /* total packets transmitted	*/
+	unsigned long tx_retry; /* total packets transmitted with retry */
+};
+
+struct rtwlan_device {
+	struct rtwlan_stats stats;
+
+	struct rtskb_pool skb_pool;
+
+	int mode;
+
+	int (*hard_start_xmit)(struct rtskb *rtskb,
+			       struct rtnet_device *rtnet_dev);
+
+	/* This must be the last item */
+	u8 priv[0];
+};
+
+/* Minimal header; can be used for passing 802.11 frames with sufficient
+ * information to determine what type of underlying data is actually
+ * stored in the frame. */
+struct ieee80211_hdr {
+	u16 frame_ctl;
+	u16 duration_id;
+	u8 payload[0];
+} __attribute__((packed));
+
+struct ieee80211_hdr_3addr {
+	u16 frame_ctl;
+	u16 duration_id;
+	u8 addr1[ETH_ALEN];
+	u8 addr2[ETH_ALEN];
+	u8 addr3[ETH_ALEN];
+	u16 seq_ctl;
+	u8 payload[0];
+} __attribute__((packed));
+
+static inline int ieee80211_get_hdrlen(u16 fc)
+{
+	int hdrlen = IEEE80211_3ADDR_LEN;
+	u16 stype = WLAN_FC_GET_STYPE(fc);
+
+	switch (WLAN_FC_GET_TYPE(fc)) {
+	case IEEE80211_FTYPE_DATA:
+		if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
+			hdrlen = IEEE80211_4ADDR_LEN;
+		if (stype & IEEE80211_STYPE_QOS_DATA)
+			hdrlen += 2;
+		break;
+
+	case IEEE80211_FTYPE_CTL:
+		switch (WLAN_FC_GET_STYPE(fc)) {
+		case IEEE80211_STYPE_CTS:
+		case IEEE80211_STYPE_ACK:
+			hdrlen = IEEE80211_1ADDR_LEN;
+			break;
+
+		default:
+			hdrlen = IEEE80211_2ADDR_LEN;
+			break;
+		}
+		break;
+	}
+
+	return hdrlen;
+}
+
+static inline int ieee80211_is_ofdm_rate(u8 rate)
+{
+	switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
+	case IEEE80211_OFDM_RATE_6MB:
+	case IEEE80211_OFDM_RATE_9MB:
+	case IEEE80211_OFDM_RATE_12MB:
+	case IEEE80211_OFDM_RATE_18MB:
+	case IEEE80211_OFDM_RATE_24MB:
+	case IEEE80211_OFDM_RATE_36MB:
+	case IEEE80211_OFDM_RATE_48MB:
+	case IEEE80211_OFDM_RATE_54MB:
+		return 1;
+	}
+	return 0;
+}
+
+static inline int ieee80211_is_dsss_rate(u8 rate)
+{
+	switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
+	case IEEE80211_DSSS_RATE_1MB:
+	case IEEE80211_DSSS_RATE_2MB:
+	case IEEE80211_DSSS_RATE_5MB:
+	case IEEE80211_DSSS_RATE_11MB:
+		return 1;
+	}
+	return 0;
+}
+
+static inline void *rtwlan_priv(struct rtwlan_device *rtwlan_dev)
+{
+	return (void *)rtwlan_dev + sizeof(struct rtwlan_device);
+}
+
+struct rtnet_device *rtwlan_alloc_dev(unsigned sizeof_priv,
+				      unsigned dev_pool_size);
+int rtwlan_rx(struct rtskb *rtskb, struct rtnet_device *rtnet_dev);
+int rtwlan_tx(struct rtskb *rtskb, struct rtnet_device *rtnet_dev);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTWLAN
+int __init rtwlan_init(void);
+void rtwlan_exit(void);
+#else /* !CONFIG_XENO_DRIVERS_NET_RTWLAN */
+#define rtwlan_init() 0
+#define rtwlan_exit()
+#endif /* CONFIG_XENO_DRIVERS_NET_RTWLAN */
+
+#endif
+++ linux-patched/drivers/xenomai/net/stack/include/rtdev.h	2022-03-21 12:58:30.483878293 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtmac/rtmac_proto.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtdev.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999       Lineo, Inc
+ *                1999, 2002 David A. Schleef <ds@schleef.org>
+ *                2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTDEV_H_
+#define __RTDEV_H_
+
+#define MAX_RT_DEVICES 8
+
+#ifdef __KERNEL__
+
+#include <asm/atomic.h>
+#include <linux/netdevice.h>
+
+#include <rtskb.h>
+#include <rtnet_internal.h>
+
+#define RTDEV_VERS_2_0 0x0200
+
+#define PRIV_FLAG_UP 0
+#define PRIV_FLAG_ADDING_ROUTE 1
+
+#ifndef NETIF_F_LLTX
+#define NETIF_F_LLTX 4096
+#endif
+
+#define RTDEV_TX_OK 0
+#define RTDEV_TX_BUSY 1
+
+enum rtnet_link_state {
+	__RTNET_LINK_STATE_XOFF = 0,
+	__RTNET_LINK_STATE_START,
+	__RTNET_LINK_STATE_PRESENT,
+	__RTNET_LINK_STATE_NOCARRIER,
+};
+#define RTNET_LINK_STATE_XOFF (1 << __RTNET_LINK_STATE_XOFF)
+#define RTNET_LINK_STATE_START (1 << __RTNET_LINK_STATE_START)
+#define RTNET_LINK_STATE_PRESENT (1 << __RTNET_LINK_STATE_PRESENT)
+#define RTNET_LINK_STATE_NOCARRIER (1 << __RTNET_LINK_STATE_NOCARRIER)
+
+/***
+ *  rtnet_device
+ */
+struct rtnet_device {
+	/* Many fields are borrowed from struct net_device in
+     * <linux/netdevice.h> - WY
+     */
+	unsigned int vers;
+
+	char name[IFNAMSIZ];
+	struct device *sysbind; /* device bound in sysfs (optional) */
+
+	unsigned long rmem_end; /* shmem "recv" end     */
+	unsigned long rmem_start; /* shmem "recv" start   */
+	unsigned long mem_end; /* shared mem end       */
+	unsigned long mem_start; /* shared mem start     */
+	unsigned long base_addr; /* device I/O address   */
+	unsigned int irq; /* device IRQ number    */
+
+	/*
+     *  Some hardware also needs these fields, but they are not
+     *  part of the usual set specified in Space.c.
+     */
+	unsigned char if_port; /* Selectable AUI, TP,..*/
+	unsigned char dma; /* DMA channel          */
+	__u16 __padding;
+
+	unsigned long link_state;
+	int ifindex;
+	atomic_t refcount;
+
+	struct device *sysdev; /* node in driver model for sysfs */
+	struct module *rt_owner; /* like classic owner, but      *
+				     * forces correct macro usage   */
+
+	unsigned int flags; /* interface flags (a la BSD)   */
+	unsigned long priv_flags; /* internal flags               */
+	unsigned short type; /* interface hardware type      */
+	unsigned short hard_header_len; /* hardware hdr length  */
+	unsigned int mtu; /* eth = 1536, tr = 4...        */
+	void *priv; /* pointer to private data      */
+	netdev_features_t features; /* [RT]NETIF_F_*                */
+
+	/* Interface address info. */
+	unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
+	unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address   */
+	unsigned char addr_len; /* hardware address length      */
+
+	int promiscuity;
+	int allmulti;
+
+	__u32 local_ip; /* IP address in network order  */
+	__u32 broadcast_ip; /* broadcast IP in network order */
+
+	rtdm_event_t *stack_event;
+
+	rtdm_mutex_t xmit_mutex; /* protects xmit routine        */
+	rtdm_lock_t rtdev_lock; /* management lock              */
+	struct mutex nrt_lock; /* non-real-time locking        */
+
+	unsigned int add_rtskbs; /* additionally allocated global rtskbs */
+
+	struct rtskb_pool dev_pool;
+
+	/* RTmac related fields */
+	struct rtmac_disc *mac_disc;
+	struct rtmac_priv *mac_priv;
+	int (*mac_detach)(struct rtnet_device *rtdev);
+
+	/* Device operations */
+	int (*open)(struct rtnet_device *rtdev);
+	int (*stop)(struct rtnet_device *rtdev);
+	int (*hard_header)(struct rtskb *, struct rtnet_device *,
+			   unsigned short type, void *daddr, void *saddr,
+			   unsigned int len);
+	int (*rebuild_header)(struct rtskb *);
+	int (*hard_start_xmit)(struct rtskb *skb, struct rtnet_device *dev);
+	int (*hw_reset)(struct rtnet_device *rtdev);
+
+	/* Transmission hook, managed by the stack core, RTcap, and RTmac
+     *
+     * If xmit_lock is used, start_xmit points either to rtdev_locked_xmit or
+     * the RTmac discipline handler. If xmit_lock is not required, start_xmit
+     * points to hard_start_xmit or the discipline handler.
+     */
+	int (*start_xmit)(struct rtskb *skb, struct rtnet_device *dev);
+
+	/* MTU hook, managed by the stack core and RTmac */
+	unsigned int (*get_mtu)(struct rtnet_device *rtdev,
+				unsigned int priority);
+
+	int (*do_ioctl)(struct rtnet_device *rtdev, struct ifreq *ifr, int cmd);
+	struct net_device_stats *(*get_stats)(struct rtnet_device *rtdev);
+
+	/* DMA pre-mapping hooks */
+	dma_addr_t (*map_rtskb)(struct rtnet_device *rtdev, struct rtskb *skb);
+	void (*unmap_rtskb)(struct rtnet_device *rtdev, struct rtskb *skb);
+};
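+
+/*
+ * Illustrative sketch (assumption, not taken from this header): a transmit
+ * path built on the hooks above would roughly serialize on xmit_mutex and
+ * dispatch through start_xmit, e.g.
+ *
+ *	rtdm_mutex_lock(&rtdev->xmit_mutex);
+ *	ret = rtdev->start_xmit(skb, rtdev);
+ *	rtdm_mutex_unlock(&rtdev->xmit_mutex);
+ *
+ * where start_xmit points at hard_start_xmit or an RTmac discipline handler,
+ * as described above.
+ */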
+
+struct rtnet_core_cmd;
+
+struct rtdev_event_hook {
+	struct list_head entry;
+	void (*register_device)(struct rtnet_device *rtdev);
+	void (*unregister_device)(struct rtnet_device *rtdev);
+	void (*ifup)(struct rtnet_device *rtdev, struct rtnet_core_cmd *up_cmd);
+	void (*ifdown)(struct rtnet_device *rtdev);
+};
+
+extern struct list_head event_hook_list;
+extern struct mutex rtnet_devices_nrt_lock;
+extern struct rtnet_device *rtnet_devices[];
+
+int __rt_init_etherdev(struct rtnet_device *rtdev, unsigned int dev_pool_size,
+		       struct module *module);
+
+#define rt_init_etherdev(__rtdev, __dev_pool_size)                             \
+	__rt_init_etherdev(__rtdev, __dev_pool_size, THIS_MODULE)
+
+struct rtnet_device *__rt_alloc_etherdev(unsigned sizeof_priv,
+					 unsigned dev_pool_size,
+					 struct module *module);
+#define rt_alloc_etherdev(priv_size, rx_size)                                  \
+	__rt_alloc_etherdev(priv_size, rx_size, THIS_MODULE)
+
+void rtdev_destroy(struct rtnet_device *rtdev);
+
+void rtdev_free(struct rtnet_device *rtdev);
+
+int rt_register_rtnetdev(struct rtnet_device *rtdev);
+int rt_unregister_rtnetdev(struct rtnet_device *rtdev);
+
+void rtdev_add_event_hook(struct rtdev_event_hook *hook);
+void rtdev_del_event_hook(struct rtdev_event_hook *hook);
+
+void rtdev_alloc_name(struct rtnet_device *rtdev, const char *name_mask);
+
+/**
+ *  __rtdev_get_by_index - find a rtnet_device by its ifindex
+ *  @ifindex: index of device
+ *  @note: caller must hold rtnet_devices_nrt_lock
+ */
+static inline struct rtnet_device *__rtdev_get_by_index(int ifindex)
+{
+	return rtnet_devices[ifindex - 1];
+}
+
+struct rtnet_device *rtdev_get_by_name(const char *if_name);
+struct rtnet_device *rtdev_get_by_index(int ifindex);
+struct rtnet_device *rtdev_get_by_hwaddr(unsigned short type, char *ha);
+struct rtnet_device *rtdev_get_loopback(void);
+
+int rtdev_reference(struct rtnet_device *rtdev);
+
+static inline void rtdev_dereference(struct rtnet_device *rtdev)
+{
+	smp_mb__before_atomic();
+	if (rtdev->rt_owner && atomic_dec_and_test(&rtdev->refcount))
+		module_put(rtdev->rt_owner);
+}
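+
+/*
+ * Illustrative sketch (assumption, not taken from this header): the
+ * rtdev_get_by_*() lookups presumably return a referenced device, so callers
+ * pair them with rtdev_dereference():
+ *
+ *	struct rtnet_device *rtdev = rtdev_get_by_name("rteth0");
+ *
+ *	if (rtdev) {
+ *		...
+ *		rtdev_dereference(rtdev);
+ *	}
+ */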
+
+int rtdev_xmit(struct rtskb *skb);
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+int rtdev_xmit_proxy(struct rtskb *skb);
+#endif
+
+unsigned int rt_hard_mtu(struct rtnet_device *rtdev, unsigned int priority);
+
+int rtdev_open(struct rtnet_device *rtdev);
+int rtdev_close(struct rtnet_device *rtdev);
+
+int rtdev_up(struct rtnet_device *rtdev, struct rtnet_core_cmd *cmd);
+int rtdev_down(struct rtnet_device *rtdev);
+
+int rtdev_map_rtskb(struct rtskb *skb);
+void rtdev_unmap_rtskb(struct rtskb *skb);
+
+struct rtskb *rtnetdev_alloc_rtskb(struct rtnet_device *dev, unsigned int size);
+
+#define rtnetdev_priv(dev) ((dev)->priv)
+
+#define rtdev_emerg(__dev, format, args...)                                    \
+	pr_emerg("%s: " format, (__dev)->name, ##args)
+#define rtdev_alert(__dev, format, args...)                                    \
+	pr_alert("%s: " format, (__dev)->name, ##args)
+#define rtdev_crit(__dev, format, args...)                                     \
+	pr_crit("%s: " format, (__dev)->name, ##args)
+#define rtdev_err(__dev, format, args...)                                      \
+	pr_err("%s: " format, (__dev)->name, ##args)
+#define rtdev_warn(__dev, format, args...)                                     \
+	pr_warn("%s: " format, (__dev)->name, ##args)
+#define rtdev_notice(__dev, format, args...)                                   \
+	pr_notice("%s: " format, (__dev)->name, ##args)
+#define rtdev_info(__dev, format, args...)                                     \
+	pr_info("%s: " format, (__dev)->name, ##args)
+#define rtdev_dbg(__dev, format, args...)                                      \
+	pr_debug("%s: " format, (__dev)->name, ##args)
+
+#ifdef VERBOSE_DEBUG
+#define rtdev_vdbg rtdev_dbg
+#else
+#define rtdev_vdbg(__dev, format, args...)                                     \
+	({                                                                     \
+		if (0)                                                         \
+			pr_debug("%s: " format, (__dev)->name, ##args);        \
+                                                                               \
+		0;                                                             \
+	})
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __RTDEV_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtmac/rtmac_proto.h	2022-03-21 12:58:30.476878362 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_worker.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtmac/rtmac_proto.h
+ *
+ *  rtmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTMAC_PROTO_H_
+#define __RTMAC_PROTO_H_
+
+#include <stack_mgr.h>
+
+#define RTMAC_VERSION 0x02
+#define ETH_RTMAC 0x9021
+
+#define RTMAC_FLAG_TUNNEL 0x01
+
+struct rtmac_hdr {
+	u16 type;
+	u8 ver;
+	u8 flags;
+} __attribute__((packed));
+
+static inline int rtmac_add_header(struct rtnet_device *rtdev, void *daddr,
+				   struct rtskb *skb, u16 type, u8 flags)
+{
+	struct rtmac_hdr *hdr =
+		(struct rtmac_hdr *)rtskb_push(skb, sizeof(struct rtmac_hdr));
+
+	hdr->type = htons(type);
+	hdr->ver = RTMAC_VERSION;
+	hdr->flags = flags;
+
+	skb->rtdev = rtdev;
+
+	if (rtdev->hard_header &&
+	    (rtdev->hard_header(skb, rtdev, ETH_RTMAC, daddr, rtdev->dev_addr,
+				skb->len) < 0))
+		return -1;
+
+	return 0;
+}
+
+static inline int rtmac_xmit(struct rtskb *skb)
+{
+	struct rtnet_device *rtdev = skb->rtdev;
+	int ret;
+
+	ret = rtdev->hard_start_xmit(skb, rtdev);
+	if (ret != 0)
+		kfree_rtskb(skb);
+
+	return ret;
+}
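+
+/*
+ * Illustrative sketch (not part of the original header): a discipline would
+ * typically prepend its header and hand the frame to the driver in one go:
+ *
+ *	if (rtmac_add_header(rtdev, daddr, skb, disc_type, 0) < 0) {
+ *		kfree_rtskb(skb);
+ *		return -1;
+ *	}
+ *	return rtmac_xmit(skb);
+ *
+ * rtmac_xmit() already frees the rtskb if the driver rejects it.
+ */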
+
+extern struct rtpacket_type rtmac_packet_type;
+
+#define rtmac_proto_init() rtdev_add_pack(&rtmac_packet_type)
+void rtmac_proto_release(void);
+
+#endif /* __RTMAC_PROTO_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_worker.h	2022-03-21 12:58:30.468878440 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_dev.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtmac/tdma/tdma_worker.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __TDMA_WORKER_H_
+#define __TDMA_WORKER_H_
+
+#include <rtdm/driver.h>
+
+#define DEF_WORKER_PRIO RTDM_TASK_HIGHEST_PRIORITY
+
+void tdma_worker(void *arg);
+
+#endif /* __TDMA_WORKER_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_dev.h	2022-03-21 12:58:30.461878508 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_ioctl.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtmac/tdma/tdma_dev.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __TDMA_DEV_H_
+#define __TDMA_DEV_H_
+
+#include <rtmac/tdma/tdma.h>
+
+int tdma_dev_init(struct rtnet_device *rtdev, struct tdma_priv *tdma);
+
+static inline void tdma_dev_release(struct tdma_priv *tdma)
+{
+	rtdm_dev_unregister(&tdma->api_device);
+}
+
+#endif /* __TDMA_DEV_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_ioctl.h	2022-03-21 12:58:30.454878576 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtmac/tdma/tdma.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtmac/tdma/tdma_ioctl.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __TDMA_IOCTL_H_
+#define __TDMA_IOCTL_H_
+
+#include <rtmac/tdma/tdma.h>
+
+int tdma_cleanup_slot(struct tdma_priv *tdma, struct tdma_slot *slot);
+
+int tdma_ioctl(struct rtnet_device *rtdev, unsigned int request,
+	       unsigned long arg);
+
+#endif /* __TDMA_IOCTL_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtmac/tdma/tdma.h	2022-03-21 12:58:30.446878654 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_proto.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtmac/tdma/tdma.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __TDMA_H_
+#define __TDMA_H_
+
+#include <rtdm/driver.h>
+
+#include <rtnet_rtpc.h>
+#include <rtmac/rtmac_disc.h>
+
+#define RTMAC_TYPE_TDMA 0x0001
+
+#define TDMA_MAGIC 0x3A0D4D0A
+
+#define TDMA_FLAG_CALIBRATED 1
+#define TDMA_FLAG_RECEIVED_SYNC 2
+#define TDMA_FLAG_MASTER 3 /* also set for backup masters */
+#define TDMA_FLAG_BACKUP_MASTER 4
+#define TDMA_FLAG_ATTACHED 5
+#define TDMA_FLAG_BACKUP_ACTIVE 6
+
+#define DEFAULT_SLOT 0
+#define DEFAULT_NRT_SLOT 1
+
+/* job IDs */
+#define WAIT_ON_SYNC -1
+#define XMIT_SYNC -2
+#define BACKUP_SYNC -3
+#define XMIT_REQ_CAL -4
+#define XMIT_RPL_CAL -5
+
+struct tdma_priv;
+
+struct tdma_job {
+	struct list_head entry;
+	int id;
+	unsigned int ref_count;
+};
+
+#define SLOT_JOB(job) ((struct tdma_slot *)(job))
+
+struct tdma_slot {
+	struct tdma_job head;
+
+	u64 offset;
+	unsigned int period;
+	unsigned int phasing;
+	unsigned int mtu;
+	unsigned int size;
+	struct rtskb_prio_queue *queue;
+	struct rtskb_prio_queue local_queue;
+};
+
+#define REQUEST_CAL_JOB(job) ((struct tdma_request_cal *)(job))
+
+struct tdma_request_cal {
+	struct tdma_job head;
+
+	struct tdma_priv *tdma;
+	u64 offset;
+	unsigned int period;
+	unsigned int phasing;
+	unsigned int cal_rounds;
+	u64 *cal_results;
+	u64 *result_buffer;
+};
+
+#define REPLY_CAL_JOB(job) ((struct tdma_reply_cal *)(job))
+
+struct tdma_reply_cal {
+	struct tdma_job head;
+
+	u32 reply_cycle;
+	u64 reply_offset;
+	struct rtskb *reply_rtskb;
+};
+
+struct tdma_priv {
+	unsigned int magic;
+	struct rtnet_device *rtdev;
+	char device_name[32];
+	struct rtdm_driver api_driver;
+	struct rtdm_device api_device;
+
+#ifdef ALIGN_RTOS_TASK
+	__u8 __align[(ALIGN_RTOS_TASK -
+		      ((sizeof(unsigned int) + sizeof(struct rtnet_device *) +
+			sizeof(struct rtdm_device)) &
+		       (ALIGN_RTOS_TASK - 1))) &
+		     (ALIGN_RTOS_TASK - 1)];
+#endif
+	rtdm_task_t worker_task;
+	rtdm_event_t worker_wakeup;
+	rtdm_event_t xmit_event;
+	rtdm_event_t sync_event;
+
+	unsigned long flags;
+	unsigned int cal_rounds;
+	u32 current_cycle;
+	u64 current_cycle_start;
+	u64 master_packet_delay_ns;
+	nanosecs_rel_t clock_offset;
+
+	struct tdma_job sync_job;
+	struct tdma_job *first_job;
+	struct tdma_job *current_job;
+	volatile unsigned int job_list_revision;
+
+	unsigned int max_slot_id;
+	struct tdma_slot **slot_table;
+
+	struct rt_proc_call *calibration_call;
+	unsigned char master_hw_addr[MAX_ADDR_LEN];
+
+	rtdm_lock_t lock;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+	struct rtskb_pool cal_rtskb_pool;
+	u64 cycle_period;
+	u64 backup_sync_inc;
+#endif
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct list_head list_entry;
+#endif
+};
+
+extern struct rtmac_disc tdma_disc;
+
+#define print_jobs()                                                           \
+	do {                                                                   \
+		struct tdma_job *entry;                                        \
+		rtdm_printk("%s:%d - ", __FUNCTION__, __LINE__);               \
+		list_for_each_entry (entry, &tdma->first_job->entry, entry)    \
+			rtdm_printk("%d ", entry->id);                         \
+		rtdm_printk("\n");                                             \
+	} while (0)
+
+#endif /* __TDMA_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_proto.h	2022-03-21 12:58:30.439878722 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtmac/rtmac_disc.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtmac/tdma/tdma_proto.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __TDMA_PROTO_H_
+#define __TDMA_PROTO_H_
+
+#include <rtdev.h>
+
+#include <rtmac/tdma/tdma.h>
+
+#define TDMA_FRM_VERSION 0x0201
+
+#define TDMA_FRM_SYNC 0x0000
+#define TDMA_FRM_REQ_CAL 0x0010
+#define TDMA_FRM_RPL_CAL 0x0011
+
+struct tdma_frm_head {
+	u16 version;
+	u16 id;
+} __attribute__((packed));
+
+#define SYNC_FRM(head) ((struct tdma_frm_sync *)(head))
+
+struct tdma_frm_sync {
+	struct tdma_frm_head head;
+	u32 cycle_no;
+	u64 xmit_stamp;
+	u64 sched_xmit_stamp;
+} __attribute__((packed));
+
+#define REQ_CAL_FRM(head) ((struct tdma_frm_req_cal *)(head))
+
+struct tdma_frm_req_cal {
+	struct tdma_frm_head head;
+	u64 xmit_stamp;
+	u32 reply_cycle;
+	u64 reply_slot_offset;
+} __attribute__((packed));
+
+#define RPL_CAL_FRM(head) ((struct tdma_frm_rpl_cal *)(head))
+
+struct tdma_frm_rpl_cal {
+	struct tdma_frm_head head;
+	u64 request_xmit_stamp;
+	u64 reception_stamp;
+	u64 xmit_stamp;
+} __attribute__((packed));
+
+void tdma_xmit_sync_frame(struct tdma_priv *tdma);
+int tdma_xmit_request_cal_frame(struct tdma_priv *tdma, u32 reply_cycle,
+				u64 reply_slot_offset);
+
+int tdma_rt_packet_tx(struct rtskb *rtskb, struct rtnet_device *rtdev);
+int tdma_nrt_packet_tx(struct rtskb *rtskb);
+
+int tdma_packet_rx(struct rtskb *rtskb);
+
+unsigned int tdma_get_mtu(struct rtnet_device *rtdev, unsigned int priority);
+
+#endif /* __TDMA_PROTO_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtmac/rtmac_disc.h	2022-03-21 12:58:30.431878800 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_ioctl.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtmac/rtmac_disc.h
+ *
+ *  rtmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002 Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTMAC_DISC_H_
+#define __RTMAC_DISC_H_
+
+#include <linux/list.h>
+#include <linux/netdevice.h>
+
+#include <rtdev.h>
+#include <rtnet_chrdev.h>
+
+#define RTMAC_NO_VNIC NULL
+#define RTMAC_DEFAULT_VNIC rtmac_vnic_xmit
+
+typedef int (*vnic_xmit_handler)(struct sk_buff *skb, struct net_device *dev);
+
+struct rtmac_priv {
+	int (*orig_start_xmit)(struct rtskb *skb, struct rtnet_device *dev);
+	struct net_device *vnic;
+	struct net_device_stats vnic_stats;
+	struct rtskb_pool vnic_skb_pool;
+	unsigned int vnic_max_mtu;
+
+	u8 disc_priv[0] __attribute__((aligned(16)));
+};
+
+struct rtmac_proc_entry {
+	const char *name;
+	int (*handler)(struct xnvfile_regular_iterator *it, void *data);
+	struct xnvfile_regular vfile;
+};
+
+struct rtmac_disc {
+	struct list_head list;
+
+	const char *name;
+	unsigned int priv_size; /* size of rtmac_priv.disc_priv */
+	u16 disc_type;
+
+	int (*packet_rx)(struct rtskb *skb);
+	/* rt_packet_tx prototype must be compatible with hard_start_xmit */
+	int (*rt_packet_tx)(struct rtskb *skb, struct rtnet_device *dev);
+	int (*nrt_packet_tx)(struct rtskb *skb);
+
+	unsigned int (*get_mtu)(struct rtnet_device *rtdev,
+				unsigned int priority);
+
+	vnic_xmit_handler vnic_xmit;
+
+	int (*attach)(struct rtnet_device *rtdev, void *disc_priv);
+	int (*detach)(struct rtnet_device *rtdev, void *disc_priv);
+
+	struct rtnet_ioctls ioctls;
+
+	struct rtmac_proc_entry *proc_entries;
+	unsigned nr_proc_entries;
+
+	struct module *owner;
+};
+
+int rtmac_disc_attach(struct rtnet_device *rtdev, struct rtmac_disc *disc);
+int rtmac_disc_detach(struct rtnet_device *rtdev);
+
+int __rtmac_disc_register(struct rtmac_disc *disc, struct module *module);
+#define rtmac_disc_register(disc) __rtmac_disc_register(disc, THIS_MODULE)
+
+void rtmac_disc_deregister(struct rtmac_disc *disc);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+int rtnet_rtmac_disciplines_show(struct xnvfile_regular_iterator *it, void *d);
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+#endif /* __RTMAC_DISC_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_ioctl.h	2022-03-21 12:58:30.424878869 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_dev.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtmac/nomac/nomac_ioctl.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __NOMAC_IOCTL_H_
+#define __NOMAC_IOCTL_H_
+
+int nomac_ioctl(struct rtnet_device *rtdev, unsigned int request,
+		unsigned long arg);
+
+#endif /* __NOMAC_IOCTL_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_dev.h	2022-03-21 12:58:30.416878947 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_proto.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtmac/nomac/nomac_dev.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __NOMAC_DEV_H_
+#define __NOMAC_DEV_H_
+
+#include <rtmac/nomac/nomac.h>
+
+int nomac_dev_init(struct rtnet_device *rtdev, struct nomac_priv *nomac);
+
+static inline void nomac_dev_release(struct nomac_priv *nomac)
+{
+	rtdm_dev_unregister(&nomac->api_device);
+}
+
+#endif /* __NOMAC_DEV_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_proto.h	2022-03-21 12:58:30.409879015 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtmac/nomac/nomac.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtmac/nomac/nomac_proto.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __NOMAC_PROTO_H_
+#define __NOMAC_PROTO_H_
+
+#include <rtdev.h>
+
+int nomac_rt_packet_tx(struct rtskb *rtskb, struct rtnet_device *rtdev);
+int nomac_nrt_packet_tx(struct rtskb *rtskb);
+
+int nomac_packet_rx(struct rtskb *rtskb);
+
+int nomac_proto_init(void);
+void nomac_proto_cleanup(void);
+
+#endif /* __NOMAC_PROTO_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtmac/nomac/nomac.h	2022-03-21 12:58:30.401879093 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtmac/rtmac_proc.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtmac/nomac/nomac.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __NOMAC_H_
+#define __NOMAC_H_
+
+#include <rtdm/driver.h>
+
+#include <rtmac/rtmac_disc.h>
+
+#define RTMAC_TYPE_NOMAC 0
+
+#define NOMAC_MAGIC 0x004D0A0C
+
+struct nomac_priv {
+	unsigned int magic;
+	struct rtnet_device *rtdev;
+	char device_name[32];
+	struct rtdm_driver api_driver;
+	struct rtdm_device api_device;
+	/* ... */
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct list_head list_entry;
+#endif
+};
+
+extern struct rtmac_disc nomac_disc;
+
+#endif /* __NOMAC_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtmac/rtmac_proc.h	2022-03-21 12:58:30.394879161 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtmac/rtmac_vnic.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtmac/rtmac_proc.h
+ *
+ *  rtmac - real-time networking medium access control subsystem
+ *  Copyright (C) 2002 Marc Kleine-Budde <kleine-budde@gmx.de>
+ *                2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTMAC_PROC_H_
+#define __RTMAC_PROC_H_
+
+int rtmac_disc_proc_register(struct rtmac_disc *disc);
+void rtmac_disc_proc_unregister(struct rtmac_disc *disc);
+
+int rtmac_proc_register(void);
+void rtmac_proc_release(void);
+
+#endif /* __RTMAC_PROC_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtmac/rtmac_vnic.h	2022-03-21 12:58:30.386879239 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtnet_rtpc.h	1970-01-01 01:00:00.000000000 +0100
+/* include/rtmac/rtmac_vnic.h
+ *
+ * rtmac - real-time networking media access control subsystem
+ * Copyright (C) 2002 Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *               2003 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __RTMAC_VNIC_H_
+#define __RTMAC_VNIC_H_
+
+#ifdef __KERNEL__
+
+#include <linux/init.h>
+#include <linux/netdevice.h>
+
+#include <rtmac/rtmac_disc.h>
+
+#define DEFAULT_VNIC_RTSKBS 32
+
+int rtmac_vnic_rx(struct rtskb *skb, u16 type);
+
+int rtmac_vnic_xmit(struct sk_buff *skb, struct net_device *dev);
+
+void rtmac_vnic_set_max_mtu(struct rtnet_device *rtdev, unsigned int max_mtu);
+
+int rtmac_vnic_add(struct rtnet_device *rtdev, vnic_xmit_handler vnic_xmit);
+int rtmac_vnic_unregister(struct rtnet_device *rtdev);
+
+static inline void rtmac_vnic_cleanup(struct rtnet_device *rtdev)
+{
+	struct rtmac_priv *mac_priv = rtdev->mac_priv;
+
+	rtskb_pool_release(&mac_priv->vnic_skb_pool);
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+int rtnet_rtmac_vnics_show(struct xnvfile_regular_iterator *it, void *data);
+#endif
+
+int __init rtmac_vnic_module_init(void);
+void rtmac_vnic_module_cleanup(void);
+
+#endif /* __KERNEL__ */
+
+#endif /* __RTMAC_VNIC_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtnet_rtpc.h	2022-03-21 12:58:30.379879307 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtnet_chrdev.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtnet_rtpc.h
+ *
+ *  RTnet - real-time networking subsystem
+ *
+ *  Copyright (C) 2003, 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_RTPC_H_
+#define __RTNET_RTPC_H_
+
+#include <linux/init.h>
+
+#include <rtnet_internal.h>
+
+struct rt_proc_call;
+
+typedef int (*rtpc_proc)(struct rt_proc_call *call);
+typedef void (*rtpc_copy_back_proc)(struct rt_proc_call *call, void *priv_data);
+typedef void (*rtpc_cleanup_proc)(void *priv_data);
+
+struct rt_proc_call {
+	struct list_head list_entry;
+	int processed;
+	rtpc_proc proc;
+	int result;
+	atomic_t ref_count;
+	wait_queue_head_t call_wq;
+	rtpc_cleanup_proc cleanup_handler;
+	char priv_data[0] __attribute__((aligned(8)));
+};
+
+#define CALL_PENDING 1000 /* result value for blocked calls */
+
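+/*
+ * Dispatch a procedure call on behalf of a non-RT caller: priv_data
+ * (priv_data_size bytes) is copied into the trailing priv_data[] array
+ * of a freshly allocated struct rt_proc_call, rt_proc processes the
+ * call, and the caller sleeps on call_wq (bounded by timeout) until it
+ * is completed via rtpc_complete_call()/rtpc_complete_call_nrt().
+ */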
+int rtnet_rtpc_dispatch_call(rtpc_proc rt_proc, unsigned int timeout,
+			     void *priv_data, size_t priv_data_size,
+			     rtpc_copy_back_proc copy_back_handler,
+			     rtpc_cleanup_proc cleanup_handler);
+
+void rtnet_rtpc_complete_call(struct rt_proc_call *call, int result);
+void rtnet_rtpc_complete_call_nrt(struct rt_proc_call *call, int result);
+
+#define rtpc_dispatch_call rtnet_rtpc_dispatch_call
+#define rtpc_complete_call rtnet_rtpc_complete_call
+#define rtpc_complete_call_nrt rtnet_rtpc_complete_call_nrt
+
+#define rtpc_get_priv(call, type) ((type *)((call)->priv_data))
+#define rtpc_get_result(call) ((call)->result)
+#define rtpc_set_result(call, new_result) ((call)->result = (new_result))
+#define rtpc_set_cleanup_handler(call, handler) ((call)->cleanup_handler = (handler))
+
+int __init rtpc_init(void);
+void rtpc_cleanup(void);
+
+#endif /* __RTNET_RTPC_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtnet_chrdev.h	2022-03-21 12:58:30.371879386 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtcfg/rtcfg_file.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtnet_chrdev.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999    Lineo, Inc
+ *                1999,2002 David A. Schleef <ds@schleef.org>
+ *                2002 Ulrich Marx <marx@fet.uni-hannover.de>
+ *                2003,2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_CHRDEV_H_
+#define __RTNET_CHRDEV_H_
+
+#include <rtdev.h>
+
+#ifdef __KERNEL__
+
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+/* new extensible interface */
+struct rtnet_ioctls {
+	/* internal usage only */
+	struct list_head entry;
+	atomic_t ref_count;
+
+	/* provider specification */
+	const char *service_name;
+	unsigned int ioctl_type;
+	int (*handler)(struct rtnet_device *rtdev, unsigned int request,
+		       unsigned long arg);
+};
+
+extern int rtnet_register_ioctls(struct rtnet_ioctls *ioctls);
+extern void rtnet_unregister_ioctls(struct rtnet_ioctls *ioctls);
+
+extern int __init rtnet_chrdev_init(void);
+extern void rtnet_chrdev_release(void);
+
+#else /* ifndef __KERNEL__ */
+
+#include <net/if.h> /* IFNAMSIZ */
+#include <linux/types.h>
+
+#endif /* __KERNEL__ */
+
+#define RTNET_MINOR 240 /* user interface for /dev/rtnet */
+#define DEV_ADDR_LEN 32 /* avoids inconsistent MAX_ADDR_LEN */
+
+struct rtnet_ioctl_head {
+	char if_name[IFNAMSIZ];
+};
+
+struct rtnet_core_cmd {
+	struct rtnet_ioctl_head head;
+
+	union {
+		/*** rtifconfig ***/
+		struct {
+			__u32 ip_addr;
+			__u32 broadcast_ip;
+			__u32 set_dev_flags;
+			__u32 clear_dev_flags;
+			__u32 dev_addr_type;
+			__u32 __padding;
+			__u8 dev_addr[DEV_ADDR_LEN];
+		} up;
+
+		struct {
+			__u32 ifindex;
+			__u32 type;
+			__u32 ip_addr;
+			__u32 broadcast_ip;
+			__u32 mtu;
+			__u32 flags;
+			__u8 dev_addr[DEV_ADDR_LEN];
+		} info;
+
+		__u64 __padding[8];
+	} args;
+};
+
+#define RTNET_IOC_NODEV_PARAM 0x80
+
+#define RTNET_IOC_TYPE_CORE 0
+#define RTNET_IOC_TYPE_RTCFG 1
+#define RTNET_IOC_TYPE_IPV4 2
+#define RTNET_IOC_TYPE_RTMAC_NOMAC 100
+#define RTNET_IOC_TYPE_RTMAC_TDMA 110
+
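+/*
+ * The ioctl type byte selects the subsystem (core, RTcfg, IPv4, RTmac
+ * disciplines); RTNET_IOC_NODEV_PARAM set in the command number marks
+ * ioctls that do not operate on a specific device.
+ */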
+#define IOC_RT_IFUP _IOW(RTNET_IOC_TYPE_CORE, 0, struct rtnet_core_cmd)
+#define IOC_RT_IFDOWN _IOW(RTNET_IOC_TYPE_CORE, 1, struct rtnet_core_cmd)
+#define IOC_RT_IFINFO                                                          \
+	_IOWR(RTNET_IOC_TYPE_CORE, 2 | RTNET_IOC_NODEV_PARAM,                  \
+	      struct rtnet_core_cmd)
+
+#endif /* __RTNET_CHRDEV_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtcfg/rtcfg_file.h	2022-03-21 12:58:30.364879454 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtcfg/rtcfg.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtcfg/rtcfg_file.h
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_FILE_H_
+#define __RTCFG_FILE_H_
+
+#include <linux/list.h>
+#include <linux/types.h>
+
+struct rtcfg_file {
+	struct list_head entry;
+	int ref_count;
+	const char *name;
+	size_t size;
+	void *buffer;
+};
+
+struct rtcfg_file *rtcfg_get_file(const char *filename);
+void rtcfg_add_file(struct rtcfg_file *file);
+int rtcfg_release_file(struct rtcfg_file *file);
+
+#endif /* __RTCFG_FILE_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtcfg/rtcfg.h	2022-03-21 12:58:30.356879532 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtcfg/rtcfg_event.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtcfg/rtcfg.h
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_H_INTERNAL_
+#define __RTCFG_H_INTERNAL_
+
+#include <rtdm/driver.h>
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+/***
+ * RTcfg debugging
+ */
+#ifdef CONFIG_XENO_DRIVERS_NET_RTCFG_DEBUG
+
+extern int rtcfg_debug;
+
+/* use 0 for production, 1 for verification, higher values for debugging */
+#define RTCFG_DEFAULT_DEBUG_LEVEL 10
+
+#define RTCFG_DEBUG(n, args...) (rtcfg_debug >= (n)) ? (rtdm_printk(args)) : 0
+#else
+#define RTCFG_DEBUG(n, args...)
+#endif /* CONFIG_XENO_DRIVERS_NET_RTCFG_DEBUG */
+
+#endif /* __RTCFG_H_INTERNAL_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtcfg/rtcfg_event.h	2022-03-21 12:58:30.349879600 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtcfg/rtcfg_client_event.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtcfg/rtcfg_event.h
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_EVENT_H_
+#define __RTCFG_EVENT_H_
+
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#include <rtcfg_chrdev.h>
+#include <rtdev.h>
+#include <rtnet_internal.h>
+#include <rtnet_rtpc.h>
+
+#define FLAG_TIMER_STARTED 16
+#define FLAG_TIMER_SHUTDOWN 17
+#define FLAG_TIMER_PENDING 18
+
+#define _FLAG_TIMER_STARTED (1 << FLAG_TIMER_STARTED)
+#define _FLAG_TIMER_SHUTDOWN (1 << FLAG_TIMER_SHUTDOWN)
+#define _FLAG_TIMER_PENDING (1 << FLAG_TIMER_PENDING)
+
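+/*
+ * Per-interface RTcfg state machine: a node either runs as the
+ * configuration server or progresses through the client states
+ * (0/1/announced/all_known/all_frames/2/ready) while stage 1 and
+ * stage 2 configuration data are exchanged.
+ */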
+typedef enum {
+	RTCFG_MAIN_OFF,
+	RTCFG_MAIN_SERVER_RUNNING,
+	RTCFG_MAIN_CLIENT_0,
+	RTCFG_MAIN_CLIENT_1,
+	RTCFG_MAIN_CLIENT_ANNOUNCED,
+	RTCFG_MAIN_CLIENT_ALL_KNOWN,
+	RTCFG_MAIN_CLIENT_ALL_FRAMES,
+	RTCFG_MAIN_CLIENT_2,
+	RTCFG_MAIN_CLIENT_READY
+} RTCFG_MAIN_STATE;
+
+struct rtcfg_station {
+	u8 mac_addr[ETH_ALEN]; /* Ethernet-specific! */
+	u8 flags;
+};
+
+struct rtcfg_device {
+	RTCFG_MAIN_STATE state;
+	u32 other_stations;
+	u32 stations_found;
+	u32 stations_ready;
+	rtdm_mutex_t dev_mutex;
+	struct list_head event_calls;
+	rtdm_lock_t event_calls_lock;
+	rtdm_timer_t timer;
+	unsigned long flags;
+	unsigned int burstrate;
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct xnvfile_directory proc_entry;
+	struct xnvfile_regular proc_state_vfile;
+	struct xnvfile_regular proc_stations_vfile;
+#endif
+
+	union {
+		struct {
+			unsigned int addr_type;
+			union {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+				u32 ip_addr;
+#endif
+			} srv_addr;
+			u8 srv_mac_addr[MAX_ADDR_LEN];
+			u8 *stage2_buffer;
+			u32 cfg_len;
+			u32 cfg_offs;
+			unsigned int packet_counter;
+			u32 chain_len;
+			struct rtskb *stage2_chain;
+			u32 max_stations;
+			struct rtcfg_station *station_addr_list;
+		} clt;
+
+		struct {
+			u32 clients_configured;
+			struct list_head conn_list;
+			u16 heartbeat;
+			u64 heartbeat_timeout;
+		} srv;
+	} spec;
+};
+
+extern struct rtcfg_device device[MAX_RT_DEVICES];
+extern const char *rtcfg_event[];
+extern const char *rtcfg_main_state[];
+
+int rtcfg_do_main_event(int ifindex, RTCFG_EVENT event_id, void *event_data);
+void rtcfg_next_main_state(int ifindex, RTCFG_MAIN_STATE state);
+
+void rtcfg_queue_blocking_call(int ifindex, struct rt_proc_call *call);
+struct rt_proc_call *rtcfg_dequeue_blocking_call(int ifindex);
+void rtcfg_complete_cmd(int ifindex, RTCFG_EVENT event_id, int result);
+void rtcfg_reset_device(int ifindex);
+
+void rtcfg_init_state_machines(void);
+void rtcfg_cleanup_state_machines(void);
+
+#endif /* __RTCFG_EVENT_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtcfg/rtcfg_client_event.h	2022-03-21 12:58:30.341879678 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtcfg/rtcfg_timer.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtcfg/rtcfg_client_event.h
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003, 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_CLIENT_EVENT_H_
+#define __RTCFG_CLIENT_EVENT_H_
+
+#include <rtcfg_chrdev.h>
+
+int rtcfg_main_state_client_0(int ifindex, RTCFG_EVENT event_id,
+			      void *event_data);
+int rtcfg_main_state_client_1(int ifindex, RTCFG_EVENT event_id,
+			      void *event_data);
+int rtcfg_main_state_client_announced(int ifindex, RTCFG_EVENT event_id,
+				      void *event_data);
+int rtcfg_main_state_client_all_known(int ifindex, RTCFG_EVENT event_id,
+				      void *event_data);
+int rtcfg_main_state_client_all_frames(int ifindex, RTCFG_EVENT event_id,
+				       void *event_data);
+int rtcfg_main_state_client_2(int ifindex, RTCFG_EVENT event_id,
+			      void *event_data);
+int rtcfg_main_state_client_ready(int ifindex, RTCFG_EVENT event_id,
+				  void *event_data);
+
+#endif /* __RTCFG_CLIENT_EVENT_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtcfg/rtcfg_timer.h	2022-03-21 12:58:30.334879746 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtcfg/rtcfg_proc.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtcfg/rtcfg_timer.h
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_TIMER_H_
+#define __RTCFG_TIMER_H_
+
+void rtcfg_timer(rtdm_timer_t *t);
+
+void rtcfg_timer_run(void);
+
+void rtcfg_thread_signal(void);
+
+#endif /* __RTCFG_TIMER_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtcfg/rtcfg_proc.h	2022-03-21 12:58:30.327879815 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtcfg/rtcfg_frame.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtcfg/rtcfg_proc.h
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_PROC_H_
+#define __RTCFG_PROC_H_
+
+#include <rtnet_internal.h>
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+extern struct mutex nrt_proc_lock;
+
+void rtcfg_update_conn_proc_entries(int ifindex);
+void rtcfg_remove_conn_proc_entries(int ifindex);
+
+int rtcfg_init_proc(void);
+void rtcfg_cleanup_proc(void);
+
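+/*
+ * Writer-side helpers: take the non-RT proc mutex and drop the
+ * per-connection /proc entries so they can be rebuilt consistently
+ * after the connection list has been modified.
+ */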
+static inline void rtcfg_lockwr_proc(int ifindex)
+{
+	mutex_lock(&nrt_proc_lock);
+	rtcfg_remove_conn_proc_entries(ifindex);
+}
+
+static inline void rtcfg_unlockwr_proc(int ifindex)
+{
+	rtcfg_update_conn_proc_entries(ifindex);
+	mutex_unlock(&nrt_proc_lock);
+}
+
+#else
+
+#define rtcfg_lockwr_proc(x)                                                   \
+	do {                                                                   \
+	} while (0)
+#define rtcfg_unlockwr_proc(x)                                                 \
+	do {                                                                   \
+	} while (0)
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+#endif /* __RTCFG_PROC_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtcfg/rtcfg_frame.h	2022-03-21 12:58:30.319879893 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtcfg/rtcfg_ioctl.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtcfg/rtcfg_frame.h
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_FRAME_H_
+#define __RTCFG_FRAME_H_
+
+#include <linux/init.h>
+#include <linux/if_packet.h>
+#include <asm/byteorder.h>
+
+#include <rtcfg/rtcfg_event.h>
+
+#define ETH_RTCFG 0x9022
+
+#define RTCFG_SKB_PRIO                                                         \
+	RTSKB_PRIO_VALUE(QUEUE_MIN_PRIO - 1, RTSKB_DEF_NRT_CHANNEL)
+
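+/* Frame identifiers carried in rtcfg_frm_head.id (5-bit field) */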
+#define RTCFG_ID_STAGE_1_CFG 0
+#define RTCFG_ID_ANNOUNCE_NEW 1
+#define RTCFG_ID_ANNOUNCE_REPLY 2
+#define RTCFG_ID_STAGE_2_CFG 3
+#define RTCFG_ID_STAGE_2_CFG_FRAG 4
+#define RTCFG_ID_ACK_CFG 5
+#define RTCFG_ID_READY 6
+#define RTCFG_ID_HEARTBEAT 7
+#define RTCFG_ID_DEAD_STATION 8
+
+#define RTCFG_ADDRSIZE_MAC 0
+#define RTCFG_ADDRSIZE_IP 4
+#define RTCFG_MAX_ADDRSIZE RTCFG_ADDRSIZE_IP
+
+#define RTCFG_FLAG_STAGE_2_DATA 0
+#define RTCFG_FLAG_READY 1
+
+#define _RTCFG_FLAG_STAGE_2_DATA (1 << RTCFG_FLAG_STAGE_2_DATA)
+#define _RTCFG_FLAG_READY (1 << RTCFG_FLAG_READY)
+
+struct rtcfg_frm_head {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 id : 5;
+	u8 version : 3;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	u8 version : 3;
+	u8 id : 5;
+#else
+#error unsupported byte order
+#endif
+} __attribute__((packed));
+
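+/*
+ * Note: the zero-length client_addr[]/server_addr[] members only mark
+ * where the variable-length addresses (size given by addr_type) are
+ * placed in the frame; the fields following them are therefore not at
+ * fixed offsets and must be accessed with the address size added in.
+ */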
+struct rtcfg_frm_stage_1_cfg {
+	struct rtcfg_frm_head head;
+	u8 addr_type;
+	u8 client_addr[0];
+	u8 server_addr[0];
+	u8 burstrate;
+	u16 cfg_len;
+	u8 cfg_data[0];
+} __attribute__((packed));
+
+struct rtcfg_frm_announce {
+	struct rtcfg_frm_head head;
+	u8 addr_type;
+	u8 addr[0];
+	u8 flags;
+	u8 burstrate;
+} __attribute__((packed));
+
+struct rtcfg_frm_stage_2_cfg {
+	struct rtcfg_frm_head head;
+	u8 flags;
+	u32 stations;
+	u16 heartbeat_period;
+	u32 cfg_len;
+	u8 cfg_data[0];
+} __attribute__((packed));
+
+struct rtcfg_frm_stage_2_cfg_frag {
+	struct rtcfg_frm_head head;
+	u32 frag_offs;
+	u8 cfg_data[0];
+} __attribute__((packed));
+
+struct rtcfg_frm_ack_cfg {
+	struct rtcfg_frm_head head;
+	u32 ack_len;
+} __attribute__((packed));
+
+struct rtcfg_frm_simple {
+	struct rtcfg_frm_head head;
+} __attribute__((packed));
+
+struct rtcfg_frm_dead_station {
+	struct rtcfg_frm_head head;
+	u8 addr_type;
+	u8 logical_addr[0];
+	u8 physical_addr[32];
+} __attribute__((packed));
+
+int rtcfg_send_stage_1(struct rtcfg_connection *conn);
+int rtcfg_send_stage_2(struct rtcfg_connection *conn, int send_data);
+int rtcfg_send_stage_2_frag(struct rtcfg_connection *conn);
+int rtcfg_send_announce_new(int ifindex);
+int rtcfg_send_announce_reply(int ifindex, u8 *dest_mac_addr);
+int rtcfg_send_ack(int ifindex);
+int rtcfg_send_dead_station(struct rtcfg_connection *conn);
+
+int rtcfg_send_simple_frame(int ifindex, int frame_id, u8 *dest_addr);
+
+#define rtcfg_send_ready(ifindex)                                              \
+	rtcfg_send_simple_frame(ifindex, RTCFG_ID_READY, NULL)
+#define rtcfg_send_heartbeat(ifindex)                                          \
+	rtcfg_send_simple_frame(ifindex, RTCFG_ID_HEARTBEAT,                   \
+				device[ifindex].spec.clt.srv_mac_addr)
+
+int __init rtcfg_init_frames(void);
+void rtcfg_cleanup_frames(void);
+
+#endif /* __RTCFG_FRAME_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtcfg/rtcfg_ioctl.h	2022-03-21 12:58:30.312879961 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtcfg/rtcfg_conn_event.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtcfg/rtcfg_ioctl.h
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003, 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_IOCTL_H_
+#define __RTCFG_IOCTL_H_
+
+extern struct rtnet_ioctls rtcfg_ioctls;
+
+#define rtcfg_init_ioctls() rtnet_register_ioctls(&rtcfg_ioctls)
+#define rtcfg_cleanup_ioctls() rtnet_unregister_ioctls(&rtcfg_ioctls)
+
+#endif /* __RTCFG_IOCTL_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtcfg/rtcfg_conn_event.h	2022-03-21 12:58:30.304880039 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/ethernet/eth.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *	include/rtcfg/rtcfg_conn_event.h
+ *
+ *	Real-Time Configuration Distribution Protocol
+ *
+ *	Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ *
+ *	This program is distributed in the hope that it will be useful,
+ *	but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *	GNU General Public License for more details.
+ *
+ *	You should have received a copy of the GNU General Public License
+ *	along with this program; if not, write to the Free Software
+ *	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_CONN_EVENT_H_
+#define __RTCFG_CONN_EVENT_H_
+
+#include <linux/netdevice.h>
+
+#include <rtcfg_chrdev.h>
+#include <rtcfg/rtcfg_file.h>
+#include <rtnet_internal.h>
+
+typedef enum {
+	RTCFG_CONN_SEARCHING,
+	RTCFG_CONN_STAGE_1,
+	RTCFG_CONN_STAGE_2,
+	RTCFG_CONN_READY,
+	RTCFG_CONN_DEAD
+} RTCFG_CONN_STATE;
+
+struct rtcfg_connection {
+	struct list_head entry;
+	int ifindex;
+	RTCFG_CONN_STATE state;
+	u8 mac_addr[MAX_ADDR_LEN];
+	unsigned int addr_type;
+	union {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+		u32 ip_addr;
+#endif
+	} addr;
+	void *stage1_data;
+	size_t stage1_size;
+	struct rtcfg_file *stage2_file;
+	u32 cfg_offs;
+	unsigned int flags;
+	unsigned int burstrate;
+	nanosecs_abs_t last_frame;
+	u64 cfg_timeout;
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct xnvfile_regular proc_entry;
+#endif
+};
+
+int rtcfg_do_conn_event(struct rtcfg_connection *conn, RTCFG_EVENT event_id,
+			void *event_data);
+
+#endif /* __RTCFG_CONN_EVENT_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/ethernet/eth.h	2022-03-21 12:58:30.297880107 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/include/rtcfg_chrdev.h	1970-01-01 01:00:00.000000000 +0100
+/* ethernet/eth.h
+ *
+ * RTnet - real-time networking subsystem
+ * Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __RTNET_ETH_H_
+#define __RTNET_ETH_H_
+
+#include <rtskb.h>
+#include <rtdev.h>
+
+extern int rt_eth_header(struct rtskb *skb, struct rtnet_device *rtdev,
+			 unsigned short type, void *daddr, void *saddr,
+			 unsigned int len);
+extern unsigned short rt_eth_type_trans(struct rtskb *skb,
+					struct rtnet_device *dev);
+
+#endif /* __RTNET_ETH_H_ */
+++ linux-patched/drivers/xenomai/net/stack/include/rtcfg_chrdev.h	2022-03-21 12:58:30.289880185 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/packet/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  include/rtcfg.h
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_H_
+#define __RTCFG_H_
+
+#include <rtnet_chrdev.h>
+
+#define ERTCFG_START 0x0F00
+#define ESTAGE1SIZE ERTCFG_START
+
+#define FLAG_STAGE_2_DATA 0x0001
+#define FLAG_READY 0x0002
+#define FLAG_ASSIGN_ADDR_BY_MAC 0x0100
+
+#define RTCFG_ADDR_MAC 0x00
+#define RTCFG_ADDR_IP 0x01
+#define RTCFG_ADDR_MASK 0xFF
+
+typedef enum {
+	RTCFG_CMD_SERVER,
+	RTCFG_CMD_ADD,
+	RTCFG_CMD_DEL,
+	RTCFG_CMD_WAIT,
+	RTCFG_CMD_CLIENT,
+	RTCFG_CMD_ANNOUNCE,
+	RTCFG_CMD_READY,
+	RTCFG_CMD_DETACH,
+
+	/* internal usage only */
+	RTCFG_TIMER,
+	RTCFG_FRM_STAGE_1_CFG,
+	RTCFG_FRM_ANNOUNCE_NEW,
+	RTCFG_FRM_ANNOUNCE_REPLY,
+	RTCFG_FRM_STAGE_2_CFG,
+	RTCFG_FRM_STAGE_2_CFG_FRAG,
+	RTCFG_FRM_ACK_CFG,
+	RTCFG_FRM_READY,
+	RTCFG_FRM_HEARTBEAT,
+	RTCFG_FRM_DEAD_STATION
+} RTCFG_EVENT;
+
+struct rtskb;
+struct rtcfg_station;
+struct rtcfg_connection;
+struct rtcfg_file;
+
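+/*
+ * Argument block shared by all RTcfg ioctls; the "internal usage only"
+ * members are filled in by the kernel while a command is processed and
+ * must be left untouched by user space.
+ */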
+struct rtcfg_cmd {
+	struct rtnet_ioctl_head head;
+
+	union {
+		struct {
+			__u32 period;
+			__u32 burstrate;
+			__u32 heartbeat;
+			__u32 threshold;
+			__u32 flags;
+		} server;
+
+		struct {
+			__u32 addr_type;
+			__u32 ip_addr;
+			__u8 mac_addr[DEV_ADDR_LEN];
+			__u32 timeout;
+			__u16 stage1_size;
+			__u16 __padding;
+			void *stage1_data;
+			const char *stage2_filename;
+
+			/* internal usage only */
+			struct rtcfg_connection *conn_buf;
+			struct rtcfg_file *stage2_file;
+		} add;
+
+		struct {
+			__u32 addr_type;
+			__u32 ip_addr;
+			__u8 mac_addr[DEV_ADDR_LEN];
+
+			/* internal usage only */
+			struct rtcfg_connection *conn_buf;
+			struct rtcfg_file *stage2_file;
+		} del;
+
+		struct {
+			__u32 timeout;
+		} wait;
+
+		struct {
+			__u32 timeout;
+			__u32 max_stations;
+			__u64 buffer_size;
+			void *buffer;
+
+			/* internal usage only */
+			struct rtcfg_station *station_buf;
+			struct rtskb *rtskb;
+		} client;
+
+		struct {
+			__u32 timeout;
+			__u32 flags;
+			__u32 burstrate;
+			__u32 __padding;
+			__u64 buffer_size;
+			void *buffer;
+
+			/* internal usage only */
+			struct rtskb *rtskb;
+		} announce;
+
+		struct {
+			__u32 timeout;
+		} ready;
+
+		struct {
+			/* internal usage only */
+			struct rtcfg_connection *conn_buf;
+			struct rtcfg_file *stage2_file;
+			struct rtcfg_station *station_addr_list;
+			struct rtskb *stage2_chain;
+		} detach;
+
+		__u64 __padding[16];
+	} args;
+
+	/* internal usage only */
+	union {
+		struct {
+			int ifindex;
+			RTCFG_EVENT event_id;
+		} data;
+
+		__u64 __padding[2];
+	} internal;
+};
+
+#define RTCFG_IOC_SERVER                                                       \
+	_IOW(RTNET_IOC_TYPE_RTCFG, RTCFG_CMD_SERVER, struct rtcfg_cmd)
+#define RTCFG_IOC_ADD                                                          \
+	_IOW(RTNET_IOC_TYPE_RTCFG, RTCFG_CMD_ADD, struct rtcfg_cmd)
+#define RTCFG_IOC_DEL                                                          \
+	_IOW(RTNET_IOC_TYPE_RTCFG, RTCFG_CMD_DEL, struct rtcfg_cmd)
+#define RTCFG_IOC_WAIT                                                         \
+	_IOW(RTNET_IOC_TYPE_RTCFG, RTCFG_CMD_WAIT, struct rtcfg_cmd)
+#define RTCFG_IOC_CLIENT                                                       \
+	_IOW(RTNET_IOC_TYPE_RTCFG, RTCFG_CMD_CLIENT, struct rtcfg_cmd)
+#define RTCFG_IOC_ANNOUNCE                                                     \
+	_IOW(RTNET_IOC_TYPE_RTCFG, RTCFG_CMD_ANNOUNCE, struct rtcfg_cmd)
+#define RTCFG_IOC_READY                                                        \
+	_IOW(RTNET_IOC_TYPE_RTCFG, RTCFG_CMD_READY, struct rtcfg_cmd)
+#define RTCFG_IOC_DETACH                                                       \
+	_IOW(RTNET_IOC_TYPE_RTCFG, RTCFG_CMD_DETACH, struct rtcfg_cmd)
+
+#endif /* __RTCFG_H_ */
+++ linux-patched/drivers/xenomai/net/stack/packet/Kconfig	2022-03-21 12:58:30.282880253 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/packet/af_packet.c	1970-01-01 01:00:00.000000000 +0100
+config XENO_DRIVERS_NET_RTPACKET
+    depends on XENO_DRIVERS_NET
+    tristate "Real-Time Packet Socket Support"
+    default y
+    help
+    Enables real-time packet sockets for RTnet. This support is
+    implemented in a separate module. When loaded, application programs
+    can send and receive so-called "cooked" packets directly at OSI layer
+    2 (device layer). This means that RTnet will still maintain the
+    device-dependent packet header but leave the full data segment to the
+    user.
+
+    Examples like raw-ethernet or netshm make use of this support. See
+    also Linux man page packet(7).
+++ linux-patched/drivers/xenomai/net/stack/packet/af_packet.c	2022-03-21 12:58:30.275880322 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/packet/Makefile	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  packet/af_packet.c
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2003-2006 Jan Kiszka <jan.kiszka@web.de>
+ *  Copyright (C) 2006 Jorge Almeida <j-almeida@criticalsoftware.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+
+#include <rtnet_iovec.h>
+#include <rtnet_socket.h>
+#include <stack_mgr.h>
+
+MODULE_LICENSE("GPL");
+
+/***
+ *  rt_packet_rcv
+ */
+static int rt_packet_rcv(struct rtskb *skb, struct rtpacket_type *pt)
+{
+	struct rtsocket *sock =
+		container_of(pt, struct rtsocket, prot.packet.packet_type);
+	int ifindex = sock->prot.packet.ifindex;
+	void (*callback_func)(struct rtdm_fd *, void *);
+	void *callback_arg;
+	rtdm_lockctx_t context;
+
+	if (unlikely((ifindex != 0) && (ifindex != skb->rtdev->ifindex)))
+		return -EUNATCH;
+
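+	/*
+	 * ETH_P_ALL listeners only get a clone allocated from this socket's
+	 * pool, so the original buffer can still reach its regular protocol
+	 * handler; all other sockets take over the buffer by exchanging it
+	 * against one from their own pool.
+	 */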
+#ifdef CONFIG_XENO_DRIVERS_NET_ETH_P_ALL
+	if (pt->type == htons(ETH_P_ALL)) {
+		struct rtskb *clone_skb = rtskb_clone(skb, &sock->skb_pool);
+		if (clone_skb == NULL)
+			goto out;
+		skb = clone_skb;
+	} else
+#endif /* CONFIG_XENO_DRIVERS_NET_ETH_P_ALL */
+		if (unlikely(rtskb_acquire(skb, &sock->skb_pool) < 0)) {
+			kfree_rtskb(skb);
+			goto out;
+		}
+
+	rtskb_queue_tail(&sock->incoming, skb);
+	rtdm_sem_up(&sock->pending_sem);
+
+	rtdm_lock_get_irqsave(&sock->param_lock, context);
+	callback_func = sock->callback_func;
+	callback_arg = sock->callback_arg;
+	rtdm_lock_put_irqrestore(&sock->param_lock, context);
+
+	if (callback_func)
+		callback_func(rt_socket_fd(sock), callback_arg);
+
+out:
+	return 0;
+}
+
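+/*
+ * trylock/unlock hooks of the packet type: they pin the owning socket's
+ * RTDM file descriptor while the stack delivers a frame, so the socket
+ * cannot be closed under a running rt_packet_rcv().
+ */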
+static bool rt_packet_trylock(struct rtpacket_type *pt)
+{
+	struct rtsocket *sock =
+		container_of(pt, struct rtsocket, prot.packet.packet_type);
+	struct rtdm_fd *fd = rtdm_private_to_fd(sock);
+
+	if (rtdm_fd_lock(fd) < 0)
+		return false;
+
+	return true;
+}
+
+static void rt_packet_unlock(struct rtpacket_type *pt)
+{
+	struct rtsocket *sock =
+		container_of(pt, struct rtsocket, prot.packet.packet_type);
+	struct rtdm_fd *fd = rtdm_private_to_fd(sock);
+
+	rtdm_fd_unlock(fd);
+}
+
+/***
+ *  rt_packet_bind
+ */
+static int rt_packet_bind(struct rtdm_fd *fd, struct rtsocket *sock,
+			  const struct sockaddr *addr, socklen_t addrlen)
+{
+	struct sockaddr_ll _sll, *sll;
+	struct rtpacket_type *pt = &sock->prot.packet.packet_type;
+	int new_type;
+	int ret;
+	rtdm_lockctx_t context;
+
+	if (addrlen < sizeof(struct sockaddr_ll))
+		return -EINVAL;
+
+	sll = rtnet_get_arg(fd, &_sll, addr, sizeof(_sll));
+	if (IS_ERR(sll))
+		return PTR_ERR(sll);
+
+	if (sll->sll_family != AF_PACKET)
+		return -EINVAL;
+
+	new_type =
+		(sll->sll_protocol != 0) ? sll->sll_protocol : sock->protocol;
+
+	rtdm_lock_get_irqsave(&sock->param_lock, context);
+
+	/* release existing binding */
+	if (pt->type != 0)
+		rtdev_remove_pack(pt);
+
+	pt->type = new_type;
+	sock->prot.packet.ifindex = sll->sll_ifindex;
+
+	/* if protocol is non-zero, register the packet type */
+	if (new_type != 0) {
+		pt->handler = rt_packet_rcv;
+		pt->err_handler = NULL;
+		pt->trylock = rt_packet_trylock;
+		pt->unlock = rt_packet_unlock;
+
+		ret = rtdev_add_pack(pt);
+	} else
+		ret = 0;
+
+	rtdm_lock_put_irqrestore(&sock->param_lock, context);
+
+	return ret;
+}
+
+/***
+ *  rt_packet_getsockname
+ */
+static int rt_packet_getsockname(struct rtdm_fd *fd, struct rtsocket *sock,
+				 struct sockaddr *addr, socklen_t *addrlen)
+{
+	struct sockaddr_ll _sll, *sll;
+	struct rtnet_device *rtdev;
+	rtdm_lockctx_t context;
+	socklen_t _namelen, *namelen;
+	int ret;
+
+	namelen = rtnet_get_arg(fd, &_namelen, addrlen, sizeof(_namelen));
+	if (IS_ERR(namelen))
+		return PTR_ERR(namelen);
+
+	if (*namelen < sizeof(struct sockaddr_ll))
+		return -EINVAL;
+
+	sll = rtnet_get_arg(fd, &_sll, addr, sizeof(_sll));
+	if (IS_ERR(sll))
+		return PTR_ERR(sll);
+
+	rtdm_lock_get_irqsave(&sock->param_lock, context);
+
+	sll->sll_family = AF_PACKET;
+	sll->sll_ifindex = sock->prot.packet.ifindex;
+	sll->sll_protocol = sock->protocol;
+
+	rtdm_lock_put_irqrestore(&sock->param_lock, context);
+
+	rtdev = rtdev_get_by_index(sll->sll_ifindex);
+	if (rtdev != NULL) {
+		sll->sll_hatype = rtdev->type;
+		sll->sll_halen = rtdev->addr_len;
+		memcpy(sll->sll_addr, rtdev->dev_addr, rtdev->addr_len);
+		rtdev_dereference(rtdev);
+	} else {
+		sll->sll_hatype = 0;
+		sll->sll_halen = 0;
+	}
+
+	*namelen = sizeof(struct sockaddr_ll);
+
+	ret = rtnet_put_arg(fd, addr, sll, sizeof(*sll));
+	if (ret)
+		return ret;
+
+	return rtnet_put_arg(fd, addrlen, namelen, sizeof(*namelen));
+}
+
+/***
+ * rt_packet_socket - initialize a packet socket
+ */
+static int rt_packet_socket(struct rtdm_fd *fd, int protocol)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	int ret;
+
+	if ((ret = rt_socket_init(fd, protocol)) != 0)
+		return ret;
+
+	sock->prot.packet.packet_type.type = protocol;
+	sock->prot.packet.ifindex = 0;
+	sock->prot.packet.packet_type.trylock = rt_packet_trylock;
+	sock->prot.packet.packet_type.unlock = rt_packet_unlock;
+
+	/* if protocol is non-zero, register the packet type */
+	if (protocol != 0) {
+		sock->prot.packet.packet_type.handler = rt_packet_rcv;
+		sock->prot.packet.packet_type.err_handler = NULL;
+
+		if ((ret = rtdev_add_pack(&sock->prot.packet.packet_type)) <
+		    0) {
+			rt_socket_cleanup(fd);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/***
+ *  rt_packet_close
+ */
+static void rt_packet_close(struct rtdm_fd *fd)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	struct rtpacket_type *pt = &sock->prot.packet.packet_type;
+	struct rtskb *del;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&sock->param_lock, context);
+
+	if (pt->type != 0) {
+		rtdev_remove_pack(pt);
+		pt->type = 0;
+	}
+
+	rtdm_lock_put_irqrestore(&sock->param_lock, context);
+
+	/* free packets in incoming queue */
+	while ((del = rtskb_dequeue(&sock->incoming)) != NULL) {
+		kfree_rtskb(del);
+	}
+
+	rt_socket_cleanup(fd);
+}
+
+/***
+ *  rt_packet_ioctl
+ */
+static int rt_packet_ioctl(struct rtdm_fd *fd, unsigned int request,
+			   void __user *arg)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	const struct _rtdm_setsockaddr_args *setaddr;
+	struct _rtdm_setsockaddr_args _setaddr;
+	const struct _rtdm_getsockaddr_args *getaddr;
+	struct _rtdm_getsockaddr_args _getaddr;
+
+	/* fast path for common socket IOCTLs */
+	if (_IOC_TYPE(request) == RTIOC_TYPE_NETWORK)
+		return rt_socket_common_ioctl(fd, request, arg);
+
+	switch (request) {
+	case _RTIOC_BIND:
+		setaddr = rtnet_get_arg(fd, &_setaddr, arg, sizeof(_setaddr));
+		if (IS_ERR(setaddr))
+			return PTR_ERR(setaddr);
+		return rt_packet_bind(fd, sock, setaddr->addr,
+				      setaddr->addrlen);
+
+	case _RTIOC_GETSOCKNAME:
+		getaddr = rtnet_get_arg(fd, &_getaddr, arg, sizeof(_getaddr));
+		if (IS_ERR(getaddr))
+			return PTR_ERR(getaddr);
+		return rt_packet_getsockname(fd, sock, getaddr->addr,
+					     getaddr->addrlen);
+
+	default:
+		return rt_socket_if_ioctl(fd, request, arg);
+	}
+}
+
+/***
+ *  rt_packet_recvmsg
+ */
+static ssize_t rt_packet_recvmsg(struct rtdm_fd *fd, struct user_msghdr *msg,
+				 int msg_flags)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	ssize_t len;
+	size_t copy_len;
+	struct rtskb *rtskb;
+	struct sockaddr_ll sll;
+	int ret;
+	nanosecs_rel_t timeout = sock->timeout;
+	socklen_t namelen;
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+
+	if (msg->msg_iovlen < 0)
+		return -EINVAL;
+
+	if (msg->msg_iovlen == 0)
+		return 0;
+
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	/* non-blocking receive? */
+	if (msg_flags & MSG_DONTWAIT)
+		timeout = -1;
+
+	ret = rtdm_sem_timeddown(&sock->pending_sem, timeout, NULL);
+	if (unlikely(ret < 0))
+		switch (ret) {
+		default:
+			ret = -EBADF; /* socket has been closed */
+			fallthrough;
+		case -EWOULDBLOCK:
+		case -ETIMEDOUT:
+		case -EINTR:
+			rtdm_drop_iovec(iov, iov_fast);
+			return ret;
+		}
+
+	rtskb = rtskb_dequeue_chain(&sock->incoming);
+	RTNET_ASSERT(rtskb != NULL, return -EFAULT;);
+
+	/* copy the address if required. */
+	if (msg->msg_name) {
+		struct rtnet_device *rtdev = rtskb->rtdev;
+		memset(&sll, 0, sizeof(sll));
+		sll.sll_family = AF_PACKET;
+		sll.sll_hatype = rtdev->type;
+		sll.sll_protocol = rtskb->protocol;
+		sll.sll_pkttype = rtskb->pkt_type;
+		sll.sll_ifindex = rtdev->ifindex;
+
+		if (msg->msg_namelen < 0) {
+			ret = -EINVAL;
+			goto fail;
+		}
+		namelen = min(sizeof(sll), (size_t)msg->msg_namelen);
+
+		/* Ethernet-specific - a generic parse handler would be needed here */
+		memcpy(sll.sll_addr, rtskb->mac.ethernet->h_source, ETH_ALEN);
+		sll.sll_halen = ETH_ALEN;
+		ret = rtnet_put_arg(fd, msg->msg_name, &sll, namelen);
+		if (ret)
+			goto fail;
+
+		msg->msg_namelen = sizeof(sll);
+	}
+
+	/* Include the header in raw delivery */
+	if (rtdm_fd_to_context(fd)->device->driver->socket_type != SOCK_DGRAM)
+		rtskb_push(rtskb, rtskb->data - rtskb->mac.raw);
+
+	/* The data must not be longer than the available buffer size */
+	copy_len = rtskb->len;
+	len = rtdm_get_iov_flatlen(iov, msg->msg_iovlen);
+	if (len < 0) {
+		copy_len = len;
+		goto out;
+	}
+
+	if (copy_len > len) {
+		copy_len = len;
+		msg->msg_flags |= MSG_TRUNC;
+	}
+
+	copy_len = rtnet_write_to_iov(fd, iov, msg->msg_iovlen, rtskb->data,
+				      copy_len);
+out:
+	if ((msg_flags & MSG_PEEK) == 0) {
+		kfree_rtskb(rtskb);
+	} else {
+		rtskb_queue_head(&sock->incoming, rtskb);
+		rtdm_sem_up(&sock->pending_sem);
+	}
+
+	rtdm_drop_iovec(iov, iov_fast);
+
+	return copy_len;
+fail:
+	copy_len = ret;
+	goto out;
+}
+
+/***
+ *  rt_packet_sendmsg
+ */
+static ssize_t rt_packet_sendmsg(struct rtdm_fd *fd,
+				 const struct user_msghdr *msg, int msg_flags)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	size_t len;
+	struct sockaddr_ll _sll, *sll;
+	struct rtnet_device *rtdev;
+	struct rtskb *rtskb;
+	unsigned short proto;
+	unsigned char *addr;
+	int ifindex;
+	ssize_t ret;
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+
+	if (msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
+		return -EOPNOTSUPP;
+	if (msg_flags & ~MSG_DONTWAIT)
+		return -EINVAL;
+
+	if (msg->msg_iovlen < 0)
+		return -EINVAL;
+
+	if (msg->msg_iovlen == 0)
+		return 0;
+
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	if (msg->msg_name == NULL) {
+		/* Note: we do not guard against races with rt_packet_bind()
+		   here - the caller is responsible for serializing them. */
+		ifindex = sock->prot.packet.ifindex;
+		proto = sock->prot.packet.packet_type.type;
+		addr = NULL;
+		sll = NULL;
+	} else {
+		sll = rtnet_get_arg(fd, &_sll, msg->msg_name, sizeof(_sll));
+		if (IS_ERR(sll)) {
+			ret = PTR_ERR(sll);
+			goto abort;
+		}
+
+		if ((msg->msg_namelen < sizeof(struct sockaddr_ll)) ||
+		    (msg->msg_namelen <
+		     (sll->sll_halen +
+		      offsetof(struct sockaddr_ll, sll_addr))) ||
+		    ((sll->sll_family != AF_PACKET) &&
+		     (sll->sll_family != AF_UNSPEC))) {
+			ret = -EINVAL;
+			goto abort;
+		}
+
+		ifindex = sll->sll_ifindex;
+		proto = sll->sll_protocol;
+		addr = sll->sll_addr;
+	}
+
+	if ((rtdev = rtdev_get_by_index(ifindex)) == NULL) {
+		ret = -ENODEV;
+		goto abort;
+	}
+
+	len = rtdm_get_iov_flatlen(iov, msg->msg_iovlen);
+	rtskb = alloc_rtskb(rtdev->hard_header_len + len, &sock->skb_pool);
+	if (rtskb == NULL) {
+		ret = -ENOBUFS;
+		goto out;
+	}
+
+	/* If an RTmac discipline is active, this becomes a pure sanity check to
+       avoid writing beyond rtskb boundaries. The hard check is then performed
+       upon rtdev_xmit() by the discipline's xmit handler. */
+	if (len >
+	    rtdev->mtu +
+		    ((rtdm_fd_to_context(fd)->device->driver->socket_type ==
+		      SOCK_RAW) ?
+			     rtdev->hard_header_len :
+			     0)) {
+		ret = -EMSGSIZE;
+		goto err;
+	}
+
+	if ((sll != NULL) && (sll->sll_halen != rtdev->addr_len)) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len);
+
+	rtskb->rtdev = rtdev;
+	rtskb->priority = sock->priority;
+
+	if (rtdev->hard_header) {
+		int hdr_len;
+
+		ret = -EINVAL;
+		hdr_len = rtdev->hard_header(rtskb, rtdev, ntohs(proto), addr,
+					     NULL, len);
+		if (rtdm_fd_to_context(fd)->device->driver->socket_type !=
+		    SOCK_DGRAM) {
+			rtskb->tail = rtskb->data;
+			rtskb->len = 0;
+		} else if (hdr_len < 0)
+			goto err;
+	}
+
+	ret = rtnet_read_from_iov(fd, iov, msg->msg_iovlen,
+				  rtskb_put(rtskb, len), len);
+
+	if ((rtdev->flags & IFF_UP) != 0) {
+		if ((ret = rtdev_xmit(rtskb)) == 0)
+			ret = len;
+	} else {
+		ret = -ENETDOWN;
+		goto err;
+	}
+
+out:
+	rtdev_dereference(rtdev);
+abort:
+	rtdm_drop_iovec(iov, iov_fast);
+
+	return ret;
+err:
+	kfree_rtskb(rtskb);
+	goto out;
+}
+
+static struct rtdm_driver packet_proto_drv = {
+    .profile_info =     RTDM_PROFILE_INFO(packet,
+					RTDM_CLASS_NETWORK,
+					RTDM_SUBCLASS_RTNET,
+					RTNET_RTDM_VER),
+    .device_flags =     RTDM_PROTOCOL_DEVICE,
+    .device_count =     1,
+    .context_size =     sizeof(struct rtsocket),
+
+    .protocol_family =  PF_PACKET,
+    .socket_type =      SOCK_DGRAM,
+
+
+    .ops = {
+	.socket =       rt_packet_socket,
+	.close =        rt_packet_close,
+	.ioctl_rt =     rt_packet_ioctl,
+	.ioctl_nrt =    rt_packet_ioctl,
+	.recvmsg_rt =   rt_packet_recvmsg,
+	.sendmsg_rt =   rt_packet_sendmsg,
+	.select =       rt_socket_select_bind,
+    },
+};
+
+static struct rtdm_device packet_proto_dev = {
+	.driver = &packet_proto_drv,
+	.label = "packet",
+};
+
+static struct rtdm_driver raw_packet_proto_drv = {
+    .profile_info =     RTDM_PROFILE_INFO(raw_packet,
+					RTDM_CLASS_NETWORK,
+					RTDM_SUBCLASS_RTNET,
+					RTNET_RTDM_VER),
+    .device_flags =     RTDM_PROTOCOL_DEVICE,
+    .device_count =     1,
+    .context_size =     sizeof(struct rtsocket),
+
+    .protocol_family =  PF_PACKET,
+    .socket_type =      SOCK_RAW,
+
+    .ops = {
+	.socket =       rt_packet_socket,
+	.close =        rt_packet_close,
+	.ioctl_rt =     rt_packet_ioctl,
+	.ioctl_nrt =    rt_packet_ioctl,
+	.recvmsg_rt =   rt_packet_recvmsg,
+	.sendmsg_rt =   rt_packet_sendmsg,
+	.select =       rt_socket_select_bind,
+    },
+};
+
+static struct rtdm_device raw_packet_proto_dev = {
+	.driver = &raw_packet_proto_drv,
+	.label = "raw_packet",
+};
+
+static int __init rt_packet_proto_init(void)
+{
+	int err;
+
+	err = rtdm_dev_register(&packet_proto_dev);
+	if (err)
+		return err;
+
+	err = rtdm_dev_register(&raw_packet_proto_dev);
+	if (err)
+		rtdm_dev_unregister(&packet_proto_dev);
+
+	return err;
+}
+
+static void rt_packet_proto_release(void)
+{
+	rtdm_dev_unregister(&packet_proto_dev);
+	rtdm_dev_unregister(&raw_packet_proto_dev);
+}
+
+module_init(rt_packet_proto_init);
+module_exit(rt_packet_proto_release);
+
+/**********************************************************
+ * Utilities                                              *
+ **********************************************************/
+
+static int hex2int(unsigned char hex_char)
+{
+	if ((hex_char >= '0') && (hex_char <= '9'))
+		return hex_char - '0';
+	else if ((hex_char >= 'a') && (hex_char <= 'f'))
+		return hex_char - 'a' + 10;
+	else if ((hex_char >= 'A') && (hex_char <= 'F'))
+		return hex_char - 'A' + 10;
+	else
+		return -EINVAL;
+}
+
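+/*
+ * Parse a MAC address in "aa:bb:cc:dd:ee:ff" notation into a 6-byte
+ * binary buffer; returns 0 on success or -EINVAL on malformed input.
+ */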
+int rt_eth_aton(unsigned char *addr_buf, const char *mac)
+{
+	int i = 0;
+	int nibble;
+
+	while (1) {
+		if (*mac == 0)
+			return -EINVAL;
+
+		if ((nibble = hex2int(*mac++)) < 0)
+			return nibble;
+		*addr_buf = nibble << 4;
+
+		if (*mac == 0)
+			return -EINVAL;
+
+		if ((nibble = hex2int(*mac++)) < 0)
+			return nibble;
+		*addr_buf++ |= nibble;
+
+		if (++i == 6)
+			break;
+
+		if ((*mac == 0) || (*mac++ != ':'))
+			return -EINVAL;
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(rt_eth_aton);
+++ linux-patched/drivers/xenomai/net/stack/packet/Makefile	2022-03-21 12:58:30.267880400 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:2 @
--- linux/drivers/xenomai/net/stack/rtmac/rtmac_vnic.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTPACKET) += rtpacket.o
+
+rtpacket-y := af_packet.o
+++ linux-patched/drivers/xenomai/net/stack/rtmac/rtmac_vnic.c	2022-03-21 12:58:30.260880468 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/tdma/tdma_worker.c	1970-01-01 01:00:00.000000000 +0100
+/* rtmac_vnic.c
+ *
+ * rtmac - real-time networking media access control subsystem
+ * Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *               2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+
+#include <rtnet_internal.h>
+#include <rtdev.h>
+#include <rtnet_port.h> /* for netdev_priv() */
+#include <rtmac/rtmac_disc.h>
+#include <rtmac/rtmac_proto.h>
+#include <rtmac/rtmac_vnic.h>
+
+static unsigned int vnic_rtskbs = DEFAULT_VNIC_RTSKBS;
+module_param(vnic_rtskbs, uint, 0444);
+MODULE_PARM_DESC(vnic_rtskbs,
+		 "Number of realtime socket buffers per virtual NIC");
+
+static rtdm_nrtsig_t vnic_signal;
+static struct rtskb_queue rx_queue;
+
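+/*
+ * Hand an incoming packet over to the virtual NIC: charge the rtskb to the
+ * per-VNIC buffer pool, queue it and pend the non-RT signal so the Linux
+ * side can process it outside of real-time context.
+ */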
+int rtmac_vnic_rx(struct rtskb *rtskb, u16 type)
+{
+	struct rtmac_priv *mac_priv = rtskb->rtdev->mac_priv;
+	struct rtskb_pool *pool = &mac_priv->vnic_skb_pool;
+
+	if (rtskb_acquire(rtskb, pool) != 0) {
+		mac_priv->vnic_stats.rx_dropped++;
+		kfree_rtskb(rtskb);
+		return -1;
+	}
+
+	rtskb->protocol = type;
+
+	if (rtskb_queue_tail_check(&rx_queue, rtskb))
+		rtdm_nrtsig_pend(&vnic_signal);
+
+	return 0;
+}
+
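+/*
+ * Runs in Linux (non-RT) context: for each queued rtskb, allocate a regular
+ * sk_buff, copy the Ethernet header (restoring the original protocol field)
+ * and payload, update the VNIC statistics and feed the result to netif_rx().
+ */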
+static void rtmac_vnic_signal_handler(rtdm_nrtsig_t *nrtsig, void *arg)
+{
+	struct rtskb *rtskb;
+	struct sk_buff *skb;
+	unsigned hdrlen;
+	struct net_device_stats *stats;
+	struct rtnet_device *rtdev;
+
+	while (1) {
+		rtskb = rtskb_dequeue(&rx_queue);
+		if (!rtskb)
+			break;
+
+		rtdev = rtskb->rtdev;
+		hdrlen = rtdev->hard_header_len;
+
+		skb = dev_alloc_skb(hdrlen + rtskb->len + 2);
+		if (skb) {
+			/* the rtskb stamp is useless (different clock), get new one */
+			__net_timestamp(skb);
+
+			skb_reserve(skb,
+				    2); /* Align IP on 16 byte boundaries */
+
+			/* copy Ethernet header */
+			memcpy(skb_put(skb, hdrlen),
+			       rtskb->data - hdrlen - sizeof(struct rtmac_hdr),
+			       hdrlen);
+
+			/* patch the protocol field in the original Ethernet header */
+			((struct ethhdr *)skb->data)->h_proto = rtskb->protocol;
+
+			/* copy data */
+			memcpy(skb_put(skb, rtskb->len), rtskb->data,
+			       rtskb->len);
+
+			skb->dev = rtskb->rtdev->mac_priv->vnic;
+			skb->protocol = eth_type_trans(skb, skb->dev);
+
+			stats = &rtskb->rtdev->mac_priv->vnic_stats;
+
+			kfree_rtskb(rtskb);
+
+			stats->rx_packets++;
+			stats->rx_bytes += skb->len;
+
+			netif_rx(skb);
+		} else {
+			printk("RTmac: VNIC failed to allocate Linux skb\n");
+			kfree_rtskb(rtskb);
+		}
+	}
+}
+
+static int rtmac_vnic_copy_mac(struct net_device *dev)
+{
+	memcpy(dev->dev_addr,
+	       (*(struct rtnet_device **)netdev_priv(dev))->dev_addr,
+	       MAX_ADDR_LEN);
+
+	return 0;
+}
+
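+/*
+ * Non-RT transmit path of the virtual NIC: copy the payload of the Linux skb
+ * into an rtskb from the VNIC pool, prepend an RTmac header carrying the
+ * tunnelling flag and pass the packet to the discipline's nrt_packet_tx()
+ * handler. The Linux skb is consumed in any case.
+ */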
+int rtmac_vnic_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rtnet_device *rtdev = *(struct rtnet_device **)netdev_priv(dev);
+	struct net_device_stats *stats = &rtdev->mac_priv->vnic_stats;
+	struct rtskb_pool *pool = &rtdev->mac_priv->vnic_skb_pool;
+	struct ethhdr *ethernet = (struct ethhdr *)skb->data;
+	struct rtskb *rtskb;
+	int res;
+	int data_len;
+
+	rtskb = alloc_rtskb((skb->len + sizeof(struct rtmac_hdr) + 15) & ~15,
+			    pool);
+	if (!rtskb)
+		return NETDEV_TX_BUSY;
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len + sizeof(struct rtmac_hdr));
+
+	data_len = skb->len - dev->hard_header_len;
+	memcpy(rtskb_put(rtskb, data_len), skb->data + dev->hard_header_len,
+	       data_len);
+
+	res = rtmac_add_header(rtdev, ethernet->h_dest, rtskb,
+			       ntohs(ethernet->h_proto), RTMAC_FLAG_TUNNEL);
+	if (res < 0) {
+		stats->tx_dropped++;
+		kfree_rtskb(rtskb);
+		goto done;
+	}
+
+	RTNET_ASSERT(rtdev->mac_disc->nrt_packet_tx != NULL, kfree_rtskb(rtskb);
+		     goto done;);
+
+	res = rtdev->mac_disc->nrt_packet_tx(rtskb);
+	if (res < 0) {
+		stats->tx_dropped++;
+		kfree_rtskb(rtskb);
+	} else {
+		stats->tx_packets++;
+		stats->tx_bytes += skb->len;
+	}
+
+done:
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+static struct net_device_stats *rtmac_vnic_get_stats(struct net_device *dev)
+{
+	return &(*(struct rtnet_device **)netdev_priv(dev))
+			->mac_priv->vnic_stats;
+}
+
+static int rtmac_vnic_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if ((new_mtu < 68) ||
+	    ((unsigned)new_mtu > 1500 - sizeof(struct rtmac_hdr)))
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+void rtmac_vnic_set_max_mtu(struct rtnet_device *rtdev, unsigned int max_mtu)
+{
+	struct rtmac_priv *mac_priv = rtdev->mac_priv;
+	struct net_device *vnic = mac_priv->vnic;
+	unsigned int prev_mtu = mac_priv->vnic_max_mtu;
+
+	mac_priv->vnic_max_mtu = max_mtu - sizeof(struct rtmac_hdr);
+
+	/* set vnic mtu in case max_mtu is smaller than the current mtu or
+       the current mtu was set to previous max_mtu */
+	rtnl_lock();
+	if ((vnic->mtu > mac_priv->vnic_max_mtu) ||
+	    (prev_mtu == mac_priv->vnic_max_mtu)) {
+		dev_set_mtu(vnic, mac_priv->vnic_max_mtu);
+	}
+	rtnl_unlock();
+}
+
+static struct net_device_ops vnic_netdev_ops = {
+	.ndo_open = rtmac_vnic_copy_mac,
+	.ndo_get_stats = rtmac_vnic_get_stats,
+	.ndo_change_mtu = rtmac_vnic_change_mtu,
+};
+
+static void rtmac_vnic_setup(struct net_device *dev)
+{
+	ether_setup(dev);
+
+	dev->netdev_ops = &vnic_netdev_ops;
+	dev->flags &= ~IFF_MULTICAST;
+}
+
+int rtmac_vnic_add(struct rtnet_device *rtdev, vnic_xmit_handler vnic_xmit)
+{
+	int res;
+	struct rtmac_priv *mac_priv = rtdev->mac_priv;
+	struct net_device *vnic;
+	char buf[IFNAMSIZ];
+
+	/* does the discipline request vnic support? */
+	if (!vnic_xmit)
+		return 0;
+
+	mac_priv->vnic = NULL;
+	mac_priv->vnic_max_mtu = rtdev->mtu - sizeof(struct rtmac_hdr);
+	memset(&mac_priv->vnic_stats, 0, sizeof(mac_priv->vnic_stats));
+
+	/* create the rtskb pool */
+	if (rtskb_pool_init(&mac_priv->vnic_skb_pool, vnic_rtskbs, NULL, NULL) <
+	    vnic_rtskbs) {
+		res = -ENOMEM;
+		goto error;
+	}
+
+	snprintf(buf, sizeof(buf), "vnic%d", rtdev->ifindex - 1);
+
+	vnic = alloc_netdev(sizeof(struct rtnet_device *), buf,
+			    NET_NAME_UNKNOWN, rtmac_vnic_setup);
+	if (!vnic) {
+		res = -ENOMEM;
+		goto error;
+	}
+
+	vnic_netdev_ops.ndo_start_xmit = vnic_xmit;
+	vnic->mtu = mac_priv->vnic_max_mtu;
+	*(struct rtnet_device **)netdev_priv(vnic) = rtdev;
+	rtmac_vnic_copy_mac(vnic);
+
+	res = register_netdev(vnic);
+	if (res < 0)
+		goto error;
+
+	mac_priv->vnic = vnic;
+
+	return 0;
+
+error:
+	rtskb_pool_release(&mac_priv->vnic_skb_pool);
+	return res;
+}
+
+int rtmac_vnic_unregister(struct rtnet_device *rtdev)
+{
+	struct rtmac_priv *mac_priv = rtdev->mac_priv;
+
+	if (mac_priv->vnic) {
+		rtskb_pool_release(&mac_priv->vnic_skb_pool);
+		unregister_netdev(mac_priv->vnic);
+		free_netdev(mac_priv->vnic);
+		mac_priv->vnic = NULL;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+int rtnet_rtmac_vnics_show(struct xnvfile_regular_iterator *it, void *d)
+{
+	struct rtnet_device *rtdev;
+	int i;
+	int err;
+
+	xnvfile_printf(it, "RT-NIC name\tVNIC name\n");
+
+	for (i = 1; i <= MAX_RT_DEVICES; i++) {
+		rtdev = rtdev_get_by_index(i);
+		if (rtdev == NULL)
+			continue;
+
+		err = mutex_lock_interruptible(&rtdev->nrt_lock);
+		if (err < 0) {
+			rtdev_dereference(rtdev);
+			return err;
+		}
+
+		if (rtdev->mac_priv != NULL) {
+			struct rtmac_priv *rtmac;
+
+			rtmac = (struct rtmac_priv *)rtdev->mac_priv;
+			xnvfile_printf(it, "%-15s %s\n", rtdev->name,
+				       rtmac->vnic->name);
+		}
+
+		mutex_unlock(&rtdev->nrt_lock);
+		rtdev_dereference(rtdev);
+	}
+
+	return 0;
+}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+int __init rtmac_vnic_module_init(void)
+{
+	rtskb_queue_init(&rx_queue);
+
+	rtdm_nrtsig_init(&vnic_signal, rtmac_vnic_signal_handler, NULL);
+
+	return 0;
+}
+
+void rtmac_vnic_module_cleanup(void)
+{
+	struct rtskb *rtskb;
+
+	rtdm_nrtsig_destroy(&vnic_signal);
+
+	while ((rtskb = rtskb_dequeue(&rx_queue)) != NULL) {
+		kfree_rtskb(rtskb);
+	}
+}
+++ linux-patched/drivers/xenomai/net/stack/rtmac/tdma/tdma_worker.c	2022-03-21 12:58:30.252880546 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/tdma/tdma_ioctl.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtmac/tdma/tdma_worker.c
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <rtmac/rtmac_proto.h>
+#include <rtmac/tdma/tdma_proto.h>
+
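+/*
+ * Called and left with tdma->lock held. If the slot is due in the current
+ * cycle (period/phasing match), drop the lock, sleep until the slot offset,
+ * dequeue at most one pending packet from the slot queue and transmit it.
+ */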
+static void do_slot_job(struct tdma_priv *tdma, struct tdma_slot *job,
+			rtdm_lockctx_t lockctx)
+{
+	struct rtskb *rtskb;
+
+	if ((job->period != 1) &&
+	    (tdma->current_cycle % job->period != job->phasing))
+		return;
+
+	rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+
+	/* wait for slot begin, then send one pending packet */
+	rtdm_task_sleep_abs(tdma->current_cycle_start + SLOT_JOB(job)->offset,
+			    RTDM_TIMERMODE_REALTIME);
+
+	rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+	rtskb = __rtskb_prio_dequeue(SLOT_JOB(job)->queue);
+	if (!rtskb)
+		return;
+	rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+
+	rtmac_xmit(rtskb);
+
+	rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+}
+
+static void do_xmit_sync_job(struct tdma_priv *tdma, rtdm_lockctx_t lockctx)
+{
+	rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+
+	/* wait for beginning of next cycle, then send sync */
+	rtdm_task_sleep_abs(tdma->current_cycle_start + tdma->cycle_period,
+			    RTDM_TIMERMODE_REALTIME);
+	rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+	tdma->current_cycle++;
+	tdma->current_cycle_start += tdma->cycle_period;
+	rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+
+	tdma_xmit_sync_frame(tdma);
+
+	rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+}
+
+static void do_backup_sync_job(struct tdma_priv *tdma, rtdm_lockctx_t lockctx)
+{
+	rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+
+	/* wait for backup slot */
+	rtdm_task_sleep_abs(tdma->current_cycle_start + tdma->backup_sync_inc,
+			    RTDM_TIMERMODE_REALTIME);
+
+	/* take over sync transmission if all earlier masters failed */
+	if (!test_and_clear_bit(TDMA_FLAG_RECEIVED_SYNC, &tdma->flags)) {
+		rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+		tdma->current_cycle++;
+		tdma->current_cycle_start += tdma->cycle_period;
+		rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+
+		tdma_xmit_sync_frame(tdma);
+
+		set_bit(TDMA_FLAG_BACKUP_ACTIVE, &tdma->flags);
+	} else
+		clear_bit(TDMA_FLAG_BACKUP_ACTIVE, &tdma->flags);
+
+	rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+}
+
+static struct tdma_job *do_request_cal_job(struct tdma_priv *tdma,
+					   struct tdma_request_cal *job,
+					   rtdm_lockctx_t lockctx)
+{
+	struct rt_proc_call *call;
+	struct tdma_job *prev_job;
+	int err;
+
+	if ((job->period != 1) &&
+	    (tdma->current_cycle % job->period != job->phasing))
+		return &job->head;
+
+	/* remove job until we get a reply */
+	__list_del(job->head.entry.prev, job->head.entry.next);
+	job->head.ref_count--;
+	prev_job = tdma->current_job =
+		list_entry(job->head.entry.prev, struct tdma_job, entry);
+	prev_job->ref_count++;
+	tdma->job_list_revision++;
+
+	rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+
+	rtdm_task_sleep_abs(tdma->current_cycle_start + job->offset,
+			    RTDM_TIMERMODE_REALTIME);
+	err = tdma_xmit_request_cal_frame(
+		tdma, tdma->current_cycle + job->period, job->offset);
+
+	rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+
+	/* terminate call on error */
+	if (err < 0) {
+		call = tdma->calibration_call;
+		tdma->calibration_call = NULL;
+
+		if (call) {
+			rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+			rtpc_complete_call(call, err);
+			rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+		}
+	}
+
+	return prev_job;
+}
+
+static struct tdma_job *do_reply_cal_job(struct tdma_priv *tdma,
+					 struct tdma_reply_cal *job,
+					 rtdm_lockctx_t lockctx)
+{
+	struct tdma_job *prev_job;
+
+	if (job->reply_cycle > tdma->current_cycle)
+		return &job->head;
+
+	/* remove the job */
+	__list_del(job->head.entry.prev, job->head.entry.next);
+	job->head.ref_count--;
+	prev_job = tdma->current_job =
+		list_entry(job->head.entry.prev, struct tdma_job, entry);
+	prev_job->ref_count++;
+	tdma->job_list_revision++;
+
+	rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+
+	if (job->reply_cycle == tdma->current_cycle) {
+		/* send reply in the assigned slot */
+		rtdm_task_sleep_abs(tdma->current_cycle_start +
+					    job->reply_offset,
+				    RTDM_TIMERMODE_REALTIME);
+		rtmac_xmit(job->reply_rtskb);
+	} else {
+		/* cleanup if cycle already passed */
+		kfree_rtskb(job->reply_rtskb);
+	}
+
+	rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+
+	return prev_job;
+}
+
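+/*
+ * TDMA worker task: once woken via worker_wakeup, it cycles through the job
+ * list, dispatching sync transmission, calibration and payload-slot jobs.
+ * The per-job ref_count lets the configuration paths wait until a job is no
+ * longer in use before removing it from the list.
+ */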
+void tdma_worker(void *arg)
+{
+	struct tdma_priv *tdma = arg;
+	struct tdma_job *job;
+	rtdm_lockctx_t lockctx;
+	int ret;
+
+	ret = rtdm_event_wait(&tdma->worker_wakeup);
+	if (ret)
+		return;
+
+	rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+
+	job = tdma->first_job;
+
+	while (!rtdm_task_should_stop()) {
+		job->ref_count++;
+		switch (job->id) {
+		case WAIT_ON_SYNC:
+			rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+			ret = rtdm_event_wait(&tdma->sync_event);
+			if (ret)
+				return;
+			rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+			break;
+
+		case XMIT_REQ_CAL:
+			job = do_request_cal_job(tdma, REQUEST_CAL_JOB(job),
+						 lockctx);
+			break;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+		case XMIT_SYNC:
+			do_xmit_sync_job(tdma, lockctx);
+			break;
+
+		case BACKUP_SYNC:
+			do_backup_sync_job(tdma, lockctx);
+			break;
+
+		case XMIT_RPL_CAL:
+			job = do_reply_cal_job(tdma, REPLY_CAL_JOB(job),
+					       lockctx);
+			break;
+#endif /* CONFIG_XENO_DRIVERS_NET_TDMA_MASTER */
+
+		default:
+			do_slot_job(tdma, SLOT_JOB(job), lockctx);
+			break;
+		}
+		job->ref_count--;
+
+		job = tdma->current_job =
+			list_entry(job->entry.next, struct tdma_job, entry);
+	}
+
+	rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+}
+++ linux-patched/drivers/xenomai/net/stack/rtmac/tdma/tdma_ioctl.c	2022-03-21 12:58:30.245880614 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/tdma/tdma_dev.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtmac/tdma/tdma_ioctl.c
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <asm/div64.h>
+
+#include <tdma_chrdev.h>
+#include <rtmac/rtmac_vnic.h>
+#include <rtmac/tdma/tdma.h>
+
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
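+/*
+ * Attach the TDMA discipline in master mode: listen for about three cycle
+ * periods for an already active master, set up the calibration rtskb pool
+ * and the slot table, then start the worker as primary master, backup
+ * master or, if another master was heard, as a still-to-be-calibrated slave.
+ */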
+static int tdma_ioctl_master(struct rtnet_device *rtdev,
+			     struct tdma_config *cfg)
+{
+	struct tdma_priv *tdma;
+	u64 cycle_ms;
+	unsigned int table_size;
+	int ret;
+
+	if (rtdev->mac_priv == NULL) {
+		ret = rtmac_disc_attach(rtdev, &tdma_disc);
+		if (ret < 0)
+			return ret;
+	}
+
+	tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+	if (tdma->magic != TDMA_MAGIC) {
+		/* note: we don't clean up an unknown discipline */
+		return -ENOTTY;
+	}
+
+	if (test_bit(TDMA_FLAG_ATTACHED, &tdma->flags)) {
+		/* already attached */
+		return -EBUSY;
+	}
+
+	set_bit(TDMA_FLAG_MASTER, &tdma->flags);
+
+	tdma->cal_rounds = cfg->args.master.cal_rounds;
+
+	/* search at least 3 cycle periods for other masters */
+	cycle_ms = cfg->args.master.cycle_period;
+	do_div(cycle_ms, 1000000);
+	if (cycle_ms == 0)
+		cycle_ms = 1;
+	msleep(3 * cycle_ms);
+
+	if (rtskb_module_pool_init(&tdma->cal_rtskb_pool,
+				   cfg->args.master.max_cal_requests) !=
+	    cfg->args.master.max_cal_requests) {
+		ret = -ENOMEM;
+		goto err_out;
+	}
+
+	table_size = sizeof(struct tdma_slot *) *
+		     ((cfg->args.master.max_slot_id >= 1) ?
+			      cfg->args.master.max_slot_id + 1 :
+			      2);
+
+	tdma->slot_table = (struct tdma_slot **)kmalloc(table_size, GFP_KERNEL);
+	if (!tdma->slot_table) {
+		ret = -ENOMEM;
+		goto err_out;
+	}
+	tdma->max_slot_id = cfg->args.master.max_slot_id;
+	memset(tdma->slot_table, 0, table_size);
+
+	tdma->cycle_period = cfg->args.master.cycle_period;
+	tdma->sync_job.ref_count = 0;
+	INIT_LIST_HEAD(&tdma->sync_job.entry);
+
+	if (cfg->args.master.backup_sync_offset == 0)
+		tdma->sync_job.id = XMIT_SYNC;
+	else {
+		set_bit(TDMA_FLAG_BACKUP_MASTER, &tdma->flags);
+		tdma->sync_job.id = BACKUP_SYNC;
+		tdma->backup_sync_inc = cfg->args.master.backup_sync_offset +
+					tdma->cycle_period;
+	}
+
+	/* did we detect another active master? */
+	if (test_bit(TDMA_FLAG_RECEIVED_SYNC, &tdma->flags)) {
+		/* become a slave, we need to calibrate first */
+		tdma->sync_job.id = WAIT_ON_SYNC;
+	} else {
+		if (test_bit(TDMA_FLAG_BACKUP_MASTER, &tdma->flags))
+			printk("TDMA: warning, no primary master detected!\n");
+		set_bit(TDMA_FLAG_CALIBRATED, &tdma->flags);
+		tdma->current_cycle_start = rtdm_clock_read();
+	}
+
+	tdma->first_job = tdma->current_job = &tdma->sync_job;
+
+	rtdm_event_signal(&tdma->worker_wakeup);
+
+	set_bit(TDMA_FLAG_ATTACHED, &tdma->flags);
+
+	return 0;
+
+err_out:
+	rtmac_disc_detach(rtdev);
+	return ret;
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_TDMA_MASTER */
+
+static int tdma_ioctl_slave(struct rtnet_device *rtdev, struct tdma_config *cfg)
+{
+	struct tdma_priv *tdma;
+	unsigned int table_size;
+	int ret;
+
+	if (rtdev->mac_priv == NULL) {
+		ret = rtmac_disc_attach(rtdev, &tdma_disc);
+		if (ret < 0)
+			return ret;
+	}
+
+	tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+	if (tdma->magic != TDMA_MAGIC) {
+		/* note: we don't clean up an unknown discipline */
+		return -ENOTTY;
+	}
+
+	if (test_bit(TDMA_FLAG_ATTACHED, &tdma->flags)) {
+		/* already attached */
+		return -EBUSY;
+	}
+
+	tdma->cal_rounds = cfg->args.slave.cal_rounds;
+	if (tdma->cal_rounds == 0)
+		set_bit(TDMA_FLAG_CALIBRATED, &tdma->flags);
+
+	table_size = sizeof(struct tdma_slot *) *
+		     ((cfg->args.slave.max_slot_id >= 1) ?
+			      cfg->args.slave.max_slot_id + 1 :
+			      2);
+
+	tdma->slot_table = (struct tdma_slot **)kmalloc(table_size, GFP_KERNEL);
+	if (!tdma->slot_table) {
+		ret = -ENOMEM;
+		goto err_out;
+	}
+	tdma->max_slot_id = cfg->args.slave.max_slot_id;
+	memset(tdma->slot_table, 0, table_size);
+
+	tdma->sync_job.id = WAIT_ON_SYNC;
+	tdma->sync_job.ref_count = 0;
+	INIT_LIST_HEAD(&tdma->sync_job.entry);
+
+	tdma->first_job = tdma->current_job = &tdma->sync_job;
+
+	rtdm_event_signal(&tdma->worker_wakeup);
+
+	set_bit(TDMA_FLAG_ATTACHED, &tdma->flags);
+
+	return 0;
+
+err_out:
+	rtmac_disc_detach(rtdev);
+	return ret;
+}
+
+static int tdma_ioctl_cal_result_size(struct rtnet_device *rtdev,
+				      struct tdma_config *cfg)
+{
+	struct tdma_priv *tdma;
+
+	if (rtdev->mac_priv == NULL)
+		return -ENOTTY;
+
+	tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+	if (tdma->magic != TDMA_MAGIC)
+		return -ENOTTY;
+
+	if (!test_bit(TDMA_FLAG_CALIBRATED, &tdma->flags))
+		return tdma->cal_rounds;
+	else
+		return 0;
+}
+
+int start_calibration(struct rt_proc_call *call)
+{
+	struct tdma_request_cal *req_cal;
+	struct tdma_priv *tdma;
+	rtdm_lockctx_t context;
+
+	req_cal = rtpc_get_priv(call, struct tdma_request_cal);
+	tdma = req_cal->tdma;
+
+	/* there are no slots yet, simply add this job after first_job */
+	rtdm_lock_get_irqsave(&tdma->lock, context);
+	tdma->calibration_call = call;
+	tdma->job_list_revision++;
+	list_add(&req_cal->head.entry, &tdma->first_job->entry);
+	rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+	return -CALL_PENDING;
+}
+
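+/*
+ * Completion handler of the calibration call: compute average, minimum and
+ * maximum of the measured delays, optionally copy the raw results to user
+ * space, store the average as master_packet_delay_ns and report the values
+ * in microseconds.
+ */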
+void copyback_calibration(struct rt_proc_call *call, void *priv_data)
+{
+	struct tdma_request_cal *req_cal;
+	struct tdma_priv *tdma;
+	int i;
+	u64 value;
+	u64 average = 0;
+	u64 min = 0x7FFFFFFFFFFFFFFFLL;
+	u64 max = 0;
+
+	req_cal = rtpc_get_priv(call, struct tdma_request_cal);
+	tdma = req_cal->tdma;
+
+	for (i = 0; i < tdma->cal_rounds; i++) {
+		value = req_cal->result_buffer[i];
+		average += value;
+		if (value < min)
+			min = value;
+		if (value > max)
+			max = value;
+		if ((req_cal->cal_results) &&
+		    (copy_to_user(&req_cal->cal_results[i], &value,
+				  sizeof(value)) != 0))
+			rtpc_set_result(call, -EFAULT);
+	}
+	do_div(average, tdma->cal_rounds);
+	tdma->master_packet_delay_ns = average;
+
+	average += 500;
+	do_div(average, 1000);
+	min += 500;
+	do_div(min, 1000);
+	max += 500;
+	do_div(max, 1000);
+	printk("TDMA: calibrated master-to-slave packet delay: "
+	       "%ld us (min/max: %ld/%ld us)\n",
+	       (unsigned long)average, (unsigned long)min, (unsigned long)max);
+}
+
+void cleanup_calibration(void *priv_data)
+{
+	struct tdma_request_cal *req_cal;
+
+	req_cal = (struct tdma_request_cal *)priv_data;
+	kfree(req_cal->result_buffer);
+}
+
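+/*
+ * Add or replace a transmission slot. If the node is not yet calibrated, a
+ * calibration request is dispatched first. The new slot job is inserted into
+ * the job list ordered by offset; when replacing an existing slot, we wait
+ * until its reference count drops to zero and update or detach any joint
+ * slots that shared its queue.
+ */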
+static int tdma_ioctl_set_slot(struct rtnet_device *rtdev,
+			       struct tdma_config *cfg)
+{
+	struct tdma_priv *tdma;
+	int id;
+	int jnt_id;
+	struct tdma_slot *slot, *old_slot;
+	struct tdma_job *job, *prev_job;
+	struct tdma_request_cal req_cal;
+	struct rtskb *rtskb;
+	unsigned int job_list_revision;
+	rtdm_lockctx_t context;
+	int ret;
+
+	if (rtdev->mac_priv == NULL)
+		return -ENOTTY;
+
+	tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+	if (tdma->magic != TDMA_MAGIC)
+		return -ENOTTY;
+
+	id = cfg->args.set_slot.id;
+	if (id > tdma->max_slot_id)
+		return -EINVAL;
+
+	if (cfg->args.set_slot.size == 0)
+		cfg->args.set_slot.size = rtdev->mtu;
+	else if (cfg->args.set_slot.size > rtdev->mtu)
+		return -EINVAL;
+
+	jnt_id = cfg->args.set_slot.joint_slot;
+	if ((jnt_id >= 0) &&
+	    ((jnt_id >= tdma->max_slot_id) || (tdma->slot_table[jnt_id] == 0) ||
+	     (tdma->slot_table[jnt_id]->mtu != cfg->args.set_slot.size)))
+		return -EINVAL;
+
+	slot = (struct tdma_slot *)kmalloc(sizeof(struct tdma_slot),
+					   GFP_KERNEL);
+	if (!slot)
+		return -ENOMEM;
+
+	if (!test_bit(TDMA_FLAG_CALIBRATED, &tdma->flags)) {
+		req_cal.head.id = XMIT_REQ_CAL;
+		req_cal.head.ref_count = 0;
+		req_cal.tdma = tdma;
+		req_cal.offset = cfg->args.set_slot.offset;
+		req_cal.period = cfg->args.set_slot.period;
+		req_cal.phasing = cfg->args.set_slot.phasing;
+		req_cal.cal_rounds = tdma->cal_rounds;
+		req_cal.cal_results = cfg->args.set_slot.cal_results;
+
+		req_cal.result_buffer =
+			kmalloc(req_cal.cal_rounds * sizeof(u64), GFP_KERNEL);
+		if (!req_cal.result_buffer) {
+			kfree(slot);
+			return -ENOMEM;
+		}
+
+		ret = rtpc_dispatch_call(start_calibration, 0, &req_cal,
+					 sizeof(req_cal), copyback_calibration,
+					 cleanup_calibration);
+		if (ret < 0) {
+			/* kick out any pending calibration job before returning */
+			rtdm_lock_get_irqsave(&tdma->lock, context);
+
+			job = list_entry(tdma->first_job->entry.next,
+					 struct tdma_job, entry);
+			if (job != tdma->first_job) {
+				__list_del(job->entry.prev, job->entry.next);
+
+				while (job->ref_count > 0) {
+					rtdm_lock_put_irqrestore(&tdma->lock,
+								 context);
+					msleep(100);
+					rtdm_lock_get_irqsave(&tdma->lock,
+							      context);
+				}
+			}
+
+			rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+			kfree(slot);
+			return ret;
+		}
+
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+		if (test_bit(TDMA_FLAG_MASTER, &tdma->flags)) {
+			u32 cycle_no = (volatile u32)tdma->current_cycle;
+			u64 cycle_ms;
+
+			/* switch back to [backup] master mode */
+			if (test_bit(TDMA_FLAG_BACKUP_MASTER, &tdma->flags))
+				tdma->sync_job.id = BACKUP_SYNC;
+			else
+				tdma->sync_job.id = XMIT_SYNC;
+
+			/* wait 2 cycle periods for the mode switch */
+			cycle_ms = tdma->cycle_period;
+			do_div(cycle_ms, 1000000);
+			if (cycle_ms == 0)
+				cycle_ms = 1;
+			msleep(2 * cycle_ms);
+
+			/* catch the very unlikely case that the current master died
+               while we just switched the mode */
+			if (cycle_no == (volatile u32)tdma->current_cycle) {
+				kfree(slot);
+				return -ETIME;
+			}
+		}
+#endif /* CONFIG_XENO_DRIVERS_NET_TDMA_MASTER */
+
+		set_bit(TDMA_FLAG_CALIBRATED, &tdma->flags);
+	}
+
+	slot->head.id = id;
+	slot->head.ref_count = 0;
+	slot->period = cfg->args.set_slot.period;
+	slot->phasing = cfg->args.set_slot.phasing;
+	slot->mtu = cfg->args.set_slot.size;
+	slot->size = cfg->args.set_slot.size + rtdev->hard_header_len;
+	slot->offset = cfg->args.set_slot.offset;
+	slot->queue = &slot->local_queue;
+	rtskb_prio_queue_init(&slot->local_queue);
+
+	if (jnt_id >= 0) /* all other validation tests performed above */
+		slot->queue = tdma->slot_table[jnt_id]->queue;
+
+	old_slot = tdma->slot_table[id];
+	if ((id == DEFAULT_NRT_SLOT) &&
+	    (old_slot == tdma->slot_table[DEFAULT_SLOT]))
+		old_slot = NULL;
+
+restart:
+	job_list_revision = tdma->job_list_revision;
+
+	if (!old_slot) {
+		job = tdma->first_job;
+		while (1) {
+			prev_job = job;
+			job = list_entry(job->entry.next, struct tdma_job,
+					 entry);
+			if (((job->id >= 0) &&
+			     ((slot->offset < SLOT_JOB(job)->offset) ||
+			      ((slot->offset == SLOT_JOB(job)->offset) &&
+			       (slot->head.id <= SLOT_JOB(job)->head.id)))) ||
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+			    ((job->id == XMIT_RPL_CAL) &&
+			     (slot->offset <
+			      REPLY_CAL_JOB(job)->reply_offset)) ||
+#endif /* CONFIG_XENO_DRIVERS_NET_TDMA_MASTER */
+			    (job == tdma->first_job))
+				break;
+		}
+
+	} else
+		prev_job = list_entry(old_slot->head.entry.prev,
+				      struct tdma_job, entry);
+
+	rtdm_lock_get_irqsave(&tdma->lock, context);
+
+	if (job_list_revision != tdma->job_list_revision) {
+		rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+		msleep(100);
+		goto restart;
+	}
+
+	if (old_slot)
+		__list_del(old_slot->head.entry.prev,
+			   old_slot->head.entry.next);
+
+	list_add(&slot->head.entry, &prev_job->entry);
+	tdma->slot_table[id] = slot;
+	if ((id == DEFAULT_SLOT) &&
+	    (tdma->slot_table[DEFAULT_NRT_SLOT] == old_slot))
+		tdma->slot_table[DEFAULT_NRT_SLOT] = slot;
+
+	if (old_slot) {
+		while (old_slot->head.ref_count > 0) {
+			rtdm_lock_put_irqrestore(&tdma->lock, context);
+			msleep(100);
+			rtdm_lock_get_irqsave(&tdma->lock, context);
+		}
+
+		rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+		/* search for other slots linked to the old one */
+		for (jnt_id = 0; jnt_id < tdma->max_slot_id; jnt_id++)
+			if ((tdma->slot_table[jnt_id] != 0) &&
+			    (tdma->slot_table[jnt_id]->queue ==
+			     &old_slot->local_queue)) {
+				/* found a joint slot, move or detach it now */
+				rtdm_lock_get_irqsave(&tdma->lock, context);
+
+				while (tdma->slot_table[jnt_id]->head.ref_count >
+				       0) {
+					rtdm_lock_put_irqrestore(&tdma->lock,
+								 context);
+					msleep(100);
+					rtdm_lock_get_irqsave(&tdma->lock,
+							      context);
+				}
+
+				/* If the new slot size is larger, detach the other slot,
+                 * update it otherwise. */
+				if (slot->mtu > tdma->slot_table[jnt_id]->mtu)
+					tdma->slot_table[jnt_id]->queue =
+						&tdma->slot_table[jnt_id]
+							 ->local_queue;
+				else {
+					tdma->slot_table[jnt_id]->mtu =
+						slot->mtu;
+					tdma->slot_table[jnt_id]->queue =
+						slot->queue;
+				}
+
+				rtdm_lock_put_irqrestore(&tdma->lock, context);
+			}
+	} else
+		rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+	rtmac_vnic_set_max_mtu(rtdev, cfg->args.set_slot.size);
+
+	if (old_slot) {
+		/* prevent the formerly joint queue from being purged */
+		old_slot->queue = &old_slot->local_queue;
+
+		/* Without any reference to the old job and no joint slots we can
+         * safely purge its queue without lock protection.
+         * NOTE: Reconfiguring a slot during runtime may lead to packet
+         *       drops! */
+		while ((rtskb = __rtskb_prio_dequeue(old_slot->queue)))
+			kfree_rtskb(rtskb);
+
+		kfree(old_slot);
+	}
+
+	return 0;
+}
+
+int tdma_cleanup_slot(struct tdma_priv *tdma, struct tdma_slot *slot)
+{
+	struct rtskb *rtskb;
+	unsigned int id, jnt_id;
+	rtdm_lockctx_t context;
+
+	if (!slot)
+		return -EINVAL;
+
+	id = slot->head.id;
+
+	rtdm_lock_get_irqsave(&tdma->lock, context);
+
+	__list_del(slot->head.entry.prev, slot->head.entry.next);
+
+	if (id == DEFAULT_NRT_SLOT)
+		tdma->slot_table[DEFAULT_NRT_SLOT] =
+			tdma->slot_table[DEFAULT_SLOT];
+	else {
+		if ((id == DEFAULT_SLOT) &&
+		    (tdma->slot_table[DEFAULT_NRT_SLOT] == slot))
+			tdma->slot_table[DEFAULT_NRT_SLOT] = NULL;
+		tdma->slot_table[id] = NULL;
+	}
+
+	while (slot->head.ref_count > 0) {
+		rtdm_lock_put_irqrestore(&tdma->lock, context);
+		msleep(100);
+		rtdm_lock_get_irqsave(&tdma->lock, context);
+	}
+
+	rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+	/* search for other slots linked to this one */
+	for (jnt_id = 0; jnt_id < tdma->max_slot_id; jnt_id++)
+		if ((tdma->slot_table[jnt_id] != 0) &&
+		    (tdma->slot_table[jnt_id]->queue == &slot->local_queue)) {
+			/* found a joint slot, detach it now under lock protection */
+			rtdm_lock_get_irqsave(&tdma->lock, context);
+
+			while (tdma->slot_table[jnt_id]->head.ref_count > 0) {
+				rtdm_lock_put_irqrestore(&tdma->lock, context);
+				msleep(100);
+				rtdm_lock_get_irqsave(&tdma->lock, context);
+			}
+			tdma->slot_table[jnt_id]->queue =
+				&tdma->slot_table[jnt_id]->local_queue;
+
+			rtdm_lock_put_irqrestore(&tdma->lock, context);
+		}
+
+	/* prevent the formerly joint queue from being purged */
+	slot->queue = &slot->local_queue;
+
+	/* No need to protect the queue access here -
+     * no one is referring to this job anymore
+     * (ref_count == 0, all joint slots detached). */
+	while ((rtskb = __rtskb_prio_dequeue(slot->queue)))
+		kfree_rtskb(rtskb);
+
+	kfree(slot);
+
+	return 0;
+}
+
+static int tdma_ioctl_remove_slot(struct rtnet_device *rtdev,
+				  struct tdma_config *cfg)
+{
+	struct tdma_priv *tdma;
+	int id;
+
+	if (rtdev->mac_priv == NULL)
+		return -ENOTTY;
+
+	tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+	if (tdma->magic != TDMA_MAGIC)
+		return -ENOTTY;
+
+	id = cfg->args.remove_slot.id;
+	if (id > tdma->max_slot_id)
+		return -EINVAL;
+
+	if ((id == DEFAULT_NRT_SLOT) && (tdma->slot_table[DEFAULT_NRT_SLOT] ==
+					 tdma->slot_table[DEFAULT_SLOT]))
+		return -EINVAL;
+
+	return tdma_cleanup_slot(tdma, tdma->slot_table[id]);
+}
+
+static int tdma_ioctl_detach(struct rtnet_device *rtdev)
+{
+	struct tdma_priv *tdma;
+	int ret;
+
+	if (rtdev->mac_priv == NULL)
+		return -ENOTTY;
+
+	tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+	if (tdma->magic != TDMA_MAGIC)
+		return -ENOTTY;
+
+	ret = rtmac_disc_detach(rtdev);
+
+	return ret;
+}
+
+int tdma_ioctl(struct rtnet_device *rtdev, unsigned int request,
+	       unsigned long arg)
+{
+	struct tdma_config cfg;
+	int ret;
+
+	ret = copy_from_user(&cfg, (void *)arg, sizeof(cfg));
+	if (ret != 0)
+		return -EFAULT;
+
+	if (mutex_lock_interruptible(&rtdev->nrt_lock))
+		return -ERESTARTSYS;
+
+	switch (request) {
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+	case TDMA_IOC_MASTER:
+		ret = tdma_ioctl_master(rtdev, &cfg);
+		break;
+#endif
+	case TDMA_IOC_SLAVE:
+		ret = tdma_ioctl_slave(rtdev, &cfg);
+		break;
+
+	case TDMA_IOC_CAL_RESULT_SIZE:
+		ret = tdma_ioctl_cal_result_size(rtdev, &cfg);
+		break;
+
+	case TDMA_IOC_SET_SLOT:
+		ret = tdma_ioctl_set_slot(rtdev, &cfg);
+		break;
+
+	case TDMA_IOC_REMOVE_SLOT:
+		ret = tdma_ioctl_remove_slot(rtdev, &cfg);
+		break;
+
+	case TDMA_IOC_DETACH:
+		ret = tdma_ioctl_detach(rtdev);
+		break;
+
+	default:
+		ret = -ENOTTY;
+	}
+
+	mutex_unlock(&rtdev->nrt_lock);
+
+	return ret;
+}
+++ linux-patched/drivers/xenomai/net/stack/rtmac/tdma/tdma_dev.c	2022-03-21 12:58:30.237880692 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/tdma/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtmac/tdma/tdma_dev.c
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>
+ *                2003-2006 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/list.h>
+
+#include <rtdev.h>
+#include <rtmac.h>
+#include <rtmac/tdma/tdma.h>
+
+struct tdma_dev_ctx {
+	rtdm_task_t *cycle_waiter;
+};
+
+static int tdma_dev_open(struct rtdm_fd *fd, int oflags)
+{
+	struct tdma_dev_ctx *ctx = rtdm_fd_to_private(fd);
+
+	ctx->cycle_waiter = NULL;
+
+	return 0;
+}
+
+static void tdma_dev_close(struct rtdm_fd *fd)
+{
+	struct tdma_dev_ctx *ctx = rtdm_fd_to_private(fd);
+	rtdm_lockctx_t lock_ctx;
+
+	cobalt_atomic_enter(lock_ctx);
+	if (ctx->cycle_waiter)
+		rtdm_task_unblock(ctx->cycle_waiter);
+	cobalt_atomic_leave(lock_ctx);
+}
+
+static int wait_on_sync(struct tdma_dev_ctx *tdma_ctx, rtdm_event_t *sync_event)
+{
+	rtdm_lockctx_t lock_ctx;
+	int ret;
+
+	cobalt_atomic_enter(lock_ctx);
+	/* keep it simple: only one waiter per device instance allowed */
+	if (!tdma_ctx->cycle_waiter) {
+		tdma_ctx->cycle_waiter = rtdm_task_current();
+		ret = rtdm_event_wait(sync_event);
+		tdma_ctx->cycle_waiter = NULL;
+	} else
+		ret = -EBUSY;
+	cobalt_atomic_leave(lock_ctx);
+
+	return ret;
+}
+
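+/*
+ * Per-device API: RTMAC_RTIOC_TIMEOFFSET returns the current offset of the
+ * global TDMA clock relative to the local clock, RTMAC_RTIOC_WAITONCYCLE
+ * blocks until the next sync event, and RTMAC_RTIOC_WAITONCYCLE_EX
+ * additionally reports cycle number, cycle start and clock offset.
+ */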
+static int tdma_dev_ioctl(struct rtdm_fd *fd, unsigned int request, void *arg)
+{
+	struct tdma_dev_ctx *ctx = rtdm_fd_to_private(fd);
+	struct tdma_priv *tdma;
+	rtdm_lockctx_t lock_ctx;
+	int ret;
+
+	tdma = container_of(rtdm_fd_to_context(fd)->device, struct tdma_priv,
+			    api_device);
+
+	switch (request) {
+	case RTMAC_RTIOC_TIMEOFFSET: {
+		nanosecs_rel_t offset;
+
+		rtdm_lock_get_irqsave(&tdma->lock, lock_ctx);
+		offset = tdma->clock_offset;
+		rtdm_lock_put_irqrestore(&tdma->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd)) {
+			if (!rtdm_rw_user_ok(fd, arg, sizeof(__s64)) ||
+			    rtdm_copy_to_user(fd, arg, &offset, sizeof(__s64)))
+				return -EFAULT;
+		} else
+			*(__s64 *)arg = offset;
+
+		return 0;
+	}
+	case RTMAC_RTIOC_WAITONCYCLE:
+		if (!rtdm_in_rt_context())
+			return -ENOSYS;
+
+		if ((long)arg != TDMA_WAIT_ON_SYNC)
+			return -EINVAL;
+
+		return wait_on_sync(ctx, &tdma->sync_event);
+
+	case RTMAC_RTIOC_WAITONCYCLE_EX: {
+		struct rtmac_waitinfo *waitinfo = (struct rtmac_waitinfo *)arg;
+		struct rtmac_waitinfo waitinfo_buf;
+
+#define WAITINFO_HEAD_SIZE                                                     \
+	((char *)&waitinfo_buf.cycle_no - (char *)&waitinfo_buf)
+
+		if (!rtdm_in_rt_context())
+			return -ENOSYS;
+
+		if (rtdm_fd_is_user(fd)) {
+			if (!rtdm_rw_user_ok(fd, waitinfo,
+					     sizeof(struct rtmac_waitinfo)) ||
+			    rtdm_copy_from_user(fd, &waitinfo_buf, arg,
+						WAITINFO_HEAD_SIZE))
+				return -EFAULT;
+
+			waitinfo = &waitinfo_buf;
+		}
+
+		if ((waitinfo->type != TDMA_WAIT_ON_SYNC) ||
+		    (waitinfo->size < sizeof(struct rtmac_waitinfo)))
+			return -EINVAL;
+
+		ret = wait_on_sync(ctx, &tdma->sync_event);
+		if (ret)
+			return ret;
+
+		rtdm_lock_get_irqsave(&tdma->lock, lock_ctx);
+		waitinfo->cycle_no = tdma->current_cycle;
+		waitinfo->cycle_start = tdma->current_cycle_start;
+		waitinfo->clock_offset = tdma->clock_offset;
+		rtdm_lock_put_irqrestore(&tdma->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd)) {
+			if (rtdm_copy_to_user(fd, arg, &waitinfo_buf,
+					      sizeof(struct rtmac_waitinfo)))
+				return -EFAULT;
+		}
+
+		return 0;
+	}
+	default:
+		return -ENOTTY;
+	}
+}
+
+static struct rtdm_driver tdma_driver = { .profile_info = RTDM_PROFILE_INFO(
+						  tdma, RTDM_CLASS_RTMAC,
+						  RTDM_SUBCLASS_TDMA,
+						  RTNET_RTDM_VER),
+					  .device_flags = RTDM_NAMED_DEVICE,
+					  .device_count = 1,
+					  .context_size =
+						  sizeof(struct tdma_dev_ctx),
+					  .ops = {
+						  .open = tdma_dev_open,
+						  .ioctl_rt = tdma_dev_ioctl,
+						  .ioctl_nrt = tdma_dev_ioctl,
+						  .close = tdma_dev_close,
+					  } };
+
+int tdma_dev_init(struct rtnet_device *rtdev, struct tdma_priv *tdma)
+{
+	char *pos;
+
+	strcpy(tdma->device_name, "TDMA");
+	for (pos = rtdev->name + strlen(rtdev->name) - 1;
+	     (pos >= rtdev->name) && ((*pos) >= '0') && (*pos <= '9'); pos--)
+		;
+	strncat(tdma->device_name + 4, pos + 1, IFNAMSIZ - 4);
+
+	tdma->api_driver = tdma_driver;
+	tdma->api_device.driver = &tdma->api_driver;
+	tdma->api_device.label = tdma->device_name;
+
+	return rtdm_dev_register(&tdma->api_device);
+}
+++ linux-patched/drivers/xenomai/net/stack/rtmac/tdma/Kconfig	2022-03-21 12:58:30.230880760 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/tdma/tdma_module.c	1970-01-01 01:00:00.000000000 +0100
+config XENO_DRIVERS_NET_TDMA
+    tristate "TDMA discipline for RTmac"
+    depends on XENO_DRIVERS_NET_RTMAC
+    default y
+    help
+    The Time Division Multiple Access discipline is the default RTmac
+    protocol for Ethernet networks. It consists of a master synchronising
+    the slaves' access to the medium by periodically issuing sync frames.
+    Backup masters can be set up to take over if the primary master fails.
+    TDMA also provides a global clock across all participants. The tdmacfg
+    tool can be used to configure a real-time NIC to use TDMA.
+
+    See Documentation/README.rtmac for further details.
+
+config XENO_DRIVERS_NET_TDMA_MASTER
+    bool "TDMA master support"
+    depends on XENO_DRIVERS_NET_TDMA
+    default y
+    help
+    Enables TDMA master and backup master support for the node. This can
+    be switched off to reduce the memory footprint of pure slave nodes.
+++ linux-patched/drivers/xenomai/net/stack/rtmac/tdma/tdma_module.c	2022-03-21 12:58:30.222880838 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/tdma/Makefile	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtmac/tdma/tdma_module.c
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <asm/div64.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <rtdm/driver.h>
+#include <rtmac/rtmac_vnic.h>
+#include <rtmac/tdma/tdma.h>
+#include <rtmac/tdma/tdma_dev.h>
+#include <rtmac/tdma/tdma_ioctl.h>
+#include <rtmac/tdma/tdma_proto.h>
+#include <rtmac/tdma/tdma_worker.h>
+
+#ifdef CONFIG_XENO_OPT_VFILE
+int tdma_proc_read(struct xnvfile_regular_iterator *it, void *data)
+{
+	int d, err = 0;
+	struct rtnet_device *rtdev;
+	struct tdma_priv *tdma;
+	const char *state;
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+	u64 cycle;
+#endif
+
+	xnvfile_printf(it, "Interface       API Device      Operation Mode  "
+			   "Cycle   State\n");
+
+	for (d = 1; d <= MAX_RT_DEVICES; d++) {
+		rtdev = rtdev_get_by_index(d);
+		if (!rtdev)
+			continue;
+
+		err = mutex_lock_interruptible(&rtdev->nrt_lock);
+		if (err < 0) {
+			rtdev_dereference(rtdev);
+			break;
+		}
+
+		if (!rtdev->mac_priv)
+			goto unlock_dev;
+		tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+
+		xnvfile_printf(it, "%-15s %-15s ", rtdev->name,
+			       tdma->api_device.name);
+
+		if (test_bit(TDMA_FLAG_CALIBRATED, &tdma->flags)) {
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+			if (test_bit(TDMA_FLAG_BACKUP_MASTER, &tdma->flags) &&
+			    !test_bit(TDMA_FLAG_BACKUP_ACTIVE, &tdma->flags))
+				state = "stand-by";
+			else
+#endif /* CONFIG_XENO_DRIVERS_NET_TDMA_MASTER */
+				state = "active";
+		} else
+			state = "init";
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+		if (test_bit(TDMA_FLAG_MASTER, &tdma->flags)) {
+			cycle = tdma->cycle_period + 500;
+			do_div(cycle, 1000);
+			if (test_bit(TDMA_FLAG_BACKUP_MASTER, &tdma->flags))
+				xnvfile_printf(it, "Backup Master   %-7ld %s\n",
+					       (unsigned long)cycle, state);
+			else
+				xnvfile_printf(it, "Master          %-7ld %s\n",
+					       (unsigned long)cycle, state);
+		} else
+#endif /* CONFIG_XENO_DRIVERS_NET_TDMA_MASTER */
+			xnvfile_printf(it, "Slave           -       %s\n",
+				       state);
+
+	unlock_dev:
+		mutex_unlock(&rtdev->nrt_lock);
+		rtdev_dereference(rtdev);
+	}
+
+	return err;
+}
+
+int tdma_slots_proc_read(struct xnvfile_regular_iterator *it, void *data)
+{
+	int d, i, err = 0;
+	struct rtnet_device *rtdev;
+	struct tdma_priv *tdma;
+	struct tdma_slot *slot;
+	int jnt_id;
+	u64 slot_offset;
+
+	xnvfile_printf(it, "Interface       "
+			   "Slots (id[->joint]:offset:phasing/period:size)\n");
+
+	for (d = 1; d <= MAX_RT_DEVICES; d++) {
+		rtdev = rtdev_get_by_index(d);
+		if (!rtdev)
+			continue;
+
+		err = mutex_lock_interruptible(&rtdev->nrt_lock);
+		if (err < 0) {
+			rtdev_dereference(rtdev);
+			break;
+		}
+
+		if (!rtdev->mac_priv)
+			goto unlock_dev;
+		tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+
+		xnvfile_printf(it, "%-15s ", rtdev->name);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+		if (test_bit(TDMA_FLAG_BACKUP_MASTER, &tdma->flags)) {
+			slot_offset = tdma->backup_sync_inc -
+				      tdma->cycle_period + 500;
+			do_div(slot_offset, 1000);
+			xnvfile_printf(it, "bak:%ld  ",
+				       (unsigned long)slot_offset);
+		}
+#endif /* CONFIG_XENO_DRIVERS_NET_TDMA_MASTER */
+
+		if (tdma->slot_table)
+			for (i = 0; i <= tdma->max_slot_id; i++) {
+				slot = tdma->slot_table[i];
+				if (!slot ||
+				    ((i == DEFAULT_NRT_SLOT) &&
+				     (tdma->slot_table[DEFAULT_SLOT] == slot)))
+					continue;
+
+				if (slot->queue == &slot->local_queue) {
+					xnvfile_printf(it, "%d", i);
+				} else
+					for (jnt_id = 0;
+					     jnt_id <= tdma->max_slot_id;
+					     jnt_id++)
+						if (&tdma->slot_table[jnt_id]
+							     ->local_queue ==
+						    slot->queue) {
+							xnvfile_printf(it,
+								       "%d->%d",
+								       i,
+								       jnt_id);
+							break;
+						}
+
+				slot_offset = slot->offset + 500;
+				do_div(slot_offset, 1000);
+				xnvfile_printf(it, ":%ld:%d/%d:%d  ",
+					       (unsigned long)slot_offset,
+					       slot->phasing + 1, slot->period,
+					       slot->mtu);
+			}
+
+		xnvfile_printf(it, "\n");
+
+	unlock_dev:
+		mutex_unlock(&rtdev->nrt_lock);
+		rtdev_dereference(rtdev);
+	}
+
+	return err;
+}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+int tdma_attach(struct rtnet_device *rtdev, void *priv)
+{
+	struct tdma_priv *tdma = (struct tdma_priv *)priv;
+	int ret;
+
+	memset(tdma, 0, sizeof(struct tdma_priv));
+
+	tdma->magic = TDMA_MAGIC;
+	tdma->rtdev = rtdev;
+
+	rtdm_lock_init(&tdma->lock);
+
+	rtdm_event_init(&tdma->worker_wakeup, 0);
+	rtdm_event_init(&tdma->xmit_event, 0);
+	rtdm_event_init(&tdma->sync_event, 0);
+
+	ret = tdma_dev_init(rtdev, tdma);
+	if (ret < 0)
+		goto err_out1;
+
+	ret = rtdm_task_init(&tdma->worker_task, "rtnet-tdma", tdma_worker,
+			     tdma, DEF_WORKER_PRIO, 0);
+	if (ret != 0)
+		goto err_out2;
+
+	return 0;
+
+err_out2:
+	tdma_dev_release(tdma);
+
+err_out1:
+	rtdm_event_destroy(&tdma->sync_event);
+	rtdm_event_destroy(&tdma->xmit_event);
+	rtdm_event_destroy(&tdma->worker_wakeup);
+
+	return ret;
+}
+
+int tdma_detach(struct rtnet_device *rtdev, void *priv)
+{
+	struct tdma_priv *tdma = (struct tdma_priv *)priv;
+	struct tdma_job *job, *tmp;
+
+	rtdm_event_destroy(&tdma->sync_event);
+	rtdm_event_destroy(&tdma->xmit_event);
+	rtdm_event_destroy(&tdma->worker_wakeup);
+
+	tdma_dev_release(tdma);
+
+	rtdm_task_destroy(&tdma->worker_task);
+
+	list_for_each_entry_safe (job, tmp, &tdma->first_job->entry, entry) {
+		if (job->id >= 0)
+			tdma_cleanup_slot(tdma, SLOT_JOB(job));
+		else if (job->id == XMIT_RPL_CAL) {
+			__list_del(job->entry.prev, job->entry.next);
+			kfree_rtskb(REPLY_CAL_JOB(job)->reply_rtskb);
+		}
+	}
+
+	if (tdma->slot_table)
+		kfree(tdma->slot_table);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+	if (test_bit(TDMA_FLAG_MASTER, &tdma->flags))
+		rtskb_pool_release(&tdma->cal_rtskb_pool);
+#endif
+
+	return 0;
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+struct rtmac_proc_entry tdma_proc_entries[] = {
+	{ name: "tdma", handler: tdma_proc_read },
+	{ name: "tdma_slots", handler: tdma_slots_proc_read },
+};
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct rtmac_disc tdma_disc = {
+	name: "TDMA",
+	priv_size: sizeof(struct tdma_priv),
+	disc_type: __constant_htons(RTMAC_TYPE_TDMA),
+
+	packet_rx: tdma_packet_rx,
+	rt_packet_tx: tdma_rt_packet_tx,
+	nrt_packet_tx: tdma_nrt_packet_tx,
+
+	get_mtu: tdma_get_mtu,
+
+	vnic_xmit: RTMAC_DEFAULT_VNIC,
+
+	attach: tdma_attach,
+	detach: tdma_detach,
+
+	ioctls: {
+		service_name: "RTmac/TDMA",
+		ioctl_type: RTNET_IOC_TYPE_RTMAC_TDMA,
+		handler: tdma_ioctl
+	},
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	proc_entries: tdma_proc_entries,
+	nr_proc_entries: ARRAY_SIZE(tdma_proc_entries),
+#endif /* CONFIG_XENO_OPT_VFILE */
+};
+
+int __init tdma_init(void)
+{
+	int ret;
+
+	printk("RTmac/TDMA: init time division multiple access control "
+	       "mechanism\n");
+
+	ret = rtmac_disc_register(&tdma_disc);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+void tdma_release(void)
+{
+	rtmac_disc_deregister(&tdma_disc);
+
+	printk("RTmac/TDMA: unloaded\n");
+}
+
+module_init(tdma_init);
+module_exit(tdma_release);
+
+MODULE_AUTHOR("Jan Kiszka");
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/net/stack/rtmac/tdma/Makefile	2022-03-21 12:58:30.215880907 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/tdma/tdma_proto.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_TDMA) += tdma.o
+
+tdma-y := \
+	tdma_dev.o \
+	tdma_ioctl.o \
+	tdma_module.o \
+	tdma_proto.o \
+	tdma_worker.o
+++ linux-patched/drivers/xenomai/net/stack/rtmac/tdma/tdma_proto.c	2022-03-21 12:58:30.207880985 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtmac/tdma/tdma_proto.c
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/init.h>
+#include "asm/div64.h"
+
+#include <rtdev.h>
+#include <rtmac/rtmac_proto.h>
+#include <rtmac/tdma/tdma_proto.h>
+
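+/*
+ * Broadcast a sync frame carrying the current cycle number and the scheduled
+ * transmission time; rtskb->xmit_stamp is pointed at the frame so that a
+ * time-stamping capable driver can insert the actual transmission time.
+ * Local waiters on sync_event are signalled afterwards.
+ */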
+void tdma_xmit_sync_frame(struct tdma_priv *tdma)
+{
+	struct rtnet_device *rtdev = tdma->rtdev;
+	struct rtskb *rtskb;
+	struct tdma_frm_sync *sync;
+
+	rtskb = alloc_rtskb(rtdev->hard_header_len + sizeof(struct rtmac_hdr) +
+				    sizeof(struct tdma_frm_sync) + 15,
+			    &global_pool);
+	if (!rtskb)
+		goto err_out;
+
+	rtskb_reserve(rtskb,
+		      (rtdev->hard_header_len + sizeof(struct rtmac_hdr) + 15) &
+			      ~15);
+
+	sync = (struct tdma_frm_sync *)rtskb_put(rtskb,
+						 sizeof(struct tdma_frm_sync));
+
+	if (rtmac_add_header(rtdev, rtdev->broadcast, rtskb, RTMAC_TYPE_TDMA,
+			     0) < 0) {
+		kfree_rtskb(rtskb);
+		goto err_out;
+	}
+
+	sync->head.version = __constant_htons(TDMA_FRM_VERSION);
+	sync->head.id = __constant_htons(TDMA_FRM_SYNC);
+
+	sync->cycle_no = htonl(tdma->current_cycle);
+	sync->xmit_stamp = tdma->clock_offset;
+	sync->sched_xmit_stamp =
+		cpu_to_be64(tdma->clock_offset + tdma->current_cycle_start);
+
+	rtskb->xmit_stamp = &sync->xmit_stamp;
+
+	rtmac_xmit(rtskb);
+
+	/* signal local waiters */
+	rtdm_event_pulse(&tdma->sync_event);
+
+	return;
+
+err_out:
+	/*ERROR*/ rtdm_printk("TDMA: Failed to transmit sync frame!\n");
+	return;
+}
+
+int tdma_xmit_request_cal_frame(struct tdma_priv *tdma, u32 reply_cycle,
+				u64 reply_slot_offset)
+{
+	struct rtnet_device *rtdev = tdma->rtdev;
+	struct rtskb *rtskb;
+	struct tdma_frm_req_cal *req_cal;
+	int ret;
+
+	rtskb = alloc_rtskb(rtdev->hard_header_len + sizeof(struct rtmac_hdr) +
+				    sizeof(struct tdma_frm_req_cal) + 15,
+			    &global_pool);
+	ret = -ENOMEM;
+	if (!rtskb)
+		goto err_out;
+
+	rtskb_reserve(rtskb,
+		      (rtdev->hard_header_len + sizeof(struct rtmac_hdr) + 15) &
+			      ~15);
+
+	req_cal = (struct tdma_frm_req_cal *)rtskb_put(
+		rtskb, sizeof(struct tdma_frm_req_cal));
+
+	if ((ret = rtmac_add_header(rtdev, tdma->master_hw_addr, rtskb,
+				    RTMAC_TYPE_TDMA, 0)) < 0) {
+		kfree_rtskb(rtskb);
+		goto err_out;
+	}
+
+	req_cal->head.version = __constant_htons(TDMA_FRM_VERSION);
+	req_cal->head.id = __constant_htons(TDMA_FRM_REQ_CAL);
+
+	req_cal->xmit_stamp = 0;
+	req_cal->reply_cycle = htonl(reply_cycle);
+	req_cal->reply_slot_offset = cpu_to_be64(reply_slot_offset);
+
+	rtskb->xmit_stamp = &req_cal->xmit_stamp;
+
+	ret = rtmac_xmit(rtskb);
+	if (ret < 0)
+		goto err_out;
+
+	return 0;
+
+err_out:
+	/*ERROR*/ rtdm_printk("TDMA: Failed to transmit request calibration "
+			      "frame!\n");
+	return ret;
+}
+
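+/*
+ * Enqueue an outgoing real-time packet into the slot selected by the channel
+ * encoded in rtskb->priority; the worker transmits it once the slot is due.
+ * Returns -EAGAIN if no such slot exists, -EMSGSIZE if the packet exceeds
+ * the slot size.
+ */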
+int tdma_rt_packet_tx(struct rtskb *rtskb, struct rtnet_device *rtdev)
+{
+	struct tdma_priv *tdma;
+	rtdm_lockctx_t context;
+	struct tdma_slot *slot;
+	int ret = 0;
+
+	tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+
+	rtcap_mark_rtmac_enqueue(rtskb);
+
+	rtdm_lock_get_irqsave(&tdma->lock, context);
+
+	slot = tdma->slot_table[(rtskb->priority & RTSKB_CHANNEL_MASK) >>
+				RTSKB_CHANNEL_SHIFT];
+
+	if (unlikely(!slot)) {
+		ret = -EAGAIN;
+		goto err_out;
+	}
+
+	if (unlikely(rtskb->len > slot->size)) {
+		ret = -EMSGSIZE;
+		goto err_out;
+	}
+
+	__rtskb_prio_queue_tail(slot->queue, rtskb);
+
+err_out:
+	rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+	return ret;
+}
+
+int tdma_nrt_packet_tx(struct rtskb *rtskb)
+{
+	struct tdma_priv *tdma;
+	rtdm_lockctx_t context;
+	struct tdma_slot *slot;
+	int ret = 0;
+
+	tdma = (struct tdma_priv *)rtskb->rtdev->mac_priv->disc_priv;
+
+	rtcap_mark_rtmac_enqueue(rtskb);
+
+	rtskb->priority = RTSKB_PRIO_VALUE(QUEUE_MIN_PRIO, DEFAULT_NRT_SLOT);
+
+	rtdm_lock_get_irqsave(&tdma->lock, context);
+
+	slot = tdma->slot_table[DEFAULT_NRT_SLOT];
+
+	if (unlikely(!slot)) {
+		ret = -EAGAIN;
+		goto err_out;
+	}
+
+	if (unlikely(rtskb->len > slot->size)) {
+		ret = -EMSGSIZE;
+		goto err_out;
+	}
+
+	__rtskb_prio_queue_tail(slot->queue, rtskb);
+
+err_out:
+	rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+	return ret;
+}
+
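+/*
+ * Dispatch received TDMA frames: SYNC updates cycle number, cycle start and
+ * clock offset and wakes sync waiters; REQ_CAL (master only) builds and
+ * schedules a calibration reply; RPL_CAL feeds the measured packet delay
+ * into the pending calibration call.
+ */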
+int tdma_packet_rx(struct rtskb *rtskb)
+{
+	struct tdma_priv *tdma;
+	struct tdma_frm_head *head;
+	u64 delay;
+	u64 cycle_start;
+	nanosecs_rel_t clock_offset;
+	struct rt_proc_call *call;
+	struct tdma_request_cal *req_cal_job;
+	rtdm_lockctx_t context;
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+	struct rtskb *reply_rtskb;
+	struct rtnet_device *rtdev;
+	struct tdma_frm_rpl_cal *rpl_cal_frm;
+	struct tdma_reply_cal *rpl_cal_job;
+	struct tdma_job *job;
+#endif
+
+	tdma = (struct tdma_priv *)rtskb->rtdev->mac_priv->disc_priv;
+
+	head = (struct tdma_frm_head *)rtskb->data;
+
+	if (head->version != __constant_htons(TDMA_FRM_VERSION))
+		goto kfree_out;
+
+	switch (head->id) {
+	case __constant_htons(TDMA_FRM_SYNC):
+		rtskb_pull(rtskb, sizeof(struct tdma_frm_sync));
+
+		/* see "Time Arithmetics" in the TDMA specification */
+		clock_offset = be64_to_cpu(SYNC_FRM(head)->xmit_stamp) +
+			       tdma->master_packet_delay_ns;
+		clock_offset -= rtskb->time_stamp;
+
+		cycle_start = be64_to_cpu(SYNC_FRM(head)->sched_xmit_stamp) -
+			      clock_offset;
+
+		rtdm_lock_get_irqsave(&tdma->lock, context);
+		tdma->current_cycle = ntohl(SYNC_FRM(head)->cycle_no);
+		tdma->current_cycle_start = cycle_start;
+		tdma->clock_offset = clock_offset;
+		rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+		/* note: Ethernet-specific! */
+		memcpy(tdma->master_hw_addr, rtskb->mac.ethernet->h_source,
+		       ETH_ALEN);
+
+		set_bit(TDMA_FLAG_RECEIVED_SYNC, &tdma->flags);
+
+		rtdm_event_pulse(&tdma->sync_event);
+		break;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+	case __constant_htons(TDMA_FRM_REQ_CAL):
+		RTNET_ASSERT(test_bit(TDMA_FLAG_MASTER, &tdma->flags) &&
+				     test_bit(TDMA_FLAG_CALIBRATED,
+					      &tdma->flags),
+			     break;);
+
+		rtskb_pull(rtskb, sizeof(struct tdma_frm_req_cal));
+
+		rtdev = rtskb->rtdev;
+
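+		/* the extra 15 bytes allow the headroom reserved below to be
+		 * rounded up to a multiple of 16 */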
+		reply_rtskb = alloc_rtskb(
+			rtdev->hard_header_len + sizeof(struct rtmac_hdr) +
+				sizeof(struct tdma_frm_rpl_cal) + 15,
+			&tdma->cal_rtskb_pool);
+		if (unlikely(!reply_rtskb)) {
+			/*ERROR*/ rtdm_printk(
+				"TDMA: Too many calibration requests "
+				"pending!\n");
+			break;
+		}
+
+		rtskb_reserve(reply_rtskb, (rtdev->hard_header_len +
+					    sizeof(struct rtmac_hdr) + 15) &
+						   ~15);
+
+		rpl_cal_frm = (struct tdma_frm_rpl_cal *)rtskb_put(
+			reply_rtskb, sizeof(struct tdma_frm_rpl_cal));
+
+		/* note: Ethernet-specific! */
+		if (unlikely(rtmac_add_header(
+				     rtdev, rtskb->mac.ethernet->h_source,
+				     reply_rtskb, RTMAC_TYPE_TDMA, 0) < 0)) {
+			kfree_rtskb(reply_rtskb);
+			break;
+		}
+
+		rpl_cal_frm->head.version = __constant_htons(TDMA_FRM_VERSION);
+		rpl_cal_frm->head.id = __constant_htons(TDMA_FRM_RPL_CAL);
+
+		rpl_cal_frm->request_xmit_stamp = REQ_CAL_FRM(head)->xmit_stamp;
+		rpl_cal_frm->reception_stamp = cpu_to_be64(rtskb->time_stamp);
+		rpl_cal_frm->xmit_stamp = 0;
+
+		reply_rtskb->xmit_stamp = &rpl_cal_frm->xmit_stamp;
+
+		/* use reply_rtskb memory behind the frame as job buffer */
+		rpl_cal_job = (struct tdma_reply_cal *)reply_rtskb->tail;
+		RTNET_ASSERT(reply_rtskb->tail +
+					     sizeof(struct tdma_reply_cal) <=
+				     reply_rtskb->buf_end,
+			     rtskb_over_panic(reply_rtskb,
+					      sizeof(struct tdma_reply_cal),
+					      current_text_addr()););
+
+		rpl_cal_job->head.id = XMIT_RPL_CAL;
+		rpl_cal_job->head.ref_count = 0;
+		rpl_cal_job->reply_cycle =
+			ntohl(REQ_CAL_FRM(head)->reply_cycle);
+		rpl_cal_job->reply_rtskb = reply_rtskb;
+		rpl_cal_job->reply_offset =
+			be64_to_cpu(REQ_CAL_FRM(head)->reply_slot_offset);
+
+		rtdm_lock_get_irqsave(&tdma->lock, context);
+
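+		/*
+		 * Walk the job list backwards from the current job to find
+		 * the position that keeps the jobs ordered by slot offset,
+		 * then queue the calibration reply there.
+		 */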
+		job = tdma->current_job;
+		while (1) {
+			job = list_entry(job->entry.prev, struct tdma_job,
+					 entry);
+			if ((job == tdma->first_job) ||
+			    ((job->id >= 0) && (SLOT_JOB(job)->offset <
+						rpl_cal_job->reply_offset)) ||
+			    ((job->id == XMIT_RPL_CAL) &&
+			     (REPLY_CAL_JOB(job)->reply_offset <
+			      rpl_cal_job->reply_offset)))
+				break;
+		}
+		list_add(&rpl_cal_job->head.entry, &job->entry);
+		tdma->job_list_revision++;
+
+		rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+		break;
+#endif
+
+	case __constant_htons(TDMA_FRM_RPL_CAL):
+		rtskb_pull(rtskb, sizeof(struct tdma_frm_rpl_cal));
+
+		/* see "Time Arithmetics" in the TDMA specification */
+		delay = (rtskb->time_stamp -
+			 be64_to_cpu(RPL_CAL_FRM(head)->request_xmit_stamp)) -
+			(be64_to_cpu(RPL_CAL_FRM(head)->xmit_stamp) -
+			 be64_to_cpu(RPL_CAL_FRM(head)->reception_stamp));
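+		/* halve the round-trip time (minus the remote station's
+		 * turnaround time) to get the one-way delay, rounding up */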
+		delay = (delay + 1) >> 1;
+
+		rtdm_lock_get_irqsave(&tdma->lock, context);
+
+		call = tdma->calibration_call;
+		if (call == NULL) {
+			rtdm_lock_put_irqrestore(&tdma->lock, context);
+			break;
+		}
+		req_cal_job = rtpc_get_priv(call, struct tdma_request_cal);
+
+		req_cal_job->result_buffer[--req_cal_job->cal_rounds] = delay;
+
+		if (req_cal_job->cal_rounds > 0) {
+			tdma->job_list_revision++;
+			list_add(&req_cal_job->head.entry,
+				 &tdma->first_job->entry);
+
+			rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+		} else {
+			tdma->calibration_call = NULL;
+
+			rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+			rtpc_complete_call(call, 0);
+		}
+
+		break;
+
+	default:
+		/*ERROR*/ rtdm_printk("TDMA: Unknown frame %d!\n",
+				      ntohs(head->id));
+	}
+
+kfree_out:
+	kfree_rtskb(rtskb);
+	return 0;
+}
+
+unsigned int tdma_get_mtu(struct rtnet_device *rtdev, unsigned int priority)
+{
+	struct tdma_priv *tdma;
+	rtdm_lockctx_t context;
+	struct tdma_slot *slot;
+	unsigned int mtu;
+
+	tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+
+	rtdm_lock_get_irqsave(&tdma->lock, context);
+
+	slot = tdma->slot_table[(priority & RTSKB_CHANNEL_MASK) >>
+				RTSKB_CHANNEL_SHIFT];
+
+	if (unlikely(!slot)) {
+		mtu = rtdev->mtu;
+		goto out;
+	}
+
+	mtu = slot->mtu;
+
+out:
+	rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+	return mtu;
+}
+++ linux-patched/drivers/xenomai/net/stack/rtmac/Kconfig	2022-03-21 12:58:30.200881053 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/rtmac_proto.c	1970-01-01 01:00:00.000000000 +0100
+menuconfig XENO_DRIVERS_NET_RTMAC
+    depends on XENO_DRIVERS_NET
+    tristate "RTmac Layer"
+    default y
+    help
+    The Real-Time Media Access Control layer allows the RTnet stack to be
+    extended with software-based access control mechanisms (also called
+    disciplines) for nondeterministic transport media. Disciplines can be
+    attached and detached per real-time device. RTmac also provides a
+    framework for tunnelling non-time-critical packets through real-time
+    networks by installing virtual NICs (VNICs) in the Linux domain.
+
+    See Documentation/README.rtmac for further information.
+
+source "drivers/xenomai/net/stack/rtmac/tdma/Kconfig"
+source "drivers/xenomai/net/stack/rtmac/nomac/Kconfig"
+++ linux-patched/drivers/xenomai/net/stack/rtmac/rtmac_proto.c	2022-03-21 12:58:30.192881131 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/rtmac_syms.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtmac/rtmac_proto.c
+ *
+ *  rtmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <rtdm/driver.h>
+#include <stack_mgr.h>
+#include <rtmac/rtmac_disc.h>
+#include <rtmac/rtmac_proto.h>
+#include <rtmac/rtmac_vnic.h>
+
+int rtmac_proto_rx(struct rtskb *skb, struct rtpacket_type *pt)
+{
+	struct rtmac_disc *disc = skb->rtdev->mac_disc;
+	struct rtmac_hdr *hdr;
+
+	if (disc == NULL) {
+		goto error;
+	}
+
+	hdr = (struct rtmac_hdr *)skb->data;
+	rtskb_pull(skb, sizeof(struct rtmac_hdr));
+
+	if (hdr->ver != RTMAC_VERSION) {
+		rtdm_printk(
+			"RTmac: received unsupported RTmac protocol version on "
+			"device %s.  Got 0x%x but expected 0x%x\n",
+			skb->rtdev->name, hdr->ver, RTMAC_VERSION);
+		goto error;
+	}
+
+	if (hdr->flags & RTMAC_FLAG_TUNNEL)
+		rtmac_vnic_rx(skb, hdr->type);
+	else if (disc->disc_type == hdr->type)
+		disc->packet_rx(skb);
+	else
+		/* nobody took ownership, return the buffer to its pool */
+		kfree_rtskb(skb);
+	return 0;
+
+error:
+	kfree_rtskb(skb);
+	return 0;
+}
+
+struct rtpacket_type rtmac_packet_type = { .type = __constant_htons(ETH_RTMAC),
+					   .handler = rtmac_proto_rx };
+
+void rtmac_proto_release(void)
+{
+	rtdev_remove_pack(&rtmac_packet_type);
+}
+++ linux-patched/drivers/xenomai/net/stack/rtmac/rtmac_syms.c	2022-03-21 12:58:30.185881199 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/rtmac_disc.c	1970-01-01 01:00:00.000000000 +0100
+/* rtmac_syms.c
+ *
+ * rtmac - real-time networking media access control subsystem
+ * Copyright (C) 2002 Marc Kleine-Budde <kleine-budde@gmx.de>
+ *               2003 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <rtmac/rtmac_disc.h>
+#include <rtmac/rtmac_vnic.h>
+
+EXPORT_SYMBOL_GPL(__rtmac_disc_register);
+EXPORT_SYMBOL_GPL(rtmac_disc_deregister);
+
+EXPORT_SYMBOL_GPL(rtmac_disc_attach);
+EXPORT_SYMBOL_GPL(rtmac_disc_detach);
+
+EXPORT_SYMBOL_GPL(rtmac_vnic_set_max_mtu);
+
+EXPORT_SYMBOL_GPL(rtmac_vnic_xmit);
+++ linux-patched/drivers/xenomai/net/stack/rtmac/rtmac_disc.c	2022-03-21 12:58:30.178881268 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/nomac/nomac_proto.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtmac_disc.c
+ *
+ *  rtmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002 Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/mutex.h>
+
+#include <rtnet_internal.h>
+#include <rtmac/rtmac_disc.h>
+#include <rtmac/rtmac_proc.h>
+#include <rtmac/rtmac_vnic.h>
+
+static DEFINE_MUTEX(disc_list_lock);
+static LIST_HEAD(disc_list);
+
+/***
+ *  rtmac_disc_attach
+ *
+ *  @rtdev       device to attach the discipline to
+ *  @disc        discipline to attach
+ *
+ *  0            success
+ *  -EBUSY       other discipline active
+ *  -ENOMEM      could not allocate memory
+ *
+ *  Note: must be called with rtdev->nrt_lock acquired
+ */
+int rtmac_disc_attach(struct rtnet_device *rtdev, struct rtmac_disc *disc)
+{
+	int ret;
+	struct rtmac_priv *priv;
+
+	RTNET_ASSERT(rtdev != NULL, return -EINVAL;);
+	RTNET_ASSERT(disc != NULL, return -EINVAL;);
+	RTNET_ASSERT(disc->attach != NULL, return -EINVAL;);
+
+	if (rtdev->mac_disc) {
+		printk("RTmac: another discipline for rtdev '%s' active.\n",
+		       rtdev->name);
+		return -EBUSY;
+	}
+
+	if (rtdev->flags & IFF_LOOPBACK)
+		return -EINVAL;
+
+	if (!try_module_get(disc->owner))
+		return -EIDRM;
+
+	if (!rtdev_reference(rtdev)) {
+		ret = -EIDRM;
+		goto err_module_put;
+	}
+
+	/* alloc memory */
+	priv = kmalloc(sizeof(struct rtmac_priv) + disc->priv_size, GFP_KERNEL);
+	if (!priv) {
+		printk("RTmac: kmalloc returned NULL for rtmac!\n");
+		/* kfree(NULL) is a no-op, so take the common unwind path to
+		 * drop the device and module references acquired above */
+		ret = -ENOMEM;
+		goto err_kfree_priv;
+	}
+	priv->orig_start_xmit = rtdev->start_xmit;
+
+	/* call attach function of discipline */
+	ret = disc->attach(rtdev, priv->disc_priv);
+	if (ret < 0)
+		goto err_kfree_priv;
+
+	/* now attach RTmac to device */
+	rtdev->mac_disc = disc;
+	rtdev->mac_priv = priv;
+	rtdev->start_xmit = disc->rt_packet_tx;
+	if (disc->get_mtu)
+		rtdev->get_mtu = disc->get_mtu;
+	rtdev->mac_detach = rtmac_disc_detach;
+
+	/* create the VNIC */
+	ret = rtmac_vnic_add(rtdev, disc->vnic_xmit);
+	if (ret < 0) {
+		printk("RTmac: Warning, VNIC creation failed for rtdev %s.\n",
+		       rtdev->name);
+		goto err_disc_detach;
+	}
+
+	return 0;
+
+err_disc_detach:
+	disc->detach(rtdev, priv->disc_priv);
+err_kfree_priv:
+	kfree(priv);
+	rtdev_dereference(rtdev);
+err_module_put:
+	module_put(disc->owner);
+	return ret;
+}
+
+/***
+ *  rtmac_disc_detach
+ *
+ *  @rtdev       device to detach the discipline from
+ *
+ *  0            success
+ *  -EINVAL      called with rtdev=NULL or discipline has no detach function
+ *  -ENODEV      no discipline active on dev
+ *
+ *  Note: must be called with rtdev->nrt_lock acquired
+ */
+int rtmac_disc_detach(struct rtnet_device *rtdev)
+{
+	int ret;
+	struct rtmac_disc *disc;
+	struct rtmac_priv *priv;
+
+	RTNET_ASSERT(rtdev != NULL, return -EINVAL;);
+
+	disc = rtdev->mac_disc;
+	if (!disc)
+		return -ENODEV;
+
+	RTNET_ASSERT(disc->detach != NULL, return -EINVAL;);
+
+	priv = rtdev->mac_priv;
+	RTNET_ASSERT(priv != NULL, return -EINVAL;);
+
+	ret = rtmac_vnic_unregister(rtdev);
+	if (ret < 0)
+		return ret;
+
+	/* call release function of discipline */
+	ret = disc->detach(rtdev, priv->disc_priv);
+	if (ret < 0)
+		return ret;
+
+	rtmac_vnic_cleanup(rtdev);
+
+	/* restore start_xmit and get_mtu */
+	rtdev->start_xmit = priv->orig_start_xmit;
+	rtdev->get_mtu = rt_hard_mtu;
+
+	/* remove pointers from rtdev */
+	rtdev->mac_disc = NULL;
+	rtdev->mac_priv = NULL;
+	rtdev->mac_detach = NULL;
+
+	rtdev_dereference(rtdev);
+
+	kfree(priv);
+
+	module_put(disc->owner);
+
+	return 0;
+}
+
+static struct rtmac_disc *rtmac_get_disc_by_name(const char *name)
+{
+	struct list_head *disc;
+
+	mutex_lock(&disc_list_lock);
+
+	list_for_each (disc, &disc_list) {
+		if (strcmp(((struct rtmac_disc *)disc)->name, name) == 0) {
+			mutex_unlock(&disc_list_lock);
+			return (struct rtmac_disc *)disc;
+		}
+	}
+
+	mutex_unlock(&disc_list_lock);
+
+	return NULL;
+}
+
+int __rtmac_disc_register(struct rtmac_disc *disc, struct module *module)
+{
+	int ret;
+
+	RTNET_ASSERT(disc != NULL, return -EINVAL;);
+	RTNET_ASSERT(disc->name != NULL, return -EINVAL;);
+	RTNET_ASSERT(disc->rt_packet_tx != NULL, return -EINVAL;);
+	RTNET_ASSERT(disc->nrt_packet_tx != NULL, return -EINVAL;);
+	RTNET_ASSERT(disc->attach != NULL, return -EINVAL;);
+	RTNET_ASSERT(disc->detach != NULL, return -EINVAL;);
+
+	disc->owner = module;
+
+	if (rtmac_get_disc_by_name(disc->name) != NULL) {
+		printk("RTmac: discipline '%s' already registered!\n",
+		       disc->name);
+		return -EBUSY;
+	}
+
+	ret = rtnet_register_ioctls(&disc->ioctls);
+	if (ret < 0)
+		return ret;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	ret = rtmac_disc_proc_register(disc);
+	if (ret < 0) {
+		rtnet_unregister_ioctls(&disc->ioctls);
+		return ret;
+	}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	mutex_lock(&disc_list_lock);
+
+	list_add(&disc->list, &disc_list);
+
+	mutex_unlock(&disc_list_lock);
+
+	return 0;
+}
+
+void rtmac_disc_deregister(struct rtmac_disc *disc)
+{
+	RTNET_ASSERT(disc != NULL, return;);
+
+	mutex_lock(&disc_list_lock);
+
+	list_del(&disc->list);
+
+	mutex_unlock(&disc_list_lock);
+
+	rtnet_unregister_ioctls(&disc->ioctls);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	rtmac_disc_proc_unregister(disc);
+#endif /* CONFIG_XENO_OPT_VFILE */
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+int rtnet_rtmac_disciplines_show(struct xnvfile_regular_iterator *it, void *d)
+{
+	struct rtmac_disc *disc;
+	int err;
+
+	err = mutex_lock_interruptible(&disc_list_lock);
+	if (err < 0)
+		return err;
+
+	xnvfile_printf(it, "Name\t\tID\n");
+
+	list_for_each_entry (disc, &disc_list, list)
+		xnvfile_printf(it, "%-15s %04X\n", disc->name,
+			       ntohs(disc->disc_type));
+
+	mutex_unlock(&disc_list_lock);
+
+	return 0;
+}
+#endif /* CONFIG_XENO_OPT_VFILE */
+++ linux-patched/drivers/xenomai/net/stack/rtmac/nomac/nomac_proto.c	2022-03-21 12:58:30.170881345 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/nomac/nomac_ioctl.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtmac/nomac/nomac_proto.c
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/init.h>
+
+#include <rtdev.h>
+#include <rtmac/rtmac_proto.h>
+#include <rtmac/nomac/nomac.h>
+
+static struct rtskb_queue nrt_rtskb_queue;
+static rtdm_task_t wrapper_task;
+static rtdm_event_t wakeup_sem;
+
+int nomac_rt_packet_tx(struct rtskb *rtskb, struct rtnet_device *rtdev)
+{
+	/* unused here, just to demonstrate access to the discipline state
+    struct nomac_priv   *nomac =
+        (struct nomac_priv *)rtdev->mac_priv->disc_priv; */
+	int ret;
+
+	rtcap_mark_rtmac_enqueue(rtskb);
+
+	/* no MAC: we simply transmit the packet under xmit_lock */
+	rtdm_mutex_lock(&rtdev->xmit_mutex);
+	ret = rtmac_xmit(rtskb);
+	rtdm_mutex_unlock(&rtdev->xmit_mutex);
+
+	return ret;
+}
+
+int nomac_nrt_packet_tx(struct rtskb *rtskb)
+{
+	struct rtnet_device *rtdev = rtskb->rtdev;
+	/* unused here, just to demonstrate access to the discipline state
+    struct nomac_priv   *nomac =
+        (struct nomac_priv *)rtdev->mac_priv->disc_priv; */
+	int ret;
+
+	rtcap_mark_rtmac_enqueue(rtskb);
+
+	/* note: this routine may be called both in rt and non-rt context
+     *       => detect and wrap the context if necessary */
+	if (!rtdm_in_rt_context()) {
+		rtskb_queue_tail(&nrt_rtskb_queue, rtskb);
+		rtdm_event_signal(&wakeup_sem);
+		return 0;
+	} else {
+		/* no MAC: we simply transmit the packet under xmit_lock */
+		rtdm_mutex_lock(&rtdev->xmit_mutex);
+		ret = rtmac_xmit(rtskb);
+		rtdm_mutex_unlock(&rtdev->xmit_mutex);
+
+		return ret;
+	}
+}
+
+void nrt_xmit_task(void *arg)
+{
+	struct rtskb *rtskb;
+	struct rtnet_device *rtdev;
+
+	while (!rtdm_task_should_stop()) {
+		if (rtdm_event_wait(&wakeup_sem) < 0)
+			break;
+
+		while ((rtskb = rtskb_dequeue(&nrt_rtskb_queue))) {
+			rtdev = rtskb->rtdev;
+
+			/* no MAC: we simply transmit the packet under xmit_lock */
+			rtdm_mutex_lock(&rtdev->xmit_mutex);
+			rtmac_xmit(rtskb);
+			rtdm_mutex_unlock(&rtdev->xmit_mutex);
+		}
+	}
+}
+
+int nomac_packet_rx(struct rtskb *rtskb)
+{
+	/* actually, NoMAC doesn't expect any control packet */
+	kfree_rtskb(rtskb);
+
+	return 0;
+}
+
+int __init nomac_proto_init(void)
+{
+	int ret;
+
+	rtskb_queue_init(&nrt_rtskb_queue);
+	rtdm_event_init(&wakeup_sem, 0);
+
+	ret = rtdm_task_init(&wrapper_task, "rtnet-nomac", nrt_xmit_task, 0,
+			     RTDM_TASK_LOWEST_PRIORITY, 0);
+	if (ret < 0) {
+		rtdm_event_destroy(&wakeup_sem);
+		return ret;
+	}
+
+	return 0;
+}
+
+void nomac_proto_cleanup(void)
+{
+	rtdm_event_destroy(&wakeup_sem);
+	rtdm_task_destroy(&wrapper_task);
+}
+++ linux-patched/drivers/xenomai/net/stack/rtmac/nomac/nomac_ioctl.c	2022-03-21 12:58:30.163881414 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/nomac/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtmac/nomac/nomac_ioctl.c
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include <nomac_chrdev.h>
+#include <rtmac/nomac/nomac.h>
+
+static int nomac_ioctl_attach(struct rtnet_device *rtdev)
+{
+	struct nomac_priv *nomac;
+	int ret;
+
+	if (rtdev->mac_priv == NULL) {
+		ret = rtmac_disc_attach(rtdev, &nomac_disc);
+		if (ret < 0)
+			return ret;
+	}
+
+	nomac = (struct nomac_priv *)rtdev->mac_priv->disc_priv;
+	if (nomac->magic != NOMAC_MAGIC)
+		return -ENOTTY;
+
+	/* ... */
+
+	return 0;
+}
+
+static int nomac_ioctl_detach(struct rtnet_device *rtdev)
+{
+	struct nomac_priv *nomac;
+	int ret;
+
+	if (rtdev->mac_priv == NULL)
+		return -ENOTTY;
+
+	nomac = (struct nomac_priv *)rtdev->mac_priv->disc_priv;
+	if (nomac->magic != NOMAC_MAGIC)
+		return -ENOTTY;
+
+	ret = rtmac_disc_detach(rtdev);
+
+	/* ... */
+
+	return ret;
+}
+
+int nomac_ioctl(struct rtnet_device *rtdev, unsigned int request,
+		unsigned long arg)
+{
+	struct nomac_config cfg;
+	int ret;
+
+	ret = copy_from_user(&cfg, (void *)arg, sizeof(cfg));
+	if (ret != 0)
+		return -EFAULT;
+
+	if (mutex_lock_interruptible(&rtdev->nrt_lock))
+		return -ERESTARTSYS;
+
+	switch (request) {
+	case NOMAC_IOC_ATTACH:
+		ret = nomac_ioctl_attach(rtdev);
+		break;
+
+	case NOMAC_IOC_DETACH:
+		ret = nomac_ioctl_detach(rtdev);
+		break;
+
+	default:
+		ret = -ENOTTY;
+	}
+
+	mutex_unlock(&rtdev->nrt_lock);
+
+	return ret;
+}
+++ linux-patched/drivers/xenomai/net/stack/rtmac/nomac/Kconfig	2022-03-21 12:58:30.155881492 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/nomac/nomac_dev.c	1970-01-01 01:00:00.000000000 +0100
+config XENO_DRIVERS_NET_NOMAC
+    tristate "NoMAC discipline for RTmac"
+    depends on XENO_DRIVERS_NET_RTMAC
+    default n
+    help
+    This no-operation RTmac discipline is intended to act as a template
+    for new implementations. It can nevertheless be compiled and used (see
+    the nomaccfg management tool), but don't expect any improved
+    determinism of your network. ;)
+++ linux-patched/drivers/xenomai/net/stack/rtmac/nomac/nomac_dev.c	2022-03-21 12:58:30.148881560 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/nomac/Makefile	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtmac/nomac/nomac_dev.c
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/list.h>
+
+#include <rtdev.h>
+#include <rtmac.h>
+#include <rtmac/nomac/nomac.h>
+
+static int nomac_dev_openclose(void)
+{
+	return 0;
+}
+
+static int nomac_dev_ioctl(struct rtdm_fd *fd, unsigned int request, void *arg)
+{
+	struct nomac_priv *nomac;
+
+	nomac = container_of(rtdm_fd_to_context(fd)->device, struct nomac_priv,
+			     api_device);
+
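+	/* the NoMAC template implements no services; every request falls
+	 * through to -ENOTTY */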
+	switch (request) {
+	case RTMAC_RTIOC_TIMEOFFSET:
+
+	case RTMAC_RTIOC_WAITONCYCLE:
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+static struct rtdm_driver
+	nomac_driver = { .profile_info = RTDM_PROFILE_INFO(
+				 nomac, RTDM_CLASS_RTMAC,
+				 RTDM_SUBCLASS_UNMANAGED, RTNET_RTDM_VER),
+			 .device_flags = RTDM_NAMED_DEVICE,
+			 .device_count = 1,
+			 .context_size = 0,
+			 .ops = {
+				 .open = (typeof(nomac_driver.ops.open))
+					 nomac_dev_openclose,
+				 .ioctl_rt = nomac_dev_ioctl,
+				 .ioctl_nrt = nomac_dev_ioctl,
+				 .close = (typeof(nomac_driver.ops.close))
+					 nomac_dev_openclose,
+			 } };
+
+int nomac_dev_init(struct rtnet_device *rtdev, struct nomac_priv *nomac)
+{
+	char *pos;
+
+	strcpy(nomac->device_name, "NOMAC");
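+	/* append the trailing digits of the interface name, so that
+	 * e.g. "rteth0" yields the API device name "NOMAC0" */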
+	for (pos = rtdev->name + strlen(rtdev->name) - 1;
+	     (pos >= rtdev->name) && ((*pos) >= '0') && (*pos <= '9'); pos--)
+		;
+	strncat(nomac->device_name + 5, pos + 1, IFNAMSIZ - 5);
+
+	nomac->api_driver = nomac_driver;
+	nomac->api_device.driver = &nomac->api_driver;
+	nomac->api_device.label = nomac->device_name;
+
+	return rtdm_dev_register(&nomac->api_device);
+}
+++ linux-patched/drivers/xenomai/net/stack/rtmac/nomac/Makefile	2022-03-21 12:58:30.141881628 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/nomac/nomac_module.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_NOMAC) += nomac.o
+
+nomac-y := \
+	nomac_dev.o \
+	nomac_ioctl.o \
+	nomac_module.o \
+	nomac_proto.o
+++ linux-patched/drivers/xenomai/net/stack/rtmac/nomac/nomac_module.c	2022-03-21 12:58:30.133881706 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/Makefile	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtmac/nomac/nomac_module.c
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <rtdm/driver.h>
+#include <rtmac/rtmac_vnic.h>
+#include <rtmac/nomac/nomac.h>
+#include <rtmac/nomac/nomac_dev.h>
+#include <rtmac/nomac/nomac_ioctl.h>
+#include <rtmac/nomac/nomac_proto.h>
+
+#ifdef CONFIG_XENO_OPT_VFILE
+LIST_HEAD(nomac_devices);
+DEFINE_MUTEX(nomac_nrt_lock);
+
+int nomac_proc_read(struct xnvfile_regular_iterator *it, void *data)
+{
+	struct nomac_priv *entry;
+
+	mutex_lock(&nomac_nrt_lock);
+
+	xnvfile_printf(it, "Interface       API Device      State\n");
+
+	list_for_each_entry (entry, &nomac_devices, list_entry)
+		xnvfile_printf(it, "%-15s %-15s Attached\n", entry->rtdev->name,
+			       entry->api_device.name);
+
+	mutex_unlock(&nomac_nrt_lock);
+
+	return 0;
+}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+int nomac_attach(struct rtnet_device *rtdev, void *priv)
+{
+	struct nomac_priv *nomac = (struct nomac_priv *)priv;
+	int ret;
+
+	nomac->magic = NOMAC_MAGIC;
+	nomac->rtdev = rtdev;
+
+	/* ... */
+
+	ret = nomac_dev_init(rtdev, nomac);
+	if (ret < 0)
+		return ret;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	mutex_lock(&nomac_nrt_lock);
+	list_add(&nomac->list_entry, &nomac_devices);
+	mutex_unlock(&nomac_nrt_lock);
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	return 0;
+}
+
+int nomac_detach(struct rtnet_device *rtdev, void *priv)
+{
+	struct nomac_priv *nomac = (struct nomac_priv *)priv;
+
+	nomac_dev_release(nomac);
+
+	/* ... */
+#ifdef CONFIG_XENO_OPT_VFILE
+	mutex_lock(&nomac_nrt_lock);
+	list_del(&nomac->list_entry);
+	mutex_unlock(&nomac_nrt_lock);
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	return 0;
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+struct rtmac_proc_entry nomac_proc_entries[] = {
+	{ name: "nomac", handler: nomac_proc_read },
+};
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct rtmac_disc nomac_disc = {
+	name: "NoMAC",
+	priv_size: sizeof(struct nomac_priv),
+	disc_type: __constant_htons(RTMAC_TYPE_NOMAC),
+
+	packet_rx: nomac_packet_rx,
+	rt_packet_tx: nomac_rt_packet_tx,
+	nrt_packet_tx: nomac_nrt_packet_tx,
+
+	get_mtu: NULL,
+
+	vnic_xmit: RTMAC_DEFAULT_VNIC,
+
+	attach: nomac_attach,
+	detach: nomac_detach,
+
+	ioctls: {
+		service_name: "RTmac/NoMAC",
+		ioctl_type: RTNET_IOC_TYPE_RTMAC_NOMAC,
+		handler: nomac_ioctl
+	},
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	proc_entries: nomac_proc_entries,
+	nr_proc_entries: ARRAY_SIZE(nomac_proc_entries),
+#endif /* CONFIG_XENO_OPT_VFILE */
+};
+
+int __init nomac_init(void)
+{
+	int ret;
+
+	printk("RTmac/NoMAC: init void media access control mechanism\n");
+
+	ret = nomac_proto_init();
+	if (ret < 0)
+		return ret;
+
+	ret = rtmac_disc_register(&nomac_disc);
+	if (ret < 0) {
+		nomac_proto_cleanup();
+		return ret;
+	}
+
+	return 0;
+}
+
+void nomac_release(void)
+{
+	rtmac_disc_deregister(&nomac_disc);
+	nomac_proto_cleanup();
+
+	printk("RTmac/NoMAC: unloaded\n");
+}
+
+module_init(nomac_init);
+module_exit(nomac_release);
+
+MODULE_AUTHOR("Jan Kiszka");
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/net/stack/rtmac/Makefile	2022-03-21 12:58:30.126881775 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/rtmac_proc.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_NOMAC) += nomac/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_TDMA) += tdma/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTMAC) += rtmac.o
+
+rtmac-y := \
+	rtmac_disc.o \
+	rtmac_module.o \
+	rtmac_proc.o \
+	rtmac_proto.o \
+	rtmac_syms.o \
+	rtmac_vnic.o
+++ linux-patched/drivers/xenomai/net/stack/rtmac/rtmac_proc.c	2022-03-21 12:58:30.118881853 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtmac/rtmac_module.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtmac_proc.c
+ *
+ *  rtmac - real-time networking medium access control subsystem
+ *  Copyright (C) 2002 Marc Kleine-Budde <kleine-budde@gmx.de>
+ *                2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+
+#include <rtnet_internal.h>
+#include <rtmac/rtmac_disc.h>
+#include <rtmac/rtmac_vnic.h>
+#include <rtmac/rtmac_proc.h>
+
+#ifdef CONFIG_XENO_OPT_VFILE
+struct xnvfile_directory rtmac_proc_root;
+
+static struct xnvfile_regular_ops rtnet_rtmac_disciplines_vfile_ops = {
+	.show = rtnet_rtmac_disciplines_show,
+};
+
+static struct xnvfile_regular rtnet_rtmac_disciplines_vfile = {
+	.ops = &rtnet_rtmac_disciplines_vfile_ops,
+};
+
+static struct xnvfile_regular_ops rtnet_rtmac_vnics_vfile_ops = {
+	.show = rtnet_rtmac_vnics_show,
+};
+
+static struct xnvfile_regular rtnet_rtmac_vnics_vfile = {
+	.ops = &rtnet_rtmac_vnics_vfile_ops,
+};
+
+static int rtnet_rtmac_disc_show(struct xnvfile_regular_iterator *it,
+				 void *data)
+{
+	struct rtmac_proc_entry *entry;
+	entry = container_of(it->vfile, struct rtmac_proc_entry, vfile);
+	return entry->handler(it, data);
+}
+
+static struct xnvfile_regular_ops rtnet_rtmac_disc_vfile_ops = {
+	.show = rtnet_rtmac_disc_show,
+};
+
+int rtmac_disc_proc_register(struct rtmac_disc *disc)
+{
+	int i, err;
+	struct rtmac_proc_entry *entry;
+
+	for (i = 0; i < disc->nr_proc_entries; i++) {
+		entry = &disc->proc_entries[i];
+
+		entry->vfile.ops = &rtnet_rtmac_disc_vfile_ops;
+		err = xnvfile_init_regular(entry->name, &entry->vfile,
+					   &rtmac_proc_root);
+		if (err < 0) {
+			while (--i >= 0)
+				xnvfile_destroy_regular(
+					&disc->proc_entries[i].vfile);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+void rtmac_disc_proc_unregister(struct rtmac_disc *disc)
+{
+	int i;
+
+	for (i = 0; i < disc->nr_proc_entries; i++)
+		xnvfile_destroy_regular(&disc->proc_entries[i].vfile);
+}
+
+int rtmac_proc_register(void)
+{
+	int err;
+
+	err = xnvfile_init_dir("rtmac", &rtmac_proc_root, &rtnet_proc_root);
+	if (err < 0)
+		goto err1;
+
+	err = xnvfile_init_regular("disciplines",
+				   &rtnet_rtmac_disciplines_vfile,
+				   &rtmac_proc_root);
+	if (err < 0)
+		goto err2;
+
+	err = xnvfile_init_regular("vnics", &rtnet_rtmac_vnics_vfile,
+				   &rtmac_proc_root);
+	if (err < 0)
+		goto err3;
+
+	return 0;
+
+err3:
+	xnvfile_destroy_regular(&rtnet_rtmac_disciplines_vfile);
+
+err2:
+	xnvfile_destroy_dir(&rtmac_proc_root);
+
+err1:
+	/*ERRMSG*/ printk("RTmac: unable to initialize /proc entries\n");
+	return err;
+}
+
+void rtmac_proc_release(void)
+{
+	xnvfile_destroy_regular(&rtnet_rtmac_vnics_vfile);
+	xnvfile_destroy_regular(&rtnet_rtmac_disciplines_vfile);
+	xnvfile_destroy_dir(&rtmac_proc_root);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+++ linux-patched/drivers/xenomai/net/stack/rtmac/rtmac_module.c	2022-03-21 12:58:30.111881921 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/iovec.c	1970-01-01 01:00:00.000000000 +0100
+/* rtmac_module.c
+ *
+ * rtmac - real-time networking media access control subsystem
+ * Copyright (C) 2002 Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *               2003 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <rtdm/driver.h>
+
+#include <rtmac/rtmac_disc.h>
+#include <rtmac/rtmac_proc.h>
+#include <rtmac/rtmac_proto.h>
+#include <rtmac/rtmac_vnic.h>
+
+int __init rtmac_init(void)
+{
+	int ret = 0;
+
+	printk("RTmac: init realtime media access control\n");
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	ret = rtmac_proc_register();
+	if (ret < 0)
+		return ret;
+#endif
+
+	ret = rtmac_vnic_module_init();
+	if (ret < 0)
+		goto error1;
+
+	ret = rtmac_proto_init();
+	if (ret < 0)
+		goto error2;
+
+	return 0;
+
+error2:
+	rtmac_vnic_module_cleanup();
+
+error1:
+#ifdef CONFIG_XENO_OPT_VFILE
+	rtmac_proc_release();
+#endif
+	return ret;
+}
+
+void rtmac_release(void)
+{
+	rtmac_proto_release();
+	rtmac_vnic_module_cleanup();
+#ifdef CONFIG_XENO_OPT_VFILE
+	rtmac_proc_release();
+#endif
+
+	printk("RTmac: unloaded\n");
+}
+
+module_init(rtmac_init);
+module_exit(rtmac_release);
+
+MODULE_AUTHOR("Marc Kleine-Budde, Jan Kiszka");
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/net/stack/iovec.c	2022-03-21 12:58:30.103881999 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtcfg/rtcfg_proc.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  stack/iovec.c
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999,2000 Zentropic Computing, LLC
+ *                2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <rtdm/driver.h>
+#include <rtnet_iovec.h>
+#include <rtnet_socket.h>
+
+ssize_t rtnet_write_to_iov(struct rtdm_fd *fd, struct iovec *iov, int iovlen,
+			   const void *data, size_t len)
+{
+	ssize_t ret = 0;
+	size_t nbytes;
+	int n;
+
+	for (n = 0; len > 0 && n < iovlen; n++, iov++) {
+		if (iov->iov_len == 0)
+			continue;
+
+		nbytes = iov->iov_len;
+		if (nbytes > len)
+			nbytes = len;
+
+		ret = rtnet_put_arg(fd, iov->iov_base, data, nbytes);
+		if (ret)
+			break;
+
+		len -= nbytes;
+		data += nbytes;
+		iov->iov_len -= nbytes;
+		iov->iov_base += nbytes;
+		ret += nbytes;
+		if (ret < 0) {
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtnet_write_to_iov);
+
+ssize_t rtnet_read_from_iov(struct rtdm_fd *fd, struct iovec *iov, int iovlen,
+			    void *data, size_t len)
+{
+	ssize_t ret = 0;
+	size_t nbytes;
+	int n;
+
+	for (n = 0; len > 0 && n < iovlen; n++, iov++) {
+		if (iov->iov_len == 0)
+			continue;
+
+		nbytes = iov->iov_len;
+		if (nbytes > len)
+			nbytes = len;
+
+		if (!rtdm_fd_is_user(fd))
+			memcpy(data, iov->iov_base, nbytes);
+		else {
+			ret = rtdm_copy_from_user(fd, data, iov->iov_base,
+						  nbytes);
+			if (ret)
+				break;
+		}
+
+		len -= nbytes;
+		data += nbytes;
+		iov->iov_len -= nbytes;
+		iov->iov_base += nbytes;
+		ret += nbytes;
+		if (ret < 0) {
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtnet_read_from_iov);
+++ linux-patched/drivers/xenomai/net/stack/rtcfg/rtcfg_proc.c	2022-03-21 12:58:30.093882096 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtcfg/rtcfg_ioctl.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *	rtcfg/rtcfg_proc.c
+ *
+ *	Real-Time Configuration Distribution Protocol
+ *
+ *	Copyright (C) 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ *
+ *	This program is distributed in the hope that it will be useful,
+ *	but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *	GNU General Public License for more details.
+ *
+ *	You should have received a copy of the GNU General Public License
+ *	along with this program; if not, write to the Free Software
+ *	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <rtdev.h>
+#include <rtnet_internal.h>
+#include <rtnet_port.h>
+#include <rtcfg/rtcfg_conn_event.h>
+#include <rtcfg/rtcfg_event.h>
+#include <rtcfg/rtcfg_frame.h>
+
+#ifdef CONFIG_XENO_OPT_VFILE
+DEFINE_MUTEX(nrt_proc_lock);
+static struct xnvfile_directory rtcfg_proc_root;
+
+static int rtnet_rtcfg_proc_lock_get(struct xnvfile *vfile)
+{
+	return mutex_lock_interruptible(&nrt_proc_lock);
+}
+
+static void rtnet_rtcfg_proc_lock_put(struct xnvfile *vfile)
+{
+	mutex_unlock(&nrt_proc_lock);
+}
+
+static struct xnvfile_lock_ops rtnet_rtcfg_proc_lock_ops = {
+	.get = rtnet_rtcfg_proc_lock_get,
+	.put = rtnet_rtcfg_proc_lock_put,
+};
+
+int rtnet_rtcfg_dev_state_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	struct rtcfg_device *rtcfg_dev = xnvfile_priv(it->vfile);
+	const char *state_name[] = { "OFF",
+				     "SERVER_RUNNING",
+				     "CLIENT_0",
+				     "CLIENT_1",
+				     "CLIENT_ANNOUNCED",
+				     "CLIENT_ALL_KNOWN",
+				     "CLIENT_ALL_FRAMES",
+				     "CLIENT_2",
+				     "CLIENT_READY" };
+
+	xnvfile_printf(it,
+		       "state:\t\t\t%d (%s)\n"
+		       "flags:\t\t\t%08lX\n"
+		       "other stations:\t\t%d\n"
+		       "stations found:\t\t%d\n"
+		       "stations ready:\t\t%d\n",
+		       rtcfg_dev->state, state_name[rtcfg_dev->state],
+		       rtcfg_dev->flags, rtcfg_dev->other_stations,
+		       rtcfg_dev->stations_found, rtcfg_dev->stations_ready);
+
+	if (rtcfg_dev->state == RTCFG_MAIN_SERVER_RUNNING) {
+		xnvfile_printf(it,
+			       "configured clients:\t%d\n"
+			       "burstrate:\t\t%d\n"
+			       "heartbeat period:\t%d ms\n",
+			       rtcfg_dev->spec.srv.clients_configured,
+			       rtcfg_dev->burstrate,
+			       rtcfg_dev->spec.srv.heartbeat);
+	} else if (rtcfg_dev->state != RTCFG_MAIN_OFF) {
+		xnvfile_printf(
+			it,
+			"address type:\t\t%d\n"
+			"server address:\t\t%02X:%02X:%02X:%02X:%02X:%02X\n"
+			"stage 2 config:\t\t%d/%d\n",
+			rtcfg_dev->spec.clt.addr_type,
+			rtcfg_dev->spec.clt.srv_mac_addr[0],
+			rtcfg_dev->spec.clt.srv_mac_addr[1],
+			rtcfg_dev->spec.clt.srv_mac_addr[2],
+			rtcfg_dev->spec.clt.srv_mac_addr[3],
+			rtcfg_dev->spec.clt.srv_mac_addr[4],
+			rtcfg_dev->spec.clt.srv_mac_addr[5],
+			rtcfg_dev->spec.clt.cfg_offs,
+			rtcfg_dev->spec.clt.cfg_len);
+	}
+
+	return 0;
+}
+
+static struct xnvfile_regular_ops rtnet_rtcfg_dev_state_vfile_ops = {
+	.show = rtnet_rtcfg_dev_state_show,
+};
+
+int rtnet_rtcfg_dev_stations_show(struct xnvfile_regular_iterator *it, void *d)
+{
+	struct rtcfg_device *rtcfg_dev = xnvfile_priv(it->vfile);
+	struct rtcfg_connection *conn;
+	struct rtcfg_station *station;
+	int i;
+
+	if (rtcfg_dev->state == RTCFG_MAIN_SERVER_RUNNING) {
+		list_for_each_entry (conn, &rtcfg_dev->spec.srv.conn_list,
+				     entry) {
+			if ((conn->state != RTCFG_CONN_SEARCHING) &&
+			    (conn->state != RTCFG_CONN_DEAD))
+				xnvfile_printf(
+					it,
+					"%02X:%02X:%02X:%02X:%02X:%02X\t%02X\n",
+					conn->mac_addr[0], conn->mac_addr[1],
+					conn->mac_addr[2], conn->mac_addr[3],
+					conn->mac_addr[4], conn->mac_addr[5],
+					conn->flags);
+		}
+	} else if (rtcfg_dev->spec.clt.station_addr_list) {
+		for (i = 0; i < rtcfg_dev->stations_found; i++) {
+			station = &rtcfg_dev->spec.clt.station_addr_list[i];
+
+			xnvfile_printf(
+				it, "%02X:%02X:%02X:%02X:%02X:%02X\t%02X\n",
+				station->mac_addr[0], station->mac_addr[1],
+				station->mac_addr[2], station->mac_addr[3],
+				station->mac_addr[4], station->mac_addr[5],
+				station->flags);
+		}
+	}
+
+	return 0;
+}
+
+static struct xnvfile_regular_ops rtnet_rtcfg_dev_stations_vfile_ops = {
+	.show = rtnet_rtcfg_dev_stations_show,
+};
+
+int rtnet_rtcfg_dev_conn_state_show(struct xnvfile_regular_iterator *it,
+				    void *d)
+{
+	struct rtcfg_connection *conn = xnvfile_priv(it->vfile);
+	char *state_name[] = { "SEARCHING", "STAGE_1", "STAGE_2", "READY",
+			       "DEAD" };
+
+	xnvfile_printf(it,
+		       "state:\t\t\t%d (%s)\n"
+		       "flags:\t\t\t%02X\n"
+		       "stage 1 size:\t\t%zd\n"
+		       "stage 2 filename:\t%s\n"
+		       "stage 2 size:\t\t%zd\n"
+		       "stage 2 offset:\t\t%d\n"
+		       "burstrate:\t\t%d\n"
+		       "mac address:\t\t%02X:%02X:%02X:%02X:%02X:%02X\n",
+		       conn->state, state_name[conn->state], conn->flags,
+		       conn->stage1_size,
+		       (conn->stage2_file) ? conn->stage2_file->name : "-",
+		       (conn->stage2_file) ? conn->stage2_file->size : 0,
+		       conn->cfg_offs, conn->burstrate, conn->mac_addr[0],
+		       conn->mac_addr[1], conn->mac_addr[2], conn->mac_addr[3],
+		       conn->mac_addr[4], conn->mac_addr[5]);
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	if ((conn->addr_type & RTCFG_ADDR_MASK) == RTCFG_ADDR_IP)
+		xnvfile_printf(it, "ip:\t\t\t%u.%u.%u.%u\n",
+			       NIPQUAD(conn->addr.ip_addr));
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	return 0;
+}
+
+static struct xnvfile_regular_ops rtnet_rtcfg_dev_conn_state_vfile_ops = {
+	.show = rtnet_rtcfg_dev_conn_state_show,
+};
+
+void rtcfg_update_conn_proc_entries(int ifindex)
+{
+	struct rtcfg_device *dev = &device[ifindex];
+	struct rtcfg_connection *conn;
+	char name_buf[64];
+
+	if (dev->state != RTCFG_MAIN_SERVER_RUNNING)
+		return;
+
+	list_for_each_entry (conn, &dev->spec.srv.conn_list, entry) {
+		switch (conn->addr_type & RTCFG_ADDR_MASK) {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+		case RTCFG_ADDR_IP:
+			snprintf(name_buf, 64, "CLIENT_%u.%u.%u.%u",
+				 NIPQUAD(conn->addr.ip_addr));
+			break;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+		default: /* RTCFG_ADDR_MAC */
+			snprintf(name_buf, 64,
+				 "CLIENT_%02X%02X%02X%02X%02X%02X",
+				 conn->mac_addr[0], conn->mac_addr[1],
+				 conn->mac_addr[2], conn->mac_addr[3],
+				 conn->mac_addr[4], conn->mac_addr[5]);
+			break;
+		}
+		memset(&conn->proc_entry, '\0', sizeof(conn->proc_entry));
+		conn->proc_entry.entry.lockops = &rtnet_rtcfg_proc_lock_ops;
+		conn->proc_entry.ops = &rtnet_rtcfg_dev_conn_state_vfile_ops;
+		xnvfile_priv(&conn->proc_entry) = conn;
+
+		xnvfile_init_regular(name_buf, &conn->proc_entry,
+				     &dev->proc_entry);
+	}
+}
+
+void rtcfg_remove_conn_proc_entries(int ifindex)
+{
+	struct rtcfg_device *dev = &device[ifindex];
+	struct rtcfg_connection *conn;
+
+	if (dev->state != RTCFG_MAIN_SERVER_RUNNING)
+		return;
+
+	list_for_each_entry (conn, &dev->spec.srv.conn_list, entry)
+		xnvfile_destroy_regular(&conn->proc_entry);
+}
+
+void rtcfg_new_rtdev(struct rtnet_device *rtdev)
+{
+	struct rtcfg_device *dev = &device[rtdev->ifindex];
+	int err;
+
+	mutex_lock(&nrt_proc_lock);
+
+	memset(&dev->proc_entry, '\0', sizeof(dev->proc_entry));
+	err = xnvfile_init_dir(rtdev->name, &dev->proc_entry, &rtcfg_proc_root);
+	if (err < 0)
+		goto error1;
+
+	memset(&dev->proc_state_vfile, '\0', sizeof(dev->proc_state_vfile));
+	dev->proc_state_vfile.entry.lockops = &rtnet_rtcfg_proc_lock_ops;
+	dev->proc_state_vfile.ops = &rtnet_rtcfg_dev_state_vfile_ops;
+	xnvfile_priv(&dev->proc_state_vfile) = dev;
+
+	err = xnvfile_init_regular("state", &dev->proc_state_vfile,
+				   &dev->proc_entry);
+	if (err < 0)
+		goto error2;
+
+	memset(&dev->proc_stations_vfile, '\0',
+	       sizeof(dev->proc_stations_vfile));
+	dev->proc_stations_vfile.entry.lockops = &rtnet_rtcfg_proc_lock_ops;
+	dev->proc_stations_vfile.ops = &rtnet_rtcfg_dev_stations_vfile_ops;
+	xnvfile_priv(&dev->proc_stations_vfile) = dev;
+
+	err = xnvfile_init_regular("stations_list", &dev->proc_stations_vfile,
+				   &dev->proc_entry);
+	if (err < 0)
+		goto error3;
+
+	mutex_unlock(&nrt_proc_lock);
+
+	return;
+
+error3:
+	xnvfile_destroy_regular(&dev->proc_state_vfile);
+error2:
+	xnvfile_destroy_dir(&dev->proc_entry);
+error1:
+	dev->proc_entry.entry.pde = NULL;
+	mutex_unlock(&nrt_proc_lock);
+}
+
+void rtcfg_remove_rtdev(struct rtnet_device *rtdev)
+{
+	struct rtcfg_device *dev = &device[rtdev->ifindex];
+
+	// To-Do: issue down command
+
+	mutex_lock(&nrt_proc_lock);
+
+	if (dev->proc_entry.entry.pde) {
+		rtcfg_remove_conn_proc_entries(rtdev->ifindex);
+
+		xnvfile_destroy_regular(&dev->proc_stations_vfile);
+		xnvfile_destroy_regular(&dev->proc_state_vfile);
+		xnvfile_destroy_dir(&dev->proc_entry);
+		dev->proc_entry.entry.pde = NULL;
+	}
+
+	mutex_unlock(&nrt_proc_lock);
+}
+
+static struct rtdev_event_hook rtdev_hook = { .register_device =
+						      rtcfg_new_rtdev,
+					      .unregister_device =
+						      rtcfg_remove_rtdev,
+					      .ifup = NULL,
+					      .ifdown = NULL };
+
+int rtcfg_init_proc(void)
+{
+	struct rtnet_device *rtdev;
+	int i, err;
+
+	err = xnvfile_init_dir("rtcfg", &rtcfg_proc_root, &rtnet_proc_root);
+	if (err < 0)
+		goto err1;
+
+	for (i = 0; i < MAX_RT_DEVICES; i++) {
+		rtdev = rtdev_get_by_index(i);
+		if (rtdev) {
+			rtcfg_new_rtdev(rtdev);
+			rtdev_dereference(rtdev);
+		}
+	}
+
+	rtdev_add_event_hook(&rtdev_hook);
+	return 0;
+
+err1:
+	printk("RTcfg: unable to initialise /proc entries\n");
+	return err;
+}
+
+void rtcfg_cleanup_proc(void)
+{
+	struct rtnet_device *rtdev;
+	int i;
+
+	rtdev_del_event_hook(&rtdev_hook);
+
+	for (i = 0; i < MAX_RT_DEVICES; i++) {
+		rtdev = rtdev_get_by_index(i);
+		if (rtdev) {
+			rtcfg_remove_rtdev(rtdev);
+			rtdev_dereference(rtdev);
+		}
+	}
+
+	xnvfile_destroy_dir(&rtcfg_proc_root);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+++ linux-patched/drivers/xenomai/net/stack/rtcfg/rtcfg_ioctl.c	2022-03-21 12:58:30.086882165 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtcfg/rtcfg_event.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtcfg/rtcfg_ioctl.c
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/vmalloc.h>
+
+#include <rtcfg_chrdev.h>
+#include <rtnet_rtpc.h>
+#include <rtcfg/rtcfg_conn_event.h>
+#include <rtcfg/rtcfg_event.h>
+#include <rtcfg/rtcfg_frame.h>
+#include <rtcfg/rtcfg_proc.h>
+
+int rtcfg_event_handler(struct rt_proc_call *call)
+{
+	struct rtcfg_cmd *cmd_event;
+
+	cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+	return rtcfg_do_main_event(cmd_event->internal.data.ifindex,
+				   cmd_event->internal.data.event_id, call);
+}
+
+void keep_cmd_add(struct rt_proc_call *call, void *priv_data)
+{
+	/* do nothing on error (<0), or if file already present (=0) */
+	if (rtpc_get_result(call) <= 0)
+		return;
+
+	/* Don't clean up any buffers, we are going to recycle them! */
+	rtpc_set_cleanup_handler(call, NULL);
+}
+
+void cleanup_cmd_add(void *priv_data)
+{
+	struct rtcfg_cmd *cmd = (struct rtcfg_cmd *)priv_data;
+	void *buf;
+
+	/* unlock proc and update directory structure */
+	rtcfg_unlockwr_proc(cmd->internal.data.ifindex);
+
+	buf = cmd->args.add.conn_buf;
+	if (buf != NULL)
+		kfree(buf);
+
+	buf = cmd->args.add.stage1_data;
+	if (buf != NULL)
+		kfree(buf);
+
+	if (cmd->args.add.stage2_file != NULL) {
+		buf = cmd->args.add.stage2_file->buffer;
+		if (buf != NULL)
+			vfree(buf);
+		kfree(cmd->args.add.stage2_file);
+	}
+}
+
+void cleanup_cmd_del(void *priv_data)
+{
+	struct rtcfg_cmd *cmd = (struct rtcfg_cmd *)priv_data;
+	void *buf;
+
+	/* unlock proc and update directory structure */
+	rtcfg_unlockwr_proc(cmd->internal.data.ifindex);
+
+	if (cmd->args.del.conn_buf != NULL) {
+		buf = cmd->args.del.conn_buf->stage1_data;
+		if (buf != NULL)
+			kfree(buf);
+		kfree(cmd->args.del.conn_buf);
+	}
+
+	if (cmd->args.del.stage2_file != NULL) {
+		buf = cmd->args.del.stage2_file->buffer;
+		if (buf != NULL)
+			vfree(buf);
+		kfree(cmd->args.del.stage2_file);
+	}
+}
+
+void copy_stage_1_data(struct rt_proc_call *call, void *priv_data)
+{
+	struct rtcfg_cmd *cmd;
+	int result = rtpc_get_result(call);
+
+	if (result <= 0)
+		return;
+
+	cmd = rtpc_get_priv(call, struct rtcfg_cmd);
+
+	if (cmd->args.client.buffer_size < (size_t)result)
+		rtpc_set_result(call, -ENOSPC);
+	else if (copy_to_user(cmd->args.client.buffer,
+			      cmd->args.client.rtskb->data, result) != 0)
+		rtpc_set_result(call, -EFAULT);
+}
+
+void cleanup_cmd_client(void *priv_data)
+{
+	struct rtcfg_cmd *cmd = (struct rtcfg_cmd *)priv_data;
+	void *station_buf;
+	struct rtskb *rtskb;
+
+	station_buf = cmd->args.client.station_buf;
+	if (station_buf != NULL)
+		kfree(station_buf);
+
+	rtskb = cmd->args.client.rtskb;
+	if (rtskb != NULL)
+		kfree_rtskb(rtskb);
+}
+
+void copy_stage_2_data(struct rt_proc_call *call, void *priv_data)
+{
+	struct rtcfg_cmd *cmd;
+	int result = rtpc_get_result(call);
+	struct rtskb *rtskb;
+
+	if (result <= 0)
+		return;
+
+	cmd = rtpc_get_priv(call, struct rtcfg_cmd);
+
+	if (cmd->args.announce.buffer_size < (size_t)result)
+		rtpc_set_result(call, -ENOSPC);
+	else {
+		rtskb = cmd->args.announce.rtskb;
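+		/* the stage 2 data may span a chain of rtskbs; copy them
+		 * back-to-back into the user-provided buffer */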
+		do {
+			if (copy_to_user(cmd->args.announce.buffer, rtskb->data,
+					 rtskb->len) != 0) {
+				rtpc_set_result(call, -EFAULT);
+				break;
+			}
+			cmd->args.announce.buffer += rtskb->len;
+			rtskb = rtskb->next;
+		} while (rtskb != NULL);
+	}
+}
+
+void cleanup_cmd_announce(void *priv_data)
+{
+	struct rtcfg_cmd *cmd = (struct rtcfg_cmd *)priv_data;
+	struct rtskb *rtskb;
+
+	rtskb = cmd->args.announce.rtskb;
+	if (rtskb != NULL)
+		kfree_rtskb(rtskb);
+}
+
+void cleanup_cmd_detach(void *priv_data)
+{
+	struct rtcfg_cmd *cmd = (struct rtcfg_cmd *)priv_data;
+	void *buf;
+
+	/* unlock proc and update directory structure */
+	rtcfg_unlockwr_proc(cmd->internal.data.ifindex);
+
+	if (cmd->args.detach.conn_buf) {
+		buf = cmd->args.detach.conn_buf->stage1_data;
+		if (buf != NULL)
+			kfree(buf);
+		kfree(cmd->args.detach.conn_buf);
+	}
+
+	if (cmd->args.detach.stage2_file != NULL) {
+		buf = cmd->args.detach.stage2_file->buffer;
+		if (buf)
+			vfree(buf);
+		kfree(cmd->args.detach.stage2_file);
+	}
+
+	if (cmd->args.detach.station_addr_list)
+		kfree(cmd->args.detach.station_addr_list);
+
+	if (cmd->args.detach.stage2_chain)
+		kfree_rtskb(cmd->args.detach.stage2_chain);
+}
+
+static int load_cfg_file(struct rtcfg_file *cfgfile, struct rtcfg_cmd *cmd)
+{
+	size_t file_size = 0;
+	struct file *filp;
+	loff_t i_size;
+	int ret;
+
+	filp = filp_open(cfgfile->name, O_RDONLY, 0);
+	if (IS_ERR(filp))
+		return PTR_ERR(filp);
+
+	i_size = i_size_read(file_inode(filp));
+	if (i_size <= 0) {
+		/* allocate buffer even for empty files */
+		cfgfile->buffer = vmalloc(1);
+	} else {
+		cfgfile->buffer = NULL; /* Leave allocation to the kernel. */
+		ret = read_file_from_kernel(filp, &cfgfile->buffer,
+					i_size_read(file_inode(filp)),
+					&file_size, READING_UNKNOWN);
+		if (ret < 0) {
+			fput(filp);
+			return ret;
+		}
+	}
+
+	fput(filp);
+	cfgfile->size = file_size;
+
+	/* dispatch again, this time with new file attached */
+	return rtpc_dispatch_call(rtcfg_event_handler, 0, cmd,
+				sizeof(*cmd), NULL, cleanup_cmd_add);
+}
+
+int rtcfg_ioctl_add(struct rtnet_device *rtdev, struct rtcfg_cmd *cmd)
+{
+	struct rtcfg_connection *conn_buf;
+	struct rtcfg_file *file = NULL;
+	void *data_buf;
+	size_t size;
+	int ret;
+
+	conn_buf = kmalloc(sizeof(struct rtcfg_connection), GFP_KERNEL);
+	if (conn_buf == NULL)
+		return -ENOMEM;
+	cmd->args.add.conn_buf = conn_buf;
+
+	data_buf = NULL;
+	size = cmd->args.add.stage1_size;
+	if (size > 0) {
+		/* check stage 1 data size */
+		if (sizeof(struct rtcfg_frm_stage_1_cfg) +
+			    2 * RTCFG_ADDRSIZE_IP + size >
+		    rtdev->get_mtu(rtdev, RTCFG_SKB_PRIO)) {
+			ret = -ESTAGE1SIZE;
+			goto err;
+		}
+
+		data_buf = kmalloc(size, GFP_KERNEL);
+		if (data_buf == NULL) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		ret = copy_from_user(data_buf, cmd->args.add.stage1_data, size);
+		if (ret != 0) {
+			ret = -EFAULT;
+			goto err;
+		}
+	}
+	cmd->args.add.stage1_data = data_buf;
+
+	if (cmd->args.add.stage2_filename != NULL) {
+		size = strnlen_user(cmd->args.add.stage2_filename, PATH_MAX);
+
+		file = kmalloc(sizeof(struct rtcfg_file) + size, GFP_KERNEL);
+		if (file == NULL) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		file->name = (char *)file + sizeof(struct rtcfg_file);
+		file->buffer = NULL;
+
+		ret = copy_from_user(
+			(void *)file + sizeof(struct rtcfg_file),
+			(const void *)cmd->args.add.stage2_filename, size);
+		if (ret != 0) {
+			ret = -EFAULT;
+			goto err;
+		}
+	}
+	cmd->args.add.stage2_file = file;
+
+	/* lock proc structure for modification */
+	rtcfg_lockwr_proc(cmd->internal.data.ifindex);
+
+	ret = rtpc_dispatch_call(rtcfg_event_handler, 0, cmd, sizeof(*cmd),
+				 keep_cmd_add, cleanup_cmd_add);
+
+	/* load file if missing */
+	if (ret > 0) {
+		ret = load_cfg_file(file, cmd);
+		if (ret) {
+			rtcfg_unlockwr_proc(cmd->internal.data.ifindex);
+			goto err;
+		}
+	}
+
+	return ret;
+
+err:
+	kfree(conn_buf);
+	if (data_buf != NULL)
+		kfree(data_buf);
+	if (file != NULL) {
+		if (file->buffer != NULL)
+			vfree(file->buffer);
+		kfree(file);
+	}
+	return ret;
+}
+
+int rtcfg_ioctl(struct rtnet_device *rtdev, unsigned int request,
+		unsigned long arg)
+{
+	struct rtcfg_cmd cmd;
+	struct rtcfg_station *station_buf;
+	int ret;
+
+	ret = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+	if (ret != 0)
+		return -EFAULT;
+
+	cmd.internal.data.ifindex = rtdev->ifindex;
+	cmd.internal.data.event_id = _IOC_NR(request);
+
+	switch (request) {
+	case RTCFG_IOC_SERVER:
+		ret = rtpc_dispatch_call(rtcfg_event_handler, 0, &cmd,
+					 sizeof(cmd), NULL, NULL);
+		break;
+
+	case RTCFG_IOC_ADD:
+		ret = rtcfg_ioctl_add(rtdev, &cmd);
+		break;
+
+	case RTCFG_IOC_DEL:
+		cmd.args.del.conn_buf = NULL;
+		cmd.args.del.stage2_file = NULL;
+
+		/* lock proc structure for modification
+		   (unlock in cleanup_cmd_del) */
+		rtcfg_lockwr_proc(cmd.internal.data.ifindex);
+
+		ret = rtpc_dispatch_call(rtcfg_event_handler, 0, &cmd,
+					 sizeof(cmd), NULL, cleanup_cmd_del);
+		break;
+
+	case RTCFG_IOC_WAIT:
+		ret = rtpc_dispatch_call(rtcfg_event_handler,
+					 cmd.args.wait.timeout, &cmd,
+					 sizeof(cmd), NULL, NULL);
+		break;
+
+	case RTCFG_IOC_CLIENT:
+		station_buf = kmalloc(sizeof(struct rtcfg_station) *
+					      cmd.args.client.max_stations,
+				      GFP_KERNEL);
+		if (station_buf == NULL)
+			return -ENOMEM;
+		cmd.args.client.station_buf = station_buf;
+		cmd.args.client.rtskb = NULL;
+
+		ret = rtpc_dispatch_call(rtcfg_event_handler,
+					 cmd.args.client.timeout, &cmd,
+					 sizeof(cmd), copy_stage_1_data,
+					 cleanup_cmd_client);
+		break;
+
+	case RTCFG_IOC_ANNOUNCE:
+		cmd.args.announce.rtskb = NULL;
+
+		ret = rtpc_dispatch_call(rtcfg_event_handler,
+					 cmd.args.announce.timeout, &cmd,
+					 sizeof(cmd), copy_stage_2_data,
+					 cleanup_cmd_announce);
+		break;
+
+	case RTCFG_IOC_READY:
+		ret = rtpc_dispatch_call(rtcfg_event_handler,
+					 cmd.args.ready.timeout, &cmd,
+					 sizeof(cmd), NULL, NULL);
+		break;
+
+	case RTCFG_IOC_DETACH:
+		do {
+			cmd.args.detach.conn_buf = NULL;
+			cmd.args.detach.stage2_file = NULL;
+			cmd.args.detach.station_addr_list = NULL;
+			cmd.args.detach.stage2_chain = NULL;
+
+			/* lock proc structure for modification
+			   (unlock in cleanup_cmd_detach) */
+			rtcfg_lockwr_proc(cmd.internal.data.ifindex);
+
+			ret = rtpc_dispatch_call(rtcfg_event_handler, 0, &cmd,
+						 sizeof(cmd), NULL,
+						 cleanup_cmd_detach);
+		} while (ret == -EAGAIN);
+		break;
+
+	default:
+		ret = -ENOTTY;
+	}
+
+	return ret;
+}
+
+struct rtnet_ioctls rtcfg_ioctls = { .service_name = "RTcfg",
+				     .ioctl_type = RTNET_IOC_TYPE_RTCFG,
+				     .handler = rtcfg_ioctl };
+++ linux-patched/drivers/xenomai/net/stack/rtcfg/rtcfg_event.c	2022-03-21 12:58:30.078882243 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtcfg/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtcfg/rtcfg_event.c
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+
+#include <rtdev.h>
+#include <ipv4/route.h>
+#include <rtcfg/rtcfg.h>
+#include <rtcfg/rtcfg_client_event.h>
+#include <rtcfg/rtcfg_conn_event.h>
+#include <rtcfg/rtcfg_file.h>
+#include <rtcfg/rtcfg_frame.h>
+#include <rtcfg/rtcfg_timer.h>
+
+/*** Common and Server States ***/
+static int rtcfg_main_state_off(int ifindex, RTCFG_EVENT event_id,
+				void *event_data);
+static int rtcfg_main_state_server_running(int ifindex, RTCFG_EVENT event_id,
+					   void *event_data);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTCFG_DEBUG
+const char *rtcfg_event[] = { "RTCFG_CMD_SERVER",
+			      "RTCFG_CMD_ADD",
+			      "RTCFG_CMD_DEL",
+			      "RTCFG_CMD_WAIT",
+			      "RTCFG_CMD_CLIENT",
+			      "RTCFG_CMD_ANNOUNCE",
+			      "RTCFG_CMD_READY",
+			      "RTCFG_CMD_DETACH",
+			      "RTCFG_TIMER",
+			      "RTCFG_FRM_STAGE_1_CFG",
+			      "RTCFG_FRM_ANNOUNCE_NEW",
+			      "RTCFG_FRM_ANNOUNCE_REPLY",
+			      "RTCFG_FRM_STAGE_2_CFG",
+			      "RTCFG_FRM_STAGE_2_CFG_FRAG",
+			      "RTCFG_FRM_ACK_CFG",
+			      "RTCFG_FRM_READY",
+			      "RTCFG_FRM_HEARTBEAT",
+			      "RTCFG_FRM_DEAD_STATION" };
+
+const char *rtcfg_main_state[] = { "RTCFG_MAIN_OFF",
+				   "RTCFG_MAIN_SERVER_RUNNING",
+				   "RTCFG_MAIN_CLIENT_0",
+				   "RTCFG_MAIN_CLIENT_1",
+				   "RTCFG_MAIN_CLIENT_ANNOUNCED",
+				   "RTCFG_MAIN_CLIENT_ALL_KNOWN",
+				   "RTCFG_MAIN_CLIENT_ALL_FRAMES",
+				   "RTCFG_MAIN_CLIENT_2",
+				   "RTCFG_MAIN_CLIENT_READY" };
+
+int rtcfg_debug = RTCFG_DEFAULT_DEBUG_LEVEL;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTCFG_DEBUG */
+
+struct rtcfg_device device[MAX_RT_DEVICES];
+
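+/* Dispatch table of the main state machine, indexed by RTCFG_MAIN_* state. */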
+static int (*state[])(int ifindex, RTCFG_EVENT event_id,
+		      void *event_data) = { rtcfg_main_state_off,
+					    rtcfg_main_state_server_running,
+					    rtcfg_main_state_client_0,
+					    rtcfg_main_state_client_1,
+					    rtcfg_main_state_client_announced,
+					    rtcfg_main_state_client_all_known,
+					    rtcfg_main_state_client_all_frames,
+					    rtcfg_main_state_client_2,
+					    rtcfg_main_state_client_ready };
+
+static int rtcfg_server_add(struct rtcfg_cmd *cmd_event);
+static int rtcfg_server_del(struct rtcfg_cmd *cmd_event);
+static int rtcfg_server_detach(int ifindex, struct rtcfg_cmd *cmd_event);
+static int rtcfg_server_recv_announce(int ifindex, RTCFG_EVENT event_id,
+				      struct rtskb *rtskb);
+static int rtcfg_server_recv_ack(int ifindex, struct rtskb *rtskb);
+static int rtcfg_server_recv_simple_frame(int ifindex, RTCFG_EVENT event_id,
+					  struct rtskb *rtskb);
+
+int rtcfg_do_main_event(int ifindex, RTCFG_EVENT event_id, void *event_data)
+{
+	int main_state;
+
+	rtdm_mutex_lock(&device[ifindex].dev_mutex);
+
+	main_state = device[ifindex].state;
+
+	RTCFG_DEBUG(3, "RTcfg: %s() rtdev=%d, event=%s, state=%s\n",
+		    __FUNCTION__, ifindex, rtcfg_event[event_id],
+		    rtcfg_main_state[main_state]);
+
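+	/* The invoked state handler releases dev_mutex before returning. */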
+	return (*state[main_state])(ifindex, event_id, event_data);
+}
+
+void rtcfg_next_main_state(int ifindex, RTCFG_MAIN_STATE state)
+{
+	RTCFG_DEBUG(4, "RTcfg: next main state=%s\n", rtcfg_main_state[state]);
+
+	device[ifindex].state = state;
+}
+
+static int rtcfg_main_state_off(int ifindex, RTCFG_EVENT event_id,
+				void *event_data)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct rt_proc_call *call = (struct rt_proc_call *)event_data;
+	struct rtcfg_cmd *cmd_event;
+	int ret;
+
+	cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+	switch (event_id) {
+	case RTCFG_CMD_SERVER:
+		INIT_LIST_HEAD(&rtcfg_dev->spec.srv.conn_list);
+
+		ret = rtdm_timer_init(&rtcfg_dev->timer, rtcfg_timer,
+				      "rtcfg-timer");
+		if (ret == 0) {
+			ret = rtdm_timer_start(
+				&rtcfg_dev->timer, XN_INFINITE,
+				(nanosecs_rel_t)cmd_event->args.server.period *
+					1000000,
+				RTDM_TIMERMODE_RELATIVE);
+			if (ret < 0)
+				rtdm_timer_destroy(&rtcfg_dev->timer);
+		}
+		if (ret < 0) {
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+			return ret;
+		}
+
+		if (cmd_event->args.server.flags & _RTCFG_FLAG_READY)
+			set_bit(RTCFG_FLAG_READY, &rtcfg_dev->flags);
+		set_bit(FLAG_TIMER_STARTED, &rtcfg_dev->flags);
+
+		rtcfg_dev->burstrate = cmd_event->args.server.burstrate;
+
+		rtcfg_dev->spec.srv.heartbeat =
+			cmd_event->args.server.heartbeat;
+
+		rtcfg_dev->spec.srv.heartbeat_timeout =
+			((u64)cmd_event->args.server.heartbeat) * 1000000 *
+			cmd_event->args.server.threshold;
+
+		rtcfg_next_main_state(ifindex, RTCFG_MAIN_SERVER_RUNNING);
+
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		break;
+
+	case RTCFG_CMD_CLIENT:
+		rtcfg_dev->spec.clt.station_addr_list =
+			cmd_event->args.client.station_buf;
+		cmd_event->args.client.station_buf = NULL;
+
+		rtcfg_dev->spec.clt.max_stations =
+			cmd_event->args.client.max_stations;
+		rtcfg_dev->other_stations = -1;
+
+		rtcfg_queue_blocking_call(ifindex, call);
+
+		rtcfg_next_main_state(ifindex, RTCFG_MAIN_CLIENT_0);
+
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		return -CALL_PENDING;
+
+	default:
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
+			    rtcfg_event[event_id], ifindex, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*** Server States ***/
+
+static int rtcfg_main_state_server_running(int ifindex, RTCFG_EVENT event_id,
+					   void *event_data)
+{
+	struct rt_proc_call *call;
+	struct rtcfg_cmd *cmd_event;
+	struct rtcfg_device *rtcfg_dev;
+	struct rtskb *rtskb;
+
+	switch (event_id) {
+	case RTCFG_CMD_ADD:
+		call = (struct rt_proc_call *)event_data;
+		cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+
+		return rtcfg_server_add(cmd_event);
+
+	case RTCFG_CMD_DEL:
+		call = (struct rt_proc_call *)event_data;
+		cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+
+		return rtcfg_server_del(cmd_event);
+
+	case RTCFG_CMD_WAIT:
+		call = (struct rt_proc_call *)event_data;
+
+		rtcfg_dev = &device[ifindex];
+
+		if (rtcfg_dev->spec.srv.clients_configured ==
+		    rtcfg_dev->other_stations)
+			rtpc_complete_call(call, 0);
+		else
+			rtcfg_queue_blocking_call(ifindex, call);
+
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		return -CALL_PENDING;
+
+	case RTCFG_CMD_READY:
+		call = (struct rt_proc_call *)event_data;
+
+		rtcfg_dev = &device[ifindex];
+
+		if (rtcfg_dev->stations_ready == rtcfg_dev->other_stations)
+			rtpc_complete_call(call, 0);
+		else
+			rtcfg_queue_blocking_call(ifindex, call);
+
+		if (!test_and_set_bit(RTCFG_FLAG_READY, &rtcfg_dev->flags))
+			rtcfg_send_ready(ifindex);
+
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		return -CALL_PENDING;
+
+	case RTCFG_CMD_DETACH:
+		call = (struct rt_proc_call *)event_data;
+		cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+
+		return rtcfg_server_detach(ifindex, cmd_event);
+
+	case RTCFG_FRM_ANNOUNCE_NEW:
+	case RTCFG_FRM_ANNOUNCE_REPLY:
+		rtskb = (struct rtskb *)event_data;
+		return rtcfg_server_recv_announce(ifindex, event_id, rtskb);
+
+	case RTCFG_FRM_ACK_CFG:
+		rtskb = (struct rtskb *)event_data;
+		return rtcfg_server_recv_ack(ifindex, rtskb);
+
+	case RTCFG_FRM_READY:
+	case RTCFG_FRM_HEARTBEAT:
+		rtskb = (struct rtskb *)event_data;
+		return rtcfg_server_recv_simple_frame(ifindex, event_id, rtskb);
+
+	default:
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
+			    rtcfg_event[event_id], ifindex, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*** Server Command Event Handlers ***/
+
+static int rtcfg_server_add(struct rtcfg_cmd *cmd_event)
+{
+	struct rtcfg_device *rtcfg_dev;
+	struct rtcfg_connection *conn;
+	struct rtcfg_connection *new_conn;
+	struct list_head *entry;
+	unsigned int addr_type;
+
+	rtcfg_dev = &device[cmd_event->internal.data.ifindex];
+	addr_type = cmd_event->args.add.addr_type & RTCFG_ADDR_MASK;
+
+	new_conn = cmd_event->args.add.conn_buf;
+	memset(new_conn, 0, sizeof(struct rtcfg_connection));
+
+	new_conn->ifindex = cmd_event->internal.data.ifindex;
+	new_conn->state = RTCFG_CONN_SEARCHING;
+	new_conn->addr_type = cmd_event->args.add.addr_type;
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	new_conn->addr.ip_addr = cmd_event->args.add.ip_addr;
+#endif
+	new_conn->stage1_data = cmd_event->args.add.stage1_data;
+	new_conn->stage1_size = cmd_event->args.add.stage1_size;
+	new_conn->burstrate = rtcfg_dev->burstrate;
+	new_conn->cfg_timeout = ((u64)cmd_event->args.add.timeout) * 1000000;
+
+	if (cmd_event->args.add.addr_type == RTCFG_ADDR_IP) {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+		struct rtnet_device *rtdev;
+
+		/* MAC address yet unknown -> use broadcast address */
+		rtdev = rtdev_get_by_index(cmd_event->internal.data.ifindex);
+		if (rtdev == NULL) {
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+			return -ENODEV;
+		}
+		memcpy(new_conn->mac_addr, rtdev->broadcast, MAX_ADDR_LEN);
+		rtdev_dereference(rtdev);
+#else /* !CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+		return -EPROTONOSUPPORT;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+	} else
+		memcpy(new_conn->mac_addr, cmd_event->args.add.mac_addr,
+		       MAX_ADDR_LEN);
+
+	/* get stage 2 file */
+	if (cmd_event->args.add.stage2_file != NULL) {
+		if (cmd_event->args.add.stage2_file->buffer != NULL) {
+			new_conn->stage2_file = cmd_event->args.add.stage2_file;
+			rtcfg_add_file(new_conn->stage2_file);
+
+			cmd_event->args.add.stage2_file = NULL;
+		} else {
+			new_conn->stage2_file = rtcfg_get_file(
+				cmd_event->args.add.stage2_file->name);
+			if (new_conn->stage2_file == NULL) {
+				rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+				return 1;
+			}
+		}
+	}
+
+	list_for_each (entry, &rtcfg_dev->spec.srv.conn_list) {
+		conn = list_entry(entry, struct rtcfg_connection, entry);
+
+		if (
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+			((addr_type == RTCFG_ADDR_IP) &&
+			 (conn->addr.ip_addr == cmd_event->args.add.ip_addr)) ||
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+			((addr_type == RTCFG_ADDR_MAC) &&
+			 (memcmp(conn->mac_addr, new_conn->mac_addr,
+				 MAX_ADDR_LEN) == 0))) {
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+			if ((new_conn->stage2_file) &&
+			    (rtcfg_release_file(new_conn->stage2_file) == 0)) {
+				/*
+				 * Note: This assignment cannot overwrite a
+				 * valid file pointer. Effectively, it will
+				 * only be executed when new_conn->stage2_file
+				 * is the pointer originally passed by
+				 * rtcfg_ioctl. But checking this assumption
+				 * does not cause any harm :o)
+				 */
+				RTNET_ASSERT(cmd_event->args.add.stage2_file ==
+						     NULL,
+					     ;);
+
+				cmd_event->args.add.stage2_file =
+					new_conn->stage2_file;
+			}
+
+			return -EEXIST;
+		}
+	}
+
+	list_add_tail(&new_conn->entry, &rtcfg_dev->spec.srv.conn_list);
+	rtcfg_dev->other_stations++;
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	cmd_event->args.add.conn_buf = NULL;
+	cmd_event->args.add.stage1_data = NULL;
+
+	return 0;
+}
+
+static int rtcfg_server_del(struct rtcfg_cmd *cmd_event)
+{
+	struct rtcfg_connection *conn;
+	struct list_head *entry;
+	unsigned int addr_type;
+	struct rtcfg_device *rtcfg_dev;
+
+	rtcfg_dev = &device[cmd_event->internal.data.ifindex];
+	addr_type = cmd_event->args.add.addr_type & RTCFG_ADDR_MASK;
+
+	list_for_each (entry, &rtcfg_dev->spec.srv.conn_list) {
+		conn = list_entry(entry, struct rtcfg_connection, entry);
+
+		if ((addr_type == conn->addr_type) &&
+		    (
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+			    ((addr_type == RTCFG_ADDR_IP) &&
+			     (conn->addr.ip_addr ==
+			      cmd_event->args.add.ip_addr)) ||
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+			    ((addr_type == RTCFG_ADDR_MAC) &&
+			     (memcmp(conn->mac_addr,
+				     cmd_event->args.add.mac_addr,
+				     MAX_ADDR_LEN) == 0)))) {
+			list_del(&conn->entry);
+			rtcfg_dev->other_stations--;
+
+			if (conn->state > RTCFG_CONN_SEARCHING) {
+				rtcfg_dev->stations_found--;
+				if (conn->state >= RTCFG_CONN_STAGE_2)
+					rtcfg_dev->spec.srv.clients_configured--;
+				if (conn->flags & _RTCFG_FLAG_READY)
+					rtcfg_dev->stations_ready--;
+			}
+
+			if ((conn->stage2_file) &&
+			    (rtcfg_release_file(conn->stage2_file) == 0))
+				cmd_event->args.del.stage2_file =
+					conn->stage2_file;
+
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+			cmd_event->args.del.conn_buf = conn;
+
+			return 0;
+		}
+	}
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	return -ENOENT;
+}
+
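+/*
+ * Tear down one connection per invocation, returning -EAGAIN while more
+ * remain; rtcfg_ioctl re-dispatches RTCFG_IOC_DETACH until the list is empty.
+ */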
+static int rtcfg_server_detach(int ifindex, struct rtcfg_cmd *cmd_event)
+{
+	struct rtcfg_connection *conn;
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+
+	if (!list_empty(&rtcfg_dev->spec.srv.conn_list)) {
+		conn = list_entry(rtcfg_dev->spec.srv.conn_list.next,
+				  struct rtcfg_connection, entry);
+
+		list_del(&conn->entry);
+		rtcfg_dev->other_stations--;
+
+		if (conn->state > RTCFG_CONN_SEARCHING) {
+			rtcfg_dev->stations_found--;
+			if (conn->state >= RTCFG_CONN_STAGE_2)
+				rtcfg_dev->spec.srv.clients_configured--;
+			if (conn->flags & _RTCFG_FLAG_READY)
+				rtcfg_dev->stations_ready--;
+		}
+
+		if ((conn->stage2_file) &&
+		    (rtcfg_release_file(conn->stage2_file) == 0))
+			cmd_event->args.detach.stage2_file = conn->stage2_file;
+
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		cmd_event->args.detach.conn_buf = conn;
+
+		return -EAGAIN;
+	}
+
+	if (test_and_clear_bit(FLAG_TIMER_STARTED, &rtcfg_dev->flags))
+		rtdm_timer_destroy(&rtcfg_dev->timer);
+	rtcfg_reset_device(ifindex);
+
+	rtcfg_next_main_state(ifindex, RTCFG_MAIN_OFF);
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	return 0;
+}
+
+/*** Server Frame Event Handlers ***/
+
+static int rtcfg_server_recv_announce(int ifindex, RTCFG_EVENT event_id,
+				      struct rtskb *rtskb)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct list_head *entry;
+	struct rtcfg_frm_announce *announce;
+	struct rtcfg_connection *conn;
+
+	if (rtskb->len < sizeof(struct rtcfg_frm_announce)) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: received invalid announce frame\n");
+		return -EINVAL;
+	}
+
+	announce = (struct rtcfg_frm_announce *)rtskb->data;
+
+	list_for_each (entry, &rtcfg_dev->spec.srv.conn_list) {
+		conn = list_entry(entry, struct rtcfg_connection, entry);
+
+		switch (announce->addr_type) {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+			u32 announce_addr;
+		case RTCFG_ADDR_IP:
+			memcpy(&announce_addr, announce->addr, 4);
+
+			if (((conn->addr_type & RTCFG_ADDR_MASK) ==
+			     RTCFG_ADDR_IP) &&
+			    (announce_addr == conn->addr.ip_addr)) {
+				/* save MAC address - Ethernet-specific! */
+				memcpy(conn->mac_addr,
+				       rtskb->mac.ethernet->h_source, ETH_ALEN);
+
+				/* update routing table */
+				rt_ip_route_add_host(conn->addr.ip_addr,
+						     conn->mac_addr,
+						     rtskb->rtdev);
+
+				/* remove IP address */
+				__rtskb_pull(rtskb, RTCFG_ADDRSIZE_IP);
+
+				rtcfg_do_conn_event(conn, event_id, rtskb);
+
+				goto out;
+			}
+			break;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+		case RTCFG_ADDR_MAC:
+			/* Ethernet-specific! */
+			if (memcmp(conn->mac_addr,
+				   rtskb->mac.ethernet->h_source,
+				   ETH_ALEN) == 0) {
+				rtcfg_do_conn_event(conn, event_id, rtskb);
+
+				goto out;
+			}
+			break;
+		}
+	}
+
+out:
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	kfree_rtskb(rtskb);
+	return 0;
+}
+
+static int rtcfg_server_recv_ack(int ifindex, struct rtskb *rtskb)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct list_head *entry;
+	struct rtcfg_connection *conn;
+
+	if (rtskb->len < sizeof(struct rtcfg_frm_ack_cfg)) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: received invalid ack_cfg frame\n");
+		return -EINVAL;
+	}
+
+	list_for_each (entry, &rtcfg_dev->spec.srv.conn_list) {
+		conn = list_entry(entry, struct rtcfg_connection, entry);
+
+		/* find the corresponding connection - Ethernet-specific! */
+		if (memcmp(conn->mac_addr, rtskb->mac.ethernet->h_source,
+			   ETH_ALEN) != 0)
+			continue;
+
+		rtcfg_do_conn_event(conn, RTCFG_FRM_ACK_CFG, rtskb);
+
+		break;
+	}
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	kfree_rtskb(rtskb);
+	return 0;
+}
+
+static int rtcfg_server_recv_simple_frame(int ifindex, RTCFG_EVENT event_id,
+					  struct rtskb *rtskb)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct list_head *entry;
+	struct rtcfg_connection *conn;
+
+	list_for_each (entry, &rtcfg_dev->spec.srv.conn_list) {
+		conn = list_entry(entry, struct rtcfg_connection, entry);
+
+		/* find the corresponding connection - Ethernet-specific! */
+		if (memcmp(conn->mac_addr, rtskb->mac.ethernet->h_source,
+			   ETH_ALEN) != 0)
+			continue;
+
+		rtcfg_do_conn_event(conn, event_id, rtskb);
+
+		break;
+	}
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	kfree_rtskb(rtskb);
+	return 0;
+}
+
+/*** Utility Functions ***/
+
+void rtcfg_queue_blocking_call(int ifindex, struct rt_proc_call *call)
+{
+	rtdm_lockctx_t context;
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+
+	rtdm_lock_get_irqsave(&rtcfg_dev->event_calls_lock, context);
+	list_add_tail(&call->list_entry, &rtcfg_dev->event_calls);
+	rtdm_lock_put_irqrestore(&rtcfg_dev->event_calls_lock, context);
+}
+
+struct rt_proc_call *rtcfg_dequeue_blocking_call(int ifindex)
+{
+	rtdm_lockctx_t context;
+	struct rt_proc_call *call;
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+
+	rtdm_lock_get_irqsave(&rtcfg_dev->event_calls_lock, context);
+	if (!list_empty(&rtcfg_dev->event_calls)) {
+		call = (struct rt_proc_call *)rtcfg_dev->event_calls.next;
+		list_del(&call->list_entry);
+	} else
+		call = NULL;
+	rtdm_lock_put_irqrestore(&rtcfg_dev->event_calls_lock, context);
+
+	return call;
+}
+
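+/*
+ * Complete all queued blocking calls whose event matches event_id with the
+ * given result; calls queued for a different event are failed with -EINVAL.
+ */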
+void rtcfg_complete_cmd(int ifindex, RTCFG_EVENT event_id, int result)
+{
+	struct rt_proc_call *call;
+	struct rtcfg_cmd *cmd_event;
+
+	while (1) {
+		call = rtcfg_dequeue_blocking_call(ifindex);
+		if (call == NULL)
+			break;
+
+		cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+
+		rtpc_complete_call(call, (cmd_event->internal.data.event_id ==
+					  event_id) ?
+						 result :
+						 -EINVAL);
+	}
+}
+
+void rtcfg_reset_device(int ifindex)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+
+	rtcfg_dev->other_stations = 0;
+	rtcfg_dev->stations_found = 0;
+	rtcfg_dev->stations_ready = 0;
+	rtcfg_dev->flags = 0;
+	rtcfg_dev->burstrate = 0;
+
+	memset(&rtcfg_dev->spec, 0, sizeof(rtcfg_dev->spec));
+	INIT_LIST_HEAD(&rtcfg_dev->spec.srv.conn_list);
+}
+
+void rtcfg_init_state_machines(void)
+{
+	int i;
+	struct rtcfg_device *rtcfg_dev;
+
+	memset(device, 0, sizeof(device));
+
+	for (i = 0; i < MAX_RT_DEVICES; i++) {
+		rtcfg_dev = &device[i];
+		rtcfg_dev->state = RTCFG_MAIN_OFF;
+
+		rtdm_mutex_init(&rtcfg_dev->dev_mutex);
+
+		INIT_LIST_HEAD(&rtcfg_dev->event_calls);
+		rtdm_lock_init(&rtcfg_dev->event_calls_lock);
+	}
+}
+
+void rtcfg_cleanup_state_machines(void)
+{
+	int i;
+	struct rtcfg_device *rtcfg_dev;
+	struct rtcfg_connection *conn;
+	struct list_head *entry;
+	struct list_head *tmp;
+	struct rt_proc_call *call;
+
+	for (i = 0; i < MAX_RT_DEVICES; i++) {
+		rtcfg_dev = &device[i];
+
+		if (test_and_clear_bit(FLAG_TIMER_STARTED, &rtcfg_dev->flags))
+			rtdm_timer_destroy(&rtcfg_dev->timer);
+
+		/*
+		 * No need to synchronize with rtcfg_timer here: the task
+		 * running rtcfg_timer is already dead.
+		 */
+
+		rtdm_mutex_destroy(&rtcfg_dev->dev_mutex);
+
+		if (rtcfg_dev->state == RTCFG_MAIN_SERVER_RUNNING) {
+			list_for_each_safe (entry, tmp,
+					    &rtcfg_dev->spec.srv.conn_list) {
+				conn = list_entry(
+					entry, struct rtcfg_connection, entry);
+
+				if (conn->stage1_data != NULL)
+					kfree(conn->stage1_data);
+
+				if ((conn->stage2_file != NULL) &&
+				    (rtcfg_release_file(conn->stage2_file) ==
+				     0)) {
+					vfree(conn->stage2_file->buffer);
+					kfree(conn->stage2_file);
+				}
+
+				kfree(entry);
+			}
+		} else if (rtcfg_dev->state != RTCFG_MAIN_OFF) {
+			if (rtcfg_dev->spec.clt.station_addr_list != NULL)
+				kfree(rtcfg_dev->spec.clt.station_addr_list);
+
+			if (rtcfg_dev->spec.clt.stage2_chain != NULL)
+				kfree_rtskb(rtcfg_dev->spec.clt.stage2_chain);
+		}
+
+		while (1) {
+			call = rtcfg_dequeue_blocking_call(i);
+			if (call == NULL)
+				break;
+
+			rtpc_complete_call_nrt(call, -ENODEV);
+		}
+	}
+}
+++ linux-patched/drivers/xenomai/net/stack/rtcfg/Kconfig	2022-03-21 12:58:30.071882311 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtcfg/rtcfg_frame.c	1970-01-01 01:00:00.000000000 +0100
+config XENO_DRIVERS_NET_RTCFG
+    depends on XENO_DRIVERS_NET
+    tristate "RTcfg Service"
+    default y
+    help
+    The Real-Time Configuration service configures and monitors nodes in
+    an RTnet network. It works with plain MAC addresses as well as with
+    IPv4 addresses (if CONFIG_RTNET_RTIPV4 is enabled). RTcfg consists of
+    a configuration server, which may run on the same station as the TDMA
+    master, for example, and one or more clients. Clients can join and
+    leave the network at runtime without interfering with other stations.
+    Besides network configuration, the RTcfg server can also distribute
+    custom data.
+
+    See Documentation/README.rtcfg for further information.
+
+config XENO_DRIVERS_NET_RTCFG_DEBUG
+    bool "RTcfg Debugging"
+    depends on XENO_DRIVERS_NET_RTCFG
+    default n
+    help
+    Enables debug message output of the RTcfg state machines. Switch it on
+    if you need to trace a problem related to RTcfg.
+++ linux-patched/drivers/xenomai/net/stack/rtcfg/rtcfg_frame.c	2022-03-21 12:58:30.063882389 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtcfg/Makefile	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtcfg/rtcfg_frame.c
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/if_ether.h>
+
+#include <stack_mgr.h>
+#include <rtcfg/rtcfg.h>
+#include <rtcfg/rtcfg_conn_event.h>
+#include <rtcfg/rtcfg_frame.h>
+#include <rtcfg/rtcfg_timer.h>
+
+static unsigned int num_rtskbs = 32;
+module_param(num_rtskbs, uint, 0444);
+MODULE_PARM_DESC(num_rtskbs, "Number of realtime socket buffers used by RTcfg");
+
+static struct rtskb_pool rtcfg_pool;
+static rtdm_task_t rx_task;
+static rtdm_event_t rx_event;
+static struct rtskb_queue rx_queue;
+
+void rtcfg_thread_signal(void)
+{
+	rtdm_event_signal(&rx_event);
+}
+
+static int rtcfg_rx_handler(struct rtskb *rtskb, struct rtpacket_type *pt)
+{
+	if (rtskb_acquire(rtskb, &rtcfg_pool) == 0) {
+		rtskb_queue_tail(&rx_queue, rtskb);
+		rtcfg_thread_signal();
+	} else
+		kfree_rtskb(rtskb);
+
+	return 0;
+}
+
+static void rtcfg_rx_task(void *arg)
+{
+	struct rtskb *rtskb;
+	struct rtcfg_frm_head *frm_head;
+	struct rtnet_device *rtdev;
+
+	while (!rtdm_task_should_stop()) {
+		if (rtdm_event_wait(&rx_event) < 0)
+			break;
+
+		while ((rtskb = rtskb_dequeue(&rx_queue))) {
+			rtdev = rtskb->rtdev;
+
+			if (rtskb->pkt_type == PACKET_OTHERHOST) {
+				kfree_rtskb(rtskb);
+				continue;
+			}
+
+			if (rtskb->len < sizeof(struct rtcfg_frm_head)) {
+				RTCFG_DEBUG(
+					1,
+					"RTcfg: %s() received an invalid frame\n",
+					__FUNCTION__);
+				kfree_rtskb(rtskb);
+				continue;
+			}
+
+			frm_head = (struct rtcfg_frm_head *)rtskb->data;
+
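+			/*
+			 * Map the on-wire frame ID onto the RTCFG_FRM_*
+			 * section of the event enumeration.
+			 */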
+			if (rtcfg_do_main_event(rtskb->rtdev->ifindex,
+						frm_head->id +
+							RTCFG_FRM_STAGE_1_CFG,
+						rtskb) < 0)
+				kfree_rtskb(rtskb);
+		}
+
+		rtcfg_timer_run();
+	}
+}
+
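+/*
+ * Transmit an RTcfg frame on rtdev; the device reference obtained by the
+ * caller is dropped here on both the success and the error path.
+ */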
+int rtcfg_send_frame(struct rtskb *rtskb, struct rtnet_device *rtdev,
+		     u8 *dest_addr)
+{
+	int ret;
+
+	rtskb->rtdev = rtdev;
+	rtskb->priority = RTCFG_SKB_PRIO;
+
+	if (rtdev->hard_header) {
+		ret = rtdev->hard_header(rtskb, rtdev, ETH_RTCFG, dest_addr,
+					 rtdev->dev_addr, rtskb->len);
+		if (ret < 0)
+			goto err;
+	}
+
+	if ((rtdev->flags & IFF_UP) != 0) {
+		ret = 0;
+		if (rtdev_xmit(rtskb) != 0)
+			ret = -EAGAIN;
+	} else {
+		ret = -ENETDOWN;
+		goto err;
+	}
+
+	rtdev_dereference(rtdev);
+	return ret;
+
+err:
+	kfree_rtskb(rtskb);
+	rtdev_dereference(rtdev);
+	return ret;
+}
+
+int rtcfg_send_stage_1(struct rtcfg_connection *conn)
+{
+	struct rtnet_device *rtdev;
+	struct rtskb *rtskb;
+	unsigned int rtskb_size;
+	struct rtcfg_frm_stage_1_cfg *stage_1_frm;
+
+	rtdev = rtdev_get_by_index(conn->ifindex);
+	if (rtdev == NULL)
+		return -ENODEV;
+
+	rtskb_size = rtdev->hard_header_len +
+		     sizeof(struct rtcfg_frm_stage_1_cfg) + conn->stage1_size +
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+		     (((conn->addr_type & RTCFG_ADDR_MASK) == RTCFG_ADDR_IP) ?
+			      2 * RTCFG_ADDRSIZE_IP :
+			      0);
+#else /* !CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+		     0;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
+	if (rtskb == NULL) {
+		rtdev_dereference(rtdev);
+		return -ENOBUFS;
+	}
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len);
+
+	stage_1_frm = (struct rtcfg_frm_stage_1_cfg *)rtskb_put(
+		rtskb, sizeof(struct rtcfg_frm_stage_1_cfg));
+
+	stage_1_frm->head.id = RTCFG_ID_STAGE_1_CFG;
+	stage_1_frm->head.version = 0;
+	stage_1_frm->addr_type = conn->addr_type & RTCFG_ADDR_MASK;
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	if (stage_1_frm->addr_type == RTCFG_ADDR_IP) {
+		rtskb_put(rtskb, 2 * RTCFG_ADDRSIZE_IP);
+
+		memcpy(stage_1_frm->client_addr, &(conn->addr.ip_addr), 4);
+
+		stage_1_frm =
+			(struct rtcfg_frm_stage_1_cfg *)(((u8 *)stage_1_frm) +
+							 RTCFG_ADDRSIZE_IP);
+
+		memcpy(stage_1_frm->server_addr, &(rtdev->local_ip), 4);
+
+		stage_1_frm =
+			(struct rtcfg_frm_stage_1_cfg *)(((u8 *)stage_1_frm) +
+							 RTCFG_ADDRSIZE_IP);
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	stage_1_frm->burstrate = device[conn->ifindex].burstrate;
+	stage_1_frm->cfg_len = htons(conn->stage1_size);
+
+	memcpy(rtskb_put(rtskb, conn->stage1_size), conn->stage1_data,
+	       conn->stage1_size);
+
+	return rtcfg_send_frame(rtskb, rtdev, conn->mac_addr);
+}
+
+int rtcfg_send_stage_2(struct rtcfg_connection *conn, int send_data)
+{
+	struct rtnet_device *rtdev;
+	struct rtcfg_device *rtcfg_dev = &device[conn->ifindex];
+	struct rtskb *rtskb;
+	unsigned int rtskb_size;
+	struct rtcfg_frm_stage_2_cfg *stage_2_frm;
+	size_t total_size;
+	size_t frag_size;
+
+	rtdev = rtdev_get_by_index(conn->ifindex);
+	if (rtdev == NULL)
+		return -ENODEV;
+
+	if (send_data) {
+		total_size = conn->stage2_file->size;
+		frag_size = MIN(rtdev->get_mtu(rtdev, RTCFG_SKB_PRIO) -
+					sizeof(struct rtcfg_frm_stage_2_cfg),
+				total_size);
+	} else {
+		total_size = 0;
+		frag_size = 0;
+	}
+
+	rtskb_size = rtdev->hard_header_len +
+		     sizeof(struct rtcfg_frm_stage_2_cfg) + frag_size;
+
+	rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
+	if (rtskb == NULL) {
+		rtdev_dereference(rtdev);
+		return -ENOBUFS;
+	}
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len);
+
+	stage_2_frm = (struct rtcfg_frm_stage_2_cfg *)rtskb_put(
+		rtskb, sizeof(struct rtcfg_frm_stage_2_cfg));
+
+	stage_2_frm->head.id = RTCFG_ID_STAGE_2_CFG;
+	stage_2_frm->head.version = 0;
+	stage_2_frm->flags = rtcfg_dev->flags;
+	stage_2_frm->stations = htonl(rtcfg_dev->other_stations);
+	stage_2_frm->heartbeat_period = htons(rtcfg_dev->spec.srv.heartbeat);
+	stage_2_frm->cfg_len = htonl(total_size);
+
+	if (send_data)
+		memcpy(rtskb_put(rtskb, frag_size), conn->stage2_file->buffer,
+		       frag_size);
+	conn->cfg_offs = frag_size;
+
+	return rtcfg_send_frame(rtskb, rtdev, conn->mac_addr);
+}
+
+int rtcfg_send_stage_2_frag(struct rtcfg_connection *conn)
+{
+	struct rtnet_device *rtdev;
+	struct rtskb *rtskb;
+	unsigned int rtskb_size;
+	struct rtcfg_frm_stage_2_cfg_frag *stage_2_frm;
+	size_t frag_size;
+
+	rtdev = rtdev_get_by_index(conn->ifindex);
+	if (rtdev == NULL)
+		return -ENODEV;
+
+	frag_size = MIN(rtdev->get_mtu(rtdev, RTCFG_SKB_PRIO) -
+				sizeof(struct rtcfg_frm_stage_2_cfg_frag),
+			conn->stage2_file->size - conn->cfg_offs);
+
+	rtskb_size = rtdev->hard_header_len +
+		     sizeof(struct rtcfg_frm_stage_2_cfg_frag) + frag_size;
+
+	rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
+	if (rtskb == NULL) {
+		rtdev_dereference(rtdev);
+		return -ENOBUFS;
+	}
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len);
+
+	stage_2_frm = (struct rtcfg_frm_stage_2_cfg_frag *)rtskb_put(
+		rtskb, sizeof(struct rtcfg_frm_stage_2_cfg_frag));
+
+	stage_2_frm->head.id = RTCFG_ID_STAGE_2_CFG_FRAG;
+	stage_2_frm->head.version = 0;
+	stage_2_frm->frag_offs = htonl(conn->cfg_offs);
+
+	memcpy(rtskb_put(rtskb, frag_size),
+	       conn->stage2_file->buffer + conn->cfg_offs, frag_size);
+	conn->cfg_offs += frag_size;
+
+	return rtcfg_send_frame(rtskb, rtdev, conn->mac_addr);
+}
+
+int rtcfg_send_announce_new(int ifindex)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct rtnet_device *rtdev;
+	struct rtskb *rtskb;
+	unsigned int rtskb_size;
+	struct rtcfg_frm_announce *announce_new;
+
+	rtdev = rtdev_get_by_index(ifindex);
+	if (rtdev == NULL)
+		return -ENODEV;
+
+	rtskb_size = rtdev->hard_header_len +
+		     sizeof(struct rtcfg_frm_announce) +
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+		     (((rtcfg_dev->spec.clt.addr_type & RTCFG_ADDR_MASK) ==
+		       RTCFG_ADDR_IP) ?
+			      RTCFG_ADDRSIZE_IP :
+			      0);
+#else /* !CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+		     0;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
+	if (rtskb == NULL) {
+		rtdev_dereference(rtdev);
+		return -ENOBUFS;
+	}
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len);
+
+	announce_new = (struct rtcfg_frm_announce *)rtskb_put(
+		rtskb, sizeof(struct rtcfg_frm_announce));
+
+	announce_new->head.id = RTCFG_ID_ANNOUNCE_NEW;
+	announce_new->head.version = 0;
+	announce_new->addr_type = rtcfg_dev->spec.clt.addr_type;
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	if (announce_new->addr_type == RTCFG_ADDR_IP) {
+		rtskb_put(rtskb, RTCFG_ADDRSIZE_IP);
+
+		memcpy(announce_new->addr, &(rtdev->local_ip), 4);
+
+		announce_new =
+			(struct rtcfg_frm_announce *)(((u8 *)announce_new) +
+						      RTCFG_ADDRSIZE_IP);
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	announce_new->flags = rtcfg_dev->flags;
+	announce_new->burstrate = rtcfg_dev->burstrate;
+
+	return rtcfg_send_frame(rtskb, rtdev, rtdev->broadcast);
+}
+
+int rtcfg_send_announce_reply(int ifindex, u8 *dest_mac_addr)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct rtnet_device *rtdev;
+	struct rtskb *rtskb;
+	unsigned int rtskb_size;
+	struct rtcfg_frm_announce *announce_rpl;
+
+	rtdev = rtdev_get_by_index(ifindex);
+	if (rtdev == NULL)
+		return -ENODEV;
+
+	rtskb_size = rtdev->hard_header_len +
+		     sizeof(struct rtcfg_frm_announce) +
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+		     ((rtcfg_dev->spec.clt.addr_type == RTCFG_ADDR_IP) ?
+			      RTCFG_ADDRSIZE_IP :
+			      0);
+#else /* !CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+		     0;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
+	if (rtskb == NULL) {
+		rtdev_dereference(rtdev);
+		return -ENOBUFS;
+	}
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len);
+
+	announce_rpl = (struct rtcfg_frm_announce *)rtskb_put(
+		rtskb, sizeof(struct rtcfg_frm_announce));
+
+	announce_rpl->head.id = RTCFG_ID_ANNOUNCE_REPLY;
+	announce_rpl->head.version = 0;
+	announce_rpl->addr_type = rtcfg_dev->spec.clt.addr_type;
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	if (announce_rpl->addr_type == RTCFG_ADDR_IP) {
+		rtskb_put(rtskb, RTCFG_ADDRSIZE_IP);
+
+		memcpy(announce_rpl->addr, &(rtdev->local_ip), 4);
+
+		announce_rpl =
+			(struct rtcfg_frm_announce *)(((u8 *)announce_rpl) +
+						      RTCFG_ADDRSIZE_IP);
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	announce_rpl->flags = rtcfg_dev->flags & _RTCFG_FLAG_READY;
+	announce_rpl->burstrate = 0; /* padding field */
+
+	return rtcfg_send_frame(rtskb, rtdev, dest_mac_addr);
+}
+
+int rtcfg_send_ack(int ifindex)
+{
+	struct rtnet_device *rtdev;
+	struct rtskb *rtskb;
+	unsigned int rtskb_size;
+	struct rtcfg_frm_ack_cfg *ack_frm;
+
+	rtdev = rtdev_get_by_index(ifindex);
+	if (rtdev == NULL)
+		return -ENODEV;
+
+	rtskb_size = rtdev->hard_header_len + sizeof(struct rtcfg_frm_ack_cfg);
+
+	rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
+	if (rtskb == NULL) {
+		rtdev_dereference(rtdev);
+		return -ENOBUFS;
+	}
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len);
+
+	ack_frm = (struct rtcfg_frm_ack_cfg *)rtskb_put(
+		rtskb, sizeof(struct rtcfg_frm_ack_cfg));
+
+	ack_frm->head.id = RTCFG_ID_ACK_CFG;
+	ack_frm->head.version = 0;
+	ack_frm->ack_len = htonl(device[ifindex].spec.clt.cfg_offs);
+
+	return rtcfg_send_frame(rtskb, rtdev,
+				device[ifindex].spec.clt.srv_mac_addr);
+}
+
+int rtcfg_send_simple_frame(int ifindex, int frame_id, u8 *dest_addr)
+{
+	struct rtnet_device *rtdev;
+	struct rtskb *rtskb;
+	unsigned int rtskb_size;
+	struct rtcfg_frm_simple *simple_frm;
+
+	rtdev = rtdev_get_by_index(ifindex);
+	if (rtdev == NULL)
+		return -ENODEV;
+
+	rtskb_size = rtdev->hard_header_len + sizeof(struct rtcfg_frm_simple);
+
+	rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
+	if (rtskb == NULL) {
+		rtdev_dereference(rtdev);
+		return -ENOBUFS;
+	}
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len);
+
+	simple_frm = (struct rtcfg_frm_simple *)rtskb_put(
+		rtskb, sizeof(struct rtcfg_frm_simple));
+
+	simple_frm->head.id = frame_id;
+	simple_frm->head.version = 0;
+
+	return rtcfg_send_frame(rtskb, rtdev,
+				(dest_addr) ? dest_addr : rtdev->broadcast);
+}
+
+int rtcfg_send_dead_station(struct rtcfg_connection *conn)
+{
+	struct rtnet_device *rtdev;
+	struct rtskb *rtskb;
+	unsigned int rtskb_size;
+	struct rtcfg_frm_dead_station *dead_station_frm;
+
+	rtdev = rtdev_get_by_index(conn->ifindex);
+	if (rtdev == NULL)
+		return -ENODEV;
+
+	rtskb_size = rtdev->hard_header_len +
+		     sizeof(struct rtcfg_frm_dead_station) +
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+		     (((conn->addr_type & RTCFG_ADDR_MASK) == RTCFG_ADDR_IP) ?
+			      RTCFG_ADDRSIZE_IP :
+			      0);
+#else /* !CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+		     0;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
+	if (rtskb == NULL) {
+		rtdev_dereference(rtdev);
+		return -ENOBUFS;
+	}
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len);
+
+	dead_station_frm = (struct rtcfg_frm_dead_station *)rtskb_put(
+		rtskb, sizeof(struct rtcfg_frm_dead_station));
+
+	dead_station_frm->head.id = RTCFG_ID_DEAD_STATION;
+	dead_station_frm->head.version = 0;
+	dead_station_frm->addr_type = conn->addr_type & RTCFG_ADDR_MASK;
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	if (dead_station_frm->addr_type == RTCFG_ADDR_IP) {
+		rtskb_put(rtskb, RTCFG_ADDRSIZE_IP);
+
+		memcpy(dead_station_frm->logical_addr, &(conn->addr.ip_addr),
+		       4);
+
+		dead_station_frm = (struct rtcfg_frm_dead_station
+					    *)(((u8 *)dead_station_frm) +
+					       RTCFG_ADDRSIZE_IP);
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	/* Ethernet-specific! */
+	memcpy(dead_station_frm->physical_addr, conn->mac_addr, ETH_ALEN);
+	memset(&dead_station_frm->physical_addr[ETH_ALEN], 0,
+	       sizeof(dead_station_frm->physical_addr) - ETH_ALEN);
+
+	return rtcfg_send_frame(rtskb, rtdev, rtdev->broadcast);
+}
+
+static struct rtpacket_type rtcfg_packet_type = { .type = __constant_htons(
+							  ETH_RTCFG),
+						  .handler = rtcfg_rx_handler };
+
+int __init rtcfg_init_frames(void)
+{
+	int ret;
+
+	if (rtskb_module_pool_init(&rtcfg_pool, num_rtskbs) < num_rtskbs)
+		return -ENOMEM;
+
+	rtskb_queue_init(&rx_queue);
+	rtdm_event_init(&rx_event, 0);
+
+	ret = rtdm_task_init(&rx_task, "rtcfg-rx", rtcfg_rx_task, 0,
+			     RTDM_TASK_LOWEST_PRIORITY, 0);
+	if (ret < 0) {
+		rtdm_event_destroy(&rx_event);
+		goto error1;
+	}
+
+	ret = rtdev_add_pack(&rtcfg_packet_type);
+	if (ret < 0)
+		goto error2;
+
+	return 0;
+
+error2:
+	rtdm_event_destroy(&rx_event);
+	rtdm_task_destroy(&rx_task);
+
+error1:
+	rtskb_pool_release(&rtcfg_pool);
+
+	return ret;
+}
+
+void rtcfg_cleanup_frames(void)
+{
+	struct rtskb *rtskb;
+
+	rtdev_remove_pack(&rtcfg_packet_type);
+
+	rtdm_event_destroy(&rx_event);
+	rtdm_task_destroy(&rx_task);
+
+	while ((rtskb = rtskb_dequeue(&rx_queue)) != NULL) {
+		kfree_rtskb(rtskb);
+	}
+
+	rtskb_pool_release(&rtcfg_pool);
+}
+++ linux-patched/drivers/xenomai/net/stack/rtcfg/Makefile	2022-03-21 12:58:30.056882457 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtcfg/rtcfg_conn_event.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTCFG) += rtcfg.o
+
+rtcfg-y := \
+	rtcfg_module.o \
+	rtcfg_event.o \
+	rtcfg_client_event.o \
+	rtcfg_conn_event.o \
+	rtcfg_ioctl.o \
+	rtcfg_frame.o \
+	rtcfg_timer.o \
+	rtcfg_file.o \
+	rtcfg_proc.o
+++ linux-patched/drivers/xenomai/net/stack/rtcfg/rtcfg_conn_event.c	2022-03-21 12:58:30.049882525 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtcfg/rtcfg_timer.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtcfg/rtcfg_conn_event.c
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#include <ipv4/route.h>
+#include <rtcfg/rtcfg.h>
+#include <rtcfg/rtcfg_conn_event.h>
+#include <rtcfg/rtcfg_event.h>
+#include <rtcfg/rtcfg_frame.h>
+
+/****************************** states ***************************************/
+static int rtcfg_conn_state_searching(struct rtcfg_connection *conn,
+				      RTCFG_EVENT event_id, void *event_data);
+static int rtcfg_conn_state_stage_1(struct rtcfg_connection *conn,
+				    RTCFG_EVENT event_id, void *event_data);
+static int rtcfg_conn_state_stage_2(struct rtcfg_connection *conn,
+				    RTCFG_EVENT event_id, void *event_data);
+static int rtcfg_conn_state_ready(struct rtcfg_connection *conn,
+				  RTCFG_EVENT event_id, void *event_data);
+static int rtcfg_conn_state_dead(struct rtcfg_connection *conn,
+				 RTCFG_EVENT event_id, void *event_data);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTCFG_DEBUG
+const char *rtcfg_conn_state[] = { "RTCFG_CONN_SEARCHING", "RTCFG_CONN_STAGE_1",
+				   "RTCFG_CONN_STAGE_2", "RTCFG_CONN_READY",
+				   "RTCFG_CONN_DEAD" };
+#endif /* CONFIG_XENO_DRIVERS_NET_RTCFG_DEBUG */
+
+static void rtcfg_conn_recv_announce_new(struct rtcfg_connection *conn,
+					 struct rtskb *rtskb);
+static void rtcfg_conn_check_cfg_timeout(struct rtcfg_connection *conn);
+static void rtcfg_conn_check_heartbeat(struct rtcfg_connection *conn);
+
+static int (*state[])(struct rtcfg_connection *conn, RTCFG_EVENT event_id,
+		      void *event_data) = {
+	rtcfg_conn_state_searching, rtcfg_conn_state_stage_1,
+	rtcfg_conn_state_stage_2, rtcfg_conn_state_ready, rtcfg_conn_state_dead
+};
+
+int rtcfg_do_conn_event(struct rtcfg_connection *conn, RTCFG_EVENT event_id,
+			void *event_data)
+{
+	int conn_state = conn->state;
+
+	RTCFG_DEBUG(3, "RTcfg: %s() conn=%p, event=%s, state=%s\n",
+		    __FUNCTION__, conn, rtcfg_event[event_id],
+		    rtcfg_conn_state[conn_state]);
+
+	return (*state[conn_state])(conn, event_id, event_data);
+}
+
+static void rtcfg_next_conn_state(struct rtcfg_connection *conn,
+				  RTCFG_CONN_STATE state)
+{
+	RTCFG_DEBUG(4, "RTcfg: next connection state=%s\n",
+		    rtcfg_conn_state[state]);
+
+	conn->state = state;
+}
+
+static int rtcfg_conn_state_searching(struct rtcfg_connection *conn,
+				      RTCFG_EVENT event_id, void *event_data)
+{
+	struct rtcfg_device *rtcfg_dev = &device[conn->ifindex];
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+
+	switch (event_id) {
+	case RTCFG_FRM_ANNOUNCE_NEW:
+		rtcfg_conn_recv_announce_new(conn, rtskb);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_REPLY:
+		conn->last_frame = rtskb->time_stamp;
+
+		rtcfg_next_conn_state(conn, RTCFG_CONN_READY);
+
+		rtcfg_dev->stations_found++;
+		rtcfg_dev->stations_ready++;
+		rtcfg_dev->spec.srv.clients_configured++;
+		if (rtcfg_dev->spec.srv.clients_configured ==
+		    rtcfg_dev->other_stations)
+			rtcfg_complete_cmd(conn->ifindex, RTCFG_CMD_WAIT, 0);
+
+		break;
+
+	default:
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for conn %p in %s()\n",
+			    rtcfg_event[event_id], conn, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int rtcfg_conn_state_stage_1(struct rtcfg_connection *conn,
+				    RTCFG_EVENT event_id, void *event_data)
+{
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+	struct rtcfg_device *rtcfg_dev = &device[conn->ifindex];
+	struct rtcfg_frm_ack_cfg *ack_cfg;
+	int packets;
+
+	switch (event_id) {
+	case RTCFG_FRM_ACK_CFG:
+		conn->last_frame = rtskb->time_stamp;
+
+		ack_cfg = (struct rtcfg_frm_ack_cfg *)rtskb->data;
+		conn->cfg_offs = ntohl(ack_cfg->ack_len);
+
+		if ((conn->flags & _RTCFG_FLAG_STAGE_2_DATA) != 0) {
+			if (conn->cfg_offs >= conn->stage2_file->size) {
+				rtcfg_dev->spec.srv.clients_configured++;
+				if (rtcfg_dev->spec.srv.clients_configured ==
+				    rtcfg_dev->other_stations)
+					rtcfg_complete_cmd(conn->ifindex,
+							   RTCFG_CMD_WAIT, 0);
+				rtcfg_next_conn_state(
+					conn, ((conn->flags &
+						_RTCFG_FLAG_READY) != 0) ?
+						      RTCFG_CONN_READY :
+						      RTCFG_CONN_STAGE_2);
+			} else {
+				packets = conn->burstrate;
+				while ((conn->cfg_offs <
+					conn->stage2_file->size) &&
+				       (packets > 0)) {
+					rtcfg_send_stage_2_frag(conn);
+					packets--;
+				}
+			}
+		} else {
+			rtcfg_dev->spec.srv.clients_configured++;
+			if (rtcfg_dev->spec.srv.clients_configured ==
+			    rtcfg_dev->other_stations)
+				rtcfg_complete_cmd(conn->ifindex,
+						   RTCFG_CMD_WAIT, 0);
+			rtcfg_next_conn_state(
+				conn, ((conn->flags & _RTCFG_FLAG_READY) != 0) ?
+					      RTCFG_CONN_READY :
+					      RTCFG_CONN_STAGE_2);
+		}
+
+		break;
+
+	case RTCFG_TIMER:
+		rtcfg_conn_check_cfg_timeout(conn);
+		break;
+
+	default:
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for conn %p in %s()\n",
+			    rtcfg_event[event_id], conn, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int rtcfg_conn_state_stage_2(struct rtcfg_connection *conn,
+				    RTCFG_EVENT event_id, void *event_data)
+{
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+	struct rtcfg_device *rtcfg_dev = &device[conn->ifindex];
+
+	switch (event_id) {
+	case RTCFG_FRM_READY:
+		conn->last_frame = rtskb->time_stamp;
+
+		rtcfg_next_conn_state(conn, RTCFG_CONN_READY);
+
+		conn->flags |= _RTCFG_FLAG_READY;
+		rtcfg_dev->stations_ready++;
+
+		if (rtcfg_dev->stations_ready == rtcfg_dev->other_stations)
+			rtcfg_complete_cmd(conn->ifindex, RTCFG_CMD_READY, 0);
+
+		break;
+
+	case RTCFG_TIMER:
+		rtcfg_conn_check_cfg_timeout(conn);
+		break;
+
+	default:
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for conn %p in %s()\n",
+			    rtcfg_event[event_id], conn, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int rtcfg_conn_state_ready(struct rtcfg_connection *conn,
+				  RTCFG_EVENT event_id, void *event_data)
+{
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+
+	switch (event_id) {
+	case RTCFG_TIMER:
+		rtcfg_conn_check_heartbeat(conn);
+		break;
+
+	case RTCFG_FRM_HEARTBEAT:
+		conn->last_frame = rtskb->time_stamp;
+		break;
+
+	default:
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for conn %p in %s()\n",
+			    rtcfg_event[event_id], conn, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int rtcfg_conn_state_dead(struct rtcfg_connection *conn,
+				 RTCFG_EVENT event_id, void *event_data)
+{
+	switch (event_id) {
+	case RTCFG_FRM_ANNOUNCE_NEW:
+		rtcfg_conn_recv_announce_new(conn, (struct rtskb *)event_data);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_REPLY:
+		/* Spec to-do: signal station that it is assumed to be dead
+		   (=> reboot command?) */
+
+	default:
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for conn %p in %s()\n",
+			    rtcfg_event[event_id], conn, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void rtcfg_conn_recv_announce_new(struct rtcfg_connection *conn,
+					 struct rtskb *rtskb)
+{
+	struct rtcfg_device *rtcfg_dev = &device[conn->ifindex];
+	struct rtcfg_frm_announce *announce_new;
+	int packets;
+
+	conn->last_frame = rtskb->time_stamp;
+
+	announce_new = (struct rtcfg_frm_announce *)rtskb->data;
+
+	conn->flags = announce_new->flags;
+	if (announce_new->burstrate < conn->burstrate)
+		conn->burstrate = announce_new->burstrate;
+
+	rtcfg_next_conn_state(conn, RTCFG_CONN_STAGE_1);
+
+	rtcfg_dev->stations_found++;
+	if ((conn->flags & _RTCFG_FLAG_READY) != 0)
+		rtcfg_dev->stations_ready++;
+
+	if (((conn->flags & _RTCFG_FLAG_STAGE_2_DATA) != 0) &&
+	    (conn->stage2_file != NULL)) {
+		packets = conn->burstrate - 1;
+
+		rtcfg_send_stage_2(conn, 1);
+
+		while ((conn->cfg_offs < conn->stage2_file->size) &&
+		       (packets > 0)) {
+			rtcfg_send_stage_2_frag(conn);
+			packets--;
+		}
+	} else {
+		rtcfg_send_stage_2(conn, 0);
+		conn->flags &= ~_RTCFG_FLAG_STAGE_2_DATA;
+	}
+}
+
+static void rtcfg_conn_check_cfg_timeout(struct rtcfg_connection *conn)
+{
+	struct rtcfg_device *rtcfg_dev;
+
+	if (!conn->cfg_timeout)
+		return;
+
+	if (rtdm_clock_read() >= conn->last_frame + conn->cfg_timeout) {
+		rtcfg_dev = &device[conn->ifindex];
+
+		rtcfg_dev->stations_found--;
+		if (conn->state == RTCFG_CONN_STAGE_2)
+			rtcfg_dev->spec.srv.clients_configured--;
+
+		rtcfg_next_conn_state(conn, RTCFG_CONN_SEARCHING);
+		conn->cfg_offs = 0;
+		conn->flags = 0;
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+		if (conn->addr_type == RTCFG_ADDR_IP) {
+			struct rtnet_device *rtdev;
+
+			/* MAC address yet unknown -> use broadcast address */
+			rtdev = rtdev_get_by_index(conn->ifindex);
+			if (rtdev == NULL)
+				return;
+			memcpy(conn->mac_addr, rtdev->broadcast, MAX_ADDR_LEN);
+			rtdev_dereference(rtdev);
+		}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+	}
+}
+
+static void rtcfg_conn_check_heartbeat(struct rtcfg_connection *conn)
+{
+	u64 timeout;
+	struct rtcfg_device *rtcfg_dev;
+
+	timeout = device[conn->ifindex].spec.srv.heartbeat_timeout;
+	if (!timeout)
+		return;
+
+	if (rtdm_clock_read() >= conn->last_frame + timeout) {
+		rtcfg_dev = &device[conn->ifindex];
+
+		rtcfg_dev->stations_found--;
+		rtcfg_dev->stations_ready--;
+		rtcfg_dev->spec.srv.clients_configured--;
+
+		rtcfg_send_dead_station(conn);
+
+		rtcfg_next_conn_state(conn, RTCFG_CONN_DEAD);
+		conn->cfg_offs = 0;
+		conn->flags = 0;
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+		if ((conn->addr_type & RTCFG_ADDR_MASK) == RTCFG_ADDR_IP) {
+			struct rtnet_device *rtdev =
+				rtdev_get_by_index(conn->ifindex);
+
+			rt_ip_route_del_host(conn->addr.ip_addr, rtdev);
+
+			if (rtdev == NULL)
+				return;
+
+			if (!(conn->addr_type & FLAG_ASSIGN_ADDR_BY_MAC))
+				/* MAC address yet unknown -> use broadcast address */
+				memcpy(conn->mac_addr, rtdev->broadcast,
+				       MAX_ADDR_LEN);
+
+			rtdev_dereference(rtdev);
+		}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+	}
+}
+++ linux-patched/drivers/xenomai/net/stack/rtcfg/rtcfg_timer.c	2022-03-21 12:58:30.041882603 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtcfg/rtcfg_module.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtcfg/rtcfg_timer.c
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include <rtdev.h>
+#include <rtcfg/rtcfg.h>
+#include <rtcfg/rtcfg_conn_event.h>
+#include <rtcfg/rtcfg_event.h>
+#include <rtcfg/rtcfg_frame.h>
+#include <rtcfg/rtcfg_timer.h>
+
+void rtcfg_timer(rtdm_timer_t *t)
+{
+	struct rtcfg_device *rtcfg_dev =
+		container_of(t, struct rtcfg_device, timer);
+
+	set_bit(FLAG_TIMER_PENDING, &rtcfg_dev->flags);
+	rtcfg_thread_signal();
+}
+
+void rtcfg_timer_run_one(int ifindex)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct list_head *entry;
+	struct rtcfg_connection *conn;
+	int last_stage_1 = -1;
+	int burst_credit;
+	int index;
+	int ret, shutdown;
+
+	shutdown = test_and_clear_bit(FLAG_TIMER_SHUTDOWN, &rtcfg_dev->flags);
+
+	if (!test_and_clear_bit(FLAG_TIMER_PENDING, &rtcfg_dev->flags) ||
+	    shutdown)
+		return;
+
+	rtdm_mutex_lock(&rtcfg_dev->dev_mutex);
+
+	if (rtcfg_dev->state == RTCFG_MAIN_SERVER_RUNNING) {
+		index = 0;
+		burst_credit = rtcfg_dev->burstrate;
+
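+		/*
+		 * Poll stations still searching (or marked dead) with stage 1
+		 * frames, at most burstrate frames per timer tick; all other
+		 * connections receive a timer event instead.
+		 */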
+		list_for_each (entry, &rtcfg_dev->spec.srv.conn_list) {
+			conn = list_entry(entry, struct rtcfg_connection,
+					  entry);
+
+			if ((conn->state == RTCFG_CONN_SEARCHING) ||
+			    (conn->state == RTCFG_CONN_DEAD)) {
+				if ((burst_credit > 0) &&
+				    (index > last_stage_1)) {
+					if ((ret = rtcfg_send_stage_1(conn)) <
+					    0) {
+						RTCFG_DEBUG(
+							2,
+							"RTcfg: error %d while sending "
+							"stage 1 frame\n",
+							ret);
+					}
+					burst_credit--;
+					last_stage_1 = index;
+				}
+			} else {
+				/* skip connection in history */
+				if (last_stage_1 == (index - 1))
+					last_stage_1 = index;
+
+				rtcfg_do_conn_event(conn, RTCFG_TIMER, NULL);
+			}
+			index++;
+		}
+
+		/* handle pointer overrun of the last stage 1 transmission */
+		if (last_stage_1 == (index - 1))
+			last_stage_1 = -1;
+	} else if (rtcfg_dev->state == RTCFG_MAIN_CLIENT_READY)
+		rtcfg_send_heartbeat(ifindex);
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+}
+
+void rtcfg_timer_run(void)
+{
+	int ifindex;
+
+	for (ifindex = 0; ifindex < MAX_RT_DEVICES; ifindex++)
+		rtcfg_timer_run_one(ifindex);
+}
+++ linux-patched/drivers/xenomai/net/stack/rtcfg/rtcfg_module.c	2022-03-21 12:58:30.033882681 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtcfg/rtcfg_file.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtcfg/rtcfg_module.c
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003, 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+
+#include <rtcfg/rtcfg_event.h>
+#include <rtcfg/rtcfg_frame.h>
+#include <rtcfg/rtcfg_ioctl.h>
+#include <rtcfg/rtcfg_proc.h>
+
+MODULE_LICENSE("GPL");
+
+int __init rtcfg_init(void)
+{
+	int ret;
+
+	printk("RTcfg: init real-time configuration distribution protocol\n");
+
+	ret = rtcfg_init_ioctls();
+	if (ret != 0)
+		goto error1;
+
+	rtcfg_init_state_machines();
+
+	ret = rtcfg_init_frames();
+	if (ret != 0)
+		goto error2;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	ret = rtcfg_init_proc();
+	if (ret != 0) {
+		rtcfg_cleanup_frames();
+		goto error2;
+	}
+#endif
+
+	return 0;
+
+error2:
+	rtcfg_cleanup_state_machines();
+	rtcfg_cleanup_ioctls();
+
+error1:
+	return ret;
+}
+
+void rtcfg_cleanup(void)
+{
+#ifdef CONFIG_XENO_OPT_VFILE
+	rtcfg_cleanup_proc();
+#endif
+	rtcfg_cleanup_frames();
+	rtcfg_cleanup_state_machines();
+	rtcfg_cleanup_ioctls();
+
+	printk("RTcfg: unloaded\n");
+}
+
+module_init(rtcfg_init);
+module_exit(rtcfg_cleanup);
+++ linux-patched/drivers/xenomai/net/stack/rtcfg/rtcfg_file.c	2022-03-21 12:58:30.025882759 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtcfg/rtcfg_client_event.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtcfg/rtcfg_file.c
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/init.h>
+
+#include <rtdm/driver.h>
+#include <rtcfg_chrdev.h>
+#include <rtcfg/rtcfg.h>
+#include <rtcfg/rtcfg_file.h>
+
+/* Note:
+ * We don't need any special lock protection while manipulating the
+ * rtcfg_files list. The list is only accessed through valid connections, and
+ * connections are already lock-protected.
+ */
+LIST_HEAD(rtcfg_files);
+
+struct rtcfg_file *rtcfg_get_file(const char *filename)
+{
+	struct list_head *entry;
+	struct rtcfg_file *file;
+
+	RTCFG_DEBUG(4, "RTcfg: looking for file %s\n", filename);
+
+	list_for_each (entry, &rtcfg_files) {
+		file = list_entry(entry, struct rtcfg_file, entry);
+
+		if (strcmp(file->name, filename) == 0) {
+			file->ref_count++;
+
+			RTCFG_DEBUG(4,
+				    "RTcfg: reusing file entry, now %d users\n",
+				    file->ref_count);
+
+			return file;
+		}
+	}
+
+	return NULL;
+}
+
+void rtcfg_add_file(struct rtcfg_file *file)
+{
+	RTCFG_DEBUG(4, "RTcfg: adding file %s to list\n", file->name);
+
+	file->ref_count = 1;
+	list_add_tail(&file->entry, &rtcfg_files);
+}
+
+int rtcfg_release_file(struct rtcfg_file *file)
+{
+	if (--file->ref_count == 0) {
+		RTCFG_DEBUG(4, "RTcfg: removing file %s from list\n",
+			    file->name);
+
+		list_del(&file->entry);
+	}
+
+	return file->ref_count;
+}
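+
+/*
+ * Usage sketch (illustrative only; the caller-side allocation and the
+ * "filename" variable are hypothetical): look up an existing entry first
+ * and only add a freshly allocated one when the lookup fails, then drop
+ * the reference again when the connection no longer needs the file:
+ *
+ *	file = rtcfg_get_file(filename);
+ *	if (file == NULL) {
+ *		file = kmalloc(sizeof(*file), GFP_KERNEL);
+ *		file->name = filename;
+ *		rtcfg_add_file(file);
+ *	}
+ *	...
+ *	if (rtcfg_release_file(file) == 0)
+ *		kfree(file);
+ */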
+++ linux-patched/drivers/xenomai/net/stack/rtcfg/rtcfg_client_event.c	2022-03-21 12:58:30.018882828 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtwlan.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtcfg/rtcfg_client_event.c
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <ipv4/route.h>
+#include <rtcfg/rtcfg.h>
+#include <rtcfg/rtcfg_event.h>
+#include <rtcfg/rtcfg_frame.h>
+#include <rtcfg/rtcfg_timer.h>
+
+static int rtcfg_client_get_frag(int ifindex, struct rt_proc_call *call);
+static void rtcfg_client_detach(int ifindex, struct rt_proc_call *call);
+static void rtcfg_client_recv_stage_1(int ifindex, struct rtskb *rtskb);
+static int rtcfg_client_recv_announce(int ifindex, struct rtskb *rtskb);
+static void rtcfg_client_recv_stage_2_cfg(int ifindex, struct rtskb *rtskb);
+static void rtcfg_client_recv_stage_2_frag(int ifindex, struct rtskb *rtskb);
+static int rtcfg_client_recv_ready(int ifindex, struct rtskb *rtskb);
+static void rtcfg_client_recv_dead_station(int ifindex, struct rtskb *rtskb);
+static void rtcfg_client_update_server(int ifindex, struct rtskb *rtskb);
+
+/*** Client States ***/
+
+int rtcfg_main_state_client_0(int ifindex, RTCFG_EVENT event_id,
+			      void *event_data)
+{
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+	struct rt_proc_call *call = (struct rt_proc_call *)event_data;
+
+	switch (event_id) {
+	case RTCFG_CMD_DETACH:
+		rtcfg_client_detach(ifindex, call);
+		break;
+
+	case RTCFG_FRM_STAGE_1_CFG:
+		rtcfg_client_recv_stage_1(ifindex, rtskb);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_NEW:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0)
+			rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_READY:
+		if (rtcfg_client_recv_ready(ifindex, rtskb) == 0)
+			rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		break;
+
+	default:
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
+			    rtcfg_event[event_id], ifindex, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int rtcfg_main_state_client_1(int ifindex, RTCFG_EVENT event_id,
+			      void *event_data)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+	struct rt_proc_call *call = (struct rt_proc_call *)event_data;
+	struct rtcfg_cmd *cmd_event;
+	int ret;
+
+	switch (event_id) {
+	case RTCFG_CMD_CLIENT:
+		/* second trial (buffer was probably too small) */
+		rtcfg_queue_blocking_call(ifindex,
+					  (struct rt_proc_call *)event_data);
+
+		rtcfg_next_main_state(ifindex, RTCFG_MAIN_CLIENT_0);
+
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		return -CALL_PENDING;
+
+	case RTCFG_CMD_ANNOUNCE:
+		cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+
+		if (cmd_event->args.announce.burstrate == 0) {
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+			return -EINVAL;
+		}
+
+		rtcfg_queue_blocking_call(ifindex,
+					  (struct rt_proc_call *)event_data);
+
+		if (cmd_event->args.announce.flags & _RTCFG_FLAG_STAGE_2_DATA)
+			set_bit(RTCFG_FLAG_STAGE_2_DATA, &rtcfg_dev->flags);
+		if (cmd_event->args.announce.flags & _RTCFG_FLAG_READY)
+			set_bit(RTCFG_FLAG_READY, &rtcfg_dev->flags);
+		if (cmd_event->args.announce.burstrate < rtcfg_dev->burstrate)
+			rtcfg_dev->burstrate =
+				cmd_event->args.announce.burstrate;
+
+		rtcfg_next_main_state(ifindex, RTCFG_MAIN_CLIENT_ANNOUNCED);
+
+		ret = rtcfg_send_announce_new(ifindex);
+		if (ret < 0) {
+			rtcfg_dequeue_blocking_call(ifindex);
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+			return ret;
+		}
+
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		return -CALL_PENDING;
+
+	case RTCFG_CMD_DETACH:
+		rtcfg_client_detach(ifindex, call);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_NEW:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0) {
+			rtcfg_send_announce_reply(
+				ifindex, rtskb->mac.ethernet->h_source);
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		}
+
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_REPLY:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0)
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_READY:
+		if (rtcfg_client_recv_ready(ifindex, rtskb) == 0)
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		break;
+
+	case RTCFG_FRM_STAGE_1_CFG:
+		/* ignore */
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		kfree_rtskb(rtskb);
+		break;
+
+	default:
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
+			    rtcfg_event[event_id], ifindex, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int rtcfg_main_state_client_announced(int ifindex, RTCFG_EVENT event_id,
+				      void *event_data)
+{
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+	struct rt_proc_call *call = (struct rt_proc_call *)event_data;
+	struct rtcfg_device *rtcfg_dev;
+
+	switch (event_id) {
+	case RTCFG_CMD_ANNOUNCE:
+		return rtcfg_client_get_frag(ifindex, call);
+
+	case RTCFG_CMD_DETACH:
+		rtcfg_client_detach(ifindex, call);
+		break;
+
+	case RTCFG_FRM_STAGE_2_CFG:
+		rtcfg_client_recv_stage_2_cfg(ifindex, rtskb);
+		break;
+
+	case RTCFG_FRM_STAGE_2_CFG_FRAG:
+		rtcfg_client_recv_stage_2_frag(ifindex, rtskb);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_NEW:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0) {
+			rtcfg_send_announce_reply(
+				ifindex, rtskb->mac.ethernet->h_source);
+
+			rtcfg_dev = &device[ifindex];
+			if (rtcfg_dev->stations_found ==
+			    rtcfg_dev->other_stations)
+				rtcfg_next_main_state(
+					ifindex, RTCFG_MAIN_CLIENT_ALL_KNOWN);
+
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		}
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_REPLY:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0) {
+			rtcfg_dev = &device[ifindex];
+			if (rtcfg_dev->stations_found ==
+			    rtcfg_dev->other_stations)
+				rtcfg_next_main_state(
+					ifindex, RTCFG_MAIN_CLIENT_ALL_KNOWN);
+
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		}
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_READY:
+		if (rtcfg_client_recv_ready(ifindex, rtskb) == 0)
+			rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		break;
+
+	case RTCFG_FRM_STAGE_1_CFG:
+		/* ignore */
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		kfree_rtskb(rtskb);
+		break;
+
+	default:
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
+			    rtcfg_event[event_id], ifindex, __FUNCTION__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int rtcfg_main_state_client_all_known(int ifindex, RTCFG_EVENT event_id,
+				      void *event_data)
+{
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+	struct rt_proc_call *call = (struct rt_proc_call *)event_data;
+
+	switch (event_id) {
+	case RTCFG_CMD_ANNOUNCE:
+		return rtcfg_client_get_frag(ifindex, call);
+
+	case RTCFG_CMD_DETACH:
+		rtcfg_client_detach(ifindex, call);
+		break;
+
+	case RTCFG_FRM_STAGE_2_CFG_FRAG:
+		rtcfg_client_recv_stage_2_frag(ifindex, rtskb);
+		break;
+
+	case RTCFG_FRM_READY:
+		if (rtcfg_client_recv_ready(ifindex, rtskb) == 0)
+			rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_NEW:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0) {
+			rtcfg_send_announce_reply(
+				ifindex, rtskb->mac.ethernet->h_source);
+			rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		}
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_DEAD_STATION:
+		rtcfg_client_recv_dead_station(ifindex, rtskb);
+		break;
+
+	case RTCFG_FRM_STAGE_1_CFG:
+		/* ignore */
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		kfree_rtskb(rtskb);
+		break;
+
+	default:
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
+			    rtcfg_event[event_id], ifindex, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int rtcfg_main_state_client_all_frames(int ifindex, RTCFG_EVENT event_id,
+				       void *event_data)
+{
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+	struct rt_proc_call *call = (struct rt_proc_call *)event_data;
+	struct rtcfg_device *rtcfg_dev;
+
+	switch (event_id) {
+	case RTCFG_CMD_DETACH:
+		rtcfg_client_detach(ifindex, call);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_NEW:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0) {
+			rtcfg_send_announce_reply(
+				ifindex, rtskb->mac.ethernet->h_source);
+
+			rtcfg_dev = &device[ifindex];
+			if (rtcfg_dev->stations_found ==
+			    rtcfg_dev->other_stations) {
+				rtcfg_complete_cmd(ifindex, RTCFG_CMD_ANNOUNCE,
+						   0);
+
+				rtcfg_next_main_state(
+					ifindex,
+					test_bit(RTCFG_FLAG_READY,
+						 &rtcfg_dev->flags) ?
+						RTCFG_MAIN_CLIENT_READY :
+						RTCFG_MAIN_CLIENT_2);
+			}
+
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		}
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_REPLY:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0) {
+			rtcfg_dev = &device[ifindex];
+			if (rtcfg_dev->stations_found ==
+			    rtcfg_dev->other_stations) {
+				rtcfg_complete_cmd(ifindex, RTCFG_CMD_ANNOUNCE,
+						   0);
+
+				rtcfg_next_main_state(
+					ifindex,
+					test_bit(RTCFG_FLAG_READY,
+						 &rtcfg_dev->flags) ?
+						RTCFG_MAIN_CLIENT_READY :
+						RTCFG_MAIN_CLIENT_2);
+			}
+
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		}
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_READY:
+		if (rtcfg_client_recv_ready(ifindex, rtskb) == 0)
+			rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		break;
+
+	case RTCFG_FRM_DEAD_STATION:
+		rtcfg_client_recv_dead_station(ifindex, rtskb);
+		break;
+
+	case RTCFG_FRM_STAGE_1_CFG:
+		/* ignore */
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		kfree_rtskb(rtskb);
+		break;
+
+	default:
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
+			    rtcfg_event[event_id], ifindex, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int rtcfg_main_state_client_2(int ifindex, RTCFG_EVENT event_id,
+			      void *event_data)
+{
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+	struct rt_proc_call *call = (struct rt_proc_call *)event_data;
+	struct rtcfg_device *rtcfg_dev;
+
+	switch (event_id) {
+	case RTCFG_CMD_READY:
+		rtcfg_dev = &device[ifindex];
+
+		if (rtcfg_dev->stations_ready == rtcfg_dev->other_stations)
+			rtpc_complete_call(call, 0);
+		else
+			rtcfg_queue_blocking_call(ifindex, call);
+
+		rtcfg_next_main_state(ifindex, RTCFG_MAIN_CLIENT_READY);
+
+		if (!test_and_set_bit(RTCFG_FLAG_READY, &rtcfg_dev->flags))
+			rtcfg_send_ready(ifindex);
+
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		return -CALL_PENDING;
+
+	case RTCFG_CMD_DETACH:
+		rtcfg_client_detach(ifindex, call);
+		break;
+
+	case RTCFG_FRM_READY:
+		if (rtcfg_client_recv_ready(ifindex, rtskb) == 0)
+			rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_NEW:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0) {
+			rtcfg_send_announce_reply(
+				ifindex, rtskb->mac.ethernet->h_source);
+			rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		}
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_DEAD_STATION:
+		rtcfg_client_recv_dead_station(ifindex, rtskb);
+		break;
+
+	case RTCFG_FRM_STAGE_1_CFG:
+		/* ignore */
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		kfree_rtskb(rtskb);
+		break;
+
+	default:
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
+			    rtcfg_event[event_id], ifindex, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int rtcfg_main_state_client_ready(int ifindex, RTCFG_EVENT event_id,
+				  void *event_data)
+{
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+	struct rt_proc_call *call = (struct rt_proc_call *)event_data;
+	struct rtcfg_device *rtcfg_dev;
+
+	switch (event_id) {
+	case RTCFG_CMD_DETACH:
+		rtcfg_client_detach(ifindex, call);
+		break;
+
+	case RTCFG_FRM_READY:
+		if (rtcfg_client_recv_ready(ifindex, rtskb) == 0) {
+			rtcfg_dev = &device[ifindex];
+			if (rtcfg_dev->stations_ready ==
+			    rtcfg_dev->other_stations)
+				rtcfg_complete_cmd(ifindex, RTCFG_CMD_READY, 0);
+
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		}
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_NEW:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0) {
+			rtcfg_send_announce_reply(
+				ifindex, rtskb->mac.ethernet->h_source);
+			rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		}
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_DEAD_STATION:
+		rtcfg_client_recv_dead_station(ifindex, rtskb);
+		break;
+
+	case RTCFG_FRM_STAGE_1_CFG:
+		rtcfg_client_update_server(ifindex, rtskb);
+		break;
+
+	default:
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
+			    rtcfg_event[event_id], ifindex, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*** Client Command Event Handlers ***/
+
+static int rtcfg_client_get_frag(int ifindex, struct rt_proc_call *call)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+
+	if (test_bit(RTCFG_FLAG_STAGE_2_DATA, &rtcfg_dev->flags) == 0) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		return -EINVAL;
+	}
+
+	rtcfg_send_ack(ifindex);
+
+	if (rtcfg_dev->spec.clt.cfg_offs >= rtcfg_dev->spec.clt.cfg_len) {
+		if (rtcfg_dev->stations_found == rtcfg_dev->other_stations) {
+			rtpc_complete_call(call, 0);
+
+			rtcfg_next_main_state(ifindex,
+					      test_bit(RTCFG_FLAG_READY,
+						       &rtcfg_dev->flags) ?
+						      RTCFG_MAIN_CLIENT_READY :
+						      RTCFG_MAIN_CLIENT_2);
+		} else {
+			rtcfg_next_main_state(ifindex,
+					      RTCFG_MAIN_CLIENT_ALL_FRAMES);
+			rtcfg_queue_blocking_call(ifindex, call);
+		}
+	} else
+		rtcfg_queue_blocking_call(ifindex, call);
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	return -CALL_PENDING;
+}
+
+/* releases rtcfg_dev->dev_mutex on return */
+static void rtcfg_client_detach(int ifindex, struct rt_proc_call *call)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct rtcfg_cmd *cmd_event;
+
+	cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+
+	cmd_event->args.detach.station_addr_list =
+		rtcfg_dev->spec.clt.station_addr_list;
+	cmd_event->args.detach.stage2_chain = rtcfg_dev->spec.clt.stage2_chain;
+
+	while (1) {
+		call = rtcfg_dequeue_blocking_call(ifindex);
+		if (call == NULL)
+			break;
+
+		rtpc_complete_call(call, -ENODEV);
+	}
+
+	if (test_and_clear_bit(FLAG_TIMER_STARTED, &rtcfg_dev->flags))
+		rtdm_timer_destroy(&rtcfg_dev->timer);
+	rtcfg_reset_device(ifindex);
+
+	rtcfg_next_main_state(cmd_event->internal.data.ifindex, RTCFG_MAIN_OFF);
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+}
+
+/*** Client Frame Event Handlers ***/
+
+static void rtcfg_client_recv_stage_1(int ifindex, struct rtskb *rtskb)
+{
+	struct rtcfg_frm_stage_1_cfg *stage_1_cfg;
+	struct rt_proc_call *call;
+	struct rtcfg_cmd *cmd_event;
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	u8 addr_type;
+	int ret;
+
+	if (rtskb->len < sizeof(struct rtcfg_frm_stage_1_cfg)) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: received invalid stage_1_cfg frame\n");
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	stage_1_cfg = (struct rtcfg_frm_stage_1_cfg *)rtskb->data;
+	__rtskb_pull(rtskb, sizeof(struct rtcfg_frm_stage_1_cfg));
+
+	addr_type = stage_1_cfg->addr_type;
+
+	switch (stage_1_cfg->addr_type) {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	case RTCFG_ADDR_IP: {
+		struct rtnet_device *rtdev, *tmp;
+		u32 daddr, saddr, mask, bcast;
+
+		if (rtskb->len < sizeof(struct rtcfg_frm_stage_1_cfg) +
+					 2 * RTCFG_ADDRSIZE_IP) {
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+			RTCFG_DEBUG(1, "RTcfg: received invalid stage_1_cfg "
+				       "frame\n");
+			kfree_rtskb(rtskb);
+			return;
+		}
+
+		rtdev = rtskb->rtdev;
+
+		memcpy(&daddr, stage_1_cfg->client_addr, 4);
+		stage_1_cfg =
+			(struct rtcfg_frm_stage_1_cfg *)(((u8 *)stage_1_cfg) +
+							 RTCFG_ADDRSIZE_IP);
+
+		memcpy(&saddr, stage_1_cfg->server_addr, 4);
+		stage_1_cfg =
+			(struct rtcfg_frm_stage_1_cfg *)(((u8 *)stage_1_cfg) +
+							 RTCFG_ADDRSIZE_IP);
+
+		__rtskb_pull(rtskb, 2 * RTCFG_ADDRSIZE_IP);
+
+		/* Broadcast: IP is used to address client */
+		if (rtskb->pkt_type == PACKET_BROADCAST) {
+			/* directed to us? */
+			if (daddr != rtdev->local_ip) {
+				rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+				kfree_rtskb(rtskb);
+				return;
+			}
+
+			/* Unicast: IP address is assigned by the server */
+		} else {
+			/* default netmask */
+			if (ntohl(daddr) <= 0x7FFFFFFF) /* 127.255.255.255  */
+				mask = 0x000000FF; /* 255.0.0.0        */
+			else if (ntohl(daddr) <=
+				 0xBFFFFFFF) /* 191.255.255.255  */
+				mask = 0x0000FFFF; /* 255.255.0.0      */
+			else
+				mask = 0x00FFFFFF; /* 255.255.255.0    */
+			bcast = daddr | (~mask);
+
+			rt_ip_route_del_all(rtdev); /* cleanup routing table */
+
+			rtdev->local_ip = daddr;
+			rtdev->broadcast_ip = bcast;
+
+			if ((tmp = rtdev_get_loopback()) != NULL) {
+				rt_ip_route_add_host(daddr, tmp->dev_addr, tmp);
+				rtdev_dereference(tmp);
+			}
+
+			if (rtdev->flags & IFF_BROADCAST)
+				rt_ip_route_add_host(bcast, rtdev->broadcast,
+						     rtdev);
+		}
+
+		/* update routing table */
+		rt_ip_route_add_host(saddr, rtskb->mac.ethernet->h_source,
+				     rtdev);
+
+		rtcfg_dev->spec.clt.srv_addr.ip_addr = saddr;
+		break;
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	case RTCFG_ADDR_MAC:
+		/* nothing to do */
+		break;
+
+	default:
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown addr_type %d in %s()\n",
+			    stage_1_cfg->addr_type, __FUNCTION__);
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	rtcfg_dev->spec.clt.addr_type = addr_type;
+
+	/* Ethernet-specific */
+	memcpy(rtcfg_dev->spec.clt.srv_mac_addr, rtskb->mac.ethernet->h_source,
+	       ETH_ALEN);
+
+	rtcfg_dev->burstrate = stage_1_cfg->burstrate;
+
+	rtcfg_next_main_state(ifindex, RTCFG_MAIN_CLIENT_1);
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	while (1) {
+		call = rtcfg_dequeue_blocking_call(ifindex);
+		if (call == NULL)
+			break;
+
+		cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+
+		if (cmd_event->internal.data.event_id == RTCFG_CMD_CLIENT) {
+			ret = 0;
+
+			/* note: only the first pending call gets data */
+			if ((rtskb != NULL) &&
+			    (cmd_event->args.client.buffer_size > 0)) {
+				ret = ntohs(stage_1_cfg->cfg_len);
+
+				cmd_event->args.client.rtskb = rtskb;
+				rtskb = NULL;
+			}
+		} else
+			ret = -EINVAL;
+
+		rtpc_complete_call(call, ret);
+	}
+
+	if (rtskb)
+		kfree_rtskb(rtskb);
+}
+
+static int rtcfg_add_to_station_list(struct rtcfg_device *rtcfg_dev,
+				     u8 *mac_addr, u8 flags)
+{
+	if (rtcfg_dev->stations_found == rtcfg_dev->spec.clt.max_stations) {
+		RTCFG_DEBUG(
+			1, "RTcfg: insufficient memory for storing new station "
+			   "address\n");
+		return -ENOMEM;
+	}
+
+	/* Ethernet-specific! */
+	memcpy(&rtcfg_dev->spec.clt.station_addr_list[rtcfg_dev->stations_found]
+			.mac_addr,
+	       mac_addr, ETH_ALEN);
+
+	rtcfg_dev->spec.clt.station_addr_list[rtcfg_dev->stations_found].flags =
+		flags;
+
+	rtcfg_dev->stations_found++;
+	if ((flags & _RTCFG_FLAG_READY) != 0)
+		rtcfg_dev->stations_ready++;
+
+	return 0;
+}
+
+/* Notes:
+ *  o rtcfg_client_recv_announce does not release the passed rtskb.
+ *  o On success, rtcfg_client_recv_announce returns without releasing the
+ *    device lock.
+ */
+static int rtcfg_client_recv_announce(int ifindex, struct rtskb *rtskb)
+{
+	struct rtcfg_frm_announce *announce_frm;
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	u32 i;
+	u32 announce_frm_addr;
+	int result;
+
+	announce_frm = (struct rtcfg_frm_announce *)rtskb->data;
+
+	if (rtskb->len < sizeof(struct rtcfg_frm_announce)) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1,
+			    "RTcfg: received invalid announce frame (id: %d)\n",
+			    announce_frm->head.id);
+		return -EINVAL;
+	}
+
+	switch (announce_frm->addr_type) {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	case RTCFG_ADDR_IP:
+		if (rtskb->len <
+		    sizeof(struct rtcfg_frm_announce) + RTCFG_ADDRSIZE_IP) {
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+			RTCFG_DEBUG(1,
+				    "RTcfg: received invalid announce frame "
+				    "(id: %d)\n",
+				    announce_frm->head.id);
+			return -EINVAL;
+		}
+
+		memcpy(&announce_frm_addr, announce_frm->addr, 4);
+
+		/* update routing table */
+		rt_ip_route_add_host(announce_frm_addr,
+				     rtskb->mac.ethernet->h_source,
+				     rtskb->rtdev);
+
+		announce_frm =
+			(struct rtcfg_frm_announce *)(((u8 *)announce_frm) +
+						      RTCFG_ADDRSIZE_IP);
+
+		break;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	case RTCFG_ADDR_MAC:
+		/* nothing to do */
+		break;
+
+	default:
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown addr_type %d in %s()\n",
+			    announce_frm->addr_type, __FUNCTION__);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < rtcfg_dev->stations_found; i++)
+		/* Ethernet-specific! */
+		if (memcmp(rtcfg_dev->spec.clt.station_addr_list[i].mac_addr,
+			   rtskb->mac.ethernet->h_source, ETH_ALEN) == 0)
+			return 0;
+
+	result = rtcfg_add_to_station_list(
+		rtcfg_dev, rtskb->mac.ethernet->h_source, announce_frm->flags);
+	if (result < 0)
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	return result;
+}
+
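+/*
+ * Append a received stage 2 fragment to the client's reassembly chain.
+ * Once the complete configuration has arrived (cfg_offs >= cfg_len) or a
+ * full burst of burstrate fragments has been queued, the accumulated
+ * chain is handed to the first pending call (normally the blocked
+ * RTCFG_CMD_ANNOUNCE request) and the burst counter is reset.
+ */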
+static void rtcfg_client_queue_frag(int ifindex, struct rtskb *rtskb,
+				    size_t data_len)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct rt_proc_call *call;
+	struct rtcfg_cmd *cmd_event;
+	int result;
+
+	rtskb_trim(rtskb, data_len);
+
+	if (rtcfg_dev->spec.clt.stage2_chain == NULL)
+		rtcfg_dev->spec.clt.stage2_chain = rtskb;
+	else {
+		rtcfg_dev->spec.clt.stage2_chain->chain_end->next = rtskb;
+		rtcfg_dev->spec.clt.stage2_chain->chain_end = rtskb;
+	}
+
+	rtcfg_dev->spec.clt.cfg_offs += data_len;
+	rtcfg_dev->spec.clt.chain_len += data_len;
+
+	if ((rtcfg_dev->spec.clt.cfg_offs >= rtcfg_dev->spec.clt.cfg_len) ||
+	    (++rtcfg_dev->spec.clt.packet_counter == rtcfg_dev->burstrate)) {
+		while (1) {
+			call = rtcfg_dequeue_blocking_call(ifindex);
+			if (call == NULL)
+				break;
+
+			cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+
+			result = 0;
+
+			/* note: only the first pending call gets data */
+			if (rtcfg_dev->spec.clt.stage2_chain != NULL) {
+				result = rtcfg_dev->spec.clt.chain_len;
+				cmd_event->args.announce.rtskb =
+					rtcfg_dev->spec.clt.stage2_chain;
+				rtcfg_dev->spec.clt.stage2_chain = NULL;
+			}
+
+			rtpc_complete_call(call,
+					   (cmd_event->internal.data.event_id ==
+					    RTCFG_CMD_ANNOUNCE) ?
+						   result :
+						   -EINVAL);
+		}
+
+		rtcfg_dev->spec.clt.packet_counter = 0;
+		rtcfg_dev->spec.clt.chain_len = 0;
+	}
+}
+
+static void rtcfg_client_recv_stage_2_cfg(int ifindex, struct rtskb *rtskb)
+{
+	struct rtcfg_frm_stage_2_cfg *stage_2_cfg;
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	size_t data_len;
+	int ret;
+
+	if (rtskb->len < sizeof(struct rtcfg_frm_stage_2_cfg)) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: received invalid stage_2_cfg frame\n");
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	stage_2_cfg = (struct rtcfg_frm_stage_2_cfg *)rtskb->data;
+	__rtskb_pull(rtskb, sizeof(struct rtcfg_frm_stage_2_cfg));
+
+	if (stage_2_cfg->heartbeat_period) {
+		ret = rtdm_timer_init(&rtcfg_dev->timer, rtcfg_timer,
+				      "rtcfg-timer");
+		if (ret == 0) {
+			ret = rtdm_timer_start(
+				&rtcfg_dev->timer, XN_INFINITE,
+				(nanosecs_rel_t)ntohs(
+					stage_2_cfg->heartbeat_period) *
+					1000000,
+				RTDM_TIMERMODE_RELATIVE);
+			if (ret < 0)
+				rtdm_timer_destroy(&rtcfg_dev->timer);
+		}
+
+		if (ret < 0)
+			/*ERRMSG*/ rtdm_printk(
+				"RTcfg: unable to create timer task\n");
+		else
+			set_bit(FLAG_TIMER_STARTED, &rtcfg_dev->flags);
+	}
+
+	/* add server to station list */
+	if (rtcfg_add_to_station_list(rtcfg_dev, rtskb->mac.ethernet->h_source,
+				      stage_2_cfg->flags) < 0) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unable to process stage_2_cfg frame\n");
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	rtcfg_dev->other_stations = ntohl(stage_2_cfg->stations);
+	rtcfg_dev->spec.clt.cfg_len = ntohl(stage_2_cfg->cfg_len);
+	data_len = MIN(rtcfg_dev->spec.clt.cfg_len, rtskb->len);
+
+	if (test_bit(RTCFG_FLAG_STAGE_2_DATA, &rtcfg_dev->flags) &&
+	    (data_len > 0)) {
+		rtcfg_client_queue_frag(ifindex, rtskb, data_len);
+		rtskb = NULL;
+
+		if (rtcfg_dev->stations_found == rtcfg_dev->other_stations)
+			rtcfg_next_main_state(ifindex,
+					      RTCFG_MAIN_CLIENT_ALL_KNOWN);
+	} else {
+		if (rtcfg_dev->stations_found == rtcfg_dev->other_stations) {
+			rtcfg_complete_cmd(ifindex, RTCFG_CMD_ANNOUNCE, 0);
+
+			rtcfg_next_main_state(ifindex,
+					      test_bit(RTCFG_FLAG_READY,
+						       &rtcfg_dev->flags) ?
+						      RTCFG_MAIN_CLIENT_READY :
+						      RTCFG_MAIN_CLIENT_2);
+		} else
+			rtcfg_next_main_state(ifindex,
+					      RTCFG_MAIN_CLIENT_ALL_FRAMES);
+
+		rtcfg_send_ack(ifindex);
+	}
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	if (rtskb != NULL)
+		kfree_rtskb(rtskb);
+}
+
+static void rtcfg_client_recv_stage_2_frag(int ifindex, struct rtskb *rtskb)
+{
+	struct rtcfg_frm_stage_2_cfg_frag *stage_2_frag;
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	size_t data_len;
+
+	if (rtskb->len < sizeof(struct rtcfg_frm_stage_2_cfg_frag)) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1,
+			    "RTcfg: received invalid stage_2_cfg_frag frame\n");
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	stage_2_frag = (struct rtcfg_frm_stage_2_cfg_frag *)rtskb->data;
+	__rtskb_pull(rtskb, sizeof(struct rtcfg_frm_stage_2_cfg_frag));
+
+	data_len =
+		MIN(rtcfg_dev->spec.clt.cfg_len - rtcfg_dev->spec.clt.cfg_offs,
+		    rtskb->len);
+
+	if (test_bit(RTCFG_FLAG_STAGE_2_DATA, &rtcfg_dev->flags) == 0) {
+		RTCFG_DEBUG(1, "RTcfg: unexpected stage 2 fragment, we did not "
+			       "request any data!\n");
+
+	} else if (rtcfg_dev->spec.clt.cfg_offs !=
+		   ntohl(stage_2_frag->frag_offs)) {
+		RTCFG_DEBUG(1,
+			    "RTcfg: unexpected stage 2 fragment (expected: %d, "
+			    "received: %d)\n",
+			    rtcfg_dev->spec.clt.cfg_offs,
+			    ntohl(stage_2_frag->frag_offs));
+
+		rtcfg_send_ack(ifindex);
+		rtcfg_dev->spec.clt.packet_counter = 0;
+	} else {
+		rtcfg_client_queue_frag(ifindex, rtskb, data_len);
+		rtskb = NULL;
+	}
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	if (rtskb != NULL)
+		kfree_rtskb(rtskb);
+}
+
+/* Notes:
+ *  o On success, rtcfg_client_recv_ready returns without releasing the
+ *    device lock.
+ */
+static int rtcfg_client_recv_ready(int ifindex, struct rtskb *rtskb)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	u32 i;
+
+	if (rtskb->len < sizeof(struct rtcfg_frm_simple)) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: received invalid ready frame\n");
+		kfree_rtskb(rtskb);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < rtcfg_dev->stations_found; i++)
+		/* Ethernet-specific! */
+		if (memcmp(rtcfg_dev->spec.clt.station_addr_list[i].mac_addr,
+			   rtskb->mac.ethernet->h_source, ETH_ALEN) == 0) {
+			if ((rtcfg_dev->spec.clt.station_addr_list[i].flags &
+			     _RTCFG_FLAG_READY) == 0) {
+				rtcfg_dev->spec.clt.station_addr_list[i].flags |=
+					_RTCFG_FLAG_READY;
+				rtcfg_dev->stations_ready++;
+			}
+			break;
+		}
+
+	kfree_rtskb(rtskb);
+	return 0;
+}
+
+static void rtcfg_client_recv_dead_station(int ifindex, struct rtskb *rtskb)
+{
+	struct rtcfg_frm_dead_station *dead_station_frm;
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	u32 i;
+
+	dead_station_frm = (struct rtcfg_frm_dead_station *)rtskb->data;
+
+	if (rtskb->len < sizeof(struct rtcfg_frm_dead_station)) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: received invalid dead station frame\n");
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	switch (dead_station_frm->addr_type) {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	case RTCFG_ADDR_IP: {
+		u32 ip;
+
+		if (rtskb->len <
+		    sizeof(struct rtcfg_frm_dead_station) + RTCFG_ADDRSIZE_IP) {
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+			RTCFG_DEBUG(
+				1,
+				"RTcfg: received invalid dead station frame\n");
+			kfree_rtskb(rtskb);
+			return;
+		}
+
+		memcpy(&ip, dead_station_frm->logical_addr, 4);
+
+		/* only delete remote IPs from routing table */
+		if (rtskb->rtdev->local_ip != ip)
+			rt_ip_route_del_host(ip, rtskb->rtdev);
+
+		dead_station_frm = (struct rtcfg_frm_dead_station
+					    *)(((u8 *)dead_station_frm) +
+					       RTCFG_ADDRSIZE_IP);
+
+		break;
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	case RTCFG_ADDR_MAC:
+		/* nothing to do */
+		break;
+
+	default:
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown addr_type %d in %s()\n",
+			    dead_station_frm->addr_type, __FUNCTION__);
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	for (i = 0; i < rtcfg_dev->stations_found; i++)
+		/* Ethernet-specific! */
+		if (memcmp(rtcfg_dev->spec.clt.station_addr_list[i].mac_addr,
+			   dead_station_frm->physical_addr, ETH_ALEN) == 0) {
+			if ((rtcfg_dev->spec.clt.station_addr_list[i].flags &
+			     _RTCFG_FLAG_READY) != 0)
+				rtcfg_dev->stations_ready--;
+
+			rtcfg_dev->stations_found--;
+			memmove(&rtcfg_dev->spec.clt.station_addr_list[i],
+				&rtcfg_dev->spec.clt.station_addr_list[i + 1],
+				sizeof(struct rtcfg_station) *
+					(rtcfg_dev->stations_found - i));
+
+			if (rtcfg_dev->state == RTCFG_MAIN_CLIENT_ALL_KNOWN)
+				rtcfg_next_main_state(
+					ifindex, RTCFG_MAIN_CLIENT_ANNOUNCED);
+			break;
+		}
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	kfree_rtskb(rtskb);
+}
+
+static void rtcfg_client_update_server(int ifindex, struct rtskb *rtskb)
+{
+	struct rtcfg_frm_stage_1_cfg *stage_1_cfg;
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+
+	if (rtskb->len < sizeof(struct rtcfg_frm_stage_1_cfg)) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: received invalid stage_1_cfg frame\n");
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	stage_1_cfg = (struct rtcfg_frm_stage_1_cfg *)rtskb->data;
+	__rtskb_pull(rtskb, sizeof(struct rtcfg_frm_stage_1_cfg));
+
+	switch (stage_1_cfg->addr_type) {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	case RTCFG_ADDR_IP: {
+		struct rtnet_device *rtdev;
+		u32 daddr, saddr;
+
+		if (rtskb->len < sizeof(struct rtcfg_frm_stage_1_cfg) +
+					 2 * RTCFG_ADDRSIZE_IP) {
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+			RTCFG_DEBUG(1, "RTcfg: received invalid stage_1_cfg "
+				       "frame\n");
+			kfree_rtskb(rtskb);
+			break;
+		}
+
+		rtdev = rtskb->rtdev;
+
+		memcpy(&daddr, stage_1_cfg->client_addr, 4);
+		stage_1_cfg =
+			(struct rtcfg_frm_stage_1_cfg *)(((u8 *)stage_1_cfg) +
+							 RTCFG_ADDRSIZE_IP);
+
+		memcpy(&saddr, stage_1_cfg->server_addr, 4);
+		stage_1_cfg =
+			(struct rtcfg_frm_stage_1_cfg *)(((u8 *)stage_1_cfg) +
+							 RTCFG_ADDRSIZE_IP);
+
+		__rtskb_pull(rtskb, 2 * RTCFG_ADDRSIZE_IP);
+
+		/* directed to us? */
+		if ((rtskb->pkt_type == PACKET_BROADCAST) &&
+		    (daddr != rtdev->local_ip)) {
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+			kfree_rtskb(rtskb);
+			return;
+		}
+
+		/* update routing table */
+		rt_ip_route_add_host(saddr, rtskb->mac.ethernet->h_source,
+				     rtdev);
+
+		rtcfg_dev->spec.clt.srv_addr.ip_addr = saddr;
+		break;
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	case RTCFG_ADDR_MAC:
+		/* nothing to do */
+		break;
+
+	default:
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown addr_type %d in %s()\n",
+			    stage_1_cfg->addr_type, __FUNCTION__);
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	/* Ethernet-specific */
+	memcpy(rtcfg_dev->spec.clt.srv_mac_addr, rtskb->mac.ethernet->h_source,
+	       ETH_ALEN);
+
+	rtcfg_send_announce_reply(ifindex, rtskb->mac.ethernet->h_source);
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	kfree_rtskb(rtskb);
+}
+++ linux-patched/drivers/xenomai/net/stack/rtwlan.c	2022-03-21 12:58:30.011882896 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtdev.c	1970-01-01 01:00:00.000000000 +0100
+/* rtwlan.c
+ *
+ * rtwlan protocol stack
+ * Copyright (c) 2006, Daniel Gregorek <dxg@gmx.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+
+#include <rtnet_port.h>
+
+#include <rtwlan.h>
+
+int rtwlan_rx(struct rtskb *rtskb, struct rtnet_device *rtnet_dev)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rtskb->data;
+	u16 fc = le16_to_cpu(hdr->frame_ctl);
+
+	/* strip rtwlan header */
+	rtskb_pull(rtskb, ieee80211_get_hdrlen(fc));
+	rtskb->protocol = rt_eth_type_trans(rtskb, rtnet_dev);
+
+	/* forward rtskb to rtnet */
+	rtnetif_rx(rtskb);
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(rtwlan_rx);
+
+int rtwlan_tx(struct rtskb *rtskb, struct rtnet_device *rtnet_dev)
+{
+	struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev);
+	struct ieee80211_hdr_3addr header = { /* Ensure zero initialized */
+					      .duration_id = 0,
+					      .seq_ctl = 0
+	};
+	int ret;
+	u8 dest[ETH_ALEN], src[ETH_ALEN];
+
+	/* Get source and destination addresses */
+
+	memcpy(src, rtskb->data + ETH_ALEN, ETH_ALEN);
+
+	if (rtwlan_dev->mode == RTWLAN_TXMODE_MCAST) {
+		memcpy(dest, rtnet_dev->dev_addr, ETH_ALEN);
+		dest[0] |= 0x01;
+	} else {
+		memcpy(dest, rtskb->data, ETH_ALEN);
+	}
+
+	/*
+     * Generate ieee80211 compatible header
+     */
+	memcpy(header.addr3, src, ETH_ALEN); /* BSSID */
+	memcpy(header.addr2, src, ETH_ALEN); /* SA */
+	memcpy(header.addr1, dest, ETH_ALEN); /* DA */
+
+	/* Write frame control field */
+	header.frame_ctl =
+		cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
+
+	memcpy(rtskb_push(rtskb, IEEE80211_3ADDR_LEN), &header,
+	       IEEE80211_3ADDR_LEN);
+
+	ret = (*rtwlan_dev->hard_start_xmit)(rtskb, rtnet_dev);
+
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(rtwlan_tx);
+
+/**
+ * rtwlan_alloc_dev - Allocates and sets up a wlan device
+ * @sizeof_priv: size of additional driver-private structure to
+ *               be allocated for this wlan device
+ * @dev_pool_size: size of the rx pool
+ *
+ * Fill in the fields of the device structure with wlan-generic
+ * values. Basically does everything except registering the device.
+ *
+ * A 32-byte alignment is enforced for the private data area.
+ */
+
+struct rtnet_device *rtwlan_alloc_dev(unsigned sizeof_priv,
+				      unsigned dev_pool_size)
+{
+	struct rtnet_device *rtnet_dev;
+
+	RTWLAN_DEBUG("Start.\n");
+
+	rtnet_dev = rt_alloc_etherdev(
+		sizeof(struct rtwlan_device) + sizeof_priv, dev_pool_size);
+	if (!rtnet_dev)
+		return NULL;
+
+	rtnet_dev->hard_start_xmit = rtwlan_tx;
+
+	rtdev_alloc_name(rtnet_dev, "rtwlan%d");
+
+	return rtnet_dev;
+}
+
+EXPORT_SYMBOL_GPL(rtwlan_alloc_dev);
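+
+/*
+ * Usage sketch (illustrative only; struct my_priv, my_wlan_xmit() and the
+ * pool size are hypothetical): a driver allocates the device, hooks its
+ * low-level transmit handler into the embedded rtwlan_device and finally
+ * registers the result with the stack:
+ *
+ *	struct rtnet_device *rtnet_dev;
+ *	struct rtwlan_device *rtwlan_dev;
+ *	int ret;
+ *
+ *	rtnet_dev = rtwlan_alloc_dev(sizeof(struct my_priv), 32);
+ *	if (!rtnet_dev)
+ *		return -ENOMEM;
+ *
+ *	rtwlan_dev = rtnetdev_priv(rtnet_dev);
+ *	rtwlan_dev->hard_start_xmit = my_wlan_xmit;
+ *	rtnet_dev->vers = RTDEV_VERS_2_0;
+ *
+ *	ret = rt_register_rtnetdev(rtnet_dev);
+ *	if (ret)
+ *		rtdev_free(rtnet_dev);
+ */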
+
+int rtwlan_ioctl(struct rtnet_device *rtdev, unsigned int request,
+		 unsigned long arg)
+{
+	struct rtwlan_cmd cmd;
+	struct ifreq ifr;
+	int ret = 0;
+
+	if (copy_from_user(&cmd, (void *)arg, sizeof(cmd)) != 0)
+		return -EFAULT;
+
+	/*
+     * FIXME: proper do_ioctl() should expect a __user pointer
+     * arg. This only works with the existing WLAN support because the
+     * only driver currently providing this feature is broken, not
+     * doing the copy_to/from_user dance.
+     */
+	memset(&ifr, 0, sizeof(ifr));
+	ifr.ifr_data = &cmd;
+
+	switch (request) {
+	case IOC_RTWLAN_IFINFO:
+		if (cmd.args.info.ifindex > 0)
+			rtdev = rtdev_get_by_index(cmd.args.info.ifindex);
+		else
+			rtdev = rtdev_get_by_name(cmd.head.if_name);
+		if (rtdev == NULL)
+			return -ENODEV;
+
+		if (mutex_lock_interruptible(&rtdev->nrt_lock)) {
+			rtdev_dereference(rtdev);
+			return -ERESTARTSYS;
+		}
+
+		if (rtdev->do_ioctl)
+			ret = rtdev->do_ioctl(rtdev, &ifr, request);
+		else
+			ret = -ENORTWLANDEV;
+
+		memcpy(cmd.head.if_name, rtdev->name, IFNAMSIZ);
+		cmd.args.info.ifindex = rtdev->ifindex;
+		cmd.args.info.flags = rtdev->flags;
+
+		mutex_unlock(&rtdev->nrt_lock);
+
+		rtdev_dereference(rtdev);
+
+		break;
+
+	case IOC_RTWLAN_TXMODE:
+	case IOC_RTWLAN_BITRATE:
+	case IOC_RTWLAN_CHANNEL:
+	case IOC_RTWLAN_RETRY:
+	case IOC_RTWLAN_TXPOWER:
+	case IOC_RTWLAN_AUTORESP:
+	case IOC_RTWLAN_DROPBCAST:
+	case IOC_RTWLAN_DROPMCAST:
+	case IOC_RTWLAN_REGREAD:
+	case IOC_RTWLAN_REGWRITE:
+	case IOC_RTWLAN_BBPWRITE:
+	case IOC_RTWLAN_BBPREAD:
+	case IOC_RTWLAN_BBPSENS:
+		if (mutex_lock_interruptible(&rtdev->nrt_lock))
+			return -ERESTARTSYS;
+
+		if (rtdev->do_ioctl)
+			ret = rtdev->do_ioctl(rtdev, &ifr, request);
+		else
+			ret = -ENORTWLANDEV;
+
+		mutex_unlock(&rtdev->nrt_lock);
+
+		break;
+
+	default:
+		ret = -ENOTTY;
+	}
+
+	if (copy_to_user((void *)arg, &cmd, sizeof(cmd)) != 0)
+		return -EFAULT;
+
+	return ret;
+}
+
+struct rtnet_ioctls rtnet_wlan_ioctls = {
+	.service_name = "rtwlan ioctl",
+	.ioctl_type = RTNET_IOC_TYPE_RTWLAN,
+	.handler = rtwlan_ioctl,
+};
+
+int __init rtwlan_init(void)
+{
+	if (rtnet_register_ioctls(&rtnet_wlan_ioctls))
+		rtdm_printk(KERN_ERR "Failed to register rtnet_wlan_ioctl!\n");
+
+	return 0;
+}
+
+void rtwlan_exit(void)
+{
+	rtnet_unregister_ioctls(&rtnet_wlan_ioctls);
+}
+++ linux-patched/drivers/xenomai/net/stack/rtdev.c	2022-03-21 12:58:30.003882974 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/stack/rtnet_module.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  stack/rtdev.c - NIC device driver layer
+ *
+ *  Copyright (C) 1999       Lineo, Inc
+ *                1999, 2002 David A. Schleef <ds@schleef.org>
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/spinlock.h>
+#include <linux/if.h>
+#include <linux/if_arp.h> /* ARPHRD_ETHER */
+#include <linux/netdevice.h>
+#include <linux/moduleparam.h>
+
+#include <rtnet_internal.h>
+#include <rtskb.h>
+#include <ethernet/eth.h>
+#include <rtmac/rtmac_disc.h>
+#include <rtnet_port.h>
+
+static unsigned int device_rtskbs = DEFAULT_DEVICE_RTSKBS;
+module_param(device_rtskbs, uint, 0444);
+MODULE_PARM_DESC(device_rtskbs, "Number of additional global realtime socket "
+				"buffers per network adapter");
+
+struct rtnet_device *rtnet_devices[MAX_RT_DEVICES];
+static struct rtnet_device *loopback_device;
+static DEFINE_RTDM_LOCK(rtnet_devices_rt_lock);
+static LIST_HEAD(rtskb_mapped_list);
+static LIST_HEAD(rtskb_mapwait_list);
+
+LIST_HEAD(event_hook_list);
+DEFINE_MUTEX(rtnet_devices_nrt_lock);
+
+static int rtdev_locked_xmit(struct rtskb *skb, struct rtnet_device *rtdev);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
+#define atomic_fetch_add_unless __atomic_add_unless
+#endif
+
+int rtdev_reference(struct rtnet_device *rtdev)
+{
+	smp_mb__before_atomic();
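+	/*
+	 * Taking the first reference also pins the owning driver module so
+	 * it cannot be unloaded while the device is in use. If the 0->1
+	 * transition raced with another caller (refcount != 1 after the
+	 * increment), drop the surplus module reference again.
+	 */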
+	if (rtdev->rt_owner &&
+	    atomic_fetch_add_unless(&rtdev->refcount, 1, 0) == 0) {
+		if (!try_module_get(rtdev->rt_owner))
+			return 0;
+		if (atomic_inc_return(&rtdev->refcount) != 1)
+			module_put(rtdev->rt_owner);
+	}
+	return 1;
+}
+EXPORT_SYMBOL_GPL(rtdev_reference);
+
+struct rtskb *rtnetdev_alloc_rtskb(struct rtnet_device *rtdev,
+				   unsigned int size)
+{
+	struct rtskb *rtskb = alloc_rtskb(size, &rtdev->dev_pool);
+	if (rtskb)
+		rtskb->rtdev = rtdev;
+	return rtskb;
+}
+EXPORT_SYMBOL_GPL(rtnetdev_alloc_rtskb);
+
+/***
+ *  __rtdev_get_by_name - find a rtnet_device by its name
+ *  @name: name to find
+ *  @note: caller must hold rtnet_devices_nrt_lock
+ */
+static struct rtnet_device *__rtdev_get_by_name(const char *name)
+{
+	int i;
+	struct rtnet_device *rtdev;
+
+	for (i = 0; i < MAX_RT_DEVICES; i++) {
+		rtdev = rtnet_devices[i];
+		if ((rtdev != NULL) &&
+		    (strncmp(rtdev->name, name, IFNAMSIZ) == 0))
+			return rtdev;
+	}
+	return NULL;
+}
+
+/***
+ *  rtdev_get_by_name - find and lock a rtnet_device by its name
+ *  @name: name to find
+ */
+struct rtnet_device *rtdev_get_by_name(const char *name)
+{
+	struct rtnet_device *rtdev;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&rtnet_devices_rt_lock, context);
+
+	rtdev = __rtdev_get_by_name(name);
+	if (rtdev != NULL && !rtdev_reference(rtdev))
+		rtdev = NULL;
+
+	rtdm_lock_put_irqrestore(&rtnet_devices_rt_lock, context);
+
+	return rtdev;
+}
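+
+/*
+ * Usage sketch (illustrative; "rteth0" is just an example name): every
+ * successful rtdev_get_by_name()/rtdev_get_by_index()/rtdev_get_by_hwaddr()
+ * lookup returns the device with an extra reference that must be dropped
+ * with rtdev_dereference() when done:
+ *
+ *	struct rtnet_device *rtdev = rtdev_get_by_name("rteth0");
+ *
+ *	if (rtdev) {
+ *		... use the device ...
+ *		rtdev_dereference(rtdev);
+ *	}
+ */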
+
+/***
+ *  rtdev_get_by_index - find and lock a rtnet_device by its ifindex
+ *  @ifindex: index of device
+ */
+struct rtnet_device *rtdev_get_by_index(int ifindex)
+{
+	struct rtnet_device *rtdev;
+	rtdm_lockctx_t context;
+
+	if ((ifindex <= 0) || (ifindex > MAX_RT_DEVICES))
+		return NULL;
+
+	rtdm_lock_get_irqsave(&rtnet_devices_rt_lock, context);
+
+	rtdev = __rtdev_get_by_index(ifindex);
+	if (rtdev != NULL && !rtdev_reference(rtdev))
+		rtdev = NULL;
+
+	rtdm_lock_put_irqrestore(&rtnet_devices_rt_lock, context);
+
+	return rtdev;
+}
+
+/***
+ *  __rtdev_get_by_hwaddr - find a rtnetdevice by its mac-address
+ *  __rtdev_get_by_hwaddr - find an rtnet_device by its MAC address
+ *  @hw_addr:       MAC-Address
+ */
+static inline struct rtnet_device *__rtdev_get_by_hwaddr(unsigned short type,
+							 char *hw_addr)
+{
+	int i;
+	struct rtnet_device *rtdev;
+
+	for (i = 0; i < MAX_RT_DEVICES; i++) {
+		rtdev = rtnet_devices[i];
+		if ((rtdev != NULL) && (rtdev->type == type) &&
+		    (!memcmp(rtdev->dev_addr, hw_addr, rtdev->addr_len))) {
+			return rtdev;
+		}
+	}
+	return NULL;
+}
+
+/***
+ *  rtdev_get_by_hwaddr - find and lock an rtnet_device by its MAC address
+ *  @type:          Type of the net_device (may be ARPHRD_ETHER)
+ *  @hw_addr:       MAC-Address
+ */
+struct rtnet_device *rtdev_get_by_hwaddr(unsigned short type, char *hw_addr)
+{
+	struct rtnet_device *rtdev;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&rtnet_devices_rt_lock, context);
+
+	rtdev = __rtdev_get_by_hwaddr(type, hw_addr);
+	if (rtdev != NULL && !rtdev_reference(rtdev))
+		rtdev = NULL;
+
+	rtdm_lock_put_irqrestore(&rtnet_devices_rt_lock, context);
+
+	return rtdev;
+}
+
+/***
+ *  rtdev_get_loopback - find and lock the loopback device if available
+ */
+struct rtnet_device *rtdev_get_loopback(void)
+{
+	struct rtnet_device *rtdev;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&rtnet_devices_rt_lock, context);
+
+	rtdev = loopback_device;
+	if (rtdev != NULL && !rtdev_reference(rtdev))
+		rtdev = NULL;
+
+	rtdm_lock_put_irqrestore(&rtnet_devices_rt_lock, context);
+
+	return rtdev;
+}
+
+/***
+ *  rtdev_alloc_name - allocate a name for the rtnet_device
+ *  @rtdev:         the rtnet_device
+ *  @mask:          a name mask (e.g. "rteth%d" for ethernet)
+ *
+ *  This function has to be called from the driver probe function.
+ */
+void rtdev_alloc_name(struct rtnet_device *rtdev, const char *mask)
+{
+	char buf[IFNAMSIZ];
+	int i;
+	struct rtnet_device *tmp;
+
+	for (i = 0; i < MAX_RT_DEVICES; i++) {
+		snprintf(buf, IFNAMSIZ, mask, i);
+		if ((tmp = rtdev_get_by_name(buf)) == NULL) {
+			strncpy(rtdev->name, buf, IFNAMSIZ);
+			break;
+		} else
+			rtdev_dereference(tmp);
+	}
+}
+
+static int rtdev_pool_trylock(void *cookie)
+{
+	return rtdev_reference(cookie);
+}
+
+static void rtdev_pool_unlock(void *cookie)
+{
+	rtdev_dereference(cookie);
+}
+
+static const struct rtskb_pool_lock_ops rtdev_ops = {
+	.trylock = rtdev_pool_trylock,
+	.unlock = rtdev_pool_unlock,
+};
+
+int rtdev_init(struct rtnet_device *rtdev, unsigned dev_pool_size)
+{
+	int ret;
+
+	ret = rtskb_pool_init(&rtdev->dev_pool, dev_pool_size, &rtdev_ops,
+			      rtdev);
+	if (ret < dev_pool_size) {
+		printk(KERN_ERR "RTnet: cannot allocate rtnet device pool\n");
+		rtskb_pool_release(&rtdev->dev_pool);
+		return -ENOMEM;
+	}
+
+	rtdm_mutex_init(&rtdev->xmit_mutex);
+	rtdm_lock_init(&rtdev->rtdev_lock);
+	mutex_init(&rtdev->nrt_lock);
+
+	atomic_set(&rtdev->refcount, 0);
+
+	/* scale global rtskb pool */
+	rtdev->add_rtskbs = rtskb_pool_extend(&global_pool, device_rtskbs);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rtdev_init);
+
+void rtdev_destroy(struct rtnet_device *rtdev)
+{
+	rtskb_pool_release(&rtdev->dev_pool);
+	rtskb_pool_shrink(&global_pool, rtdev->add_rtskbs);
+	rtdev->stack_event = NULL;
+	rtdm_mutex_destroy(&rtdev->xmit_mutex);
+}
+EXPORT_SYMBOL_GPL(rtdev_destroy);
+
+/***
+ *  rtdev_alloc - allocate memory for a new rt-network-adapter
+ *  @sizeof_priv:   size of the driver-private area
+ *  @dev_pool_size: size of the rx pool
+ */
+struct rtnet_device *rtdev_alloc(unsigned sizeof_priv, unsigned dev_pool_size)
+{
+	struct rtnet_device *rtdev;
+	unsigned alloc_size;
+	int ret;
+
+	/* ensure 32-byte alignment of the private area */
+	alloc_size = sizeof(*rtdev) + sizeof_priv + 31;
+
+	rtdev = kzalloc(alloc_size, GFP_KERNEL);
+	if (rtdev == NULL) {
+		printk(KERN_ERR "RTnet: cannot allocate rtnet device\n");
+		return NULL;
+	}
+
+	ret = rtdev_init(rtdev, dev_pool_size);
+	if (ret) {
+		kfree(rtdev);
+		return NULL;
+	}
+
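+	/* round the start of the private area up to the next 32-byte boundary */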
+	if (sizeof_priv)
+		rtdev->priv = (void *)(((long)(rtdev + 1) + 31) & ~31);
+
+	return rtdev;
+}
+
+/***
+ *  rtdev_free
+ */
+void rtdev_free(struct rtnet_device *rtdev)
+{
+	if (rtdev != NULL) {
+		rtdev_destroy(rtdev);
+		kfree(rtdev);
+	}
+}
+EXPORT_SYMBOL_GPL(rtdev_free);
+
+static void init_etherdev(struct rtnet_device *rtdev, struct module *module)
+{
+	rtdev->hard_header = rt_eth_header;
+	rtdev->type = ARPHRD_ETHER;
+	rtdev->hard_header_len = ETH_HLEN;
+	rtdev->mtu = 1500; /* eth_mtu */
+	rtdev->addr_len = ETH_ALEN;
+	rtdev->flags = IFF_BROADCAST; /* TODO: IFF_MULTICAST; */
+	rtdev->get_mtu = rt_hard_mtu;
+	rtdev->rt_owner = module;
+
+	memset(rtdev->broadcast, 0xFF, ETH_ALEN);
+	strcpy(rtdev->name, "rteth%d");
+}
+
+/**
+ * rt_init_etherdev - sets up an ethernet device
+ * @module: module initializing the device
+ *
+ * Fill in the fields of the device structure with ethernet-generic
+ * values. This routine can be used to set up a pre-allocated device
+ * structure. The device still needs to be registered afterwards.
+ */
+int __rt_init_etherdev(struct rtnet_device *rtdev, unsigned dev_pool_size,
+		       struct module *module)
+{
+	int ret;
+
+	ret = rtdev_init(rtdev, dev_pool_size);
+	if (ret)
+		return ret;
+
+	init_etherdev(rtdev, module);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__rt_init_etherdev);
+
+/**
+ * rt_alloc_etherdev - Allocates and sets up an ethernet device
+ * @sizeof_priv: size of additional driver-private structure to
+ *               be allocated for this ethernet device
+ * @dev_pool_size: size of the rx pool
+ * @module: module creating the device
+ *
+ * Allocates then fills in the fields of a new device structure with
+ * ethernet-generic values. Basically does everything except
+ * registering the device.
+ *
+ * A 32-byte alignment is enforced for the private data area.
+ */
+struct rtnet_device *__rt_alloc_etherdev(unsigned sizeof_priv,
+					 unsigned dev_pool_size,
+					 struct module *module)
+{
+	struct rtnet_device *rtdev;
+
+	rtdev = rtdev_alloc(sizeof_priv, dev_pool_size);
+	if (!rtdev)
+		return NULL;
+
+	init_etherdev(rtdev, module);
+
+	return rtdev;
+}
+EXPORT_SYMBOL_GPL(__rt_alloc_etherdev);
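+
+/*
+ * Usage sketch (illustrative only; struct my_priv, my_xmit() and the pool
+ * size are hypothetical): a typical Ethernet driver probe path allocates
+ * the device, fills in its handlers and registers it. The default name is
+ * "rteth%d"; rt_register_rtnetdev() resolves the template and fails if no
+ * device slot is left:
+ *
+ *	struct rtnet_device *rtdev;
+ *	struct my_priv *priv;
+ *	int ret;
+ *
+ *	rtdev = rt_alloc_etherdev(sizeof(struct my_priv), 16);
+ *	if (!rtdev)
+ *		return -ENOMEM;
+ *
+ *	rtdev->vers = RTDEV_VERS_2_0;
+ *	rtdev->hard_start_xmit = my_xmit;
+ *	priv = rtdev->priv;
+ *
+ *	ret = rt_register_rtnetdev(rtdev);
+ *	if (ret) {
+ *		rtdev_free(rtdev);
+ *		return ret;
+ *	}
+ */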
+
+static inline int __rtdev_new_index(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_RT_DEVICES; i++)
+		if (rtnet_devices[i] == NULL)
+			return i + 1;
+
+	return -ENOMEM;
+}
+
+static int rtskb_map(struct rtnet_device *rtdev, struct rtskb *skb)
+{
+	dma_addr_t addr;
+
+	addr = rtdev->map_rtskb(rtdev, skb);
+
+	if (WARN_ON(addr == RTSKB_UNMAPPED))
+		return -ENOMEM;
+
+	if (skb->buf_dma_addr != RTSKB_UNMAPPED && addr != skb->buf_dma_addr) {
+		printk("RTnet: device %s maps skb differently than others. "
+		       "Different IOMMU domain?\nThis is not supported.\n",
+		       rtdev->name);
+		return -EACCES;
+	}
+
+	skb->buf_dma_addr = addr;
+
+	return 0;
+}
+
+int rtdev_map_rtskb(struct rtskb *skb)
+{
+	struct rtnet_device *rtdev;
+	int err = 0;
+	int i;
+
+	skb->buf_dma_addr = RTSKB_UNMAPPED;
+
+	mutex_lock(&rtnet_devices_nrt_lock);
+
+	for (i = 0; i < MAX_RT_DEVICES; i++) {
+		rtdev = rtnet_devices[i];
+		if (rtdev && rtdev->map_rtskb) {
+			err = rtskb_map(rtdev, skb);
+			if (err)
+				break;
+		}
+	}
+
+	if (!err) {
+		if (skb->buf_dma_addr != RTSKB_UNMAPPED)
+			list_add(&skb->entry, &rtskb_mapped_list);
+		else
+			list_add(&skb->entry, &rtskb_mapwait_list);
+	}
+
+	mutex_unlock(&rtnet_devices_nrt_lock);
+
+	return err;
+}
+
+static int rtdev_map_all_rtskbs(struct rtnet_device *rtdev)
+{
+	struct rtskb *skb, *n;
+	int err = 0;
+
+	if (!rtdev->map_rtskb)
+		return 0;
+
+	list_for_each_entry (skb, &rtskb_mapped_list, entry) {
+		err = rtskb_map(rtdev, skb);
+		if (err)
+			break;
+	}
+
+	list_for_each_entry_safe (skb, n, &rtskb_mapwait_list, entry) {
+		err = rtskb_map(rtdev, skb);
+		if (err)
+			break;
+		list_del(&skb->entry);
+		list_add(&skb->entry, &rtskb_mapped_list);
+	}
+
+	return err;
+}
+
+void rtdev_unmap_rtskb(struct rtskb *skb)
+{
+	struct rtnet_device *rtdev;
+	int i;
+
+	mutex_lock(&rtnet_devices_nrt_lock);
+
+	list_del(&skb->entry);
+
+	if (skb->buf_dma_addr != RTSKB_UNMAPPED) {
+		for (i = 0; i < MAX_RT_DEVICES; i++) {
+			rtdev = rtnet_devices[i];
+			if (rtdev && rtdev->unmap_rtskb) {
+				rtdev->unmap_rtskb(rtdev, skb);
+			}
+		}
+	}
+
+	skb->buf_dma_addr = RTSKB_UNMAPPED;
+
+	mutex_unlock(&rtnet_devices_nrt_lock);
+}
+
+static void rtdev_unmap_all_rtskbs(struct rtnet_device *rtdev)
+{
+	struct rtskb *skb;
+
+	if (!rtdev->unmap_rtskb)
+		return;
+
+	list_for_each_entry (skb, &rtskb_mapped_list, entry) {
+		rtdev->unmap_rtskb(rtdev, skb);
+	}
+}
+
+/***
+ * rt_register_rtnetdev: register a new rtnet_device (linux-like)
+ * @rtdev:               the device
+ */
+int rt_register_rtnetdev(struct rtnet_device *rtdev)
+{
+	struct list_head *entry;
+	struct rtdev_event_hook *hook;
+	rtdm_lockctx_t context;
+	int ifindex;
+	int err;
+
+	/* requires at least driver layer version 2.0 */
+	if (rtdev->vers < RTDEV_VERS_2_0)
+		return -EINVAL;
+
+	if (rtdev->features & NETIF_F_LLTX)
+		rtdev->start_xmit = rtdev->hard_start_xmit;
+	else
+		rtdev->start_xmit = rtdev_locked_xmit;
+
+	mutex_lock(&rtnet_devices_nrt_lock);
+
+	ifindex = __rtdev_new_index();
+	if (ifindex < 0) {
+		err = ifindex;
+		goto fail;
+	}
+	rtdev->ifindex = ifindex;
+
+	if (strchr(rtdev->name, '%') != NULL)
+		rtdev_alloc_name(rtdev, rtdev->name);
+
+	if (__rtdev_get_by_name(rtdev->name) != NULL) {
+		err = -EEXIST;
+		goto fail;
+	}
+
+	rtdev->sysdev =
+		device_create(rtnet_class, NULL, MKDEV(0, rtdev->ifindex),
+			      rtdev, rtdev->name);
+	if (IS_ERR(rtdev->sysdev)) {
+		err = PTR_ERR(rtdev->sysdev);
+		goto fail;
+	}
+
+	if (rtdev->sysbind) {
+		err = sysfs_create_link(&rtdev->sysdev->kobj,
+					&rtdev->sysbind->kobj, "adapter");
+		if (err)
+			goto fail_link;
+	}
+
+	err = rtdev_map_all_rtskbs(rtdev);
+	if (err)
+		goto fail_map;
+
+	rtdm_lock_get_irqsave(&rtnet_devices_rt_lock, context);
+
+	if (rtdev->flags & IFF_LOOPBACK) {
+		/* allow only one loopback device */
+		if (loopback_device) {
+			rtdm_lock_put_irqrestore(&rtnet_devices_rt_lock,
+						 context);
+			err = -EEXIST;
+			goto fail_loopback;
+		}
+		loopback_device = rtdev;
+	}
+	rtnet_devices[rtdev->ifindex - 1] = rtdev;
+
+	rtdm_lock_put_irqrestore(&rtnet_devices_rt_lock, context);
+
+	list_for_each (entry, &event_hook_list) {
+		hook = list_entry(entry, struct rtdev_event_hook, entry);
+		if (hook->register_device)
+			hook->register_device(rtdev);
+	}
+
+	mutex_unlock(&rtnet_devices_nrt_lock);
+
+	/* Default state at registration is that the device is present. */
+	set_bit(__RTNET_LINK_STATE_PRESENT, &rtdev->link_state);
+
+	printk("RTnet: registered %s\n", rtdev->name);
+
+	return 0;
+
+fail_loopback:
+	rtdev_unmap_all_rtskbs(rtdev);
+fail_map:
+	if (rtdev->sysbind)
+		sysfs_remove_link(&rtdev->sysdev->kobj, "adapter");
+fail_link:
+	device_destroy(rtnet_class, MKDEV(0, rtdev->ifindex));
+fail:
+	mutex_unlock(&rtnet_devices_nrt_lock);
+
+	return err;
+}
+
+/***
+ * rt_unregister_rtnetdev: unregister a rtnet_device
+ * @rtdev:                 the device
+ */
+int rt_unregister_rtnetdev(struct rtnet_device *rtdev)
+{
+	struct list_head *entry;
+	struct rtdev_event_hook *hook;
+	rtdm_lockctx_t context;
+
+	RTNET_ASSERT(rtdev->ifindex != 0,
+		     printk("RTnet: device %s/%p was not registered\n",
+			    rtdev->name, rtdev);
+		     return -ENODEV;);
+
+	if (rtdev->sysbind)
+		sysfs_remove_link(&rtdev->sysdev->kobj, "adapter");
+
+	device_destroy(rtnet_class, MKDEV(0, rtdev->ifindex));
+
+	mutex_lock(&rtnet_devices_nrt_lock);
+	rtdm_lock_get_irqsave(&rtnet_devices_rt_lock, context);
+
+	RTNET_ASSERT(atomic_read(&rtdev->refcount) == 0, BUG());
+	rtnet_devices[rtdev->ifindex - 1] = NULL;
+	if (rtdev->flags & IFF_LOOPBACK)
+		loopback_device = NULL;
+
+	rtdm_lock_put_irqrestore(&rtnet_devices_rt_lock, context);
+
+	list_for_each (entry, &event_hook_list) {
+		hook = list_entry(entry, struct rtdev_event_hook, entry);
+		if (hook->unregister_device)
+			hook->unregister_device(rtdev);
+	}
+
+	rtdev_unmap_all_rtskbs(rtdev);
+
+	mutex_unlock(&rtnet_devices_nrt_lock);
+
+	clear_bit(__RTNET_LINK_STATE_PRESENT, &rtdev->link_state);
+
+	RTNET_ASSERT(atomic_read(&rtdev->refcount) == 0,
+		     printk("RTnet: rtdev reference counter not zero!\n"););
+
+	printk("RTnet: unregistered %s\n", rtdev->name);
+
+	return 0;
+}
+
+void rtdev_add_event_hook(struct rtdev_event_hook *hook)
+{
+	mutex_lock(&rtnet_devices_nrt_lock);
+	list_add(&hook->entry, &event_hook_list);
+	mutex_unlock(&rtnet_devices_nrt_lock);
+}
+
+void rtdev_del_event_hook(struct rtdev_event_hook *hook)
+{
+	mutex_lock(&rtnet_devices_nrt_lock);
+	list_del(&hook->entry);
+	mutex_unlock(&rtnet_devices_nrt_lock);
+}
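+
+/*
+ * Illustrative hook registration sketch: a protocol layer that wants to
+ * be notified about device registration and interface state changes
+ * fills in the callbacks it cares about and adds the hook once at init
+ * time.  The my_* callbacks are placeholders.
+ *
+ *	static struct rtdev_event_hook my_hook = {
+ *		.register_device	= my_register_device,
+ *		.unregister_device	= my_unregister_device,
+ *		.ifup			= my_ifup,
+ *		.ifdown			= my_ifdown,
+ *	};
+ *
+ *	rtdev_add_event_hook(&my_hook);		(at module init)
+ *	rtdev_del_event_hook(&my_hook);		(at module exit)
+ */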
+
+int rtdev_up(struct rtnet_device *rtdev, struct rtnet_core_cmd *cmd)
+{
+	struct list_head *entry;
+	struct rtdev_event_hook *hook;
+	int ret = 0;
+
+	if (mutex_lock_interruptible(&rtdev->nrt_lock))
+		return -ERESTARTSYS;
+
+	/* We cannot change the promisc flag or the hardware address if
+	   the device is already up. */
+	if ((rtdev->flags & IFF_UP) &&
+	    (((cmd->args.up.set_dev_flags | cmd->args.up.clear_dev_flags) &
+	      IFF_PROMISC) ||
+	     (cmd->args.up.dev_addr_type != ARPHRD_VOID))) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (cmd->args.up.dev_addr_type != ARPHRD_VOID &&
+	    cmd->args.up.dev_addr_type != rtdev->type) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Skip upon extraneous call only after args have been checked. */
+	if (test_and_set_bit(PRIV_FLAG_UP, &rtdev->priv_flags))
+		goto out;
+
+	rtdev->flags |= cmd->args.up.set_dev_flags;
+	rtdev->flags &= ~cmd->args.up.clear_dev_flags;
+
+	if (cmd->args.up.dev_addr_type != ARPHRD_VOID)
+		memcpy(rtdev->dev_addr, cmd->args.up.dev_addr, MAX_ADDR_LEN);
+
+	ret = rtdev_open(rtdev); /* also == 0 if rtdev is already up */
+
+	if (ret == 0) {
+		mutex_lock(&rtnet_devices_nrt_lock);
+
+		list_for_each (entry, &event_hook_list) {
+			hook = list_entry(entry, struct rtdev_event_hook,
+					  entry);
+			if (hook->ifup)
+				hook->ifup(rtdev, cmd);
+		}
+
+		mutex_unlock(&rtnet_devices_nrt_lock);
+	} else
+		clear_bit(PRIV_FLAG_UP, &rtdev->priv_flags);
+out:
+	mutex_unlock(&rtdev->nrt_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdev_up);
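+
+/*
+ * Illustrative caller sketch for rtdev_up() (assumption: only the command
+ * fields referenced above matter, everything else stays zeroed): bringing
+ * an interface up without touching flags or the hardware address looks
+ * roughly like this.
+ *
+ *	struct rtnet_core_cmd cmd = { };
+ *
+ *	cmd.args.up.set_dev_flags = 0;
+ *	cmd.args.up.clear_dev_flags = 0;
+ *	cmd.args.up.dev_addr_type = ARPHRD_VOID;	(keep current address)
+ *	ret = rtdev_up(rtdev, &cmd);
+ */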
+
+int rtdev_down(struct rtnet_device *rtdev)
+{
+	struct list_head *entry;
+	struct rtdev_event_hook *hook;
+	rtdm_lockctx_t context;
+	int ret = 0;
+
+	if (mutex_lock_interruptible(&rtdev->nrt_lock))
+		return -ERESTARTSYS;
+
+	/* spin lock required for sync with routing code */
+	rtdm_lock_get_irqsave(&rtdev->rtdev_lock, context);
+
+	if (test_bit(PRIV_FLAG_ADDING_ROUTE, &rtdev->priv_flags)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	if (!test_and_clear_bit(PRIV_FLAG_UP, &rtdev->priv_flags))
+		goto fail;
+
+	rtdm_lock_put_irqrestore(&rtdev->rtdev_lock, context);
+
+	if (rtdev->mac_detach != NULL)
+		ret = rtdev->mac_detach(rtdev);
+
+	if (ret == 0) {
+		mutex_lock(&rtnet_devices_nrt_lock);
+
+		list_for_each (entry, &event_hook_list) {
+			hook = list_entry(entry, struct rtdev_event_hook,
+					  entry);
+			if (hook->ifdown)
+				hook->ifdown(rtdev);
+		}
+
+		mutex_unlock(&rtnet_devices_nrt_lock);
+
+		ret = rtdev_close(rtdev);
+	}
+out:
+	mutex_unlock(&rtdev->nrt_lock);
+
+	return ret;
+fail:
+	rtdm_lock_put_irqrestore(&rtdev->rtdev_lock, context);
+	goto out;
+}
+EXPORT_SYMBOL_GPL(rtdev_down);
+
+/***
+ *  rtdev_open
+ *
+ *  Prepare an interface for use.
+ */
+int rtdev_open(struct rtnet_device *rtdev)
+{
+	int ret = 0;
+
+	if (rtdev->flags & IFF_UP) /* Is it already up?                */
+		return 0;
+
+	if (!rtdev_reference(rtdev))
+		return -EIDRM;
+
+	if (rtdev->open) /* Call device private open method  */
+		ret = rtdev->open(rtdev);
+
+	if (!ret) {
+		rtdev->flags |= IFF_UP;
+		set_bit(__RTNET_LINK_STATE_START, &rtdev->link_state);
+	} else
+		rtdev_dereference(rtdev);
+
+	return ret;
+}
+
+/***
+ *  rtdev_close
+ */
+int rtdev_close(struct rtnet_device *rtdev)
+{
+	int ret = 0;
+
+	if (!(rtdev->flags & IFF_UP))
+		return 0;
+
+	if (rtdev->stop)
+		ret = rtdev->stop(rtdev);
+
+	rtdev->flags &= ~(IFF_UP | IFF_RUNNING);
+	clear_bit(__RTNET_LINK_STATE_START, &rtdev->link_state);
+
+	if (ret == 0)
+		rtdev_dereference(rtdev);
+
+	return ret;
+}
+
+static int rtdev_locked_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	int ret;
+
+	rtdm_mutex_lock(&rtdev->xmit_mutex);
+	ret = rtdev->hard_start_xmit(skb, rtdev);
+	rtdm_mutex_unlock(&rtdev->xmit_mutex);
+
+	return ret;
+}
+
+/***
+ *  rtdev_xmit - send real-time packet
+ */
+int rtdev_xmit(struct rtskb *rtskb)
+{
+	struct rtnet_device *rtdev;
+	int err;
+
+	RTNET_ASSERT(rtskb != NULL, return -EINVAL;);
+
+	rtdev = rtskb->rtdev;
+
+	if (!rtnetif_carrier_ok(rtdev)) {
+		err = -EAGAIN;
+		kfree_rtskb(rtskb);
+		return err;
+	}
+
+	if (rtskb_acquire(rtskb, &rtdev->dev_pool) != 0) {
+		err = -ENOBUFS;
+		kfree_rtskb(rtskb);
+		return err;
+	}
+
+	RTNET_ASSERT(rtdev != NULL, return -EINVAL;);
+
+	err = rtdev->start_xmit(rtskb, rtdev);
+	if (err) {
+		/* on error we must free the rtskb here */
+		kfree_rtskb(rtskb);
+
+		rtdm_printk("hard_start_xmit returned %d\n", err);
+	}
+
+	return err;
+}
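+
+/*
+ * Illustrative transmit sketch (assumptions, not a guaranteed API
+ * contract): a sender builds the frame in an rtskb from its own pool,
+ * attaches the target device and hands it to rtdev_xmit(), which
+ * consumes the buffer on both success and failure.  my_pool, frame and
+ * frame_len are placeholders.
+ *
+ *	struct rtnet_device *rtdev = rtdev_get_by_name("rteth0");
+ *	struct rtskb *skb;
+ *
+ *	if (!rtdev)
+ *		return -ENODEV;
+ *	skb = alloc_rtskb(frame_len, &my_pool);
+ *	if (!skb) {
+ *		rtdev_dereference(rtdev);
+ *		return -ENOBUFS;
+ *	}
+ *	skb->rtdev = rtdev;
+ *	memcpy(rtskb_put(skb, frame_len), frame, frame_len);
+ *	err = rtdev_xmit(skb);
+ *	rtdev_dereference(rtdev);
+ */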
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+/***
+ *      rtdev_xmit_proxy - send rtproxy packet
+ */
+int rtdev_xmit_proxy(struct rtskb *rtskb)
+{
+	struct rtnet_device *rtdev;
+	int err;
+
+	RTNET_ASSERT(rtskb != NULL, return -EINVAL;);
+
+	rtdev = rtskb->rtdev;
+
+	RTNET_ASSERT(rtdev != NULL, return -EINVAL;);
+
+	/* TODO: make these lines race-condition-safe */
+	if (rtdev->mac_disc) {
+		RTNET_ASSERT(rtdev->mac_disc->nrt_packet_tx != NULL,
+			     return -EINVAL;);
+
+		err = rtdev->mac_disc->nrt_packet_tx(rtskb);
+	} else {
+		err = rtdev->start_xmit(rtskb, rtdev);
+		if (err) {
+			/* on error we must free the rtskb here */
+			kfree_rtskb(rtskb);
+
+			rtdm_printk("hard_start_xmit returned %d\n", err);
+		}
+	}
+
+	return err;
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_PROXY */
+
+unsigned int rt_hard_mtu(struct rtnet_device *rtdev, unsigned int priority)
+{
+	return rtdev->mtu;
+}
+
+EXPORT_SYMBOL_GPL(rtdev_alloc_name);
+
+EXPORT_SYMBOL_GPL(rt_register_rtnetdev);
+EXPORT_SYMBOL_GPL(rt_unregister_rtnetdev);
+
+EXPORT_SYMBOL_GPL(rtdev_add_event_hook);
+EXPORT_SYMBOL_GPL(rtdev_del_event_hook);
+
+EXPORT_SYMBOL_GPL(rtdev_get_by_name);
+EXPORT_SYMBOL_GPL(rtdev_get_by_index);
+EXPORT_SYMBOL_GPL(rtdev_get_by_hwaddr);
+EXPORT_SYMBOL_GPL(rtdev_get_loopback);
+
+EXPORT_SYMBOL_GPL(rtdev_xmit);
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+EXPORT_SYMBOL_GPL(rtdev_xmit_proxy);
+#endif
+
+EXPORT_SYMBOL_GPL(rt_hard_mtu);
+++ linux-patched/drivers/xenomai/net/stack/rtnet_module.c	2022-03-21 12:58:29.996883042 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/addons/proxy.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  stack/rtnet_module.c - module framework, proc file system
+ *
+ *  Copyright (C) 2002      Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2003-2006 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include <rtdev_mgr.h>
+#include <rtnet_chrdev.h>
+#include <rtnet_internal.h>
+#include <rtnet_socket.h>
+#include <rtnet_rtpc.h>
+#include <stack_mgr.h>
+#include <rtwlan.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RTnet stack core");
+
+struct class *rtnet_class;
+
+struct rtnet_mgr STACK_manager;
+struct rtnet_mgr RTDEV_manager;
+
+EXPORT_SYMBOL_GPL(STACK_manager);
+EXPORT_SYMBOL_GPL(RTDEV_manager);
+
+const char rtnet_rtdm_provider_name[] =
+	"(C) 1999-2008 RTnet Development Team, http://www.rtnet.org";
+
+EXPORT_SYMBOL_GPL(rtnet_rtdm_provider_name);
+
+void rtnet_corectl_register(void);
+void rtnet_corectl_unregister(void);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+/***
+ *      proc filesystem section
+ */
+struct xnvfile_directory rtnet_proc_root;
+EXPORT_SYMBOL_GPL(rtnet_proc_root);
+
+static int rtnet_devices_nrt_lock_get(struct xnvfile *vfile)
+{
+	return mutex_lock_interruptible(&rtnet_devices_nrt_lock);
+}
+
+static void rtnet_devices_nrt_lock_put(struct xnvfile *vfile)
+{
+	mutex_unlock(&rtnet_devices_nrt_lock);
+}
+
+static struct xnvfile_lock_ops rtnet_devices_nrt_lock_ops = {
+	.get = rtnet_devices_nrt_lock_get,
+	.put = rtnet_devices_nrt_lock_put,
+};
+
+static void *rtnet_devices_begin(struct xnvfile_regular_iterator *it)
+{
+	if (it->pos == 0)
+		return VFILE_SEQ_START;
+
+	return (void *)2UL;
+}
+
+static void *rtnet_devices_next(struct xnvfile_regular_iterator *it)
+{
+	if (it->pos >= MAX_RT_DEVICES)
+		return NULL;
+
+	return (void *)2UL;
+}
+
+static int rtnet_devices_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	struct rtnet_device *rtdev;
+
+	if (data == NULL) {
+		xnvfile_printf(it, "Index\tName\t\tFlags\n");
+		return 0;
+	}
+
+	rtdev = __rtdev_get_by_index(it->pos);
+	if (rtdev == NULL)
+		return VFILE_SEQ_SKIP;
+
+	xnvfile_printf(it, "%d\t%-15s %s%s%s%s\n", rtdev->ifindex, rtdev->name,
+		       (rtdev->flags & IFF_UP) ? "UP" : "DOWN",
+		       (rtdev->flags & IFF_BROADCAST) ? " BROADCAST" : "",
+		       (rtdev->flags & IFF_LOOPBACK) ? " LOOPBACK" : "",
+		       (rtdev->flags & IFF_PROMISC) ? " PROMISC" : "");
+	return 0;
+}
+
+static struct xnvfile_regular_ops rtnet_devices_vfile_ops = {
+	.begin = rtnet_devices_begin,
+	.next = rtnet_devices_next,
+	.show = rtnet_devices_show,
+};
+
+static struct xnvfile_regular rtnet_devices_vfile = {
+	.entry = { .lockops = &rtnet_devices_nrt_lock_ops, },
+	.ops = &rtnet_devices_vfile_ops,
+};
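+
+/*
+ * The vfile above typically appears as /proc/xenomai/rtnet/devices (the
+ * exact location depends on where the Xenomai vfile root is mounted).
+ * An illustrative output produced by the format strings below:
+ *
+ *	Index	Name		Flags
+ *	1	rteth0          UP BROADCAST
+ */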
+
+static int rtnet_rtskb_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	unsigned int rtskb_len;
+
+	rtskb_len = ALIGN_RTSKB_STRUCT_LEN + SKB_DATA_ALIGN(RTSKB_SIZE);
+
+	xnvfile_printf(it,
+		       "Statistics\t\tCurrent\tMaximum\n"
+		       "rtskb pools\t\t%d\t%d\n"
+		       "rtskbs\t\t\t%d\t%d\n"
+		       "rtskb memory need\t%d\t%d\n",
+		       rtskb_pools, rtskb_pools_max, rtskb_amount,
+		       rtskb_amount_max, rtskb_amount * rtskb_len,
+		       rtskb_amount_max * rtskb_len);
+	return 0;
+}
+
+static struct xnvfile_regular_ops rtnet_rtskb_vfile_ops = {
+	.show = rtnet_rtskb_show,
+};
+
+static struct xnvfile_regular rtnet_rtskb_vfile = {
+	.ops = &rtnet_rtskb_vfile_ops,
+};
+
+static int rtnet_version_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	const char verstr[] = "RTnet for Xenomai v" XENO_VERSION_STRING "\n"
+			      "RTcap:      "
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
+			      "yes\n"
+#else
+			      "no\n"
+#endif
+			      "rtnetproxy: "
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+			      "yes\n"
+#else
+			      "no\n"
+#endif
+			      "bug checks: "
+#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
+			      "yes\n"
+#else
+			      "no\n"
+#endif
+		;
+
+	xnvfile_printf(it, "%s", verstr);
+
+	return 0;
+}
+
+static struct xnvfile_regular_ops rtnet_version_vfile_ops = {
+	.show = rtnet_version_show,
+};
+
+static struct xnvfile_regular rtnet_version_vfile = {
+	.ops = &rtnet_version_vfile_ops,
+};
+
+static void *rtnet_stats_begin(struct xnvfile_regular_iterator *it)
+{
+	return (void *)1UL;
+}
+
+static void *rtnet_stats_next(struct xnvfile_regular_iterator *it)
+{
+	if (it->pos >= MAX_RT_DEVICES)
+		return NULL;
+
+	return (void *)1UL;
+}
+
+static int rtnet_stats_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	struct net_device_stats *stats;
+	struct rtnet_device *rtdev;
+
+	if (it->pos == 0) {
+		xnvfile_printf(it,
+			       "Inter-|   Receive                            "
+			       "                    |  Transmit\n");
+		xnvfile_printf(it,
+			       " face |bytes    packets errs drop fifo frame "
+			       "compressed multicast|bytes    packets errs "
+			       "drop fifo colls carrier compressed\n");
+		return 0;
+	}
+
+	rtdev = __rtdev_get_by_index(it->pos);
+	if (rtdev == NULL)
+		return VFILE_SEQ_SKIP;
+
+	if (rtdev->get_stats == NULL) {
+		xnvfile_printf(it, "%6s: No statistics available.\n",
+			       rtdev->name);
+		return 0;
+	}
+
+	stats = rtdev->get_stats(rtdev);
+	xnvfile_printf(
+		it,
+		"%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
+		"%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
+		rtdev->name, stats->rx_bytes, stats->rx_packets,
+		stats->rx_errors, stats->rx_dropped + stats->rx_missed_errors,
+		stats->rx_fifo_errors,
+		stats->rx_length_errors + stats->rx_over_errors +
+			stats->rx_crc_errors + stats->rx_frame_errors,
+		stats->rx_compressed, stats->multicast, stats->tx_bytes,
+		stats->tx_packets, stats->tx_errors, stats->tx_dropped,
+		stats->tx_fifo_errors, stats->collisions,
+		stats->tx_carrier_errors + stats->tx_aborted_errors +
+			stats->tx_window_errors + stats->tx_heartbeat_errors,
+		stats->tx_compressed);
+	return 0;
+}
+
+static struct xnvfile_regular_ops rtnet_stats_vfile_ops = {
+	.begin = rtnet_stats_begin,
+	.next = rtnet_stats_next,
+	.show = rtnet_stats_show,
+};
+
+static struct xnvfile_regular rtnet_stats_vfile = {
+	.entry = { .lockops = &rtnet_devices_nrt_lock_ops, },
+	.ops = &rtnet_stats_vfile_ops,
+};
+
+static int rtnet_proc_register(void)
+{
+	int err;
+
+	err = xnvfile_init_dir("rtnet", &rtnet_proc_root, NULL);
+	if (err < 0)
+		goto error1;
+
+	err = xnvfile_init_regular("devices", &rtnet_devices_vfile,
+				   &rtnet_proc_root);
+	if (err < 0)
+		goto error2;
+
+	err = xnvfile_init_regular("rtskb", &rtnet_rtskb_vfile,
+				   &rtnet_proc_root);
+	if (err < 0)
+		goto error3;
+
+	err = xnvfile_init_regular("version", &rtnet_version_vfile,
+				   &rtnet_proc_root);
+	if (err < 0)
+		goto error4;
+
+	err = xnvfile_init_regular("stats", &rtnet_stats_vfile,
+				   &rtnet_proc_root);
+	if (err < 0)
+		goto error5;
+
+	return 0;
+
+error5:
+	xnvfile_destroy_regular(&rtnet_version_vfile);
+
+error4:
+	xnvfile_destroy_regular(&rtnet_rtskb_vfile);
+
+error3:
+	xnvfile_destroy_regular(&rtnet_devices_vfile);
+
+error2:
+	xnvfile_destroy_dir(&rtnet_proc_root);
+
+error1:
+	printk("RTnet: unable to initialize /proc entries\n");
+	return err;
+}
+
+static void rtnet_proc_unregister(void)
+{
+	xnvfile_destroy_regular(&rtnet_stats_vfile);
+	xnvfile_destroy_regular(&rtnet_version_vfile);
+	xnvfile_destroy_regular(&rtnet_rtskb_vfile);
+	xnvfile_destroy_regular(&rtnet_devices_vfile);
+	xnvfile_destroy_dir(&rtnet_proc_root);
+}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+/**
+ *  rtnet_init()
+ */
+int __init rtnet_init(void)
+{
+	int err = 0;
+
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	printk("\n*** RTnet for Xenomai v" XENO_VERSION_STRING " ***\n\n");
+	printk("RTnet: initialising real-time networking\n");
+
+	rtnet_class = class_create(THIS_MODULE, "rtnet");
+	if (IS_ERR(rtnet_class))
+		return PTR_ERR(rtnet_class);
+
+	if ((err = rtskb_pools_init()) != 0)
+		goto err_out1;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	if ((err = rtnet_proc_register()) != 0)
+		goto err_out2;
+#endif
+
+	/* initialize the Stack-Manager */
+	if ((err = rt_stack_mgr_init(&STACK_manager)) != 0)
+		goto err_out3;
+
+	/* initialize the RTDEV-Manager */
+	if ((err = rt_rtdev_mgr_init(&RTDEV_manager)) != 0)
+		goto err_out4;
+
+	rtnet_chrdev_init();
+
+	if ((err = rtwlan_init()) != 0)
+		goto err_out5;
+
+	if ((err = rtpc_init()) != 0)
+		goto err_out6;
+
+	rtnet_corectl_register();
+
+	return 0;
+
+err_out6:
+	rtwlan_exit();
+
+err_out5:
+	rtnet_chrdev_release();
+	rt_rtdev_mgr_delete(&RTDEV_manager);
+
+err_out4:
+	rt_stack_mgr_delete(&STACK_manager);
+
+err_out3:
+#ifdef CONFIG_XENO_OPT_VFILE
+	rtnet_proc_unregister();
+
+err_out2:
+#endif
+	rtskb_pools_release();
+
+err_out1:
+	class_destroy(rtnet_class);
+
+	return err;
+}
+
+/**
+ *  rtnet_release()
+ */
+void __exit rtnet_release(void)
+{
+	rtnet_corectl_unregister();
+
+	rtpc_cleanup();
+
+	rtwlan_exit();
+
+	rtnet_chrdev_release();
+
+	rt_stack_mgr_delete(&STACK_manager);
+	rt_rtdev_mgr_delete(&RTDEV_manager);
+
+	rtskb_pools_release();
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	rtnet_proc_unregister();
+#endif
+
+	class_destroy(rtnet_class);
+
+	printk("RTnet: unloaded\n");
+}
+
+module_init(rtnet_init);
+module_exit(rtnet_release);
+++ linux-patched/drivers/xenomai/net/addons/proxy.c	2022-03-21 12:58:29.988883120 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/addons/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/* rtnetproxy.c: a Linux network driver that uses the RTnet driver to
+ * transport IP data from/to Linux kernel mode.
+ * This allows using TCP/IP from Linux space via the RTnet
+ * network adapter.
+ *
+ *
+ * Usage:
+ *
+ * insmod rtnetproxy.o    (only after having rtnet up and running)
+ *
+ * ifconfig rtproxy up IP_ADDRESS netmask NETMASK
+ *
+ * Use it like any other network device from linux.
+ *
+ * Restrictions:
+ * Only IPv4-based protocols are supported; UDP and ICMP can be sent out
+ * but not received, as these are handled directly by RTnet!
+ *
+ *
+ *
+ * Based on the linux net driver dummy.c by Nick Holloway
+ *
+ *
+ * Changelog:
+ *
+ * 08-Nov-2002  Mathias Koehrer - Clear separation between rtai context and
+ *                                standard linux driver context.
+ *                                Data exchange via ringbuffers.
+ *                                A RTAI thread is used for rtnet transmission.
+ *
+ * 05-Nov-2002  Mathias Koehrer - Initial version!
+ *                                Development based on rtnet 0.2.6,
+ *                                rtai-24.1.10, kernel 2.4.19
+ *
+ *
+ * Mathias Koehrer - mathias_koehrer@yahoo.de
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/ip.h>
+
+#include <linux/if_ether.h> /* For the statistics structure. */
+#include <linux/if_arp.h> /* For ARPHRD_ETHER */
+
+#include <rtdev.h>
+#include <rtskb.h>
+#include <rtdm/driver.h>
+#include <ipv4/ip_input.h>
+#include <ipv4/route.h>
+#include <rtnet_port.h>
+
+static struct net_device *dev_rtnetproxy;
+
+/* **************************************************************************
+ *  SKB pool management (JK):
+ * ************************************************************************ */
+#define DEFAULT_PROXY_RTSKBS 32
+
+static unsigned int proxy_rtskbs = DEFAULT_PROXY_RTSKBS;
+module_param(proxy_rtskbs, uint, 0444);
+MODULE_PARM_DESC(proxy_rtskbs,
+		 "Number of realtime socket buffers in proxy pool");
+
+static struct rtskb_pool rtskb_pool;
+
+static struct rtskb_queue tx_queue;
+static struct rtskb_queue rx_queue;
+
+/* handle for non-real-time signal */
+static rtdm_nrtsig_t rtnetproxy_rx_signal;
+
+/* Thread for transmission */
+static rtdm_task_t rtnetproxy_tx_task;
+
+static rtdm_event_t rtnetproxy_tx_event;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+static char *rtdev_attach = "rteth0";
+module_param(rtdev_attach, charp, 0444);
+MODULE_PARM_DESC(rtdev_attach, "Attach to the specified RTnet device");
+
+struct rtnet_device *rtnetproxy_rtdev;
+#endif
+
+/* ************************************************************************
+ * ************************************************************************
+ *   T R A N S M I T
+ * ************************************************************************
+ * ************************************************************************ */
+
+static void rtnetproxy_tx_loop(void *arg)
+{
+	struct rtnet_device *rtdev;
+	struct rtskb *rtskb;
+
+	while (!rtdm_task_should_stop()) {
+		if (rtdm_event_wait(&rtnetproxy_tx_event) < 0)
+			break;
+
+		while ((rtskb = rtskb_dequeue(&tx_queue)) != NULL) {
+			rtdev = rtskb->rtdev;
+			rtdev_xmit_proxy(rtskb);
+			rtdev_dereference(rtdev);
+		}
+	}
+}
+
+/* ************************************************************************
+ *  hard_xmit
+ *
+ *  This function runs in linux kernel context and is executed whenever
+ *  there is a frame to be sent out.
+ * ************************************************************************ */
+static int rtnetproxy_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ethhdr *eth = (struct ethhdr *)skb->data;
+	struct rtskb *rtskb;
+	int len = skb->len;
+#ifndef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+	struct dest_route rt;
+	struct iphdr *iph;
+	u32 saddr, daddr;
+#endif
+
+	switch (ntohs(eth->h_proto)) {
+	case ETH_P_IP:
+		if (len < sizeof(struct ethhdr) + sizeof(struct iphdr))
+			goto drop1;
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+	case ETH_P_ARP:
+#endif
+		break;
+	default:
+	drop1:
+		dev->stats.tx_dropped++;
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	rtskb = alloc_rtskb(len, &rtskb_pool);
+	if (!rtskb)
+		return NETDEV_TX_BUSY;
+
+	memcpy(rtskb_put(rtskb, len), skb->data, len);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+	dev_kfree_skb(skb);
+
+	rtskb->rtdev = rtnetproxy_rtdev;
+	if (rtdev_reference(rtnetproxy_rtdev) == 0) {
+		dev->stats.tx_dropped++;
+		kfree_rtskb(rtskb);
+		return NETDEV_TX_OK; /* skb was already consumed above */
+	}
+
+#else /* !CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP */
+	iph = (struct iphdr *)(skb->data + sizeof(struct ethhdr));
+	saddr = iph->saddr;
+	daddr = iph->daddr;
+
+	dev_kfree_skb(skb);
+
+	if (rt_ip_route_output(&rt, daddr, INADDR_ANY) < 0) {
+	drop2:
+		dev->stats.tx_dropped++;
+		kfree_rtskb(rtskb);
+		return NETDEV_TX_OK;
+	}
+	if (rt.rtdev->local_ip != saddr) {
+		rtdev_dereference(rt.rtdev);
+		goto drop2;
+	}
+
+	eth = (struct ethhdr *)rtskb->data;
+	memcpy(eth->h_source, rt.rtdev->dev_addr, rt.rtdev->addr_len);
+	memcpy(eth->h_dest, rt.dev_addr, rt.rtdev->addr_len);
+
+	rtskb->rtdev = rt.rtdev;
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP */
+
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += len;
+
+	rtskb_queue_tail(&tx_queue, rtskb);
+	rtdm_event_signal(&rtnetproxy_tx_event);
+
+	return NETDEV_TX_OK;
+}
+
+/* ************************************************************************
+ * ************************************************************************
+ *   R E C E I V E
+ * ************************************************************************
+ * ************************************************************************ */
+
+/* ************************************************************************
+ * This function runs in real-time context.
+ *
+ * It is called from inside rtnet whenever a packet has been received that
+ * has to be processed by rtnetproxy.
+ * ************************************************************************ */
+static void rtnetproxy_recv(struct rtskb *rtskb)
+{
+	/* Acquire rtskb (JK) */
+	if (rtskb_acquire(rtskb, &rtskb_pool) != 0) {
+		dev_rtnetproxy->stats.rx_dropped++;
+		rtdm_printk("rtnetproxy_recv: No free rtskb in pool\n");
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	if (rtskb_queue_tail_check(&rx_queue, rtskb))
+		rtdm_nrtsig_pend(&rtnetproxy_rx_signal);
+}
+
+/* ************************************************************************
+ * This function runs in kernel mode.
+ * It is activated from rtnetproxy_signal_handler whenever rtnet received a
+ * frame to be processed by rtnetproxy.
+ * ************************************************************************ */
+static inline void rtnetproxy_kernel_recv(struct rtskb *rtskb)
+{
+	struct sk_buff *skb;
+	struct net_device *dev = dev_rtnetproxy;
+
+	int header_len = rtskb->rtdev->hard_header_len;
+	int len = rtskb->len + header_len;
+
+	/* Copy the realtime skb (rtskb) to the standard skb: */
+	skb = dev_alloc_skb(len + 2);
+	if (!skb) {
+		dev->stats.rx_dropped++;
+		return;
+	}
+	skb_reserve(skb, 2);
+
+	memcpy(skb_put(skb, len), rtskb->data - header_len, len);
+
+	/* Set some relevant entries in the skb: */
+	skb->protocol = eth_type_trans(skb, dev);
+	skb->dev = dev;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	/* force PACKET_HOST, otherwise frames classified as PACKET_OTHERHOST
+	 * would be dropped by the IP input path */
+	skb->pkt_type = PACKET_HOST;
+
+	/* the rtskb stamp is useless (different clock), get new one */
+	__net_timestamp(skb);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+	dev->last_rx = jiffies;
+#endif
+	dev->stats.rx_bytes += skb->len;
+	dev->stats.rx_packets++;
+
+	netif_rx(skb); /* hand it over to the Linux network stack */
+}
+
+/* ************************************************************************
+ * This function runs in kernel mode.
+ * It is activated from rtnetproxy_recv whenever rtnet received a frame to
+ * be processed by rtnetproxy.
+ * ************************************************************************ */
+static void rtnetproxy_signal_handler(rtdm_nrtsig_t *nrtsig, void *arg)
+{
+	struct rtskb *rtskb;
+
+	while ((rtskb = rtskb_dequeue(&rx_queue)) != NULL) {
+		rtnetproxy_kernel_recv(rtskb);
+		kfree_rtskb(rtskb);
+	}
+}
+
+/* ************************************************************************
+ * ************************************************************************
+ *   G E N E R A L
+ * ************************************************************************
+ * ************************************************************************ */
+
+static void fake_multicast_support(struct net_device *dev)
+{
+}
+
+#ifdef CONFIG_NET_FASTROUTE
+static int rtnetproxy_accept_fastpath(struct net_device *dev,
+				      struct dst_entry *dst)
+{
+	return -1;
+}
+#endif
+
+static int rtnetproxy_open(struct net_device *dev)
+{
+	int err = try_module_get(THIS_MODULE);
+	if (err == 0)
+		return -EIDRM;
+
+	return 0;
+}
+
+static int rtnetproxy_stop(struct net_device *dev)
+{
+	module_put(THIS_MODULE);
+	return 0;
+}
+
+static const struct net_device_ops rtnetproxy_netdev_ops = {
+	.ndo_open = rtnetproxy_open,
+	.ndo_stop = rtnetproxy_stop,
+	.ndo_start_xmit = rtnetproxy_xmit,
+	.ndo_set_rx_mode = fake_multicast_support,
+};
+
+/* ************************************************************************
+ *  device init
+ * ************************************************************************ */
+static void __init rtnetproxy_init(struct net_device *dev)
+{
+	/* Fill in device structure with ethernet-generic values. */
+	ether_setup(dev);
+
+	dev->tx_queue_len = 0;
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+	memcpy(dev->dev_addr, rtnetproxy_rtdev->dev_addr, MAX_ADDR_LEN);
+#else
+	dev->flags |= IFF_NOARP;
+#endif
+	dev->flags &= ~IFF_MULTICAST;
+
+	dev->netdev_ops = &rtnetproxy_netdev_ops;
+}
+
+/* ************************************************************************
+ * ************************************************************************
+ *   I N I T
+ * ************************************************************************
+ * ************************************************************************ */
+static int __init rtnetproxy_init_module(void)
+{
+	int err;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+	if ((rtnetproxy_rtdev = rtdev_get_by_name(rtdev_attach)) == NULL) {
+		printk("Couldn't attach to %s\n", rtdev_attach);
+		return -EINVAL;
+	}
+	printk("RTproxy attached to %s\n", rtdev_attach);
+#endif
+
+	/* Initialize the proxy's rtskb pool (JK) */
+	if (rtskb_module_pool_init(&rtskb_pool, proxy_rtskbs) < proxy_rtskbs) {
+		err = -ENOMEM;
+		goto err1;
+	}
+
+	dev_rtnetproxy =
+		alloc_netdev(0, "rtproxy", NET_NAME_UNKNOWN, rtnetproxy_init);
+	if (!dev_rtnetproxy) {
+		err = -ENOMEM;
+		goto err1;
+	}
+
+	rtdm_nrtsig_init(&rtnetproxy_rx_signal, rtnetproxy_signal_handler,
+			 NULL);
+
+	rtskb_queue_init(&tx_queue);
+	rtskb_queue_init(&rx_queue);
+
+	err = register_netdev(dev_rtnetproxy);
+	if (err < 0)
+		goto err3;
+
+	/* Init the task for transmission */
+	rtdm_event_init(&rtnetproxy_tx_event, 0);
+	err = rtdm_task_init(&rtnetproxy_tx_task, "rtnetproxy",
+			     rtnetproxy_tx_loop, 0, RTDM_TASK_LOWEST_PRIORITY,
+			     0);
+	if (err)
+		goto err4;
+
+	/* Register with RTnet */
+	rt_ip_fallback_handler = rtnetproxy_recv;
+
+	printk("rtnetproxy installed as \"%s\"\n", dev_rtnetproxy->name);
+
+	return 0;
+
+err4:
+	unregister_netdev(dev_rtnetproxy);
+
+err3:
+	rtdm_nrtsig_destroy(&rtnetproxy_rx_signal);
+
+	free_netdev(dev_rtnetproxy);
+
+err1:
+	rtskb_pool_release(&rtskb_pool);
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+	rtdev_dereference(rtnetproxy_rtdev);
+#endif
+	return err;
+}
+
+static void __exit rtnetproxy_cleanup_module(void)
+{
+	struct rtskb *rtskb;
+
+	/* Unregister the fallback at rtnet */
+	rt_ip_fallback_handler = NULL;
+
+	/* Unregister the net device: */
+	unregister_netdev(dev_rtnetproxy);
+	free_netdev(dev_rtnetproxy);
+
+	rtdm_event_destroy(&rtnetproxy_tx_event);
+	rtdm_task_destroy(&rtnetproxy_tx_task);
+
+	/* free the non-real-time signal */
+	rtdm_nrtsig_destroy(&rtnetproxy_rx_signal);
+
+	while ((rtskb = rtskb_dequeue(&tx_queue)) != NULL) {
+		rtdev_dereference(rtskb->rtdev);
+		kfree_rtskb(rtskb);
+	}
+
+	while ((rtskb = rtskb_dequeue(&rx_queue)) != NULL) {
+		kfree_rtskb(rtskb);
+	}
+
+	rtskb_pool_release(&rtskb_pool);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+	rtdev_dereference(rtnetproxy_rtdev);
+#endif
+}
+
+module_init(rtnetproxy_init_module);
+module_exit(rtnetproxy_cleanup_module);
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/net/addons/Kconfig	2022-03-21 12:58:29.981883189 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/addons/Makefile	1970-01-01 01:00:00.000000000 +0100
+menu "Add-Ons"
+    depends on XENO_DRIVERS_NET
+
+config XENO_DRIVERS_NET_ADDON_RTCAP
+    depends on XENO_DRIVERS_NET && m
+    select ETHERNET
+    tristate "Real-Time Capturing Support"
+    default n
+    help
+    This feature allows capturing real-time packets traversing the RTnet
+    stack. It can both be used to sniff passively on a network (in this
+    case you may want to enable the promisc mode of your real-time NIC via
+    rtifconfig) and to log the traffic the node receives and transmits
+    during normal operation. RTcap consists of additional hooks in the
+    RTnet stack and a separate module as interface to standard network
+    analysis tools like Ethereal.
+
+    For further information see Documentation/README.rtcap.
+
+config XENO_DRIVERS_NET_ADDON_PROXY
+    depends on XENO_DRIVERS_NET_RTIPV4 && m
+    select ETHERNET
+    tristate "IP protocol proxy for Linux"
+    default n
+    help
+    Enables a forward-to-Linux module for all IP protocols that are not
+    handled by the IPv4 implementation of RTnet (TCP, UDP, etc.). Only use
+    when you know what you are doing - it can easily break your real-time
+    requirements!
+
+    See Documentation/README.rtnetproxy for further information.
+
+config XENO_DRIVERS_NET_ADDON_PROXY_ARP
+    depends on XENO_DRIVERS_NET_ADDON_PROXY
+    bool "Enable ARP handling via protocol proxy"
+    default n
+    help
+    Enables ARP support for the IP protocol proxy. Incoming ARP replies
+    are then delivered to both the RTnet and the Linux network stack,
+    but only answered by Linux. The IP protocol proxy gets attached to
+    the RTnet device specified by the module parameter "rtdev_attach",
+    rteth0 by default.
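+
+    For example, "modprobe rtnetproxy rtdev_attach=rteth1" attaches the
+    proxy to rteth1 instead.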
+
+endmenu
+++ linux-patched/drivers/xenomai/net/addons/Makefile	2022-03-21 12:58:29.973883266 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/addons/cap.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP) += rtcap.o
+
+rtcap-y := cap.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY) += rtnetproxy.o
+
+rtnetproxy-y := proxy.o
+++ linux-patched/drivers/xenomai/net/addons/cap.c	2022-03-21 12:58:29.966883335 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/mpc8xx_enet.c	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  rtcap/rtcap.c
+ *
+ *  Real-Time Capturing Interface
+ *
+ *  Copyright (C) 2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+
+#include <rtdev.h>
+#include <rtnet_chrdev.h>
+#include <rtnet_port.h> /* for netdev_priv() */
+
+MODULE_LICENSE("GPL");
+
+static unsigned int rtcap_rtskbs = 128;
+module_param(rtcap_rtskbs, uint, 0444);
+MODULE_PARM_DESC(rtcap_rtskbs, "Number of real-time socket buffers per "
+			       "real-time device");
+
+#define TAP_DEV 1
+#define RTMAC_TAP_DEV 2
+#define XMIT_HOOK 4
+
+static rtdm_nrtsig_t cap_signal;
+static struct rtskb_queue cap_queue;
+static struct rtskb_pool cap_pool;
+
+static struct tap_device_t {
+	struct net_device *tap_dev;
+	struct net_device *rtmac_tap_dev;
+	struct net_device_stats tap_dev_stats;
+	int present;
+	int (*orig_xmit)(struct rtskb *skb, struct rtnet_device *dev);
+} tap_device[MAX_RT_DEVICES];
+
+void rtcap_rx_hook(struct rtskb *rtskb)
+{
+	bool			trigger = false;
+
+	if ((rtskb->cap_comp_skb = rtskb_pool_dequeue(&cap_pool)) == 0) {
+		tap_device[rtskb->rtdev->ifindex].tap_dev_stats.rx_dropped++;
+		return;
+	}
+
+	if (cap_queue.first == NULL) {
+		cap_queue.first = rtskb;
+		trigger = true;
+	} else
+		cap_queue.last->cap_next = rtskb;
+	cap_queue.last = rtskb;
+	rtskb->cap_next = NULL;
+
+	rtskb->cap_flags |= RTSKB_CAP_SHARED;
+
+	if (trigger)
+		rtdm_nrtsig_pend(&cap_signal);
+}
+
+int rtcap_xmit_hook(struct rtskb *rtskb, struct rtnet_device *rtdev)
+{
+	struct tap_device_t *tap_dev = &tap_device[rtskb->rtdev->ifindex];
+	rtdm_lockctx_t context;
+	bool trigger = false;
+
+	if ((rtskb->cap_comp_skb = rtskb_pool_dequeue(&cap_pool)) == 0) {
+		tap_dev->tap_dev_stats.rx_dropped++;
+		return tap_dev->orig_xmit(rtskb, rtdev);
+	}
+
+	rtskb->cap_next = NULL;
+	rtskb->cap_start = rtskb->data;
+	rtskb->cap_len = rtskb->len;
+	rtskb->cap_flags |= RTSKB_CAP_SHARED;
+
+	rtskb->time_stamp = rtdm_clock_read();
+
+	rtdm_lock_get_irqsave(&rtcap_lock, context);
+
+	if (cap_queue.first == NULL) {
+		cap_queue.first = rtskb;
+		trigger = true;
+	} else
+		cap_queue.last->cap_next = rtskb;
+	cap_queue.last = rtskb;
+
+	rtdm_lock_put_irqrestore(&rtcap_lock, context);
+
+	if (trigger)
+		rtdm_nrtsig_pend(&cap_signal);
+
+	return tap_dev->orig_xmit(rtskb, rtdev);
+}
+
+int rtcap_loopback_xmit_hook(struct rtskb *rtskb, struct rtnet_device *rtdev)
+{
+	struct tap_device_t *tap_dev = &tap_device[rtskb->rtdev->ifindex];
+
+	rtskb->time_stamp = rtdm_clock_read();
+
+	return tap_dev->orig_xmit(rtskb, rtdev);
+}
+
+void rtcap_kfree_rtskb(struct rtskb *rtskb)
+{
+	rtdm_lockctx_t context;
+	struct rtskb *comp_skb;
+
+	rtdm_lock_get_irqsave(&rtcap_lock, context);
+
+	if (rtskb->cap_flags & RTSKB_CAP_SHARED) {
+		rtskb->cap_flags &= ~RTSKB_CAP_SHARED;
+
+		comp_skb = rtskb->cap_comp_skb;
+
+		rtdm_lock_put_irqrestore(&rtcap_lock, context);
+
+		rtskb_pool_queue_tail(comp_skb->pool, comp_skb);
+
+		return;
+	}
+
+	rtdm_lock_put_irqrestore(&rtcap_lock, context);
+
+	rtskb->chain_end = rtskb;
+	rtskb_pool_queue_tail(rtskb->pool, rtskb);
+}
+
+static void convert_timestamp(nanosecs_abs_t timestamp, struct sk_buff *skb)
+{
+#ifdef CONFIG_KTIME_SCALAR
+	skb->tstamp.tv64 = timestamp;
+#else /* !CONFIG_KTIME_SCALAR */
+	unsigned long rem;
+
+	rem = do_div(timestamp, NSEC_PER_SEC);
+	skb->tstamp = ktime_set((long)timestamp, rem);
+#endif /* !CONFIG_KTIME_SCALAR */
+}
+
+static void rtcap_signal_handler(rtdm_nrtsig_t *nrtsig, void *arg)
+{
+	struct rtskb *rtskb;
+	struct sk_buff *skb;
+	struct sk_buff *rtmac_skb;
+	struct net_device_stats *stats;
+	int ifindex;
+	int active;
+	rtdm_lockctx_t context;
+
+	while (1) {
+		rtdm_lock_get_irqsave(&rtcap_lock, context);
+
+		if ((rtskb = cap_queue.first) == NULL) {
+			rtdm_lock_put_irqrestore(&rtcap_lock, context);
+			break;
+		}
+
+		cap_queue.first = rtskb->cap_next;
+
+		rtdm_lock_put_irqrestore(&rtcap_lock, context);
+
+		ifindex = rtskb->rtdev->ifindex;
+		active = tap_device[ifindex].present;
+
+		if (active) {
+			if ((tap_device[ifindex].tap_dev->flags & IFF_UP) == 0)
+				active &= ~TAP_DEV;
+			if (active & RTMAC_TAP_DEV &&
+			    !(tap_device[ifindex].rtmac_tap_dev->flags &
+			      IFF_UP))
+				active &= ~RTMAC_TAP_DEV;
+		}
+
+		if (active == 0) {
+			tap_device[ifindex].tap_dev_stats.rx_dropped++;
+			rtcap_kfree_rtskb(rtskb);
+			continue;
+		}
+
+		skb = dev_alloc_skb(rtskb->cap_len);
+		if (skb) {
+			memcpy(skb_put(skb, rtskb->cap_len), rtskb->cap_start,
+			       rtskb->cap_len);
+
+			if (active & TAP_DEV) {
+				skb->dev = tap_device[ifindex].tap_dev;
+				skb->protocol = eth_type_trans(skb, skb->dev);
+				convert_timestamp(rtskb->time_stamp, skb);
+
+				rtmac_skb = NULL;
+				if ((rtskb->cap_flags &
+				     RTSKB_CAP_RTMAC_STAMP) &&
+				    (active & RTMAC_TAP_DEV)) {
+					rtmac_skb = skb_clone(skb, GFP_ATOMIC);
+					if (rtmac_skb != NULL)
+						convert_timestamp(
+							rtskb->cap_rtmac_stamp,
+							rtmac_skb);
+				}
+
+				rtcap_kfree_rtskb(rtskb);
+
+				stats = &tap_device[ifindex].tap_dev_stats;
+				stats->rx_packets++;
+				stats->rx_bytes += skb->len;
+
+				if (rtmac_skb != NULL) {
+					rtmac_skb->dev = tap_device[ifindex]
+								 .rtmac_tap_dev;
+					netif_rx(rtmac_skb);
+				}
+				netif_rx(skb);
+			} else if (rtskb->cap_flags & RTSKB_CAP_RTMAC_STAMP) {
+				skb->dev = tap_device[ifindex].rtmac_tap_dev;
+				skb->protocol = eth_type_trans(skb, skb->dev);
+				convert_timestamp(rtskb->cap_rtmac_stamp, skb);
+
+				rtcap_kfree_rtskb(rtskb);
+
+				stats = &tap_device[ifindex].tap_dev_stats;
+				stats->rx_packets++;
+				stats->rx_bytes += skb->len;
+
+				netif_rx(skb);
+			} else {
+				dev_kfree_skb(skb);
+				rtcap_kfree_rtskb(rtskb);
+			}
+		} else {
+			printk("RTcap: unable to allocate linux skb\n");
+			rtcap_kfree_rtskb(rtskb);
+		}
+	}
+}
+
+static int tap_dev_open(struct net_device *dev)
+{
+	int err;
+
+	err = try_module_get(THIS_MODULE);
+	if (err == 0)
+		return -EIDRM;
+
+	memcpy(dev->dev_addr,
+	       (*(struct rtnet_device **)netdev_priv(dev))->dev_addr,
+	       MAX_ADDR_LEN);
+
+	return 0;
+}
+
+static int tap_dev_stop(struct net_device *dev)
+{
+	module_put(THIS_MODULE);
+	return 0;
+}
+
+static int tap_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return 1;
+}
+
+static struct net_device_stats *tap_dev_get_stats(struct net_device *dev)
+{
+	struct rtnet_device *rtdev = *(struct rtnet_device **)netdev_priv(dev);
+
+	return &tap_device[rtdev->ifindex].tap_dev_stats;
+}
+
+static int tap_dev_change_mtu(struct net_device *dev, int new_mtu)
+{
+	return -EINVAL;
+}
+
+static const struct net_device_ops tap_netdev_ops = {
+	.ndo_open = tap_dev_open,
+	.ndo_stop = tap_dev_stop,
+	.ndo_start_xmit = tap_dev_xmit,
+	.ndo_get_stats = tap_dev_get_stats,
+	.ndo_change_mtu = tap_dev_change_mtu,
+};
+
+static void tap_dev_setup(struct net_device *dev)
+{
+	ether_setup(dev);
+
+	dev->netdev_ops = &tap_netdev_ops;
+	dev->mtu = 1500;
+	dev->flags &= ~IFF_MULTICAST;
+}
+
+void cleanup_tap_devices(void)
+{
+	int i;
+	struct rtnet_device *rtdev;
+
+	for (i = 0; i < MAX_RT_DEVICES; i++)
+		if ((tap_device[i].present & TAP_DEV) != 0) {
+			if ((tap_device[i].present & XMIT_HOOK) != 0) {
+				rtdev = *(struct rtnet_device **)netdev_priv(
+					tap_device[i].tap_dev);
+
+				mutex_lock(&rtdev->nrt_lock);
+				rtdev->hard_start_xmit =
+					tap_device[i].orig_xmit;
+				if (rtdev->features & NETIF_F_LLTX)
+					rtdev->start_xmit =
+						tap_device[i].orig_xmit;
+				mutex_unlock(&rtdev->nrt_lock);
+
+				rtdev_dereference(rtdev);
+			}
+
+			if ((tap_device[i].present & RTMAC_TAP_DEV) != 0) {
+				unregister_netdev(tap_device[i].rtmac_tap_dev);
+				free_netdev(tap_device[i].rtmac_tap_dev);
+			}
+
+			unregister_netdev(tap_device[i].tap_dev);
+			free_netdev(tap_device[i].tap_dev);
+		}
+}
+
+int __init rtcap_init(void)
+{
+	struct rtnet_device *rtdev;
+	struct net_device *dev;
+	int ret;
+	int devices = 0;
+	int i;
+
+	printk("RTcap: real-time capturing interface\n");
+
+	rtskb_queue_init(&cap_queue);
+
+	rtdm_nrtsig_init(&cap_signal, rtcap_signal_handler, NULL);
+
+	for (i = 0; i < MAX_RT_DEVICES; i++) {
+		tap_device[i].present = 0;
+
+		rtdev = rtdev_get_by_index(i);
+		if (rtdev != NULL) {
+			mutex_lock(&rtdev->nrt_lock);
+
+			if (test_bit(PRIV_FLAG_UP, &rtdev->priv_flags)) {
+				mutex_unlock(&rtdev->nrt_lock);
+				printk("RTcap: %s busy, skipping device!\n",
+				       rtdev->name);
+				rtdev_dereference(rtdev);
+				continue;
+			}
+
+			if (rtdev->mac_priv != NULL) {
+				mutex_unlock(&rtdev->nrt_lock);
+
+				printk("RTcap: RTmac discipline already active on device %s. "
+				       "Load RTcap before RTmac!\n",
+				       rtdev->name);
+
+				rtdev_dereference(rtdev);
+				continue;
+			}
+
+			memset(&tap_device[i].tap_dev_stats, 0,
+			       sizeof(struct net_device_stats));
+
+			dev = alloc_netdev(sizeof(struct rtnet_device *),
+					   rtdev->name, NET_NAME_UNKNOWN,
+					   tap_dev_setup);
+			if (!dev) {
+				ret = -ENOMEM;
+				goto error3;
+			}
+
+			tap_device[i].tap_dev = dev;
+			*(struct rtnet_device **)netdev_priv(dev) = rtdev;
+
+			ret = register_netdev(dev);
+			if (ret < 0)
+				goto error3;
+
+			tap_device[i].present = TAP_DEV;
+
+			tap_device[i].orig_xmit = rtdev->hard_start_xmit;
+
+			if ((rtdev->flags & IFF_LOOPBACK) == 0) {
+				dev = alloc_netdev(
+					sizeof(struct rtnet_device *),
+					rtdev->name, NET_NAME_UNKNOWN,
+					tap_dev_setup);
+				if (!dev) {
+					ret = -ENOMEM;
+					goto error3;
+				}
+
+				tap_device[i].rtmac_tap_dev = dev;
+				*(struct rtnet_device **)netdev_priv(dev) =
+					rtdev;
+				strncat(dev->name, "-mac",
+					IFNAMSIZ - strlen(dev->name));
+
+				ret = register_netdev(dev);
+				if (ret < 0)
+					goto error3;
+
+				tap_device[i].present |= RTMAC_TAP_DEV;
+
+				rtdev->hard_start_xmit = rtcap_xmit_hook;
+			} else
+				rtdev->hard_start_xmit =
+					rtcap_loopback_xmit_hook;
+
+			/*
+			 * If the device requires no xmit_lock, start_xmit
+			 * equals hard_start_xmit, so we have to update it
+			 * as well.
+			 */
+			if (rtdev->features & NETIF_F_LLTX)
+				rtdev->start_xmit = rtdev->hard_start_xmit;
+
+			tap_device[i].present |= XMIT_HOOK;
+
+			mutex_unlock(&rtdev->nrt_lock);
+
+			devices++;
+		}
+	}
+
+	if (devices == 0) {
+		printk("RTcap: no real-time devices found!\n");
+		ret = -ENODEV;
+		goto error2;
+	}
+
+	if (rtskb_module_pool_init(&cap_pool, rtcap_rtskbs * devices) <
+	    rtcap_rtskbs * devices) {
+		rtskb_pool_release(&cap_pool);
+		ret = -ENOMEM;
+		goto error2;
+	}
+
+	/* register capturing handlers with RTnet core
+	 * (adding the handler needs no locking) */
+	rtcap_handler = rtcap_rx_hook;
+
+	return 0;
+
+error3:
+	mutex_unlock(&rtdev->nrt_lock);
+	rtdev_dereference(rtdev);
+	printk("RTcap: unable to register %s!\n", dev->name);
+
+error2:
+	cleanup_tap_devices();
+	rtdm_nrtsig_destroy(&cap_signal);
+
+	return ret;
+}
+
+void rtcap_cleanup(void)
+{
+	rtdm_lockctx_t context;
+
+	rtdm_nrtsig_destroy(&cap_signal);
+
+	/* unregister capturing handlers
+	 * (take the lock to make sure no handler is still running before
+	 *  unloading proceeds) */
+	rtdm_lock_get_irqsave(&rtcap_lock, context);
+	rtcap_handler = NULL;
+	rtdm_lock_put_irqrestore(&rtcap_lock, context);
+
+	/* empty queue (should be already empty) */
+	rtcap_signal_handler(0, NULL /* we ignore them anyway */);
+
+	cleanup_tap_devices();
+
+	rtskb_pool_release(&cap_pool);
+
+	printk("RTcap: unloaded\n");
+}
+
+module_init(rtcap_init);
+module_exit(rtcap_cleanup);
+++ linux-patched/drivers/xenomai/net/drivers/mpc8xx_enet.c	2022-03-21 12:58:29.959883403 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/macb.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * BK Id: SCCS/s.enet.c 1.24 01/19/02 03:07:14 dan
+ */
+/*
+ * Ethernet driver for Motorola MPC8xx.
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ *
+ * I copied the basic skeleton from the lance driver, because I did not
+ * know how to write the Linux driver, but I did know how the LANCE worked.
+ *
+ * This version of the driver is somewhat selectable for the different
+ * processor/board combinations.  It works for the boards I know about
+ * now, and should be easily modified to include others.  Some of the
+ * configuration information is contained in <asm/commproc.h> and the
+ * remainder is here.
+ *
+ * Buffer descriptors are kept in the CPM dual port RAM, and the frame
+ * buffers are in the host memory.
+ *
+ * Right now, I am very wasteful with the buffers.  I allocate memory
+ * pages and then divide them into 2K frame buffers.  This way I know I
+ * have buffers large enough to hold one frame within one buffer descriptor.
+ * Once I get this working, I will use 64 or 128 byte CPM buffers, which
+ * will be much more memory efficient and will easily handle lots of
+ * small packets.
+ *
+ * Ported to RTnet.
+ * Copyright (c) 2003 Wolfgang Grandegger (wg@denx.de)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include <asm/8xx_immap.h>
+#include <asm/pgtable.h>
+#include <asm/mpc8xx.h>
+#include <asm/bitops.h>
+#include <asm/irq.h>
+#include <asm/commproc.h>
+
+#include <rtnet_port.h>
+
+MODULE_AUTHOR("Maintainer: Wolfgang Grandegger <wg@denx.de>");
+MODULE_DESCRIPTION("RTnet MPC8xx SCC Ethernet driver");
+MODULE_LICENSE("GPL");
+
+static unsigned int rx_pool_size =  0;
+MODULE_PARM(rx_pool_size, "i");
+MODULE_PARM_DESC(rx_pool_size, "Receive buffer pool size");
+
+static unsigned int rtnet_scc = 1; /* SCC1 */
+MODULE_PARM(rtnet_scc, "i");
+MODULE_PARM_DESC(rtnet_scc, "SCCx port for RTnet, x=1..3 (default=1)");
+
+#define RT_DEBUG(fmt,args...)
+
+/*
+ *				Theory of Operation
+ *
+ * The MPC8xx CPM performs the Ethernet processing on SCC1.  It can use
+ * an arbitrary number of buffers on byte boundaries, but must have at
+ * least two receive buffers to prevent constant overrun conditions.
+ *
+ * The buffer descriptors are allocated from the CPM dual port memory
+ * with the data buffers allocated from host memory, just like all other
+ * serial communication protocols.  The host memory buffers are allocated
+ * from the free page pool, and then divided into smaller receive and
+ * transmit buffers.  The size of the buffers should be a power of two,
+ * since that nicely divides the page.  This creates a ring buffer
+ * structure similar to the LANCE and other controllers.
+ *
+ * Like the LANCE driver:
+ * The driver runs as two independent, single-threaded flows of control.  One
+ * is the send-packet routine, which enforces single-threaded use by the
+ * cep->tx_busy flag.  The other thread is the interrupt handler, which is
+ * single threaded by the hardware and other software.
+ *
+ * The send packet thread has partial control over the Tx ring and the
+ * 'cep->tx_busy' flag.  It sets the tx_busy flag whenever it's queuing a Tx
+ * packet. If the next queue slot is empty, it clears the tx_busy flag when
+ * finished, otherwise it sets the 'cep->tx_full' flag.
+ *
+ * The MBX has a control register external to the MPC8xx that has some
+ * control of the Ethernet interface.  Information is in the manual for
+ * your board.
+ *
+ * The RPX boards have an external control/status register.  Consult the
+ * programming documents for details unique to your board.
+ *
+ * For the TQM8xx(L) modules, there is no control register interface.
+ * All functions are directly controlled using I/O pins.  See <asm/commproc.h>.
+ */
+
+/* The transmitter timeout
+ */
+#define TX_TIMEOUT	(2*HZ)
+
+/* The number of Tx and Rx buffers.  These are allocated from the page
+ * pool.  The code may assume these are power of two, so it is best
+ * to keep them that size.
+ * We don't need to allocate pages for the transmitter.  We just use
+ * the skbuffer directly.
+ */
+#define CPM_ENET_RX_PAGES	4
+#define CPM_ENET_RX_FRSIZE	2048
+#define CPM_ENET_RX_FRPPG	(PAGE_SIZE / CPM_ENET_RX_FRSIZE)
+#define RX_RING_SIZE		(CPM_ENET_RX_FRPPG * CPM_ENET_RX_PAGES)
+#define TX_RING_SIZE		8	/* Must be power of two */
+#define TX_RING_MOD_MASK	7	/*   for this to work */
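+
+/* Worked example, assuming the usual 4 KiB pages: CPM_ENET_RX_FRPPG works
+ * out to 4096 / 2048 = 2 frame buffers per page, so RX_RING_SIZE is
+ * 2 * 4 = 8 receive buffers.  The Tx ring holds 8 entries, which is why
+ * TX_RING_MOD_MASK = 7 can wrap the ring index with a simple AND.
+ */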
+
+/* The CPM stores dest/src/type, data, and checksum for receive packets.
+ */
+#define PKT_MAXBUF_SIZE		1518
+#define PKT_MINBUF_SIZE		64
+#define PKT_MAXBLR_SIZE		1520
+
+/* The CPM buffer descriptors track the ring buffers.  The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors.  The
+ * cur_rx and cur_tx point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller.  The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions.  The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
+struct scc_enet_private {
+	/* The addresses of a Tx/Rx-in-place packets/buffers. */
+	struct rtskb *tx_skbuff[TX_RING_SIZE];
+	ushort	skb_cur;
+	ushort	skb_dirty;
+
+	/* CPM dual port RAM relative addresses.
+	*/
+	cbd_t	*rx_bd_base;		/* Address of Rx and Tx buffers. */
+	cbd_t	*tx_bd_base;
+	cbd_t	*cur_rx, *cur_tx;		/* The next free ring entry */
+	cbd_t	*dirty_tx;	/* The ring entries to be free()ed. */
+	scc_t	*sccp;
+
+	/* Virtual addresses for the receive buffers because we can't
+	 * do a __va() on them anymore.
+	 */
+	unsigned char *rx_vaddr[RX_RING_SIZE];
+	struct	net_device_stats stats;
+	uint	tx_full;
+	rtdm_lock_t lock;
+	rtdm_irq_t irq_handle;
+};
+
+static int scc_enet_open(struct rtnet_device *rtdev);
+static int scc_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev);
+static int scc_enet_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp);
+static int scc_enet_interrupt(rtdm_irq_t *irq_handle);
+static int scc_enet_close(struct rtnet_device *rtdev);
+
+static struct net_device_stats *scc_enet_get_stats(struct rtnet_device *rtdev);
+#ifdef ORIGINAL_VERSION
+static void set_multicast_list(struct net_device *dev);
+#endif
+
+#ifndef ORIGINAL_VERSION
+static struct rtnet_device *rtdev_root = NULL;
+#endif
+
+/* Typically, 860(T) boards use SCC1 for Ethernet, and other 8xx boards
+ * use SCC2. Some even may use SCC3.
+ * This is easily extended if necessary.
+ * These values are set when the driver is initialized.
+ */
+static int CPM_CR_ENET;
+static int PROFF_ENET;
+static int SCC_ENET;
+static int CPMVEC_ENET;
+
+static int
+scc_enet_open(struct rtnet_device *rtdev)
+{
+	/* I should reset the ring buffers here, but I don't yet know
+	 * a simple way to do that.
+	 */
+	rtnetif_start_queue(rtdev);
+
+	return 0;					/* Always succeed */
+}
+
+static int
+scc_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct scc_enet_private *cep = (struct scc_enet_private *)rtdev->priv;
+	volatile cbd_t	*bdp;
+	rtdm_lockctx_t context;
+
+
+	RT_DEBUG(__FUNCTION__": ...\n");
+
+	/* Fill in a Tx ring entry */
+	bdp = cep->cur_tx;
+
+#ifndef final_version
+	if (bdp->cbd_sc & BD_ENET_TX_READY) {
+		/* Ooops.  All transmit buffers are full.  Bail out.
+		 * This should not happen, since cep->tx_busy should be set.
+		 */
+		rtdm_printk("%s: tx queue full!.\n", rtdev->name);
+		return 1;
+	}
+#endif
+
+	/* Clear all of the status flags.
+	 */
+	bdp->cbd_sc &= ~BD_ENET_TX_STATS;
+
+	/* If the frame is short, tell CPM to pad it.
+	*/
+	if (skb->len <= ETH_ZLEN)
+		bdp->cbd_sc |= BD_ENET_TX_PAD;
+	else
+		bdp->cbd_sc &= ~BD_ENET_TX_PAD;
+
+	/* Set buffer length and buffer pointer.
+	*/
+	bdp->cbd_datlen = skb->len;
+	bdp->cbd_bufaddr = __pa(skb->data);
+
+	/* Save skb pointer.
+	*/
+	cep->tx_skbuff[cep->skb_cur] = skb;
+
+	cep->stats.tx_bytes += skb->len;
+	cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK;
+
+	/* Prevent interrupts from changing the Tx ring from underneath us. */
+	// *** RTnet ***
+	rtdm_lock_get_irqsave(&cep->lock, context);
+
+	/* Get and patch time stamp just before the transmission */
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+
+	/* Push the data cache so the CPM does not get stale memory
+	 * data.
+	 */
+	flush_dcache_range((unsigned long)(skb->data),
+			   (unsigned long)(skb->data + skb->len));
+
+
+	/* Send it on its way.  Tell CPM its ready, interrupt when done,
+	 * its the last BD of the frame, and to put the CRC on the end.
+	 */
+	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+
+	/* If this was the last BD in the ring, start at the beginning again.
+	*/
+	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+		bdp = cep->tx_bd_base;
+	else
+		bdp++;
+
+	if (bdp->cbd_sc & BD_ENET_TX_READY) {
+		rtnetif_stop_queue(rtdev);
+		cep->tx_full = 1;
+	}
+
+	cep->cur_tx = (cbd_t *)bdp;
+
+	// *** RTnet ***
+	rtdm_lock_put_irqrestore(&cep->lock, context);
+
+	return 0;
+}
+
+#ifdef ORIGINAL_VERSION
+static void
+scc_enet_timeout(struct net_device *dev)
+{
+	struct scc_enet_private *cep = (struct scc_enet_private *)dev->priv;
+
+	printk("%s: transmit timed out.\n", dev->name);
+	cep->stats.tx_errors++;
+#ifndef final_version
+	{
+		int	i;
+		cbd_t	*bdp;
+		printk(" Ring data dump: cur_tx %p%s cur_rx %p.\n",
+		       cep->cur_tx, cep->tx_full ? " (full)" : "",
+		       cep->cur_rx);
+		bdp = cep->tx_bd_base;
+		for (i = 0 ; i < TX_RING_SIZE; i++, bdp++)
+			printk("%04x %04x %08x\n",
+			       bdp->cbd_sc,
+			       bdp->cbd_datlen,
+			       bdp->cbd_bufaddr);
+		bdp = cep->rx_bd_base;
+		for (i = 0 ; i < RX_RING_SIZE; i++, bdp++)
+			printk("%04x %04x %08x\n",
+			       bdp->cbd_sc,
+			       bdp->cbd_datlen,
+			       bdp->cbd_bufaddr);
+	}
+#endif
+	if (!cep->tx_full)
+		netif_wake_queue(dev);
+}
+#endif /* ORIGINAL_VERSION */
+
+/* The interrupt handler.
+ * This is called from the CPM handler, not the MPC core interrupt.
+ */
+static int scc_enet_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	int packets = 0;
+	struct	scc_enet_private *cep;
+	volatile cbd_t	*bdp;
+	ushort	int_events;
+	int	must_restart;
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+
+	cep = (struct scc_enet_private *)rtdev->priv;
+
+	/* Get the interrupt events that caused us to be here.
+	*/
+	int_events = cep->sccp->scc_scce;
+	cep->sccp->scc_scce = int_events;
+	must_restart = 0;
+
+	/* Handle receive event in its own function.
+	*/
+	if (int_events & SCCE_ENET_RXF) {
+		scc_enet_rx(rtdev, &packets, &time_stamp);
+	}
+
+	/* Check for a transmit error.  The manual is a little unclear
+	 * about this, so the debug code stays until I get it figured out.  It
+	 * appears that if TXE is set, then TXB is not set.  However,
+	 * if carrier sense is lost during frame transmission, the TXE
+	 * bit is set, "and continues the buffer transmission normally."
+	 * I don't know if "normally" implies TXB is set when the buffer
+	 * descriptor is closed.....trial and error :-).
+	 */
+
+	/* Transmit OK, or non-fatal error.  Update the buffer descriptors.
+	*/
+	if (int_events & (SCCE_ENET_TXE | SCCE_ENET_TXB)) {
+	    rtdm_lock_get(&cep->lock);
+	    bdp = cep->dirty_tx;
+	    while ((bdp->cbd_sc&BD_ENET_TX_READY)==0) {
+		RT_DEBUG(__FUNCTION__": Tx ok\n");
+		if ((bdp==cep->cur_tx) && (cep->tx_full == 0))
+		    break;
+
+		if (bdp->cbd_sc & BD_ENET_TX_HB)	/* No heartbeat */
+			cep->stats.tx_heartbeat_errors++;
+		if (bdp->cbd_sc & BD_ENET_TX_LC)	/* Late collision */
+			cep->stats.tx_window_errors++;
+		if (bdp->cbd_sc & BD_ENET_TX_RL)	/* Retrans limit */
+			cep->stats.tx_aborted_errors++;
+		if (bdp->cbd_sc & BD_ENET_TX_UN)	/* Underrun */
+			cep->stats.tx_fifo_errors++;
+		if (bdp->cbd_sc & BD_ENET_TX_CSL)	/* Carrier lost */
+			cep->stats.tx_carrier_errors++;
+
+
+		/* A missing heartbeat or a lost carrier is not really a bad error.
+		 * The others require a restart transmit command.
+		 */
+		if (bdp->cbd_sc &
+		    (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
+			must_restart = 1;
+			cep->stats.tx_errors++;
+		}
+
+		cep->stats.tx_packets++;
+
+		/* Deferred means some collisions occurred during transmit,
+		 * but we eventually sent the packet OK.
+		 */
+		if (bdp->cbd_sc & BD_ENET_TX_DEF)
+			cep->stats.collisions++;
+
+		/* Free the sk buffer associated with this last transmit.
+		*/
+		dev_kfree_rtskb(cep->tx_skbuff[cep->skb_dirty]);
+		cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK;
+
+		/* Update pointer to next buffer descriptor to be transmitted.
+		*/
+		if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+			bdp = cep->tx_bd_base;
+		else
+			bdp++;
+
+		/* I don't know if we can be held off from processing these
+		 * interrupts for more than one frame time.  I really hope
+		 * not.  In such a case, we would now want to check the
+		 * currently available BD (cur_tx) and determine if any
+		 * buffers between the dirty_tx and cur_tx have also been
+		 * sent.  We would want to process anything in between that
+		 * does not have BD_ENET_TX_READY set.
+		 */
+
+		/* Since we have freed up a buffer, the ring is no longer
+		 * full.
+		 */
+		if (cep->tx_full) {
+			cep->tx_full = 0;
+			if (rtnetif_queue_stopped(rtdev))
+				rtnetif_wake_queue(rtdev);
+		}
+
+		cep->dirty_tx = (cbd_t *)bdp;
+	    }
+
+	    if (must_restart) {
+		volatile cpm8xx_t *cp;
+
+		/* Some transmit errors cause the transmitter to shut
+		 * down.  We now issue a restart transmit.  Since the
+		 * errors close the BD and update the pointers, the restart
+		 * _should_ pick up without having to reset any of our
+		 * pointers either.
+		 */
+		cp = cpmp;
+		cp->cp_cpcr =
+		    mk_cr_cmd(CPM_CR_ENET, CPM_CR_RESTART_TX) | CPM_CR_FLG;
+		while (cp->cp_cpcr & CPM_CR_FLG);
+	    }
+	    rtdm_lock_put(&cep->lock);
+	}
+
+	/* Check for receive busy, i.e. packets coming but no place to
+	 * put them.  This "can't happen" because the receive interrupt
+	 * is tossing previous frames.
+	 */
+	if (int_events & SCCE_ENET_BSY) {
+		cep->stats.rx_dropped++;
+		rtdm_printk("CPM ENET: BSY can't happen.\n");
+	}
+
+	if (packets > 0)
+		rt_mark_stack_mgr(rtdev);
+	return RTDM_IRQ_HANDLED;
+}
+
+/* During a receive, the cur_rx points to the current incoming buffer.
+ * When we update through the ring, if the next incoming buffer has
+ * not been given to the system, we just set the empty indicator,
+ * effectively tossing the packet.
+ */
+static int
+scc_enet_rx(struct rtnet_device *rtdev, int* packets, nanosecs_abs_t *time_stamp)
+{
+	struct	scc_enet_private *cep;
+	volatile cbd_t	*bdp;
+	ushort	pkt_len;
+	struct	rtskb *skb;
+
+	RT_DEBUG(__FUNCTION__": ...\n");
+
+	cep = (struct scc_enet_private *)rtdev->priv;
+
+	/* First, grab all of the stats for the incoming packet.
+	 * These get messed up if we get called due to a busy condition.
+	 */
+	bdp = cep->cur_rx;
+
+    for (;;) {
+
+	if (bdp->cbd_sc & BD_ENET_RX_EMPTY)
+		break;
+
+#ifndef final_version
+	/* Since we have allocated space to hold a complete frame, both
+	 * the first and last indicators should be set.
+	 */
+	if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) !=
+		(BD_ENET_RX_FIRST | BD_ENET_RX_LAST))
+			rtdm_printk("CPM ENET: rcv is not first+last\n");
+#endif
+
+	/* Frame too long or too short.
+	*/
+	if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
+		cep->stats.rx_length_errors++;
+	if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
+		cep->stats.rx_frame_errors++;
+	if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
+		cep->stats.rx_crc_errors++;
+	if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
+		cep->stats.rx_crc_errors++;
+
+	/* Report late collisions as a frame error.
+	 * On this error, the BD is closed, but we don't know what we
+	 * have in the buffer.  So, just drop this frame on the floor.
+	 */
+	if (bdp->cbd_sc & BD_ENET_RX_CL) {
+		cep->stats.rx_frame_errors++;
+	}
+	else {
+
+		/* Process the incoming frame.
+		*/
+		cep->stats.rx_packets++;
+		pkt_len = bdp->cbd_datlen;
+		cep->stats.rx_bytes += pkt_len;
+
+		/* This does 16 byte alignment, much more than we need.
+		 * The packet length includes FCS, but we don't want to
+		 * include that when passing upstream as it messes up
+		 * bridging applications.
+		 */
+		skb = rtnetdev_alloc_rtskb(rtdev, pkt_len-4);
+		if (skb == NULL) {
+			rtdm_printk("%s: Memory squeeze, dropping packet.\n", rtdev->name);
+			cep->stats.rx_dropped++;
+		}
+		else {
+			rtskb_put(skb,pkt_len-4); /* Make room */
+			memcpy(skb->data,
+			       cep->rx_vaddr[bdp - cep->rx_bd_base],
+			       pkt_len-4);
+			skb->protocol=rt_eth_type_trans(skb,rtdev);
+			skb->time_stamp = *time_stamp;
+			rtnetif_rx(skb);
+			(*packets)++;
+		}
+	}
+
+	/* Clear the status flags for this buffer.
+	*/
+	bdp->cbd_sc &= ~BD_ENET_RX_STATS;
+
+	/* Mark the buffer empty.
+	*/
+	bdp->cbd_sc |= BD_ENET_RX_EMPTY;
+
+	/* Update BD pointer to next entry.
+	*/
+	if (bdp->cbd_sc & BD_ENET_RX_WRAP)
+		bdp = cep->rx_bd_base;
+	else
+		bdp++;
+
+    }
+	cep->cur_rx = (cbd_t *)bdp;
+
+	return 0;
+}
+
+static int
+scc_enet_close(struct rtnet_device *rtdev)
+{
+	/* Don't know what to do yet.
+	*/
+	rtnetif_stop_queue(rtdev);
+
+	return 0;
+}
+
+static struct net_device_stats *scc_enet_get_stats(struct rtnet_device *rtdev)
+{
+	struct scc_enet_private *cep = (struct scc_enet_private *)rtdev->priv;
+
+	return &cep->stats;
+}
+
+#ifdef ORIGINAL_VERSION
+/* Set or clear the multicast filter for this adaptor.
+ * Skeleton taken from sunlance driver.
+ * The CPM Ethernet implementation allows Multicast as well as individual
+ * MAC address filtering.  Some of the drivers check to make sure it is
+ * a group multicast address, and discard those that are not.  I guess I
+ * will do the same for now, but just remove the test if you want
+ * individual filtering as well (do the upper net layers want or support
+ * this kind of feature?).
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+	struct	scc_enet_private *cep;
+	struct	dev_mc_list *dmi;
+	u_char	*mcptr, *tdptr;
+	volatile scc_enet_t *ep;
+	int	i, j;
+	cep = (struct scc_enet_private *)dev->priv;
+
+	/* Get pointer to SCC area in parameter RAM.
+	*/
+	ep = (scc_enet_t *)dev->base_addr;
+
+	if (dev->flags&IFF_PROMISC) {
+
+		/* Log any net taps. */
+		printk("%s: Promiscuous mode enabled.\n", dev->name);
+		cep->sccp->scc_pmsr |= SCC_PMSR_PRO;
+	} else {
+
+		cep->sccp->scc_pmsr &= ~SCC_PMSR_PRO;
+
+		if (dev->flags & IFF_ALLMULTI) {
+			/* Catch all multicast addresses, so set the
+			 * filter to all 1's.
+			 */
+			ep->sen_gaddr1 = 0xffff;
+			ep->sen_gaddr2 = 0xffff;
+			ep->sen_gaddr3 = 0xffff;
+			ep->sen_gaddr4 = 0xffff;
+		}
+		else {
+			/* Clear filter and add the addresses in the list.
+			*/
+			ep->sen_gaddr1 = 0;
+			ep->sen_gaddr2 = 0;
+			ep->sen_gaddr3 = 0;
+			ep->sen_gaddr4 = 0;
+
+			dmi = dev->mc_list;
+
+			for (i=0; i<dev->mc_count; i++) {
+
+				/* Only support group multicast for now.
+				*/
+				if (!(dmi->dmi_addr[0] & 1))
+					continue;
+
+				/* The address in dmi_addr is LSB first,
+				 * and taddr is MSB first.  We have to
+				 * copy bytes MSB first from dmi_addr.
+				 */
+				mcptr = (u_char *)dmi->dmi_addr + 5;
+				tdptr = (u_char *)&ep->sen_taddrh;
+				for (j=0; j<6; j++)
+					*tdptr++ = *mcptr--;
+
+				/* Ask CPM to run CRC and set bit in
+				 * filter mask.
+				 */
+				cpmp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_SET_GADDR) | CPM_CR_FLG;
+				/* this delay is necessary here -- Cort */
+				udelay(10);
+				while (cpmp->cp_cpcr & CPM_CR_FLG);
+			}
+		}
+	}
+}
+#endif /* ORIGINAL_VERSION */
+
+/* Initialize the CPM Ethernet on SCC.  If EPPC-Bug loaded us, or performed
+ * some other network I/O, a whole bunch of this has already been set up.
+ * It is no big deal if we do it again; we just have to disable the
+ * transmit and receive to make sure we don't catch the CPM with some
+ * inconsistent control information.
+ */
+int __init scc_enet_init(void)
+{
+	struct rtnet_device *rtdev = NULL;
+	struct scc_enet_private *cep;
+	int i, j, k;
+	unsigned char	*eap, *ba;
+	dma_addr_t	mem_addr;
+	bd_t		*bd;
+	volatile	cbd_t		*bdp;
+	volatile	cpm8xx_t	*cp;
+	volatile	scc_t		*sccp;
+	volatile	scc_enet_t	*ep;
+	volatile	immap_t		*immap;
+
+	cp = cpmp;	/* Get pointer to Communication Processor */
+
+	immap = (immap_t *)(mfspr(IMMR) & 0xFFFF0000);	/* and to internal registers */
+
+	bd = (bd_t *)__res;
+
+	/* Configure the SCC parameters (this was formerly done
+	 * by macro definitions).
+	 */
+	switch (rtnet_scc) {
+	case 3:
+		CPM_CR_ENET = CPM_CR_CH_SCC3;
+		PROFF_ENET  = PROFF_SCC3;
+		SCC_ENET    = 2;		/* Index, not number! */
+		CPMVEC_ENET = CPMVEC_SCC3;
+		break;
+	case 2:
+		CPM_CR_ENET = CPM_CR_CH_SCC2;
+		PROFF_ENET  = PROFF_SCC2;
+		SCC_ENET    = 1;		/* Index, not number! */
+		CPMVEC_ENET = CPMVEC_SCC2;
+		break;
+	case 1:
+		CPM_CR_ENET = CPM_CR_CH_SCC1;
+		PROFF_ENET  = PROFF_SCC1;
+		SCC_ENET    = 0;		/* Index, not number! */
+		CPMVEC_ENET = CPMVEC_SCC1;
+		break;
+	default:
+		printk(KERN_ERR "enet: SCC%d doesn't exist (check rtnet_scc)\n", rtnet_scc);
+		return -1;
+	}
+
+	/* Allocate some private information and create an Ethernet device instance.
+	*/
+	if (!rx_pool_size)
+		rx_pool_size = RX_RING_SIZE * 2;
+	rtdev = rtdev_root = rt_alloc_etherdev(sizeof(struct scc_enet_private),
+					rx_pool_size + TX_RING_SIZE);
+	if (rtdev == NULL) {
+		printk(KERN_ERR "enet: Could not allocate ethernet device.\n");
+		return -1;
+	}
+	rtdev_alloc_name(rtdev, "rteth%d");
+	rt_rtdev_connect(rtdev, &RTDEV_manager);
+	rtdev->vers = RTDEV_VERS_2_0;
+
+	cep = (struct scc_enet_private *)rtdev->priv;
+	rtdm_lock_init(&cep->lock);
+
+	/* Get pointer to SCC area in parameter RAM.
+	*/
+	ep = (scc_enet_t *)(&cp->cp_dparam[PROFF_ENET]);
+
+	/* And another to the SCC register area.
+	*/
+	sccp = (volatile scc_t *)(&cp->cp_scc[SCC_ENET]);
+	cep->sccp = (scc_t *)sccp;		/* Keep the pointer handy */
+
+	/* Disable receive and transmit in case EPPC-Bug started it.
+	*/
+	sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+
+	/* Cookbook style from the MPC860 manual.....
+	 * Not all of this is necessary if EPPC-Bug has initialized
+	 * the network.
+	 * So far we are lucky, all board configurations use the same
+	 * pins, or at least the same I/O Port for these functions.....
+	 * It can't last though......
+	 */
+
+#if (defined(PA_ENET_RXD) && defined(PA_ENET_TXD))
+	/* Configure port A pins for Txd and Rxd.
+	*/
+	immap->im_ioport.iop_papar |=  (PA_ENET_RXD | PA_ENET_TXD);
+	immap->im_ioport.iop_padir &= ~(PA_ENET_RXD | PA_ENET_TXD);
+	immap->im_ioport.iop_paodr &=                ~PA_ENET_TXD;
+#elif (defined(PB_ENET_RXD) && defined(PB_ENET_TXD))
+	/* Configure port B pins for Txd and Rxd.
+	*/
+	immap->im_cpm.cp_pbpar |=  (PB_ENET_RXD | PB_ENET_TXD);
+	immap->im_cpm.cp_pbdir &= ~(PB_ENET_RXD | PB_ENET_TXD);
+	immap->im_cpm.cp_pbodr &=		 ~PB_ENET_TXD;
+#else
+#error Exactly ONE pair of PA_ENET_[RT]XD, PB_ENET_[RT]XD must be defined
+#endif
+
+#if defined(PC_ENET_LBK)
+	/* Configure port C pins to disable External Loopback
+	 */
+	immap->im_ioport.iop_pcpar &= ~PC_ENET_LBK;
+	immap->im_ioport.iop_pcdir |=  PC_ENET_LBK;
+	immap->im_ioport.iop_pcso  &= ~PC_ENET_LBK;
+	immap->im_ioport.iop_pcdat &= ~PC_ENET_LBK;	/* Disable Loopback */
+#endif	/* PC_ENET_LBK */
+
+	/* Configure port C pins to enable CLSN and RENA.
+	*/
+	immap->im_ioport.iop_pcpar &= ~(PC_ENET_CLSN | PC_ENET_RENA);
+	immap->im_ioport.iop_pcdir &= ~(PC_ENET_CLSN | PC_ENET_RENA);
+	immap->im_ioport.iop_pcso  |=  (PC_ENET_CLSN | PC_ENET_RENA);
+
+	/* Configure port A for TCLK and RCLK.
+	*/
+	immap->im_ioport.iop_papar |=  (PA_ENET_TCLK | PA_ENET_RCLK);
+	immap->im_ioport.iop_padir &= ~(PA_ENET_TCLK | PA_ENET_RCLK);
+
+	/* Configure Serial Interface clock routing.
+	 * First, clear all SCC bits to zero, then set the ones we want.
+	 */
+	cp->cp_sicr &= ~SICR_ENET_MASK;
+	cp->cp_sicr |=  SICR_ENET_CLKRT;
+
+	/* Manual says set SDDR, but I can't find anything with that
+	 * name.  I think it is a misprint, and should be SDCR.  This
+	 * has already been set by the communication processor initialization.
+	 */
+
+	/* Allocate space for the buffer descriptors in the DP ram.
+	 * These are relative offsets in the DP ram address space.
+	 * Initialize base addresses for the buffer descriptors.
+	 */
+	i = m8xx_cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE);
+	ep->sen_genscc.scc_rbase = i;
+	cep->rx_bd_base = (cbd_t *)&cp->cp_dpmem[i];
+
+	i = m8xx_cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE);
+	ep->sen_genscc.scc_tbase = i;
+	cep->tx_bd_base = (cbd_t *)&cp->cp_dpmem[i];
+
+	cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
+	cep->cur_rx = cep->rx_bd_base;
+
+	/* Issue init Rx BD command for SCC.
+	 * Manual says to perform an Init Rx parameters here.  We have
+	 * to perform both Rx and Tx because the SCC may have been
+	 * already running.
+	 * In addition, we have to do it later because we don't yet have
+	 * all of the BD control/status set properly.
+	cp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_INIT_RX) | CPM_CR_FLG;
+	while (cp->cp_cpcr & CPM_CR_FLG);
+	 */
+
+	/* Initialize function code registers for big-endian.
+	*/
+	ep->sen_genscc.scc_rfcr = SCC_EB;
+	ep->sen_genscc.scc_tfcr = SCC_EB;
+
+	/* Set maximum bytes per receive buffer.
+	 * This appears to be an Ethernet frame size, not the buffer
+	 * fragment size.  It must be a multiple of four.
+	 */
+	ep->sen_genscc.scc_mrblr = PKT_MAXBLR_SIZE;
+
+	/* Set CRC preset and mask.
+	*/
+	ep->sen_cpres = 0xffffffff;
+	ep->sen_cmask = 0xdebb20e3;
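+	/* (the all-ones preset and 0xdebb20e3 are the standard Ethernet
+	 *  CRC-32 preset and check/residue values)
+	 */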
+
+	ep->sen_crcec = 0;	/* CRC Error counter */
+	ep->sen_alec = 0;	/* alignment error counter */
+	ep->sen_disfc = 0;	/* discard frame counter */
+
+	ep->sen_pads = 0x8888;	/* Tx short frame pad character */
+	ep->sen_retlim = 15;	/* Retry limit threshold */
+
+	ep->sen_maxflr = PKT_MAXBUF_SIZE;   /* maximum frame length register */
+	ep->sen_minflr = PKT_MINBUF_SIZE;  /* minimum frame length register */
+
+	ep->sen_maxd1 = PKT_MAXBLR_SIZE;	/* maximum DMA1 length */
+	ep->sen_maxd2 = PKT_MAXBLR_SIZE;	/* maximum DMA2 length */
+
+	/* Clear hash tables.
+	*/
+	ep->sen_gaddr1 = 0;
+	ep->sen_gaddr2 = 0;
+	ep->sen_gaddr3 = 0;
+	ep->sen_gaddr4 = 0;
+	ep->sen_iaddr1 = 0;
+	ep->sen_iaddr2 = 0;
+	ep->sen_iaddr3 = 0;
+	ep->sen_iaddr4 = 0;
+
+	/* Set Ethernet station address.
+	 */
+	eap = (unsigned char *)&(ep->sen_paddrh);
+#ifdef CONFIG_FEC_ENET
+	/* We need a second MAC address if FEC is used by Linux */
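+	/* (the second address is derived by setting bit 0x80 of the
+	 *  fourth address byte)
+	 */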
+	for (i=5; i>=0; i--)
+		*eap++ = rtdev->dev_addr[i] = (bd->bi_enetaddr[i] |
+					     (i==3 ? 0x80 : 0));
+#else
+	for (i=5; i>=0; i--)
+		*eap++ = rtdev->dev_addr[i] = bd->bi_enetaddr[i];
+#endif
+
+	ep->sen_pper = 0;	/* 'cause the book says so */
+	ep->sen_taddrl = 0;	/* temp address (LSB) */
+	ep->sen_taddrm = 0;
+	ep->sen_taddrh = 0;	/* temp address (MSB) */
+
+	/* Now allocate the host memory pages and initialize the
+	 * buffer descriptors.
+	 */
+	bdp = cep->tx_bd_base;
+	for (i=0; i<TX_RING_SIZE; i++) {
+
+		/* Initialize the BD for every fragment in the page.
+		*/
+		bdp->cbd_sc = 0;
+		bdp->cbd_bufaddr = 0;
+		bdp++;
+	}
+
+	/* Set the last buffer to wrap.
+	*/
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	bdp = cep->rx_bd_base;
+	k = 0;
+	for (i=0; i<CPM_ENET_RX_PAGES; i++) {
+
+		/* Allocate a page.
+		*/
+		ba = (unsigned char *)consistent_alloc(GFP_KERNEL, PAGE_SIZE, &mem_addr);
+
+		/* Initialize the BD for every fragment in the page.
+		*/
+		for (j=0; j<CPM_ENET_RX_FRPPG; j++) {
+			bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR;
+			bdp->cbd_bufaddr = mem_addr;
+			cep->rx_vaddr[k++] = ba;
+			mem_addr += CPM_ENET_RX_FRSIZE;
+			ba += CPM_ENET_RX_FRSIZE;
+			bdp++;
+		}
+	}
+
+	/* Set the last buffer to wrap.
+	*/
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	/* Let's re-initialize the channel now.  We have to do it later
+	 * than the manual describes because we have just now finished
+	 * the BD initialization.
+	 */
+	cp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_INIT_TRX) | CPM_CR_FLG;
+	while (cp->cp_cpcr & CPM_CR_FLG);
+
+	cep->skb_cur = cep->skb_dirty = 0;
+
+	sccp->scc_scce = 0xffff;	/* Clear any pending events */
+
+	/* Enable interrupts for transmit error, complete frame
+	 * received, and any transmit buffer for which we have also
+	 * set the interrupt flag.
+	 */
+	sccp->scc_sccm = (SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);
+
+	/* Install our interrupt handler.
+	*/
+	rtdev->irq = CPM_IRQ_OFFSET + CPMVEC_ENET;
+	rt_stack_connect(rtdev, &STACK_manager);
+	if ((i = rtdm_irq_request(&cep->irq_handle, rtdev->irq,
+				  scc_enet_interrupt, 0, "rt_mpc8xx_enet", rtdev))) {
+		printk(KERN_ERR "Couldn't request IRQ %d\n", rtdev->irq);
+		rtdev_free(rtdev);
+		return i;
+	}
+
+
+	/* Set GSMR_H to enable all normal operating modes.
+	 * Set GSMR_L to enable Ethernet to MC68160.
+	 */
+	sccp->scc_gsmrh = 0;
+	sccp->scc_gsmrl = (SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 | SCC_GSMRL_MODE_ENET);
+
+	/* Set sync/delimiters.
+	*/
+	sccp->scc_dsr = 0xd555;
+
+	/* Set processing mode.  Use Ethernet CRC, catch broadcast, and
+	 * start frame search 22 bit times after RENA.
+	 */
+	sccp->scc_pmsr = (SCC_PMSR_ENCRC | SCC_PMSR_NIB22);
+
+	/* It is now OK to enable the Ethernet transmitter.
+	 * Unfortunately, there are board implementation differences here.
+	 */
+#if   (!defined (PB_ENET_TENA) &&  defined (PC_ENET_TENA))
+	immap->im_ioport.iop_pcpar |=  PC_ENET_TENA;
+	immap->im_ioport.iop_pcdir &= ~PC_ENET_TENA;
+#elif ( defined (PB_ENET_TENA) && !defined (PC_ENET_TENA))
+	cp->cp_pbpar |= PB_ENET_TENA;
+	cp->cp_pbdir |= PB_ENET_TENA;
+#else
+#error Configuration Error: define exactly ONE of PB_ENET_TENA, PC_ENET_TENA
+#endif
+
+#if defined(CONFIG_RPXLITE) || defined(CONFIG_RPXCLASSIC)
+	/* And while we are here, set the configuration to enable ethernet.
+	*/
+	*((volatile uint *)RPX_CSR_ADDR) &= ~BCSR0_ETHLPBK;
+	*((volatile uint *)RPX_CSR_ADDR) |=
+			(BCSR0_ETHEN | BCSR0_COLTESTDIS | BCSR0_FULLDPLXDIS);
+#endif
+
+#ifdef CONFIG_BSEIP
+	/* BSE uses port B and C for PHY control.
+	*/
+	cp->cp_pbpar &= ~(PB_BSE_POWERUP | PB_BSE_FDXDIS);
+	cp->cp_pbdir |= (PB_BSE_POWERUP | PB_BSE_FDXDIS);
+	cp->cp_pbdat |= (PB_BSE_POWERUP | PB_BSE_FDXDIS);
+
+	immap->im_ioport.iop_pcpar &= ~PC_BSE_LOOPBACK;
+	immap->im_ioport.iop_pcdir |= PC_BSE_LOOPBACK;
+	immap->im_ioport.iop_pcso &= ~PC_BSE_LOOPBACK;
+	immap->im_ioport.iop_pcdat &= ~PC_BSE_LOOPBACK;
+#endif
+
+#ifdef CONFIG_FADS
+	cp->cp_pbpar |= PB_ENET_TENA;
+	cp->cp_pbdir |= PB_ENET_TENA;
+
+	/* Enable the EEST PHY.
+	*/
+	*((volatile uint *)BCSR1) &= ~BCSR1_ETHEN;
+#endif
+
+	rtdev->base_addr = (unsigned long)ep;
+
+	/* The CPM Ethernet specific entries in the device structure. */
+	rtdev->open = scc_enet_open;
+	rtdev->hard_start_xmit = scc_enet_start_xmit;
+	rtdev->stop = scc_enet_close;
+	rtdev->hard_header = &rt_eth_header;
+	rtdev->get_stats = scc_enet_get_stats;
+
+	if (!rx_pool_size)
+		rx_pool_size = RX_RING_SIZE * 2;
+
+	if ((i = rt_register_rtnetdev(rtdev))) {
+		printk(KERN_ERR "Couldn't register rtdev\n");
+		rtdm_irq_disable(&cep->irq_handle);
+		rtdm_irq_free(&cep->irq_handle);
+		rtdev_free(rtdev);
+		return i;
+	}
+
+	/* And last, enable the transmit and receive processing.
+	*/
+	sccp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+
+	printk("%s: CPM ENET Version 0.2 on SCC%d, irq %d, addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+	       rtdev->name, SCC_ENET+1, rtdev->irq,
+	       rtdev->dev_addr[0], rtdev->dev_addr[1], rtdev->dev_addr[2],
+	       rtdev->dev_addr[3], rtdev->dev_addr[4], rtdev->dev_addr[5]);
+
+	return 0;
+}
+
+static void __exit scc_enet_cleanup(void)
+{
+	struct rtnet_device *rtdev = rtdev_root;
+	struct scc_enet_private *cep = (struct scc_enet_private *)rtdev->priv;
+	volatile cpm8xx_t *cp = cpmp;
+	volatile scc_enet_t *ep;
+
+	if (rtdev) {
+		rtdm_irq_disable(&cep->irq_handle);
+		rtdm_irq_free(&cep->irq_handle);
+
+		ep = (scc_enet_t *)(&cp->cp_dparam[PROFF_ENET]);
+		m8xx_cpm_dpfree(ep->sen_genscc.scc_rbase);
+		m8xx_cpm_dpfree(ep->sen_genscc.scc_tbase);
+
+		rt_stack_disconnect(rtdev);
+		rt_unregister_rtnetdev(rtdev);
+		rt_rtdev_disconnect(rtdev);
+
+		printk("%s: unloaded\n", rtdev->name);
+		rtdev_free(rtdev);
+		rtdev_root = NULL;
+	}
+}
+
+module_init(scc_enet_init);
+module_exit(scc_enet_cleanup);
+++ linux-patched/drivers/xenomai/net/drivers/macb.c	2022-03-21 12:58:29.951883481 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/loopback.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Cadence MACB/GEM Ethernet Controller driver
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * RTnet porting by Cristiano Mantovani & Stefano Banzi (Marposs SpA).
+ * Copyright (C) 2014 Gilles Chanteperdrix <gch@xenomai.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/circ_buf.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_data/macb.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <rtdev.h>
+#include <rtdm/net.h>
+#include <rtnet_port.h>
+#include <rtskb.h>
+
+#include "rt_macb.h"
+
+#define MACB_RX_BUFFER_SIZE	128
+#define RX_BUFFER_MULTIPLE	64  /* bytes */
+#define RX_RING_SIZE		512 /* must be power of 2 */
+#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
+
+#define TX_RING_SIZE		128 /* must be power of 2 */
+#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
+
+/* level of occupied TX descriptors under which we wake up TX process */
+#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
+
+#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
+				 | MACB_BIT(ISR_ROVR))
+#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
+					| MACB_BIT(ISR_RLE)		\
+					| MACB_BIT(TXERR))
+#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
+
+/*
+ * Graceful stop timeouts in us. We should allow up to
+ * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
+ */
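+/*
+ * (1518-byte frame + preamble + inter-frame gap = 12304 bit times,
+ * i.e. roughly 1230 us at 10 Mbit/s)
+ */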
+#define MACB_HALT_TIMEOUT	1230
+
+/* Ring buffer accessors */
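+/*
+ * Both ring sizes are powers of two, so masking the free-running
+ * head/tail counters with (SIZE - 1) yields the ring index without
+ * a modulo operation.
+ */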
+static unsigned int macb_tx_ring_wrap(unsigned int index)
+{
+	return index & (TX_RING_SIZE - 1);
+}
+
+static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
+{
+	return &bp->tx_ring[macb_tx_ring_wrap(index)];
+}
+
+static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
+{
+	return &bp->tx_skb[macb_tx_ring_wrap(index)];
+}
+
+static unsigned int macb_rx_ring_wrap(unsigned int index)
+{
+	return index & (RX_RING_SIZE - 1);
+}
+
+static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
+{
+	return &bp->rx_ring[macb_rx_ring_wrap(index)];
+}
+
+static void *macb_rx_buffer(struct macb *bp, unsigned int index)
+{
+	return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
+}
+
+void rtmacb_set_hwaddr(struct macb *bp)
+{
+	u32 bottom;
+	u16 top;
+
+	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
+	macb_or_gem_writel(bp, SA1B, bottom);
+	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
+	macb_or_gem_writel(bp, SA1T, top);
+
+	/* Clear unused address register sets */
+	macb_or_gem_writel(bp, SA2B, 0);
+	macb_or_gem_writel(bp, SA2T, 0);
+	macb_or_gem_writel(bp, SA3B, 0);
+	macb_or_gem_writel(bp, SA3T, 0);
+	macb_or_gem_writel(bp, SA4B, 0);
+	macb_or_gem_writel(bp, SA4T, 0);
+}
+EXPORT_SYMBOL_GPL(rtmacb_set_hwaddr);
+
+void rtmacb_get_hwaddr(struct macb *bp)
+{
+	struct macb_platform_data *pdata;
+	u32 bottom;
+	u16 top;
+	u8 addr[6];
+	int i;
+
+	pdata = dev_get_platdata(&bp->pdev->dev);
+
+	/* Check all 4 address registers for a valid address */
+	for (i = 0; i < 4; i++) {
+		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
+		top = macb_or_gem_readl(bp, SA1T + i * 8);
+
+		if (pdata && pdata->rev_eth_addr) {
+			addr[5] = bottom & 0xff;
+			addr[4] = (bottom >> 8) & 0xff;
+			addr[3] = (bottom >> 16) & 0xff;
+			addr[2] = (bottom >> 24) & 0xff;
+			addr[1] = top & 0xff;
+			addr[0] = (top & 0xff00) >> 8;
+		} else {
+			addr[0] = bottom & 0xff;
+			addr[1] = (bottom >> 8) & 0xff;
+			addr[2] = (bottom >> 16) & 0xff;
+			addr[3] = (bottom >> 24) & 0xff;
+			addr[4] = top & 0xff;
+			addr[5] = (top >> 8) & 0xff;
+		}
+
+		if (is_valid_ether_addr(addr)) {
+			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+			return;
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(rtmacb_get_hwaddr);
+
+static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+	struct macb *bp = bus->priv;
+	int value;
+
+	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
+			      | MACB_BF(RW, MACB_MAN_READ)
+			      | MACB_BF(PHYA, mii_id)
+			      | MACB_BF(REGA, regnum)
+			      | MACB_BF(CODE, MACB_MAN_CODE)));
+
+	/* wait for end of transfer */
+	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
+		cpu_relax();
+
+	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
+
+	return value;
+}
+
+static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+			   u16 value)
+{
+	struct macb *bp = bus->priv;
+
+	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
+			      | MACB_BF(RW, MACB_MAN_WRITE)
+			      | MACB_BF(PHYA, mii_id)
+			      | MACB_BF(REGA, regnum)
+			      | MACB_BF(CODE, MACB_MAN_CODE)
+			      | MACB_BF(DATA, value)));
+
+	/* wait for end of transfer */
+	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
+		cpu_relax();
+
+	return 0;
+}
+
+/**
+ * macb_set_tx_clk() - Set the TX clock to match the link speed
+ * @clk:	Pointer to the clock to change
+ * @speed:	Link speed (SPEED_10/100/1000) selecting the new frequency
+ * @dev:	Pointer to the struct rtnet_device
+ */
+static void macb_set_tx_clk(struct clk *clk, int speed, struct rtnet_device *dev)
+{
+	long ferr, rate, rate_rounded;
+
+	switch (speed) {
+	case SPEED_10:
+		rate = 2500000;
+		break;
+	case SPEED_100:
+		rate = 25000000;
+		break;
+	case SPEED_1000:
+		rate = 125000000;
+		break;
+	default:
+		return;
+	}
+
+	rate_rounded = clk_round_rate(clk, rate);
+	if (rate_rounded < 0)
+		return;
+
+	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
+	 * is not satisfied.
+	 */
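+	/*
+	 * ferr below ends up in units of rate/100000 (10 ppm each, rounded
+	 * up), so any value above 5 exceeds the 50 ppm budget.
+	 */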
+	ferr = abs(rate_rounded - rate);
+	ferr = DIV_ROUND_UP(ferr, rate / 100000);
+	if (ferr > 5)
+		rtdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
+				rate);
+
+	if (clk_set_rate(clk, rate_rounded))
+		rtdev_err(dev, "adjusting tx_clk failed.\n");
+}
+
+struct macb_dummy_netdev_priv {
+	struct rtnet_device *rtdev;
+};
+
+static void macb_handle_link_change(struct net_device *nrt_dev)
+{
+	struct macb_dummy_netdev_priv *p = netdev_priv(nrt_dev);
+	struct rtnet_device *dev = p->rtdev;
+	struct macb *bp = rtnetdev_priv(dev);
+	struct phy_device *phydev = bp->phy_dev;
+	unsigned long flags;
+
+	int status_change = 0;
+
+	rtdm_lock_get_irqsave(&bp->lock, flags);
+
+	if (phydev->link) {
+		if ((bp->speed != phydev->speed) ||
+		    (bp->duplex != phydev->duplex)) {
+			u32 reg;
+
+			reg = macb_readl(bp, NCFGR);
+			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
+			if (macb_is_gem(bp))
+				reg &= ~GEM_BIT(GBE);
+
+			if (phydev->duplex)
+				reg |= MACB_BIT(FD);
+			if (phydev->speed == SPEED_100)
+				reg |= MACB_BIT(SPD);
+			if (phydev->speed == SPEED_1000)
+				reg |= GEM_BIT(GBE);
+
+			macb_or_gem_writel(bp, NCFGR, reg);
+
+			bp->speed = phydev->speed;
+			bp->duplex = phydev->duplex;
+			status_change = 1;
+		}
+	}
+
+	if (phydev->link != bp->link) {
+		if (!phydev->link) {
+			bp->speed = 0;
+			bp->duplex = -1;
+		}
+		bp->link = phydev->link;
+
+		status_change = 1;
+	}
+
+	rtdm_lock_put_irqrestore(&bp->lock, flags);
+
+	if (!IS_ERR(bp->tx_clk))
+		macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
+
+	if (status_change) {
+		if (phydev->link) {
+			rtnetif_carrier_on(dev);
+			rtdev_info(dev, "link up (%d/%s)\n",
+				    phydev->speed,
+				    phydev->duplex == DUPLEX_FULL ?
+				    "Full" : "Half");
+		} else {
+			rtnetif_carrier_off(dev);
+			rtdev_info(dev, "link down\n");
+		}
+	}
+}
+
+/* based on au1000_eth.c */
+static int macb_mii_probe(struct rtnet_device *dev)
+{
+	struct macb *bp = rtnetdev_priv(dev);
+	struct macb_dummy_netdev_priv *p;
+	struct macb_platform_data *pdata;
+	struct phy_device *phydev;
+	struct net_device *dummy;
+	int phy_irq;
+	int ret;
+
+	phydev = phy_find_first(bp->mii_bus);
+	if (!phydev) {
+		rtdev_err(dev, "no PHY found\n");
+		return -ENXIO;
+	}
+
+	pdata = dev_get_platdata(&bp->pdev->dev);
+	if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
+		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
+		if (!ret) {
+			phy_irq = gpio_to_irq(pdata->phy_irq_pin);
+			phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
+		}
+	}
+
+	dummy = alloc_etherdev(sizeof(*p));
+	p = netdev_priv(dummy);
+	p->rtdev = dev;
+	bp->phy_phony_net_device = dummy;
+
+	/* attach the mac to the phy */
+	ret = phy_connect_direct(dummy, phydev, &macb_handle_link_change,
+				 bp->phy_interface);
+	if (ret) {
+		rtdev_err(dev, "Could not attach to PHY\n");
+		return ret;
+	}
+
+	/* mask with MAC supported features */
+	if (macb_is_gem(bp))
+		phydev->supported &= PHY_GBIT_FEATURES;
+	else
+		phydev->supported &= PHY_BASIC_FEATURES;
+
+	phydev->advertising = phydev->supported;
+
+	bp->link = 0;
+	bp->speed = 0;
+	bp->duplex = -1;
+	bp->phy_dev = phydev;
+
+	return 0;
+}
+
+int rtmacb_mii_init(struct macb *bp)
+{
+	struct macb_platform_data *pdata;
+	struct device_node *np;
+	int err = -ENXIO, i;
+
+	/* Enable management port */
+	macb_writel(bp, NCR, MACB_BIT(MPE));
+
+	bp->mii_bus = mdiobus_alloc();
+	if (bp->mii_bus == NULL) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	bp->mii_bus->name = "MACB_mii_bus";
+	bp->mii_bus->read = &macb_mdio_read;
+	bp->mii_bus->write = &macb_mdio_write;
+	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		bp->pdev->name, bp->pdev->id);
+	bp->mii_bus->priv = bp;
+	bp->mii_bus->parent = &bp->pdev->dev;
+	pdata = dev_get_platdata(&bp->pdev->dev);
+
+	bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+	if (!bp->mii_bus->irq) {
+		err = -ENOMEM;
+		goto err_out_free_mdiobus;
+	}
+
+	np = bp->pdev->dev.of_node;
+	if (np) {
+		/* try dt phy registration */
+		err = of_mdiobus_register(bp->mii_bus, np);
+
+		/* fall back to standard phy registration if no phy was
+		   found during dt phy registration */
+		if (!err && !phy_find_first(bp->mii_bus)) {
+			for (i = 0; i < PHY_MAX_ADDR; i++) {
+				struct phy_device *phydev;
+
+				phydev = mdiobus_scan(bp->mii_bus, i);
+				if (IS_ERR(phydev)) {
+					err = PTR_ERR(phydev);
+					break;
+				}
+			}
+
+			if (err)
+				goto err_out_unregister_bus;
+		}
+	} else {
+		for (i = 0; i < PHY_MAX_ADDR; i++)
+			bp->mii_bus->irq[i] = PHY_POLL;
+
+		if (pdata)
+			bp->mii_bus->phy_mask = pdata->phy_mask;
+
+		err = mdiobus_register(bp->mii_bus);
+	}
+
+	if (err)
+		goto err_out_free_mdio_irq;
+
+	err = macb_mii_probe(bp->dev);
+	if (err)
+		goto err_out_unregister_bus;
+
+	return 0;
+
+err_out_unregister_bus:
+	mdiobus_unregister(bp->mii_bus);
+err_out_free_mdio_irq:
+	kfree(bp->mii_bus->irq);
+err_out_free_mdiobus:
+	mdiobus_free(bp->mii_bus);
+err_out:
+	return err;
+}
+EXPORT_SYMBOL_GPL(rtmacb_mii_init);
+
+static void macb_update_stats(struct macb *bp)
+{
+	u32 __iomem *reg = bp->regs + MACB_PFR;
+	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
+	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
+
+	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
+
+	for(; p < end; p++, reg++)
+		*p += __raw_readl(reg);
+}
+
+static int macb_halt_tx(struct macb *bp)
+{
+	unsigned long	halt_time, timeout;
+	u32		status;
+
+	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
+
+	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
+	do {
+		halt_time = jiffies;
+		status = macb_readl(bp, TSR);
+		if (!(status & MACB_BIT(TGO)))
+			return 0;
+
+		usleep_range(10, 250);
+	} while (time_before(halt_time, timeout));
+
+	return -ETIMEDOUT;
+}
+
+static void macb_tx_error_task(struct work_struct *work)
+{
+	struct macb	*bp = container_of(work, struct macb, tx_error_task);
+	struct macb_tx_skb	*tx_skb;
+	struct rtskb		*skb;
+	unsigned int		tail;
+
+	rtdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
+		    bp->tx_tail, bp->tx_head);
+
+	/* Make sure nobody is trying to queue up new packets */
+	rtnetif_stop_queue(bp->dev);
+
+	/*
+	 * Stop transmission now
+	 * (in case we have just queued new packets)
+	 */
+	if (macb_halt_tx(bp))
+		/* Just complain for now; reinitializing the TX path could be a future improvement */
+		rtdev_err(bp->dev, "BUG: halt tx timed out\n");
+
+	/* No need for the lock here as nobody will interrupt us anymore */
+
+	/*
+	 * Treat frames in TX queue including the ones that caused the error.
+	 * Free transmit buffers in upper layer.
+	 */
+	for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
+		struct macb_dma_desc	*desc;
+		u32			ctrl;
+
+		desc = macb_tx_desc(bp, tail);
+		ctrl = desc->ctrl;
+		tx_skb = macb_tx_skb(bp, tail);
+		skb = tx_skb->skb;
+
+		if (ctrl & MACB_BIT(TX_USED)) {
+			rtdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
+				    macb_tx_ring_wrap(tail), skb->data);
+			bp->stats.tx_packets++;
+			bp->stats.tx_bytes += skb->len;
+		} else {
+			/*
+			 * "Buffers exhausted mid-frame" errors may only happen
+			 * if the driver is buggy, so complain loudly about those.
+			 * Statistics are updated by hardware.
+			 */
+			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
+				rtdev_err(bp->dev,
+					   "BUG: TX buffers exhausted mid-frame\n");
+
+			desc->ctrl = ctrl | MACB_BIT(TX_USED);
+		}
+
+		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
+				 DMA_TO_DEVICE);
+		tx_skb->skb = NULL;
+		dev_kfree_rtskb(skb);
+	}
+
+	/* Make descriptor updates visible to hardware */
+	wmb();
+
+	/* Reinitialize the TX desc queue */
+	macb_writel(bp, TBQP, bp->tx_ring_dma);
+	/* Make TX ring reflect state of hardware */
+	bp->tx_head = bp->tx_tail = 0;
+
+	/* Now we are ready to start transmission again */
+	rtnetif_wake_queue(bp->dev);
+
+	/* Housework before enabling TX IRQ */
+	macb_writel(bp, TSR, macb_readl(bp, TSR));
+	macb_writel(bp, IER, MACB_TX_INT_FLAGS);
+}
+
+static void macb_tx_interrupt(struct macb *bp)
+{
+	unsigned int tail;
+	unsigned int head;
+	u32 status;
+
+	status = macb_readl(bp, TSR);
+	macb_writel(bp, TSR, status);
+
+	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+		macb_writel(bp, ISR, MACB_BIT(TCOMP));
+
+	rtdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
+		(unsigned long)status);
+
+	head = bp->tx_head;
+	for (tail = bp->tx_tail; tail != head; tail++) {
+		struct macb_tx_skb	*tx_skb;
+		struct rtskb		*skb;
+		struct macb_dma_desc	*desc;
+		u32			ctrl;
+
+		desc = macb_tx_desc(bp, tail);
+
+		/* Make hw descriptor updates visible to CPU */
+		rmb();
+
+		ctrl = desc->ctrl;
+
+		if (!(ctrl & MACB_BIT(TX_USED)))
+			break;
+
+		tx_skb = macb_tx_skb(bp, tail);
+		skb = tx_skb->skb;
+
+		rtdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
+			macb_tx_ring_wrap(tail), skb->data);
+		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
+				 DMA_TO_DEVICE);
+		bp->stats.tx_packets++;
+		bp->stats.tx_bytes += skb->len;
+		tx_skb->skb = NULL;
+		dev_kfree_rtskb(skb);
+	}
+
+	bp->tx_tail = tail;
+	if (rtnetif_queue_stopped(bp->dev)
+			&& CIRC_CNT(bp->tx_head, bp->tx_tail,
+				    TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
+		rtnetif_wake_queue(bp->dev);
+}
+
+static void gem_rx_refill(struct macb *bp)
+{
+	unsigned int		entry;
+	struct rtskb		*skb;
+	dma_addr_t		paddr;
+
+	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
+		entry = macb_rx_ring_wrap(bp->rx_prepared_head);
+
+		/* Make hw descriptor updates visible to CPU */
+		rmb();
+
+		bp->rx_prepared_head++;
+
+		if (bp->rx_skbuff[entry] == NULL) {
+			/* allocate rtskb for this free entry in ring */
+			skb = rtnetdev_alloc_rtskb(bp->dev, bp->rx_buffer_size);
+			if (unlikely(skb == NULL)) {
+				rtdev_err(bp->dev,
+					   "Unable to allocate sk_buff\n");
+				break;
+			}
+
+			/* now fill corresponding descriptor entry */
+			paddr = dma_map_single(&bp->pdev->dev, skb->data,
+					       bp->rx_buffer_size, DMA_FROM_DEVICE);
+			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
+				dev_kfree_rtskb(skb);
+				break;
+			}
+
+			bp->rx_skbuff[entry] = skb;
+
+			if (entry == RX_RING_SIZE - 1)
+				paddr |= MACB_BIT(RX_WRAP);
+			bp->rx_ring[entry].addr = paddr;
+			bp->rx_ring[entry].ctrl = 0;
+
+			/* properly align Ethernet header */
+			rtskb_reserve(skb, NET_IP_ALIGN);
+		}
+	}
+
+	/* Make descriptor updates visible to hardware */
+	wmb();
+
+	rtdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
+		   bp->rx_prepared_head, bp->rx_tail);
+}
+
+/* Mark DMA descriptors from begin up to and not including end as unused */
+static void discard_partial_frame(struct macb *bp, unsigned int begin,
+				  unsigned int end)
+{
+	unsigned int frag;
+
+	for (frag = begin; frag != end; frag++) {
+		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+		desc->addr &= ~MACB_BIT(RX_USED);
+	}
+
+	/* Make descriptor updates visible to hardware */
+	wmb();
+
+	/*
+	 * When this happens, the hardware stats registers for
+	 * whatever caused this are updated, so we don't have to record
+	 * anything.
+	 */
+}
+
+static int gem_rx(struct macb *bp, int budget, nanosecs_abs_t *time_stamp)
+{
+	unsigned int		len;
+	unsigned int		entry;
+	struct rtskb		*skb;
+	struct macb_dma_desc	*desc;
+	int			count = 0, status;
+
+	status = macb_readl(bp, RSR);
+	macb_writel(bp, RSR, status);
+
+	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+		macb_writel(bp, ISR, MACB_BIT(RCOMP));
+
+	while (count < budget) {
+		u32 addr, ctrl;
+
+		entry = macb_rx_ring_wrap(bp->rx_tail);
+		desc = &bp->rx_ring[entry];
+
+		/* Make hw descriptor updates visible to CPU */
+		rmb();
+
+		addr = desc->addr;
+		ctrl = desc->ctrl;
+
+		if (!(addr & MACB_BIT(RX_USED)))
+			break;
+
+		bp->rx_tail++;
+		count++;
+
+		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
+			rtdev_err(bp->dev,
+				   "not whole frame pointed by descriptor\n");
+			bp->stats.rx_dropped++;
+			break;
+		}
+		skb = bp->rx_skbuff[entry];
+		if (unlikely(!skb)) {
+			rtdev_err(bp->dev,
+				   "inconsistent Rx descriptor chain\n");
+			bp->stats.rx_dropped++;
+			break;
+		}
+		skb->time_stamp = *time_stamp;
+		/* now everything is ready for receiving the packet */
+		bp->rx_skbuff[entry] = NULL;
+		len = MACB_BFEXT(RX_FRMLEN, ctrl);
+
+		rtdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
+
+		rtskb_put(skb, len);
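+		/* strip the RX_USED/RX_WRAP control bits to recover the
+		 * DMA address before unmapping
+		 */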
+		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
+		dma_unmap_single(&bp->pdev->dev, addr,
+				 bp->rx_buffer_size, DMA_FROM_DEVICE);
+
+		skb->protocol = rt_eth_type_trans(skb, bp->dev);
+
+		bp->stats.rx_packets++;
+		bp->stats.rx_bytes += skb->len;
+
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+		rtdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
+			    skb->len, skb->csum);
+		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
+			       skb->mac_header, 16, true);
+		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
+			       skb->data, 32, true);
+#endif
+
+		rtnetif_rx(skb);
+	}
+
+	gem_rx_refill(bp);
+
+	return count;
+}
+
+static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
+			unsigned int last_frag, nanosecs_abs_t *time_stamp)
+{
+	unsigned int len;
+	unsigned int frag;
+	unsigned int offset;
+	struct rtskb *skb;
+	struct macb_dma_desc *desc;
+
+	desc = macb_rx_desc(bp, last_frag);
+	len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
+
+	rtdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
+		macb_rx_ring_wrap(first_frag),
+		macb_rx_ring_wrap(last_frag), len);
+
+	/*
+	 * The ethernet header starts NET_IP_ALIGN bytes into the
+	 * first buffer. Since the header is 14 bytes, this makes the
+	 * payload word-aligned.
+	 *
+	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
+	 * the two padding bytes into the skb so that we avoid hitting
+	 * the slowpath in memcpy(), and pull them off afterwards.
+	 */
+	skb = rtnetdev_alloc_rtskb(bp->dev, len + NET_IP_ALIGN);
+	if (!skb) {
+		rtdev_notice(bp->dev, "Low memory, packet dropped.\n");
+		bp->stats.rx_dropped++;
+		for (frag = first_frag; ; frag++) {
+			desc = macb_rx_desc(bp, frag);
+			desc->addr &= ~MACB_BIT(RX_USED);
+			if (frag == last_frag)
+				break;
+		}
+
+		/* Make descriptor updates visible to hardware */
+		wmb();
+
+		return 1;
+	}
+
+	offset = 0;
+	len += NET_IP_ALIGN;
+	skb->time_stamp = *time_stamp;
+	rtskb_put(skb, len);
+
+	for (frag = first_frag; ; frag++) {
+		unsigned int frag_len = bp->rx_buffer_size;
+
+		if (offset + frag_len > len) {
+			BUG_ON(frag != last_frag);
+			frag_len = len - offset;
+		}
+		memcpy(skb->data + offset, macb_rx_buffer(bp, frag), frag_len);
+		offset += bp->rx_buffer_size;
+		desc = macb_rx_desc(bp, frag);
+		desc->addr &= ~MACB_BIT(RX_USED);
+
+		if (frag == last_frag)
+			break;
+	}
+
+	/* Make descriptor updates visible to hardware */
+	wmb();
+
+	__rtskb_pull(skb, NET_IP_ALIGN);
+	skb->protocol = rt_eth_type_trans(skb, bp->dev);
+
+	bp->stats.rx_packets++;
+	bp->stats.rx_bytes += skb->len;
+	rtdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
+		   skb->len, skb->csum);
+	rtnetif_rx(skb);
+
+	return 0;
+}
+
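+/*
+ * Walk the RX ring from rx_tail: a SOF descriptor marks the first fragment,
+ * an EOF descriptor hands the completed frame to macb_rx_frame(), and a new
+ * SOF while a frame is still open discards the partial one.
+ */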
+static int macb_rx(struct macb *bp, int budget, nanosecs_abs_t *time_stamp)
+{
+	int received = 0;
+	unsigned int tail;
+	int first_frag = -1;
+
+	for (tail = bp->rx_tail; budget > 0; tail++) {
+		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
+		u32 addr, ctrl;
+
+		/* Make hw descriptor updates visible to CPU */
+		rmb();
+
+		addr = desc->addr;
+		ctrl = desc->ctrl;
+
+		if (!(addr & MACB_BIT(RX_USED)))
+			break;
+
+		if (ctrl & MACB_BIT(RX_SOF)) {
+			if (first_frag != -1)
+				discard_partial_frame(bp, first_frag, tail);
+			first_frag = tail;
+		}
+
+		if (ctrl & MACB_BIT(RX_EOF)) {
+			int dropped;
+			BUG_ON(first_frag == -1);
+
+			dropped = macb_rx_frame(bp, first_frag, tail, time_stamp);
+			first_frag = -1;
+			if (!dropped) {
+				received++;
+				budget--;
+			}
+		}
+	}
+
+	if (first_frag != -1)
+		bp->rx_tail = first_frag;
+	else
+		bp->rx_tail = tail;
+
+	return received;
+}
+
+static int macb_interrupt(rtdm_irq_t *irq_handle)
+{
+	void *dev_id = rtdm_irq_get_arg(irq_handle, void);
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+	struct rtnet_device *dev = dev_id;
+	struct macb *bp = rtnetdev_priv(dev);
+	unsigned received = 0;
+	u32 status, ctrl;
+
+	status = macb_readl(bp, ISR);
+
+	if (unlikely(!status))
+		return RTDM_IRQ_NONE;
+
+	rtdm_lock_get(&bp->lock);
+
+	while (status) {
+		/* close possible race with dev_close */
+		if (unlikely(!rtnetif_running(dev))) {
+			macb_writel(bp, IDR, -1);
+			break;
+		}
+
+		rtdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);
+
+		if (status & MACB_BIT(RCOMP)) {
+			received += bp->macbgem_ops.mog_rx(bp, 100 - received,
+							&time_stamp);
+		}
+
+		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
+			macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
+			rtdm_schedule_nrt_work(&bp->tx_error_task);
+
+			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+				macb_writel(bp, ISR, MACB_TX_ERR_FLAGS);
+
+			break;
+		}
+
+		if (status & MACB_BIT(TCOMP))
+			macb_tx_interrupt(bp);
+
+		/*
+		 * Link change detection isn't possible with RMII, so we'll
+		 * add that if/when we get our hands on a full-blown MII PHY.
+		 */
+
+		if (status & MACB_BIT(RXUBR)) {
+			ctrl = macb_readl(bp, NCR);
+			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
+			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
+
+			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+				macb_writel(bp, ISR, MACB_BIT(RXUBR));
+		}
+
+		if (status & MACB_BIT(ISR_ROVR)) {
+			/* We missed at least one packet */
+			if (macb_is_gem(bp))
+				bp->hw_stats.gem.rx_overruns++;
+			else
+				bp->hw_stats.macb.rx_overruns++;
+
+			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+				macb_writel(bp, ISR, MACB_BIT(ISR_ROVR));
+		}
+
+		if (status & MACB_BIT(HRESP)) {
+			/*
+			 * TODO: Reset the hardware, and maybe move the
+			 * rtdev_err to a lower-priority context as well
+			 * (work queue?)
+			 */
+			rtdev_err(dev, "DMA bus error: HRESP not OK\n");
+
+			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+				macb_writel(bp, ISR, MACB_BIT(HRESP));
+		}
+
+		status = macb_readl(bp, ISR);
+	}
+
+	rtdm_lock_put(&bp->lock);
+
+	if (received)
+		rt_mark_stack_mgr(dev);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int macb_start_xmit(struct rtskb *skb, struct rtnet_device *dev)
+{
+	struct macb *bp = rtnetdev_priv(dev);
+	dma_addr_t mapping;
+	unsigned int len, entry;
+	struct macb_dma_desc *desc;
+	struct macb_tx_skb *tx_skb;
+	u32 ctrl;
+	unsigned long flags;
+
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+	rtdev_vdbg(bp->dev,
+		   "start_xmit: len %u head %p data %p tail %p end %p\n",
+		   skb->len, skb->head, skb->data,
+		   rtskb_tail_pointer(skb), rtskb_end_pointer(skb));
+	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
+		       skb->data, 16, true);
+#endif
+
+	len = skb->len;
+	rtdm_lock_get_irqsave(&bp->lock, flags);
+
+	/* This is a hard error, log it. */
+	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) {
+		rtnetif_stop_queue(dev);
+		rtdm_lock_put_irqrestore(&bp->lock, flags);
+		rtdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
+		rtdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
+			   bp->tx_head, bp->tx_tail);
+		return RTDEV_TX_BUSY;
+	}
+
+	entry = macb_tx_ring_wrap(bp->tx_head);
+	rtdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+		dev_kfree_rtskb(skb);
+		goto unlock;
+	}
+
+	bp->tx_head++;
+	tx_skb = &bp->tx_skb[entry];
+	tx_skb->skb = skb;
+	tx_skb->mapping = mapping;
+	rtdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
+		   skb->data, (unsigned long)mapping);
+
+	ctrl = MACB_BF(TX_FRMLEN, len);
+	ctrl |= MACB_BIT(TX_LAST);
+	if (entry == (TX_RING_SIZE - 1))
+		ctrl |= MACB_BIT(TX_WRAP);
+
+	desc = &bp->tx_ring[entry];
+	desc->addr = mapping;
+	desc->ctrl = ctrl;
+
+	/* Make newly initialized descriptor visible to hardware */
+	wmb();
+
+	rtskb_tx_timestamp(skb);
+
+	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+
+	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
+		rtnetif_stop_queue(dev);
+
+unlock:
+	rtdm_lock_put_irqrestore(&bp->lock, flags);
+
+	return RTDEV_TX_OK;
+}
+
+static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
+{
+	if (!macb_is_gem(bp)) {
+		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
+	} else {
+		bp->rx_buffer_size = size;
+
+		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
+			rtdev_dbg(bp->dev,
+				    "RX buffer must be multiple of %d bytes, expanding\n",
+				    RX_BUFFER_MULTIPLE);
+			bp->rx_buffer_size =
+				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
+		}
+	}
+
+	rtdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
+		   bp->dev->mtu, bp->rx_buffer_size);
+}
+
+static void gem_free_rx_buffers(struct macb *bp)
+{
+	struct rtskb		*skb;
+	struct macb_dma_desc	*desc;
+	dma_addr_t		addr;
+	int i;
+
+	if (!bp->rx_skbuff)
+		return;
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		skb = bp->rx_skbuff[i];
+
+		if (skb == NULL)
+			continue;
+
+		desc = &bp->rx_ring[i];
+		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
+				 DMA_FROM_DEVICE);
+		dev_kfree_rtskb(skb);
+		skb = NULL;
+	}
+
+	kfree(bp->rx_skbuff);
+	bp->rx_skbuff = NULL;
+}
+
+static void macb_free_rx_buffers(struct macb *bp)
+{
+	if (bp->rx_buffers) {
+		dma_free_coherent(&bp->pdev->dev,
+				  RX_RING_SIZE * bp->rx_buffer_size,
+				  bp->rx_buffers, bp->rx_buffers_dma);
+		bp->rx_buffers = NULL;
+	}
+}
+
+static void macb_free_consistent(struct macb *bp)
+{
+	if (bp->tx_skb) {
+		kfree(bp->tx_skb);
+		bp->tx_skb = NULL;
+	}
+	bp->macbgem_ops.mog_free_rx_buffers(bp);
+	if (bp->rx_ring) {
+		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
+				  bp->rx_ring, bp->rx_ring_dma);
+		bp->rx_ring = NULL;
+	}
+	if (bp->tx_ring) {
+		dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
+				  bp->tx_ring, bp->tx_ring_dma);
+		bp->tx_ring = NULL;
+	}
+}
+
+static int gem_alloc_rx_buffers(struct macb *bp)
+{
+	int size;
+
+	size = RX_RING_SIZE * sizeof(struct rtskb *);
+	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
+	if (!bp->rx_skbuff)
+		return -ENOMEM;
+	else
+		rtdev_dbg(bp->dev,
+			   "Allocated %d RX struct rtskb entries at %p\n",
+			   RX_RING_SIZE, bp->rx_skbuff);
+	return 0;
+}
+
+static int macb_alloc_rx_buffers(struct macb *bp)
+{
+	int size;
+
+	size = RX_RING_SIZE * bp->rx_buffer_size;
+	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
+					    &bp->rx_buffers_dma, GFP_KERNEL);
+	if (!bp->rx_buffers)
+		return -ENOMEM;
+	else
+		rtdev_dbg(bp->dev,
+			   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
+			   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+	return 0;
+}
+
+static int macb_alloc_consistent(struct macb *bp)
+{
+	int size;
+
+	size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
+	bp->tx_skb = kmalloc(size, GFP_KERNEL);
+	if (!bp->tx_skb)
+		goto out_err;
+
+	size = RX_RING_BYTES;
+	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+					 &bp->rx_ring_dma, GFP_KERNEL);
+	if (!bp->rx_ring)
+		goto out_err;
+	rtdev_dbg(bp->dev,
+		   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
+		   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
+
+	size = TX_RING_BYTES;
+	bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+					 &bp->tx_ring_dma, GFP_KERNEL);
+	if (!bp->tx_ring)
+		goto out_err;
+	rtdev_dbg(bp->dev,
+		   "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
+		   size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
+
+	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
+		goto out_err;
+
+	return 0;
+
+out_err:
+	macb_free_consistent(bp);
+	return -ENOMEM;
+}
+
+static void gem_init_rings(struct macb *bp)
+{
+	int i;
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		bp->tx_ring[i].addr = 0;
+		bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+	}
+	bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+
+	bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0;
+
+	gem_rx_refill(bp);
+}
+
+static void macb_init_rings(struct macb *bp)
+{
+	int i;
+	dma_addr_t addr;
+
+	addr = bp->rx_buffers_dma;
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		bp->rx_ring[i].addr = addr;
+		bp->rx_ring[i].ctrl = 0;
+		addr += bp->rx_buffer_size;
+	}
+	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		bp->tx_ring[i].addr = 0;
+		bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+	}
+	bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+
+	bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
+}
+
+static void macb_reset_hw(struct macb *bp)
+{
+	/*
+	 * Disable RX and TX (XXX: Should we halt the transmission
+	 * more gracefully?)
+	 */
+	macb_writel(bp, NCR, 0);
+
+	/* Clear the stats registers (XXX: Update stats first?) */
+	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
+
+	/* Clear all status flags */
+	macb_writel(bp, TSR, -1);
+	macb_writel(bp, RSR, -1);
+
+	/* Disable all interrupts */
+	macb_writel(bp, IDR, -1);
+	macb_readl(bp, ISR);
+}
+
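+/*
+ * The divider is chosen so that the resulting MDC clock stays at or below
+ * the 2.5 MHz allowed by IEEE 802.3 (e.g. a 40 MHz pclk with DIV16 gives
+ * exactly 2.5 MHz).
+ */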
+static u32 gem_mdc_clk_div(struct macb *bp)
+{
+	u32 config;
+	unsigned long pclk_hz = clk_get_rate(bp->pclk);
+
+	if (pclk_hz <= 20000000)
+		config = GEM_BF(CLK, GEM_CLK_DIV8);
+	else if (pclk_hz <= 40000000)
+		config = GEM_BF(CLK, GEM_CLK_DIV16);
+	else if (pclk_hz <= 80000000)
+		config = GEM_BF(CLK, GEM_CLK_DIV32);
+	else if (pclk_hz <= 120000000)
+		config = GEM_BF(CLK, GEM_CLK_DIV48);
+	else if (pclk_hz <= 160000000)
+		config = GEM_BF(CLK, GEM_CLK_DIV64);
+	else
+		config = GEM_BF(CLK, GEM_CLK_DIV96);
+
+	return config;
+}
+
+static u32 macb_mdc_clk_div(struct macb *bp)
+{
+	u32 config;
+	unsigned long pclk_hz;
+
+	if (macb_is_gem(bp))
+		return gem_mdc_clk_div(bp);
+
+	pclk_hz = clk_get_rate(bp->pclk);
+	if (pclk_hz <= 20000000)
+		config = MACB_BF(CLK, MACB_CLK_DIV8);
+	else if (pclk_hz <= 40000000)
+		config = MACB_BF(CLK, MACB_CLK_DIV16);
+	else if (pclk_hz <= 80000000)
+		config = MACB_BF(CLK, MACB_CLK_DIV32);
+	else
+		config = MACB_BF(CLK, MACB_CLK_DIV64);
+
+	return config;
+}
+
+/*
+ * Get the DMA bus width field of the network configuration register that we
+ * should program.  We find the width from decoding the design configuration
+ * register to find the maximum supported data bus width.
+ */
+static u32 macb_dbw(struct macb *bp)
+{
+	if (!macb_is_gem(bp))
+		return 0;
+
+	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
+	case 4:
+		return GEM_BF(DBW, GEM_DBW128);
+	case 2:
+		return GEM_BF(DBW, GEM_DBW64);
+	case 1:
+	default:
+		return GEM_BF(DBW, GEM_DBW32);
+	}
+}
+
+/*
+ * Configure the receive DMA engine
+ * - use the correct receive buffer size
+ * - set the possibility to use INCR16 bursts
+ *   (if not supported by the FIFO, it will fall back to the default)
+ * - set both rx/tx packet buffers to full memory size
+ * These are configurable parameters for GEM.
+ */
+static void macb_configure_dma(struct macb *bp)
+{
+	u32 dmacfg;
+
+	if (macb_is_gem(bp)) {
+		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
+		dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
+		dmacfg |= GEM_BF(FBLDO, 16);
+		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
+		dmacfg &= ~GEM_BIT(ENDIA);
+		gem_writel(bp, DMACFG, dmacfg);
+	}
+}
+
+/*
+ * Configure peripheral capabilities according to the integration options used
+ */
+static void macb_configure_caps(struct macb *bp)
+{
+	if (macb_is_gem(bp)) {
+		if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0)
+			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
+	}
+	rtdev_vdbg(bp->dev, "Capabilities : %X\n", bp->caps);
+}
+
+static void macb_init_hw(struct macb *bp)
+{
+	u32 config;
+
+	macb_reset_hw(bp);
+	rtmacb_set_hwaddr(bp);
+
+	config = macb_mdc_clk_div(bp);
+	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
+	config |= MACB_BIT(PAE);		/* PAuse Enable */
+	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
+	if (bp->dev->flags & IFF_PROMISC)
+		config |= MACB_BIT(CAF);	/* Copy All Frames */
+	if (!(bp->dev->flags & IFF_BROADCAST))
+		config |= MACB_BIT(NBC);	/* No BroadCast */
+	config |= macb_dbw(bp);
+	macb_writel(bp, NCFGR, config);
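+	/* Assume 10 Mbit/half duplex until the PHY reports the actual link state. */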
+	bp->speed = SPEED_10;
+	bp->duplex = DUPLEX_HALF;
+
+	macb_configure_dma(bp);
+	macb_configure_caps(bp);
+
+	/* Initialize the TX and RX descriptor queue pointers */
+	macb_writel(bp, RBQP, bp->rx_ring_dma);
+	macb_writel(bp, TBQP, bp->tx_ring_dma);
+
+	/* Enable TX and RX */
+	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
+
+	/* Enable interrupts */
+	macb_writel(bp, IER, (MACB_RX_INT_FLAGS
+			      | MACB_TX_INT_FLAGS
+			      | MACB_BIT(HRESP)));
+
+}
+
+static int macb_open(struct rtnet_device *dev)
+{
+	struct macb *bp = rtnetdev_priv(dev);
+	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
+	int err;
+
+	rt_stack_connect(dev, &STACK_manager);
+
+	rtdev_dbg(bp->dev, "open\n");
+
+	/* carrier starts down */
+	rtnetif_carrier_off(dev);
+
+	/* if the PHY is not yet registered, retry later */
+	if (!bp->phy_dev)
+		return -EAGAIN;
+
+	/* RX buffers initialization */
+	macb_init_rx_buffer_size(bp, bufsz);
+
+	err = macb_alloc_consistent(bp);
+	if (err) {
+		rtdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
+			   err);
+		return err;
+	}
+
+	bp->macbgem_ops.mog_init_rings(bp);
+	macb_init_hw(bp);
+
+	/* schedule a link state check */
+	phy_start(bp->phy_dev);
+
+	rtnetif_start_queue(dev);
+
+	return 0;
+}
+
+static int macb_close(struct rtnet_device *dev)
+{
+	struct macb *bp = rtnetdev_priv(dev);
+	unsigned long flags;
+
+	rtnetif_stop_queue(dev);
+
+	if (bp->phy_dev)
+		phy_stop(bp->phy_dev);
+
+	rtdm_lock_get_irqsave(&bp->lock, flags);
+	macb_reset_hw(bp);
+	rtnetif_carrier_off(dev);
+	rtdm_lock_put_irqrestore(&bp->lock, flags);
+
+	macb_free_consistent(bp);
+
+	rt_stack_disconnect(dev);
+
+	return 0;
+}
+
+static void gem_update_stats(struct macb *bp)
+{
+	u32 __iomem *reg = bp->regs + GEM_OTX;
+	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
+	u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;
+
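+	/*
+	 * The GEM statistics registers are contiguous from GEM_OTX and mirror
+	 * the layout of struct gem_stats, so walk both arrays in lockstep.
+	 */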
+	for (; p < end; p++, reg++)
+		*p += __raw_readl(reg);
+}
+
+static struct net_device_stats *gem_get_stats(struct macb *bp)
+{
+	struct gem_stats *hwstat = &bp->hw_stats.gem;
+	struct net_device_stats *nstat = &bp->stats;
+
+	gem_update_stats(bp);
+
+	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
+			    hwstat->rx_alignment_errors +
+			    hwstat->rx_resource_errors +
+			    hwstat->rx_overruns +
+			    hwstat->rx_oversize_frames +
+			    hwstat->rx_jabbers +
+			    hwstat->rx_undersized_frames +
+			    hwstat->rx_length_field_frame_errors);
+	nstat->tx_errors = (hwstat->tx_late_collisions +
+			    hwstat->tx_excessive_collisions +
+			    hwstat->tx_underrun +
+			    hwstat->tx_carrier_sense_errors);
+	nstat->multicast = hwstat->rx_multicast_frames;
+	nstat->collisions = (hwstat->tx_single_collision_frames +
+			     hwstat->tx_multiple_collision_frames +
+			     hwstat->tx_excessive_collisions);
+	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
+				   hwstat->rx_jabbers +
+				   hwstat->rx_undersized_frames +
+				   hwstat->rx_length_field_frame_errors);
+	nstat->rx_over_errors = hwstat->rx_resource_errors;
+	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
+	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
+	nstat->rx_fifo_errors = hwstat->rx_overruns;
+	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
+	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
+	nstat->tx_fifo_errors = hwstat->tx_underrun;
+
+	return nstat;
+}
+
+struct net_device_stats *rtmacb_get_stats(struct rtnet_device *dev)
+{
+	struct macb *bp = rtnetdev_priv(dev);
+	struct net_device_stats *nstat = &bp->stats;
+	struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+	if (macb_is_gem(bp))
+		return gem_get_stats(bp);
+
+	/* read stats from hardware */
+	macb_update_stats(bp);
+
+	/* Convert HW stats into netdevice stats */
+	nstat->rx_errors = (hwstat->rx_fcs_errors +
+			    hwstat->rx_align_errors +
+			    hwstat->rx_resource_errors +
+			    hwstat->rx_overruns +
+			    hwstat->rx_oversize_pkts +
+			    hwstat->rx_jabbers +
+			    hwstat->rx_undersize_pkts +
+			    hwstat->sqe_test_errors +
+			    hwstat->rx_length_mismatch);
+	nstat->tx_errors = (hwstat->tx_late_cols +
+			    hwstat->tx_excessive_cols +
+			    hwstat->tx_underruns +
+			    hwstat->tx_carrier_errors);
+	nstat->collisions = (hwstat->tx_single_cols +
+			     hwstat->tx_multiple_cols +
+			     hwstat->tx_excessive_cols);
+	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
+				   hwstat->rx_jabbers +
+				   hwstat->rx_undersize_pkts +
+				   hwstat->rx_length_mismatch);
+	nstat->rx_over_errors = hwstat->rx_resource_errors +
+				   hwstat->rx_overruns;
+	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
+	nstat->rx_frame_errors = hwstat->rx_align_errors;
+	nstat->rx_fifo_errors = hwstat->rx_overruns;
+	/* XXX: What does "missed" mean? */
+	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
+	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
+	nstat->tx_fifo_errors = hwstat->tx_underruns;
+	/* Don't know about heartbeat or window errors... */
+
+	return nstat;
+}
+EXPORT_SYMBOL_GPL(rtmacb_get_stats);
+
+int rtmacb_ioctl(struct rtnet_device *dev, unsigned cmd, void *rq)
+{
+	struct macb *bp = rtnetdev_priv(dev);
+	struct phy_device *phydev = bp->phy_dev;
+
+	if (!rtnetif_running(dev))
+		return -EINVAL;
+
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_mii_ioctl(phydev, rq, cmd);
+}
+EXPORT_SYMBOL_GPL(rtmacb_ioctl);
+
+#if defined(CONFIG_OF)
+static const struct of_device_id macb_dt_ids[] = {
+	{ .compatible = "cdns,at32ap7000-macb" },
+	{ .compatible = "cdns,at91sam9260-macb" },
+	{ .compatible = "cdns,macb" },
+	{ .compatible = "cdns,pc302-gem" },
+	{ .compatible = "cdns,gem" },
+	{ .compatible = "atmel,sama5d3-gem" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, macb_dt_ids);
+#endif
+
+static int __init macb_probe(struct platform_device *pdev)
+{
+	struct macb_platform_data *pdata;
+	struct resource *regs;
+	struct rtnet_device *dev;
+	struct macb *bp;
+	struct phy_device *phydev;
+	u32 config;
+	int err = -ENXIO;
+	struct pinctrl *pinctrl;
+	const char *mac;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs) {
+		dev_err(&pdev->dev, "no mmio resource defined\n");
+		goto err_out;
+	}
+
+	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(pinctrl)) {
+		err = PTR_ERR(pinctrl);
+		if (err == -EPROBE_DEFER)
+			goto err_out;
+
+		dev_warn(&pdev->dev, "No pinctrl provided\n");
+	}
+
+	err = -ENOMEM;
+	dev = rt_alloc_etherdev(sizeof(*bp), RX_RING_SIZE * 2 + TX_RING_SIZE);
+	if (!dev)
+		goto err_out;
+
+	rtdev_alloc_name(dev, "rteth%d");
+	rt_rtdev_connect(dev, &RTDEV_manager);
+	dev->vers = RTDEV_VERS_2_0;
+	dev->sysbind = &pdev->dev;
+
+	/* TODO: Actually, we have some interesting features... */
+	dev->features |= 0;
+
+	bp = rtnetdev_priv(dev);
+	bp->pdev = pdev;
+	bp->dev = dev;
+
+	rtdm_lock_init(&bp->lock);
+	INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
+
+	bp->pclk = devm_clk_get(&pdev->dev, "pclk");
+	if (IS_ERR(bp->pclk)) {
+		err = PTR_ERR(bp->pclk);
+		dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
+		goto err_out_free_dev;
+	}
+
+	bp->hclk = devm_clk_get(&pdev->dev, "hclk");
+	if (IS_ERR(bp->hclk)) {
+		err = PTR_ERR(bp->hclk);
+		dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
+		goto err_out_free_dev;
+	}
+
+	bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+
+	err = clk_prepare_enable(bp->pclk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
+		goto err_out_free_dev;
+	}
+
+	err = clk_prepare_enable(bp->hclk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
+		goto err_out_disable_pclk;
+	}
+
+	if (!IS_ERR(bp->tx_clk)) {
+		err = clk_prepare_enable(bp->tx_clk);
+		if (err) {
+			dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n",
+					err);
+			goto err_out_disable_hclk;
+		}
+	}
+
+	bp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+	if (!bp->regs) {
+		dev_err(&pdev->dev, "failed to map registers, aborting.\n");
+		err = -ENOMEM;
+		goto err_out_disable_clocks;
+	}
+
+	dev->irq = platform_get_irq(pdev, 0);
+	rt_stack_connect(dev, &STACK_manager);
+
+	err = rtdm_irq_request(&bp->irq_handle, dev->irq, macb_interrupt, 0,
+			dev->name, dev);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
+			dev->irq, err);
+		goto err_out_disable_clocks;
+	}
+
+	dev->open = macb_open;
+	dev->stop = macb_close;
+	dev->hard_start_xmit = macb_start_xmit;
+	dev->do_ioctl = rtmacb_ioctl;
+	dev->get_stats = rtmacb_get_stats;
+
+	dev->base_addr = regs->start;
+
+	/* set up the appropriate routines according to the adapter type */
+	if (macb_is_gem(bp)) {
+		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
+		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
+		bp->macbgem_ops.mog_init_rings = gem_init_rings;
+		bp->macbgem_ops.mog_rx = gem_rx;
+	} else {
+		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
+		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
+		bp->macbgem_ops.mog_init_rings = macb_init_rings;
+		bp->macbgem_ops.mog_rx = macb_rx;
+	}
+
+	/* Set MII management clock divider */
+	config = macb_mdc_clk_div(bp);
+	config |= macb_dbw(bp);
+	macb_writel(bp, NCFGR, config);
+
+	mac = of_get_mac_address(pdev->dev.of_node);
+	if (mac)
+		memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
+	else
+		rtmacb_get_hwaddr(bp);
+
+	err = of_get_phy_mode(pdev->dev.of_node);
+	if (err < 0) {
+		pdata = dev_get_platdata(&pdev->dev);
+		if (pdata && pdata->is_rmii)
+			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
+		else
+			bp->phy_interface = PHY_INTERFACE_MODE_MII;
+	} else {
+		bp->phy_interface = err;
+	}
+
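+	/* Program the PHY interface type (plus clock enable on AT91) into USRIO. */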
+	if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
+		macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
+	else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
+#if defined(CONFIG_ARCH_AT91)
+		macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
+					       MACB_BIT(CLKEN)));
+#else
+		macb_or_gem_writel(bp, USRIO, 0);
+#endif
+	else
+#if defined(CONFIG_ARCH_AT91)
+		macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
+#else
+		macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
+#endif
+
+	err = rt_register_rtnetdev(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+		goto err_out_irq_free;
+	}
+
+	err = rtmacb_mii_init(bp);
+	if (err)
+		goto err_out_unregister_netdev;
+
+	platform_set_drvdata(pdev, dev);
+
+	rtnetif_carrier_off(dev);
+
+	rtdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
+		    macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
+		    dev->irq, dev->dev_addr);
+
+	phydev = bp->phy_dev;
+	rtdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+		    phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+
+	return 0;
+
+err_out_unregister_netdev:
+	rt_unregister_rtnetdev(dev);
+err_out_irq_free:
+	rtdm_irq_free(&bp->irq_handle);
+err_out_disable_clocks:
+	if (!IS_ERR(bp->tx_clk))
+		clk_disable_unprepare(bp->tx_clk);
+err_out_disable_hclk:
+	clk_disable_unprepare(bp->hclk);
+err_out_disable_pclk:
+	clk_disable_unprepare(bp->pclk);
+err_out_free_dev:
+	rtdev_free(dev);
+err_out:
+	return err;
+}
+
+static int __exit macb_remove(struct platform_device *pdev)
+{
+	struct rtnet_device *dev;
+	struct macb *bp;
+
+	dev = platform_get_drvdata(pdev);
+
+	if (dev) {
+		bp = rtnetdev_priv(dev);
+		if (bp->phy_dev)
+			phy_disconnect(bp->phy_dev);
+		mdiobus_unregister(bp->mii_bus);
+		if (bp->phy_phony_net_device)
+			free_netdev(bp->phy_phony_net_device);
+		kfree(bp->mii_bus->irq);
+		rt_rtdev_disconnect(dev);
+		rtdm_irq_free(&bp->irq_handle);
+		mdiobus_free(bp->mii_bus);
+		rt_unregister_rtnetdev(dev);
+		if (!IS_ERR(bp->tx_clk))
+			clk_disable_unprepare(bp->tx_clk);
+		clk_disable_unprepare(bp->hclk);
+		clk_disable_unprepare(bp->pclk);
+		rtdev_free(dev);
+	}
+
+	return 0;
+}
+
+static struct platform_driver macb_driver = {
+	.remove		= __exit_p(macb_remove),
+	.driver		= {
+		.name		= "macb",
+		.owner	= THIS_MODULE,
+		.of_match_table	= of_match_ptr(macb_dt_ids),
+	},
+};
+
+static bool found;
+static int __init macb_driver_init(void)
+{
+	found = platform_driver_probe(&macb_driver, macb_probe) == 0;
+	return 0;
+}
+module_init(macb_driver_init);
+
+static void __exit macb_driver_exit(void)
+{
+	if (found)
+		platform_driver_unregister(&macb_driver);
+}
+module_exit(macb_driver_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
+MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+MODULE_ALIAS("platform:macb");
+++ linux-patched/drivers/xenomai/net/drivers/loopback.c	2022-03-21 12:58:29.944883549 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/at91_ether.c	1970-01-01 01:00:00.000000000 +0100
+/* loopback.c
+ *
+ * Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ * extended by Jose Carlos Billalabeitia and Jan Kiszka
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/printk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <linux/netdevice.h>
+
+#include <rtnet_port.h>
+#include <stack_mgr.h>
+
+MODULE_AUTHOR("Maintainer: Jan Kiszka <Jan.Kiszka@web.de>");
+MODULE_DESCRIPTION("RTnet loopback driver");
+MODULE_LICENSE("GPL");
+
+static struct rtnet_device *rt_loopback_dev;
+
+/***
+ *  rt_loopback_open
+ *  @rtdev
+ */
+static int rt_loopback_open(struct rtnet_device *rtdev)
+{
+	rt_stack_connect(rtdev, &STACK_manager);
+	rtnetif_start_queue(rtdev);
+
+	return 0;
+}
+
+/***
+ *  rt_loopback_close
+ *  @rtdev
+ */
+static int rt_loopback_close(struct rtnet_device *rtdev)
+{
+	rtnetif_stop_queue(rtdev);
+	rt_stack_disconnect(rtdev);
+
+	return 0;
+}
+
+/***
+ *  rt_loopback_xmit - begin packet transmission
+ *  @skb: packet to be sent
+ *  @dev: network device to which packet is sent
+ *
+ */
+static int rt_loopback_xmit(struct rtskb *rtskb, struct rtnet_device *rtdev)
+{
+	/* write transmission stamp - in case any protocol ever gets the idea to
+	   ask the loopback device for this service... */
+	if (rtskb->xmit_stamp)
+		*rtskb->xmit_stamp =
+			cpu_to_be64(rtdm_clock_read() + *rtskb->xmit_stamp);
+
+	/* make sure that critical fields are re-initialised */
+	rtskb->chain_end = rtskb;
+
+	/* parse the Ethernet header as usual */
+	rtskb->protocol = rt_eth_type_trans(rtskb, rtdev);
+
+	rt_stack_deliver(rtskb);
+
+	return 0;
+}
+
+/***
+ *  loopback_init
+ */
+static int __init loopback_init(void)
+{
+	int err;
+	struct rtnet_device *rtdev;
+
+	pr_info("initializing loopback interface...\n");
+
+	if ((rtdev = rt_alloc_etherdev(0, 1)) == NULL)
+		return -ENODEV;
+
+	rt_rtdev_connect(rtdev, &RTDEV_manager);
+
+	strcpy(rtdev->name, "rtlo");
+
+	rtdev->vers = RTDEV_VERS_2_0;
+	rtdev->open = &rt_loopback_open;
+	rtdev->stop = &rt_loopback_close;
+	rtdev->hard_start_xmit = &rt_loopback_xmit;
+	rtdev->flags |= IFF_LOOPBACK;
+	rtdev->flags &= ~IFF_BROADCAST;
+	rtdev->features |= NETIF_F_LLTX;
+
+	if ((err = rt_register_rtnetdev(rtdev)) != 0) {
+		rtdev_free(rtdev);
+		return err;
+	}
+
+	rt_loopback_dev = rtdev;
+
+	return 0;
+}
+
+/***
+ *  loopback_cleanup
+ */
+static void __exit loopback_cleanup(void)
+{
+	struct rtnet_device *rtdev = rt_loopback_dev;
+
+	pr_info("removing loopback interface...\n");
+
+	rt_unregister_rtnetdev(rtdev);
+	rt_rtdev_disconnect(rtdev);
+
+	rtdev_free(rtdev);
+}
+
+module_init(loopback_init);
+module_exit(loopback_cleanup);
+++ linux-patched/drivers/xenomai/net/drivers/at91_ether.c	2022-03-21 12:58:29.937883618 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Ethernet driver for the Atmel AT91RM9200 (Thunder)
+ *
+ *  Copyright (C) 2003 SAN People (Pty) Ltd
+ *
+ * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
+ * Initial version by Rick Bronson 01/11/2003
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * RTnet port:
+ * Copyright (C) 2014 Gilles Chanteperdrix <gch@xenomai.org>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/platform_data/macb.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/gfp.h>
+#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+
+#include <rtdev.h>
+#include <rtdm/net.h>
+#include <rtnet_port.h>
+#include <rtskb.h>
+#include "rt_macb.h"
+
+/* 1518 rounded up */
+#define MAX_RBUFF_SZ	0x600
+/* max number of receive buffers */
+#define MAX_RX_DESCR	9
+
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct rtnet_device *dev)
+{
+	struct macb *lp = rtnetdev_priv(dev);
+	dma_addr_t addr;
+	u32 ctl;
+	int i;
+
+	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
+					 (MAX_RX_DESCR *
+					  sizeof(struct macb_dma_desc)),
+					 &lp->rx_ring_dma, GFP_KERNEL);
+	if (!lp->rx_ring)
+		return -ENOMEM;
+
+	lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
+					    MAX_RX_DESCR * MAX_RBUFF_SZ,
+					    &lp->rx_buffers_dma, GFP_KERNEL);
+	if (!lp->rx_buffers) {
+		dma_free_coherent(&lp->pdev->dev,
+				  MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+				  lp->rx_ring, lp->rx_ring_dma);
+		lp->rx_ring = NULL;
+		return -ENOMEM;
+	}
+
+	addr = lp->rx_buffers_dma;
+	for (i = 0; i < MAX_RX_DESCR; i++) {
+		lp->rx_ring[i].addr = addr;
+		lp->rx_ring[i].ctrl = 0;
+		addr += MAX_RBUFF_SZ;
+	}
+
+	/* Set the Wrap bit on the last descriptor */
+	lp->rx_ring[MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
+
+	/* Reset buffer index */
+	lp->rx_tail = 0;
+
+	/* Program address of descriptor list in Rx Buffer Queue register */
+	macb_writel(lp, RBQP, lp->rx_ring_dma);
+
+	/* Enable Receive and Transmit */
+	ctl = macb_readl(lp, NCR);
+	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
+
+	return 0;
+}
+
+/* Open the ethernet interface */
+static int at91ether_open(struct rtnet_device *dev)
+{
+	struct macb *lp = rtnetdev_priv(dev);
+	u32 ctl;
+	int ret;
+
+	rt_stack_connect(dev, &STACK_manager);
+
+	/* Clear internal statistics */
+	ctl = macb_readl(lp, NCR);
+	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
+
+	rtmacb_set_hwaddr(lp);
+
+	ret = at91ether_start(dev);
+	if (ret)
+		return ret;
+
+	/* Enable MAC interrupts */
+	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
+			     MACB_BIT(RXUBR)	|
+			     MACB_BIT(ISR_TUND)	|
+			     MACB_BIT(ISR_RLE)	|
+			     MACB_BIT(TCOMP)	|
+			     MACB_BIT(ISR_ROVR)	|
+			     MACB_BIT(HRESP));
+
+	/* schedule a link state check */
+	phy_start(lp->phy_dev);
+
+	rtnetif_start_queue(dev);
+
+	return 0;
+}
+
+/* Close the interface */
+static int at91ether_close(struct rtnet_device *dev)
+{
+	struct macb *lp = rtnetdev_priv(dev);
+	u32 ctl;
+
+	/* Disable Receiver and Transmitter */
+	ctl = macb_readl(lp, NCR);
+	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+
+	/* Disable MAC interrupts */
+	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
+			     MACB_BIT(RXUBR)	|
+			     MACB_BIT(ISR_TUND)	|
+			     MACB_BIT(ISR_RLE)	|
+			     MACB_BIT(TCOMP)	|
+			     MACB_BIT(ISR_ROVR) |
+			     MACB_BIT(HRESP));
+
+	rtnetif_stop_queue(dev);
+
+	dma_free_coherent(&lp->pdev->dev,
+				MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+				lp->rx_ring, lp->rx_ring_dma);
+	lp->rx_ring = NULL;
+
+	dma_free_coherent(&lp->pdev->dev,
+				MAX_RX_DESCR * MAX_RBUFF_SZ,
+				lp->rx_buffers, lp->rx_buffers_dma);
+	lp->rx_buffers = NULL;
+
+	rt_stack_disconnect(dev);
+
+	return 0;
+}
+
+/* Transmit packet */
+static int at91ether_start_xmit(struct rtskb *skb, struct rtnet_device *dev)
+{
+	struct macb *lp = rtnetdev_priv(dev);
+
+	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
+		rtnetif_stop_queue(dev);
+
+		/* Store packet information (to free when Tx completed) */
+		lp->skb = skb;
+		lp->skb_length = skb->len;
+		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
+							DMA_TO_DEVICE);
+
+		/* Set address of the data in the Transmit Address register */
+		macb_writel(lp, TAR, lp->skb_physaddr);
+		/* Set length of the packet in the Transmit Control register */
+		macb_writel(lp, TCR, skb->len);
+
+	} else {
+		rtdev_err(dev, "%s called, but device is busy!\n", __func__);
+		return RTDEV_TX_BUSY;
+	}
+
+	return RTDEV_TX_OK;
+}
+
+/* Extract received frames from the buffer descriptors and send them to the upper layers.
+ * (Called from interrupt context)
+ */
+static bool at91ether_rx(struct rtnet_device *dev, nanosecs_abs_t *time_stamp)
+{
+	struct macb *lp = rtnetdev_priv(dev);
+	unsigned char *p_recv;
+	struct rtskb *skb;
+	unsigned int pktlen;
+	bool ret = false;
+
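+	/* Walk the descriptors the controller has handed back (RX_USED set). */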
+	while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+		p_recv = lp->rx_buffers + lp->rx_tail * MAX_RBUFF_SZ;
+		pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
+		skb = rtnetdev_alloc_rtskb(dev, pktlen + 2);
+		if (skb) {
+			rtskb_reserve(skb, 2);
+			memcpy(rtskb_put(skb, pktlen), p_recv, pktlen);
+
+			skb->protocol = rt_eth_type_trans(skb, dev);
+			lp->stats.rx_packets++;
+			lp->stats.rx_bytes += pktlen;
+			ret = true;
+			skb->time_stamp = *time_stamp;
+			rtnetif_rx(skb);
+		} else {
+			lp->stats.rx_dropped++;
+		}
+
+		if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+			lp->stats.multicast++;
+
+		/* reset ownership bit */
+		lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
+
+		/* wrap after last buffer */
+		if (lp->rx_tail == MAX_RX_DESCR - 1)
+			lp->rx_tail = 0;
+		else
+			lp->rx_tail++;
+	}
+
+	return ret;
+}
+
+/* MAC interrupt handler */
+static int at91ether_interrupt(rtdm_irq_t *irq_handle)
+{
+	void *dev_id = rtdm_irq_get_arg(irq_handle, void);
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+	struct rtnet_device *dev = dev_id;
+	struct macb *lp = rtnetdev_priv(dev);
+	u32 intstatus, ctl;
+
+	/* MAC Interrupt Status register indicates what interrupts are pending.
+	 * It is automatically cleared once read.
+	 */
+	intstatus = macb_readl(lp, ISR);
+
+	/* Receive complete */
+	if ((intstatus & MACB_BIT(RCOMP)) && at91ether_rx(dev, &time_stamp))
+		rt_mark_stack_mgr(dev);
+
+	/* Transmit complete */
+	if (intstatus & MACB_BIT(TCOMP)) {
+		/* The TCOMP bit is set even if the transmission failed */
+		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
+			lp->stats.tx_errors++;
+
+		if (lp->skb) {
+			dev_kfree_rtskb(lp->skb);
+			lp->skb = NULL;
+			dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE);
+			lp->stats.tx_packets++;
+			lp->stats.tx_bytes += lp->skb_length;
+		}
+		rtnetif_wake_queue(dev);
+	}
+
+	/* Work-around for EMAC Errata section 41.3.1 */
+	if (intstatus & MACB_BIT(RXUBR)) {
+		ctl = macb_readl(lp, NCR);
+		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
+		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
+	}
+
+	if (intstatus & MACB_BIT(ISR_ROVR))
+		rtdev_err(dev, "ROVR error\n");
+
+	return RTDM_IRQ_HANDLED;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id at91ether_dt_ids[] = {
+	{ .compatible = "cdns,at91rm9200-emac" },
+	{ .compatible = "cdns,emac" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
+#endif
+
+/* Detect MAC & PHY and perform ethernet interface initialization */
+static int __init at91ether_probe(struct platform_device *pdev)
+{
+	struct macb_platform_data *board_data = dev_get_platdata(&pdev->dev);
+	struct resource *regs;
+	struct rtnet_device *dev;
+	struct phy_device *phydev;
+	struct macb *lp;
+	int res;
+	u32 reg;
+	const char *mac;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs)
+		return -ENOENT;
+
+	dev = rt_alloc_etherdev(sizeof(struct macb), MAX_RX_DESCR * 2 + 2);
+	if (!dev)
+		return -ENOMEM;
+
+	rtdev_alloc_name(dev, "rteth%d");
+	rt_rtdev_connect(dev, &RTDEV_manager);
+	dev->vers = RTDEV_VERS_2_0;
+	dev->sysbind = &pdev->dev;
+
+	lp = rtnetdev_priv(dev);
+	lp->pdev = pdev;
+	lp->dev = dev;
+	rtdm_lock_init(&lp->lock);
+
+	/* physical base address */
+	dev->base_addr = regs->start;
+	lp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+	if (!lp->regs) {
+		res = -ENOMEM;
+		goto err_free_dev;
+	}
+
+	/* Clock */
+	lp->pclk = devm_clk_get(&pdev->dev, "ether_clk");
+	if (IS_ERR(lp->pclk)) {
+		res = PTR_ERR(lp->pclk);
+		goto err_free_dev;
+	}
+	clk_enable(lp->pclk);
+
+	lp->hclk = ERR_PTR(-ENOENT);
+	lp->tx_clk = ERR_PTR(-ENOENT);
+
+	/* Install the interrupt handler */
+	dev->irq = platform_get_irq(pdev, 0);
+	res = rtdm_irq_request(&lp->irq_handle, dev->irq, at91ether_interrupt, 0, dev->name, dev);
+	if (res)
+		goto err_disable_clock;
+
+	dev->open = at91ether_open;
+	dev->stop = at91ether_close;
+	dev->hard_start_xmit = at91ether_start_xmit;
+	dev->do_ioctl = rtmacb_ioctl;
+	dev->get_stats = rtmacb_get_stats;
+
+	platform_set_drvdata(pdev, dev);
+
+	mac = of_get_mac_address(pdev->dev.of_node);
+	if (mac)
+		memcpy(lp->dev->dev_addr, mac, ETH_ALEN);
+	else
+		rtmacb_get_hwaddr(lp);
+
+	res = of_get_phy_mode(pdev->dev.of_node);
+	if (res < 0) {
+		if (board_data && board_data->is_rmii)
+			lp->phy_interface = PHY_INTERFACE_MODE_RMII;
+		else
+			lp->phy_interface = PHY_INTERFACE_MODE_MII;
+	} else {
+		lp->phy_interface = res;
+	}
+
+	macb_writel(lp, NCR, 0);
+
+	reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
+	if (lp->phy_interface == PHY_INTERFACE_MODE_RMII)
+		reg |= MACB_BIT(RM9200_RMII);
+
+	macb_writel(lp, NCFGR, reg);
+
+	/* Register the network interface */
+	res = rt_register_rtnetdev(dev);
+	if (res)
+		goto err_irq_free;
+
+	res = rtmacb_mii_init(lp);
+	if (res)
+		goto err_out_unregister_netdev;
+
+	/* will be enabled in open() */
+	rtnetif_carrier_off(dev);
+
+	phydev = lp->phy_dev;
+	rtdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+				phydev->drv->name, dev_name(&phydev->dev),
+				phydev->irq);
+
+	/* Display ethernet banner */
+	rtdev_info(dev, "AT91 ethernet at 0x%08lx int=%d (%pM)\n",
+				dev->base_addr, dev->irq, dev->dev_addr);
+
+	return 0;
+
+err_out_unregister_netdev:
+	rt_unregister_rtnetdev(dev);
+err_irq_free:
+	rtdm_irq_free(&lp->irq_handle);
+err_disable_clock:
+	clk_disable(lp->pclk);
+err_free_dev:
+	rtdev_free(dev);
+	return res;
+}
+
+static int at91ether_remove(struct platform_device *pdev)
+{
+	struct rtnet_device *dev = platform_get_drvdata(pdev);
+	struct macb *lp = rtnetdev_priv(dev);
+
+	if (lp->phy_dev)
+		phy_disconnect(lp->phy_dev);
+
+	mdiobus_unregister(lp->mii_bus);
+	if (lp->phy_phony_net_device)
+		free_netdev(lp->phy_phony_net_device);
+	kfree(lp->mii_bus->irq);
+	rt_rtdev_disconnect(dev);
+	rtdm_irq_free(&lp->irq_handle);
+	mdiobus_free(lp->mii_bus);
+	rt_unregister_rtnetdev(dev);
+	clk_disable(lp->pclk);
+	rtdev_free(dev);
+
+	return 0;
+}
+
+static struct platform_driver at91ether_driver = {
+	.remove		= at91ether_remove,
+	.driver		= {
+		.name	= "at91_ether",
+		.owner	= THIS_MODULE,
+		.of_match_table	= of_match_ptr(at91ether_dt_ids),
+	},
+};
+
+module_platform_driver_probe(at91ether_driver, at91ether_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
+MODULE_AUTHOR("Andrew Victor");
+MODULE_ALIAS("platform:at91_ether");
+++ linux-patched/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.c	2022-03-21 12:58:29.930883686 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.h	1970-01-01 01:00:00.000000000 +0100
+/* rt2500pci.c
+ *
+ * Copyright (C) 2004 - 2005 rt2x00-2.0.0-b3 SourceForge Project
+ *			     <http://rt2x00.serialmonkey.com>
+ *               2006        rtnet adaption by Daniel Gregorek
+ *                           <dxg@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * Module: rt_rt2500pci
+ * Abstract: rt2500pci device specific routines.
+ * Supported chipsets: RT2560.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "rt2x00.h"
+#include "rt2500pci.h"
+
+#include <rtnet_port.h>
+
+#ifdef DRV_NAME
+#undef DRV_NAME
+#define DRV_NAME "rt_rt2500pci"
+#endif /* DRV_NAME */
+
+/* handler for direct register access from core module */
+static int rt2x00_dev_register_access(struct _rt2x00_core *core, int request,
+				      u32 address, u32 *value)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+	u8 u8_value;
+
+	switch (request) {
+	case IOC_RTWLAN_REGREAD:
+		rt2x00_register_read(rt2x00pci, address, value);
+		break;
+	case IOC_RTWLAN_REGWRITE:
+		rt2x00_register_write(rt2x00pci, address, *value);
+		break;
+	case IOC_RTWLAN_BBPREAD:
+		rt2x00_bbp_regread(rt2x00pci, address, &u8_value);
+		*value = u8_value;
+		break;
+	case IOC_RTWLAN_BBPWRITE:
+		rt2x00_bbp_regwrite(rt2x00pci, address, *value);
+		break;
+	default:
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Interrupt routines.
+ * rt2x00_interrupt_txdone processes the results of all transmitted packets.
+ * rt2x00_interrupt_rxdone processes all received packets.
+ */
+static void rt2x00_interrupt_txdone(struct _data_ring *ring)
+{
+	struct rtwlan_device *rtwlan_dev = rtnetdev_priv(ring->core->rtnet_dev);
+	struct _txd *txd = NULL;
+	u8 tx_result = 0x00;
+	/*    u8			retry_count = 0x00; */
+
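+	/* Walk completed descriptors until one is still NIC-owned or no longer valid. */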
+	do {
+		txd = DESC_ADDR_DONE(ring);
+
+		if (rt2x00_get_field32(txd->word0, TXD_W0_OWNER_NIC) ||
+		    !rt2x00_get_field32(txd->word0, TXD_W0_VALID))
+			break;
+
+		if (ring->ring_type == RING_TX) {
+			tx_result =
+				rt2x00_get_field32(txd->word0, TXD_W0_RESULT);
+			/*	    retry_count = rt2x00_get_field32(txd->word0, TXD_W0_RETRY_COUNT); */
+
+			switch (tx_result) {
+			case TX_SUCCESS:
+				rtwlan_dev->stats.tx_packets++;
+				break;
+			case TX_SUCCESS_RETRY:
+				rtwlan_dev->stats.tx_retry++;
+				break;
+			case TX_FAIL_RETRY:
+				DEBUG("TX_FAIL_RETRY.\n");
+				break;
+			case TX_FAIL_INVALID:
+				DEBUG("TX_FAIL_INVALID.\n");
+				break;
+			case TX_FAIL_OTHER:
+				DEBUG("TX_FAIL_OTHER.\n");
+				break;
+			default:
+				DEBUG("Unknown tx result.\n");
+			}
+		}
+
+		rt2x00_set_field32(&txd->word0, TXD_W0_VALID, 0);
+
+		rt2x00_ring_index_done_inc(ring);
+	} while (!rt2x00_ring_empty(ring));
+}
+
+static void rt2x00_interrupt_rxdone(struct _data_ring *ring,
+				    nanosecs_abs_t *time_stamp)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(ring->core);
+	struct rtnet_device *rtnet_dev = ring->core->rtnet_dev;
+	struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev);
+	struct _rxd *rxd = NULL;
+	struct rtskb *rtskb;
+	void *data = NULL;
+	u16 size = 0x0000;
+	/*    u16                    rssi = 0x0000; */
+
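+	/* Drain descriptors handed back by the NIC, copying each frame into an rtskb. */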
+	while (1) {
+		rxd = DESC_ADDR(ring);
+		data = DATA_ADDR(ring);
+
+		if (rt2x00_get_field32(rxd->word0, RXD_W0_OWNER_NIC))
+			break;
+
+		size = rt2x00_get_field32(rxd->word0, RXD_W0_DATABYTE_COUNT);
+		/*	rssi = rt2x00_get_field32(rxd->word2, RXD_W2_RSSI); */
+
+		/* prepare rtskb */
+		rtskb = rtnetdev_alloc_rtskb(rtnet_dev, size + NET_IP_ALIGN);
+		if (!rtskb) {
+			ERROR("Couldn't allocate rtskb, packet dropped.\n");
+			break;
+		}
+		rtskb->time_stamp = *time_stamp;
+		rtskb_reserve(rtskb, NET_IP_ALIGN);
+
+		memcpy(rtskb->data, data, size);
+		rtskb_put(rtskb, size);
+
+		/* give incoming frame to rtwlan stack */
+		rtwlan_rx(rtskb, rtnet_dev);
+
+		rtwlan_dev->stats.rx_packets++;
+
+		rt2x00_set_field32(&rxd->word0, RXD_W0_OWNER_NIC, 1);
+		rt2x00_ring_index_inc(&rt2x00pci->rx);
+	}
+}
+
+int rt2x00_interrupt(rtdm_irq_t *irq_handle)
+{
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+	struct rtnet_device *rtnet_dev =
+		rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev);
+	struct _rt2x00_core *core = rtwlan_priv(rtwlan_dev);
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+	unsigned int old_packet_cnt = rtwlan_dev->stats.rx_packets;
+	u32 reg = 0x00000000;
+
+	rtdm_lock_get(&rt2x00pci->lock);
+
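+	/* Read the pending interrupt sources; writing the value back acknowledges them. */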
+	rt2x00_register_read(rt2x00pci, CSR7, &reg);
+	rt2x00_register_write(rt2x00pci, CSR7, reg);
+
+	if (!reg) {
+		rtdm_lock_put(&rt2x00pci->lock);
+		return RTDM_IRQ_NONE;
+	}
+
+	if (rt2x00_get_field32(
+		    reg,
+		    CSR7_TBCN_EXPIRE)) /* Beacon timer expired interrupt. */
+		DEBUG("Beacon timer expired.\n");
+	if (rt2x00_get_field32(reg, CSR7_RXDONE)) /* Rx ring done interrupt. */
+		rt2x00_interrupt_rxdone(&rt2x00pci->rx, &time_stamp);
+	if (rt2x00_get_field32(
+		    reg,
+		    CSR7_TXDONE_ATIMRING)) /* Atim ring transmit done interrupt. */
+		DEBUG("AtimTxDone.\n");
+	if (rt2x00_get_field32(
+		    reg,
+		    CSR7_TXDONE_PRIORING)) /* Priority ring transmit done interrupt. */
+		DEBUG("PrioTxDone.\n");
+	if (rt2x00_get_field32(
+		    reg,
+		    CSR7_TXDONE_TXRING)) /* Tx ring transmit done interrupt. */
+		rt2x00_interrupt_txdone(&rt2x00pci->tx);
+
+	rtdm_lock_put(&rt2x00pci->lock);
+
+	if (old_packet_cnt != rtwlan_dev->stats.rx_packets)
+		rt_mark_stack_mgr(rtnet_dev);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+void rt2x00_init_eeprom(struct _rt2x00_pci *rt2x00pci,
+			struct _rt2x00_config *config)
+{
+	u32 reg = 0x00000000;
+	u16 eeprom = 0x0000;
+
+	/*
+     * 1 - Detect EEPROM width.
+     */
+	rt2x00_register_read(rt2x00pci, CSR21, &reg);
+	rt2x00pci->eeprom_width = rt2x00_get_field32(reg, CSR21_TYPE_93C46) ?
+					  EEPROM_WIDTH_93c46 :
+					  EEPROM_WIDTH_93c66;
+
+	/*
+     * 2 - Identify rf chipset.
+     */
+	eeprom = rt2x00_eeprom_read_word(rt2x00pci, EEPROM_ANTENNA);
+	set_chip(&rt2x00pci->chip, RT2560,
+		 rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE));
+
+	/*
+     * 3 - Identify default antenna configuration.
+     */
+	config->antenna_tx =
+		rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT);
+	config->antenna_rx =
+		rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT);
+
+	DEBUG("antenna_tx=%d antenna_rx=%d\n", config->antenna_tx,
+	      config->antenna_rx);
+
+	/*
+     * 4 - Read BBP data from EEPROM and store in private structure.
+     */
+	memset(&rt2x00pci->eeprom, 0x00, sizeof(rt2x00pci->eeprom));
+	for (eeprom = 0; eeprom < EEPROM_BBP_SIZE; eeprom++)
+		rt2x00pci->eeprom[eeprom] = rt2x00_eeprom_read_word(
+			rt2x00pci, EEPROM_BBP_START + eeprom);
+}
+
+void rt2x00_dev_read_mac(struct _rt2x00_pci *rt2x00pci,
+			 struct rtnet_device *rtnet_dev)
+{
+	u32 reg[2];
+
+	memset(&reg, 0x00, sizeof(reg));
+
+	rt2x00_register_multiread(rt2x00pci, CSR3, &reg[0], sizeof(reg));
+
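+	/* The MAC address spans CSR3 (bytes 0-3) and CSR4 (bytes 4-5). */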
+	rtnet_dev->dev_addr[0] = rt2x00_get_field32(reg[0], CSR3_BYTE0);
+	rtnet_dev->dev_addr[1] = rt2x00_get_field32(reg[0], CSR3_BYTE1);
+	rtnet_dev->dev_addr[2] = rt2x00_get_field32(reg[0], CSR3_BYTE2);
+	rtnet_dev->dev_addr[3] = rt2x00_get_field32(reg[0], CSR3_BYTE3);
+	rtnet_dev->dev_addr[4] = rt2x00_get_field32(reg[1], CSR4_BYTE4);
+	rtnet_dev->dev_addr[5] = rt2x00_get_field32(reg[1], CSR4_BYTE5);
+
+	rtnet_dev->addr_len = 6;
+}
+
+int rt2x00_dev_probe(struct _rt2x00_core *core, void *priv)
+{
+	struct pci_dev *pci_dev = (struct pci_dev *)priv;
+	struct _rt2x00_pci *rt2x00pci = core->priv;
+
+	memset(rt2x00pci, 0x00, sizeof(*rt2x00pci));
+
+	if (unlikely(!pci_dev)) {
+		ERROR("invalid priv pointer.\n");
+		return -ENODEV;
+	}
+	rt2x00pci->pci_dev = pci_dev;
+
+	rt2x00pci->rx.data_addr = NULL;
+	rt2x00pci->tx.data_addr = NULL;
+
+	rt2x00pci->csr_addr = ioremap(pci_resource_start(pci_dev, 0),
+				      pci_resource_len(pci_dev, 0));
+	if (!rt2x00pci->csr_addr) {
+		ERROR("ioremap failed.\n");
+		return -ENOMEM;
+	}
+
+	rt2x00_init_eeprom(rt2x00pci, &core->config);
+	rt2x00_dev_read_mac(rt2x00pci, core->rtnet_dev);
+
+	return 0;
+}
+
+int rt2x00_dev_remove(struct _rt2x00_core *core)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+
+	if (rt2x00pci->csr_addr) {
+		iounmap(rt2x00pci->csr_addr);
+		rt2x00pci->csr_addr = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * rt2x00_clear_ring
+ * During the initialization some of the descriptor variables are filled in.
+ * The default value of the owner variable differs between the descriptor types:
+ * DMA ring entries that receive packets are owned by the device until a packet is received,
+ * while DMA ring entries used to transmit a packet are owned by the module until the device
+ * should transmit the packet of that particular DMA ring entry; for these rings the valid
+ * bit is set to 0 to indicate the entry is ready for use.
+ * The BUFFER_ADDRESS variable is used to link a descriptor to a packet data block.
+ */
+static void rt2x00_clear_ring(struct _rt2x00_pci *rt2x00pci,
+			      struct _data_ring *ring)
+{
+	struct _rxd *rxd = NULL;
+	struct _txd *txd = NULL;
+	dma_addr_t data_dma =
+		ring->data_dma + (ring->max_entries * ring->desc_size);
+	u8 counter = 0x00;
+
+	memset(ring->data_addr, 0x00, ring->mem_size);
+
+	for (; counter < ring->max_entries; counter++) {
+		if (ring->ring_type == RING_RX) {
+			rxd = (struct _rxd *)__DESC_ADDR(ring, counter);
+
+			rt2x00_set_field32(&rxd->word1, RXD_W1_BUFFER_ADDRESS,
+					   data_dma);
+			rt2x00_set_field32(&rxd->word0, RXD_W0_OWNER_NIC, 1);
+		} else {
+			txd = (struct _txd *)__DESC_ADDR(ring, counter);
+
+			rt2x00_set_field32(&txd->word1, TXD_W1_BUFFER_ADDRESS,
+					   data_dma);
+			rt2x00_set_field32(&txd->word0, TXD_W0_VALID, 0);
+			rt2x00_set_field32(&txd->word0, TXD_W0_OWNER_NIC, 0);
+		}
+
+		data_dma += ring->entry_size;
+	}
+
+	rt2x00_ring_clear_index(ring);
+}
+
+/*
+ * rt2x00_init_ring_register
+ * The registers should be updated with the descriptor size and the
+ * number of entries of each ring.
+ * The address of the first entry of the descriptor ring is written to the register
+ * corresponding to the ring.
+ */
+static void rt2x00_init_ring_register(struct _rt2x00_pci *rt2x00pci)
+{
+	u32 reg = 0x00000000;
+
+	/* Initialize ring register for RX/TX */
+
+	rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00pci->tx.desc_size);
+	rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00pci->tx.max_entries);
+	rt2x00_register_write(rt2x00pci, TXCSR2, reg);
+
+	reg = 0x00000000;
+	rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER,
+			   rt2x00pci->tx.data_dma);
+	rt2x00_register_write(rt2x00pci, TXCSR3, reg);
+
+	reg = 0x00000000;
+	rt2x00_set_field32(&reg, RXCSR1_RXD_SIZE, rt2x00pci->rx.desc_size);
+	rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00pci->rx.max_entries);
+	rt2x00_register_write(rt2x00pci, RXCSR1, reg);
+
+	reg = 0x00000000;
+	rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER,
+			   rt2x00pci->rx.data_dma);
+	rt2x00_register_write(rt2x00pci, RXCSR2, reg);
+}
+
+static int rt2x00_init_registers(struct _rt2x00_pci *rt2x00pci)
+{
+	u32 reg = 0x00000000;
+
+	DEBUG("Start.\n");
+
+	rt2x00_register_write(rt2x00pci, PWRCSR0, cpu_to_le32(0x3f3b3100));
+
+	rt2x00_register_write(rt2x00pci, PSCSR0, cpu_to_le32(0x00020002));
+	rt2x00_register_write(rt2x00pci, PSCSR1, cpu_to_le32(0x00000002));
+	rt2x00_register_write(rt2x00pci, PSCSR2, cpu_to_le32(0x00020002));
+	rt2x00_register_write(rt2x00pci, PSCSR3, cpu_to_le32(0x00000002));
+
+	rt2x00_register_read(rt2x00pci, TIMECSR, &reg);
+	rt2x00_set_field32(&reg, TIMECSR_US_COUNT, 33);
+	rt2x00_set_field32(&reg, TIMECSR_US_64_COUNT, 63);
+	rt2x00_set_field32(&reg, TIMECSR_BEACON_EXPECT, 0);
+	rt2x00_register_write(rt2x00pci, TIMECSR, reg);
+
+	rt2x00_register_read(rt2x00pci, CSR9, &reg);
+	rt2x00_set_field32(&reg, CSR9_MAX_FRAME_UNIT,
+			   (rt2x00pci->rx.entry_size / 128));
+	rt2x00_register_write(rt2x00pci, CSR9, reg);
+
+	rt2x00_register_write(rt2x00pci, CNT3, cpu_to_le32(0x3f080000));
+
+	rt2x00_register_read(rt2x00pci, RXCSR0, &reg);
+	rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 0);
+	rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL, 0);
+	rt2x00_register_write(rt2x00pci, RXCSR0, reg);
+
+	rt2x00_register_write(rt2x00pci, MACCSR0, cpu_to_le32(0x00213223));
+
+	rt2x00_register_read(rt2x00pci, MACCSR1, &reg);
+	rt2x00_set_field32(&reg, MACCSR1_AUTO_TXBBP, 1);
+	rt2x00_set_field32(&reg, MACCSR1_AUTO_RXBBP, 1);
+	rt2x00_register_write(rt2x00pci, MACCSR1, reg);
+
+	rt2x00_register_read(rt2x00pci, MACCSR2, &reg);
+	rt2x00_set_field32(&reg, MACCSR2_DELAY, 64);
+	rt2x00_register_write(rt2x00pci, MACCSR2, reg);
+
+	rt2x00_register_read(rt2x00pci, RXCSR3, &reg);
+	rt2x00_set_field32(&reg, RXCSR3_BBP_ID0, 47); /* Signal. */
+	rt2x00_set_field32(&reg, RXCSR3_BBP_ID0_VALID, 1);
+	rt2x00_set_field32(&reg, RXCSR3_BBP_ID1, 51); /* Rssi. */
+	rt2x00_set_field32(&reg, RXCSR3_BBP_ID1_VALID, 1);
+	rt2x00_set_field32(&reg, RXCSR3_BBP_ID2, 42); /* OFDM Rate. */
+	rt2x00_set_field32(&reg, RXCSR3_BBP_ID2_VALID, 1);
+	rt2x00_set_field32(&reg, RXCSR3_BBP_ID3, 51); /* OFDM. */
+	rt2x00_set_field32(&reg, RXCSR3_BBP_ID3_VALID, 1);
+	rt2x00_register_write(rt2x00pci, RXCSR3, reg);
+
+	rt2x00_register_read(rt2x00pci, RALINKCSR, &reg);
+	rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA0, 17);
+	rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID0, 26);
+	rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_VALID0, 1);
+	rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA1, 0);
+	rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID1, 26);
+	rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_VALID1, 1);
+	rt2x00_register_write(rt2x00pci, RALINKCSR, reg);
+
+	rt2x00_register_write(rt2x00pci, BBPCSR1, cpu_to_le32(0x82188200));
+
+	rt2x00_register_write(rt2x00pci, TXACKCSR0, cpu_to_le32(0x00000020));
+
+	rt2x00_register_write(rt2x00pci, ARTCSR0, cpu_to_le32(0x7038140a));
+	rt2x00_register_write(rt2x00pci, ARTCSR1, cpu_to_le32(0x1d21252d));
+	rt2x00_register_write(rt2x00pci, ARTCSR2, cpu_to_le32(0x1919191d));
+
+	/* disable Beacon timer */
+	rt2x00_register_write(rt2x00pci, CSR14, 0x0);
+
+	reg = 0x00000000;
+	rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, 30);
+	rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, 70);
+	rt2x00_set_field32(&reg, LEDCSR_LINK, 0);
+	rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, 0);
+	rt2x00_register_write(rt2x00pci, LEDCSR, reg);
+
+	reg = 0x00000000;
+	rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 1);
+	rt2x00_register_write(rt2x00pci, CSR1, reg);
+
+	reg = 0x00000000;
+	rt2x00_set_field32(&reg, CSR1_HOST_READY, 1);
+	rt2x00_register_write(rt2x00pci, CSR1, reg);
+
+	/*
+     * We must clear the FCS and FIFO error count.
+     * These registers are cleared on read, so we may pass a useless variable to store the value.
+     */
+	rt2x00_register_read(rt2x00pci, CNT0, &reg);
+	rt2x00_register_read(rt2x00pci, CNT4, &reg);
+
+	return 0;
+}
+
+static void rt2x00_init_write_mac(struct _rt2x00_pci *rt2x00pci,
+				  struct rtnet_device *rtnet_dev)
+{
+	u32 reg[2];
+
+	memset(&reg, 0x00, sizeof(reg));
+
+	rt2x00_set_field32(&reg[0], CSR3_BYTE0, rtnet_dev->dev_addr[0]);
+	rt2x00_set_field32(&reg[0], CSR3_BYTE1, rtnet_dev->dev_addr[1]);
+	rt2x00_set_field32(&reg[0], CSR3_BYTE2, rtnet_dev->dev_addr[2]);
+	rt2x00_set_field32(&reg[0], CSR3_BYTE3, rtnet_dev->dev_addr[3]);
+	rt2x00_set_field32(&reg[1], CSR4_BYTE4, rtnet_dev->dev_addr[4]);
+	rt2x00_set_field32(&reg[1], CSR4_BYTE5, rtnet_dev->dev_addr[5]);
+
+	rt2x00_register_multiwrite(rt2x00pci, CSR3, &reg[0], sizeof(reg));
+}
+
+static int rt2x00_init_bbp(struct _rt2x00_pci *rt2x00pci)
+{
+	u8 reg_id = 0x00;
+	u8 value = 0x00;
+	u8 counter = 0x00;
+
+	for (counter = 0x00; counter < REGISTER_BUSY_COUNT; counter++) {
+		rt2x00_bbp_regread(rt2x00pci, 0x00, &value);
+		if ((value != 0xff) && (value != 0x00))
+			goto continue_csr_init;
+		NOTICE("Waiting for BBP register.\n");
+	}
+
+	ERROR("hardware problem, BBP register access failed, aborting.\n");
+	return -EACCES;
+
+continue_csr_init:
+	rt2x00_bbp_regwrite(rt2x00pci, 3, 0x02);
+	rt2x00_bbp_regwrite(rt2x00pci, 4, 0x19);
+	rt2x00_bbp_regwrite(rt2x00pci, 14, 0x1c);
+	rt2x00_bbp_regwrite(rt2x00pci, 15, 0x30);
+	rt2x00_bbp_regwrite(rt2x00pci, 16, 0xac);
+	rt2x00_bbp_regwrite(rt2x00pci, 17, 0x48);
+	rt2x00_bbp_regwrite(rt2x00pci, 18, 0x18);
+	rt2x00_bbp_regwrite(rt2x00pci, 19, 0xff);
+	rt2x00_bbp_regwrite(rt2x00pci, 20, 0x1e);
+	rt2x00_bbp_regwrite(rt2x00pci, 21, 0x08);
+	rt2x00_bbp_regwrite(rt2x00pci, 22, 0x08);
+	rt2x00_bbp_regwrite(rt2x00pci, 23, 0x08);
+	rt2x00_bbp_regwrite(rt2x00pci, 24, 0x70);
+	rt2x00_bbp_regwrite(rt2x00pci, 25, 0x40);
+	rt2x00_bbp_regwrite(rt2x00pci, 26, 0x08);
+	rt2x00_bbp_regwrite(rt2x00pci, 27, 0x23);
+	rt2x00_bbp_regwrite(rt2x00pci, 30, 0x10);
+	rt2x00_bbp_regwrite(rt2x00pci, 31, 0x2b);
+	rt2x00_bbp_regwrite(rt2x00pci, 32, 0xb9);
+	rt2x00_bbp_regwrite(rt2x00pci, 34, 0x12);
+	rt2x00_bbp_regwrite(rt2x00pci, 35, 0x50);
+	rt2x00_bbp_regwrite(rt2x00pci, 39, 0xc4);
+	rt2x00_bbp_regwrite(rt2x00pci, 40, 0x02);
+	rt2x00_bbp_regwrite(rt2x00pci, 41, 0x60);
+	rt2x00_bbp_regwrite(rt2x00pci, 53, 0x10);
+	rt2x00_bbp_regwrite(rt2x00pci, 54, 0x18);
+	rt2x00_bbp_regwrite(rt2x00pci, 56, 0x08);
+	rt2x00_bbp_regwrite(rt2x00pci, 57, 0x10);
+	rt2x00_bbp_regwrite(rt2x00pci, 58, 0x08);
+	rt2x00_bbp_regwrite(rt2x00pci, 61, 0x6d);
+	rt2x00_bbp_regwrite(rt2x00pci, 62, 0x10);
+
+	DEBUG("Start reading EEPROM contents...\n");
+	for (counter = 0; counter < EEPROM_BBP_SIZE; counter++) {
+		if (rt2x00pci->eeprom[counter] != 0xffff &&
+		    rt2x00pci->eeprom[counter] != 0x0000) {
+			reg_id = rt2x00_get_field16(rt2x00pci->eeprom[counter],
+						    EEPROM_BBP_REG_ID);
+			value = rt2x00_get_field16(rt2x00pci->eeprom[counter],
+						   EEPROM_BBP_VALUE);
+			DEBUG("BBP reg_id: 0x%02x, value: 0x%02x.\n", reg_id,
+			      value);
+			rt2x00_bbp_regwrite(rt2x00pci, reg_id, value);
+		}
+	}
+	DEBUG("...End of EEPROM contents.\n");
+
+	return 0;
+}
+
+/*
+ * Device radio routines.
+ * When the radio is switched on or off, the TX and RX
+ * should always be reset using the TXCSR0 and RXCSR0 registers.
+ * The radio itself is switched on and off using the PWRCSR0 register.
+ */
+
+static int rt2x00_dev_radio_on(struct _rt2x00_core *core)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+	u32 reg = 0x00000000;
+	int retval;
+
+	if (rt2x00_pci_alloc_rings(core))
+		goto exit_fail;
+
+	rt2x00_clear_ring(rt2x00pci, &rt2x00pci->rx);
+	rt2x00_clear_ring(rt2x00pci, &rt2x00pci->tx);
+
+	rt2x00_init_ring_register(rt2x00pci);
+
+	if (rt2x00_init_registers(rt2x00pci))
+		goto exit_fail;
+
+	rt2x00_init_write_mac(rt2x00pci, core->rtnet_dev);
+
+	if (rt2x00_init_bbp(rt2x00pci))
+		goto exit_fail;
+
+	/*
+     * Clear interrupts.
+     */
+	rt2x00_register_read(rt2x00pci, CSR7, &reg);
+	rt2x00_register_write(rt2x00pci, CSR7, reg);
+
+	/* Register rtdm-irq */
+	retval = rtdm_irq_request(&rt2x00pci->irq_handle, core->rtnet_dev->irq,
+				  rt2x00_interrupt, 0, core->rtnet_dev->name,
+				  core->rtnet_dev);
+	if (retval)
+		goto exit_fail;
+
+	/*
+     * Enable interrupts.
+     */
+	rt2x00_register_read(rt2x00pci, CSR8, &reg);
+	rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, 0);
+	rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
+	rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
+	rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
+	rt2x00_set_field32(&reg, CSR8_RXDONE, 0);
+	rt2x00_register_write(rt2x00pci, CSR8, reg);
+
+	return 0;
+
+exit_fail:
+	rt2x00_pci_free_rings(core);
+
+	return -ENOMEM;
+}
+
+static int rt2x00_dev_radio_off(struct _rt2x00_core *core)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+	u32 reg = 0x00000000;
+	int retval = 0;
+
+	rt2x00_register_write(rt2x00pci, PWRCSR0, cpu_to_le32(0x00000000));
+
+	rt2x00_register_read(rt2x00pci, TXCSR0, &reg);
+	rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
+	rt2x00_register_write(rt2x00pci, TXCSR0, reg);
+
+	rt2x00_register_read(rt2x00pci, RXCSR0, &reg);
+	rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 1);
+	rt2x00_register_write(rt2x00pci, RXCSR0, reg);
+
+	rt2x00_register_read(rt2x00pci, LEDCSR, &reg);
+	rt2x00_set_field32(&reg, LEDCSR_LINK, 0);
+	rt2x00_register_write(rt2x00pci, LEDCSR, reg);
+
+	rt2x00_register_read(rt2x00pci, CSR8, &reg);
+	rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, 1);
+	rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 1);
+	rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 1);
+	rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 1);
+	rt2x00_set_field32(&reg, CSR8_RXDONE, 1);
+	rt2x00_register_write(rt2x00pci, CSR8, reg);
+
+	rt2x00_pci_free_rings(core);
+
+	if ((retval = rtdm_irq_free(&rt2x00pci->irq_handle)) != 0)
+		ERROR("rtdm_irq_free=%d\n", retval);
+
+	rt_stack_disconnect(core->rtnet_dev);
+
+	return retval;
+}
+
+/*
+ * Configuration handlers.
+ */
+
+static void rt2x00_dev_update_autoresp(struct _rt2x00_pci *rt2x00pci,
+				       struct _rt2x00_config *config)
+{
+	u32 reg = 0;
+
+	DEBUG("Start.\n");
+
+	rt2x00_register_read(rt2x00pci, TXCSR1, &reg);
+
+	if (config->config_flags & CONFIG_AUTORESP)
+		rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
+	else
+		rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 0);
+
+	rt2x00_register_write(rt2x00pci, TXCSR1, reg);
+}
+
+static void rt2x00_dev_update_bbpsens(struct _rt2x00_pci *rt2x00pci,
+				      struct _rt2x00_config *config)
+{
+	rt2x00_bbp_regwrite(rt2x00pci, 0x11, config->bbpsens);
+}
+
+static void rt2x00_dev_update_bssid(struct _rt2x00_pci *rt2x00pci,
+				    struct _rt2x00_config *config)
+{
+	u32 reg[2];
+
+	memset(&reg, 0x00, sizeof(reg));
+
+	rt2x00_set_field32(&reg[0], CSR5_BYTE0, config->bssid[0]);
+	rt2x00_set_field32(&reg[0], CSR5_BYTE1, config->bssid[1]);
+	rt2x00_set_field32(&reg[0], CSR5_BYTE2, config->bssid[2]);
+	rt2x00_set_field32(&reg[0], CSR5_BYTE3, config->bssid[3]);
+	rt2x00_set_field32(&reg[1], CSR6_BYTE4, config->bssid[4]);
+	rt2x00_set_field32(&reg[1], CSR6_BYTE5, config->bssid[5]);
+
+	rt2x00_register_multiwrite(rt2x00pci, CSR5, &reg[0], sizeof(reg));
+}
+
+static void rt2x00_dev_update_packet_filter(struct _rt2x00_pci *rt2x00pci,
+					    struct _rt2x00_config *config)
+{
+	u32 reg = 0x00000000;
+
+	DEBUG("Start.\n");
+
+	rt2x00_register_read(rt2x00pci, RXCSR0, &reg);
+
+	rt2x00_set_field32(&reg, RXCSR0_DROP_TODS, 0);
+	rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1);
+	rt2x00_set_field32(&reg, RXCSR0_DROP_CRC, 1);
+	rt2x00_set_field32(&reg, RXCSR0_DROP_PHYSICAL, 1);
+	rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL, 1);
+	rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
+	rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1);
+
+	/*
+     * This looks like a bug, but for an unknown reason the register seems to swap the bits !!!
+     */
+	if (config->config_flags & CONFIG_DROP_BCAST)
+		rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST, 1);
+	else
+		rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST, 0);
+
+	if (config->config_flags & CONFIG_DROP_MCAST)
+		rt2x00_set_field32(&reg, RXCSR0_DROP_BCAST, 1);
+	else
+		rt2x00_set_field32(&reg, RXCSR0_DROP_BCAST, 0);
+
+	rt2x00_register_write(rt2x00pci, RXCSR0, reg);
+}
+
+static void rt2x00_dev_update_channel(struct _rt2x00_pci *rt2x00pci,
+				      struct _rt2x00_config *config)
+{
+	u8 txpower = rt2x00_get_txpower(&rt2x00pci->chip, config->txpower);
+	u32 reg = 0x00000000;
+
+	if (rt2x00_get_rf_value(&rt2x00pci->chip, config->channel,
+				&rt2x00pci->channel)) {
+		ERROR("RF values for chip %04x and channel %d not found.\n",
+		      rt2x00_get_rf(&rt2x00pci->chip), config->channel);
+		return;
+	}
+
+	/*
+     * Set TXpower.
+     */
+	rt2x00_set_field32(&rt2x00pci->channel.rf3, RF3_TXPOWER, txpower);
+
+	/*
+     * For RT2525 we should first set the channel to half band higher.
+     */
+	if (rt2x00_rf(&rt2x00pci->chip, RF2525)) {
+		rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf1);
+		rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf2 +
+						      cpu_to_le32(0x00000020));
+		rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf3);
+		if (rt2x00pci->channel.rf4)
+			rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf4);
+	}
+
+	rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf1);
+	rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf2);
+	rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf3);
+	if (rt2x00pci->channel.rf4)
+		rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf4);
+
+	/*
+     * Channel 14 requires the Japan filter bit to be set.
+     */
+	rt2x00_bbp_regwrite(rt2x00pci, 70,
+			    (config->channel == 14) ? 0x4e : 0x46);
+
+	msleep(1);
+
+	/*
+     * Clear false CRC during channel switch.
+     */
+	rt2x00_register_read(rt2x00pci, CNT0, &reg);
+
+	DEBUG("Switching to channel %d. RF1: 0x%08x, RF2: 0x%08x, RF3: 0x%08x, RF4: 0x%08x.\n",
+	      config->channel, rt2x00pci->channel.rf1, rt2x00pci->channel.rf2,
+	      rt2x00pci->channel.rf3, rt2x00pci->channel.rf4);
+}
+
+static void rt2x00_dev_update_rate(struct _rt2x00_pci *rt2x00pci,
+				   struct _rt2x00_config *config)
+{
+	u32 value = 0x00000000;
+	u32 reg = 0x00000000;
+	u8 counter = 0x00;
+
+	DEBUG("Start.\n");
+
+	rt2x00_register_read(rt2x00pci, TXCSR1, &reg);
+
+	value = config->sifs + (2 * config->slot_time) + config->plcp +
+		get_preamble(config) +
+		get_duration(ACK_SIZE, capabilities.bitrate[0]);
+	rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, value);
+
+	value = config->sifs + config->plcp + get_preamble(config) +
+		get_duration(ACK_SIZE, capabilities.bitrate[0]);
+	rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, value);
+
+	rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, 0x18);
+	rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
+
+	rt2x00_register_write(rt2x00pci, TXCSR1, reg);
+
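+	/* Set one bit per rate up to and including the configured bitrate. */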
+	reg = 0x00000000;
+	for (counter = 0; counter < 12; counter++) {
+		reg |= cpu_to_le32(0x00000001 << counter);
+		if (capabilities.bitrate[counter] == config->bitrate)
+			break;
+	}
+
+	rt2x00_register_write(rt2x00pci, ARCSR1, reg);
+}
+
+static void rt2x00_dev_update_txpower(struct _rt2x00_pci *rt2x00pci,
+				      struct _rt2x00_config *config)
+{
+	u8 txpower = rt2x00_get_txpower(&rt2x00pci->chip, config->txpower);
+
+	DEBUG("Start.\n");
+
+	rt2x00_set_field32(&rt2x00pci->channel.rf3, RF3_TXPOWER, txpower);
+	rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf3);
+}
+
+static void rt2x00_dev_update_antenna(struct _rt2x00_pci *rt2x00pci,
+				      struct _rt2x00_config *config)
+{
+	u32 reg;
+	u8 reg_rx;
+	u8 reg_tx;
+
+	rt2x00_register_read(rt2x00pci, BBPCSR1, &reg);
+	rt2x00_bbp_regread(rt2x00pci, 14, &reg_rx);
+	rt2x00_bbp_regread(rt2x00pci, 2, &reg_tx);
+
+	/* TX antenna select */
+	if (config->antenna_tx == 1) {
+		/* Antenna A */
+		reg_tx = (reg_tx & 0xfc) | 0x00;
+		reg = (reg & 0xfffcfffc) | 0x00;
+	} else if (config->antenna_tx == 2) {
+		/* Antenna B */
+		reg_tx = (reg_tx & 0xfc) | 0x02;
+		reg = (reg & 0xfffcfffc) | 0x00020002;
+	} else {
+		/* Diversity */
+		reg_tx = (reg_tx & 0xfc) | 0x02;
+		reg = (reg & 0xfffcfffc) | 0x00020002;
+	}
+
+	/* RX antenna select */
+	if (config->antenna_rx == 1)
+		reg_rx = (reg_rx & 0xfc) | 0x00;
+	else if (config->antenna_rx == 2)
+		reg_rx = (reg_rx & 0xfc) | 0x02;
+	else
+		reg_rx = (reg_rx & 0xfc) | 0x02;
+
+	/*
+     * RF2525E and RF5222 need the TX I/Q flip.
+     */
+	if (rt2x00_rf(&rt2x00pci->chip, RF5222)) {
+		reg |= 0x00040004;
+		reg_tx |= 0x04;
+	} else if (rt2x00_rf(&rt2x00pci->chip, RF2525E)) {
+		reg |= 0x00040004;
+		reg_tx |= 0x04;
+		/* The RF2525E does not use the RX I/Q flip: clear that bit. */
+		reg_rx &= 0xfb;
+	}
+
+	rt2x00_register_write(rt2x00pci, BBPCSR1, reg);
+	rt2x00_bbp_regwrite(rt2x00pci, 14, reg_rx);
+	rt2x00_bbp_regwrite(rt2x00pci, 2, reg_tx);
+}
+
+static void rt2x00_dev_update_duration(struct _rt2x00_pci *rt2x00pci,
+				       struct _rt2x00_config *config)
+{
+	u32 reg = 0x00000000;
+
+	DEBUG("Start.\n");
+
+	rt2x00_register_read(rt2x00pci, CSR11, &reg);
+	rt2x00_set_field32(&reg, CSR11_CWMIN, 5); /* 2^5 = 32. */
+	rt2x00_set_field32(&reg, CSR11_CWMAX, 10); /* 2^10 = 1024. */
+	rt2x00_set_field32(&reg, CSR11_SLOT_TIME, config->slot_time);
+	rt2x00_set_field32(&reg, CSR11_CW_SELECT, 1);
+	rt2x00_register_write(rt2x00pci, CSR11, reg);
+
+	rt2x00_register_read(rt2x00pci, CSR18, &reg);
+	rt2x00_set_field32(&reg, CSR18_SIFS, config->sifs);
+	rt2x00_set_field32(&reg, CSR18_PIFS, config->sifs + config->slot_time);
+	rt2x00_register_write(rt2x00pci, CSR18, reg);
+
+	rt2x00_register_read(rt2x00pci, CSR19, &reg);
+	rt2x00_set_field32(&reg, CSR19_DIFS,
+			   config->sifs + (2 * config->slot_time));
+	rt2x00_set_field32(&reg, CSR19_EIFS,
+			   config->sifs +
+				   get_duration((IEEE80211_HEADER + ACK_SIZE),
+						capabilities.bitrate[0]));
+	rt2x00_register_write(rt2x00pci, CSR19, reg);
+}
+
+static void rt2x00_dev_update_retry(struct _rt2x00_pci *rt2x00pci,
+				    struct _rt2x00_config *config)
+{
+	u32 reg = 0x00000000;
+
+	rt2x00_register_read(rt2x00pci, CSR11, &reg);
+	rt2x00_set_field32(&reg, CSR11_LONG_RETRY, config->long_retry);
+	rt2x00_set_field32(&reg, CSR11_SHORT_RETRY, config->short_retry);
+	rt2x00_register_write(rt2x00pci, CSR11, reg);
+}
+
+static void rt2x00_dev_update_preamble(struct _rt2x00_pci *rt2x00pci,
+				       struct _rt2x00_config *config)
+{
+	u32 reg[4];
+	u32 preamble = 0x00000000;
+
+	memset(&reg, 0x00, sizeof(reg));
+
+	reg[0] = cpu_to_le32(0x00700400 | preamble); /* ARCSR2 */
+	reg[1] = cpu_to_le32(0x00380401 | preamble); /* ARCSR3 */
+	reg[2] = cpu_to_le32(0x00150402 | preamble); /* ARCSR4 */
+	reg[3] = cpu_to_le32(0x000b8403 | preamble); /* ARCSR5 */
+
+	rt2x00_register_multiwrite(rt2x00pci, ARCSR2, &reg[0], sizeof(reg));
+}
+
+static void rt2x00_dev_update_led(struct _rt2x00_pci *rt2x00pci,
+				  struct _rt2x00_config *config)
+{
+	u32 reg = 0x00000000;
+
+	rt2x00_register_read(rt2x00pci, LEDCSR, &reg);
+	rt2x00_set_field32(&reg, LEDCSR_LINK, config->led_status ? 1 : 0);
+	rt2x00_register_write(rt2x00pci, LEDCSR, reg);
+}
+
+static int rt2x00_dev_update_config(struct _rt2x00_core *core, u16 update_flags)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+
+	DEBUG("Start.\n");
+
+	if (update_flags & UPDATE_BSSID)
+		rt2x00_dev_update_bssid(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_PACKET_FILTER)
+		rt2x00_dev_update_packet_filter(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_CHANNEL)
+		rt2x00_dev_update_channel(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_BITRATE)
+		rt2x00_dev_update_rate(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_TXPOWER)
+		rt2x00_dev_update_txpower(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_ANTENNA)
+		rt2x00_dev_update_antenna(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_DURATION)
+		rt2x00_dev_update_duration(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_RETRY)
+		rt2x00_dev_update_retry(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_PREAMBLE)
+		rt2x00_dev_update_preamble(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_LED_STATUS)
+		rt2x00_dev_update_led(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_AUTORESP)
+		rt2x00_dev_update_autoresp(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_BBPSENS)
+		rt2x00_dev_update_bbpsens(rt2x00pci, &core->config);
+
+	DEBUG("Exit.\n");
+
+	return 0;
+}
+
+/*
+ * Transmission routines.
+ * rt2x00_write_tx_desc will write the txd descriptor.
+ * rt2x00_dev_xmit_packet will copy the packets to the appropriate DMA ring.
+ */
+
+/*
+ * PLCP_SIGNAL, PLCP_SERVICE, PLCP_LENGTH_LOW and PLCP_LENGTH_HIGH are BBP registers.
+ * For RT2560 devices we must not only supply the value we want to write,
+ * but also set the busy bit (0x8000) and the BBP register number (0x0f00);
+ * the value itself is stored in the low byte (0x00ff).
+ * For PLCP_SIGNAL we can optionally enable SHORT_PREAMBLE.
+ * For PLCP_SERVICE we can set the length extension bit according to
+ * 802.11b standard 18.2.3.5.
+ */
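+/*
+ * Worked example for the CCK path in the function below (illustrative only;
+ * it assumes that get_duration() and get_duration_res() return the frame
+ * airtime in whole microseconds and its remainder, which is what the
+ * rounding logic implies): a packet_size of 1500 bytes becomes 1504 bytes
+ * after the fixed 4 byte adjustment, i.e. 1504 * 8 = 12032 bits, which at
+ * 11 Mbps lasts 12032 / 11 = 1093.8 us and is rounded up to 1094 us.  The
+ * PLCP LENGTH field then becomes length_high = 1094 >> 8 = 0x04 and
+ * length_low = 1094 & 0xff = 0x46, packed into descriptor word 3 together
+ * with the SIGNAL and SERVICE bytes via the TXD_W3_* fields.
+ */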
+static void rt2x00_write_tx_desc(struct _rt2x00_pci *rt2x00pci,
+				 struct _txd *txd, u32 packet_size, u16 rate,
+				 u16 xmit_flags)
+{
+	u32 residual = 0x00000000;
+	u32 duration = 0x00000000;
+	u16 signal = 0x0000;
+	u16 service = 0x0000;
+	u16 length_low = 0x0000;
+	u16 length_high = 0x0000;
+
+	rt2x00_set_field32(&txd->word0, TXD_W0_VALID, 1);
+	rt2x00_set_field32(&txd->word0, TXD_W0_DATABYTE_COUNT, packet_size);
+	rt2x00_set_field32(&txd->word0, TXD_W0_ACK,
+			   (xmit_flags & XMIT_ACK) ? 1 : 0);
+	rt2x00_set_field32(&txd->word0, TXD_W0_RETRY_MODE,
+			   (xmit_flags & XMIT_LONG_RETRY) ? 1 : 0);
+	rt2x00_set_field32(&txd->word0, TXD_W0_TIMESTAMP,
+			   (xmit_flags & XMIT_TIMESTAMP) ? 1 : 0);
+	/*
+	 * MORE_FRAG is raised for fragmented frames as well as for frames
+	 * carrying an RTS request; writing the field twice would discard
+	 * the first value, so both conditions are combined here.
+	 */
+	rt2x00_set_field32(&txd->word0, TXD_W0_MORE_FRAG,
+			   (xmit_flags & (XMIT_MORE_FRAGS | XMIT_RTS)) ? 1 : 0);
+	rt2x00_set_field32(&txd->word10, TXD_W10_RTS,
+			   (xmit_flags & XMIT_RTS) ? 1 : 0);
+	rt2x00_set_field32(&txd->word0, TXD_W0_OFDM,
+			   (xmit_flags & XMIT_OFDM) ? 1 : 0);
+
+	packet_size += 4;
+
+	if (xmit_flags & XMIT_OFDM) {
+		/*
+		 * For OFDM the PLCP LENGTH field carries the frame length in
+		 * bytes, split into 6-bit low/high halves.
+		 */
+		length_high = (packet_size >> 6) & 0x3f;
+		length_low = (packet_size & 0x3f);
+	} else {
+		residual = get_duration_res(packet_size, rate);
+		duration = get_duration(packet_size, rate);
+
+		if (residual != 0)
+			duration++;
+
+		length_high = duration >> 8;
+		length_low = duration & 0xff;
+	}
+
+	signal |= 0x8500 | rt2x00_get_plcp(rate);
+	if (xmit_flags & XMIT_SHORT_PREAMBLE)
+		signal |= 0x0008;
+
+	service |= 0x0600 | 0x0004;
+	if (residual <= (8 % 11))
+		service |= 0x0080;
+
+	rt2x00_set_field32(&txd->word3, TXD_W3_PLCP_SIGNAL, signal);
+	rt2x00_set_field32(&txd->word3, TXD_W3_PLCP_SERVICE, service);
+	rt2x00_set_field32(&txd->word3, TXD_W3_PLCP_LENGTH_LOW, length_low);
+	rt2x00_set_field32(&txd->word3, TXD_W3_PLCP_LENGTH_HIGH, length_high);
+
+	/* set XMIT_IFS to XMIT_IFS_NONE */
+	rt2x00_set_field32(&txd->word0, TXD_W0_IFS, XMIT_IFS_NONE);
+
+	/* highest priority */
+	rt2x00_set_field32(&txd->word2, TXD_W2_CWMIN, 1);
+	rt2x00_set_field32(&txd->word2, TXD_W2_CWMAX, 2);
+	rt2x00_set_field32(&txd->word2, TXD_W2_AIFS, 1);
+
+	/*
+     * Set this last; once the owner bit is handed to the NIC the device
+     * may start transmitting the packet.
+     */
+	rt2x00_set_field32(&txd->word0, TXD_W0_OWNER_NIC, 1);
+}
+
+static int rt2x00_dev_xmit_packet(struct _rt2x00_core *core,
+				  struct rtskb *rtskb, u16 rate, u16 xmit_flags)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+	struct _data_ring *ring = NULL;
+	struct _txd *txd = NULL;
+	void *data = NULL;
+	u32 reg = 0x00000000;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&rt2x00pci->lock, context);
+
+	/* load tx-control register */
+	rt2x00_register_read(rt2x00pci, TXCSR0, &reg);
+
+	/* select tx-descriptor ring and prepare xmit */
+	ring = &rt2x00pci->tx;
+	rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1);
+
+	txd = DESC_ADDR(ring);
+	data = DATA_ADDR(ring);
+
+	if (rt2x00_get_field32(txd->word0, TXD_W0_OWNER_NIC) ||
+	    rt2x00_get_field32(txd->word0, TXD_W0_VALID)) {
+		rtdm_lock_put_irqrestore(&rt2x00pci->lock, context);
+		return -ENOMEM;
+	}
+
+	/* get and patch time stamp just before the transmission */
+	if (rtskb->xmit_stamp)
+		*rtskb->xmit_stamp =
+			cpu_to_be64(rtdm_clock_read() + *rtskb->xmit_stamp);
+
+	/* copy rtskb to dma */
+	memcpy(data, rtskb->data, rtskb->len);
+
+	rt2x00_write_tx_desc(rt2x00pci, txd, rtskb->len, rate, xmit_flags);
+	rt2x00_ring_index_inc(ring);
+
+	/* let the device do the rest ... */
+	rt2x00_register_write(rt2x00pci, TXCSR0, reg);
+
+	rtdm_lock_put_irqrestore(&rt2x00pci->lock, context);
+
+	return 0;
+}
+
+/*
+ * PCI device handlers for usage by core module.
+ */
+static struct _rt2x00_dev_handler rt2x00_pci_handler = {
+
+	.dev_module = THIS_MODULE,
+	.dev_probe = rt2x00_dev_probe,
+	.dev_remove = rt2x00_dev_remove,
+	.dev_radio_on = rt2x00_dev_radio_on,
+	.dev_radio_off = rt2x00_dev_radio_off,
+	.dev_update_config = rt2x00_dev_update_config,
+	.dev_register_access = rt2x00_dev_register_access,
+	.dev_xmit_packet = rt2x00_dev_xmit_packet,
+};
+
+int rt2x00_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+{
+	struct rtnet_device *rtnet_dev = NULL;
+	int status = 0x00000000;
+
+	DEBUG("start.\n");
+
+	if (id->driver_data != RT2560) {
+		ERROR("detected device not supported.\n");
+		status = -ENODEV;
+		goto exit;
+	}
+
+	if (pci_enable_device(pci_dev)) {
+		ERROR("enable device failed.\n");
+		status = -EIO;
+		goto exit;
+	}
+
+	pci_set_master(pci_dev);
+
+	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64)) &&
+	    pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+		ERROR("PCI DMA not supported\n");
+		status = -EIO;
+		goto exit_disable_device;
+	}
+
+	if (pci_request_regions(pci_dev, pci_name(pci_dev))) {
+		ERROR("PCI request regions failed.\n");
+		status = -EBUSY;
+		goto exit_disable_device;
+	}
+	INFO("pci_dev->irq=%d\n", pci_dev->irq);
+
+	rtnet_dev = rt2x00_core_probe(&rt2x00_pci_handler, pci_dev,
+				      sizeof(struct _rt2x00_pci));
+
+	if (!rtnet_dev) {
+		ERROR("rtnet_device allocation failed.\n");
+		status = -ENOMEM;
+		goto exit_release_regions;
+	}
+
+	rtnet_dev->irq = pci_dev->irq;
+
+	pci_set_drvdata(pci_dev, rtnet_dev);
+
+	return 0;
+
+exit_release_regions:
+	pci_release_regions(pci_dev);
+
+exit_disable_device:
+	if (status != -EBUSY)
+		pci_disable_device(pci_dev);
+
+exit:
+	return status;
+}
+
+static void rt2x00_pci_remove(struct pci_dev *pci_dev)
+{
+	struct rtnet_device *rtnet_dev = pci_get_drvdata(pci_dev);
+
+	rt2x00_core_remove(rtnet_dev);
+	pci_set_drvdata(pci_dev, NULL);
+	pci_release_regions(pci_dev);
+	pci_disable_device(pci_dev);
+}
+
+/*
+ * RT2500 PCI module information.
+ */
+char version[] = DRV_NAME " - " DRV_VERSION;
+
+struct pci_device_id rt2x00_device_pci_tbl[] = {
+	{ PCI_DEVICE(0x1814, 0x0201),
+	  .driver_data = RT2560 }, /* Ralink 802.11g */
+	{
+		0,
+	}
+};
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION("RTnet rt2500 PCI WLAN driver (PCI Module)");
+MODULE_LICENSE("GPL");
+
+struct pci_driver rt2x00_pci_driver = {
+	.name = DRV_NAME,
+	.id_table = rt2x00_device_pci_tbl,
+	.probe = rt2x00_pci_probe,
+	.remove = rt2x00_pci_remove,
+};
+
+static int __init rt2x00_pci_init(void)
+{
+	rtdm_printk(KERN_INFO "Loading module: %s\n", version);
+	return pci_register_driver(&rt2x00_pci_driver);
+}
+
+static void __exit rt2x00_pci_exit(void)
+{
+	rtdm_printk(KERN_INFO "Unloading module: %s\n", version);
+	pci_unregister_driver(&rt2x00_pci_driver);
+}
+
+module_init(rt2x00_pci_init);
+module_exit(rt2x00_pci_exit);
+++ linux-patched/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.h	2022-03-21 12:58:29.925883735 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00.h	1970-01-01 01:00:00.000000000 +0100
+/* rt2500pci.h
+ *
+ * Copyright (C) 2004 - 2005 rt2x00-2.0.0-b3 SourceForge Project
+ *	                     <http://rt2x00.serialmonkey.com>
+ *               2006        RTnet adaptation by Daniel Gregorek
+ *                           <dxg@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ *	Module: rt2500pci
+ * Abstract: Data structures and registers for the rt2500pci module.
+ * Supported chipsets: RT2560.
+ */
+
+#ifndef RT2500PCI_H
+#define RT2500PCI_H
+
+/*
+ * RT chip defines
+ */
+#define RT2560 0x0201
+
+/*
+ * RF chip defines
+ */
+#define RF2522 0x0200
+#define RF2523 0x0201
+#define RF2524 0x0202
+#define RF2525 0x0203
+#define RF2525E 0x0204
+#define RF5222 0x0210
+
+/*
+ * Control/Status Registers (CSR).
+ */
+#define CSR0 0x0000 /* ASIC revision number. */
+#define CSR1 0x0004 /* System control register. */
+#define CSR2 0x0008 /* System admin status register (invalid). */
+#define CSR3 0x000c /* STA MAC address register 0. */
+#define CSR4 0x0010 /* STA MAC address register 1. */
+#define CSR5 0x0014 /* BSSID register 0. */
+#define CSR6 0x0018 /* BSSID register 1. */
+#define CSR7 0x001c /* Interrupt source register. */
+#define CSR8 0x0020 /* Interrupt mask register. */
+#define CSR9 0x0024 /* Maximum frame length register. */
+#define SECCSR0 0x0028 /* WEP control register. */
+#define CSR11 0x002c /* Back-off control register. */
+#define CSR12 0x0030 /* Synchronization configuration register 0. */
+#define CSR13 0x0034 /* Synchronization configuration register 1. */
+#define CSR14 0x0038 /* Synchronization control register. */
+#define CSR15 0x003c /* Synchronization status register. */
+#define CSR16 0x0040 /* TSF timer register 0. */
+#define CSR17 0x0044 /* TSF timer register 1. */
+#define CSR18 0x0048 /* IFS timer register 0. */
+#define CSR19 0x004c /* IFS timer register 1. */
+#define CSR20 0x0050 /* WakeUp register. */
+#define CSR21 0x0054 /* EEPROM control register. */
+#define CSR22 0x0058 /* CFP Control Register. */
+
+/*
+ * Transmit related CSRs.
+ */
+#define TXCSR0 0x0060 /* TX control register. */
+#define TXCSR1 0x0064 /* TX configuration register. */
+#define TXCSR2 0x0068 /* TX descriptor configuration register. */
+#define TXCSR3 0x006c /* TX Ring Base address register. */
+#define TXCSR4 0x0070 /* TX Atim Ring Base address register. */
+#define TXCSR5 0x0074 /* TX Prio Ring Base address register. */
+#define TXCSR6 0x0078 /* Beacon base address. */
+#define TXCSR7 0x007c /* AutoResponder Control Register. */
+#define TXCSR8 0x0098 /* CCK TX BBP registers. */
+#define TXCSR9 0x0094 /* OFDM TX BBP registers. */
+
+/*
+ * Receive related CSRs.
+ */
+#define RXCSR0 0x0080 /* RX control register. */
+#define RXCSR1 0x0084 /* RX descriptor configuration register. */
+#define RXCSR2 0x0088 /* RX Ring base address register. */
+#define RXCSR3 0x0090 /* BBP ID register 0 */
+#define ARCSR1 0x009c /* Auto Responder PLCP config register 1. */
+
+/*
+ * PCI control CSRs.
+ */
+#define PCICSR 0x008c /* PCI control register. */
+
+/*
+ * Statistic Register.
+ */
+#define CNT0 0x00a0 /* FCS error count. */
+#define TIMECSR2 0x00a8
+#define CNT1 0x00ac /* PLCP error count. */
+#define CNT2 0x00b0 /* long error count. */
+#define TIMECSR3 0x00b4
+#define CNT3 0x00b8 /* CCA false alarm count. */
+#define CNT4 0x00bc /* Rx FIFO overflow count. */
+#define CNT5 0x00c0 /* Tx FIFO underrun count. */
+
+/*
+ * Baseband Control Register.
+ */
+#define PWRCSR0 0x00c4 /* Power mode configuration. */
+#define PSCSR0 0x00c8 /* Power state transition time. */
+#define PSCSR1 0x00cc /* Power state transition time. */
+#define PSCSR2 0x00d0 /* Power state transition time. */
+#define PSCSR3 0x00d4 /* Power state transition time. */
+#define PWRCSR1 0x00d8 /* Manual power control / status. */
+#define TIMECSR 0x00dc /* Timer control. */
+#define MACCSR0 0x00e0 /* MAC configuration. */
+#define MACCSR1 0x00e4 /* MAC configuration. */
+#define RALINKCSR 0x00e8 /* Ralink Auto-reset register. */
+#define BCNCSR 0x00ec /* Beacon interval control register. */
+
+/*
+ * BBP / RF / IF Control Register.
+ */
+#define BBPCSR 0x00f0 /* BBP serial control. */
+#define RFCSR 0x00f4 /* RF serial control. */
+#define LEDCSR 0x00f8 /* LED control register */
+
+#define SECCSR3 0x00fc /* AES control register. */
+
+/*
+ * ASIC pointer information.
+ */
+#define RXPTR 0x0100 /* Current RX ring address. */
+#define TXPTR 0x0104 /* Current Tx ring address. */
+#define PRIPTR 0x0108 /* Current Priority ring address. */
+#define ATIMPTR 0x010c /* Current ATIM ring address. */
+
+#define TXACKCSR0 0x0110 /* TX ACK timeout. */
+#define ACKCNT0 0x0114 /* TX ACK timeout count. */
+#define ACKCNT1 0x0118 /* RX ACK timeout count. */
+
+/*
+ * GPIO and others.
+ */
+#define GPIOCSR 0x0120 /* GPIO. */
+#define FIFOCSR0 0x0128 /* TX FIFO pointer. */
+#define FIFOCSR1 0x012c /* RX FIFO pointer. */
+#define BCNCSR1 0x0130 /* Tx BEACON offset time, unit: 1 usec. */
+#define MACCSR2 0x0134 /* TX_PE to RX_PE delay time, unit: 1 PCI clock cycle. */
+#define TESTCSR 0x0138 /* TEST mode selection register. */
+#define ARCSR2 0x013c /* 1 Mbps ACK/CTS PLCP. */
+#define ARCSR3 0x0140 /* 2 Mbps ACK/CTS PLCP. */
+#define ARCSR4 0x0144 /* 5.5 Mbps ACK/CTS PLCP. */
+#define ARCSR5 0x0148 /* 11 Mbps ACK/CTS PLCP. */
+#define ARTCSR0 0x014c /* ACK/CTS payload consumed time for 1/2/5.5/11 mbps. */
+#define ARTCSR1                                                                \
+	0x0150 /* OFDM ACK/CTS payload consumed time for 6/9/12/18 mbps. */
+#define ARTCSR2                                                                \
+	0x0154 /* OFDM ACK/CTS payload consumed time for 24/36/48/54 mbps. */
+#define SECCSR1 0x0158 /* WEP control register. */
+#define BBPCSR1 0x015c /* BBP TX configuration. */
+#define DBANDCSR0 0x0160 /* Dual band configuration register 0. */
+#define DBANDCSR1 0x0164 /* Dual band configuration register 1. */
+#define BBPPCSR 0x0168 /* BBP Pin control register. */
+#define DBGSEL0 0x016c /* MAC special debug mode selection register 0. */
+#define DBGSEL1 0x0170 /* MAC special debug mode selection register 1. */
+#define BISTCSR 0x0174 /* BBP BIST register. */
+#define MCAST0 0x0178 /* multicast filter register 0. */
+#define MCAST1 0x017c /* multicast filter register 1. */
+#define UARTCSR0 0x0180 /* UART1 TX register. */
+#define UARTCSR1 0x0184 /* UART1 RX register. */
+#define UARTCSR3 0x0188 /* UART1 frame control register. */
+#define UARTCSR4 0x018c /* UART1 buffer control register. */
+#define UART2CSR0 0x0190 /* UART2 TX register. */
+#define UART2CSR1 0x0194 /* UART2 RX register. */
+#define UART2CSR3 0x0198 /* UART2 frame control register. */
+#define UART2CSR4 0x019c /* UART2 buffer control register. */
+
+/*
+ * EEPROM addresses
+ */
+#define EEPROM_ANTENNA 0x10
+#define EEPROM_GEOGRAPHY 0x12
+#define EEPROM_BBP_START 0x13
+#define EEPROM_BBP_END 0x22
+
+#define EEPROM_BBP_SIZE 16
+
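+/*
+ * The FIELD32()/FIELD16() pairs below encode the bit offset and mask of each
+ * register field; they are consumed by helpers such as rt2x00_set_field32()
+ * and rt2x00_get_field32() used throughout rt2500pci.c (the FIELD macros and
+ * the helpers themselves live outside this header).
+ */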
+/*
+ * CSR Registers.
+ * Some values are given in TU, where 1 TU == 1024 us.
+ */
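+/*
+ * For example, the default beacon interval of 100 TU (see CSR12 below)
+ * corresponds to 100 * 1024 us = 102400 us, i.e. roughly 102 ms.
+ */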
+
+/*
+ * CSR1: System control register.
+ */
+#define CSR1_SOFT_RESET                                                        \
+	FIELD32(0, 0x00000001) /* Software reset, 1: reset, 0: normal. */
+#define CSR1_BBP_RESET                                                         \
+	FIELD32(1, 0x00000002) /* Hardware reset, 1: reset, 0, release. */
+#define CSR1_HOST_READY                                                        \
+	FIELD32(2, 0x00000004) /* Host ready after initialization. */
+
+/*
+ * CSR3: STA MAC address register 0.
+ */
+#define CSR3_BYTE0 FIELD32(0, 0x000000ff) /* MAC address byte 0. */
+#define CSR3_BYTE1 FIELD32(8, 0x0000ff00) /* MAC address byte 1. */
+#define CSR3_BYTE2 FIELD32(16, 0x00ff0000) /* MAC address byte 2. */
+#define CSR3_BYTE3 FIELD32(24, 0xff000000) /* MAC address byte 3. */
+
+/*
+ * CSR4: STA MAC address register 1.
+ */
+#define CSR4_BYTE4 FIELD32(0, 0x000000ff) /* MAC address byte 4. */
+#define CSR4_BYTE5 FIELD32(8, 0x0000ff00) /* MAC address byte 5. */
+
+/*
+ * CSR5: BSSID register 0.
+ */
+#define CSR5_BYTE0 FIELD32(0, 0x000000ff) /* BSSID address byte 0. */
+#define CSR5_BYTE1 FIELD32(8, 0x0000ff00) /* BSSID address byte 1. */
+#define CSR5_BYTE2 FIELD32(16, 0x00ff0000) /* BSSID address byte 2. */
+#define CSR5_BYTE3 FIELD32(24, 0xff000000) /* BSSID address byte 3. */
+
+/*
+ * CSR6: BSSID register 1.
+ */
+#define CSR6_BYTE4 FIELD32(0, 0x000000ff) /* BSSID address byte 4. */
+#define CSR6_BYTE5 FIELD32(8, 0x0000ff00) /* BSSID address byte 5. */
+
+/*
+ * CSR7: Interrupt source register.
+ * Write 1 to clear.
+ */
+#define CSR7_TBCN_EXPIRE                                                       \
+	FIELD32(0, 0x00000001) /* beacon timer expired interrupt. */
+#define CSR7_TWAKE_EXPIRE                                                      \
+	FIELD32(1, 0x00000002) /* wakeup timer expired interrupt. */
+#define CSR7_TATIMW_EXPIRE                                                     \
+	FIELD32(2, 0x00000004) /* timer of atim window expired interrupt. */
+#define CSR7_TXDONE_TXRING                                                     \
+	FIELD32(3, 0x00000008) /* tx ring transmit done interrupt. */
+#define CSR7_TXDONE_ATIMRING                                                   \
+	FIELD32(4, 0x00000010) /* atim ring transmit done interrupt. */
+#define CSR7_TXDONE_PRIORING                                                   \
+	FIELD32(5, 0x00000020) /* priority ring transmit done interrupt. */
+#define CSR7_RXDONE FIELD32(6, 0x00000040) /* receive done interrupt. */
+#define CSR7_DECRYPTION_DONE                                                   \
+	FIELD32(7, 0x00000080) /* Decryption done interrupt. */
+#define CSR7_ENCRYPTION_DONE                                                   \
+	FIELD32(8, 0x00000100) /* Encryption done interrupt. */
+#define CSR7_UART1_TX_TRESHOLD                                                 \
+	FIELD32(9, 0x00000200) /* UART1 TX reaches threshold. */
+#define CSR7_UART1_RX_TRESHOLD                                                 \
+	FIELD32(10, 0x00000400) /* UART1 RX reaches threshold. */
+#define CSR7_UART1_IDLE_TRESHOLD                                               \
+	FIELD32(11, 0x00000800) /* UART1 IDLE over threshold. */
+#define CSR7_UART1_TX_BUFF_ERROR                                               \
+	FIELD32(12, 0x00001000) /* UART1 TX buffer error. */
+#define CSR7_UART1_RX_BUFF_ERROR                                               \
+	FIELD32(13, 0x00002000) /* UART1 RX buffer error. */
+#define CSR7_UART2_TX_TRESHOLD                                                 \
+	FIELD32(14, 0x00004000) /* UART2 TX reaches threshold. */
+#define CSR7_UART2_RX_TRESHOLD                                                 \
+	FIELD32(15, 0x00008000) /* UART2 RX reaches threshold. */
+#define CSR7_UART2_IDLE_TRESHOLD                                               \
+	FIELD32(16, 0x00010000) /* UART2 IDLE over threshold. */
+#define CSR7_UART2_TX_BUFF_ERROR                                               \
+	FIELD32(17, 0x00020000) /* UART2 TX buffer error. */
+#define CSR7_UART2_RX_BUFF_ERROR                                               \
+	FIELD32(18, 0x00040000) /* UART2 RX buffer error. */
+#define CSR7_TIMER_CSR3_EXPIRE                                                 \
+	FIELD32(19,                                                            \
+		0x00080000) /* TIMECSR3 timer expired (802.1H quiet period). */
+
+/*
+ * CSR8: Interrupt mask register.
+ * Write 1 to mask interrupt.
+ */
+#define CSR8_TBCN_EXPIRE                                                       \
+	FIELD32(0, 0x00000001) /* beacon timer expired interrupt. */
+#define CSR8_TWAKE_EXPIRE                                                      \
+	FIELD32(1, 0x00000002) /* wakeup timer expired interrupt. */
+#define CSR8_TATIMW_EXPIRE                                                     \
+	FIELD32(2, 0x00000004) /* timer of atim window expired interrupt. */
+#define CSR8_TXDONE_TXRING                                                     \
+	FIELD32(3, 0x00000008) /* tx ring transmit done interrupt. */
+#define CSR8_TXDONE_ATIMRING                                                   \
+	FIELD32(4, 0x00000010) /* atim ring transmit done interrupt. */
+#define CSR8_TXDONE_PRIORING                                                   \
+	FIELD32(5, 0x00000020) /* priority ring transmit done interrupt. */
+#define CSR8_RXDONE FIELD32(6, 0x00000040) /* receive done interrupt. */
+#define CSR8_DECRYPTION_DONE                                                   \
+	FIELD32(7, 0x00000080) /* Decryption done interrupt. */
+#define CSR8_ENCRYPTION_DONE                                                   \
+	FIELD32(8, 0x00000100) /* Encryption done interrupt. */
+#define CSR8_UART1_TX_TRESHOLD                                                 \
+	FIELD32(9, 0x00000200) /* UART1 TX reaches threshold. */
+#define CSR8_UART1_RX_TRESHOLD                                                 \
+	FIELD32(10, 0x00000400) /* UART1 RX reaches threshold. */
+#define CSR8_UART1_IDLE_TRESHOLD                                               \
+	FIELD32(11, 0x00000800) /* UART1 IDLE over threshold. */
+#define CSR8_UART1_TX_BUFF_ERROR                                               \
+	FIELD32(12, 0x00001000) /* UART1 TX buffer error. */
+#define CSR8_UART1_RX_BUFF_ERROR                                               \
+	FIELD32(13, 0x00002000) /* UART1 RX buffer error. */
+#define CSR8_UART2_TX_TRESHOLD                                                 \
+	FIELD32(14, 0x00004000) /* UART2 TX reaches threshold. */
+#define CSR8_UART2_RX_TRESHOLD                                                 \
+	FIELD32(15, 0x00008000) /* UART2 RX reaches threshold. */
+#define CSR8_UART2_IDLE_TRESHOLD                                               \
+	FIELD32(16, 0x00010000) /* UART2 IDLE over threshold. */
+#define CSR8_UART2_TX_BUFF_ERROR                                               \
+	FIELD32(17, 0x00020000) /* UART2 TX buffer error. */
+#define CSR8_UART2_RX_BUFF_ERROR                                               \
+	FIELD32(18, 0x00040000) /* UART2 RX buffer error. */
+#define CSR8_TIMER_CSR3_EXPIRE                                                 \
+	FIELD32(19,                                                            \
+		0x00080000) /* TIMECSR3 timer expired (802.1H quiet period). */
+
+/*
+ * CSR9: Maximum frame length register.
+ */
+#define CSR9_MAX_FRAME_UNIT                                                    \
+	FIELD32(7,                                                             \
+		0x00000f80) /* maximum frame length in 128b unit, default: 12. */
+
+/*
+ * SECCSR0: WEP control register.
+ */
+#define SECCSR0_KICK_DECRYPT                                                   \
+	FIELD32(0, 0x00000001) /* Kick decryption engine, self-clear. */
+#define SECCSR0_ONE_SHOT                                                       \
+	FIELD32(1, 0x00000002) /* 0: ring mode, 1: One shot only mode. */
+#define SECCSR0_DESC_ADDRESS                                                   \
+	FIELD32(2, 0xfffffffc) /* Descriptor physical address of frame. */
+
+/*
+ * CSR11: Back-off control register.
+ */
+#define CSR11_CWMIN                                                            \
+	FIELD32(0, 0x0000000f) /* CWmin. Default cwmin is 31 (2^5 - 1). */
+#define CSR11_CWMAX                                                            \
+	FIELD32(4, 0x000000f0) /* CWmax. Default cwmax is 1023 (2^10 - 1). */
+#define CSR11_SLOT_TIME                                                        \
+	FIELD32(8, 0x00001f00) /* slot time, default is 20us for 802.11b */
+#define CSR11_CW_SELECT                                                        \
+	FIELD32(13,                                                            \
+		0x00002000) /* CWmin/CWmax selection, 1: Register, 0: TXD. */
+#define CSR11_LONG_RETRY FIELD32(16, 0x00ff0000) /* long retry count. */
+#define CSR11_SHORT_RETRY FIELD32(24, 0xff000000) /* short retry count. */
+
+/*
+ * CSR12: Synchronization configuration register 0.
+ * All units in 1/16 TU.
+ */
+#define CSR12_BEACON_INTERVAL                                                  \
+	FIELD32(0, 0x0000ffff) /* beacon interval, default is 100 TU. */
+#define CSR12_CFPMAX_DURATION                                                  \
+	FIELD32(16, 0xffff0000) /* cfp maximum duration, default is 100 TU. */
+
+/*
+ * CSR13: Synchronization configuration register 1.
+ * All units in 1/16 TU.
+ */
+#define CSR13_ATIMW_DURATION FIELD32(0, 0x0000ffff) /* atim window duration. */
+#define CSR13_CFP_PERIOD                                                       \
+	FIELD32(16, 0x00ff0000) /* cfp period, default is 0 TU. */
+
+/*
+ * CSR14: Synchronization control register.
+ */
+#define CSR14_TSF_COUNT FIELD32(0, 0x00000001) /* enable tsf auto counting. */
+#define CSR14_TSF_SYNC                                                         \
+	FIELD32(1,                                                             \
+		0x00000006) /* tsf sync, 0: disable, 1: infra, 2: ad-hoc mode. */
+#define CSR14_TBCN FIELD32(3, 0x00000008) /* enable tbcn with reload value. */
+#define CSR14_TCFP                                                             \
+	FIELD32(4, 0x00000010) /* enable tcfp & cfp / cp switching. */
+#define CSR14_TATIMW                                                           \
+	FIELD32(5, 0x00000020) /* enable tatimw & atim window switching. */
+#define CSR14_BEACON_GEN FIELD32(6, 0x00000040) /* enable beacon generator. */
+#define CSR14_CFP_COUNT_PRELOAD                                                \
+	FIELD32(8, 0x0000ff00) /* cfp count preload value. */
+#define CSR14_TBCM_PRELOAD                                                     \
+	FIELD32(16, 0xffff0000) /* tbcn preload value in units of 64us. */
+
+/*
+ * CSR15: Synchronization status register.
+ */
+#define CSR15_CFP                                                              \
+	FIELD32(0, 0x00000001) /* ASIC is in contention-free period. */
+#define CSR15_ATIMW FIELD32(1, 0x00000002) /* ASIC is in ATIM window. */
+#define CSR15_BEACON_SENT FIELD32(2, 0x00000004) /* Beacon has been sent. */
+
+/*
+ * CSR16: TSF timer register 0.
+ */
+#define CSR16_LOW_TSFTIMER FIELD32(0, 0xffffffff)
+
+/*
+ * CSR17: TSF timer register 1.
+ */
+#define CSR17_HIGH_TSFTIMER FIELD32(0, 0xffffffff)
+
+/*
+ * CSR18: IFS timer register 0.
+ */
+#define CSR18_SIFS FIELD32(0, 0x000001ff) /* sifs, default is 10 us. */
+#define CSR18_PIFS FIELD32(16, 0x01f00000) /* pifs, default is 30 us. */
+
+/*
+ * CSR19: IFS timer register 1.
+ */
+#define CSR19_DIFS FIELD32(0, 0x0000ffff) /* difs, default is 50 us. */
+#define CSR19_EIFS FIELD32(16, 0xffff0000) /* eifs, default is 364 us. */
+
+/*
+ * CSR20: Wakeup timer register.
+ */
+#define CSR20_DELAY_AFTER_TBCN                                                 \
+	FIELD32(0,                                                             \
+		0x0000ffff) /* delay after tbcn expired in units of 1/16 TU. */
+#define CSR20_TBCN_BEFORE_WAKEUP                                               \
+	FIELD32(16, 0x00ff0000) /* number of beacon before wakeup. */
+#define CSR20_AUTOWAKE                                                         \
+	FIELD32(24, 0x01000000) /* enable auto wakeup / sleep mechanism. */
+
+/*
+ * CSR21: EEPROM control register.
+ */
+#define CSR21_RELOAD                                                           \
+	FIELD32(0, 0x00000001) /* Write 1 to reload eeprom content. */
+#define CSR21_EEPROM_DATA_CLOCK FIELD32(1, 0x00000002)
+#define CSR21_EEPROM_CHIP_SELECT FIELD32(2, 0x00000004)
+#define CSR21_EEPROM_DATA_IN FIELD32(3, 0x00000008)
+#define CSR21_EEPROM_DATA_OUT FIELD32(4, 0x00000010)
+#define CSR21_TYPE_93C46 FIELD32(5, 0x00000020) /* 1: 93c46, 0:93c66. */
+
+/*
+ * CSR22: CFP control register.
+ */
+#define CSR22_CFP_DURATION_REMAIN                                              \
+	FIELD32(0, 0x0000ffff) /* cfp duration remain, in units of TU. */
+#define CSR22_RELOAD_CFP_DURATION                                              \
+	FIELD32(16, 0x00010000) /* Write 1 to reload cfp duration remain. */
+
+/*
+ * TX / RX Registers.
+ * Some values are given in TU, where 1 TU == 1024 us.
+ */
+
+/*
+ * TXCSR0: TX Control Register.
+ */
+#define TXCSR0_KICK_TX FIELD32(0, 0x00000001) /* kick tx ring. */
+#define TXCSR0_KICK_ATIM FIELD32(1, 0x00000002) /* kick atim ring. */
+#define TXCSR0_KICK_PRIO FIELD32(2, 0x00000004) /* kick priority ring. */
+#define TXCSR0_ABORT                                                           \
+	FIELD32(3, 0x00000008) /* abort all transmit related ring operation. */
+
+/*
+ * TXCSR1: TX Configuration Register.
+ */
+#define TXCSR1_ACK_TIMEOUT                                                     \
+	FIELD32(0,                                                             \
+		0x000001ff) /* ack timeout, default = sifs + 2*slottime + acktime @ 1mbps. */
+#define TXCSR1_ACK_CONSUME_TIME                                                \
+	FIELD32(9,                                                             \
+		0x0003fe00) /* ack consume time, default = sifs + acktime @ 1mbps. */
+#define TXCSR1_TSF_OFFSET FIELD32(18, 0x00fc0000) /* insert tsf offset. */
+#define TXCSR1_AUTORESPONDER                                                   \
+	FIELD32(24,                                                            \
+		0x01000000) /* enable auto responder which include ack & cts. */
+
+/*
+ * TXCSR2: Tx descriptor configuration register.
+ */
+#define TXCSR2_TXD_SIZE                                                        \
+	FIELD32(0, 0x000000ff) /* tx descriptor size, default is 48. */
+#define TXCSR2_NUM_TXD FIELD32(8, 0x0000ff00) /* number of txd in ring. */
+#define TXCSR2_NUM_ATIM FIELD32(16, 0x00ff0000) /* number of atim in ring. */
+#define TXCSR2_NUM_PRIO                                                        \
+	FIELD32(24, 0xff000000) /* number of priority in ring. */
+
+/*
+ * TXCSR3: TX Ring Base address register.
+ */
+#define TXCSR3_TX_RING_REGISTER FIELD32(0, 0xffffffff)
+
+/*
+ * TXCSR4: TX Atim Ring Base address register.
+ */
+#define TXCSR4_ATIM_RING_REGISTER FIELD32(0, 0xffffffff)
+
+/*
+ * TXCSR5: TX Prio Ring Base address register.
+ */
+#define TXCSR5_PRIO_RING_REGISTER FIELD32(0, 0xffffffff)
+
+/*
+ * TXCSR6: Beacon Base address register.
+ */
+#define TXCSR6_BEACON_REGISTER FIELD32(0, 0xffffffff)
+
+/*
+ * TXCSR7: Auto responder control register.
+ */
+#define TXCSR7_AR_POWERMANAGEMENT                                              \
+	FIELD32(0, 0x00000001) /* auto responder power management bit. */
+
+/*
+ * TXCSR8: CCK Tx BBP register.
+ */
+#define TXCSR8_CCK_SIGNAL                                                      \
+	FIELD32(0, 0x000000ff) /* BBP rate field address for CCK. */
+#define TXCSR8_CCK_SERVICE                                                     \
+	FIELD32(8, 0x0000ff00) /* BBP service field address for CCK. */
+#define TXCSR8_CCK_LENGTH_LOW                                                  \
+	FIELD32(16, 0x00ff0000) /* BBP length low byte address for CCK. */
+#define TXCSR8_CCK_LENGTH_HIGH                                                 \
+	FIELD32(24, 0xff000000) /* BBP length high byte address for CCK. */
+
+/* 
+ * TXCSR9: OFDM TX BBP registers
+ */
+#define TXCSR9_OFDM_RATE                                                       \
+	FIELD32(0, 0x000000ff) /* BBP rate field address for OFDM. */
+#define TXCSR9_OFDM_SERVICE                                                    \
+	FIELD32(8, 0x0000ff00) /* BBP service field address for OFDM. */
+#define TXCSR9_OFDM_LENGTH_LOW                                                 \
+	FIELD32(16, 0x00ff0000) /* BBP length low byte address for OFDM. */
+#define TXCSR9_OFDM_LENGTH_HIGH                                                \
+	FIELD32(24, 0xff000000) /* BBP length high byte address for OFDM. */
+
+/*
+ * RXCSR0: RX Control Register.
+ */
+#define RXCSR0_DISABLE_RX FIELD32(0, 0x00000001) /* disable rx engine. */
+#define RXCSR0_DROP_CRC FIELD32(1, 0x00000002) /* drop crc error. */
+#define RXCSR0_DROP_PHYSICAL FIELD32(2, 0x00000004) /* drop physical error. */
+#define RXCSR0_DROP_CONTROL FIELD32(3, 0x00000008) /* drop control frame. */
+#define RXCSR0_DROP_NOT_TO_ME                                                  \
+	FIELD32(4, 0x00000010) /* drop not to me unicast frame. */
+#define RXCSR0_DROP_TODS                                                       \
+	FIELD32(5, 0x00000020) /* drop frame tods bit is true. */
+#define RXCSR0_DROP_VERSION_ERROR                                              \
+	FIELD32(6, 0x00000040) /* drop version error frame. */
+#define RXCSR0_PASS_CRC                                                        \
+	FIELD32(7, 0x00000080) /* pass all packets with crc attached. */
+#define RXCSR0_PASS_PLCP                                                       \
+	FIELD32(8,                                                             \
+		0x00000100) /* Pass all packets with 4 bytes PLCP attached. */
+#define RXCSR0_DROP_MCAST FIELD32(9, 0x00000200) /* Drop multicast frames. */
+#define RXCSR0_DROP_BCAST FIELD32(10, 0x00000400) /* Drop broadcast frames. */
+#define RXCSR0_ENABLE_QOS                                                      \
+	FIELD32(11, 0x00000800) /* Accept QOS data frame and parse QOS field. */
+
+/*
+ * RXCSR1: RX descriptor configuration register.
+ */
+#define RXCSR1_RXD_SIZE                                                        \
+	FIELD32(0, 0x000000ff) /* rx descriptor size, default is 32b. */
+#define RXCSR1_NUM_RXD FIELD32(8, 0x0000ff00) /* number of rxd in ring. */
+
+/*
+ * RXCSR2: RX Ring base address register.
+ */
+#define RXCSR2_RX_RING_REGISTER FIELD32(0, 0xffffffff)
+
+/*
+ * RXCSR3: BBP ID register for Rx operation.
+ */
+#define RXCSR3_BBP_ID0 FIELD32(0, 0x0000007f) /* bbp register 0 id. */
+#define RXCSR3_BBP_ID0_VALID                                                   \
+	FIELD32(7, 0x00000080) /* bbp register 0 id is valid or not. */
+#define RXCSR3_BBP_ID1 FIELD32(8, 0x00007f00) /* bbp register 1 id. */
+#define RXCSR3_BBP_ID1_VALID                                                   \
+	FIELD32(15, 0x00008000) /* bbp register 1 id is valid or not. */
+#define RXCSR3_BBP_ID2 FIELD32(16, 0x007f0000) /* bbp register 2 id. */
+#define RXCSR3_BBP_ID2_VALID                                                   \
+	FIELD32(23, 0x00800000) /* bbp register 2 id is valid or not. */
+#define RXCSR3_BBP_ID3 FIELD32(24, 0x7f000000) /* bbp register 3 id. */
+#define RXCSR3_BBP_ID3_VALID                                                   \
+	FIELD32(31, 0x80000000) /* bbp register 3 id is valid or not. */
+
+/*
+ * ARCSR1: Auto Responder PLCP config register 1.
+ */
+#define ARCSR1_AR_BBP_DATA2                                                    \
+	FIELD32(0, 0x000000ff) /* Auto responder BBP register 2 data. */
+#define ARCSR1_AR_BBP_ID2                                                      \
+	FIELD32(8, 0x0000ff00) /* Auto responder BBP register 2 Id. */
+#define ARCSR1_AR_BBP_DATA3                                                    \
+	FIELD32(16, 0x00ff0000) /* Auto responder BBP register 3 data. */
+#define ARCSR1_AR_BBP_ID3                                                      \
+	FIELD32(24, 0xff000000) /* Auto responder BBP register 3 Id. */
+
+/*
+ * Miscellaneous Registers.
+ * Some values are given in TU, where 1 TU == 1024 us.
+ */
+
+/*
+ * PCISR: PCI control register.
+ */
+#define PCICSR_BIG_ENDIAN                                                      \
+	FIELD32(0, 0x00000001) /* 1: big endian, 0: little endian. */
+#define PCICSR_RX_TRESHOLD                                                     \
+	FIELD32(1, 0x00000006) /* rx threshold in dw to start pci access */
+/* 0: 16dw (default), 1: 8dw, 2: 4dw, 3: 32dw. */
+#define PCICSR_TX_TRESHOLD                                                     \
+	FIELD32(3, 0x00000018) /* tx threshold in dw to start pci access */
+/* 0: 0dw (default), 1: 1dw, 2: 4dw, 3: forward. */
+#define PCICSR_BURST_LENTH FIELD32(5, 0x00000060) /* pci burst length */
+/* 0: 4dw (default), 1: 8dw, 2: 16dw, 3: 32dw. */
+#define PCICSR_ENABLE_CLK FIELD32(7, 0x00000080) /* enable clk_run, */
+/* PCI clock cannot drop to the non-operational state. */
+#define PCICSR_READ_MULTIPLE                                                   \
+	FIELD32(8, 0x00000100) /* Enable memory read multiple. */
+#define PCICSR_WRITE_INVALID                                                   \
+	FIELD32(9, 0x00000200) /* Enable memory write & invalid. */
+
+/*
+ * PWRCSR1: Manual power control / status register.
+ * state: 0 deep_sleep, 1: sleep, 2: standby, 3: awake.
+ */
+#define PWRCSR1_SET_STATE                                                      \
+	FIELD32(0,                                                             \
+		0x00000001) /* set state. Write 1 to trigger, self cleared. */
+#define PWRCSR1_BBP_DESIRE_STATE FIELD32(1, 0x00000006) /* BBP desired state. */
+#define PWRCSR1_RF_DESIRE_STATE FIELD32(3, 0x00000018) /* RF desired state. */
+#define PWRCSR1_BBP_CURR_STATE FIELD32(5, 0x00000060) /* BBP current state. */
+#define PWRCSR1_RF_CURR_STATE FIELD32(7, 0x00000180) /* RF current state. */
+#define PWRCSR1_PUT_TO_SLEEP                                                   \
+	FIELD32(9,                                                             \
+		0x00000200) /* put to sleep. Write 1 to trigger, self cleared. */
+
+/*
+ * TIMECSR: Timer control register.
+ */
+#define TIMECSR_US_COUNT                                                       \
+	FIELD32(0, 0x000000ff) /* 1 us timer count in units of clock cycles. */
+#define TIMECSR_US_64_COUNT                                                    \
+	FIELD32(8, 0x0000ff00) /* 64 us timer count in units of 1 us timer. */
+#define TIMECSR_BEACON_EXPECT                                                  \
+	FIELD32(16, 0x00070000) /* Beacon expect window. */
+
+/*
+ * MACCSR1: MAC configuration register 1.
+ */
+#define MACCSR1_KICK_RX                                                        \
+	FIELD32(0, 0x00000001) /* kick one-shot rx in one-shot rx mode. */
+#define MACCSR1_ONESHOT_RXMODE                                                 \
+	FIELD32(1, 0x00000002) /* enable one-shot rx mode for debugging. */
+#define MACCSR1_BBPRX_RESET_MODE                                               \
+	FIELD32(2, 0x00000004) /* ralink bbp rx reset mode. */
+#define MACCSR1_AUTO_TXBBP                                                     \
+	FIELD32(3, 0x00000008) /* auto tx logic access bbp control register. */
+#define MACCSR1_AUTO_RXBBP                                                     \
+	FIELD32(4, 0x00000010) /* auto rx logic access bbp control register. */
+#define MACCSR1_LOOPBACK FIELD32(5, 0x00000060) /* loopback mode. */
+/* 0: normal, 1: internal, 2: external, 3:rsvd. */
+#define MACCSR1_INTERSIL_IF                                                    \
+	FIELD32(7, 0x00000080) /* intersil if calibration pin. */
+
+/*
+ * RALINKCSR: Ralink Rx auto-reset BBCR.
+ */
+#define RALINKCSR_AR_BBP_DATA0                                                 \
+	FIELD32(0, 0x000000ff) /* auto reset bbp register 0 data. */
+#define RALINKCSR_AR_BBP_ID0                                                   \
+	FIELD32(8, 0x00007f00) /* auto reset bbp register 0 id. */
+#define RALINKCSR_AR_BBP_VALID0                                                \
+	FIELD32(15, 0x00008000) /* auto reset bbp register 0 valid. */
+#define RALINKCSR_AR_BBP_DATA1                                                 \
+	FIELD32(16, 0x00ff0000) /* auto reset bbp register 1 data. */
+#define RALINKCSR_AR_BBP_ID1                                                   \
+	FIELD32(24, 0x7f000000) /* auto reset bbp register 1 id. */
+#define RALINKCSR_AR_BBP_VALID1                                                \
+	FIELD32(31, 0x80000000) /* auto reset bbp register 1 valid. */
+
+/*
+ * BCNCSR: Beacon interval control register.
+ */
+#define BCNCSR_CHANGE                                                          \
+	FIELD32(0, 0x00000001) /* write one to change beacon interval. */
+#define BCNCSR_DELTATIME FIELD32(1, 0x0000001e) /* the delta time value. */
+#define BCNCSR_NUM_BEACON                                                      \
+	FIELD32(5, 0x00001fe0) /* number of beacon according to mode. */
+#define BCNCSR_MODE FIELD32(13, 0x00006000) /* please refer to asic specs. */
+#define BCNCSR_PLUS                                                            \
+	FIELD32(15, 0x00008000) /* plus or minus delta time value. */
+
+/*
+ * BBPCSR: BBP serial control register.
+ */
+#define BBPCSR_VALUE                                                           \
+	FIELD32(0, 0x000000ff) /* register value to program into bbp. */
+#define BBPCSR_REGNUM FIELD32(8, 0x00007f00) /* selected bbp register. */
+#define BBPCSR_BUSY                                                            \
+	FIELD32(15, 0x00008000) /* 1: asic is busy execute bbp programming. */
+#define BBPCSR_WRITE_CONTROL                                                   \
+	FIELD32(16, 0x00010000) /* 1: write bbp, 0: read bbp. */
+
+/*
+ * RFCSR: RF serial control register.
+ */
+#define RFCSR_VALUE                                                            \
+	FIELD32(0, 0x00ffffff) /* register value + id to program into rf/if. */
+#define RFCSR_NUMBER_OF_BITS                                                   \
+	FIELD32(24,                                                            \
+		0x1f000000) /* number of bits used in value (i:20, rfmd:22). */
+#define RFCSR_IF_SELECT                                                        \
+	FIELD32(29, 0x20000000) /* chip to program: 0: rf, 1: if. */
+#define RFCSR_PLL_LD FIELD32(30, 0x40000000) /* rf pll_ld status. */
+#define RFCSR_BUSY                                                             \
+	FIELD32(31, 0x80000000) /* 1: asic is busy execute rf programming. */
+
+/*
+ * LEDCSR: LED control register.
+ */
+#define LEDCSR_ON_PERIOD FIELD32(0, 0x000000ff) /* on period, default 70ms. */
+#define LEDCSR_OFF_PERIOD FIELD32(8, 0x0000ff00) /* off period, default 30ms. */
+#define LEDCSR_LINK FIELD32(16, 0x00010000) /* 0: linkoff, 1: linkup. */
+#define LEDCSR_ACTIVITY FIELD32(17, 0x00020000) /* 0: idle, 1: active. */
+#define LEDCSR_LINK_POLARITY                                                   \
+	FIELD32(18, 0x00040000) /* 0: active low, 1: active high. */
+#define LEDCSR_ACTIVITY_POLARITY                                               \
+	FIELD32(19, 0x00080000) /* 0: active low, 1: active high. */
+#define LEDCSR_LED_DEFAULT                                                     \
+	FIELD32(20, 0x00100000) /* LED state for "enable" 0: ON, 1: OFF. */
+
+/*
+ * GPIOCSR: GPIO control register.
+ */
+#define GPIOCSR_BIT0 FIELD32(0, 0x00000001)
+#define GPIOCSR_BIT1 FIELD32(1, 0x00000002)
+#define GPIOCSR_BIT2 FIELD32(2, 0x00000004)
+#define GPIOCSR_BIT3 FIELD32(3, 0x00000008)
+#define GPIOCSR_BIT4 FIELD32(4, 0x00000010)
+#define GPIOCSR_BIT5 FIELD32(5, 0x00000020)
+#define GPIOCSR_BIT6 FIELD32(6, 0x00000040)
+#define GPIOCSR_BIT7 FIELD32(7, 0x00000080)
+#define GPIOCSR_DIR0 FIELD32(8, 0x00000100)
+#define GPIOCSR_DIR1 FIELD32(9, 0x00000200)
+#define GPIOCSR_DIR2 FIELD32(10, 0x00000400)
+#define GPIOCSR_DIR3 FIELD32(11, 0x00000800)
+#define GPIOCSR_DIR4 FIELD32(12, 0x00001000)
+#define GPIOCSR_DIR5 FIELD32(13, 0x00002000)
+#define GPIOCSR_DIR6 FIELD32(14, 0x00004000)
+#define GPIOCSR_DIR7 FIELD32(15, 0x00008000)
+
+/*
+ * BCNCSR1: Tx BEACON offset time control register.
+ */
+#define BCNCSR1_PRELOAD                                                        \
+	FIELD32(0, 0x0000ffff) /* beacon timer offset in units of usec. */
+#define BCNCSR1_BEACON_CWMIN FIELD32(16, 0x000f0000) /* 2^CwMin. */
+
+/*
+ * MACCSR2: TX_PE to RX_PE turn-around time control register
+ */
+#define MACCSR2_DELAY                                                          \
+	FIELD32(0,                                                             \
+		0x000000ff) /* RX_PE low width, in units of pci clock cycle. */
+
+/*
+ * SECCSR1_RT2509: WEP control register 
+ */
+#define SECCSR1_KICK_ENCRYPT                                                   \
+	FIELD32(0, 0x00000001) /* Kick encryption engine, self-clear. */
+#define SECCSR1_ONE_SHOT                                                       \
+	FIELD32(1, 0x00000002) /* 0: ring mode, 1: One shot only mode. */
+#define SECCSR1_DESC_ADDRESS                                                   \
+	FIELD32(2, 0xfffffffc) /* Descriptor physical address of frame. */
+
+/*
+ * RF registers
+ */
+#define RF1_TUNER FIELD32(17, 0x00020000)
+#define RF3_TUNER FIELD32(8, 0x00000100)
+#define RF3_TXPOWER FIELD32(9, 0x00003e00)
+
+/*
+ * EEPROM content format.
+ * The wordsize of the EEPROM is 16 bits.
+ */
+
+/*
+ * EEPROM operation defines.
+ */
+#define EEPROM_WIDTH_93c46 6
+#define EEPROM_WIDTH_93c66 8
+#define EEPROM_WRITE_OPCODE 0x05
+#define EEPROM_READ_OPCODE 0x06
+
+/*
+ * EEPROM antenna.
+ */
+#define EEPROM_ANTENNA_NUM FIELD16(0, 0x0003) /* Number of antennas. */
+#define EEPROM_ANTENNA_TX_DEFAULT                                              \
+	FIELD16(2, 0x000c) /* Default antenna 0: diversity, 1: A, 2: B. */
+#define EEPROM_ANTENNA_RX_DEFAULT                                              \
+	FIELD16(4, 0x0030) /* Default antenna 0: diversity, 1: A, 2: B. */
+#define EEPROM_ANTENNA_LED_MODE                                                \
+	FIELD16(6, 0x01c0) /* 0: default, 1: TX/RX activity, */
+/* 2: Single LED (ignore link), 3: reserved. */
+#define EEPROM_ANTENNA_DYN_TXAGC                                               \
+	FIELD16(9, 0x0200) /* Dynamic TX AGC control. */
+#define EEPROM_ANTENNA_HARDWARE_RADIO                                          \
+	FIELD16(10, 0x0400) /* 1: Hardware controlled radio. Read GPIO0. */
+#define EEPROM_ANTENNA_RF_TYPE                                                 \
+	FIELD16(11, 0xf800) /* rf_type of this adapter. */
+
+/*
+ * EEPROM geography.
+ */
+#define EEPROM_GEOGRAPHY_GEO                                                   \
+	FIELD16(8, 0x0f00) /* Default geography setting for device. */
+
+/*
+ * EEPROM NIC config.
+ */
+#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0, 0x0001) /* 0: enable, 1: disable. */
+#define EEPROM_NIC_DYN_BBP_TUNE FIELD16(1, 0x0002) /* 0: enable, 1: disable. */
+#define EEPROM_NIC_CCK_TX_POWER                                                \
+	FIELD16(2, 0x000c) /* CCK TX power compensation. */
+
+/*
+ * EEPROM TX power.
+ */
+#define EEPROM_TX_POWER1 FIELD16(0, 0x00ff)
+#define EEPROM_TX_POWER2 FIELD16(8, 0xff00)
+
+/*
+ * EEPROM BBP.
+ */
+#define EEPROM_BBP_VALUE FIELD16(0, 0x00ff)
+#define EEPROM_BBP_REG_ID FIELD16(8, 0xff00)
+
+/*
+ * EEPROM VERSION.
+ */
+#define EEPROM_VERSION_FAE FIELD16(0, 0x00ff) /* FAE release number. */
+#define EEPROM_VERSION FIELD16(8, 0xff00)
+
+/*
+ * DMA ring defines and data structures.
+ */
+
+/*
+ * Size of a single descriptor.
+ */
+#define SIZE_DESCRIPTOR 48
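+/*
+ * Note that the _txd and _rxd layouts below cover eleven 32-bit words
+ * (44 bytes), while each ring slot is allocated SIZE_DESCRIPTOR (48) bytes;
+ * the trailing 4 bytes of every descriptor are left untouched by this driver.
+ */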
+
+/*
+ * TX descriptor format for TX, PRIO, ATIM and Beacon Ring.
+ */
+struct _txd {
+	u32 word0;
+#define TXD_W0_OWNER_NIC FIELD32(0, 0x00000001)
+#define TXD_W0_VALID FIELD32(1, 0x00000002)
+#define TXD_W0_RESULT FIELD32(2, 0x0000001c) /* Set by device. */
+#define TXD_W0_RETRY_COUNT FIELD32(5, 0x000000e0) /* Set by device. */
+#define TXD_W0_MORE_FRAG FIELD32(8, 0x00000100) /* Set by device. */
+#define TXD_W0_ACK FIELD32(9, 0x00000200)
+#define TXD_W0_TIMESTAMP FIELD32(10, 0x00000400)
+#define TXD_W0_OFDM FIELD32(11, 0x00000800)
+#define TXD_W0_CIPHER_OWNER FIELD32(12, 0x00001000)
+#define TXD_W0_IFS FIELD32(13, 0x00006000)
+#define TXD_W0_RETRY_MODE FIELD32(15, 0x00008000)
+#define TXD_W0_DATABYTE_COUNT FIELD32(16, 0x0fff0000)
+#define TXD_W0_CIPHER_ALG FIELD32(29, 0xe0000000)
+
+	u32 word1;
+#define TXD_W1_BUFFER_ADDRESS FIELD32(0, 0xffffffff)
+
+	u32 word2;
+#define TXD_W2_IV_OFFSET FIELD32(0, 0x0000003f)
+#define TXD_W2_AIFS FIELD32(6, 0x000000c0)
+#define TXD_W2_CWMIN FIELD32(8, 0x00000f00)
+#define TXD_W2_CWMAX FIELD32(12, 0x0000f000)
+
+	u32 word3;
+#define TXD_W3_PLCP_SIGNAL FIELD32(0, 0x000000ff)
+#define TXD_W3_PLCP_SERVICE FIELD32(8, 0x0000ff00)
+#define TXD_W3_PLCP_LENGTH_LOW FIELD32(16, 0x00ff0000)
+#define TXD_W3_PLCP_LENGTH_HIGH FIELD32(24, 0xff000000)
+
+	u32 word4;
+#define TXD_W4_IV FIELD32(0, 0xffffffff)
+
+	u32 word5;
+#define TXD_W5_EIV FIELD32(0, 0xffffffff)
+
+	u32 word6;
+#define TXD_W6_KEY FIELD32(0, 0xffffffff)
+
+	u32 word7;
+#define TXD_W7_KEY FIELD32(0, 0xffffffff)
+
+	u32 word8;
+#define TXD_W8_KEY FIELD32(0, 0xffffffff)
+
+	u32 word9;
+#define TXD_W9_KEY FIELD32(0, 0xffffffff)
+
+	u32 word10;
+#define TXD_W10_RTS FIELD32(0, 0x00000001)
+#define TXD_W10_TX_RATE FIELD32(0, 0x000000fe) /* For module only. */
+} __attribute__((packed));
+
+/*
+ * RX descriptor format for RX Ring.
+ */
+struct _rxd {
+	u32 word0;
+#define RXD_W0_OWNER_NIC FIELD32(0, 0x00000001)
+#define RXD_W0_UNICAST_TO_ME FIELD32(1, 0x00000002)
+#define RXD_W0_MULTICAST FIELD32(2, 0x00000004)
+#define RXD_W0_BROADCAST FIELD32(3, 0x00000008)
+#define RXD_W0_MY_BSS FIELD32(4, 0x00000010)
+#define RXD_W0_CRC FIELD32(5, 0x00000020)
+#define RXD_W0_OFDM FIELD32(6, 0x00000040)
+#define RXD_W0_PHYSICAL_ERROR FIELD32(7, 0x00000080)
+#define RXD_W0_CIPHER_OWNER FIELD32(8, 0x00000100)
+#define RXD_W0_ICV_ERROR FIELD32(9, 0x00000200)
+#define RXD_W0_IV_OFFSET FIELD32(10, 0x0000fc00)
+#define RXD_W0_DATABYTE_COUNT FIELD32(16, 0x0fff0000)
+#define RXD_W0_CIPHER_ALG FIELD32(29, 0xe0000000)
+
+	u32 word1;
+#define RXD_W1_BUFFER_ADDRESS FIELD32(0, 0xffffffff)
+
+	u32 word2;
+#define RXD_W2_BBR0 FIELD32(0, 0x000000ff)
+#define RXD_W2_RSSI FIELD32(8, 0x0000ff00)
+#define RXD_W2_TA FIELD32(16, 0xffff0000)
+
+	u32 word3;
+#define RXD_W3_TA FIELD32(0, 0xffffffff)
+
+	u32 word4;
+#define RXD_W4_IV FIELD32(0, 0xffffffff)
+
+	u32 word5;
+#define RXD_W5_EIV FIELD32(0, 0xffffffff)
+
+	u32 word6;
+#define RXD_W6_KEY FIELD32(0, 0xffffffff)
+
+	u32 word7;
+#define RXD_W7_KEY FIELD32(0, 0xffffffff)
+
+	u32 word8;
+#define RXD_W8_KEY FIELD32(0, 0xffffffff)
+
+	u32 word9;
+#define RXD_W9_KEY FIELD32(0, 0xffffffff)
+
+	u32 word10;
+#define RXD_W10_DROP FIELD32(0, 0x00000001)
+} __attribute__((packed));
+
+/*
+ * _rt2x00_pci
+ * This is the main structure which contains all variables required to communicate with the PCI device.
+ */
+struct _rt2x00_pci {
+	/*
+     * PCI device structure.
+     */
+	struct pci_dev *pci_dev;
+
+	/*
+     * Chipset identification.
+     */
+	struct _rt2x00_chip chip;
+
+	/*
+     * csr_addr
+     * Base address of device registers, all exact register addresses are calculated from this address.
+     */
+	void __iomem *csr_addr;
+
+	/*
+     * RF register values for current channel.
+     */
+	struct _rf_channel channel;
+
+	/*
+     * EEPROM bus width.
+     */
+	u8 eeprom_width;
+
+	u8 __pad; /* For alignment only. */
+
+	/*
+     * EEPROM BBP data.
+     */
+	u16 eeprom[EEPROM_BBP_SIZE];
+
+	/*
+     * DMA packet ring.
+     */
+	struct _data_ring rx;
+	struct _data_ring tx;
+
+	rtdm_irq_t irq_handle;
+	rtdm_lock_t lock;
+
+} __attribute__((packed));
+
+static int rt2x00_get_rf_value(const struct _rt2x00_chip *chip,
+			       const u8 channel, struct _rf_channel *rf_reg)
+{
+	int index = 0x00;
+
+	index = rt2x00_get_channel_index(channel);
+	if (index < 0)
+		return -EINVAL;
+
+	memset(rf_reg, 0x00, sizeof(*rf_reg));
+
+	if (rt2x00_rf(chip, RF2522)) {
+		rf_reg->rf1 = 0x00002050;
+		rf_reg->rf3 = 0x00000101;
+		goto update_rf2_1;
+	}
+	if (rt2x00_rf(chip, RF2523)) {
+		rf_reg->rf1 = 0x00022010;
+		rf_reg->rf3 = 0x000e0111;
+		rf_reg->rf4 = 0x00000a1b;
+		goto update_rf2_2;
+	}
+	if (rt2x00_rf(chip, RF2524)) {
+		rf_reg->rf1 = 0x00032020;
+		rf_reg->rf3 = 0x00000101;
+		rf_reg->rf4 = 0x00000a1b;
+		goto update_rf2_2;
+	}
+	if (rt2x00_rf(chip, RF2525)) {
+		rf_reg->rf1 = 0x00022020;
+		rf_reg->rf2 = 0x00080000;
+		rf_reg->rf3 = 0x00060111;
+		rf_reg->rf4 = 0x00000a1b;
+		goto update_rf2_2;
+	}
+	if (rt2x00_rf(chip, RF2525E)) {
+		rf_reg->rf2 = 0x00080000;
+		rf_reg->rf3 = 0x00060111;
+		goto update_rf2_3;
+	}
+	if (rt2x00_rf(chip, RF5222)) {
+		rf_reg->rf3 = 0x00000101;
+		goto update_rf2_3;
+	}
+
+	return -EINVAL;
+
+update_rf2_1: /* RF2522. */
+	rf_reg->rf2 = 0x000c1fda + (index * 0x14);
+	if (channel == 14)
+		rf_reg->rf2 += 0x0000001c;
+	goto exit;
+
+update_rf2_2: /* RF2523, RF2524, RF2525. */
+	rf_reg->rf2 |= 0x00000c9e + (index * 0x04);
+	if (rf_reg->rf2 & 0x00000040)
+		rf_reg->rf2 += 0x00000040;
+	if (channel == 14) {
+		rf_reg->rf2 += 0x08;
+		rf_reg->rf4 &= ~0x00000018;
+	}
+	goto exit;
+
+update_rf2_3: /* RF2525E, RF5222. */
+	if (OFDM_CHANNEL(channel)) {
+		rf_reg->rf1 = 0x00022020;
+		rf_reg->rf2 |= 0x00001136 + (index * 0x04);
+		if (rf_reg->rf2 & 0x00000040)
+			rf_reg->rf2 += 0x00000040;
+		if (channel == 14) {
+			rf_reg->rf2 += 0x04;
+			rf_reg->rf4 = 0x00000a1b;
+		} else {
+			rf_reg->rf4 = 0x00000a0b;
+		}
+	} else if (UNII_LOW_CHANNEL(channel)) {
+		rf_reg->rf1 = 0x00022010;
+		rf_reg->rf2 = 0x00018896 + (index * 0x04);
+		rf_reg->rf4 = 0x00000a1f;
+	} else if (HIPERLAN2_CHANNEL(channel)) {
+		rf_reg->rf1 = 0x00022010;
+		rf_reg->rf2 = 0x00008802 + (index * 0x04);
+		rf_reg->rf4 = 0x00000a0f;
+	} else if (UNII_HIGH_CHANNEL(channel)) {
+		rf_reg->rf1 = 0x00022020;
+		rf_reg->rf2 = 0x000090a6 + (index * 0x08);
+		rf_reg->rf4 = 0x00000a07;
+	}
+
+exit:
+	rf_reg->rf1 = cpu_to_le32(rf_reg->rf1);
+	rf_reg->rf2 = cpu_to_le32(rf_reg->rf2);
+	rf_reg->rf3 = cpu_to_le32(rf_reg->rf3);
+	rf_reg->rf4 = cpu_to_le32(rf_reg->rf4);
+
+	return 0;
+}
+
+/*
+ * Get the txpower register value matching the requested percentage.
+ */
+static inline u8 rt2x00_get_txpower(const struct _rt2x00_chip *chip,
+				    const u8 tx_power)
+{
+	/* Scale the 0-100% request to the 0-31 hardware range; multiply
+	 * before dividing so integer division does not truncate to 0. */
+	return (tx_power * 31) / 100;
+
+	/*
+      if(tx_power <= 3)
+      return 19;
+      else if(tx_power <= 12)
+      return 22;
+      else if(tx_power <= 25)
+      return 25;
+      else if(tx_power <= 50)
+      return 28;
+      else if(tx_power <= 75)
+      return 30;
+      else if(tx_power <= 100)
+      return 31;
+    
+      ERROR("Invalid tx_power.\n");
+      return 31;
+    */
+}
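+
+/*
+ * Worked example for the scaling above (illustrative only): a request of
+ * 50% maps to (50 * 31) / 100 = 15 and 100% maps to 31, while a request
+ * of 3% or less truncates to 0 with integer division.
+ */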
+
+/*
+ * Ring handlers.
+ */
+static inline int
+rt2x00_pci_alloc_ring(struct _rt2x00_core *core, struct _data_ring *ring,
+		      const u8 ring_type, const u16 max_entries,
+		      const u16 entry_size, const u16 desc_size)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+
+	rt2x00_init_ring(core, ring, ring_type, max_entries, entry_size,
+			 desc_size);
+
+	ring->data_addr =
+		dma_alloc_coherent(&rt2x00pci->pci_dev->dev, ring->mem_size,
+				   &ring->data_dma, GFP_KERNEL);
+	if (!ring->data_addr)
+		return -ENOMEM;
+
+	memset(ring->data_addr, 0x00, ring->mem_size);
+
+	return 0;
+}
+
+static int rt2x00_pci_alloc_rings(struct _rt2x00_core *core)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+
+	if (rt2x00_pci_alloc_ring(core, &rt2x00pci->rx, RING_RX, RX_ENTRIES,
+				  DATA_FRAME_SIZE, SIZE_DESCRIPTOR) ||
+	    rt2x00_pci_alloc_ring(core, &rt2x00pci->tx, RING_TX, TX_ENTRIES,
+				  DATA_FRAME_SIZE, SIZE_DESCRIPTOR)) {
+		ERROR("DMA allocation failed.\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static inline void rt2x00_pci_free_ring(struct _data_ring *ring)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(ring->core);
+
+	if (ring->data_addr)
+		dma_free_coherent(&rt2x00pci->pci_dev->dev, ring->mem_size,
+				  ring->data_addr, ring->data_dma);
+	ring->data_addr = NULL;
+
+	rt2x00_deinit_ring(ring);
+}
+
+static void rt2x00_pci_free_rings(struct _rt2x00_core *core)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+
+	rt2x00_pci_free_ring(&rt2x00pci->rx);
+	rt2x00_pci_free_ring(&rt2x00pci->tx);
+}
+
+/*
+ * Macros for calculating the exact position within a data ring.
+ */
+#define DESC_BASE(__ring) ((void *)((__ring)->data_addr))
+#define DATA_BASE(__ring)                                                      \
+	((void *)(DESC_BASE(__ring) +                                          \
+		  ((__ring)->max_entries * (__ring)->desc_size)))
+
+#define __DESC_ADDR(__ring, __index)                                           \
+	((void *)(DESC_BASE(__ring) + ((__index) * (__ring)->desc_size)))
+#define __DATA_ADDR(__ring, __index)                                           \
+	((void *)(DATA_BASE(__ring) + ((__index) * (__ring)->entry_size)))
+
+#define DESC_ADDR(__ring) (__DESC_ADDR(__ring, (__ring)->index))
+#define DESC_ADDR_DONE(__ring) (__DESC_ADDR(__ring, (__ring)->index_done))
+
+#define DATA_ADDR(__ring) (__DATA_ADDR(__ring, (__ring)->index))
+#define DATA_ADDR_DONE(__ring) (__DATA_ADDR(__ring, (__ring)->index_done))
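+
+/*
+ * Layout sketch for the macros above (illustrative only): the DMA block
+ * allocated for a ring holds all descriptors first, followed by all data
+ * buffers, so for a ring with N entries:
+ *
+ *   data_addr -> [desc 0][desc 1]...[desc N-1][data 0][data 1]...[data N-1]
+ *
+ * DESC_ADDR()/DATA_ADDR() then index into the matching region using the
+ * current ring index.
+ */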
+
+/*
+ * Register access.
+ * All register access goes through rt2x00_register_read and rt2x00_register_write.
+ * BBP and RF registers require indirect access through the BBPCSR and RFCSR registers.
+ * The indirect access is guarded by busy bits, so a read or write call can fail.
+ * Specific fields within a register can be accessed using the set and get field routines;
+ * these functions handle the required little-endian/big-endian conversions.
+ */
+#define REGISTER_BUSY_COUNT                                                    \
+	10 /* Number of retries before giving up on a busy BBP or RF indirect access */
+#define REGISTER_BUSY_DELAY                                                    \
+	100 /* Delay between register access retries (us) */
+
+static void rt2x00_register_read(const struct _rt2x00_pci *rt2x00pci,
+				 const unsigned long offset, u32 *value)
+{
+	*value = readl((void *)(rt2x00pci->csr_addr + offset));
+}
+
+static void rt2x00_register_multiread(const struct _rt2x00_pci *rt2x00pci,
+				      const unsigned long offset, u32 *value,
+				      const u16 length)
+{
+	memcpy_fromio((void *)value, (void *)(rt2x00pci->csr_addr + offset),
+		      length);
+}
+
+static void rt2x00_register_write(const struct _rt2x00_pci *rt2x00pci,
+				  const unsigned long offset, const u32 value)
+{
+	writel(value, (void *)(rt2x00pci->csr_addr + offset));
+}
+
+static void rt2x00_register_multiwrite(const struct _rt2x00_pci *rt2x00pci,
+				       const unsigned long offset, u32 *value,
+				       const u16 length)
+{
+	memcpy_toio((void *)(rt2x00pci->csr_addr + offset), (void *)value,
+		    length);
+}
+
+static void rt2x00_bbp_regwrite(const struct _rt2x00_pci *rt2x00pci,
+				const u8 reg_id, const u8 value)
+{
+	u32 reg = 0x00000000;
+	u8 counter = 0x00;
+
+	for (counter = 0x00; counter < REGISTER_BUSY_COUNT; counter++) {
+		rt2x00_register_read(rt2x00pci, BBPCSR, &reg);
+		if (!rt2x00_get_field32(reg, BBPCSR_BUSY))
+			goto bbp_write;
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	ERROR("BBPCSR register busy. Write failed\n");
+	return;
+
+bbp_write:
+	reg = 0x00000000;
+	rt2x00_set_field32(&reg, BBPCSR_VALUE, value);
+	rt2x00_set_field32(&reg, BBPCSR_REGNUM, reg_id);
+	rt2x00_set_field32(&reg, BBPCSR_BUSY, 1);
+	rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 1);
+
+	rt2x00_register_write(rt2x00pci, BBPCSR, reg);
+}
+
+static void rt2x00_bbp_regread(const struct _rt2x00_pci *rt2x00pci,
+			       const u8 reg_id, u8 *value)
+{
+	u32 reg = 0x00000000;
+	u8 counter = 0x00;
+
+	/*
+     * We first have to select the requested BBP register,
+     * so we write its register id into BBPCSR.
+     */
+	rt2x00_set_field32(&reg, BBPCSR_REGNUM, reg_id);
+	rt2x00_set_field32(&reg, BBPCSR_BUSY, 1);
+	rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 0);
+
+	rt2x00_register_write(rt2x00pci, BBPCSR, reg);
+
+	for (counter = 0x00; counter < REGISTER_BUSY_COUNT; counter++) {
+		rt2x00_register_read(rt2x00pci, BBPCSR, &reg);
+		if (!rt2x00_get_field32(reg, BBPCSR_BUSY)) {
+			*value = rt2x00_get_field32(reg, BBPCSR_VALUE);
+			return;
+		}
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	ERROR("BBPCSR register busy. Read failed\n");
+	*value = 0xff;
+}
+
+static void rt2x00_rf_regwrite(const struct _rt2x00_pci *rt2x00pci,
+			       const u32 value)
+{
+	u32 reg = 0x00000000;
+	u8 counter = 0x00;
+
+	for (counter = 0x00; counter < REGISTER_BUSY_COUNT; counter++) {
+		rt2x00_register_read(rt2x00pci, RFCSR, &reg);
+		if (!rt2x00_get_field32(reg, RFCSR_BUSY))
+			goto rf_write;
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	ERROR("RFCSR register busy. Write failed\n");
+	return;
+
+rf_write:
+	reg = value;
+	rt2x00_set_field32(&reg, RFCSR_NUMBER_OF_BITS, 20);
+	rt2x00_set_field32(&reg, RFCSR_IF_SELECT, 0);
+	rt2x00_set_field32(&reg, RFCSR_BUSY, 1);
+
+	//  printk(KERN_INFO "DEBUG: %s:%d: reg=%x\n", __FILE__, __LINE__, reg);
+
+	rt2x00_register_write(rt2x00pci, RFCSR, reg);
+}
+
+/*
+ * EEPROM access.
+ * The EEPROM is accessed by word index.
+ * rt2x00_eeprom_read_word is the main access function for the rest of the
+ * module; it takes the index of the EEPROM word and clocks it out using the
+ * configured bus width.
+ */
+static inline void rt2x00_eeprom_pulse_high(const struct _rt2x00_pci *rt2x00pci,
+					    u32 *flags)
+{
+	rt2x00_set_field32(flags, CSR21_EEPROM_DATA_CLOCK, 1);
+	rt2x00_register_write(rt2x00pci, CSR21, *flags);
+	udelay(1);
+}
+
+static inline void rt2x00_eeprom_pulse_low(const struct _rt2x00_pci *rt2x00pci,
+					   u32 *flags)
+{
+	rt2x00_set_field32(flags, CSR21_EEPROM_DATA_CLOCK, 0);
+	rt2x00_register_write(rt2x00pci, CSR21, *flags);
+	udelay(1);
+}
+
+static void rt2x00_eeprom_shift_out_bits(const struct _rt2x00_pci *rt2x00pci,
+					 const u16 data, const u16 count)
+{
+	u32 flags = 0x00000000;
+	u32 mask = 0x0001 << (count - 1);
+
+	rt2x00_register_read(rt2x00pci, CSR21, &flags);
+
+	/*
+     * Clear data flags.
+     */
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN, 0);
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_OUT, 0);
+
+	/*
+     * Start writing all bits. 
+     */
+	do {
+		/*
+         * Only set the data_in flag when we are at the correct bit.
+         */
+		rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN,
+				   (data & mask) ? 1 : 0);
+
+		rt2x00_register_write(rt2x00pci, CSR21, flags);
+
+		rt2x00_eeprom_pulse_high(rt2x00pci, &flags);
+		rt2x00_eeprom_pulse_low(rt2x00pci, &flags);
+
+		/*
+         * Shift to next bit.
+         */
+		mask >>= 1;
+	} while (mask);
+
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN, 0);
+	rt2x00_register_write(rt2x00pci, CSR21, flags);
+}
+
+static void rt2x00_eeprom_shift_in_bits(const struct _rt2x00_pci *rt2x00pci,
+					u16 *data)
+{
+	u32 flags = 0x00000000;
+	u8 counter = 0x00;
+
+	rt2x00_register_read(rt2x00pci, CSR21, &flags);
+
+	/*
+     * Clear data flags.
+     */
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN, 0);
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_OUT, 0);
+
+	/*
+     * Start reading all 16 bits.
+     */
+	for (counter = 0; counter < 16; counter++) {
+		/*
+         * Shift to the next bit.
+         */
+		*data <<= 1;
+
+		rt2x00_eeprom_pulse_high(rt2x00pci, &flags);
+
+		rt2x00_register_read(rt2x00pci, CSR21, &flags);
+
+		/*
+         * Clear data_in flag and set the data bit to 1 when the data_out flag is set.
+         */
+		rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN, 0);
+		if (rt2x00_get_field32(flags, CSR21_EEPROM_DATA_OUT))
+			*data |= 1;
+
+		rt2x00_eeprom_pulse_low(rt2x00pci, &flags);
+	}
+}
+
+static u16 rt2x00_eeprom_read_word(const struct _rt2x00_pci *rt2x00pci,
+				   const u8 word)
+{
+	u32 flags = 0x00000000;
+	u16 data = 0x0000;
+
+	/*
+     * Clear all flags, and enable chip select.
+     */
+	rt2x00_register_read(rt2x00pci, CSR21, &flags);
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN, 0);
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_OUT, 0);
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_CLOCK, 0);
+	rt2x00_set_field32(&flags, CSR21_EEPROM_CHIP_SELECT, 1);
+	rt2x00_register_write(rt2x00pci, CSR21, flags);
+
+	/*
+     * kick a pulse.
+     */
+	rt2x00_eeprom_pulse_high(rt2x00pci, &flags);
+	rt2x00_eeprom_pulse_low(rt2x00pci, &flags);
+
+	/*
+     * Select the read opcode and bus_width.
+     */
+	rt2x00_eeprom_shift_out_bits(rt2x00pci, EEPROM_READ_OPCODE, 3);
+	rt2x00_eeprom_shift_out_bits(rt2x00pci, word, rt2x00pci->eeprom_width);
+
+	rt2x00_eeprom_shift_in_bits(rt2x00pci, &data);
+
+	/*
+     * Clear chip_select and data_in flags.
+     */
+	rt2x00_register_read(rt2x00pci, CSR21, &flags);
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN, 0);
+	rt2x00_set_field32(&flags, CSR21_EEPROM_CHIP_SELECT, 0);
+	rt2x00_register_write(rt2x00pci, CSR21, flags);
+
+	/*
+     * kick a pulse.
+     */
+	rt2x00_eeprom_pulse_high(rt2x00pci, &flags);
+	rt2x00_eeprom_pulse_low(rt2x00pci, &flags);
+
+	return data;
+}
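+
+/*
+ * Illustrative sketch only (not part of the original driver): reading a
+ * block of EEPROM words with rt2x00_eeprom_read_word() above. The
+ * first_word offset and word count are hypothetical parameters; the
+ * driver decides for itself which words it caches (e.g. the BBP data).
+ */
+static inline void
+rt2x00_eeprom_example_read_block(const struct _rt2x00_pci *rt2x00pci,
+				 u16 *buffer, const u8 first_word,
+				 const u8 words)
+{
+	u8 i;
+
+	for (i = 0; i < words; i++)
+		buffer[i] = rt2x00_eeprom_read_word(rt2x00pci, first_word + i);
+}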
+
+#endif /* RT2500PCI_H */
+++ linux-patched/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00.h	2022-03-21 12:58:29.919883793 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/rt2500/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/* rt2x00.h
+ *
+ * Copyright (C) 2004 - 2005 rt2x00-2.0.0-b3 SourceForge Project
+ *	                     <http://rt2x00.serialmonkey.com>
+ *               2006        rtnet adaption by Daniel Gregorek 
+ *                           <dxg@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+  Module: rt2x00
+  Abstract: rt2x00 global information.
+  Supported chipsets: RT2560
+*/
+
+#ifndef RT2X00_H
+#define RT2X00_H
+
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+
+#include <rtnet_port.h>
+#include <rtwlan.h>
+
+#define MAX_UNITS 2
+
+/*
+ * Module information.
+ */
+#define DRV_NAME "rt2x00"
+#define DRV_VERSION "0.1"
+#define DRV_AUTHOR "Daniel Gregorek <dxg@gmx.de>"
+//#define CONFIG_RT2X00_DEBUG
+
+/*
+ * Debug defines.
+ * The debug variable is exported by the device specific module.
+ * For this reason it is declared extern here, making it accessible
+ * to the core module as well.
+ */
+#ifdef CONFIG_RT2X00_DEBUG
+extern int rt2x00_debug_level;
+#define DEBUG_PRINTK(__message...)                                             \
+	do {                                                                   \
+		rtdm_printk(__message);                                        \
+	} while (0)
+#else /* CONFIG_RT2X00_DEBUG */
+#define DEBUG_PRINTK(__message...)                                             \
+	do {                                                                   \
+	} while (0)
+#endif /* CONFIG_RT2X00_DEBUG */
+
+/*
+ * Various debug levels.
+ * PANIC and ERROR indicate serious problems within the module;
+ * these should never be ignored, so the message is always printed.
+ */
+#define PANIC(__message, __args...)                                            \
+	rtdm_printk(KERN_EMERG DRV_NAME "->%s: Panic - " __message,            \
+		    __FUNCTION__, ##__args);
+#define ERROR(__message, __args...)                                            \
+	rtdm_printk(KERN_ERR DRV_NAME "->%s: Error - " __message,              \
+		    __FUNCTION__, ##__args);
+#define WARNING(__message, __args...)                                          \
+	rtdm_printk(KERN_WARNING DRV_NAME "->%s: Warning - " __message,        \
+		    __FUNCTION__, ##__args);
+#define NOTICE(__message, __args...)                                           \
+	rtdm_printk(KERN_NOTICE DRV_NAME "->%s: Notice - " __message,          \
+		    __FUNCTION__, ##__args);
+#define INFO(__message, __args...)                                             \
+	rtdm_printk(KERN_INFO DRV_NAME "->%s: Info - " __message,              \
+		    __FUNCTION__, ##__args);
+#define DEBUG(__message, __args...)                                            \
+	DEBUG_PRINTK(KERN_DEBUG DRV_NAME "->%s: Debug - " __message,           \
+		     __FUNCTION__, ##__args);
+
+/*
+ * RT2x00 ring types.
+ */
+
+/*
+ * Ring names.
+ */
+#define RING_RX 0x01 /* Ring used for receiving packets. */
+#define RING_TX 0x02 /* Ring used for transmitting normal packets. */
+
+/*
+ * Ring sizes.
+ */
+#define DATA_FRAME_SIZE 2432
+#define MGMT_FRAME_SIZE 256
+
+/*
+ * RT2x00 xmit flags.
+ */
+#define XMIT_IFS_SIFS 0x0001
+#define XMIT_IFS_BACKOFF 0x0002
+#define XMIT_IFS_NEW_BACKOFF 0x0004
+#define XMIT_IFS_NONE 0x0008
+#define XMIT_NEW_SEQUENCE 0x0010
+#define XMIT_ACK 0x0020
+#define XMIT_TIMESTAMP 0x0040
+#define XMIT_RTS 0x0080
+#define XMIT_OFDM 0x0100
+#define XMIT_LONG_RETRY 0x0200
+#define XMIT_MORE_FRAGS 0x0400
+#define XMIT_SHORT_PREAMBLE 0x0800
+#define XMIT_START 0x1000
+
+/*
+ * RT2x00 Statistics flags.
+ */
+#define STATS_TX_RESULT 0x01
+#define STATS_TX_RETRY_COUNT 0x02
+#define STATS_RX_CRC 0x10
+#define STATS_RX_PHYSICAL 0x20
+#define STATS_RX_QUALITY 0x40
+#define STATS_RX_DROP 0x80
+
+/*
+ * TX result flags.
+ */
+#define TX_SUCCESS 0
+#define TX_SUCCESS_RETRY 1
+#define TX_FAIL_RETRY 2
+#define TX_FAIL_INVALID 3
+#define TX_FAIL_OTHER 4
+
+/*
+ * Channel type defines.
+ */
+#define CHANNEL_OFDM 0x01
+#define CHANNEL_UNII_LOW 0x02
+#define CHANNEL_HIPERLAN2 0x04
+#define CHANNEL_UNII_HIGH 0x08
+
+#define CHANNEL_OFDM_MIN 1
+#define CHANNEL_OFDM_MAX 14
+#define CHANNEL_UNII_LOW_MIN 36
+#define CHANNEL_UNII_LOW_MAX 64
+#define CHANNEL_HIPERLAN2_MIN 100
+#define CHANNEL_HIPERLAN2_MAX 140
+#define CHANNEL_UNII_HIGH_MIN 149
+#define CHANNEL_UNII_HIGH_MAX 161
+
+/*
+ * Device 802.11abg capabilities.
+ */
+static struct _rt2x00_capabilities {
+	u8 txpower[6];
+	u8 bitrate[12];
+} __attribute__ ((packed)) capabilities = {
+    /*
+     * tx-power.
+     */
+    .txpower = {
+          3, 12, 25, 50, 75, 100,
+      },
+
+    /*
+     * Bitrates
+     */
+    .bitrate = {
+         2, 4, 11, 22,						/* CCK. */
+         12, 18, 24, 36, 48, 72, 96, 108,			/* OFDM. */
+     },
+};
+
+struct _rt2x00_config {
+	u8 config_flags;
+#define CONFIG_DROP_BCAST 0x0001
+#define CONFIG_DROP_MCAST 0x0002
+#define CONFIG_AUTORESP 0x0004
+
+	u8 antenna_tx;
+	u8 antenna_rx;
+
+	u8 bssid[ETH_ALEN];
+	u8 short_retry;
+	u8 long_retry;
+
+	u8 channel;
+	u8 bitrate; /* 0.5Mbit/sec */
+	u8 txpower; /* % */
+
+	u8 bbpsens;
+
+	/*
+     * LED status
+     */
+	u8 led_status;
+
+	u16 __pad2; /* For alignment only. */
+
+	/*
+     * Duration values in us.
+     */
+	u8 plcp;
+	u8 sifs;
+	u8 slot_time;
+
+	/*
+     * Configuration values that have to be updated to device.
+     */
+	u16 update_flags;
+#define UPDATE_ALL_CONFIG 0xffff
+#define UPDATE_BSSID 0x0001
+#define UPDATE_PACKET_FILTER 0x0002
+#define UPDATE_CHANNEL 0x0004
+#define UPDATE_BITRATE 0x0008
+#define UPDATE_RETRY 0x0010
+#define UPDATE_TXPOWER 0x0020
+#define UPDATE_ANTENNA 0x0040
+#define UPDATE_DURATION 0x0080
+#define UPDATE_PREAMBLE 0x0100
+#define UPDATE_AUTORESP 0x0200
+#define UPDATE_LED_STATUS 0x0400
+#define UPDATE_BBPSENS 0x0800
+
+} __attribute__((packed));
+
+struct _rt2x00_core {
+	/*
+     * RT2x00 device status flags (atomic read/write access).
+     */
+	unsigned long flags;
+
+#define DEVICE_ENABLED 0 /* Device has been opened. */
+#define DEVICE_AWAKE 1 /* Device is not suspended. */
+#define DEVICE_RADIO_ON 2 /* Device antenna is enabled. */
+#define DEVICE_CONFIG_UPDATE 3 /* Device is updating configuration. */
+
+	/*
+     * Device handler.
+     */
+	struct _rt2x00_dev_handler *handler;
+
+	/*
+     * RTnet device we belong to.
+     */
+	struct rtnet_device *rtnet_dev;
+
+	/*
+     * RTwlan stack structure.
+     */
+	struct rtwlan_device *rtwlan_dev;
+
+	/*
+     * Device configuration.
+     */
+	struct _rt2x00_config config;
+
+	void *priv;
+
+} __attribute__((packed));
+
+/*
+ * Device specific handlers.
+ */
+struct _rt2x00_dev_handler {
+	/*
+     * Device specific module.
+     */
+	struct module *dev_module;
+
+	/*
+     * Initialization handlers.
+     */
+	int (*dev_probe)(struct _rt2x00_core *core, void *priv);
+	int (*dev_remove)(struct _rt2x00_core *core);
+
+	/*
+     * Radio control.
+     */
+	int (*dev_radio_on)(struct _rt2x00_core *core);
+	int (*dev_radio_off)(struct _rt2x00_core *core);
+
+	/*
+     * Configuration handlers.
+     */
+	int (*dev_update_config)(struct _rt2x00_core *core, u16 update_flags);
+
+	/*
+     * xmit handler.
+     */
+	int (*dev_xmit_packet)(struct _rt2x00_core *core, struct rtskb *rtskb,
+			       u16 rate, u16 xmit_flags);
+
+	/*
+     * Handler for direct access to register from core.
+     */
+	int (*dev_register_access)(struct _rt2x00_core *core, int request,
+				   u32 address, u32 *value);
+
+} __attribute__((packed));
+
+static inline void *rt2x00_priv(const struct _rt2x00_core *core)
+{
+	return core->priv;
+}
+
+/*
+ * Duration calculations.
+ * The rate variable passed in is 2 * real_rate (in Mb/s).
+ * The length is therefore multiplied by 8 to convert bytes to bits, and by 2
+ * to compensate for the difference between real_rate and the rate variable.
+ */
+#define ACK_SIZE 14
+#define IEEE80211_HEADER 24
+
+static inline u16 get_duration(const unsigned int size, const u8 rate)
+{
+	return ((size * 8 * 2) / rate);
+}
+
+static inline u16 get_duration_res(const unsigned int size, const u8 rate)
+{
+	return ((size * 8 * 2) % rate);
+}
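+
+/*
+ * Worked example (illustrative only): an ACK frame of ACK_SIZE = 14 bytes
+ * at 2 Mb/s (rate value 4) gives get_duration(14, 4) = (14 * 8 * 2) / 4 =
+ * 56 us. At 11 Mb/s (rate value 22) the same frame gives 224 / 22 = 10 us,
+ * with get_duration_res() returning the remainder 4, which a caller can
+ * use to round the duration up.
+ */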
+
+static inline u16 get_preamble(const struct _rt2x00_config *config)
+{
+	return 144;
+}
+
+/*
+ * Register handlers.
+ * We store the position of a register field inside a field structure;
+ * this simplifies setting and reading a certain field
+ * inside the register.
+ */
+struct _rt2x00_field16 {
+	u16 bit_offset;
+	u16 bit_mask;
+} __attribute__((packed));
+
+struct _rt2x00_field32 {
+	u32 bit_offset;
+	u32 bit_mask;
+} __attribute__((packed));
+
+#define FIELD16(__offset, __mask)                                              \
+	((struct _rt2x00_field16){ (__offset), (__mask) })
+#define FIELD32(__offset, __mask)                                              \
+	((struct _rt2x00_field32){ (__offset), (__mask) })
+
+static inline void rt2x00_set_field32(u32 *reg,
+				      const struct _rt2x00_field32 field,
+				      const u32 value)
+{
+	*reg &= cpu_to_le32(~(field.bit_mask));
+	*reg |= cpu_to_le32((value << field.bit_offset) & field.bit_mask);
+}
+
+static inline void rt2x00_set_field32_nb(u32 *reg,
+					 const struct _rt2x00_field32 field,
+					 const u32 value)
+{
+	*reg &= ~(field.bit_mask);
+	*reg |= (value << field.bit_offset) & field.bit_mask;
+}
+
+static inline u32 rt2x00_get_field32(const u32 reg,
+				     const struct _rt2x00_field32 field)
+{
+	return (le32_to_cpu(reg) & field.bit_mask) >> field.bit_offset;
+}
+
+static inline u32 rt2x00_get_field32_nb(const u32 reg,
+					const struct _rt2x00_field32 field)
+{
+	return (reg & field.bit_mask) >> field.bit_offset;
+}
+
+static inline void rt2x00_set_field16(u16 *reg,
+				      const struct _rt2x00_field16 field,
+				      const u16 value)
+{
+	*reg &= cpu_to_le16(~(field.bit_mask));
+	*reg |= cpu_to_le16((value << field.bit_offset) & field.bit_mask);
+}
+
+static inline void rt2x00_set_field16_nb(u16 *reg,
+					 const struct _rt2x00_field16 field,
+					 const u16 value)
+{
+	*reg &= ~(field.bit_mask);
+	*reg |= (value << field.bit_offset) & field.bit_mask;
+}
+
+static inline u16 rt2x00_get_field16(const u16 reg,
+				     const struct _rt2x00_field16 field)
+{
+	return (le16_to_cpu(reg) & field.bit_mask) >> field.bit_offset;
+}
+
+static inline u16 rt2x00_get_field16_nb(const u16 reg,
+					const struct _rt2x00_field16 field)
+{
+	return (reg & field.bit_mask) >> field.bit_offset;
+}
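+
+/*
+ * Usage sketch (illustrative only): building a register value with the
+ * field helpers above. The field layout used here is hypothetical and only
+ * shows the FIELD32()/rt2x00_set_field32() pattern; real field definitions
+ * live in the chipset specific headers.
+ */
+static inline u32 rt2x00_field32_example(const u8 reg_id, const u8 value)
+{
+	u32 reg = 0x00000000;
+
+	/* Hypothetical layout: value in bits 0-7, register id in bits 8-14,
+	 * busy flag in bit 15. */
+	rt2x00_set_field32(&reg, FIELD32(0, 0x000000ff), value);
+	rt2x00_set_field32(&reg, FIELD32(8, 0x00007f00), reg_id);
+	rt2x00_set_field32(&reg, FIELD32(15, 0x00008000), 1);
+
+	return reg;
+}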
+
+/*
+ * RF register structure for channel selection.
+ */
+struct _rf_channel {
+	u32 rf1;
+	u32 rf2;
+	u32 rf3;
+	u32 rf4;
+} __attribute__((packed));
+
+/*
+ * Chipset identification.
+ * The chipset on the device is composed of an RT and an RF chip.
+ * The chipset combination is important for determining the device capabilities.
+ */
+struct _rt2x00_chip {
+	u16 rt;
+	u16 rf;
+} __attribute__((packed));
+
+/*
+ * Set chipset data.
+ * Some rf values for RT2400 devices are equal to rf values for RT2500 devices.
+ * To prevent problems, all rf values are masked to clearly separate each chipset.
+ */
+static inline void set_chip(struct _rt2x00_chip *chipset, const u16 rt,
+			    const u16 rf)
+{
+	INFO("Chipset detected - rt: %04x, rf: %04x.\n", rt, rf);
+
+	chipset->rt = rt;
+	chipset->rf = rf | (chipset->rt & 0xff00);
+}
+
+static inline char rt2x00_rt(const struct _rt2x00_chip *chipset, const u16 chip)
+{
+	return (chipset->rt == chip);
+}
+
+static inline char rt2x00_rf(const struct _rt2x00_chip *chipset, const u16 chip)
+{
+	return (chipset->rf == chip);
+}
+
+static inline u16 rt2x00_get_rf(const struct _rt2x00_chip *chipset)
+{
+	return chipset->rf;
+}
+
+/*
+ * _data_ring
+ * Data rings are used by the device to send and receive packets.
+ * The data_addr is the base address of the data memory.
+ * Device specific information is pointed to by the priv pointer.
+ * The index values may only be changed with the functions ring_index_inc()
+ * and ring_index_done_inc().
+ */
+struct _data_ring {
+	/*
+     * Base address of packet ring.
+     */
+	dma_addr_t data_dma;
+	void *data_addr;
+
+	/*
+     * Private device specific data.
+     */
+	void *priv;
+	struct _rt2x00_core *core;
+
+	/*
+     * Current index values.
+     */
+	u8 index;
+	u8 index_done;
+
+	/*
+     * Ring type set with RING_* define.
+     */
+	u8 ring_type;
+
+	/*
+     * Number of entries in this ring.
+     */
+	u8 max_entries;
+
+	/*
+     * Size of packet and descriptor in bytes.
+     */
+	u16 entry_size;
+	u16 desc_size;
+
+	/*
+     * Total allocated memory size.
+     */
+	u32 mem_size;
+} __attribute__((packed));
+
+/*
+ * Number of entries in a packet ring.
+ */
+#define RX_ENTRIES 8
+#define TX_ENTRIES 8
+#define ATIM_ENTRIES 1
+#define PRIO_ENTRIES 2
+#define BEACON_ENTRIES 1
+
+/*
+ * Initialization and cleanup routines.
+ */
+static inline void rt2x00_init_ring(struct _rt2x00_core *core,
+				    struct _data_ring *ring, const u8 ring_type,
+				    const u16 max_entries, const u16 entry_size,
+				    const u16 desc_size)
+{
+	ring->core = core;
+	ring->index = 0;
+	ring->index_done = 0;
+	ring->ring_type = ring_type;
+	ring->max_entries = max_entries;
+	ring->entry_size = entry_size;
+	ring->desc_size = desc_size;
+	ring->mem_size =
+		ring->max_entries * (ring->desc_size + ring->entry_size);
+}
+
+static inline void rt2x00_deinit_ring(struct _data_ring *ring)
+{
+	ring->core = NULL;
+	ring->index = 0;
+	ring->index_done = 0;
+	ring->ring_type = 0;
+	ring->max_entries = 0;
+	ring->entry_size = 0;
+	ring->desc_size = 0;
+	ring->mem_size = 0;
+}
+
+/*
+ * Ring index manipulation functions.
+ */
+static inline void rt2x00_ring_index_inc(struct _data_ring *ring)
+{
+	/* Advance the index and wrap around at the end of the ring. */
+	ring->index = (ring->index + 1 < ring->max_entries) ? ring->index + 1 : 0;
+}
+
+static inline void rt2x00_ring_index_done_inc(struct _data_ring *ring)
+{
+	ring->index_done =
+		(ring->index_done + 1 < ring->max_entries) ? ring->index_done + 1 : 0;
+}
+
+static inline void rt2x00_ring_clear_index(struct _data_ring *ring)
+{
+	ring->index = 0;
+	ring->index_done = 0;
+}
+
+static inline u8 rt2x00_ring_empty(struct _data_ring *ring)
+{
+	return ring->index_done == ring->index;
+}
+
+static inline u8 rt2x00_ring_free_entries(struct _data_ring *ring)
+{
+	if (ring->index >= ring->index_done)
+		return ring->max_entries - (ring->index - ring->index_done);
+	else
+		return ring->index_done - ring->index;
+}
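+
+/*
+ * Worked example (illustrative only) with max_entries = 8: after the driver
+ * has queued entries 0..5 (index = 6) and the device has completed entries
+ * 0..1 (index_done = 2), four entries are in flight and
+ * rt2x00_ring_free_entries() returns 8 - (6 - 2) = 4. With index wrapped
+ * around to 1 while index_done is still 6, it returns 6 - 1 = 5, and the
+ * ring is empty again once both indexes meet.
+ */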
+
+/*
+ * Return PLCP value matching the rate.
+ * PLCP values according to ieee802.11a-1999 p.14.
+ */
+static inline u8 rt2x00_get_plcp(const u8 rate)
+{
+	u8 counter = 0x00;
+	u8 plcp[12] = {
+		0x00, 0x01, 0x02, 0x03, /* CCK. */
+		0x0b, 0x0f, 0x0a, 0x0e, 0x09, 0x0d, 0x08, 0x0c, /* OFDM. */
+	};
+
+	for (; counter < 12; counter++) {
+		if (capabilities.bitrate[counter] == rate)
+			return plcp[counter];
+	}
+
+	return 0xff;
+}
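+
+/*
+ * Worked example (illustrative only): rate value 22 (11 Mb/s CCK) sits at
+ * index 3 of capabilities.bitrate and yields PLCP value 0x03, while rate
+ * value 108 (54 Mb/s OFDM) sits at index 11 and yields 0x0c. An unknown
+ * rate value returns 0xff.
+ */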
+
+#define OFDM_CHANNEL(__channel)                                                \
+	((__channel) >= CHANNEL_OFDM_MIN && (__channel) <= CHANNEL_OFDM_MAX)
+#define UNII_LOW_CHANNEL(__channel)                                            \
+	((__channel) >= CHANNEL_UNII_LOW_MIN &&                                \
+	 (__channel) <= CHANNEL_UNII_LOW_MAX)
+#define HIPERLAN2_CHANNEL(__channel)                                           \
+	((__channel) >= CHANNEL_HIPERLAN2_MIN &&                               \
+	 (__channel) <= CHANNEL_HIPERLAN2_MAX)
+#define UNII_HIGH_CHANNEL(__channel)                                           \
+	((__channel) >= CHANNEL_UNII_HIGH_MIN &&                               \
+	 (__channel) <= CHANNEL_UNII_HIGH_MAX)
+
+/*
+ * Return the index of the channel relative to the first channel of its range,
+ * where the range can be OFDM, UNII (low), HiperLAN2 or UNII (high).
+ */
+static inline int rt2x00_get_channel_index(const u8 channel)
+{
+	if (OFDM_CHANNEL(channel))
+		return (channel - 1);
+
+	if (channel % 4)
+		return -EINVAL;
+
+	if (UNII_LOW_CHANNEL(channel))
+		return ((channel - CHANNEL_UNII_LOW_MIN) / 4);
+	else if (HIPERLAN2_CHANNEL(channel))
+		return ((channel - CHANNEL_HIPERLAN2_MIN) / 4);
+	else if (UNII_HIGH_CHANNEL(channel))
+		return ((channel - CHANNEL_UNII_HIGH_MIN) / 4);
+	return -EINVAL;
+}
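+
+/*
+ * Worked example (illustrative only): channel 7 is an OFDM channel and maps
+ * to index 6; channel 40 lies in the UNII (low) range and maps to
+ * (40 - 36) / 4 = 1; channel 104 maps to (104 - 100) / 4 = 1; a channel such
+ * as 39, which is not a multiple of 4, yields -EINVAL.
+ */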
+
+/*
+ * RT2x00 core module functions that can be used in the device specific modules.
+ */
+extern struct rtnet_device *
+rt2x00_core_probe(struct _rt2x00_dev_handler *handler, void *priv,
+		  u32 sizeof_dev);
+extern void rt2x00_core_remove(struct rtnet_device *rtnet_dev);
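+
+/*
+ * Registration sketch (illustrative only, compiled out): how a device
+ * specific module is expected to hook into the core. All example_* names
+ * and struct example_priv are hypothetical; the real rt2500pci module
+ * supplies its own handler implementations and private data size.
+ */
+#if 0
+static struct _rt2x00_dev_handler example_handler = {
+	.dev_module = THIS_MODULE,
+	.dev_probe = example_dev_probe,
+	.dev_remove = example_dev_remove,
+	.dev_radio_on = example_dev_radio_on,
+	.dev_radio_off = example_dev_radio_off,
+	.dev_update_config = example_dev_update_config,
+	.dev_xmit_packet = example_dev_xmit_packet,
+};
+
+static int example_pci_probe(struct pci_dev *pci_dev,
+			     const struct pci_device_id *id)
+{
+	struct rtnet_device *rtnet_dev;
+
+	/* The core allocates the rtnet_device plus the device specific
+	 * private area and calls back into the handler for probing. */
+	rtnet_dev = rt2x00_core_probe(&example_handler, pci_dev,
+				      sizeof(struct example_priv));
+	return rtnet_dev ? 0 : -ENODEV;
+}
+#endif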
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/experimental/rt2500/Kconfig	2022-03-21 12:58:29.914883842 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:1 @
--- linux/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00core.c	1970-01-01 01:00:00.000000000 +0100
+config XENO_DRIVERS_NET_DRV_RT2500
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "Ralink 2500 WLAN"
+    select XENO_DRIVERS_NET_RTWLAN
+++ linux-patched/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00core.c	2022-03-21 12:58:29.908883900 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/rt2500/Makefile	1970-01-01 01:00:00.000000000 +0100
+/* rt2x00core.c
+ *
+ * Copyright (C) 2004 - 2005 rt2x00-2.0.0-b3 SourceForge Project
+ *			     <http://rt2x00.serialmonkey.com>
+ *               2006        rtnet adaption by Daniel Gregorek
+ *                           <dxg@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * Module: rt2x00core
+ * Abstract: rt2x00 core routines.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <asm/io.h>
+
+#include <rtnet_port.h>
+
+#include "rt2x00.h"
+
+#ifdef DRV_NAME
+#undef DRV_NAME
+#define DRV_NAME "rt_rt2x00core"
+#endif /* DRV_NAME */
+
+static int rt2x00_radio_on(struct _rt2x00_core *core);
+static int rt2x00_radio_off(struct _rt2x00_core *core);
+
+static int cards[MAX_UNITS] = { [0 ...(MAX_UNITS - 1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+
+/*
+ * Writes the pending configuration to the device
+ */
+static void rt2x00_update_config(struct _rt2x00_core *core)
+{
+	u16 update_flags = 0x0000;
+
+	if (!test_bit(DEVICE_ENABLED, &core->flags) &&
+	    !test_bit(DEVICE_RADIO_ON, &core->flags))
+		return;
+
+	if (test_and_set_bit(DEVICE_CONFIG_UPDATE, &core->flags))
+		return;
+
+	update_flags = core->config.update_flags;
+	core->config.update_flags = 0;
+
+	if (likely(update_flags))
+		core->handler->dev_update_config(core, update_flags);
+
+	clear_bit(DEVICE_CONFIG_UPDATE, &core->flags);
+}
+
+/*
+ * Radio control.
+ */
+static int rt2x00_radio_on(struct _rt2x00_core *core)
+{
+	int status = 0x00000000;
+
+	if (test_bit(DEVICE_RADIO_ON, &core->flags)) {
+		WARNING("Radio already on.\n");
+		return -ENOTCONN;
+	}
+
+	status = core->handler->dev_radio_on(core);
+	if (status)
+		return status;
+
+	set_bit(DEVICE_RADIO_ON, &core->flags);
+
+	return 0;
+}
+
+static int rt2x00_radio_off(struct _rt2x00_core *core)
+{
+	if (!test_and_clear_bit(DEVICE_RADIO_ON, &core->flags)) {
+		WARNING("Radio already off.\n");
+		return -ENOTCONN;
+	}
+
+	core->handler->dev_radio_off(core);
+
+	return 0;
+}
+
+/*
+ * User space ioctl handler.
+ */
+static int rt2x00_ioctl(struct rtnet_device *rtnet_dev, struct ifreq *ifr,
+			int request)
+{
+	struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev);
+	struct _rt2x00_core *core = rtwlan_priv(rtwlan_dev);
+	struct rtwlan_cmd *cmd;
+	u8 rate, dsss_rate, ofdm_rate;
+	u32 address, value;
+
+	cmd = (struct rtwlan_cmd *)ifr->ifr_data;
+
+	switch (request) {
+	case IOC_RTWLAN_IFINFO:
+		cmd->args.info.bitrate = core->config.bitrate;
+		cmd->args.info.channel = core->config.channel;
+		cmd->args.info.retry = core->config.short_retry;
+		cmd->args.info.txpower = core->config.txpower;
+		cmd->args.info.bbpsens = core->config.bbpsens;
+		cmd->args.info.mode = core->rtwlan_dev->mode;
+		cmd->args.info.rx_packets = core->rtwlan_dev->stats.rx_packets;
+		cmd->args.info.tx_packets = core->rtwlan_dev->stats.tx_packets;
+		cmd->args.info.tx_retry = core->rtwlan_dev->stats.tx_retry;
+		cmd->args.info.autoresponder =
+			core->config.config_flags & CONFIG_AUTORESP ? 1 : 0;
+		cmd->args.info.dropbcast =
+			core->config.config_flags & CONFIG_DROP_BCAST ? 1 : 0;
+		cmd->args.info.dropmcast =
+			core->config.config_flags & CONFIG_DROP_MCAST ? 1 : 0;
+		DEBUG("rtwlan_dev->mode=%d\n", rtwlan_dev->mode);
+		break;
+	case IOC_RTWLAN_BITRATE:
+		rate = cmd->args.set.bitrate;
+		ofdm_rate = ieee80211_is_ofdm_rate(rate);
+		dsss_rate = ieee80211_is_dsss_rate(rate);
+		DEBUG("bitrate=%d\n", rate);
+		if (!(dsss_rate ^ ofdm_rate))
+			NOTICE("Rate %d is not DSSS and not OFDM.\n", rate);
+		core->config.bitrate = rate;
+		core->config.update_flags |= UPDATE_BITRATE;
+		break;
+	case IOC_RTWLAN_CHANNEL:
+		DEBUG("channel=%d\n", cmd->args.set.channel);
+		core->config.channel = cmd->args.set.channel;
+		core->config.update_flags |= UPDATE_CHANNEL;
+		break;
+	case IOC_RTWLAN_RETRY:
+		core->config.short_retry = cmd->args.set.retry;
+		core->config.update_flags |= UPDATE_RETRY;
+		break;
+	case IOC_RTWLAN_TXPOWER:
+		core->config.txpower = cmd->args.set.txpower;
+		core->config.update_flags |= UPDATE_TXPOWER;
+		break;
+	case IOC_RTWLAN_AUTORESP:
+		if (cmd->args.set.autoresponder)
+			core->config.config_flags |= CONFIG_AUTORESP;
+		else
+			core->config.config_flags &= ~CONFIG_AUTORESP;
+		core->config.update_flags |= UPDATE_AUTORESP;
+		break;
+	case IOC_RTWLAN_DROPBCAST:
+		if (cmd->args.set.dropbcast)
+			core->config.config_flags |= CONFIG_DROP_BCAST;
+		else
+			core->config.config_flags &= ~CONFIG_DROP_BCAST;
+		core->config.update_flags |= UPDATE_PACKET_FILTER;
+		break;
+	case IOC_RTWLAN_DROPMCAST:
+		if (cmd->args.set.dropmcast)
+			core->config.config_flags |= CONFIG_DROP_MCAST;
+		else
+			core->config.config_flags &= ~CONFIG_DROP_MCAST;
+		core->config.update_flags |= UPDATE_PACKET_FILTER;
+		break;
+	case IOC_RTWLAN_TXMODE:
+		core->rtwlan_dev->mode = cmd->args.set.mode;
+		break;
+	case IOC_RTWLAN_BBPSENS:
+		value = cmd->args.set.bbpsens;
+		if (value < 0)
+			value = 0;
+		if (value > 127)
+			value = 127;
+		core->config.bbpsens = value;
+		core->config.update_flags |= UPDATE_BBPSENS;
+		break;
+	case IOC_RTWLAN_REGREAD:
+	case IOC_RTWLAN_BBPREAD:
+		address = cmd->args.reg.address;
+		core->handler->dev_register_access(core, request, address,
+						   &value);
+		cmd->args.reg.value = value;
+		break;
+	case IOC_RTWLAN_REGWRITE:
+	case IOC_RTWLAN_BBPWRITE:
+		address = cmd->args.reg.address;
+		value = cmd->args.reg.value;
+		core->handler->dev_register_access(core, request, address,
+						   &value);
+		break;
+	default:
+		ERROR("Unknown request!\n");
+		return -1;
+	}
+
+	if (request != IOC_RTWLAN_IFINFO)
+		rt2x00_update_config(core);
+
+	return 0;
+}
+
+/*
+ * TX/RX related routines.
+ */
+static int rt2x00_start_xmit(struct rtskb *rtskb,
+			     struct rtnet_device *rtnet_dev)
+{
+	struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev);
+	struct _rt2x00_core *core = rtwlan_priv(rtwlan_dev);
+	u16 xmit_flags = 0x0000;
+	u8 rate = 0x00;
+
+	if (unlikely(rtskb)) {
+		rate = core->config.bitrate;
+		if (ieee80211_is_ofdm_rate(rate))
+			xmit_flags |= XMIT_OFDM;
+
+		/* Check if the packet should be acknowledged */
+		if (core->rtwlan_dev->mode == RTWLAN_TXMODE_ACK)
+			xmit_flags |= XMIT_ACK;
+
+		if (core->handler->dev_xmit_packet(core, rtskb, rate,
+						   xmit_flags))
+			ERROR("Packet dropped!\n");
+
+		dev_kfree_rtskb(rtskb);
+	}
+
+	return 0;
+}
+
+/***
+ *  rt2x00_open
+ *  @rtdev
+ */
+static int rt2x00_open(struct rtnet_device *rtnet_dev)
+{
+	struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev);
+	struct _rt2x00_core *core = rtwlan_priv(rtwlan_dev);
+	int status = 0x00000000;
+
+	DEBUG("Start.\n");
+
+	if (test_and_set_bit(DEVICE_ENABLED, &core->flags)) {
+		ERROR("device already enabled.\n");
+		return -EBUSY;
+	}
+
+	/*
+     * Start rtnet interface.
+     */
+	rt_stack_connect(rtnet_dev, &STACK_manager);
+
+	status = rt2x00_radio_on(core);
+	if (status) {
+		clear_bit(DEVICE_ENABLED, &core->flags);
+		ERROR("Couldn't activate radio.\n");
+		return status;
+	}
+
+	core->config.led_status = 1;
+	core->config.update_flags |= UPDATE_LED_STATUS;
+	rt2x00_update_config(core);
+
+	rtnetif_start_queue(rtnet_dev);
+
+	DEBUG("Exit success.\n");
+
+	return 0;
+}
+
+/***
+ *  rt2x00_close
+ *  @rtdev
+ */
+static int rt2x00_close(struct rtnet_device *rtnet_dev)
+{
+	struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev);
+	struct _rt2x00_core *core = rtwlan_priv(rtwlan_dev);
+
+	DEBUG("Start.\n");
+
+	if (!test_and_clear_bit(DEVICE_ENABLED, &core->flags)) {
+		ERROR("device already disabled.\n");
+		return -EBUSY;
+	}
+
+	rt2x00_radio_off(core);
+
+	rtnetif_stop_queue(rtnet_dev);
+	rt_stack_disconnect(rtnet_dev);
+
+	return 0;
+}
+
+/*
+ * Initialization handlers.
+ */
+static void rt2x00_init_config(struct _rt2x00_core *core)
+{
+	DEBUG("Start.\n");
+
+	memset(&core->config.bssid, '\0', sizeof(core->config.bssid));
+
+	core->config.channel = 1;
+	core->config.bitrate = capabilities.bitrate[0];
+	core->config.bbpsens = 50;
+	core->config.config_flags = 0;
+	core->config.config_flags |=
+		CONFIG_DROP_BCAST | CONFIG_DROP_MCAST | CONFIG_AUTORESP;
+	core->config.short_retry = 4;
+	core->config.long_retry = 7;
+	core->config.txpower = 100;
+	core->config.plcp = 48;
+	core->config.sifs = 10;
+	core->config.slot_time = 20;
+	core->rtwlan_dev->mode = RTWLAN_TXMODE_RAW;
+	core->config.update_flags = UPDATE_ALL_CONFIG;
+}
+
+struct rtnet_device *rt2x00_core_probe(struct _rt2x00_dev_handler *handler,
+				       void *priv, u32 sizeof_dev)
+{
+	struct rtnet_device *rtnet_dev = NULL;
+	struct _rt2x00_core *core = NULL;
+	struct rtwlan_device *rtwlan_dev = NULL;
+	static int cards_found = -1;
+	int err;
+
+	DEBUG("Start.\n");
+
+	cards_found++;
+	if (cards[cards_found] == 0)
+		goto exit;
+
+	rtnet_dev =
+		rtwlan_alloc_dev(sizeof_dev + sizeof(*core), RX_ENTRIES * 2);
+	if (!rtnet_dev)
+		goto exit;
+
+	rt_rtdev_connect(rtnet_dev, &RTDEV_manager);
+	rtnet_dev->vers = RTDEV_VERS_2_0;
+
+	rtwlan_dev = rtnetdev_priv(rtnet_dev);
+	memset(rtwlan_dev, 0x00, sizeof(*rtwlan_dev));
+
+	core = rtwlan_priv(rtwlan_dev);
+	memset(core, 0x00, sizeof(*core));
+
+	core->rtwlan_dev = rtwlan_dev;
+	core->handler = handler;
+	core->priv = (void *)core + sizeof(*core);
+	core->rtnet_dev = rtnet_dev;
+
+	/* Set configuration default values. */
+	rt2x00_init_config(core);
+
+	if (core->handler->dev_probe && core->handler->dev_probe(core, priv)) {
+		ERROR("device probe failed.\n");
+		goto exit;
+	}
+	INFO("Device " MAC_FMT " detected.\n", MAC_ARG(rtnet_dev->dev_addr));
+
+	rtwlan_dev->hard_start_xmit = rt2x00_start_xmit;
+
+	rtnet_dev->open = &rt2x00_open;
+	rtnet_dev->stop = &rt2x00_close;
+	rtnet_dev->do_ioctl = &rt2x00_ioctl;
+	rtnet_dev->hard_header = &rt_eth_header;
+
+	if ((err = rt_register_rtnetdev(rtnet_dev)) != 0) {
+		ERROR("rtnet_device registration failed (err=%d).\n", err);
+		goto exit_dev_remove;
+	}
+
+	set_bit(DEVICE_AWAKE, &core->flags);
+
+	return rtnet_dev;
+
+exit_dev_remove:
+	if (core->handler->dev_remove)
+		core->handler->dev_remove(core);
+
+	/* Free the device only after the handler had a chance to clean up. */
+	rtdev_free(rtnet_dev);
+
+exit:
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(rt2x00_core_probe);
+
+void rt2x00_core_remove(struct rtnet_device *rtnet_dev)
+{
+	rt_unregister_rtnetdev(rtnet_dev);
+	rt_rtdev_disconnect(rtnet_dev);
+
+	rtdev_free(rtnet_dev);
+}
+EXPORT_SYMBOL_GPL(rt2x00_core_remove);
+
+/*
+ * RT2x00 core module information.
+ */
+static char version[] = DRV_NAME " - " DRV_VERSION;
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION("RTnet rt2500 PCI WLAN driver (Core Module)");
+MODULE_LICENSE("GPL");
+
+static int __init rt2x00_core_init(void)
+{
+	printk(KERN_INFO "Loading module: %s\n", version);
+	return 0;
+}
+
+static void __exit rt2x00_core_exit(void)
+{
+	printk(KERN_INFO "Unloading module: %s\n", version);
+}
+
+module_init(rt2x00_core_init);
+module_exit(rt2x00_core_exit);
+++ linux-patched/drivers/xenomai/net/drivers/experimental/rt2500/Makefile	2022-03-21 12:58:29.902883959 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:3 @
--- linux/drivers/xenomai/net/drivers/experimental/Kconfig	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_RT2500) += rt_rt2x00core.o rt_rt2500pci.o
+
+rt_rt2x00core-y := rt2x00core.o
+rt_rt2500pci-y := rt2500pci.o
+++ linux-patched/drivers/xenomai/net/drivers/experimental/Kconfig	2022-03-21 12:58:29.897884008 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/3c59x.c	1970-01-01 01:00:00.000000000 +0100
+config XENO_DRIVERS_NET_EXP_DRIVERS
+    depends on XENO_DRIVERS_NET && PCI
+    bool "Experimental Drivers"
+
+if XENO_DRIVERS_NET_EXP_DRIVERS
+
+config XENO_DRIVERS_NET_DRV_3C59X
+    depends on PCI
+    tristate "3Com 59x"
+
+config XENO_DRIVERS_NET_DRV_E1000_NEW
+    depends on PCI
+    tristate "New Intel(R) PRO/1000 (Gigabit)"
+
+source "drivers/xenomai/net/drivers/experimental/rt2500/Kconfig"
+
+endif
+++ linux-patched/drivers/xenomai/net/drivers/experimental/3c59x.c	2022-03-21 12:58:29.891884066 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/Makefile	1970-01-01 01:00:00.000000000 +0100
+#warning  *********************************************************************
+#warning  This driver is probably not real-time safe! Under certain conditions
+#warning  it can cause interrupt locks of up to 1 second (issue_and_wait). We
+#warning  need a rewrite of critical parts, but we are lacking the knowledge
+#warning  about the hardware details (e.g. how long does a normal delay take =>
+#warning  apply this value and throw an error message on timeouts).
+#warning  *********************************************************************
+
+/* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux / RTnet. */
+/*
+  RTnet porting 2002 by Mathias Koehrer (mathias_koehrer@yahoo.de)
+  -- Support only for PCI boards, EISA stuff ignored...
+
+  Originally written 1996-1999 by Donald Becker.
+
+  This software may be used and distributed according to the terms
+  of the GNU General Public License, incorporated herein by reference.
+
+  This driver is for the 3Com "Vortex" and "Boomerang" series ethercards.
+  Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597
+  and the EtherLink XL 3c900 and 3c905 cards.
+
+  Problem reports and questions should be directed to
+  vortex@scyld.com
+
+  The author may be reached as becker@scyld.com, or C/O
+  Scyld Computing Corporation
+  410 Severn Ave., Suite 210
+  Annapolis MD 21403
+
+  Linux Kernel Additions:
+
+  0.99H+lk0.9 - David S. Miller - softnet, PCI DMA updates
+  0.99H+lk1.0 - Jeff Garzik <jgarzik@mandrakesoft.com>
+  Remove compatibility defines for kernel versions < 2.2.x.
+  Update for new 2.3.x module interface
+  LK1.1.2 (March 19, 2000)
+  * New PCI interface (jgarzik)
+
+  LK1.1.3 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
+  - Merged with 3c575_cb.c
+  - Don't set RxComplete in boomerang interrupt enable reg
+  - spinlock in vortex_timer to protect mdio functions
+  - disable local interrupts around call to vortex_interrupt in
+  vortex_tx_timeout() (So vortex_interrupt can use spin_lock())
+  - Select window 3 in vortex_timer()'s write to Wn3_MAC_Ctrl
+  - In vortex_start_xmit(), move the lock to _after_ we've altered
+  vp->cur_tx and vp->tx_full.  This defeats the race between
+  vortex_start_xmit() and vortex_interrupt which was identified
+  by Bogdan Costescu.
+  - Merged back support for six new cards from various sources
+  - Set vortex_have_pci if pci_module_init returns zero (fixes cardbus
+  insertion oops)
+  - Tell it that 3c905C has NWAY for 100bT autoneg
+  - Fix handling of SetStatusEnd in 'Too much work..' code, as
+  per 2.3.99's 3c575_cb (Dave Hinds).
+  - Split ISR into two for vortex & boomerang
+  - Fix MOD_INC/DEC races
+  - Handle resource allocation failures.
+  - Fix 3CCFE575CT LED polarity
+  - Make tx_interrupt_mitigation the default
+
+  LK1.1.4 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
+  - Add extra TxReset to vortex_up() to fix 575_cb hotplug initialisation probs.
+  - Put vortex_info_tbl into __devinitdata
+  - In the vortex_error StatsFull HACK, disable stats in vp->intr_enable as well
+  as in the hardware.
+  - Increased the loop counter in issue_and_wait from 2,000 to 4,000.
+
+  LK1.1.5 28 April 2000, andrewm
+  - Added powerpc defines (John Daniel <jdaniel@etresoft.com> said these work...)
+  - Some extra diagnostics
+  - In vortex_error(), reset the Tx on maxCollisions.  Otherwise most
+  chips usually get a Tx timeout.
+  - Added extra_reset module parm
+  - Replaced some inline timer manip with mod_timer
+  (François Romieu <Francois.Romieu@nic.fr>)
+  - In vortex_up(), don't make Wn3_config initialisation dependent upon has_nway
+  (this came across from 3c575_cb).
+
+  LK1.1.6 06 Jun 2000, andrewm
+  - Backed out the PPC defines.
+  - Use del_timer_sync(), mod_timer().
+  - Fix wrapped ulong comparison in boomerang_rx()
+  - Add IS_TORNADO, use it to suppress 3c905C checksum error msg
+  (Donald Becker, I Lee Hetherington <ilh@sls.lcs.mit.edu>)
+  - Replace union wn3_config with BFINS/BFEXT manipulation for
+  sparc64 (Pete Zaitcev, Peter Jones)
+  - In vortex_error, do_tx_reset and vortex_tx_timeout(Vortex):
+  do a netif_wake_queue() to better recover from errors. (Anders Pedersen,
+  Donald Becker)
+  - Print a warning on out-of-memory (rate limited to 1 per 10 secs)
+  - Added two more Cardbus 575 NICs: 5b57 and 6564 (Paul Wagland)
+
+  LK1.1.7 2 Jul 2000 andrewm
+  - Better handling of shared IRQs
+  - Reset the transmitter on a Tx reclaim error
+  - Fixed crash under OOM during vortex_open() (Mark Hemment)
+  - Fix Rx cessation problem during OOM (help from Mark Hemment)
+  - The spinlocks around the mdio access were blocking interrupts for 300uS.
+  Fix all this to use spin_lock_bh() within mdio_read/write
+  - Only write to TxFreeThreshold if it's a boomerang - other NICs don't
+  have one.
+  - Added 802.3x MAC-layer flow control support
+
+  LK1.1.8 13 Aug 2000 andrewm
+  - Ignore request_region() return value - already reserved if Cardbus.
+  - Merged some additional Cardbus flags from Don's 0.99Qk
+  - Some fixes for 3c556 (Fred Maciel)
+  - Fix for EISA initialisation (Jan Rekorajski)
+  - Renamed MII_XCVR_PWR and EEPROM_230 to align with 3c575_cb and D. Becker's drivers
+  - Fixed MII_XCVR_PWR for 3CCFE575CT
+  - Added INVERT_LED_PWR, used it.
+  - Backed out the extra_reset stuff
+
+  LK1.1.9 12 Sep 2000 andrewm
+  - Backed out the tx_reset_resume flags.  It was a no-op.
+  - In vortex_error, don't reset the Tx on txReclaim errors
+  - In vortex_error, don't reset the Tx on maxCollisions errors.
+  Hence backed out all the DownListPtr logic here.
+  - In vortex_error, give Tornado cards a partial TxReset on
+  maxCollisions (David Hinds).	Defined MAX_COLLISION_RESET for this.
+  - Redid some driver flags and device names based on pcmcia_cs-3.1.20.
+  - Fixed a bug where, if vp->tx_full is set when the interface
+  is downed, it remains set when the interface is upped.  Bad
+  things happen.
+
+  LK1.1.10 17 Sep 2000 andrewm
+  - Added EEPROM_8BIT for 3c555 (Fred Maciel)
+  - Added experimental support for the 3c556B Laptop Hurricane (Louis Gerbarg)
+  - Add HAS_NWAY to "3c900 Cyclone 10Mbps TPO"
+
+  LK1.1.11 13 Nov 2000 andrewm
+  - Dump MOD_INC/DEC_USE_COUNT, use SET_MODULE_OWNER
+
+  LK1.1.12 1 Jan 2001 andrewm (2.4.0-pre1)
+  - Call pci_enable_device before we request our IRQ (Tobias Ringstrom)
+  - Add 3c590 PCI latency timer hack to vortex_probe1 (from 0.99Ra)
+  - Added extended issue_and_wait for the 3c905CX.
+  - Look for an MII on PHY index 24 first (3c905CX oddity).
+  - Add HAS_NWAY to 3cSOHO100-TX (Brett Frankenberger)
+  - Don't free skbs we don't own on oom path in vortex_open().
+
+  LK1.1.13 27 Jan 2001
+  - Added explicit `medialock' flag so we can truly
+  lock the media type down with `options'.
+  - "check ioremap return and some tidbits" (Arnaldo Carvalho de Melo <acme@conectiva.com.br>)
+  - Added and used EEPROM_NORESET for 3c556B PM resumes.
+  - Fixed leakage of vp->rx_ring.
+  - Break out separate HAS_HWCKSM device capability flag.
+  - Kill vp->tx_full (ANK)
+  - Merge zerocopy fragment handling (ANK?)
+
+  LK1.1.14 15 Feb 2001
+  - Enable WOL.  Can be turned on with `enable_wol' module option.
+  - EISA and PCI initialisation fixes (jgarzik, Manfred Spraul)
+  - If a device's internalconfig register reports it has NWAY,
+  use it, even if autoselect is enabled.
+
+  LK1.1.15 6 June 2001 akpm
+  - Prevent double counting of received bytes (Lars Christensen)
+  - Add ethtool support (jgarzik)
+  - Add module parm descriptions (Andrzej M. Krzysztofowicz)
+  - Implemented alloc_etherdev() API
+  - Special-case the 'Tx error 82' message.
+
+  LK1.1.16 18 July 2001 akpm
+  - Make NETIF_F_SG dependent upon nr_free_highpages(), not on CONFIG_HIGHMEM
+  - Lessen verbosity of bootup messages
+  - Fix WOL - use new PM API functions.
+  - Use netif_running() instead of vp->open in suspend/resume.
+  - Don't reset the interface logic on open/close/rmmod.  It upsets
+  autonegotiation, and hence DHCP (from 0.99T).
+  - Back out EEPROM_NORESET flag because of the above (we do it for all
+  NICs).
+  - Correct 3c982 identification string
+  - Rename wait_for_completion() to issue_and_wait() to avoid completion.h
+  clash.
+
+  - See http://www.uow.edu.au/~andrewm/linux/#3c59x-2.3 for more details.
+  - Also see Documentation/networking/vortex.txt
+*/
+
+/*
+ * FIXME: This driver _could_ support MTU changing, but doesn't.  See Don's hamachi.c implementation
+ * as well as other drivers
+ *
+ * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k
+ * due to dead code elimination.  There will be some performance benefits from this due to
+ * elimination of all the tests and reduced cache footprint.
+ */
+
+
+#define DRV_NAME	"3c59x"
+#define DRV_VERSION	"LK1.1.16"
+#define DRV_RELDATE	"19 July 2001"
+
+
+
+/* A few values that may be tweaked. */
+/* Keep the ring sizes a power of two for efficiency. */
+#define TX_RING_SIZE	16
+#define RX_RING_SIZE	8 /*** RTnet ***/
+#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
+
+/* "Knobs" that adjust features and parameters. */
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+   Setting to > 1512 effectively disables this feature. */
+/*** RTnet ***/
+/*** RTnet ***/
+/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
+static const int mtu = 1500;
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 32;
+/* Tx timeout interval (millisecs) */
+// *** RTnet ***
+//static int watchdog = 5000;
+// *** RTnet ***
+
+/* Allow aggregation of Tx interrupts.	Saves CPU load at the cost
+ * of possible Tx stalls if the system is blocking interrupts
+ * somewhere else.  Undefine this to disable.
+ */
+#define tx_interrupt_mitigation 1
+
+/* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */
+#define vortex_debug debug
+#ifdef VORTEX_DEBUG
+static int vortex_debug = VORTEX_DEBUG;
+#else
+static int vortex_debug = 1;
+#endif
+
+#ifndef __OPTIMIZE__
+#error You must compile this file with the correct options!
+#error See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/mii.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/highmem.h>
+#include <linux/uaccess.h>
+#include <asm/irq.h>			/* For NR_IRQS only. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+// *** RTnet ***
+#include <rtnet_port.h>
+
+static int cards = INT_MAX;
+module_param(cards, int, 0444);
+MODULE_PARM_DESC(cards, "number of cards to be supported");
+// *** RTnet ***
+
+/* Kernel compatibility defines, some common to David Hinds' PCMCIA package.
+   This is only in the support-all-kernels source code. */
+
+#define RUN_AT(x) (jiffies + (x))
+
+#include <linux/delay.h>
+
+// *** RTnet - no power management ***
+#undef pci_set_power_state
+#define pci_set_power_state null_set_power_state
+static inline int null_set_power_state(struct pci_dev *dev, int state)
+{
+	return 0;
+}
+// *** RTnet ***
+
+
+static char version[] =
+	DRV_NAME " for RTnet : Donald Becker and others. www.scyld.com/network/vortex.html\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver for RTnet "
+		DRV_VERSION " " DRV_RELDATE);
+MODULE_LICENSE("GPL");
+
+/* Operational parameters that usually are not changed. */
+
+/* The Vortex size is twice that of the original EtherLinkIII series: the
+   runtime register window, window 1, is now always mapped in.
+   The Boomerang size is twice as large as the Vortex -- it has additional
+   bus master control registers. */
+#define VORTEX_TOTAL_SIZE 0x20
+#define BOOMERANG_TOTAL_SIZE 0x40
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+   This is only set with the original DP83840 on older 3c905 boards, so the extra
+   code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required;
+
+#define PFX DRV_NAME ": "
+
+
+
+/*
+  Theory of Operation
+
+  I. Board Compatibility
+
+  This device driver is designed for the 3Com FastEtherLink and FastEtherLink
+  XL, 3Com's PCI to 10/100baseT adapters.  It also works with the 10Mbs
+  versions of the FastEtherLink cards.	The supported product IDs are
+  3c590, 3c592, 3c595, 3c597, 3c900, 3c905
+
+  The related ISA 3c515 is supported with a separate driver, 3c515.c, included
+  with the kernel source or available from
+  cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html
+
+  II. Board-specific settings
+
+  PCI bus devices are configured by the system at boot time, so no jumpers
+  need to be set on the board.	The system BIOS should be set to assign the
+  PCI INTA signal to an otherwise unused system IRQ line.
+
+  The EEPROM settings for media type and forced-full-duplex are observed.
+  The EEPROM media type should be left at the default "autoselect" unless using
+  10base2 or AUI connections which cannot be reliably detected.
+
+  III. Driver operation
+
+  The 3c59x series use an interface that's very similar to the previous 3c5x9
+  series.  The primary interface is two programmed-I/O FIFOs, with an
+  alternate single-contiguous-region bus-master transfer (see next).
+
+  The 3c900 "Boomerang" series uses a full-bus-master interface with separate
+  lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
+  DEC Tulip and Intel Speedo3.	The first chip version retains a compatible
+  programmed-I/O interface that has been removed in 'B' and subsequent board
+  revisions.
+
+  One extension that is advertised in a very large font is that the adapters
+  are capable of being bus masters.  On the Vortex chip this capability was
+  only for a single contiguous region making it far less useful than the full
+  bus master capability.  There is a significant performance impact of taking
+  an extra interrupt or polling for the completion of each transfer, as well
+  as difficulty sharing the single transfer engine between the transmit and
+  receive threads.  Using DMA transfers is a win only with large blocks or
+  with the flawed versions of the Intel Orion motherboard PCI controller.
+
+  The Boomerang chip's full-bus-master interface is useful, and has the
+  currently-unused advantages over other similar chips that queued transmit
+  packets may be reordered and receive buffer groups are associated with a
+  single frame.
+
+  With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme.
+  Rather than a fixed intermediate receive buffer, this scheme allocates
+  full-sized skbuffs as receive buffers.  The value RX_COPYBREAK is used as
+  the copying breakpoint: it is chosen to trade-off the memory wasted by
+  passing the full-sized skbuff to the queue layer for all frames vs. the
+  copying cost of copying a frame to a correctly-sized skbuff.
+
+  IIIC. Synchronization
+  The driver runs as two independent, single-threaded flows of control.  One
+  is the send-packet routine, which enforces single-threaded use by the
+  dev->tbusy flag.  The other thread is the interrupt handler, which is single
+  threaded by the hardware and other software.
+
+  IV. Notes
+
+  Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development
+  3c590, 3c595, and 3c900 boards.
+  The name "Vortex" is the internal 3Com project name for the PCI ASIC, and
+  the EISA version is called "Demon".  According to Terry these names come
+  from rides at the local amusement park.
+
+  The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes!
+  This driver only supports ethernet packets because of the skbuff allocation
+  limit of 4K.
+*/
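+/*
+ * Illustrative sketch only: the RX_COPYBREAK scheme described above boils
+ * down to a per-frame decision of the form
+ *
+ *	if (pkt_len < rx_copybreak)
+ *		copy the frame into a freshly allocated, exactly-sized buffer
+ *		and leave the full-sized ring buffer in place;
+ *	else
+ *		pass the full-sized ring buffer up and refill the ring slot;
+ *
+ * trading copy cost against wasted buffer memory.  This RTnet port allocates
+ * full-sized rtskbs for the ring and keeps the rx_copybreak parameter
+ * commented out below, so the copy branch is not used here.
+ */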
+
+/* This table drives the PCI probe routines.  It's mostly boilerplate in all
+   of the drivers, and will likely be provided by some future kernel.
+*/
+enum pci_flags_bit {
+	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+	PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+};
+
+enum {	IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8,
+	EEPROM_8BIT=0x10,	/* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */
+	HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100,
+	INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800,
+	EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000 };
+
+enum vortex_chips {
+	CH_3C590 = 0,
+	CH_3C592,
+	CH_3C597,
+	CH_3C595_1,
+	CH_3C595_2,
+
+	CH_3C595_3,
+	CH_3C900_1,
+	CH_3C900_2,
+	CH_3C900_3,
+	CH_3C900_4,
+
+	CH_3C900_5,
+	CH_3C900B_FL,
+	CH_3C905_1,
+	CH_3C905_2,
+	CH_3C905B_1,
+
+	CH_3C905B_2,
+	CH_3C905B_FX,
+	CH_3C905C,
+	CH_3C980,
+	CH_3C9805,
+
+	CH_3CSOHO100_TX,
+	CH_3C555,
+	CH_3C556,
+	CH_3C556B,
+	CH_3C575,
+
+	CH_3C575_1,
+	CH_3CCFE575,
+	CH_3CCFE575CT,
+	CH_3CCFE656,
+	CH_3CCFEM656,
+
+	CH_3CCFEM656_1,
+	CH_3C450,
+};
+
+
+/* note: this array is directly indexed by the enums above, and MUST
+ * be kept in sync with both those enums and the PCI device
+ * table below
+ */
+static struct vortex_chip_info {
+	const char *name;
+	int flags;
+	int drv_flags;
+	int io_size;
+} vortex_info_tbl[] = {
+#define EISA_TBL_OFFSET	0		/* Offset of this entry for vortex_eisa_init */
+	{"3c590 Vortex 10Mbps",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+	{"3c592 EISA 10Mbps Demon/Vortex",					/* AKPM: from Don's 3c59x_cb.c 0.49H */
+	 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+	{"3c597 EISA Fast Demon/Vortex",					/* AKPM: from Don's 3c59x_cb.c 0.49H */
+	 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+	{"3c595 Vortex 100baseTx",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+	{"3c595 Vortex 100baseT4",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+
+	{"3c595 Vortex 100base-MII",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+	{"3c900 Boomerang 10baseT",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, },
+	{"3c900 Boomerang 10Mbps Combo",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, },
+	{"3c900 Cyclone 10Mbps TPO",						/* AKPM: from Don's 0.99M */
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
+	{"3c900 Cyclone 10Mbps Combo",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+
+	{"3c900 Cyclone 10Mbps TPC",						/* AKPM: from Don's 0.99M */
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+	{"3c900B-FL Cyclone 10base-FL",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+	{"3c905 Boomerang 100baseTx",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, },
+	{"3c905 Boomerang 100baseT4",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, },
+	{"3c905B Cyclone 100baseTx",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
+
+	{"3c905B Cyclone 10/100/BNC",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
+	{"3c905B-FX Cyclone 100baseFx",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+	{"3c905C Tornado",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
+	{"3c980 Cyclone",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+	{"3c982 Dual Port Server Cyclone",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+
+	{"3cSOHO100-TX Hurricane",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
+	{"3c555 Laptop Hurricane",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, },
+	{"3c556 Laptop Tornado",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR|
+	 HAS_HWCKSM, 128, },
+	{"3c556B Laptop Hurricane",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR|
+	 HAS_HWCKSM, 128, },
+	{"3c575 [Megahertz] 10/100 LAN	CardBus",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
+
+	{"3c575 Boomerang CardBus",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
+	{"3CCFE575BT Cyclone CardBus",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|
+	 INVERT_LED_PWR|HAS_HWCKSM, 128, },
+	{"3CCFE575CT Tornado CardBus",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+	 MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
+	{"3CCFE656 Cyclone CardBus",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+	 INVERT_LED_PWR|HAS_HWCKSM, 128, },
+	{"3CCFEM656B Cyclone+Winmodem CardBus",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+	 INVERT_LED_PWR|HAS_HWCKSM, 128, },
+
+	{"3CXFEM656C Tornado+Winmodem CardBus",			/* From pcmcia-cs-3.1.5 */
+	 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+	 MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
+	{"3c450 HomePNA Tornado",						/* AKPM: from Don's 0.99Q */
+	 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
+	{0,}, /* 0 terminated list. */
+};
+
+
+static struct pci_device_id vortex_pci_tbl[] = {
+	{ 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
+	{ 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
+	{ 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
+	{ 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 },
+	{ 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 },
+
+	{ 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 },
+	{ 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 },
+	{ 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 },
+	{ 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 },
+	{ 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 },
+
+	{ 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 },
+	{ 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL },
+	{ 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 },
+	{ 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 },
+	{ 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 },
+
+	{ 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 },
+	{ 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX },
+	{ 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C },
+	{ 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 },
+	{ 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 },
+
+	{ 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX },
+	{ 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 },
+	{ 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 },
+	{ 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B },
+	{ 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 },
+
+	{ 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 },
+	{ 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 },
+	{ 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT },
+	{ 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 },
+	{ 0x10B7, 0x6562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 },
+
+	{ 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 },
+	{ 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 },
+	{0,}						/* 0 terminated list. */
+};
+MODULE_DEVICE_TABLE(pci, vortex_pci_tbl);
+
+
+/* Operational definitions.
+   These are not used by other compilation units and thus are not
+   exported in a ".h" file.
+
+   First the windows.  There are eight register windows, with the command
+   and status registers available in each.
+*/
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
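+/*
+ * Window usage sketch, as done in vortex_probe1()/vortex_up() below: select
+ * a window, then access its registers through the shared I/O range, e.g.
+ *
+ *	EL3WINDOW(3);
+ *	config = inl(ioaddr + Wn3_Config);
+ */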
+
+/* The top five bits written to EL3_CMD are a command, the lower
+   11 bits are the parameter, if applicable.
+   Note that 11 parameter bits were fine for ethernet, but the new chips
+   can handle FDDI-length frames (~4500 octets), and parameters now count
+   32-bit 'Dwords' rather than octets. */
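+/*
+ * Encoding example (sketch): setting the Rx threshold to 1536 bytes puts the
+ * SetRxThreshold opcode in the top five bits and the dword count in the low
+ * eleven bits, i.e.
+ *
+ *	outw(SetRxThreshold + (1536 >> 2), ioaddr + EL3_CMD);
+ *
+ * which is how vortex_up() programs it below.
+ */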
+
+enum vortex_cmd {
+	TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+	RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11,
+	UpStall = 6<<11, UpUnstall = (6<<11)+1,
+	DownStall = (6<<11)+2, DownUnstall = (6<<11)+3,
+	RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+	FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+	SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+	SetTxThreshold = 18<<11, SetTxStart = 19<<11,
+	StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
+	StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,};
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+	RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
+
+/* Bits in the general status register. */
+enum vortex_status {
+	IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004,
+	TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+	IntReq = 0x0040, StatsFull = 0x0080,
+	DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
+	DMAInProgress = 1<<11,			/* DMA controller is still busy.*/
+	CmdInProgress = 1<<12,			/* EL3_CMD is still busy.*/
+};
+
+/* Register window 1 offsets, the window used in normal operation.
+   On the Vortex this window is always mapped at offsets 0x10-0x1f. */
+enum Window1 {
+	TX_FIFO = 0x10,  RX_FIFO = 0x10,  RxErrors = 0x14,
+	RxStatus = 0x18,  Timer=0x1A, TxStatus = 0x1B,
+	TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
+};
+enum Window0 {
+	Wn0EepromCmd = 10,		/* Window 0: EEPROM command register. */
+	Wn0EepromData = 12,		/* Window 0: EEPROM results register. */
+	IntrStatus=0x0E,		/* Valid in all windows. */
+};
+enum Win0_EEPROM_bits {
+	EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
+	EEPROM_EWENB = 0x30,		/* Enable erasing/writing for 10 msec. */
+	EEPROM_EWDIS = 0x00,		/* Disable EWENB before 10 msec timeout. */
+};
+/* EEPROM locations. */
+enum eeprom_offset {
+	PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
+	EtherLink3ID=7, IFXcvrIO=8, IRQLine=9,
+	NodeAddr01=10, NodeAddr23=11, NodeAddr45=12,
+	DriverTune=13, Checksum=15};
+
+enum Window2 {			/* Window 2. */
+	Wn2_ResetOptions=12,
+};
+enum Window3 {			/* Window 3: MAC/config bits. */
+	Wn3_Config=0, Wn3_MAC_Ctrl=6, Wn3_Options=8,
+};
+
+#define BFEXT(value, offset, bitcount)					\
+	((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1))
+
+#define BFINS(lhs, rhs, offset, bitcount)				\
+	(((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) |		\
+		(((rhs) & ((1 << (bitcount)) - 1)) << (offset)))
+
+#define RAM_SIZE(v)		BFEXT(v, 0, 3)
+#define RAM_WIDTH(v)	BFEXT(v, 3, 1)
+#define RAM_SPEED(v)	BFEXT(v, 4, 2)
+#define ROM_SIZE(v)		BFEXT(v, 6, 2)
+#define RAM_SPLIT(v)	BFEXT(v, 16, 2)
+#define XCVR(v)			BFEXT(v, 20, 4)
+#define AUTOSELECT(v)	BFEXT(v, 24, 1)
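+/*
+ * Usage sketch: vortex_up() below patches the selected transceiver into bits
+ * 20..23 of the Wn3_Config image before writing it back,
+ *
+ *	config = BFINS(config, rtdev->if_port, 20, 4);
+ *	outl(config, ioaddr + Wn3_Config);
+ *
+ * while the accessors above (RAM_SIZE(), XCVR(), ...) are plain BFEXT()
+ * extractions from the same register.
+ */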
+
+enum Window4 {		/* Window 4: Xcvr/media bits. */
+	Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
+};
+enum Win4_Media_bits {
+	Media_SQE = 0x0008,		/* Enable SQE error counting for AUI. */
+	Media_10TP = 0x00C0,	/* Enable link beat and jabber for 10baseT. */
+	Media_Lnk = 0x0080,		/* Enable just link beat for 100TX/100FX. */
+	Media_LnkBeat = 0x0800,
+};
+enum Window7 {					/* Window 7: Bus Master control. */
+	Wn7_MasterAddr = 0, Wn7_MasterLen = 6, Wn7_MasterStatus = 12,
+};
+/* Boomerang bus master control registers. */
+enum MasterCtrl {
+	PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
+	TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38,
+};
+
+/* The Rx and Tx descriptor lists.
+   Caution Alpha hackers: these types are 32 bits!  Note also the 8 byte
+   alignment constraint on tx_ring[] and rx_ring[]. */
+#define LAST_FRAG	0x80000000			/* Last Addr/Len pair in descriptor. */
+#define DN_COMPLETE	0x00010000			/* This packet has been downloaded */
+struct boom_rx_desc {
+	u32 next;					/* Last entry points to 0.   */
+	s32 status;
+	u32 addr;					/* Up to 63 addr/len pairs possible. */
+	s32 length;					/* Set LAST_FRAG to indicate last pair. */
+};
+/* Values for the Rx status entry. */
+enum rx_desc_status {
+	RxDComplete=0x00008000, RxDError=0x4000,
+	/* See boomerang_rx() for actual error bits */
+	IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27,
+	IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31,
+};
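+/*
+ * Consumption sketch: the bus-master receive path (boomerang_rx(), declared
+ * below) walks the ring roughly as
+ *
+ *	while ((status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete) {
+ *		if (status & RxDError)
+ *			count the error and recycle the buffer;
+ *		else
+ *			pass vp->rx_skbuff[entry] up and refill the slot;
+ *	}
+ */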
+
+// *** RTnet ***
+//#ifdef MAX_SKB_FRAGS
+//#define DO_ZEROCOPY 1
+//#else
+#define DO_ZEROCOPY 0
+//#endif
+
+struct boom_tx_desc {
+	u32 next;					/* Last entry points to 0.   */
+	s32 status;					/* bits 0:12 length, others see below.	*/
+#if DO_ZEROCOPY
+	struct {
+		u32 addr;
+		s32 length;
+	} frag[1+MAX_SKB_FRAGS];
+#else
+	u32 addr;
+	s32 length;
+#endif
+};
+
+/* Values for the Tx status entry. */
+enum tx_desc_status {
+	CRCDisable=0x2000, TxDComplete=0x8000,
+	AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000,
+	TxIntrUploaded=0x80000000,		/* IRQ when in FIFO, but maybe not sent. */
+};
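+/*
+ * boomerang_start_xmit() below builds each descriptor from these bits; in
+ * the non-zerocopy case essentially
+ *
+ *	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
+ *
+ * i.e. bits 0:12 carry the length and TxIntrUploaded asks for an interrupt
+ * once the packet has reached the FIFO (though it may not be sent yet).
+ */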
+
+/* Chip features we care about in vp->capabilities, read from the EEPROM. */
+enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 };
+
+struct vortex_private {
+	/* The Rx and Tx rings should be quad-word-aligned. */
+	struct boom_rx_desc* rx_ring;
+	struct boom_tx_desc* tx_ring;
+	dma_addr_t rx_ring_dma;
+	dma_addr_t tx_ring_dma;
+	/* The addresses of transmit- and receive-in-place skbuffs. */
+
+	// *** RTnet ***
+	struct rtskb *tx_skbuff[TX_RING_SIZE];
+	struct rtskb *rx_skbuff[RX_RING_SIZE];
+	// *** RTnet ***
+
+	struct rtnet_device *next_module;		/* NULL if PCI device */
+	unsigned int cur_rx, cur_tx;		/* The next free ring entry */
+	unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
+	struct net_device_stats stats;
+	struct rtskb *tx_skb;				/* Packet being eaten by bus master ctrl.  */
+	dma_addr_t tx_skb_dma;				/* Allocated DMA address for bus master ctrl DMA.   */
+
+	/* PCI configuration space information. */
+	struct pci_dev *pdev;
+	char *cb_fn_base;					/* CardBus function status addr space. */
+
+	/* Some values here only for performance evaluation and path-coverage */
+	int rx_nocopy, rx_copy, queued_packet, rx_csumhits;
+	int card_idx;
+
+	/* The remainder are related to chip state, mostly media selection. */
+	struct timer_list timer;			/* Media selection timer. */
+	struct timer_list rx_oom_timer;		/* Rx skb allocation retry timer */
+	int options;						/* User-settable misc. driver options. */
+	unsigned int media_override:4,		/* Passed-in media type. */
+		default_media:4,				/* Read from the EEPROM/Wn3_Config. */
+		full_duplex:1, force_fd:1, autoselect:1,
+		bus_master:1,					/* Vortex can only do a fragment bus-m. */
+		full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang  */
+		flow_ctrl:1,					/* Use 802.3x flow control (PAUSE only) */
+		partner_flow_ctrl:1,			/* Partner supports flow control */
+		has_nway:1,
+		enable_wol:1,					/* Wake-on-LAN is enabled */
+		pm_state_valid:1,				/* power_state[] has sane contents */
+		open:1,
+		medialock:1,
+		must_free_region:1;				/* Flag: if zero, Cardbus owns the I/O region */
+	int drv_flags;
+	u16 status_enable;
+	u16 intr_enable;
+	u16 available_media;				/* From Wn3_Options. */
+	u16 capabilities, info1, info2;		/* Various, from EEPROM. */
+	u16 advertising;					/* NWay media advertisement */
+	unsigned char phys[2];				/* MII device addresses. */
+	u16 deferred;						/* Resend these interrupts when we
+								 * bale from the ISR */
+	u16 io_size;						/* Size of PCI region (for release_region) */
+	rtdm_lock_t lock;					/* Serialise access to device & its vortex_private */
+	spinlock_t mdio_lock;				/* Serialise access to mdio hardware */
+	u32 power_state[16];
+	rtdm_irq_t irq_handle;
+};
+
+/* The action to take with a media selection timer tick.
+   Note that we deviate from the 3Com order by checking 10base2 before AUI.
+*/
+enum xcvr_types {
+	XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
+	XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10,
+};
+
+static struct media_table {
+	char *name;
+	unsigned int media_bits:16,		/* Bits to set in Wn4_Media register. */
+		mask:8,						/* The transceiver-present bit in Wn3_Config.*/
+		next:8;						/* The media type to try next. */
+	int wait;						/* Time before we check media status. */
+} media_tbl[] = {
+	{	"10baseT",   Media_10TP,0x08, XCVR_10base2, (14*HZ)/10},
+	{ "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10},
+	{ "undefined", 0,			0x80, XCVR_10baseT, 10000},
+	{ "10base2",   0,			0x10, XCVR_AUI,		(1*HZ)/10},
+	{ "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10},
+	{ "100baseFX", Media_Lnk, 0x04, XCVR_MII,		(14*HZ)/10},
+	{ "MII",               0,			0x41, XCVR_10baseT, 3*HZ },
+	{ "undefined", 0,			0x01, XCVR_10baseT, 10000},
+	{ "Autonegotiate", 0,		0x41, XCVR_10baseT, 3*HZ},
+	{ "MII-External",      0,		0x41, XCVR_10baseT, 3*HZ },
+	{ "Default",   0,			0xFF, XCVR_10baseT, 10000},
+};
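+/*
+ * The 'next' links above form the fallback chain that vortex_up() walks when
+ * autoselecting without NWAY (sketch):
+ *
+ *	rtdev->if_port = XCVR_100baseTx;
+ *	while (!(vp->available_media & media_tbl[rtdev->if_port].mask))
+ *		rtdev->if_port = media_tbl[rtdev->if_port].next;
+ */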
+
+static int vortex_probe1(struct pci_dev *pdev, long ioaddr, int irq,
+			int chip_idx, int card_idx);
+static void vortex_up(struct rtnet_device *rtdev);
+static void vortex_down(struct rtnet_device *rtdev);
+static int vortex_open(struct rtnet_device *rtdev);
+static void mdio_sync(long ioaddr, int bits);
+static int mdio_read(struct rtnet_device *rtdev, int phy_id, int location);
+static void mdio_write(struct rtnet_device *vp, int phy_id, int location, int value);
+
+// *** RTnet ***
+//static void vortex_timer(unsigned long arg);
+//static void rx_oom_timer(unsigned long arg);
+// *** RTnet ***
+
+static int vortex_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev);
+static int boomerang_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev);
+static int vortex_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp);
+static int boomerang_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp);
+static int vortex_interrupt(rtdm_irq_t *irq_handle);
+static int boomerang_interrupt(rtdm_irq_t *irq_handle);
+static int vortex_close(struct rtnet_device *rtdev);
+static void dump_tx_ring(struct rtnet_device *rtdev);
+
+static void update_stats(long ioaddr, struct rtnet_device *dev);
+static struct net_device_stats *vortex_get_stats(struct rtnet_device *rtdev);
+
+static void set_rx_mode(struct rtnet_device *rtdev);
+
+// *** RTnet ***
+//static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+//static void vortex_tx_timeout(struct net_device *dev);
+// *** RTnet ***
+
+static void acpi_set_WOL(struct rtnet_device *rtdev);
+
+/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
+/* Option count limit only -- unlimited interfaces are supported. */
+#define MAX_UNITS 8
+static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1,};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int hw_checksums[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int flow_ctrl[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int enable_wol[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
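+/*
+ * Loading example (hypothetical values, sketch only): per-card settings are
+ * comma-separated lists with one entry per adapter, e.g.
+ *
+ *	insmod rt_3c59x.ko cards=2 options=0,4 full_duplex=1,1
+ *
+ * where bits 0-3 of each 'options' entry select the media type and bit 4
+ * enables bus mastering (see the MODULE_PARM_DESC lines below).
+ */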
+
+module_param(debug, int, 0444);
+module_param_array(options, int, NULL, 0444);
+module_param_array(full_duplex, int, NULL, 0444);
+module_param_array(hw_checksums, int, NULL, 0444);
+module_param_array(flow_ctrl, int, NULL, 0444);
+module_param_array(enable_wol, int, NULL, 0444);
+/*** RTnet ***
+     MODULE_PARM(rx_copybreak, "i");
+     *** RTnet ***/
+module_param(max_interrupt_work, int, 0444);
+/*** RTnet ***
+     MODULE_PARM(compaq_ioaddr, "i");
+     MODULE_PARM(compaq_irq, "i");
+     MODULE_PARM(compaq_device_id, "i");
+     MODULE_PARM(watchdog, "i");
+     *** RTnet ***/
+MODULE_PARM_DESC(debug, "3c59x debug level (0-6)");
+MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex");
+MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)");
+MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)");
+MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)");
+MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)");
+/*** RTnet ***
+     MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames");
+     *** RTnet ***/
+MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt");
+/*** RTnet ***
+     MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)");
+     MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)");
+     MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)");
+     MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds");
+     *** RTnet ***/
+
+/* #define dev_alloc_skb dev_alloc_skb_debug */
+
+/* A list of all installed Vortex EISA devices, for removing the driver module. */
+static struct rtnet_device *root_vortex_eisa_dev;
+
+/* Variables to work-around the Compaq PCI BIOS32 problem. */
+// *** RTnet ***
+//static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900;
+// *** RTnet ***
+
+static int vortex_cards_found;
+
+#ifdef CONFIG_PM
+
+#endif /* CONFIG_PM */
+
+/* returns count (>= 0), or negative on error */
+static int vortex_init_one (struct pci_dev *pdev,
+			const struct pci_device_id *ent)
+{
+	int rc;
+
+	if( vortex_cards_found >= cards )
+		return -ENODEV;
+
+	/* wake up and enable device */
+	if (pci_enable_device (pdev)) {
+		rc = -EIO;
+	} else {
+		rc = vortex_probe1 (pdev, pci_resource_start (pdev, 0), pdev->irq,
+				ent->driver_data, vortex_cards_found);
+		if (rc == 0)
+			vortex_cards_found++;
+	}
+	return rc;
+}
+
+/*
+ * Start up the PCI device which is described by *pdev.
+ * Return 0 on success.
+ *
+ * NOTE: pdev can be NULL, for the case of an EISA driver
+ */
+static int vortex_probe1(struct pci_dev *pdev,
+			long ioaddr, int irq,
+			int chip_idx, int card_idx)
+{
+	// *** RTnet ***
+	struct rtnet_device *rtdev = NULL;
+	// *** RTnet ***
+
+	struct vortex_private *vp;
+	int option;
+	unsigned int eeprom[0x40], checksum = 0;		/* EEPROM contents */
+	int i, step;
+	static int printed_version;
+	int retval, print_info;
+	struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx];
+	const char *print_name;
+
+
+
+	if (!printed_version) {
+		printk (version);
+		printed_version = 1;
+	}
+
+	print_name = pdev ? pci_name(pdev) : "3c59x";
+
+	// *** RTnet ***
+	rtdev = rt_alloc_etherdev(sizeof(*vp), RX_RING_SIZE * 2 + TX_RING_SIZE);
+	retval = -ENOMEM;
+	if (!rtdev) {
+		printk (KERN_ERR PFX "unable to allocate etherdev, aborting\n");
+		goto out;
+	}
+	rtdev_alloc_name(rtdev, "rteth%d");
+	memset(rtdev->priv, 0, sizeof(*vp));
+	rt_rtdev_connect(rtdev, &RTDEV_manager);
+	rtdev->vers = RTDEV_VERS_2_0;
+	// *** RTnet ***
+
+	vp = rtdev->priv;
+
+	/* The lower four bits are the media type. */
+	if (rtdev->mem_start) {
+		/*
+		 * The 'options' param is passed in as the third arg to the
+		 * LILO 'ether=' argument for non-modular use
+		 */
+		option = rtdev->mem_start;
+	}
+	else if (card_idx < MAX_UNITS)
+		option = options[card_idx];
+	else
+		option = -1;
+
+	if (option > 0) {
+		if (option & 0x8000)
+			vortex_debug = 7;
+		if (option & 0x4000)
+			vortex_debug = 2;
+		if (option & 0x0400)
+			vp->enable_wol = 1;
+	}
+
+	print_info = (vortex_debug > 1);
+	if (print_info)
+		printk (KERN_INFO "See Documentation/networking/vortex.txt\n");
+
+	printk(KERN_INFO "%s: 3Com %s %s at 0x%lx. Vers " DRV_VERSION "\n",
+		print_name,
+		pdev ? "PCI" : "EISA",
+		vci->name,
+		ioaddr);
+
+	rtdev->base_addr = ioaddr;
+	rtdev->irq = irq;
+	rtdev->mtu = mtu;
+	vp->drv_flags = vci->drv_flags;
+	vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
+	vp->io_size = vci->io_size;
+	vp->card_idx = card_idx;
+
+	/* module list only for EISA devices */
+	if (pdev == NULL) {
+		vp->next_module = root_vortex_eisa_dev;
+		root_vortex_eisa_dev = rtdev;
+	}
+
+	/* PCI-only startup logic */
+	if (pdev) {
+		/* EISA resources already marked, so only PCI needs to do this here */
+		/* Ignore return value, because Cardbus drivers already allocate for us */
+		if (!request_region(ioaddr, vci->io_size, print_name))
+			printk(KERN_INFO "rt_3c59x: request region failed\n");
+		else
+			vp->must_free_region = 1;
+
+		/* enable bus-mastering if necessary */
+		if (vci->flags & PCI_USES_MASTER)
+			pci_set_master (pdev);
+
+		if (vci->drv_flags & IS_VORTEX) {
+			u8 pci_latency;
+			u8 new_latency = 248;
+
+			/* Check the PCI latency value.  On the 3c590 series the latency timer
+			   must be set to the maximum value to avoid data corruption that occurs
+			   when the timer expires during a transfer.  This bug exists in the Vortex
+			   chip only. */
+			pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
+			if (pci_latency < new_latency) {
+				printk(KERN_INFO "%s: Overriding PCI latency"
+					" timer (CFLT) setting of %d, new value is %d.\n",
+					print_name, pci_latency, new_latency);
+				pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
+			}
+		}
+	}
+
+	rtdm_lock_init(&vp->lock);
+	spin_lock_init(&vp->mdio_lock);
+	vp->pdev = pdev;
+
+	/* Makes sure rings are at least 16 byte aligned. */
+	vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
+					+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+					&vp->rx_ring_dma);
+	retval = -ENOMEM;
+	if (vp->rx_ring == 0)
+		goto free_region;
+
+	vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
+	vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
+
+	/* if we are a PCI driver, we store info in pdev->driver_data
+	 * instead of a module list */
+	if (pdev)
+		pci_set_drvdata(pdev, rtdev);
+
+	vp->media_override = 7;
+	if (option >= 0) {
+		vp->media_override = ((option & 7) == 2)  ?  0	:  option & 15;
+		if (vp->media_override != 7)
+			vp->medialock = 1;
+		vp->full_duplex = (option & 0x200) ? 1 : 0;
+		vp->bus_master = (option & 16) ? 1 : 0;
+	}
+
+	if (card_idx < MAX_UNITS) {
+		if (full_duplex[card_idx] > 0)
+			vp->full_duplex = 1;
+		if (flow_ctrl[card_idx] > 0)
+			vp->flow_ctrl = 1;
+		if (enable_wol[card_idx] > 0)
+			vp->enable_wol = 1;
+	}
+
+	vp->force_fd = vp->full_duplex;
+	vp->options = option;
+
+	/* Read the station address from the EEPROM. */
+	EL3WINDOW(0);
+	{
+		int base;
+
+		if (vci->drv_flags & EEPROM_8BIT)
+			base = 0x230;
+		else if (vci->drv_flags & EEPROM_OFFSET)
+			base = EEPROM_Read + 0x30;
+		else
+			base = EEPROM_Read;
+
+		for (i = 0; i < 0x40; i++) {
+			int timer;
+			outw(base + i, ioaddr + Wn0EepromCmd);
+			/* Pause for at least 162 us for the read to take place. */
+			for (timer = 10; timer >= 0; timer--) {
+				udelay(162);
+				if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
+					break;
+			}
+			eeprom[i] = inw(ioaddr + Wn0EepromData);
+		}
+	}
+	for (i = 0; i < 0x18; i++)
+		checksum ^= eeprom[i];
+	checksum = (checksum ^ (checksum >> 8)) & 0xff;
+	if (checksum != 0x00) {		/* Grrr, needless incompatible change 3Com. */
+		while (i < 0x21)
+			checksum ^= eeprom[i++];
+		checksum = (checksum ^ (checksum >> 8)) & 0xff;
+	}
+	if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
+		printk(" ***INVALID CHECKSUM %4.4x*** ", checksum);
+
+	for (i = 0; i < 3; i++)
+		((u16 *)rtdev->dev_addr)[i] = htons(eeprom[i + 10]);
+	if (print_info) {
+		for (i = 0; i < 6; i++)
+			printk("%c%2.2x", i ? ':' : ' ', rtdev->dev_addr[i]);
+	}
+	EL3WINDOW(2);
+	for (i = 0; i < 6; i++)
+		outb(rtdev->dev_addr[i], ioaddr + i);
+
+#ifdef __sparc__
+	if (print_info)
+		printk(", IRQ %s\n", __irq_itoa(rtdev->irq));
+#else
+	if (print_info)
+		printk(", IRQ %d\n", rtdev->irq);
+	/* Tell them about an invalid IRQ. */
+	if (rtdev->irq <= 0 || rtdev->irq >= NR_IRQS)
+		printk(KERN_WARNING " *** Warning: IRQ %d is unlikely to work! ***\n",
+			rtdev->irq);
+#endif
+
+	EL3WINDOW(4);
+	step = (inb(ioaddr + Wn4_NetDiag) & 0x1e) >> 1;
+	if (print_info) {
+		printk(KERN_INFO "  product code %02x%02x rev %02x.%d date %02d-"
+			"%02d-%02d\n", eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
+			step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9);
+	}
+
+
+	if (pdev && vci->drv_flags & HAS_CB_FNS) {
+		unsigned long fn_st_addr;			/* Cardbus function status space */
+		unsigned short n;
+
+		fn_st_addr = pci_resource_start (pdev, 2);
+		if (fn_st_addr) {
+			vp->cb_fn_base = ioremap(fn_st_addr, 128);
+			retval = -ENOMEM;
+			if (!vp->cb_fn_base)
+				goto free_ring;
+		}
+		if (print_info) {
+			printk(KERN_INFO "%s: CardBus functions mapped %8.8lx->%p\n",
+				print_name, fn_st_addr, vp->cb_fn_base);
+		}
+		EL3WINDOW(2);
+
+		n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010;
+		if (vp->drv_flags & INVERT_LED_PWR)
+			n |= 0x10;
+		if (vp->drv_flags & INVERT_MII_PWR)
+			n |= 0x4000;
+		outw(n, ioaddr + Wn2_ResetOptions);
+	}
+
+	/* Extract our information from the EEPROM data. */
+	vp->info1 = eeprom[13];
+	vp->info2 = eeprom[15];
+	vp->capabilities = eeprom[16];
+
+	if (vp->info1 & 0x8000) {
+		vp->full_duplex = 1;
+		if (print_info)
+			printk(KERN_INFO "Full duplex capable\n");
+	}
+
+	{
+		static const char * ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+		unsigned int config;
+		EL3WINDOW(3);
+		vp->available_media = inw(ioaddr + Wn3_Options);
+		if ((vp->available_media & 0xff) == 0)		/* Broken 3c916 */
+			vp->available_media = 0x40;
+		config = inl(ioaddr + Wn3_Config);
+		if (print_info) {
+			printk(KERN_DEBUG "  Internal config register is %4.4x, "
+				"transceivers %#x.\n", config, inw(ioaddr + Wn3_Options));
+			printk(KERN_INFO "  %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
+				8 << RAM_SIZE(config),
+				RAM_WIDTH(config) ? "word" : "byte",
+				ram_split[RAM_SPLIT(config)],
+				AUTOSELECT(config) ? "autoselect/" : "",
+				XCVR(config) > XCVR_ExtMII ? "<invalid transceiver>" :
+				media_tbl[XCVR(config)].name);
+		}
+		vp->default_media = XCVR(config);
+		if (vp->default_media == XCVR_NWAY)
+			vp->has_nway = 1;
+		vp->autoselect = AUTOSELECT(config);
+	}
+
+	if (vp->media_override != 7) {
+		printk(KERN_INFO "%s:  Media override to transceiver type %d (%s).\n",
+			print_name, vp->media_override,
+			media_tbl[vp->media_override].name);
+		rtdev->if_port = vp->media_override;
+	} else
+		rtdev->if_port = vp->default_media;
+
+	if (rtdev->if_port == XCVR_MII || rtdev->if_port == XCVR_NWAY) {
+		int phy, phy_idx = 0;
+		EL3WINDOW(4);
+		mii_preamble_required++;
+		mii_preamble_required++;
+		mdio_read(rtdev, 24, 1);
+		for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
+			int mii_status, phyx;
+
+			/*
+			 * For the 3c905CX we look at index 24 first, because it bogusly
+			 * reports an external PHY at all indices
+			 */
+			if (phy == 0)
+				phyx = 24;
+			else if (phy <= 24)
+				phyx = phy - 1;
+			else
+				phyx = phy;
+			mii_status = mdio_read(rtdev, phyx, 1);
+			if (mii_status	&&  mii_status != 0xffff) {
+				vp->phys[phy_idx++] = phyx;
+				if (print_info) {
+					printk(KERN_INFO "  MII transceiver found at address %d,"
+						" status %4x.\n", phyx, mii_status);
+				}
+				if ((mii_status & 0x0040) == 0)
+					mii_preamble_required++;
+			}
+		}
+		mii_preamble_required--;
+		if (phy_idx == 0) {
+			printk(KERN_WARNING"  ***WARNING*** No MII transceivers found!\n");
+			vp->phys[0] = 24;
+		} else {
+			vp->advertising = mdio_read(rtdev, vp->phys[0], 4);
+			if (vp->full_duplex) {
+				/* Only advertise the FD media types. */
+				vp->advertising &= ~0x02A0;
+				mdio_write(rtdev, vp->phys[0], 4, vp->advertising);
+			}
+		}
+	}
+
+	if (vp->capabilities & CapBusMaster) {
+		vp->full_bus_master_tx = 1;
+		if (print_info) {
+			printk(KERN_INFO "  Enabling bus-master transmits and %s receives.\n",
+				(vp->info2 & 1) ? "early" : "whole-frame" );
+		}
+		vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
+		vp->bus_master = 0;		/* AKPM: vortex only */
+	}
+
+	// *** RTnet ***
+	/* The 3c59x-specific entries in the device structure. */
+	rtdev->open = vortex_open;
+	if (vp->full_bus_master_tx) {
+		rtdev->hard_start_xmit = boomerang_start_xmit;
+		/* Actually, it still should work with iommu. */
+		rtdev->features |= NETIF_F_SG;
+		if (((hw_checksums[card_idx] == -1) && (vp->drv_flags & HAS_HWCKSM)) ||
+			(hw_checksums[card_idx] == 1)) {
+			rtdev->features |= NETIF_F_IP_CSUM;
+		}
+	} else {
+		rtdev->hard_start_xmit = vortex_start_xmit;
+	}
+	rtdev->get_stats = vortex_get_stats;
+
+	if (print_info) {
+		printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n",
+			print_name,
+			(rtdev->features & NETIF_F_SG) ? "en":"dis",
+			(rtdev->features & NETIF_F_IP_CSUM) ? "en":"dis");
+	}
+
+	rtdev->stop = vortex_close;
+	retval = rt_register_rtnetdev(rtdev);
+	if (retval) {
+		printk(KERN_ERR "rt_3c59x: rtnet device registration failed %d\n",retval);
+		goto free_ring;
+	}
+	return 0;
+
+	// *** RTnet ***
+
+  free_ring:
+	pci_free_consistent(pdev,
+			sizeof(struct boom_rx_desc) * RX_RING_SIZE
+			+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+			vp->rx_ring,
+			vp->rx_ring_dma);
+  free_region:
+	if (vp->must_free_region)
+		release_region(ioaddr, vci->io_size);
+	rtdev_free (rtdev);
+	printk(KERN_ERR PFX "vortex_probe1 fails.  Returns %d\n", retval);
+  out:
+	return retval;
+}
+
+static void
+issue_and_wait(struct rtnet_device *rtdev, int cmd)
+{
+	int i;
+
+	outw(cmd, rtdev->base_addr + EL3_CMD);
+	for (i = 0; i < 2000; i++) {
+		if (!(inw(rtdev->base_addr + EL3_STATUS) & CmdInProgress))
+			return;
+	}
+
+	/* OK, that didn't work.  Do it the slow way.  One second */
+	for (i = 0; i < 100000; i++) {
+		if (!(inw(rtdev->base_addr + EL3_STATUS) & CmdInProgress)) {
+			if (vortex_debug > 1)
+				rtdm_printk(KERN_INFO "%s: command 0x%04x took %d usecs\n",
+					rtdev->name, cmd, i * 10);
+			return;
+		}
+		udelay(10);
+	}
+	rtdm_printk(KERN_ERR "%s: command 0x%04x did not complete! Status=0x%x\n",
+		rtdev->name, cmd, inw(rtdev->base_addr + EL3_STATUS));
+}
+
+static void
+vortex_up(struct rtnet_device *rtdev)
+{
+	long ioaddr = rtdev->base_addr;
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	unsigned int config;
+	int i;
+
+	if (vp->pdev && vp->enable_wol) {
+		pci_set_power_state(vp->pdev, 0);	/* Go active */
+		pci_restore_state(vp->pdev, vp->power_state);
+	}
+
+	/* Before initializing select the active media port. */
+	EL3WINDOW(3);
+	config = inl(ioaddr + Wn3_Config);
+
+	if (vp->media_override != 7) {
+		printk(KERN_INFO "%s: Media override to transceiver %d (%s).\n",
+			rtdev->name, vp->media_override,
+			media_tbl[vp->media_override].name);
+		rtdev->if_port = vp->media_override;
+	} else if (vp->autoselect) {
+		if (vp->has_nway) {
+			if (vortex_debug > 1)
+				printk(KERN_INFO "%s: using NWAY device table, not %d\n",
+					rtdev->name, rtdev->if_port);
+			rtdev->if_port = XCVR_NWAY;
+		} else {
+			/* Find first available media type, starting with 100baseTx. */
+			rtdev->if_port = XCVR_100baseTx;
+			while (! (vp->available_media & media_tbl[rtdev->if_port].mask))
+				rtdev->if_port = media_tbl[rtdev->if_port].next;
+			if (vortex_debug > 1)
+				printk(KERN_INFO "%s: first available media type: %s\n",
+					rtdev->name, media_tbl[rtdev->if_port].name);
+		}
+	} else {
+		rtdev->if_port = vp->default_media;
+		if (vortex_debug > 1)
+			printk(KERN_INFO "%s: using default media %s\n",
+				rtdev->name, media_tbl[rtdev->if_port].name);
+	}
+
+	init_timer(&vp->timer);
+	vp->timer.expires = RUN_AT(media_tbl[rtdev->if_port].wait);
+	vp->timer.data = (unsigned long)rtdev;
+	// *** RTnet  vp->timer.function = vortex_timer;		/* timer handler */
+	// *** RTnet  add_timer(&vp->timer);
+
+	init_timer(&vp->rx_oom_timer);
+	vp->rx_oom_timer.data = (unsigned long)rtdev;
+	// **** RTnet *** vp->rx_oom_timer.function = rx_oom_timer;
+
+	if (vortex_debug > 1)
+		printk(KERN_DEBUG "%s: Initial media type %s.\n",
+			rtdev->name, media_tbl[rtdev->if_port].name);
+
+	vp->full_duplex = vp->force_fd;
+	config = BFINS(config, rtdev->if_port, 20, 4);
+	if (vortex_debug > 6)
+		printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config);
+	outl(config, ioaddr + Wn3_Config);
+
+	if (rtdev->if_port == XCVR_MII || rtdev->if_port == XCVR_NWAY) {
+		int mii_reg1, mii_reg5;
+		EL3WINDOW(4);
+		/* Read BMSR (reg1) only to clear old status. */
+		mii_reg1 = mdio_read(rtdev, vp->phys[0], 1);
+		mii_reg5 = mdio_read(rtdev, vp->phys[0], 5);
+		if (mii_reg5 == 0xffff	||  mii_reg5 == 0x0000)
+			;					/* No MII device or no link partner report */
+		else if ((mii_reg5 & 0x0100) != 0	/* 100baseTx-FD */
+			|| (mii_reg5 & 0x00C0) == 0x0040) /* 10T-FD, but not 100-HD */
+			vp->full_duplex = 1;
+		vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
+		if (vortex_debug > 1)
+			printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x,"
+				" info1 %04x, setting %s-duplex.\n",
+				rtdev->name, vp->phys[0],
+				mii_reg1, mii_reg5,
+				vp->info1, ((vp->info1 & 0x8000) || vp->full_duplex) ? "full" : "half");
+		EL3WINDOW(3);
+	}
+
+	/* Set the full-duplex bit. */
+	outw(	((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
+		(rtdev->mtu > 1500 ? 0x40 : 0) |
+		((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
+		ioaddr + Wn3_MAC_Ctrl);
+
+	if (vortex_debug > 1) {
+		printk(KERN_DEBUG "%s: vortex_up() InternalConfig %8.8x.\n",
+			rtdev->name, config);
+	}
+
+	issue_and_wait(rtdev, TxReset);
+	/*
+	 * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
+	 */
+	issue_and_wait(rtdev, RxReset|0x04);
+
+	outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+
+	if (vortex_debug > 1) {
+		EL3WINDOW(4);
+		printk(KERN_DEBUG "%s: vortex_up() irq %d media status %4.4x.\n",
+			rtdev->name, rtdev->irq, inw(ioaddr + Wn4_Media));
+	}
+
+	/* Set the station address and mask in window 2 each time opened. */
+	EL3WINDOW(2);
+	for (i = 0; i < 6; i++)
+		outb(rtdev->dev_addr[i], ioaddr + i);
+	for (; i < 12; i+=2)
+		outw(0, ioaddr + i);
+
+	if (vp->cb_fn_base) {
+		unsigned short n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010;
+		if (vp->drv_flags & INVERT_LED_PWR)
+			n |= 0x10;
+		if (vp->drv_flags & INVERT_MII_PWR)
+			n |= 0x4000;
+		outw(n, ioaddr + Wn2_ResetOptions);
+	}
+
+	if (rtdev->if_port == XCVR_10base2)
+		/* Start the thinnet transceiver. We should really wait 50ms...*/
+		outw(StartCoax, ioaddr + EL3_CMD);
+	if (rtdev->if_port != XCVR_NWAY) {
+		EL3WINDOW(4);
+		outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) |
+			media_tbl[rtdev->if_port].media_bits, ioaddr + Wn4_Media);
+	}
+
+	/* Switch to the stats window, and clear all stats by reading. */
+	outw(StatsDisable, ioaddr + EL3_CMD);
+	EL3WINDOW(6);
+	for (i = 0; i < 10; i++)
+		inb(ioaddr + i);
+	inw(ioaddr + 10);
+	inw(ioaddr + 12);
+	/* New: On the Vortex we must also clear the BadSSD counter. */
+	EL3WINDOW(4);
+	inb(ioaddr + 12);
+	/* ..and on the Boomerang we enable the extra statistics bits. */
+	outw(0x0040, ioaddr + Wn4_NetDiag);
+
+	/* Switch to register set 7 for normal use. */
+	EL3WINDOW(7);
+
+	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
+		vp->cur_rx = vp->dirty_rx = 0;
+		/* Initialize the RxEarly register as recommended. */
+		outw(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
+		outl(0x0020, ioaddr + PktStatus);
+		outl(vp->rx_ring_dma, ioaddr + UpListPtr);
+	}
+	if (vp->full_bus_master_tx) {		/* Boomerang bus master Tx. */
+		vp->cur_tx = vp->dirty_tx = 0;
+		if (vp->drv_flags & IS_BOOMERANG)
+			outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
+		/* Clear the Rx, Tx rings. */
+		for (i = 0; i < RX_RING_SIZE; i++)	/* AKPM: this is done in vortex_open, too */
+			vp->rx_ring[i].status = 0;
+		for (i = 0; i < TX_RING_SIZE; i++)
+			vp->tx_skbuff[i] = 0;
+		outl(0, ioaddr + DownListPtr);
+	}
+	/* Set receiver mode: presumably accept broadcast and physical address only. */
+	set_rx_mode(rtdev);
+	outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+
+//	issue_and_wait(dev, SetTxStart|0x07ff);
+	outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+	outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+	/* Allow status bits to be seen. */
+	vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
+		(vp->full_bus_master_tx ? DownComplete : TxAvailable) |
+		(vp->full_bus_master_rx ? UpComplete : RxComplete) |
+		(vp->bus_master ? DMADone : 0);
+	vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
+		(vp->full_bus_master_rx ? 0 : RxComplete) |
+		StatsFull | HostError | TxComplete | IntReq
+		| (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
+	outw(vp->status_enable, ioaddr + EL3_CMD);
+	/* Ack all pending events, and set active indicator mask. */
+	outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+		ioaddr + EL3_CMD);
+	outw(vp->intr_enable, ioaddr + EL3_CMD);
+	if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
+		writel(0x8000, vp->cb_fn_base + 4);
+	rtnetif_start_queue (rtdev);
+}
+
+static int
+vortex_open(struct rtnet_device *rtdev)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	int i;
+	int retval;
+
+	// *** RTnet ***
+	rt_stack_connect(rtdev, &STACK_manager);
+
+	if ((retval = rtdm_irq_request(&vp->irq_handle, rtdev->irq,
+						(vp->full_bus_master_rx ? boomerang_interrupt : vortex_interrupt),
+						0, "rt_3c59x", rtdev))) {
+		printk(KERN_ERR "%s: Could not reserve IRQ %d\n", rtdev->name, rtdev->irq);
+		goto out;
+	}
+	// *** RTnet ***
+
+	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
+		if (vortex_debug > 2)
+			printk(KERN_DEBUG "%s:	Filling in the Rx ring.\n", rtdev->name);
+		for (i = 0; i < RX_RING_SIZE; i++) {
+			struct rtskb *skb; // *** RTnet
+			vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
+			vp->rx_ring[i].status = 0;	/* Clear complete bit. */
+			vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
+			skb = rtnetdev_alloc_rtskb(rtdev, PKT_BUF_SZ);
+			vp->rx_skbuff[i] = skb;
+			if (skb == NULL)
+				break;			/* Bad news!  */
+			// *** RTnet ***
+			rtskb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
+			vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(vp->pdev,
+													skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+			// *** RTnet ***
+		}
+		if (i != RX_RING_SIZE) {
+			int j;
+			printk(KERN_EMERG "%s: no memory for rx ring\n", rtdev->name);
+			for (j = 0; j < i; j++) {
+				if (vp->rx_skbuff[j]) {
+					dev_kfree_rtskb(vp->rx_skbuff[j]);
+					vp->rx_skbuff[j] = 0;
+				}
+			}
+			retval = -ENOMEM;
+			goto out_free_irq;
+		}
+		/* Wrap the ring. */
+		vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
+	}
+
+	vortex_up(rtdev);
+	return 0;
+
+  out_free_irq:
+
+	// *** RTnet ***
+	if ( (i=rtdm_irq_free(&vp->irq_handle))<0 )
+		return i;
+	rt_stack_disconnect(rtdev);
+	// *** RTnet ***
+  out:
+	if (vortex_debug > 1)
+		printk(KERN_ERR "%s: vortex_open() fails: returning %d\n", rtdev->name, retval);
+	return retval;
+}
+
+/*
+ * Handle uncommon interrupt sources.  This is a separate routine to minimize
+ * the cache impact.
+ */
+static void
+vortex_error(struct rtnet_device *rtdev, int status, nanosecs_abs_t *time_stamp)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	int do_tx_reset = 0, reset_mask = 0;
+	unsigned char tx_status = 0;
+	int packets=0;
+
+	if (vortex_debug > 2) {
+		rtdm_printk(KERN_ERR "%s: vortex_error(), status=0x%x\n", rtdev->name, status);
+	}
+
+	if (status & TxComplete) {			/* Really "TxError" for us. */
+		tx_status = inb(ioaddr + TxStatus);
+		/* Presumably a tx-timeout. We must merely re-enable. */
+		if (vortex_debug > 2
+			|| (tx_status != 0x88 && vortex_debug > 0)) {
+			rtdm_printk(KERN_ERR "%s: Transmit error, Tx status register %2.2x.\n",
+				rtdev->name, tx_status);
+			if (tx_status == 0x82) {
+				rtdm_printk(KERN_ERR "Probably a duplex mismatch.  See "
+					"Documentation/networking/vortex.txt\n");
+			}
+			dump_tx_ring(rtdev);
+		}
+		if (tx_status & 0x14)  vp->stats.tx_fifo_errors++;
+		if (tx_status & 0x38)  vp->stats.tx_aborted_errors++;
+		outb(0, ioaddr + TxStatus);
+		if (tx_status & 0x30) {			/* txJabber or txUnderrun */
+			do_tx_reset = 1;
+		} else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) {	/* maxCollisions */
+			do_tx_reset = 1;
+			reset_mask = 0x0108;		/* Reset interface logic, but not download logic */
+		} else {						/* Merely re-enable the transmitter. */
+			outw(TxEnable, ioaddr + EL3_CMD);
+		}
+	}
+
+	if (status & RxEarly) {				/* Rx early is unused. */
+		vortex_rx(rtdev, &packets, time_stamp);
+		outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+	}
+	if (status & StatsFull) {			/* Empty statistics. */
+		static int DoneDidThat;
+		if (vortex_debug > 4)
+			rtdm_printk(KERN_DEBUG "%s: Updating stats.\n", rtdev->name);
+		// *** RTnet *** update_stats(ioaddr, dev);
+		/* HACK: Disable statistics as an interrupt source. */
+		/* This occurs when we have the wrong media type! */
+		if (DoneDidThat == 0  &&
+			inw(ioaddr + EL3_STATUS) & StatsFull) {
+			rtdm_printk(KERN_WARNING "%s: Updating statistics failed, disabling "
+				"stats as an interrupt source.\n", rtdev->name);
+			EL3WINDOW(5);
+			outw(SetIntrEnb | (inw(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD);
+			vp->intr_enable &= ~StatsFull;
+			EL3WINDOW(7);
+			DoneDidThat++;
+		}
+	}
+	if (status & IntReq) {		/* Restore all interrupt sources.  */
+		outw(vp->status_enable, ioaddr + EL3_CMD);
+		outw(vp->intr_enable, ioaddr + EL3_CMD);
+	}
+	if (status & HostError) {
+		u16 fifo_diag;
+		EL3WINDOW(4);
+		fifo_diag = inw(ioaddr + Wn4_FIFODiag);
+		rtdm_printk(KERN_ERR "%s: Host error, FIFO diagnostic register %4.4x.\n",
+			rtdev->name, fifo_diag);
+		/* Adapter failure requires Tx/Rx reset and reinit. */
+		if (vp->full_bus_master_tx) {
+			int bus_status = inl(ioaddr + PktStatus);
+			/* 0x80000000 PCI master abort. */
+			/* 0x40000000 PCI target abort. */
+			if (vortex_debug)
+				rtdm_printk(KERN_ERR "%s: PCI bus error, bus status %8.8x\n", rtdev->name, bus_status);
+
+			/* In this case, blow the card away */
+			vortex_down(rtdev);
+			issue_and_wait(rtdev, TotalReset | 0xff);
+			vortex_up(rtdev);		/* AKPM: bug.  vortex_up() assumes that the rx ring is full. It may not be. */
+		} else if (fifo_diag & 0x0400)
+			do_tx_reset = 1;
+		if (fifo_diag & 0x3000) {
+			/* Reset Rx fifo and upload logic */
+			issue_and_wait(rtdev, RxReset|0x07);
+			/* Set the Rx filter to the current state. */
+			set_rx_mode(rtdev);
+			outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+			outw(AckIntr | HostError, ioaddr + EL3_CMD);
+		}
+	}
+
+	if (do_tx_reset) {
+		issue_and_wait(rtdev, TxReset|reset_mask);
+		outw(TxEnable, ioaddr + EL3_CMD);
+		if (!vp->full_bus_master_tx)
+			rtnetif_wake_queue(rtdev);
+	}
+}
+
+static int
+vortex_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	rtdm_lockctx_t context;
+
+	/* Put out the doubleword header... */
+	outl(skb->len, ioaddr + TX_FIFO);
+	if (vp->bus_master) {
+		/* Set the bus-master controller to transfer the packet. */
+		int len = (skb->len + 3) & ~3;
+		outl(	vp->tx_skb_dma = pci_map_single(vp->pdev, skb->data,
+							len, PCI_DMA_TODEVICE),
+			ioaddr + Wn7_MasterAddr);
+		outw(len, ioaddr + Wn7_MasterLen);
+		vp->tx_skb = skb;
+
+		rtdm_lock_irqsave(context);
+		if (unlikely(skb->xmit_stamp != NULL))
+			*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() +
+						*skb->xmit_stamp);
+		outw(StartDMADown, ioaddr + EL3_CMD);
+		rtdm_lock_irqrestore(context);
+
+		/* rtnetif_wake_queue() will be called at the DMADone interrupt. */
+	} else {
+		rtdm_printk("rt_3c59x: UNSUPPORTED CODE PATH (device is lacking DMA support)!\n");
+		/* ... and the packet rounded to a doubleword. */
+		outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+		dev_kfree_rtskb (skb);
+		if (inw(ioaddr + TxFree) > 1536) {
+			rtnetif_start_queue (rtdev);	/* AKPM: redundant? */
+		} else {
+			/* Interrupt us when the FIFO has room for max-sized packet. */
+			rtnetif_stop_queue(rtdev);
+			outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+		}
+	}
+
+	//rtdev->trans_start = jiffies;
+
+	/* Clear the Tx status stack. */
+	{
+		int tx_status;
+		int i = 32;
+
+		while (--i > 0	&&	(tx_status = inb(ioaddr + TxStatus)) > 0) {
+			if (tx_status & 0x3C) {		/* A Tx-disabling error occurred.  */
+				if (vortex_debug > 2)
+					printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
+						rtdev->name, tx_status);
+				if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
+				if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
+				if (tx_status & 0x30) {
+					issue_and_wait(rtdev, TxReset);
+				}
+				outw(TxEnable, ioaddr + EL3_CMD);
+			}
+			outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
+		}
+	}
+	return 0;
+}
+
+static int
+boomerang_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	/* Calculate the next Tx descriptor entry. */
+	int entry = vp->cur_tx % TX_RING_SIZE;
+	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
+	rtdm_lockctx_t context;
+
+	if (vortex_debug > 6) {
+		rtdm_printk(KERN_DEBUG "boomerang_start_xmit()\n");
+		if (vortex_debug > 3)
+			rtdm_printk(KERN_DEBUG "%s: Trying to send a packet, Tx index %d.\n",
+				rtdev->name, vp->cur_tx);
+	}
+
+	if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
+		if (vortex_debug > 0)
+			rtdm_printk(KERN_WARNING "%s: BUG! Tx Ring full, refusing to send buffer.\n",
+				rtdev->name);
+		rtnetif_stop_queue(rtdev);
+		return 1;
+	}
+
+	vp->tx_skbuff[entry] = skb;
+
+	vp->tx_ring[entry].next = 0;
+#if DO_ZEROCOPY
+	if (skb->ip_summed != CHECKSUM_HW)
+		vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
+	else
+		vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum);
+
+	if (!skb_shinfo(skb)->nr_frags) {
+		{
+//            int j;
+//            for (j=0; j<skb->len; j++)
+//            {
+//                rtdm_printk("%02x ", skb->data[j]);
+//            }
+
+		}
+		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(vp->pdev,
+														skb->data, skb->len, PCI_DMA_TODEVICE));
+		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
+	} else {
+		int i;
+
+		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(vp->pdev,
+														skb->data, skb->len, PCI_DMA_TODEVICE));
+		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len);
+
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+			vp->tx_ring[entry].frag[i+1].addr =
+				cpu_to_le32(pci_map_single(vp->pdev, // *** RTnet: page mapping correct? Or is this code never used?
+								(void*)page_address(frag->page) + frag->page_offset,
+								frag->size, PCI_DMA_TODEVICE));
+
+			if (i == skb_shinfo(skb)->nr_frags-1)
+				vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
+			else
+				vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
+		}
+	}
+#else
+	vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(vp->pdev,
+											skb->data, skb->len, PCI_DMA_TODEVICE));
+	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
+	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
+#endif
+
+	// *** RTnet ***
+	rtdm_irq_disable(&vp->irq_handle);
+	rtdm_lock_get(&vp->lock);
+	// *** RTnet ***
+
+	/* Wait for the stall to complete. */
+	issue_and_wait(rtdev, DownStall);
+
+	rtdm_lock_irqsave(context);
+	if (unlikely(skb->xmit_stamp != NULL))
+		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+
+	prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
+	if (inl(ioaddr + DownListPtr) == 0) {
+		outl(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
+		vp->queued_packet++;
+	}
+
+	vp->cur_tx++;
+	if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
+		rtnetif_stop_queue (rtdev);
+	} else {					/* Clear previous interrupt enable. */
+#if defined(tx_interrupt_mitigation)
+		/* Dubious. If in boomerang_interrupt the "faster" cyclone ifdef
+		 * were selected, this would corrupt DN_COMPLETE. No?
+		 */
+		prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
+#endif
+	}
+	outw(DownUnstall, ioaddr + EL3_CMD);
+	rtdm_lock_put_irqrestore(&vp->lock, context);
+	rtdm_irq_enable(&vp->irq_handle);
+	//rtdev->trans_start = jiffies;
+	return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread. */
+
+/*
+ * This is the ISR for the vortex series chips.
+ * full_bus_master_tx == 0 && full_bus_master_rx == 0
+ */
+
+static int vortex_interrupt(rtdm_irq_t *irq_handle)
+{
+	// *** RTnet ***
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+	struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	int packets = 0;
+	// *** RTnet ***
+
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr;
+	int status;
+	int work_done = max_interrupt_work;
+
+	ioaddr = rtdev->base_addr;
+	rtdm_lock_get(&vp->lock);
+
+	status = inw(ioaddr + EL3_STATUS);
+
+	if (vortex_debug > 6)
+		printk("vortex_interrupt(). status=0x%4x\n", status);
+
+	if ((status & IntLatch) == 0)
+		goto handler_exit;		/* No interrupt: shared IRQs cause this */
+
+	if (status & IntReq) {
+		status |= vp->deferred;
+		vp->deferred = 0;
+	}
+
+	if (status == 0xffff)		/* h/w no longer present (hotplug)? */
+		goto handler_exit;
+
+	if (vortex_debug > 4)
+		rtdm_printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
+			rtdev->name, status, inb(ioaddr + Timer));
+
+	do {
+		if (vortex_debug > 5)
+			rtdm_printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
+				rtdev->name, status);
+		if (status & RxComplete)
+			vortex_rx(rtdev, &packets, &time_stamp);
+
+		if (status & TxAvailable) {
+			if (vortex_debug > 5)
+				rtdm_printk(KERN_DEBUG "	TX room bit was handled.\n");
+			/* There's room in the FIFO for a full-sized packet. */
+			outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+			rtnetif_wake_queue (rtdev);
+		}
+
+		if (status & DMADone) {
+			if (inw(ioaddr + Wn7_MasterStatus) & 0x1000) {
+				outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
+				pci_unmap_single(vp->pdev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
+				dev_kfree_rtskb(vp->tx_skb); /* Release the transferred buffer */
+				if (inw(ioaddr + TxFree) > 1536) {
+					/*
+					 * AKPM: FIXME: I don't think we need this.  If the queue was stopped due to
+					 * insufficient FIFO room, the TxAvailable test will succeed and call
+					 * rtnetif_wake_queue()
+					 */
+					rtnetif_wake_queue(rtdev);
+				} else { /* Interrupt when FIFO has room for max-sized packet. */
+					outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+					rtnetif_stop_queue(rtdev);
+				}
+			}
+		}
+		/* Check for all uncommon interrupts at once. */
+		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
+			if (status == 0xffff)
+				break;
+			vortex_error(rtdev, status, &time_stamp);
+		}
+
+		if (--work_done < 0) {
+			rtdm_printk(KERN_WARNING "%s: Too much work in interrupt, status "
+				"%4.4x.\n", rtdev->name, status);
+			/* Disable all pending interrupts. */
+			do {
+				vp->deferred |= status;
+				outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
+					ioaddr + EL3_CMD);
+				outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
+			} while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
+			/* The timer will reenable interrupts. */
+			mod_timer(&vp->timer, jiffies + 1*HZ);
+			break;
+		}
+		/* Acknowledge the IRQ. */
+		outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+	} while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
+
+	if (vortex_debug > 4)
+		rtdm_printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
+			rtdev->name, status);
+  handler_exit:
+	rtdm_lock_put(&vp->lock);
+	if (packets > 0)
+		rt_mark_stack_mgr(rtdev);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+/*
+ * This is the ISR for the boomerang series chips.
+ * full_bus_master_tx == 1 && full_bus_master_rx == 1
+ */
+
+static int boomerang_interrupt(rtdm_irq_t *irq_handle)
+{
+	// *** RTnet ***
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+	struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	int packets = 0;
+	// *** RTnet ***
+
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr;
+	int status;
+	int work_done = max_interrupt_work;
+
+	ioaddr = rtdev->base_addr;
+
+	/*
+	 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
+	 * and boomerang_start_xmit
+	 */
+	rtdm_lock_get(&vp->lock);
+
+	status = inw(ioaddr + EL3_STATUS);
+
+	if (vortex_debug > 6)
+		rtdm_printk(KERN_DEBUG "boomerang_interrupt. status=0x%4x\n", status);
+
+	if ((status & IntLatch) == 0)
+		goto handler_exit;		/* No interrupt: shared IRQs can cause this */
+
+	if (status == 0xffff) {		/* h/w no longer present (hotplug)? */
+		if (vortex_debug > 1)
+			rtdm_printk(KERN_DEBUG "boomerang_interrupt(1): status = 0xffff\n");
+		goto handler_exit;
+	}
+
+	if (status & IntReq) {
+		status |= vp->deferred;
+		vp->deferred = 0;
+	}
+
+	if (vortex_debug > 4)
+		rtdm_printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
+			rtdev->name, status, inb(ioaddr + Timer));
+	do {
+		if (vortex_debug > 5)
+			rtdm_printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
+				rtdev->name, status);
+		if (status & UpComplete) {
+			outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
+			if (vortex_debug > 5)
+				rtdm_printk(KERN_DEBUG "boomerang_interrupt->boomerang_rx\n");
+			boomerang_rx(rtdev, &packets, &time_stamp);
+		}
+
+		if (status & DownComplete) {
+			unsigned int dirty_tx = vp->dirty_tx;
+
+			outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
+			while (vp->cur_tx - dirty_tx > 0) {
+				int entry = dirty_tx % TX_RING_SIZE;
+				if (inl(ioaddr + DownListPtr) ==
+					vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
+					break;			/* It still hasn't been processed. */
+
+				if (vp->tx_skbuff[entry]) {
+					struct rtskb *skb = vp->tx_skbuff[entry];
+#if DO_ZEROCOPY
+					int i;
+					for (i=0; i<=skb_shinfo(skb)->nr_frags; i++)
+						pci_unmap_single(vp->pdev,
+								le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
+								le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
+								PCI_DMA_TODEVICE);
+#else
+					pci_unmap_single(vp->pdev,
+							le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
+#endif
+					dev_kfree_rtskb(skb);
+					vp->tx_skbuff[entry] = 0;
+				} else {
+					rtdm_printk(KERN_DEBUG "boomerang_interrupt: no skb!\n");
+				}
+				/* vp->stats.tx_packets++;  Counted below. */
+				dirty_tx++;
+			}
+			vp->dirty_tx = dirty_tx;
+			if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
+				if (vortex_debug > 6)
+					rtdm_printk(KERN_DEBUG "boomerang_interrupt: wake queue\n");
+				rtnetif_wake_queue (rtdev);
+			}
+		}
+
+		/* Check for all uncommon interrupts at once. */
+		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
+			vortex_error(rtdev, status, &time_stamp);
+
+		if (--work_done < 0) {
+			rtdm_printk(KERN_WARNING "%s: Too much work in interrupt, status "
+				"%4.4x.\n", rtdev->name, status);
+			/* Disable all pending interrupts. */
+			do {
+				vp->deferred |= status;
+				outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
+					ioaddr + EL3_CMD);
+				outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
+			} while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
+			/* The timer will reenable interrupts. */
+			mod_timer(&vp->timer, jiffies + 1*HZ);
+			break;
+		}
+		/* Acknowledge the IRQ. */
+		outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+		if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
+			writel(0x8000, vp->cb_fn_base + 4);
+
+	} while ((status = inw(ioaddr + EL3_STATUS)) & IntLatch);
+
+	if (vortex_debug > 4)
+		rtdm_printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
+			rtdev->name, status);
+  handler_exit:
+	rtdm_lock_put(&vp->lock);
+	if (packets > 0)
+		rt_mark_stack_mgr(rtdev);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int vortex_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	int i;
+	short rx_status;
+
+	if (vortex_debug > 5)
+		rtdm_printk(KERN_DEBUG "vortex_rx(): status %4.4x, rx_status %4.4x.\n",
+			inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
+	while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
+		if (rx_status & 0x4000) { /* Error, update stats. */
+			unsigned char rx_error = inb(ioaddr + RxErrors);
+			if (vortex_debug > 2)
+				rtdm_printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
+			vp->stats.rx_errors++;
+			if (rx_error & 0x01)  vp->stats.rx_over_errors++;
+			if (rx_error & 0x02)  vp->stats.rx_length_errors++;
+			if (rx_error & 0x04)  vp->stats.rx_frame_errors++;
+			if (rx_error & 0x08)  vp->stats.rx_crc_errors++;
+			if (rx_error & 0x10)  vp->stats.rx_length_errors++;
+		} else {
+			/* The packet length: up to 4.5K. */
+			int pkt_len = rx_status & 0x1fff;
+			struct rtskb *skb;
+
+			skb = rtnetdev_alloc_rtskb(rtdev, pkt_len + 5);
+			if (vortex_debug > 4)
+				rtdm_printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
+					pkt_len, rx_status);
+			if (skb != NULL) {
+				rtskb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
+				/* 'skb_put()' points to the start of sk_buff data area. */
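+				/* If the single-frame bus-master engine is idle, let it DMA
+				   the frame out of the Rx FIFO and poll for completion;
+				   otherwise drain the FIFO with programmed I/O (insl). */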
+				if (vp->bus_master &&
+					! (inw(ioaddr + Wn7_MasterStatus) & 0x8000)) {
+					dma_addr_t dma = pci_map_single(vp->pdev,
+									rtskb_put(skb, pkt_len),
+									pkt_len, PCI_DMA_FROMDEVICE);
+					outl(dma, ioaddr + Wn7_MasterAddr);
+					outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
+					outw(StartDMAUp, ioaddr + EL3_CMD);
+					while (inw(ioaddr + Wn7_MasterStatus) & 0x8000)
+						;
+					pci_unmap_single(vp->pdev, dma, pkt_len, PCI_DMA_FROMDEVICE);
+				} else {
+					insl(ioaddr + RX_FIFO, rtskb_put(skb, pkt_len),
+						(pkt_len + 3) >> 2);
+				}
+				outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+				skb->protocol = rt_eth_type_trans(skb, rtdev);
+				skb->time_stamp = *time_stamp;
+				rtnetif_rx(skb);
+				//rtdev->last_rx = jiffies;
+				vp->stats.rx_packets++;
+				(*packets)++;
+
+				/* Wait a limited time to go to next packet. */
+				for (i = 200; i >= 0; i--)
+					if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+						break;
+				continue;
+			} else if (vortex_debug > 0)
+				rtdm_printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
+					"size %d.\n", rtdev->name, pkt_len);
+		}
+		vp->stats.rx_dropped++;
+		issue_and_wait(rtdev, RxDiscard);
+	}
+
+	return 0;
+}
+
+static int
+boomerang_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	int entry = vp->cur_rx % RX_RING_SIZE;
+	long ioaddr = rtdev->base_addr;
+	int rx_status;
+	int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
+
+
+	if (vortex_debug > 5)
+		rtdm_printk(KERN_DEBUG "boomerang_rx(): status %4.4x\n", inw(ioaddr+EL3_STATUS));
+
+	while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
+		if (--rx_work_limit < 0)
+			break;
+		if (rx_status & RxDError) { /* Error, update stats. */
+			unsigned char rx_error = rx_status >> 16;
+			if (vortex_debug > 2)
+				rtdm_printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
+			vp->stats.rx_errors++;
+			if (rx_error & 0x01)  vp->stats.rx_over_errors++;
+			if (rx_error & 0x02)  vp->stats.rx_length_errors++;
+			if (rx_error & 0x04)  vp->stats.rx_frame_errors++;
+			if (rx_error & 0x08)  vp->stats.rx_crc_errors++;
+			if (rx_error & 0x10)  vp->stats.rx_length_errors++;
+		} else {
+			/* The packet length: up to 4.5K. */
+			int pkt_len = rx_status & 0x1fff;
+			struct rtskb *skb;
+			dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
+
+			if (vortex_debug > 4)
+				rtdm_printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
+					pkt_len, rx_status);
+
+			/* Check if the packet is long enough to just accept without
+			   copying to a properly sized skbuff. */
+			{
+/*** RTnet ***/
+				/* Pass up the skbuff already on the Rx ring. */
+				skb = vp->rx_skbuff[entry];
+				vp->rx_skbuff[entry] = NULL;
+				rtskb_put(skb, pkt_len);
+				pci_unmap_single(vp->pdev, dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+				vp->rx_nocopy++;
+			}
+			skb->protocol = rt_eth_type_trans(skb, rtdev);
+			skb->time_stamp = *time_stamp;
+			{					/* Use hardware checksum info. */
+				int csum_bits = rx_status & 0xee000000;
+				if (csum_bits &&
+					(csum_bits == (IPChksumValid | TCPChksumValid) ||
+						csum_bits == (IPChksumValid | UDPChksumValid))) {
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+					vp->rx_csumhits++;
+				}
+			}
+			rtnetif_rx(skb);
+			//rtdev->last_rx = jiffies;
+			vp->stats.rx_packets++;
+			(*packets)++;
+		}
+		entry = (++vp->cur_rx) % RX_RING_SIZE;
+	}
+	/* Refill the Rx ring buffers. */
+	for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
+		struct rtskb *skb;
+		entry = vp->dirty_rx % RX_RING_SIZE;
+		if (vp->rx_skbuff[entry] == NULL) {
+			skb = rtnetdev_alloc_rtskb(rtdev, PKT_BUF_SZ);
+			if (skb == NULL) {
+				static unsigned long last_jif;
+				if ((jiffies - last_jif) > 10 * HZ) {
+					rtdm_printk(KERN_WARNING "%s: memory shortage\n", rtdev->name);
+					last_jif = jiffies;
+				}
+				if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) {
+					// *** RTnet *** mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
+					;
+				}
+				break;			/* Bad news!  */
+			}
+			rtskb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
+			vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(vp->pdev,
+													skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+			vp->rx_skbuff[entry] = skb;
+		}
+		vp->rx_ring[entry].status = 0;	/* Clear complete bit. */
+		outw(UpUnstall, ioaddr + EL3_CMD);
+	}
+	return 0;
+}
+
+/*
+ * If we've hit a total OOM refilling the Rx ring we poll once a second
+ * for some memory.  Otherwise there is no way to restart the rx process.
+ */
+static void
+vortex_down(struct rtnet_device *rtdev)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+
+	rtnetif_stop_queue (rtdev);
+
+	del_timer_sync(&vp->rx_oom_timer);
+	del_timer_sync(&vp->timer);
+
+	/* Turn off statistics ASAP.  We update vp->stats below. */
+	outw(StatsDisable, ioaddr + EL3_CMD);
+
+	/* Disable the receiver and transmitter. */
+	outw(RxDisable, ioaddr + EL3_CMD);
+	outw(TxDisable, ioaddr + EL3_CMD);
+
+	if (rtdev->if_port == XCVR_10base2)
+		/* Turn off thinnet power.  Green! */
+		outw(StopCoax, ioaddr + EL3_CMD);
+
+	outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
+
+	// *** RTnet ***  update_stats(ioaddr, dev);
+	if (vp->full_bus_master_rx)
+		outl(0, ioaddr + UpListPtr);
+	if (vp->full_bus_master_tx)
+		outl(0, ioaddr + DownListPtr);
+
+	if (vp->pdev && vp->enable_wol) {
+		pci_save_state(vp->pdev, vp->power_state);
+		acpi_set_WOL(rtdev);
+	}
+}
+
+static int
+vortex_close(struct rtnet_device *rtdev)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	int i;
+
+	// rtnet_device is always present after vortex_open was called.
+	//if (netif_device_present(dev))
+	//	vortex_down(dev);
+	vortex_down(rtdev);
+
+	if (vortex_debug > 1) {
+		printk(KERN_DEBUG "%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
+			rtdev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus));
+		printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d"
+			" tx_queued %d Rx pre-checksummed %d.\n",
+			rtdev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
+	}
+
+#if DO_ZEROCOPY
+	if (	vp->rx_csumhits &&
+		((vp->drv_flags & HAS_HWCKSM) == 0) &&
+		(hw_checksums[vp->card_idx] == -1)) {
+		printk(KERN_WARNING "%s supports hardware checksums, and we're not using them!\n", rtdev->name);
+		printk(KERN_WARNING "Please see http://www.uow.edu.au/~andrewm/zerocopy.html\n");
+	}
+#endif
+
+	// *** RTnet ***
+	if ( (i=rtdm_irq_free(&vp->irq_handle))<0 )
+		return i;
+
+	rt_stack_disconnect(rtdev);
+
+	// *** RTnet ***
+
+	if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
+		for (i = 0; i < RX_RING_SIZE; i++)
+			if (vp->rx_skbuff[i]) {
+				pci_unmap_single(	vp->pdev, le32_to_cpu(vp->rx_ring[i].addr),
+						PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+				dev_kfree_rtskb(vp->rx_skbuff[i]);
+				vp->rx_skbuff[i] = 0;
+			}
+	}
+	if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
+		for (i = 0; i < TX_RING_SIZE; i++) {
+			if (vp->tx_skbuff[i]) {
+				struct rtskb *skb = vp->tx_skbuff[i];
+#if DO_ZEROCOPY
+				int k;
+
+				for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
+					pci_unmap_single(vp->pdev,
+							le32_to_cpu(vp->tx_ring[i].frag[k].addr),
+							le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
+							PCI_DMA_TODEVICE);
+#else
+				pci_unmap_single(vp->pdev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
+#endif
+				dev_kfree_rtskb(skb);
+				vp->tx_skbuff[i] = 0;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void
+dump_tx_ring(struct rtnet_device *rtdev)
+{
+	if (vortex_debug > 0) {
+		struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+		long ioaddr = rtdev->base_addr;
+
+		if (vp->full_bus_master_tx) {
+			int i;
+			int stalled = inl(ioaddr + PktStatus) & 0x04;	/* Possibly racy, but it's only debug stuff */
+
+			rtdm_printk(KERN_ERR "	Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
+				vp->full_bus_master_tx,
+				vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
+				vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
+			rtdm_printk(KERN_ERR "	Transmit list %8.8x vs. %p.\n",
+				inl(ioaddr + DownListPtr),
+				&vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
+			issue_and_wait(rtdev, DownStall);
+			for (i = 0; i < TX_RING_SIZE; i++) {
+				rtdm_printk(KERN_ERR "	%d: @%p  length %8.8x status %8.8x\n", i,
+					&vp->tx_ring[i],
+#if DO_ZEROCOPY
+					le32_to_cpu(vp->tx_ring[i].frag[0].length),
+#else
+					le32_to_cpu(vp->tx_ring[i].length),
+#endif
+					le32_to_cpu(vp->tx_ring[i].status));
+			}
+			if (!stalled)
+				outw(DownUnstall, ioaddr + EL3_CMD);
+		}
+	}
+}
+
+static struct net_device_stats *vortex_get_stats(struct rtnet_device *rtdev)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	rtdm_lockctx_t flags;
+
+	if (rtnetif_device_present(rtdev)) {	/* AKPM: Used to be netif_running */
+		rtdm_lock_get_irqsave (&vp->lock, flags);
+		update_stats(rtdev->base_addr, rtdev);
+		rtdm_lock_put_irqrestore (&vp->lock, flags);
+	}
+	return &vp->stats;
+}
+
+/*  Update statistics.
+    Unlike with the EL3 we need not worry about interrupts changing
+    the window setting from underneath us, but we must still guard
+    against a race condition with a StatsUpdate interrupt updating the
+    table.  This is done by checking that the ASM (!) code generated uses
+    atomic updates with '+='.
+*/
+static void update_stats(long ioaddr, struct rtnet_device *rtdev)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	int old_window = inw(ioaddr + EL3_CMD);
+
+	if (old_window == 0xffff)	/* Chip suspended or ejected. */
+		return;
+	/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
+	/* Switch to the stats window, and read everything. */
+	EL3WINDOW(6);
+	vp->stats.tx_carrier_errors		+= inb(ioaddr + 0);
+	vp->stats.tx_heartbeat_errors	+= inb(ioaddr + 1);
+	/* Multiple collisions. */		inb(ioaddr + 2);
+	vp->stats.collisions			+= inb(ioaddr + 3);
+	vp->stats.tx_window_errors		+= inb(ioaddr + 4);
+	vp->stats.rx_fifo_errors		+= inb(ioaddr + 5);
+	vp->stats.tx_packets			+= inb(ioaddr + 6);
+	vp->stats.tx_packets			+= (inb(ioaddr + 9)&0x30) << 4;
+	/* Rx packets	*/				inb(ioaddr + 7);   /* Must read to clear */
+	/* Tx deferrals */				inb(ioaddr + 8);
+	/* Don't bother with register 9, an extension of registers 6&7.
+	   If we do use the 6&7 values the atomic update assumption above
+	   is invalid. */
+	vp->stats.rx_bytes += inw(ioaddr + 10);
+	vp->stats.tx_bytes += inw(ioaddr + 12);
+	/* New: On the Vortex we must also clear the BadSSD counter. */
+	EL3WINDOW(4);
+	inb(ioaddr + 12);
+
+	{
+		u8 up = inb(ioaddr + 13);
+		vp->stats.rx_bytes += (up & 0x0f) << 16;
+		vp->stats.tx_bytes += (up & 0xf0) << 12;
+	}
+
+	EL3WINDOW(old_window >> 13);
+	return;
+}
+
+/* Pre-Cyclone chips have no documented multicast filter, so the only
+   multicast setting is to receive all multicast frames.  At least
+   the chip has a very clean way to set the mode, unlike many others. */
+static void set_rx_mode(struct rtnet_device *rtdev)
+{
+	long ioaddr = rtdev->base_addr;
+	int new_mode;
+
+	if (rtdev->flags & IFF_PROMISC) {
+		if (vortex_debug > 0)
+			printk(KERN_NOTICE "%s: Setting promiscuous mode.\n", rtdev->name);
+		new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
+	} else	if (rtdev->flags & IFF_ALLMULTI) {
+		new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
+	} else
+		new_mode = SetRxFilter | RxStation | RxBroadcast;
+
+	outw(new_mode, ioaddr + EL3_CMD);
+}
+
+/* MII transceiver control section.
+   Read and write the MII registers using software-generated serial
+   MDIO protocol.  See the MII specifications or DP83840A data sheet
+   for details. */
+
+/* The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
+   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+   "overclocking" issues. */
+#define mdio_delay() inl(mdio_addr)
+
+#define MDIO_SHIFT_CLK	0x01
+#define MDIO_DIR_WRITE	0x04
+#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
+#define MDIO_DATA_READ	0x02
+#define MDIO_ENB_IN		0x00
+
+/* Generate the preamble required for initial synchronization and
+   a few older transceivers. */
+static void mdio_sync(long ioaddr, int bits)
+{
+	long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+	/* Establish sync by sending at least 32 logic ones. */
+	while (-- bits >= 0) {
+		outw(MDIO_DATA_WRITE1, mdio_addr);
+		mdio_delay();
+		outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+}
+
+static int mdio_read(struct rtnet_device *rtdev, int phy_id, int location)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	int i;
+	long ioaddr = rtdev->base_addr;
+	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+	unsigned int retval = 0;
+	long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+	spin_lock_bh(&vp->mdio_lock);
+
+	if (mii_preamble_required)
+		mdio_sync(ioaddr, 32);
+
+	/* Shift the read command bits out. */
+	for (i = 14; i >= 0; i--) {
+		int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+		outw(dataval, mdio_addr);
+		mdio_delay();
+		outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	/* Read the two transition, 16 data, and wire-idle bits. */
+	for (i = 19; i > 0; i--) {
+		outw(MDIO_ENB_IN, mdio_addr);
+		mdio_delay();
+		retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+		outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	spin_unlock_bh(&vp->mdio_lock);
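+	/*
+	 * Bit 17 of the shifted-in stream is the second turnaround bit, which a
+	 * responding PHY drives low; if it is still high, nothing answered and
+	 * 0xffff is returned.  Otherwise drop the trailing idle bit and keep
+	 * the 16 data bits.
+	 */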
+	return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
+}
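+
+/*
+ * Usage sketch (illustrative, not part of the original driver): mdio_read()
+ * can be used to poll the standard MII status register (register 1, BMSR)
+ * for link state.  vp->phys[0] is assumed to hold the transceiver address
+ * discovered at probe time:
+ *
+ *	int bmsr = mdio_read(rtdev, vp->phys[0], 1);
+ *	int link_up = (bmsr & 0x0004) != 0;	   (0x0004 == BMSR_LSTATUS)
+ */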
+
+static void mdio_write(struct rtnet_device *rtdev, int phy_id, int location, int value)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
+	long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+	int i;
+
+	spin_lock_bh(&vp->mdio_lock);
+
+	if (mii_preamble_required)
+		mdio_sync(ioaddr, 32);
+
+	/* Shift the command bits out. */
+	for (i = 31; i >= 0; i--) {
+		int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+		outw(dataval, mdio_addr);
+		mdio_delay();
+		outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	/* Leave the interface idle. */
+	for (i = 1; i >= 0; i--) {
+		outw(MDIO_ENB_IN, mdio_addr);
+		mdio_delay();
+		outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	spin_unlock_bh(&vp->mdio_lock);
+	return;
+}
+
+/* ACPI: Advanced Configuration and Power Interface. */
+/* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
+static void acpi_set_WOL(struct rtnet_device *rtdev)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+
+	/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
+	EL3WINDOW(7);
+	outw(2, ioaddr + 0x0c);
+	/* The RxFilter must accept the WOL frames. */
+	outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
+	outw(RxEnable, ioaddr + EL3_CMD);
+
+	/* Change the power state to D3; RxEnable doesn't take effect. */
+	pci_enable_wake(vp->pdev, 0, 1);
+	pci_set_power_state(vp->pdev, 3);
+}
+
+
+static void vortex_remove_one (struct pci_dev *pdev)
+{
+	struct vortex_private *vp;
+	// *** RTnet ***
+	struct rtnet_device *rtdev = pci_get_drvdata (pdev);
+
+
+
+	if (!rtdev) {
+		printk("vortex_remove_one called for EISA device!\n");
+		BUG();
+	}
+
+	vp = rtdev->priv;
+
+	/* AKPM: FIXME: we should have
+	 *	if (vp->cb_fn_base) iounmap(vp->cb_fn_base);
+	 * here
+	 */
+	rt_unregister_rtnetdev(rtdev);
+	/* Should really use issue_and_wait() here */
+	outw(TotalReset|0x14, rtdev->base_addr + EL3_CMD);
+
+	if (vp->pdev && vp->enable_wol) {
+		pci_set_power_state(vp->pdev, 0);	/* Go active */
+		if (vp->pm_state_valid)
+			pci_restore_state(vp->pdev, vp->power_state);
+	}
+
+	pci_free_consistent(pdev,
+			sizeof(struct boom_rx_desc) * RX_RING_SIZE
+			+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+			vp->rx_ring,
+			vp->rx_ring_dma);
+	if (vp->must_free_region)
+		release_region(rtdev->base_addr, vp->io_size);
+	// *** RTnet ***
+	rtdev_free(rtdev);
+	// *** RTnet ***
+}
+
+
+static struct pci_driver vortex_driver = {
+	.name		= "3c59x_rt",
+	.probe		= vortex_init_one,
+	.remove		= vortex_remove_one,
+	.id_table	= vortex_pci_tbl,
+#ifdef CONFIG_PM
+	.suspend	= NULL,
+	.resume		= NULL,
+#endif
+};
+
+
+static int vortex_have_pci;
+
+
+static int __init vortex_init (void)
+{
+	int pci_rc;
+
+	pci_rc = pci_register_driver(&vortex_driver);
+
+	if (pci_rc == 0)
+		vortex_have_pci = 1;
+
+	return (vortex_have_pci) ? 0 : -ENODEV;
+}
+
+
+static void __exit vortex_cleanup (void)
+{
+	if (vortex_have_pci)
+		pci_unregister_driver (&vortex_driver);
+}
+
+module_init(vortex_init);
+module_exit(vortex_cleanup);
+++ linux-patched/drivers/xenomai/net/drivers/experimental/Makefile	2022-03-21 12:58:29.886884115 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_RT2500) += rt2500/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_E1000_NEW) += e1000/
+
+obj-$(CONFIG_RTNET_DRV_3C59X) += rt_3c59x.o
+
+rt_3c59x-y := 3c59x.o
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.c	2022-03-21 12:58:29.880884173 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+#include "e1000_phy.h"
+
+static s32  e1000_get_phy_cfg_done(struct e1000_hw *hw);
+static void e1000_release_phy(struct e1000_hw *hw);
+static s32  e1000_acquire_phy(struct e1000_hw *hw);
+
+/* Cable length tables */
+static const u16 e1000_m88_cable_length_table[] =
+	{ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+                (sizeof(e1000_m88_cable_length_table) / \
+                 sizeof(e1000_m88_cable_length_table[0]))
+
+static const u16 e1000_igp_2_cable_length_table[] =
+    { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+      0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+      6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+      21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+      40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+      60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+      83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+      104, 109, 114, 118, 121, 124};
+#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
+                (sizeof(e1000_igp_2_cable_length_table) / \
+                 sizeof(e1000_igp_2_cable_length_table[0]))
+
+/**
+ *  e1000_check_reset_block_generic - Check if PHY reset is blocked
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the PHY management control register and check whether a PHY reset
+ *  is blocked.  If a reset is not blocked return E1000_SUCCESS, otherwise
+ *  return E1000_BLK_PHY_RESET (12).
+ **/
+s32 e1000_check_reset_block_generic(struct e1000_hw *hw)
+{
+	u32 manc;
+
+	DEBUGFUNC("e1000_check_reset_block");
+
+	manc = E1000_READ_REG(hw, E1000_MANC);
+
+	return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+	       E1000_BLK_PHY_RESET : E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_phy_id - Retrieve the PHY ID and revision
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY registers and stores the PHY ID and possibly the PHY
+ *  revision in the hardware structure.
+ **/
+s32 e1000_get_phy_id(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = E1000_SUCCESS;
+	u16 phy_id;
+
+	DEBUGFUNC("e1000_get_phy_id");
+
+	ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id);
+	if (ret_val)
+		goto out;
+
+	phy->id = (u32)(phy_id << 16);
+	usec_delay(20);
+	ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id);
+	if (ret_val)
+		goto out;
+
+	phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+	phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+out:
+	return ret_val;
+}
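+
+/*
+ * Worked example (illustrative): for an M88E1111-class transceiver reporting
+ * PHY_ID1 == 0x0141 and PHY_ID2 == 0x0CC2, and assuming the usual
+ * PHY_REVISION_MASK of 0xFFFFFFF0, the code above yields
+ * phy->id == 0x01410CC0 (M88E1111_I_PHY_ID) and phy->revision == 0x2.
+ */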
+
+/**
+ *  e1000_phy_reset_dsp_generic - Reset PHY DSP
+ *  @hw: pointer to the HW structure
+ *
+ *  Reset the digital signal processor.
+ **/
+s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_phy_reset_dsp_generic");
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_phy_reg_mdic - Read MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the MDI control register in the PHY at offset and stores the
+ *  information read to data.
+ **/
+s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, mdic = 0;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_read_phy_reg_mdic");
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		DEBUGOUT1("PHY Address %d is out of range\n", offset);
+		ret_val = -E1000_ERR_PARAM;
+		goto out;
+	}
+
+	/*
+	 * Set up Op-code, Phy Address, and register offset in the MDI
+	 * Control register.  The MAC will take care of interfacing with the
+	 * PHY to retrieve the desired data.
+	 */
+	mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+	        (phy->addr << E1000_MDIC_PHY_SHIFT) |
+	        (E1000_MDIC_OP_READ));
+
+	E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+
+	/*
+	 * Poll the ready bit to see if the MDI read completed.  The timeout
+	 * was increased because testing showed failures with a lower timeout.
+	 */
+	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+		usec_delay(50);
+		mdic = E1000_READ_REG(hw, E1000_MDIC);
+		if (mdic & E1000_MDIC_READY)
+			break;
+	}
+	if (!(mdic & E1000_MDIC_READY)) {
+		DEBUGOUT("MDI Read did not complete\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+	if (mdic & E1000_MDIC_ERROR) {
+		DEBUGOUT("MDI Error\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+	*data = (u16) mdic;
+
+out:
+	return ret_val;
+}
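+
+/*
+ * Worked example (illustrative): with the conventional e1000 field layout of
+ * E1000_MDIC_REG_SHIFT == 16 and E1000_MDIC_PHY_SHIFT == 21, reading PHY
+ * register 2 (PHY_ID1) on PHY address 1 programs
+ *
+ *	mdic = (2 << 16) | (1 << 21) | E1000_MDIC_OP_READ;
+ *
+ * and the 16-bit result is taken from the low word of E1000_MDIC once
+ * E1000_MDIC_READY is set.
+ */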
+
+/**
+ *  e1000_write_phy_reg_mdic - Write MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write to register at offset
+ *
+ *  Writes data to MDI control register in the PHY at offset.
+ **/
+s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, mdic = 0;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_write_phy_reg_mdic");
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		DEBUGOUT1("PHY Address %d is out of range\n", offset);
+		ret_val = -E1000_ERR_PARAM;
+		goto out;
+	}
+
+	/*
+	 * Set up Op-code, Phy Address, and register offset in the MDI
+	 * Control register.  The MAC will take care of interfacing with the
+	 * PHY to retrieve the desired data.
+	 */
+	mdic = (((u32)data) |
+	        (offset << E1000_MDIC_REG_SHIFT) |
+	        (phy->addr << E1000_MDIC_PHY_SHIFT) |
+	        (E1000_MDIC_OP_WRITE));
+
+	E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+
+	/*
+	 * Poll the ready bit to see if the MDI write completed.  The timeout
+	 * was increased because testing showed failures with a lower timeout.
+	 */
+	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+		usec_delay(50);
+		mdic = E1000_READ_REG(hw, E1000_MDIC);
+		if (mdic & E1000_MDIC_READY)
+			break;
+	}
+	if (!(mdic & E1000_MDIC_READY)) {
+		DEBUGOUT("MDI Write did not complete\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+	if (mdic & E1000_MDIC_ERROR) {
+		DEBUGOUT("MDI Error\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_phy_reg_m88 - Read m88 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires the semaphore, if necessary, then reads the PHY register at offset
+ *  and stores the retrieved information in data.  Releases any acquired
+ *  semaphores before exiting.
+ **/
+s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_read_phy_reg_m88");
+
+	ret_val = e1000_acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_phy_reg_mdic(hw,
+	                                  MAX_PHY_REG_ADDRESS & offset,
+	                                  data);
+
+	e1000_release_phy(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_m88 - Write m88 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_write_phy_reg_m88");
+
+	ret_val = e1000_acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_write_phy_reg_mdic(hw,
+	                                   MAX_PHY_REG_ADDRESS & offset,
+	                                   data);
+
+	e1000_release_phy(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_phy_reg_igp - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires the semaphore, if necessary, then reads the PHY register at offset
+ *  and stores the retrieved information in data.  Releases any acquired
+ *  semaphores before exiting.
+ **/
+s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_read_phy_reg_igp");
+
+	ret_val = e1000_acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		ret_val = e1000_write_phy_reg_mdic(hw,
+		                                   IGP01E1000_PHY_PAGE_SELECT,
+		                                   (u16)offset);
+		if (ret_val) {
+			e1000_release_phy(hw);
+			goto out;
+		}
+	}
+
+	ret_val = e1000_read_phy_reg_mdic(hw,
+	                                  MAX_PHY_REG_ADDRESS & offset,
+	                                  data);
+
+	e1000_release_phy(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_igp - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_write_phy_reg_igp");
+
+	ret_val = e1000_acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		ret_val = e1000_write_phy_reg_mdic(hw,
+		                                   IGP01E1000_PHY_PAGE_SELECT,
+		                                   (u16)offset);
+		if (ret_val) {
+			e1000_release_phy(hw);
+			goto out;
+		}
+	}
+
+	ret_val = e1000_write_phy_reg_mdic(hw,
+	                                   MAX_PHY_REG_ADDRESS & offset,
+	                                   data);
+
+	e1000_release_phy(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_kmrn_reg_generic - Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore, if necessary.  Then reads the PHY register at offset
+ *  using the kumeran interface.  The information retrieved is stored in data.
+ *  Release any acquired semaphores before exiting.
+ **/
+s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	u32 kmrnctrlsta;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_read_kmrn_reg_generic");
+
+	ret_val = e1000_acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+	               E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+	E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+
+	usec_delay(2);
+
+	kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
+	*data = (u16)kmrnctrlsta;
+
+	e1000_release_phy(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_kmrn_reg_generic - Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires the semaphore, if necessary.  Then writes the data to the PHY register
+ *  at the offset using the kumeran interface.  Release any acquired semaphores
+ *  before exiting.
+ **/
+s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	u32 kmrnctrlsta;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_write_kmrn_reg_generic");
+
+	ret_val = e1000_acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+	               E1000_KMRNCTRLSTA_OFFSET) | data;
+	E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+
+	usec_delay(2);
+	e1000_release_phy(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up MDI/MDI-X and polarity for m88 PHYs.  If necessary, transmit clock
+ *  and downshift values are also set.
+ **/
+s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+
+	DEBUGFUNC("e1000_copper_link_setup_m88");
+
+	if (phy->reset_disable) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	/* Enable CRS on TX. This must be set for half-duplex operation. */
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+	/*
+	 * Options:
+	 *   MDI/MDI-X = 0 (default)
+	 *   0 - Auto for all speeds
+	 *   1 - MDI mode
+	 *   2 - MDI-X mode
+	 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+	 */
+	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+	switch (phy->mdix) {
+		case 1:
+			phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+			break;
+		case 2:
+			phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+			break;
+		case 3:
+			phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+			break;
+		case 0:
+		default:
+			phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+			break;
+	}
+
+	/*
+	 * Options:
+	 *   disable_polarity_correction = 0 (default)
+	 *       Automatic Correction for Reversed Cable Polarity
+	 *   0 - Disabled
+	 *   1 - Enabled
+	 */
+	phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+	if (phy->disable_polarity_correction == 1)
+		phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	if (phy->revision < E1000_REVISION_4) {
+		/*
+		 * Force TX_CLK in the Extended PHY Specific Control Register
+		 * to 25MHz clock.
+		 */
+		ret_val = e1000_read_phy_reg(hw,
+		                             M88E1000_EXT_PHY_SPEC_CTRL,
+		                             &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+		if ((phy->revision == E1000_REVISION_2) &&
+		    (phy->id == M88E1111_I_PHY_ID)) {
+			/* 82573L PHY - set the downshift counter to 5x. */
+			phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
+			phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+		} else {
+			/* Configure Master and Slave downshift values */
+			phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+			             M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+			phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+			             M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+		}
+		ret_val = e1000_write_phy_reg(hw,
+		                             M88E1000_EXT_PHY_SPEC_CTRL,
+		                             phy_data);
+		if (ret_val)
+			goto out;
+	}
+
+	/* Commit the changes. */
+	ret_val = e1000_phy_commit(hw);
+	if (ret_val) {
+		DEBUGOUT("Error committing the PHY changes\n");
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_copper_link_setup_igp - Setup igp PHY's for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
+ *  igp PHYs.
+ **/
+s32 e1000_copper_link_setup_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_copper_link_setup_igp");
+
+	if (phy->reset_disable) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	ret_val = e1000_phy_hw_reset(hw);
+	if (ret_val) {
+		DEBUGOUT("Error resetting the PHY.\n");
+		goto out;
+	}
+
+	/* Wait 15ms for MAC to configure PHY from NVM settings. */
+	msec_delay(15);
+
+	/*
+	 * The NVM settings will configure LPLU in D3 for
+	 * non-IGP1 PHYs.
+	 */
+	if (phy->type == e1000_phy_igp) {
+		/* disable lplu d3 during driver init */
+		ret_val = e1000_set_d3_lplu_state(hw, FALSE);
+		if (ret_val) {
+			DEBUGOUT("Error Disabling LPLU D3\n");
+			goto out;
+		}
+	}
+
+	/* disable lplu d0 during driver init */
+	ret_val = e1000_set_d0_lplu_state(hw, FALSE);
+	if (ret_val) {
+		DEBUGOUT("Error Disabling LPLU D0\n");
+		goto out;
+	}
+	/* Configure mdi-mdix settings */
+	ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
+	if (ret_val)
+		goto out;
+
+	data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+	switch (phy->mdix) {
+	case 1:
+		data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+		break;
+	case 2:
+		data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+		break;
+	case 0:
+	default:
+		data |= IGP01E1000_PSCR_AUTO_MDIX;
+		break;
+	}
+	ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
+	if (ret_val)
+		goto out;
+
+	/* set auto-master slave resolution settings */
+	if (hw->mac.autoneg) {
+		/*
+		 * when autonegotiation advertisement is only 1000Mbps then we
+		 * should disable SmartSpeed and enable Auto MasterSlave
+		 * resolution as hardware default.
+		 */
+		if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
+			/* Disable SmartSpeed */
+			ret_val = e1000_read_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+
+			/* Set auto Master/Slave resolution process */
+			ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~CR_1000T_MS_ENABLE;
+			ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, data);
+			if (ret_val)
+				goto out;
+		}
+
+		ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &data);
+		if (ret_val)
+			goto out;
+
+		/* load defaults for future use */
+		phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
+			((data & CR_1000T_MS_VALUE) ?
+			e1000_ms_force_master :
+			e1000_ms_force_slave) :
+			e1000_ms_auto;
+
+		switch (phy->ms_type) {
+		case e1000_ms_force_master:
+			data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+			break;
+		case e1000_ms_force_slave:
+			data |= CR_1000T_MS_ENABLE;
+			data &= ~(CR_1000T_MS_VALUE);
+			break;
+		case e1000_ms_auto:
+			data &= ~CR_1000T_MS_ENABLE;
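+			/* deliberate fall-through to the default case */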
+		default:
+			break;
+		}
+		ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, data);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs initial bounds checking on the autoneg advertisement parameter, then
+ *  configures the PHY to advertise the full capability.  Sets up the PHY for
+ *  autoneg and restarts the negotiation process with the link partner.  If
+ *  autoneg_wait_to_complete is set, waits for autoneg to complete before exiting.
+ **/
+s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_ctrl;
+
+	DEBUGFUNC("e1000_copper_link_autoneg");
+
+	/*
+	 * Perform some bounds checking on the autoneg advertisement
+	 * parameter.
+	 */
+	phy->autoneg_advertised &= phy->autoneg_mask;
+
+	/*
+	 * If autoneg_advertised is zero, we assume it was not defaulted
+	 * by the calling code so we set to advertise full capability.
+	 */
+	if (phy->autoneg_advertised == 0)
+		phy->autoneg_advertised = phy->autoneg_mask;
+
+	DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+	ret_val = e1000_phy_setup_autoneg(hw);
+	if (ret_val) {
+		DEBUGOUT("Error Setting up Auto-Negotiation\n");
+		goto out;
+	}
+	DEBUGOUT("Restarting Auto-Neg\n");
+
+	/*
+	 * Restart auto-negotiation by setting the Auto Neg Enable bit and
+	 * the Auto Neg Restart bit in the PHY control register.
+	 */
+	ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+	ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Does the user want to wait for Auto-Neg to complete here, or
+	 * check at a later time (for example, callback routine).
+	 */
+	if (phy->autoneg_wait_to_complete) {
+		ret_val = e1000_wait_autoneg(hw);
+		if (ret_val) {
+			DEBUGOUT("Error while waiting for "
+			         "autoneg to complete\n");
+			goto out;
+		}
+	}
+
+	hw->mac.get_link_status = TRUE;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the MII auto-neg advertisement register and/or the 1000T control
+ *  register; if the PHY is already set up for auto-negotiation, returns
+ *  successfully.  Otherwise, sets up advertisement and flow control to
+ *  the appropriate values for the desired auto-negotiation.
+ **/
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 mii_autoneg_adv_reg;
+	u16 mii_1000t_ctrl_reg = 0;
+
+	DEBUGFUNC("e1000_phy_setup_autoneg");
+
+	phy->autoneg_advertised &= phy->autoneg_mask;
+
+	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
+	ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+	if (ret_val)
+		goto out;
+
+	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+		/* Read the MII 1000Base-T Control Register (Address 9). */
+		ret_val = e1000_read_phy_reg(hw,
+		                            PHY_1000T_CTRL,
+		                            &mii_1000t_ctrl_reg);
+		if (ret_val)
+			goto out;
+	}
+
+	/*
+	 * Need to parse both autoneg_advertised and fc and set up
+	 * the appropriate PHY registers.  First we will parse for
+	 * autoneg_advertised software override.  Since we can advertise
+	 * a plethora of combinations, we need to check each bit
+	 * individually.
+	 */
+
+	/*
+	 * First we clear all the 10/100 mb speed bits in the Auto-Neg
+	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
+	 * the  1000Base-T Control Register (Address 9).
+	 */
+	mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
+	                         NWAY_AR_100TX_HD_CAPS |
+	                         NWAY_AR_10T_FD_CAPS   |
+	                         NWAY_AR_10T_HD_CAPS);
+	mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+
+	DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised);
+
+	/* Do we want to advertise 10 Mb Half Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+		DEBUGOUT("Advertise 10mb Half duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+	}
+
+	/* Do we want to advertise 10 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+		DEBUGOUT("Advertise 10mb Full duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+	}
+
+	/* Do we want to advertise 100 Mb Half Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+		DEBUGOUT("Advertise 100mb Half duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+	}
+
+	/* Do we want to advertise 100 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+		DEBUGOUT("Advertise 100mb Full duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+	}
+
+	/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+	if (phy->autoneg_advertised & ADVERTISE_1000_HALF) {
+		DEBUGOUT("Advertise 1000mb Half duplex request denied!\n");
+	}
+
+	/* Do we want to advertise 1000 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+		DEBUGOUT("Advertise 1000mb Full duplex\n");
+		mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+	}
+
+	/*
+	 * Check for a software override of the flow control settings, and
+	 * setup the PHY advertisement registers accordingly.  If
+	 * auto-negotiation is enabled, then software will have to set the
+	 * "PAUSE" bits to the correct value in the Auto-Negotiation
+	 * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+	 * negotiation.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause frames
+	 *          but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *          but we do not support receiving pause frames).
+	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+	 *  other:  No software override.  The flow control configuration
+	 *          in the EEPROM is used.
+	 */
+	switch (hw->fc.type) {
+	case e1000_fc_none:
+		/*
+		 * Flow control (Rx & Tx) is completely disabled by a
+		 * software over-ride.
+		 */
+		mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	case e1000_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled, and Tx Flow control is
+		 * disabled, by a software over-ride.
+		 *
+		 * Since there really isn't a way to advertise that we are
+		 * capable of Rx Pause ONLY, we will advertise that we
+		 * support both symmetric and asymmetric Rx PAUSE.  Later
+		 * (in e1000_config_fc_after_link_up) we will disable the
+		 * hw's ability to send PAUSE frames.
+		 */
+		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	case e1000_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is
+		 * disabled, by a software over-ride.
+		 */
+		mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+		mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+		break;
+	case e1000_fc_full:
+		/*
+		 * Flow control (both Rx and Tx) is enabled by a software
+		 * over-ride.
+		 */
+		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	default:
+		DEBUGOUT("Flow control param set incorrectly\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+	if (ret_val)
+		goto out;
+
+	DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+		ret_val = e1000_write_phy_reg(hw,
+		                              PHY_1000T_CTRL,
+		                              mii_1000t_ctrl_reg);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
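+
+/*
+ * Quick reference (derived from the switch above, no additional driver
+ * logic): e1000_fc_none clears both NWAY_AR_PAUSE and NWAY_AR_ASM_DIR,
+ * e1000_fc_rx_pause and e1000_fc_full set both, and e1000_fc_tx_pause
+ * sets NWAY_AR_ASM_DIR only.
+ */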
+
+/**
+ *  e1000_setup_copper_link_generic - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the appropriate function to configure the link for auto-neg or forced
+ *  speed and duplex.  Then we check for link; once link is established, the
+ *  collision distance and flow control are configured.  If link is
+ *  not established, we return -E1000_ERR_PHY (-2).
+ **/
+s32 e1000_setup_copper_link_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	bool link;
+
+	DEBUGFUNC("e1000_setup_copper_link_generic");
+
+	if (hw->mac.autoneg) {
+		/*
+		 * Setup autoneg and flow control advertisement and perform
+		 * autonegotiation.
+		 */
+		ret_val = e1000_copper_link_autoneg(hw);
+		if (ret_val)
+			goto out;
+	} else {
+		/*
+		 * PHY will be set to 10H, 10F, 100H or 100F
+		 * depending on user settings.
+		 */
+		DEBUGOUT("Forcing Speed and Duplex\n");
+		ret_val = e1000_phy_force_speed_duplex(hw);
+		if (ret_val) {
+			DEBUGOUT("Error Forcing Speed and Duplex\n");
+			goto out;
+		}
+	}
+
+	/*
+	 * Check link status. Wait up to 100 microseconds for link to become
+	 * valid.
+	 */
+	ret_val = e1000_phy_has_link_generic(hw,
+	                                     COPPER_LINK_UP_LIMIT,
+	                                     10,
+	                                     &link);
+	if (ret_val)
+		goto out;
+
+	if (link) {
+		DEBUGOUT("Valid link established!!!\n");
+		e1000_config_collision_dist_generic(hw);
+		ret_val = e1000_config_fc_after_link_up_generic(hw);
+	} else {
+		DEBUGOUT("Unable to establish link!!!\n");
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Waits for link and returns
+ *  success if the link comes up, else -E1000_ERR_PHY (-2).
+ **/
+s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex_igp");
+
+	ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Clear Auto-Crossover to force MDI manually.  IGP requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+	phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+	ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	DEBUGOUT1("IGP PSCR: %X\n", phy_data);
+
+	usec_delay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n");
+
+		ret_val = e1000_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+
+		if (!link) {
+			DEBUGOUT("Link taking longer than expected.\n");
+		}
+
+		/* Try once more */
+		ret_val = e1000_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Resets the PHY to commit the
+ *  changes.  If time expires while waiting for link up, we reset the DSP.
+ *  After reset, TX_CLK and CRS on Tx must be set.  Returns success upon
+ *  successful completion, else returns the corresponding error code.
+ **/
+s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex_m88");
+
+	/*
+	 * Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
+
+	ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	/* Reset the phy to commit changes. */
+	phy_data |= MII_CR_RESET;
+
+	ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	usec_delay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n");
+
+		ret_val = e1000_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+
+		if (!link) {
+			/*
+			 * We didn't get link.
+			 * Reset the DSP and cross our fingers.
+			 */
+			ret_val = e1000_write_phy_reg(hw,
+			                              M88E1000_PHY_PAGE_SELECT,
+			                              0x001d);
+			if (ret_val)
+				goto out;
+			ret_val = e1000_phy_reset_dsp_generic(hw);
+			if (ret_val)
+				goto out;
+		}
+
+		/* Try once more */
+		ret_val = e1000_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Resetting the phy means we need to re-force TX_CLK in the
+	 * Extended PHY Specific Control Register to 25MHz clock from
+	 * the reset value of 2.5MHz.
+	 */
+	phy_data |= M88E1000_EPSCR_TX_CLK_25;
+	ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * In addition, we must re-enable CRS on Tx for both half and full
+	 * duplex.
+	 */
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ *  Forces speed and duplex on the PHY by doing the following: disable flow
+ *  control, force speed/duplex on the MAC, disable auto speed detection,
+ *  disable auto-negotiation, configure duplex, configure speed, configure
+ *  the collision distance, write configuration to CTRL register.  The
+ *  caller must write to the PHY_CONTROL register for these settings to
+ *  take effect.
+ **/
+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 ctrl;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex_setup");
+
+	/* Turn off flow control when forcing speed/duplex */
+	hw->fc.type = e1000_fc_none;
+
+	/* Force speed/duplex on the mac */
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ctrl &= ~E1000_CTRL_SPD_SEL;
+
+	/* Disable Auto Speed Detection */
+	ctrl &= ~E1000_CTRL_ASDE;
+
+	/* Disable autoneg on the phy */
+	*phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+
+	/* Forcing Full or Half Duplex? */
+	if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
+		ctrl &= ~E1000_CTRL_FD;
+		*phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+		DEBUGOUT("Half Duplex\n");
+	} else {
+		ctrl |= E1000_CTRL_FD;
+		*phy_ctrl |= MII_CR_FULL_DUPLEX;
+		DEBUGOUT("Full Duplex\n");
+	}
+
+	/* Forcing 10mb or 100mb? */
+	if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
+		ctrl |= E1000_CTRL_SPD_100;
+		*phy_ctrl |= MII_CR_SPEED_100;
+		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+		DEBUGOUT("Forcing 100mb\n");
+	} else {
+		ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+		*phy_ctrl |= MII_CR_SPEED_10;
+		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+		DEBUGOUT("Forcing 10mb\n");
+	}
+
+	e1000_config_collision_dist_generic(hw);
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+}
+
+/**
+ *  e1000_set_d3_lplu_state_generic - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D3
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_set_d3_lplu_state_generic");
+
+	ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+	if (ret_val)
+		goto out;
+
+	if (!active) {
+		data &= ~IGP02E1000_PM_D3_LPLU;
+		ret_val = e1000_write_phy_reg(hw,
+		                             IGP02E1000_PHY_POWER_MGMT,
+		                             data);
+		if (ret_val)
+			goto out;
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1000_read_phy_reg(hw,
+			                            IGP01E1000_PHY_PORT_CONFIG,
+			                            &data);
+			if (ret_val)
+				goto out;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1000_read_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		}
+	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+	           (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+	           (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= IGP02E1000_PM_D3_LPLU;
+		ret_val = e1000_write_phy_reg(hw,
+		                              IGP02E1000_PHY_POWER_MGMT,
+		                              data);
+		if (ret_val)
+			goto out;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1000_read_phy_reg(hw,
+		                             IGP01E1000_PHY_PORT_CONFIG,
+		                             &data);
+		if (ret_val)
+			goto out;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1000_write_phy_reg(hw,
+		                              IGP01E1000_PHY_PORT_CONFIG,
+		                              data);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_downshift_generic - Checks whether a downshift in speed occurred
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  A downshift is detected by querying the PHY link health.
+ **/
+s32 e1000_check_downshift_generic(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, offset, mask;
+
+	DEBUGFUNC("e1000_check_downshift_generic");
+
+	switch (phy->type) {
+	case e1000_phy_m88:
+	case e1000_phy_gg82563:
+		offset	= M88E1000_PHY_SPEC_STATUS;
+		mask	= M88E1000_PSSR_DOWNSHIFT;
+		break;
+	case e1000_phy_igp_2:
+	case e1000_phy_igp:
+	case e1000_phy_igp_3:
+		offset	= IGP01E1000_PHY_LINK_HEALTH;
+		mask	= IGP01E1000_PLHR_SS_DOWNGRADE;
+		break;
+	default:
+		/* speed downshift not supported */
+		phy->speed_downgraded = FALSE;
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, offset, &phy_data);
+
+	if (!ret_val)
+		phy->speed_downgraded = (phy_data & mask) ? TRUE : FALSE;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_m88 - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_check_polarity_m88");
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
+		                      ? e1000_rev_polarity_reversed
+		                      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_igp - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY port status register, and the
+ *  current speed (since there is no polarity at 100Mbps).
+ **/
+s32 e1000_check_polarity_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data, offset, mask;
+
+	DEBUGFUNC("e1000_check_polarity_igp");
+
+	/*
+	 * Polarity is determined based on the speed of
+	 * our connection.
+	 */
+	ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		goto out;
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		offset	= IGP01E1000_PHY_PCS_INIT_REG;
+		mask	= IGP01E1000_PHY_POLARITY_MASK;
+	} else {
+		/*
+		 * This really only applies to 10Mbps since
+		 * there is no polarity for 100Mbps (always 0).
+		 */
+		offset	= IGP01E1000_PHY_PORT_STATUS;
+		mask	= IGP01E1000_PSSR_POLARITY_REVERSED;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, offset, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & mask)
+		                      ? e1000_rev_polarity_reversed
+		                      : e1000_rev_polarity_normal;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_wait_autoneg_generic - Wait for auto-neg completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Waits for auto-negotiation to complete or for the auto-negotiation time
+ *  limit to expire, whichever happens first.
+ **/
+s32 e1000_wait_autoneg_generic(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 i, phy_status;
+
+	DEBUGFUNC("e1000_wait_autoneg_generic");
+
+	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
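+		/*
+		 * Read PHY_STATUS twice; on some PHYs the status bits are
+		 * latched, so the first read may return a stale value (see
+		 * e1000_phy_has_link_generic below).
+		 */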
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_AUTONEG_COMPLETE)
+			break;
+		msec_delay(100);
+	}
+
+	/*
+	 * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+	 * has completed.
+	 */
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_has_link_generic - Polls PHY for link
+ *  @hw: pointer to the HW structure
+ *  @iterations: number of times to poll for link
+ *  @usec_interval: delay between polling attempts
+ *  @success: pointer to whether polling was successful or not
+ *
+ *  Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+                               u32 usec_interval, bool *success)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 i, phy_status;
+
+	DEBUGFUNC("e1000_phy_has_link_generic");
+
+	for (i = 0; i < iterations; i++) {
+		/*
+		 * Some PHYs require the PHY_STATUS register to be read
+		 * twice due to the link bit being sticky.  No harm doing
+		 * it across the board.
+		 */
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_LINK_STATUS)
+			break;
+		if (usec_interval >= 1000)
+			msec_delay_irq(usec_interval/1000);
+		else
+			usec_delay(usec_interval);
+	}
+
+	*success = (i < iterations) ? TRUE : FALSE;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_get_cable_length_m88 - Determine cable length for m88 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY specific status register to retrieve the cable length
+ *  information.  The cable length is determined by averaging the minimum and
+ *  maximum values to get the "average" cable length.  The m88 PHY has four
+ *  possible cable length values, which are:
+ *	Register Value		Cable Length
+ *	0			< 50 meters
+ *	1			50 - 80 meters
+ *	2			80 - 110 meters
+ *	3			110 - 140 meters
+ *	4			> 140 meters
+ **/
+s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, index;
+
+	DEBUGFUNC("e1000_get_cable_length_m88");
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+	        M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+	phy->min_cable_length = e1000_m88_cable_length_table[index];
+	phy->max_cable_length = e1000_m88_cable_length_table[index+1];
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  The automatic gain control (agc) normalizes the amplitude of the
+ *  received signal, adjusting for the attenuation produced by the
+ *  cable.  By reading the AGC registers, which reperesent the
+ *  cobination of course and fine gain value, the value can be put
+ *  into a lookup table to obtain the approximate cable length
+ *  for each channel.
+ **/
+s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = E1000_SUCCESS;
+	u16 phy_data, i, agc_value = 0;
+	u16 cur_agc_index, max_agc_index = 0;
+	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+	u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
+	                                                 {IGP02E1000_PHY_AGC_A,
+	                                                  IGP02E1000_PHY_AGC_B,
+	                                                  IGP02E1000_PHY_AGC_C,
+	                                                  IGP02E1000_PHY_AGC_D};
+
+	DEBUGFUNC("e1000_get_cable_length_igp_2");
+
+	/* Read the AGC registers for all channels */
+	for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+		ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+		if (ret_val)
+			goto out;
+
+		/*
+		 * Getting bits 15:9, which represent the combination of
+		 * coarse and fine gain values.  The result is a number
+		 * that can be put into the lookup table to obtain the
+		 * approximate cable length.
+		 */
+		cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+		                IGP02E1000_AGC_LENGTH_MASK;
+
+		/* Array index bound check. */
+		if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+		    (cur_agc_index == 0)) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+
+		/* Remove min & max AGC values from calculation. */
+		if (e1000_igp_2_cable_length_table[min_agc_index] >
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			min_agc_index = cur_agc_index;
+		if (e1000_igp_2_cable_length_table[max_agc_index] <
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			max_agc_index = cur_agc_index;
+
+		agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+	}
+
+	agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+	              e1000_igp_2_cable_length_table[max_agc_index]);
+	agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+	/* Calculate cable length with the error range of +/- 10 meters. */
+	phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+	                         (agc_value - IGP02E1000_AGC_RANGE) : 0;
+	phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_m88 - Retrieve PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Valid only for copper links.  Read the PHY status register (sticky read)
+ *  to verify that link is up.  Read the PHY special control register to
+ *  determine the polarity and 10base-T extended distance.  Read the PHY
+ *  special status register to determine MDI/MDIx and current speed.  If
+ *  speed is 1000, then determine cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32  ret_val;
+	u16 phy_data;
+	bool link;
+
+	DEBUGFUNC("e1000_get_phy_info_m88");
+
+	if (hw->phy.media_type != e1000_media_type_copper) {
+		DEBUGOUT("Phy info is only valid for copper media\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		DEBUGOUT("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
+	                           ? TRUE
+	                           : FALSE;
+
+	ret_val = e1000_check_polarity_m88(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? TRUE : FALSE;
+
+	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+		ret_val = e1000_get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
+		                ? e1000_1000t_rx_status_ok
+		                : e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
+		                 ? e1000_1000t_rx_status_ok
+		                 : e1000_1000t_rx_status_not_ok;
+	} else {
+		/* Set values to "undefined" */
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_igp - Retrieve igp PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Read PHY status to determine if link is up.  If link is up, then
+ *  set/determine 10base-T extended distance and polarity correction.  Read
+ *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
+ *  determine the cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	DEBUGFUNC("e1000_get_phy_info_igp");
+
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		DEBUGOUT("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	phy->polarity_correction = TRUE;
+
+	ret_val = e1000_check_polarity_igp(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? TRUE : FALSE;
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		ret_val = e1000_get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &data);
+		if (ret_val)
+			goto out;
+
+		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+		                ? e1000_1000t_rx_status_ok
+		                : e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+		                 ? e1000_1000t_rx_status_ok
+		                 : e1000_1000t_rx_status_not_ok;
+	} else {
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_sw_reset_generic - PHY software reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Does a software reset of the PHY by reading the PHY control register and
+ *  setting the reset bit and writing the control register back to the PHY.
+ **/
+s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_ctrl;
+
+	DEBUGFUNC("e1000_phy_sw_reset_generic");
+
+	ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	phy_ctrl |= MII_CR_RESET;
+	ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	usec_delay(1);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_hw_reset_generic - PHY hardware reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Verify the reset block is not blocking us from resetting.  Acquire
+ *  the semaphore (if necessary) and read/set/write the PHY reset bit in
+ *  the device control register.  Wait the appropriate delay time for the
+ *  device to reset and release the semaphore (if necessary).
+ **/
+s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32  ret_val;
+	u32 ctrl;
+
+	DEBUGFUNC("e1000_phy_hw_reset_generic");
+
+	ret_val = e1000_check_reset_block(hw);
+	if (ret_val) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+	E1000_WRITE_FLUSH(hw);
+
+	usec_delay(phy->reset_delay_us);
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	E1000_WRITE_FLUSH(hw);
+
+	usec_delay(150);
+
+	e1000_release_phy(hw);
+
+	ret_val = e1000_get_phy_cfg_done(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_cfg_done_generic - Generic configuration done
+ *  @hw: pointer to the HW structure
+ *
+ *  Generic function to wait 10 milliseconds for configuration to complete
+ *  and return success.
+ **/
+s32 e1000_get_cfg_done_generic(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_get_cfg_done_generic");
+
+	msec_delay_irq(10);
+
+	return E1000_SUCCESS;
+}
+
+/* Internal function pointers */
+
+/**
+ *  e1000_get_phy_cfg_done - Generic PHY configuration done
+ *  @hw: pointer to the HW structure
+ *
+ *  Return success if silicon family did not implement a family specific
+ *  get_cfg_done function.
+ **/
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+	if (hw->func.get_cfg_done)
+		return hw->func.get_cfg_done(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_release_phy - Generic release PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Return if silicon family does not require a semaphore when accessing the
+ *  PHY.
+ **/
+static void e1000_release_phy(struct e1000_hw *hw)
+{
+	if (hw->func.release_phy)
+		hw->func.release_phy(hw);
+}
+
+/**
+ *  e1000_acquire_phy - Generic acquire PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Return success if silicon family does not require a semaphore when
+ *  accessing the PHY.
+ **/
+static s32 e1000_acquire_phy(struct e1000_hw *hw)
+{
+	if (hw->func.acquire_phy)
+		return hw->func.acquire_phy(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex - Generic force PHY speed/duplex
+ *  @hw: pointer to the HW structure
+ *
+ *  When the silicon family has not implemented a forced speed/duplex
+ *  function for the PHY, simply return E1000_SUCCESS.
+ **/
+s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+{
+	if (hw->func.force_speed_duplex)
+		return hw->func.force_speed_duplex(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_phy_init_script_igp3 - Inits the IGP3 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
+ **/
+s32 e1000_phy_init_script_igp3(struct e1000_hw *hw)
+{
+	DEBUGOUT("Running IGP 3 PHY init script\n");
+
+	/* PHY init IGP 3 */
+	/* Enable rise/fall, 10-mode work in class-A */
+	e1000_write_phy_reg(hw, 0x2F5B, 0x9018);
+	/* Remove all caps from Replica path filter */
+	e1000_write_phy_reg(hw, 0x2F52, 0x0000);
+	/* Bias trimming for ADC, AFE and Driver (Default) */
+	e1000_write_phy_reg(hw, 0x2FB1, 0x8B24);
+	/* Increase Hybrid poly bias */
+	e1000_write_phy_reg(hw, 0x2FB2, 0xF8F0);
+	/* Add 4% to Tx amplitude in Giga mode */
+	e1000_write_phy_reg(hw, 0x2010, 0x10B0);
+	/* Disable trimming (TTT) */
+	e1000_write_phy_reg(hw, 0x2011, 0x0000);
+	/* Poly DC correction to 94.6% + 2% for all channels */
+	e1000_write_phy_reg(hw, 0x20DD, 0x249A);
+	/* ABS DC correction to 95.9% */
+	e1000_write_phy_reg(hw, 0x20DE, 0x00D3);
+	/* BG temp curve trim */
+	e1000_write_phy_reg(hw, 0x28B4, 0x04CE);
+	/* Increasing ADC OPAMP stage 1 currents to max */
+	e1000_write_phy_reg(hw, 0x2F70, 0x29E4);
+	/* Force 1000 (required for enabling PHY regs configuration) */
+	e1000_write_phy_reg(hw, 0x0000, 0x0140);
+	/* Set upd_freq to 6 */
+	e1000_write_phy_reg(hw, 0x1F30, 0x1606);
+	/* Disable NPDFE */
+	e1000_write_phy_reg(hw, 0x1F31, 0xB814);
+	/* Disable adaptive fixed FFE (Default) */
+	e1000_write_phy_reg(hw, 0x1F35, 0x002A);
+	/* Enable FFE hysteresis */
+	e1000_write_phy_reg(hw, 0x1F3E, 0x0067);
+	/* Fixed FFE for short cable lengths */
+	e1000_write_phy_reg(hw, 0x1F54, 0x0065);
+	/* Fixed FFE for medium cable lengths */
+	e1000_write_phy_reg(hw, 0x1F55, 0x002A);
+	/* Fixed FFE for long cable lengths */
+	e1000_write_phy_reg(hw, 0x1F56, 0x002A);
+	/* Enable Adaptive Clip Threshold */
+	e1000_write_phy_reg(hw, 0x1F72, 0x3FB0);
+	/* AHT reset limit to 1 */
+	e1000_write_phy_reg(hw, 0x1F76, 0xC0FF);
+	/* Set AHT master delay to 127 msec */
+	e1000_write_phy_reg(hw, 0x1F77, 0x1DEC);
+	/* Set scan bits for AHT */
+	e1000_write_phy_reg(hw, 0x1F78, 0xF9EF);
+	/* Set AHT Preset bits */
+	e1000_write_phy_reg(hw, 0x1F79, 0x0210);
+	/* Change integ_factor of channel A to 3 */
+	e1000_write_phy_reg(hw, 0x1895, 0x0003);
+	/* Change prop_factor of channels BCD to 8 */
+	e1000_write_phy_reg(hw, 0x1796, 0x0008);
+	/* Change cg_icount + enable integbp for channels BCD */
+	e1000_write_phy_reg(hw, 0x1798, 0xD008);
+	/*
+	 * Change cg_icount + enable integbp + change prop_factor_master
+	 * to 8 for channel A
+	 */
+	e1000_write_phy_reg(hw, 0x1898, 0xD918);
+	/* Disable AHT in Slave mode on channel A */
+	e1000_write_phy_reg(hw, 0x187A, 0x0800);
+	/*
+	 * Enable LPLU and disable AN to 1000 in non-D0a states,
+	 * Enable SPD+B2B
+	 */
+	e1000_write_phy_reg(hw, 0x0019, 0x008D);
+	/* Enable restart AN on an1000_dis change */
+	e1000_write_phy_reg(hw, 0x001B, 0x2080);
+	/* Enable wh_fifo read clock in 10/100 modes */
+	e1000_write_phy_reg(hw, 0x0014, 0x0045);
+	/* Restart AN, Speed selection is 1000 */
+	e1000_write_phy_reg(hw, 0x0000, 0x1340);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_phy_type_from_id - Get PHY type from id
+ *  @phy_id: phy_id read from the phy
+ *
+ *  Returns the phy type from the id.
+ **/
+e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
+{
+	e1000_phy_type phy_type = e1000_phy_unknown;
+
+	switch (phy_id)	{
+	case M88E1000_I_PHY_ID:
+	case M88E1000_E_PHY_ID:
+	case M88E1111_I_PHY_ID:
+	case M88E1011_I_PHY_ID:
+		phy_type = e1000_phy_m88;
+		break;
+	case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
+		phy_type = e1000_phy_igp_2;
+		break;
+	case GG82563_E_PHY_ID:
+		phy_type = e1000_phy_gg82563;
+		break;
+	case IGP03E1000_E_PHY_ID:
+		phy_type = e1000_phy_igp_3;
+		break;
+	case IFE_E_PHY_ID:
+	case IFE_PLUS_E_PHY_ID:
+	case IFE_C_E_PHY_ID:
+		phy_type = e1000_phy_ife;
+		break;
+	default:
+		phy_type = e1000_phy_unknown;
+		break;
+	}
+	return phy_type;
+}
+
+/**
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * Restore the link to its previous settings after the PHY was powered down,
+ * for example to save power or to turn off the link during a driver unload
+ * when wake on lan is not enabled.
+ **/
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	e1000_read_phy_reg(hw, PHY_CONTROL, &mii_reg);
+	mii_reg &= ~MII_CR_POWER_DOWN;
+	e1000_write_phy_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * e1000_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down the PHY, for example to save power or to turn off the link
+ * during a driver unload when wake on lan is not enabled.
+ **/
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	e1000_read_phy_reg(hw, PHY_CONTROL, &mii_reg);
+	mii_reg |= MII_CR_POWER_DOWN;
+	e1000_write_phy_reg(hw, PHY_CONTROL, mii_reg);
+	msec_delay(1);
+}
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.c	2022-03-21 12:58:29.874884232 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_ich8lan
+ * e1000_ich9lan
+ */
+
+#include "e1000_api.h"
+#include "e1000_ich8lan.h"
+
+static s32  e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
+static s32  e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
+static s32  e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
+static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
+static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
+static s32  e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw);
+static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
+static s32  e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw);
+static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
+static s32  e1000_get_phy_info_ich8lan(struct e1000_hw *hw);
+static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
+                                            bool active);
+static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
+                                            bool active);
+static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
+                                   u16 words, u16 *data);
+static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
+                                    u16 words, u16 *data);
+static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
+static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
+static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
+                                            u16 *data);
+static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
+static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
+static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
+static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
+static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
+static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
+                                           u16 *speed, u16 *duplex);
+static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
+static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
+static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
+static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
+static s32  e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
+static s32  e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
+static s32  e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw);
+static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
+static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
+static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+                                          u8 size, u16* data);
+static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
+                                          u32 offset, u16 *data);
+static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+                                                 u32 offset, u8 byte);
+static s32  e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
+                                           u32 offset, u8 data);
+static s32  e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+                                           u8 size, u16 data);
+static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
+
+/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+	struct ich8_hsfsts {
+		u16 flcdone    :1; /* bit 0 Flash Cycle Done */
+		u16 flcerr     :1; /* bit 1 Flash Cycle Error */
+		u16 dael       :1; /* bit 2 Direct Access error Log */
+		u16 berasesz   :2; /* bit 4:3 Sector Erase Size */
+		u16 flcinprog  :1; /* bit 5 flash cycle in Progress */
+		u16 reserved1  :2; /* bits 7:6 Reserved */
+		u16 reserved2  :6; /* bits 13:8 Reserved */
+		u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
+		u16 flockdn    :1; /* bit 15 Flash Config Lock-Down */
+	} hsf_status;
+	u16 regval;
+};
+
+/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h FLCTL */
+union ich8_hws_flash_ctrl {
+	struct ich8_hsflctl {
+		u16 flcgo      :1;   /* 0 Flash Cycle Go */
+		u16 flcycle    :2;   /* 2:1 Flash Cycle */
+		u16 reserved   :5;   /* 7:3 Reserved  */
+		u16 fldbcount  :2;   /* 9:8 Flash Data Byte Count */
+		u16 flockdn    :6;   /* 15:10 Reserved */
+	} hsf_ctrl;
+	u16 regval;
+};
+
+/* ICH Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+	struct ich8_flracc {
+		u32 grra      :8; /* 0:7 GbE region Read Access */
+		u32 grwa      :8; /* 8:15 GbE region Write Access */
+		u32 gmrag     :8; /* 23:16 GbE Master Read Access Grant */
+		u32 gmwag     :8; /* 31:24 GbE Master Write Access Grant */
+	} hsf_flregacc;
+	u16 regval;
+};
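+
+/*
+ * Each of these register-overlay unions is typically used by reading the
+ * hardware register into .regval, inspecting or updating the named
+ * bitfields (e.g. flcdone, flcerr, flcgo), and writing .regval back when
+ * a flash cycle is started or its status is cleared.
+ */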
+
+struct e1000_shadow_ram {
+	u16  value;
+	bool modified;
+};
+
+struct e1000_dev_spec_ich8lan {
+	bool kmrn_lock_loss_workaround_enabled;
+	struct e1000_shadow_ram shadow_ram[E1000_SHADOW_RAM_WORDS];
+};
+
+/**
+ *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize family-specific PHY parameters and function pointers.
+ **/
+static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+	u16 i = 0;
+
+	DEBUGFUNC("e1000_init_phy_params_ich8lan");
+
+	phy->addr                       = 1;
+	phy->reset_delay_us             = 100;
+
+	func->acquire_phy               = e1000_acquire_swflag_ich8lan;
+	func->check_polarity            = e1000_check_polarity_ife_ich8lan;
+	func->check_reset_block         = e1000_check_reset_block_ich8lan;
+	func->force_speed_duplex        = e1000_phy_force_speed_duplex_ich8lan;
+	func->get_cable_length          = e1000_get_cable_length_igp_2;
+	func->get_cfg_done              = e1000_get_cfg_done_ich8lan;
+	func->get_phy_info              = e1000_get_phy_info_ich8lan;
+	func->read_phy_reg              = e1000_read_phy_reg_igp;
+	func->release_phy               = e1000_release_swflag_ich8lan;
+	func->reset_phy                 = e1000_phy_hw_reset_ich8lan;
+	func->set_d0_lplu_state         = e1000_set_d0_lplu_state_ich8lan;
+	func->set_d3_lplu_state         = e1000_set_d3_lplu_state_ich8lan;
+	func->write_phy_reg             = e1000_write_phy_reg_igp;
+	func->power_up_phy              = e1000_power_up_phy_copper;
+	func->power_down_phy            = e1000_power_down_phy_copper_ich8lan;
+
+
+	phy->id = 0;
+	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
+	       (i++ < 100)) {
+		msec_delay(1);
+		ret_val = e1000_get_phy_id(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	/* Verify phy id */
+	switch (phy->id) {
+	case IGP03E1000_E_PHY_ID:
+		phy->type = e1000_phy_igp_3;
+		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+		break;
+	case IFE_E_PHY_ID:
+	case IFE_PLUS_E_PHY_ID:
+	case IFE_C_E_PHY_ID:
+		phy->type = e1000_phy_ife;
+		phy->autoneg_mask = E1000_ALL_NOT_GIG;
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize family-specific NVM parameters and function
+ *  pointers.
+ **/
+static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_functions *func = &hw->func;
+	struct e1000_dev_spec_ich8lan *dev_spec;
+	u32 gfpreg, sector_base_addr, sector_end_addr;
+	s32 ret_val = E1000_SUCCESS;
+	u16 i;
+
+	DEBUGFUNC("e1000_init_nvm_params_ich8lan");
+
+	/* Can't read flash registers if the register set isn't mapped. */
+	if (!hw->flash_address) {
+		DEBUGOUT("ERROR: Flash registers not mapped\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	nvm->type               = e1000_nvm_flash_sw;
+
+	gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
+
+	/*
+	 * sector_X_addr is a "sector"-aligned address (4096 bytes)
+	 * Add 1 to sector_end_addr since this sector is included in
+	 * the overall size.
+	 */
+	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
+	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
+
+	/* flash_base_addr is byte-aligned */
+	nvm->flash_base_addr    = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
+
+	/*
+	 * find total size of the NVM, then cut in half since the total
+	 * size represents two separate NVM banks.
+	 */
+	nvm->flash_bank_size    = (sector_end_addr - sector_base_addr)
+	                          << FLASH_SECTOR_ADDR_SHIFT;
+	nvm->flash_bank_size    /= 2;
+	/* Adjust to word count */
+	nvm->flash_bank_size    /= sizeof(u16);
+
+	nvm->word_size          = E1000_SHADOW_RAM_WORDS;
+
+	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	/* Clear shadow ram */
+	for (i = 0; i < nvm->word_size; i++) {
+		dev_spec->shadow_ram[i].modified = FALSE;
+		dev_spec->shadow_ram[i].value    = 0xFFFF;
+	}
+
+	/* Function Pointers */
+	func->acquire_nvm       = e1000_acquire_swflag_ich8lan;
+	func->read_nvm          = e1000_read_nvm_ich8lan;
+	func->release_nvm       = e1000_release_swflag_ich8lan;
+	func->update_nvm        = e1000_update_nvm_checksum_ich8lan;
+	func->valid_led_default = e1000_valid_led_default_ich8lan;
+	func->validate_nvm      = e1000_validate_nvm_checksum_ich8lan;
+	func->write_nvm         = e1000_write_nvm_ich8lan;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize family-specific MAC parameters and function
+ *  pointers.
+ **/
+static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_mac_params_ich8lan");
+
+	/* Set media type */
+	hw->phy.media_type = e1000_media_type_copper;
+
+	/* Set mta register count */
+	mac->mta_reg_count = 32;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
+	if (mac->type == e1000_ich8lan)
+		mac->rar_entry_count--;
+	/* Set if part includes ASF firmware */
+	mac->asf_firmware_present = TRUE;
+	/* Set if manageability features are enabled. */
+	mac->arc_subsystem_valid = TRUE;
+
+	/* Function pointers */
+
+	/* bus type/speed/width */
+	func->get_bus_info = e1000_get_bus_info_ich8lan;
+	/* reset */
+	func->reset_hw = e1000_reset_hw_ich8lan;
+	/* hw initialization */
+	func->init_hw = e1000_init_hw_ich8lan;
+	/* link setup */
+	func->setup_link = e1000_setup_link_ich8lan;
+	/* physical interface setup */
+	func->setup_physical_interface = e1000_setup_copper_link_ich8lan;
+	/* check for link */
+	func->check_for_link = e1000_check_for_copper_link_generic;
+	/* check management mode */
+	func->check_mng_mode = e1000_check_mng_mode_ich8lan;
+	/* link info */
+	func->get_link_up_info = e1000_get_link_up_info_ich8lan;
+	/* multicast address update */
+	func->update_mc_addr_list = e1000_update_mc_addr_list_generic;
+	/* setting MTA */
+	func->mta_set = e1000_mta_set_generic;
+	/* blink LED */
+	func->blink_led = e1000_blink_led_generic;
+	/* setup LED */
+	func->setup_led = e1000_setup_led_generic;
+	/* cleanup LED */
+	func->cleanup_led = e1000_cleanup_led_ich8lan;
+	/* turn on/off LED */
+	func->led_on = e1000_led_on_ich8lan;
+	func->led_off = e1000_led_off_ich8lan;
+	/* remove device */
+	func->remove_device = e1000_remove_device_generic;
+	/* clear hardware counters */
+	func->clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
+
+	hw->dev_spec_size = sizeof(struct e1000_dev_spec_ich8lan);
+
+	/* Device-specific structure allocation */
+	ret_val = e1000_alloc_zeroed_dev_spec_struct(hw, hw->dev_spec_size);
+	if (ret_val)
+		goto out;
+
+	/* Enable PCS Lock-loss workaround for ICH8 */
+	if (mac->type == e1000_ich8lan)
+		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
+
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize family-specific function pointers for PHY, MAC, and NVM.
+ **/
+void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
+
+	hw->func.init_mac_params = e1000_init_mac_params_ich8lan;
+	hw->func.init_nvm_params = e1000_init_nvm_params_ich8lan;
+	hw->func.init_phy_params = e1000_init_phy_params_ich8lan;
+}
+
+/**
+ *  e1000_acquire_swflag_ich8lan - Acquire software control flag
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquires the software control flag for performing NVM and PHY
+ *  operations.  This is a function pointer entry point only called by
+ *  read/write routines for the PHY and NVM parts.
+ **/
+static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
+{
+	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
+
+	while (timeout) {
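+		/*
+		 * Request ownership by setting the SW flag, then read the
+		 * register back; the flag only sticks once firmware/hardware
+		 * have released the resource.
+		 */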
+		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+		extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+
+		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+			break;
+		msec_delay_irq(1);
+		timeout--;
+	}
+
+	if (!timeout) {
+		DEBUGOUT("FW or HW has locked the resource for too long.\n");
+		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_release_swflag_ich8lan - Release software control flag
+ *  @hw: pointer to the HW structure
+ *
+ *  Releases the software control flag for performing NVM and PHY operations.
+ *  This is a function pointer entry point only called by read/write
+ *  routines for the PHY and NVM parts.
+ **/
+static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
+{
+	u32 extcnf_ctrl;
+
+	DEBUGFUNC("e1000_release_swflag_ich8lan");
+
+	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+
+	return;
+}
+
+/**
+ *  e1000_check_mng_mode_ich8lan - Checks management mode
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks if the adapter has manageability enabled.
+ *  This is a function pointer entry point only called by read/write
+ *  routines for the PHY and NVM parts.
+ **/
+static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
+{
+	u32 fwsm;
+
+	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
+
+	fwsm = E1000_READ_REG(hw, E1000_FWSM);
+
+	return ((fwsm & E1000_FWSM_MODE_MASK) ==
+	        (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks if firmware is blocking the reset of the PHY.
+ *  This is a function pointer entry point only called by
+ *  reset routines.
+ **/
+static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
+{
+	u32 fwsm;
+
+	DEBUGFUNC("e1000_check_reset_block_ich8lan");
+
+	fwsm = E1000_READ_REG(hw, E1000_FWSM);
+
+	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
+	                                        : E1000_BLK_PHY_RESET;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_ich8lan - Force PHY speed & duplex
+ *  @hw: pointer to the HW structure
+ *
+ *  Forces the speed and duplex settings of the PHY.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex_ich8lan");
+
+	if (phy->type != e1000_phy_ife) {
+		ret_val = e1000_phy_force_speed_duplex_igp(hw);
+		goto out;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	e1000_phy_force_speed_duplex_setup(hw, &data);
+
+	ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, data);
+	if (ret_val)
+		goto out;
+
+	/* Disable MDI-X support for 10/100 */
+	ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	data &= ~IFE_PMC_AUTO_MDIX;
+	data &= ~IFE_PMC_FORCE_MDIX;
+
+	ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, data);
+	if (ret_val)
+		goto out;
+
+	DEBUGOUT1("IFE PMC: %X\n", data);
+
+	usec_delay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n");
+
+		ret_val = e1000_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+
+		if (!link) {
+			DEBUGOUT("Link taking longer than expected.\n");
+		}
+
+		/* Try once more */
+		ret_val = e1000_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Resets the PHY
+ *  This is a function pointer entry point called by drivers
+ *  or other shared routines.
+ **/
+static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
+	s32 ret_val;
+	u16 loop = E1000_ICH8_LAN_INIT_TIMEOUT;
+	u16 word_addr, reg_data, reg_addr, phy_page = 0;
+
+	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
+
+	ret_val = e1000_phy_hw_reset_generic(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Initialize the PHY from the NVM on ICH platforms.  This
+	 * is needed due to an issue where the NVM configuration is
+	 * not properly autoloaded after power transitions.
+	 * Therefore, after each PHY reset, we will load the
+	 * configuration data out of the NVM manually.
+	 */
+	if (hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) {
+		/* Check if SW needs to configure the PHY */
+		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
+		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_M))
+			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
+		else
+			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+
+		data = E1000_READ_REG(hw, E1000_FEXTNVM);
+		if (!(data & sw_cfg_mask))
+			goto out;
+
+		/* Wait for basic configuration to complete before proceeding */
+		do {
+			data = E1000_READ_REG(hw, E1000_STATUS);
+			data &= E1000_STATUS_LAN_INIT_DONE;
+			usec_delay(100);
+		} while ((!data) && --loop);
+
+		/*
+		 * If basic configuration is incomplete before the above loop
+		 * count reaches 0, loading the configuration from NVM will
+		 * leave the PHY in a bad state possibly resulting in no link.
+		 */
+		if (loop == 0) {
+			DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
+		}
+
+		/* Clear the Init Done bit for the next init event */
+		data = E1000_READ_REG(hw, E1000_STATUS);
+		data &= ~E1000_STATUS_LAN_INIT_DONE;
+		E1000_WRITE_REG(hw, E1000_STATUS, data);
+
+		/*
+		 * Make sure HW does not configure LCD from PHY
+		 * extended configuration before SW configuration
+		 */
+		data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+			goto out;
+
+		cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
+		cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
+		cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
+		if (!cnf_size)
+			goto out;
+
+		cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
+		cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
+
+		/*
+		 * Configure LCD from extended configuration
+		 * region.
+		 */
+
+		/* cnf_base_addr is in DWORD */
+		word_addr = (u16)(cnf_base_addr << 1);
+
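+		/*
+		 * Each extended configuration entry is a pair of NVM words:
+		 * the register data followed by the register address (which
+		 * may be a PHY page select that applies to later entries).
+		 */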
+		for (i = 0; i < cnf_size; i++) {
+			ret_val = e1000_read_nvm(hw,
+			                        (word_addr + i * 2),
+			                        1,
+			                        &reg_data);
+			if (ret_val)
+				goto out;
+
+			ret_val = e1000_read_nvm(hw,
+			                        (word_addr + i * 2 + 1),
+			                        1,
+			                        &reg_addr);
+			if (ret_val)
+				goto out;
+
+			/* Save off the PHY page for future writes. */
+			if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
+				phy_page = reg_data;
+				continue;
+			}
+
+			reg_addr |= phy_page;
+
+			ret_val = e1000_write_phy_reg(hw,
+			                             (u32)reg_addr,
+			                             reg_data);
+			if (ret_val)
+				goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_ich8lan - Calls appropriate PHY type get_phy_info
+ *  @hw: pointer to the HW structure
+ *
+ *  Wrapper for calling the get_phy_info routines for the appropriate phy type.
+ *  This is a function pointer entry point called by drivers
+ *  or other shared routines.
+ **/
+static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = -E1000_ERR_PHY_TYPE;
+
+	DEBUGFUNC("e1000_get_phy_info_ich8lan");
+
+	switch (hw->phy.type) {
+	case e1000_phy_ife:
+		ret_val = e1000_get_phy_info_ife_ich8lan(hw);
+		break;
+	case e1000_phy_igp_3:
+		ret_val = e1000_get_phy_info_igp(hw);
+		break;
+	default:
+		break;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_ife_ich8lan - Retrieves various IFE PHY states
+ *  @hw: pointer to the HW structure
+ *
+ *  Populates "phy" structure with various feature states.
+ *  This function is only called by other family-specific
+ *  routines.
+ **/
+static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	DEBUGFUNC("e1000_get_phy_info_ife_ich8lan");
+
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		DEBUGOUT("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data);
+	if (ret_val)
+		goto out;
+	phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
+	                           ? FALSE : TRUE;
+
+	if (phy->polarity_correction) {
+		ret_val = e1000_check_polarity_ife_ich8lan(hw);
+		if (ret_val)
+			goto out;
+	} else {
+		/* Polarity is forced */
+		phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
+		                      ? e1000_rev_polarity_reversed
+		                      : e1000_rev_polarity_normal;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? TRUE : FALSE;
+
+	/* The following parameters are undefined for 10/100 operation. */
+	phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+	phy->local_rx = e1000_1000t_rx_status_undefined;
+	phy->remote_rx = e1000_1000t_rx_status_undefined;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_ife_ich8lan - Check cable polarity for IFE PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Polarity is determined based on whether the polarity reversal feature is enabled.
+ *  This function is only called by other family-specific
+ *  routines.
+ **/
+static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, offset, mask;
+
+	DEBUGFUNC("e1000_check_polarity_ife_ich8lan");
+
+	/*
+	 * Polarity is determined based on the reversal feature
+	 * being enabled.
+	 */
+	if (phy->polarity_correction) {
+		offset	= IFE_PHY_EXTENDED_STATUS_CONTROL;
+		mask	= IFE_PESC_POLARITY_REVERSED;
+	} else {
+		offset	= IFE_PHY_SPECIAL_CONTROL;
+		mask	= IFE_PSC_FORCE_POLARITY;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, offset, &phy_data);
+
+	if (!ret_val)
+		phy->cable_polarity = (phy_data & mask)
+		                      ? e1000_rev_polarity_reversed
+		                      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: TRUE to enable LPLU, FALSE to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.  When
+ *  activating LPLU this function also disables smart speed
+ *  and vice versa.  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
+                                           bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 phy_ctrl;
+	s32 ret_val = E1000_SUCCESS;
+	u16 data;
+
+	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
+
+	if (phy->type == e1000_phy_ife)
+		goto out;
+
+	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
+
+	if (active) {
+		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
+		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+		/*
+		 * Call gig speed drop workaround on LPLU before accessing
+		 * any PHY registers
+		 */
+		if ((hw->mac.type == e1000_ich8lan) &&
+		    (hw->phy.type == e1000_phy_igp_3))
+			e1000_gig_downshift_workaround_ich8lan(hw);
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1000_read_phy_reg(hw,
+		                            IGP01E1000_PHY_PORT_CONFIG,
+		                            &data);
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1000_write_phy_reg(hw,
+		                             IGP01E1000_PHY_PORT_CONFIG,
+		                             data);
+		if (ret_val)
+			goto out;
+	} else {
+		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
+		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1000_read_phy_reg(hw,
+			                            IGP01E1000_PHY_PORT_CONFIG,
+			                            &data);
+			if (ret_val)
+				goto out;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1000_read_phy_reg(hw,
+			                            IGP01E1000_PHY_PORT_CONFIG,
+			                            &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
+ *  @hw: pointer to the HW structure
+ *  @active: TRUE to enable LPLU, FALSE to disable
+ *
+ *  Sets the LPLU D3 state according to the active flag.  When
+ *  activating LPLU this function also disables smart speed
+ *  and vice versa.  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
+                                           bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 phy_ctrl;
+	s32 ret_val = E1000_SUCCESS;
+	u16 data;
+
+	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
+
+	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
+
+	if (!active) {
+		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1000_read_phy_reg(hw,
+			                            IGP01E1000_PHY_PORT_CONFIG,
+			                            &data);
+			if (ret_val)
+				goto out;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1000_read_phy_reg(hw,
+			                            IGP01E1000_PHY_PORT_CONFIG,
+			                            &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		}
+	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+	           (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+	           (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
+		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+		/*
+		 * Call gig speed drop workaround on LPLU before accessing
+		 * any PHY registers
+		 */
+		if ((hw->mac.type == e1000_ich8lan) &&
+		    (hw->phy.type == e1000_phy_igp_3))
+			e1000_gig_downshift_workaround_ich8lan(hw);
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1000_read_phy_reg(hw,
+		                            IGP01E1000_PHY_PORT_CONFIG,
+		                            &data);
+		if (ret_val)
+			goto out;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1000_write_phy_reg(hw,
+		                             IGP01E1000_PHY_PORT_CONFIG,
+		                             data);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
+ *  @hw: pointer to the HW structure
+ *  @bank:  pointer to the variable that returns the active bank
+ *
+ *  Reads the EECD register to determine which NVM bank holds the valid signature.
+ **/
+static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
+{
+	s32 ret_val = E1000_SUCCESS;
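+
+	/*
+	 * The EECD SEC1VAL bit reports which flash bank holds the valid
+	 * NVM signature: bank 1 when the bit is set, bank 0 otherwise.
+	 */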
+	if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_SEC1VAL)
+		*bank = 1;
+	else
+		*bank = 0;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset (in bytes) of the word(s) to read.
+ *  @words: Size of data to read in words
+ *  @data: Pointer to the word(s) to read at offset.
+ *
+ *  Reads a word(s) from the NVM using the flash access registers.
+ **/
+static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
+                                  u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_dev_spec_ich8lan *dev_spec;
+	u32 act_offset;
+	s32 ret_val = E1000_SUCCESS;
+	u32 bank = 0;
+	u16 i, word;
+
+	DEBUGFUNC("e1000_read_nvm_ich8lan");
+
+	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_nvm(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+	if (ret_val != E1000_SUCCESS)
+		goto out;
+
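+	/*
+	 * Address the requested words relative to the start of the valid
+	 * bank; words pending in the shadow RAM take precedence over flash.
+	 */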
+	act_offset = (bank) ? nvm->flash_bank_size : 0;
+	act_offset += offset;
+
+	for (i = 0; i < words; i++) {
+		if ((dev_spec->shadow_ram) &&
+		    (dev_spec->shadow_ram[offset+i].modified)) {
+			data[i] = dev_spec->shadow_ram[offset+i].value;
+		} else {
+			ret_val = e1000_read_flash_word_ich8lan(hw,
+			                                        act_offset + i,
+			                                        &word);
+			if (ret_val)
+				break;
+			data[i] = word;
+		}
+	}
+
+	e1000_release_nvm(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_flash_cycle_init_ich8lan - Initialize flash
+ *  @hw: pointer to the HW structure
+ *
+ *  This function does initial flash setup so that a new read/write/erase cycle
+ *  can be started.
+ **/
+static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
+{
+	union ich8_hws_flash_status hsfsts;
+	s32 ret_val = -E1000_ERR_NVM;
+	s32 i = 0;
+
+	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
+
+	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+	/* Check if the flash descriptor is valid */
+	if (hsfsts.hsf_status.fldesvalid == 0) {
+		DEBUGOUT("Flash descriptor invalid.  "
+		         "SW Sequencing must be used.");
+		goto out;
+	}
+
+	/* Clear FCERR and DAEL in hw status by writing 1 */
+	hsfsts.hsf_status.flcerr = 1;
+	hsfsts.hsf_status.dael = 1;
+
+	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+
+	/*
+	 * Either we should have a hardware SPI cycle-in-progress bit to
+	 * check against in order to start a new cycle, or the FDONE bit
+	 * should be set to 1 by the hardware after reset, which can then
+	 * be used as an indication of whether a cycle is in progress or
+	 * has completed.
+	 */
+
+	if (hsfsts.hsf_status.flcinprog == 0) {
+		/*
+		 * There is no cycle running at present,
+		 * so we can start a cycle.
+		 * Begin by setting Flash Cycle Done.
+		 */
+		hsfsts.hsf_status.flcdone = 1;
+		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+		ret_val = E1000_SUCCESS;
+	} else {
+		/*
+		 * Otherwise poll for sometime so the current
+		 * cycle has a chance to end before giving up.
+		 */
+		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
+			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+			                                      ICH_FLASH_HSFSTS);
+			if (hsfsts.hsf_status.flcinprog == 0) {
+				ret_val = E1000_SUCCESS;
+				break;
+			}
+			usec_delay(1);
+		}
+		if (ret_val == E1000_SUCCESS) {
+			/*
+			 * Successful in waiting for previous cycle to timeout,
+			 * now set the Flash Cycle Done.
+			 */
+			hsfsts.hsf_status.flcdone = 1;
+			E1000_WRITE_FLASH_REG16(hw,
+			                        ICH_FLASH_HSFSTS,
+			                        hsfsts.regval);
+		} else {
+			DEBUGOUT("Flash controller busy, cannot get access");
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
+ *  @hw: pointer to the HW structure
+ *  @timeout: maximum time to wait for completion
+ *
+ *  This function starts a flash cycle and waits for its completion.
+ **/
+static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
+{
+	union ich8_hws_flash_ctrl hsflctl;
+	union ich8_hws_flash_status hsfsts;
+	s32 ret_val = -E1000_ERR_NVM;
+	u32 i = 0;
+
+	DEBUGFUNC("e1000_flash_cycle_ich8lan");
+
+	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
+	hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+	hsflctl.hsf_ctrl.flcgo = 1;
+	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+	/* wait till FDONE bit is set to 1 */
+	do {
+		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+		if (hsfsts.hsf_status.flcdone == 1)
+			break;
+		usec_delay(1);
+	} while (i++ < timeout);
+
+	if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
+		ret_val = E1000_SUCCESS;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_read_flash_word_ich8lan - Read word from flash
+ *  @hw: pointer to the HW structure
+ *  @offset: offset to data location
+ *  @data: pointer to the location for storing the data
+ *
+ *  Reads the flash word at offset into data.  Offset is converted
+ *  to bytes before read.
+ **/
+static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
+                                         u16 *data)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_read_flash_word_ich8lan");
+
+	if (!data) {
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	/* Must convert offset into bytes. */
+	offset <<= 1;
+
+	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 2, data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset (in bytes) of the byte or word to read.
+ *  @size: Size of data to read, 1=byte 2=word
+ *  @data: Pointer to the word to store the value read.
+ *
+ *  Reads a byte or word from the NVM using the flash access registers.
+ **/
+static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+                                         u8 size, u16* data)
+{
+	union ich8_hws_flash_status hsfsts;
+	union ich8_hws_flash_ctrl hsflctl;
+	u32 flash_linear_addr;
+	u32 flash_data = 0;
+	s32 ret_val = -E1000_ERR_NVM;
+	u8 count = 0;
+
+	DEBUGFUNC("e1000_read_flash_data_ich8lan");
+
+	if (size < 1  || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+		goto out;
+
+	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+	                    hw->nvm.flash_base_addr;
+
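+	/* Retry the flash cycle a bounded number of times if FCERR is raised. */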
+	do {
+		usec_delay(1);
+		/* Steps */
+		ret_val = e1000_flash_cycle_init_ich8lan(hw);
+		if (ret_val != E1000_SUCCESS)
+			break;
+
+		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+		hsflctl.hsf_ctrl.fldbcount = size - 1;
+		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
+
+		ret_val = e1000_flash_cycle_ich8lan(hw,
+		                                ICH_FLASH_READ_COMMAND_TIMEOUT);
+
+		/*
+		 * Check if FCERR is set to 1; if so, clear it and try the
+		 * whole sequence a few more times.  Otherwise read in
+		 * (shift in) the Flash Data0, least significant byte first.
+		 */
+		if (ret_val == E1000_SUCCESS) {
+			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
+			if (size == 1) {
+				*data = (u8)(flash_data & 0x000000FF);
+			} else if (size == 2) {
+				*data = (u16)(flash_data & 0x0000FFFF);
+			}
+			break;
+		} else {
+			/*
+			 * If we've gotten here, then things are probably
+			 * completely hosed, but if the error condition is
+			 * detected, it won't hurt to give it another try...
+			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
+			 */
+			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+			                                      ICH_FLASH_HSFSTS);
+			if (hsfsts.hsf_status.flcerr == 1) {
+				/* Repeat for some time before giving up. */
+				continue;
+			} else if (hsfsts.hsf_status.flcdone == 0) {
+				DEBUGOUT("Timeout error - flash cycle "
+				         "did not complete.");
+				break;
+			}
+		}
+	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset (in bytes) of the word(s) to write.
+ *  @words: Size of data to write in words
+ *  @data: Pointer to the word(s) to write at offset.
+ *
+ *  Writes a byte or word to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
+                                   u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_dev_spec_ich8lan *dev_spec;
+	s32 ret_val = E1000_SUCCESS;
+	u16 i;
+
+	DEBUGFUNC("e1000_write_nvm_ich8lan");
+
+	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_nvm(hw);
+	if (ret_val)
+		goto out;
+
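+	/*
+	 * Writes are only staged in the shadow RAM here; the flash is
+	 * updated later by e1000_update_nvm_checksum_ich8lan().
+	 */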
+	for (i = 0; i < words; i++) {
+		dev_spec->shadow_ram[offset+i].modified = TRUE;
+		dev_spec->shadow_ram[offset+i].value = data[i];
+	}
+
+	e1000_release_nvm(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
+ *  @hw: pointer to the HW structure
+ *
+ *  The NVM checksum is updated by calling the generic update_nvm_checksum,
+ *  which writes the checksum to the shadow ram.  The changes in the shadow
+ *  ram are then committed to the EEPROM by processing one bank at a time,
+ *  checking for the modified bit and writing only the pending changes.
+ *  After a successful commit, the shadow ram is cleared and is ready for
+ *  future writes.
+ **/
+static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_dev_spec_ich8lan *dev_spec;
+	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
+
+	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;
+
+	ret_val = e1000_update_nvm_checksum_generic(hw);
+	if (ret_val)
+		goto out;
+
+	if (nvm->type != e1000_nvm_flash_sw)
+		goto out;
+
+	ret_val = e1000_acquire_nvm(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * We're writing to the opposite bank so if we're on bank 1,
+	 * write to bank 0 etc.  We also need to erase the segment that
+	 * is going to be written
+	 */
+	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+	if (ret_val != E1000_SUCCESS)
+		goto out;
+
+	if (bank == 0) {
+		new_bank_offset = nvm->flash_bank_size;
+		old_bank_offset = 0;
+		e1000_erase_flash_bank_ich8lan(hw, 1);
+	} else {
+		old_bank_offset = nvm->flash_bank_size;
+		new_bank_offset = 0;
+		e1000_erase_flash_bank_ich8lan(hw, 0);
+	}
+
+	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+		/*
+		 * Determine whether to write the value stored
+		 * in the other NVM bank or a modified value stored
+		 * in the shadow RAM
+		 */
+		if (dev_spec->shadow_ram[i].modified) {
+			data = dev_spec->shadow_ram[i].value;
+		} else {
+			e1000_read_flash_word_ich8lan(hw,
+			                              i + old_bank_offset,
+			                              &data);
+		}
+
+		/*
+		 * If this is the signature word (offset 0x13), keep the
+		 * signature bits (15:14) at 11b until the commit has
+		 * completed.  This will allow us to write 10b, which
+		 * indicates the signature is valid.  We want to do this
+		 * after the write has completed so that we don't mark the
+		 * segment valid while the write is still in progress.
+		 */
+		if (i == E1000_ICH_NVM_SIG_WORD)
+			data |= E1000_ICH_NVM_SIG_MASK;
+
+		/* Convert offset to bytes. */
+		act_offset = (i + new_bank_offset) << 1;
+
+		usec_delay(100);
+		/* Write the bytes to the new bank. */
+		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+		                                               act_offset,
+		                                               (u8)data);
+		if (ret_val)
+			break;
+
+		usec_delay(100);
+		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+		                                          act_offset + 1,
+		                                          (u8)(data >> 8));
+		if (ret_val)
+			break;
+	}
+
+	/*
+	 * Don't bother writing the segment valid bits if sector
+	 * programming failed.
+	 */
+	if (ret_val) {
+		DEBUGOUT("Flash commit failed.\n");
+		e1000_release_nvm(hw);
+		goto out;
+	}
+
+	/*
+	 * Finally, validate the new segment by setting bits 15:14
+	 * to 10b in word 0x13.  This can be done without an erase
+	 * because these bits start out as 11b and we only need to
+	 * clear bit 14.
+	 */
+	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
+	e1000_read_flash_word_ich8lan(hw, act_offset, &data);
+	data &= 0xBFFF;
+	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+	                                               act_offset * 2 + 1,
+	                                               (u8)(data >> 8));
+	if (ret_val) {
+		e1000_release_nvm(hw);
+		goto out;
+	}
+
+	/*
+	 * And invalidate the previously valid segment by setting the
+	 * high byte of its signature word (0x13) to 0.  This can be
+	 * done without an erase because a flash erase sets all bits
+	 * to 1's; we can always change 1's to 0's without erasing.
+	 */
+	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
+	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
+	if (ret_val) {
+		e1000_release_nvm(hw);
+		goto out;
+	}
+
+	/* Great!  Everything worked, we can now clear the cached entries. */
+	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+		dev_spec->shadow_ram[i].modified = FALSE;
+		dev_spec->shadow_ram[i].value = 0xFFFF;
+	}
+
+	e1000_release_nvm(hw);
+
+	/*
+	 * Reload the EEPROM, or else modifications will not appear
+	 * until after the next adapter reset.
+	 */
+	e1000_reload_nvm(hw);
+	msec_delay(10);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
+ *  If the bit is 0, the EEPROM has been modified but the checksum was not
+ *  calculated, in which case we need to calculate the checksum and set
+ *  bit 6.
+ **/
+static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 data;
+
+	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
+
+	/*
+	 * Read 0x19 and check bit 6.  If this bit is 0, the checksum
+	 * needs to be fixed.  This bit is an indication that the NVM
+	 * was prepared by OEM software and did not calculate the
+	 * checksum...a likely scenario.
+	 */
+	ret_val = e1000_read_nvm(hw, 0x19, 1, &data);
+	if (ret_val)
+		goto out;
+
+	if ((data & 0x40) == 0) {
+		data |= 0x40;
+		ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
+		if (ret_val)
+			goto out;
+		ret_val = e1000_update_nvm_checksum(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000_validate_nvm_checksum_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset (in bytes) of the byte/word to read.
+ *  @size: Size of data to read, 1=byte 2=word
+ *  @data: The byte(s) to write to the NVM.
+ *
+ *  Writes one/two bytes to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+                                          u8 size, u16 data)
+{
+	union ich8_hws_flash_status hsfsts;
+	union ich8_hws_flash_ctrl hsflctl;
+	u32 flash_linear_addr;
+	u32 flash_data = 0;
+	s32 ret_val = -E1000_ERR_NVM;
+	u8 count = 0;
+
+	DEBUGFUNC("e1000_write_ich8_data");
+
+	if (size < 1 || size > 2 || data > size * 0xff ||
+	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
+		goto out;
+
+	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+	                    hw->nvm.flash_base_addr;
+
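+	/* As with reads, retry the cycle on FCERR up to the repeat count. */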
+	do {
+		usec_delay(1);
+		/* Steps */
+		ret_val = e1000_flash_cycle_init_ich8lan(hw);
+		if (ret_val != E1000_SUCCESS)
+			break;
+
+		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+		hsflctl.hsf_ctrl.fldbcount = size - 1;
+		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
+
+		if (size == 1)
+			flash_data = (u32)data & 0x00FF;
+		else
+			flash_data = (u32)data;
+
+		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
+
+		/*
+		 * Check if FCERR is set to 1; if so, clear it and try the
+		 * whole sequence a few more times, else we are done.
+		 */
+		ret_val = e1000_flash_cycle_ich8lan(hw,
+		                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+		if (ret_val == E1000_SUCCESS) {
+			break;
+		} else {
+			/*
+			 * If we're here, then things are most likely
+			 * completely hosed, but if the error condition
+			 * is detected, it won't hurt to give it another
+			 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+			 */
+			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+			                                      ICH_FLASH_HSFSTS);
+			if (hsfsts.hsf_status.flcerr == 1) {
+				/* Repeat for some time before giving up. */
+				continue;
+			} else if (hsfsts.hsf_status.flcdone == 0) {
+				DEBUGOUT("Timeout error - flash cycle "
+				         "did not complete.");
+				break;
+			}
+		}
+	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The index of the byte to read.
+ *  @data: The byte to write to the NVM.
+ *
+ *  Writes a single byte to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+                                          u8 data)
+{
+	u16 word = (u16)data;
+
+	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
+
+	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
+}
+
+/**
+ *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset of the byte to write.
+ *  @byte: The byte to write to the NVM.
+ *
+ *  Writes a single byte to the NVM using the flash access registers.
+ *  Goes through a retry algorithm before giving up.
+ **/
+static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+                                                u32 offset, u8 byte)
+{
+	s32 ret_val;
+	u16 program_retries;
+
+	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
+
+	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
+	if (ret_val == E1000_SUCCESS)
+		goto out;
+
+	for (program_retries = 0; program_retries < 100; program_retries++) {
+		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
+		usec_delay(100);
+		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
+		if (ret_val == E1000_SUCCESS)
+			break;
+	}
+	if (program_retries == 100) {
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
+ *  @hw: pointer to the HW structure
+ *  @bank: 0 for first bank, 1 for second bank, etc.
+ *
+ *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
+ *  bank N is 4096 * N + flash_reg_addr.
+ **/
+static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	union ich8_hws_flash_status hsfsts;
+	union ich8_hws_flash_ctrl hsflctl;
+	u32 flash_linear_addr;
+	/* bank size is in 16bit words - adjust to bytes */
+	u32 flash_bank_size = nvm->flash_bank_size * 2;
+	s32  ret_val = E1000_SUCCESS;
+	s32  count = 0;
+	s32  j, iteration, sector_size;
+
+	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
+
+	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+	/*
+	 * Determine HW Sector size: Read BERASE bits of hw flash status
+	 * register
+	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
+	 *     consecutive sectors.  The start index for the nth Hw sector
+	 *     can be calculated as bank * 4096 + n * 256.
+	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
+	 *     The start index for the nth Hw sector can be calculated
+	 *     as bank * 4096.
+	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
+	 *     (ich9 only, otherwise error condition)
+	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
+	 */
+	switch (hsfsts.hsf_status.berasesz) {
+	case 0:
+		/* Hw sector size 256 */
+		sector_size = ICH_FLASH_SEG_SIZE_256;
+		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
+		break;
+	case 1:
+		sector_size = ICH_FLASH_SEG_SIZE_4K;
+		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_4K;
+		break;
+	case 2:
+		if (hw->mac.type == e1000_ich9lan) {
+			sector_size = ICH_FLASH_SEG_SIZE_8K;
+			iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_8K;
+		} else {
+			ret_val = -E1000_ERR_NVM;
+			goto out;
+		}
+		break;
+	case 3:
+		sector_size = ICH_FLASH_SEG_SIZE_64K;
+		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_64K;
+		break;
+	default:
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	/* Start with the base address, then add the sector offset. */
+	flash_linear_addr = hw->nvm.flash_base_addr;
+	flash_linear_addr += (bank) ? (sector_size * iteration) : 0;
+
+	for (j = 0; j < iteration ; j++) {
+		do {
+			/* Steps */
+			ret_val = e1000_flash_cycle_init_ich8lan(hw);
+			if (ret_val)
+				goto out;
+
+			/*
+			 * Write a value 11 (block Erase) in Flash
+			 * Cycle field in hw flash control
+			 */
+			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
+			                                      ICH_FLASH_HSFCTL);
+			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
+			E1000_WRITE_FLASH_REG16(hw,
+			                        ICH_FLASH_HSFCTL,
+			                        hsflctl.regval);
+
+			/*
+			 * Write the last 24 bits of an index within the
+			 * block into Flash Linear address field in Flash
+			 * Address.
+			 */
+			flash_linear_addr += (j * sector_size);
+			E1000_WRITE_FLASH_REG(hw,
+			                      ICH_FLASH_FADDR,
+			                      flash_linear_addr);
+
+			ret_val = e1000_flash_cycle_ich8lan(hw,
+			                       ICH_FLASH_ERASE_COMMAND_TIMEOUT);
+			if (ret_val == E1000_SUCCESS) {
+				break;
+			} else {
+				/*
+				 * Check if FCERR is set to 1.  If so,
+				 * clear it and try the whole sequence
+				 * a few more times, else we are done.
+				 */
+				hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+				                              ICH_FLASH_HSFSTS);
+				if (hsfsts.hsf_status.flcerr == 1) {
+					/*
+					 * repeat for some time before
+					 * giving up
+					 */
+					continue;
+				} else if (hsfsts.hsf_status.flcdone == 0)
+					goto out;
+			}
+		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_valid_led_default_ich8lan - Set the default LED settings
+ *  @hw: pointer to the HW structure
+ *  @data: Pointer to the LED settings
+ *
+ *  Reads the LED default settings from the NVM into data.  If the NVM LED
+ *  setting is all 0's or all F's, set the LED default to a valid LED default
+ *  setting.
+ **/
+static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_valid_led_default_ich8lan");
+
+	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		goto out;
+	}
+
+	if (*data == ID_LED_RESERVED_0000 ||
+	    *data == ID_LED_RESERVED_FFFF)
+		*data = ID_LED_DEFAULT_ICH8LAN;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
+ *  @hw: pointer to the HW structure
+ *
+ *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
+ *  register, so the bus width is hard-coded.
+ **/
+static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_get_bus_info_ich8lan");
+
+	ret_val = e1000_get_bus_info_pcie_generic(hw);
+
+	/*
+	 * ICH devices are "PCI Express"-ish.  They have
+	 * a configuration space, but do not contain
+	 * PCI Express Capability registers, so bus width
+	 * must be hardcoded.
+	 */
+	if (bus->width == e1000_bus_width_unknown)
+		bus->width = e1000_bus_width_pcie_x1;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_reset_hw_ich8lan - Reset the hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  Does a full reset of the hardware which includes a reset of the PHY and
+ *  MAC.
+ **/
+static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
+{
+	u32 ctrl, icr, kab;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_reset_hw_ich8lan");
+
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = e1000_disable_pcie_master_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("PCI-E Master disable polling has failed.\n");
+	}
+
+	DEBUGOUT("Masking off all interrupts\n");
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+	/*
+	 * Disable the Transmit and Receive units.  Then delay to allow
+	 * any pending transactions to complete before we hit the MAC
+	 * with the global reset.
+	 */
+	E1000_WRITE_REG(hw, E1000_RCTL, 0);
+	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+	E1000_WRITE_FLUSH(hw);
+
+	msec_delay(10);
+
+	/* Workaround for ICH8 bit corruption issue in FIFO memory */
+	if (hw->mac.type == e1000_ich8lan) {
+		/* Set Tx and Rx buffer allocation to 8k apiece. */
+		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
+		/* Set Packet Buffer Size to 16k. */
+		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
+	}
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	if (!e1000_check_reset_block(hw) && !hw->phy.reset_disable) {
+		/*
+		 * PHY HW reset requires MAC CORE reset at the same
+		 * time to make sure the interface between MAC and the
+		 * external PHY is reset.
+		 */
+		ctrl |= E1000_CTRL_PHY_RST;
+	}
+	ret_val = e1000_acquire_swflag_ich8lan(hw);
+	DEBUGOUT("Issuing a global reset to ich8lan");
+	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
+	msec_delay(20);
+
+	ret_val = e1000_get_auto_rd_done_generic(hw);
+	if (ret_val) {
+		/*
+		 * When auto config read does not complete, do not
+		 * return with an error. This can happen in situations
+		 * where there is no eeprom and prevents getting link.
+		 */
+		DEBUGOUT("Auto Read Done did not complete\n");
+	}
+
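+	/* Mask and clear any interrupts left pending after the reset. */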
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+	icr = E1000_READ_REG(hw, E1000_ICR);
+
+	kab = E1000_READ_REG(hw, E1000_KABGTXD);
+	kab |= E1000_KABGTXD_BGSQLBIAS;
+	E1000_WRITE_REG(hw, E1000_KABGTXD, kab);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_hw_ich8lan - Initialize the hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  Prepares the hardware for transmit and receive by doing the following:
+ *   - initialize hardware bits
+ *   - initialize LED identification
+ *   - setup receive address registers
+ *   - setup flow control
+ *   - setup transmit descriptors
+ *   - clear statistics
+ **/
+static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 ctrl_ext, txdctl, snoop;
+	s32 ret_val;
+	u16 i;
+
+	DEBUGFUNC("e1000_init_hw_ich8lan");
+
+	e1000_initialize_hw_bits_ich8lan(hw);
+
+	/* Initialize identification LED */
+	ret_val = e1000_id_led_init_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
+	}
+
+	/* Setup the receive address. */
+	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+	/* Zero out the Multicast HASH table */
+	DEBUGOUT("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link(hw);
+
+	/* Set the transmit descriptor write-back policy for both queues */
+	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
+	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+		 E1000_TXDCTL_FULL_TX_DESC_WB;
+	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
+	         E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
+	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
+	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+		 E1000_TXDCTL_FULL_TX_DESC_WB;
+	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
+	         E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
+
+	/*
+	 * ICH8 has opposite polarity of no_snoop bits.
+	 * By default, we should use snoop behavior.
+	 */
+	if (mac->type == e1000_ich8lan)
+		snoop = PCIE_ICH8_SNOOP_ALL;
+	else
+		snoop = (u32)~(PCIE_NO_SNOOP_ALL);
+	e1000_set_pcie_no_snoop_generic(hw, snoop);
+
+	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_ich8lan(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets/clears the hardware bits necessary for correctly setting up the
+ *  hardware for transmit and receive.
+ **/
+static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
+{
+	u32 reg;
+
+	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
+
+	if (hw->mac.disable_hw_init_bits)
+		goto out;
+
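+	/*
+	 * The bit positions set below are ICH8/9 initialization requirements;
+	 * they have no named defines in this driver.
+	 */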
+	/* Extended Device Control */
+	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	reg |= (1 << 22);
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+	/* Transmit Descriptor Control 0 */
+	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
+	reg |= (1 << 22);
+	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
+
+	/* Transmit Descriptor Control 1 */
+	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
+	reg |= (1 << 22);
+	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
+
+	/* Transmit Arbitration Control 0 */
+	reg = E1000_READ_REG(hw, E1000_TARC(0));
+	if (hw->mac.type == e1000_ich8lan)
+		reg |= (1 << 28) | (1 << 29);
+	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
+	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
+
+	/* Transmit Arbitration Control 1 */
+	reg = E1000_READ_REG(hw, E1000_TARC(1));
+	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
+		reg &= ~(1 << 28);
+	else
+		reg |= (1 << 28);
+	reg |= (1 << 24) | (1 << 26) | (1 << 30);
+	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
+
+	/* Device Status */
+	if (hw->mac.type == e1000_ich8lan) {
+		reg = E1000_READ_REG(hw, E1000_STATUS);
+		reg &= ~(1 << 31);
+		E1000_WRITE_REG(hw, E1000_STATUS, reg);
+	}
+
+out:
+	return;
+}
+
+/**
+ *  e1000_setup_link_ich8lan - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_setup_link_ich8lan");
+
+	if (e1000_check_reset_block(hw))
+		goto out;
+
+	/*
+	 * ICH parts do not have a word in the NVM to determine
+	 * the default flow control setting, so we explicitly
+	 * set it to full.
+	 */
+	if (hw->fc.type == e1000_fc_default)
+		hw->fc.type = e1000_fc_full;
+
+	hw->fc.original_type = hw->fc.type;
+
+	DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc.type);
+
+	/* Continue to configure the copper link. */
+	ret_val = func->setup_physical_interface(hw);
+	if (ret_val)
+		goto out;
+
+	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+
+	ret_val = e1000_set_fc_watermarks_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the Kumeran interface to the PHY to wait the appropriate time
+ *  when polling the PHY, then calls the generic setup_copper_link to finish
+ *  configuring the copper link.
+ **/
+static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+	u16 reg_data;
+
+	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	/*
+	 * Set the mac to wait the maximum time between each iteration
+	 * and increase the max iterations when polling the phy;
+	 * this fixes erroneous timeouts at 10Mbps.
+	 */
+	ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
+	if (ret_val)
+		goto out;
+	ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
+	if (ret_val)
+		goto out;
+	reg_data |= 0x3F;
+	ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
+	if (ret_val)
+		goto out;
+
+	if (hw->phy.type == e1000_phy_igp_3) {
+		ret_val = e1000_copper_link_setup_igp(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	if (hw->phy.type == e1000_phy_ife) {
+		ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
+		if (ret_val)
+			goto out;
+
+		reg_data &= ~IFE_PMC_AUTO_MDIX;
+
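+		/* mdix: 1 forces MDI, 2 forces MDI-X, 0 (default) selects auto. */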
+		switch (hw->phy.mdix) {
+		case 1:
+			reg_data &= ~IFE_PMC_FORCE_MDIX;
+			break;
+		case 2:
+			reg_data |= IFE_PMC_FORCE_MDIX;
+			break;
+		case 0:
+		default:
+			reg_data |= IFE_PMC_AUTO_MDIX;
+			break;
+		}
+		ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, reg_data);
+		if (ret_val)
+			goto out;
+	}
+	ret_val = e1000_setup_copper_link_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: pointer to store current link speed
+ *  @duplex: pointer to store the current link duplex
+ *
+ *  Calls the generic get_speed_and_duplex to retrieve the current link
+ *  information and then calls the Kumeran lock loss workaround for links at
+ *  gigabit speeds.
+ **/
+static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
+                                          u16 *duplex)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
+
+	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
+	if (ret_val)
+		goto out;
+
+	if ((hw->mac.type == e1000_ich8lan) &&
+	    (hw->phy.type == e1000_phy_igp_3) &&
+	    (*speed == SPEED_1000)) {
+		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
+ *  @hw: pointer to the HW structure
+ *
+ *  Work-around for 82566 Kumeran PCS lock loss:
+ *  On link status change (i.e. PCI reset, speed change) and link is up and
+ *  speed is gigabit-
+ *    0) if workaround is optionally disabled do nothing
+ *    1) wait 1ms for Kumeran link to come up
+ *    2) check Kumeran Diagnostic register PCS lock loss bit
+ *    3) if not set the link is locked (all is good), otherwise...
+ *    4) reset the PHY
+ *    5) repeat up to 10 times
+ *  Note: this is only called for IGP3 copper when speed is 1gb.
+ **/
+static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec;
+	u32 phy_ctrl;
+	s32 ret_val = E1000_SUCCESS;
+	u16 i, data;
+	bool link;
+
+	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
+
+	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	if (!(dev_spec->kmrn_lock_loss_workaround_enabled))
+		goto out;
+
+	/*
+	 * Make sure link is up before proceeding.  If not, just return.
+	 * Attempting this while the link is negotiating fouled up link
+	 * stability.
+	 */
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (!link) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	for (i = 0; i < 10; i++) {
+		/* read once to clear */
+		ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &data);
+		if (ret_val)
+			goto out;
+		/* and again to get new status */
+		ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &data);
+		if (ret_val)
+			goto out;
+
+		/* check for PCS lock */
+		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) {
+			ret_val = E1000_SUCCESS;
+			goto out;
+		}
+
+		/* Issue PHY reset */
+		e1000_phy_hw_reset(hw);
+		msec_delay_irq(5);
+	}
+	/* Disable GigE link negotiation */
+	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
+	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
+	             E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+	/*
+	 * Call gig speed drop workaround on Giga disable before accessing
+	 * any PHY registers
+	 */
+	e1000_gig_downshift_workaround_ich8lan(hw);
+
+	/* unable to acquire PCS lock */
+	ret_val = -E1000_ERR_PHY;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
+ *  @hw: pointer to the HW structure
+ *  @state: boolean value used to set the current Kumeran workaround state
+ *
+ *  If ICH8, set the current Kumeran workaround state (enabled = TRUE,
+ *  disabled = FALSE).
+ **/
+void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+                                                 bool state)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec;
+
+	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
+
+	if (hw->mac.type != e1000_ich8lan) {
+		DEBUGOUT("Workaround applies to ICH8 only.\n");
+		goto out;
+	}
+
+	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		goto out;
+	}
+
+	dev_spec->kmrn_lock_loss_workaround_enabled = state;
+
+out:
+	return;
+}
+
+/**
+ *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
+ *  @hw: pointer to the HW structure
+ *
+ *  Workaround for 82566 power-down on D3 entry:
+ *    1) disable gigabit link
+ *    2) write VR power-down enable
+ *    3) read it back
+ *  Continue if successful, else issue LCD reset and repeat
+ **/
+void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
+{
+	u32 reg;
+	u16 data;
+	u8  retry = 0;
+
+	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
+
+	if (hw->phy.type != e1000_phy_igp_3)
+		goto out;
+
+	/* Try the workaround twice (if needed) */
+	do {
+		/* Disable link */
+		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
+		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
+		        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
+
+		/*
+		 * Call gig speed drop workaround on Giga disable before
+		 * accessing any PHY registers
+		 */
+		if (hw->mac.type == e1000_ich8lan)
+			e1000_gig_downshift_workaround_ich8lan(hw);
+
+		/* Write VR power-down enable */
+		e1000_read_phy_reg(hw, IGP3_VR_CTRL, &data);
+		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+		e1000_write_phy_reg(hw,
+		                   IGP3_VR_CTRL,
+		                   data | IGP3_VR_CTRL_MODE_SHUTDOWN);
+
+		/* Read it back and test */
+		e1000_read_phy_reg(hw, IGP3_VR_CTRL, &data);
+		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
+			break;
+
+		/* Issue PHY reset and repeat at most one more time */
+		reg = E1000_READ_REG(hw, E1000_CTRL);
+		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
+		retry++;
+	} while (retry);
+
+out:
+	return;
+}
+
+/**
+ *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
+ *  @hw: pointer to the HW structure
+ *
+ *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
+ *  LPLU, Giga disable, MDIC PHY reset):
+ *    1) Set Kumeran Near-end loopback
+ *    2) Clear Kumeran Near-end loopback
+ *  Should only be called for ICH8[m] devices with IGP_3 Phy.
+ **/
+void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 reg_data;
+
+	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
+
+	if ((hw->mac.type != e1000_ich8lan) ||
+	    (hw->phy.type != e1000_phy_igp_3))
+		goto out;
+
+	ret_val = e1000_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+	                              &reg_data);
+	if (ret_val)
+		goto out;
+	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
+	ret_val = e1000_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+	                               reg_data);
+	if (ret_val)
+		goto out;
+	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
+	ret_val = e1000_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+	                               reg_data);
+out:
+	return;
+}
+
+/**
+ *  e1000_cleanup_led_ich8lan - Restore the default LED operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Return the LED back to the default configuration.
+ **/
+static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_cleanup_led_ich8lan");
+
+	if (hw->phy.type == e1000_phy_ife)
+		ret_val = e1000_write_phy_reg(hw,
+		                              IFE_PHY_SPECIAL_CONTROL_LED,
+		                              0);
+	else
+		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_led_on_ich8lan - Turn LED's on
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn on the LED's.
+ **/
+static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_led_on_ich8lan");
+
+	if (hw->phy.type == e1000_phy_ife)
+		ret_val = e1000_write_phy_reg(hw,
+		                IFE_PHY_SPECIAL_CONTROL_LED,
+		                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+	else
+		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_led_off_ich8lan - Turn LED's off
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn off the LED's.
+ **/
+static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_led_off_ich8lan");
+
+	if (hw->phy.type == e1000_phy_ife)
+		ret_val = e1000_write_phy_reg(hw,
+		               IFE_PHY_SPECIAL_CONTROL_LED,
+		               (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+	else
+		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_get_cfg_done_ich8lan - Read config done bit
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the management control register for the config done bit for
+ *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
+ *  to read the config done bit, so an error is *ONLY* logged and the function
+ *  returns E1000_SUCCESS.  If we were to return an error, EEPROM-less silicon
+ *  would not be able to be reset or change link.
+ **/
+static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	e1000_get_cfg_done_generic(hw);
+
+	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
+	if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
+	    (hw->phy.type == e1000_phy_igp_3)) {
+		e1000_phy_init_script_igp3(hw);
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off the link during a
+ * driver unload, or when Wake-on-LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(e1000_check_mng_mode(hw) || e1000_check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+
+	return;
+}
+
+/**
+ *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears hardware counters specific to the silicon family and calls
+ *  clear_hw_cntrs_generic to clear all general purpose counters.
+ **/
+static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
+{
+	volatile u32 temp;
+
+	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
+
+	e1000_clear_hw_cntrs_base_generic(hw);
+
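+	/* The family-specific statistics registers below are clear-on-read. */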
+	temp = E1000_READ_REG(hw, E1000_ALGNERRC);
+	temp = E1000_READ_REG(hw, E1000_RXERRC);
+	temp = E1000_READ_REG(hw, E1000_TNCRS);
+	temp = E1000_READ_REG(hw, E1000_CEXTERR);
+	temp = E1000_READ_REG(hw, E1000_TSCTC);
+	temp = E1000_READ_REG(hw, E1000_TSCTFC);
+
+	temp = E1000_READ_REG(hw, E1000_MGTPRC);
+	temp = E1000_READ_REG(hw, E1000_MGTPDC);
+	temp = E1000_READ_REG(hw, E1000_MGTPTC);
+
+	temp = E1000_READ_REG(hw, E1000_IAC);
+	temp = E1000_READ_REG(hw, E1000_ICRXOC);
+}
+
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.h	2022-03-21 12:58:29.869884281 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_82571_H_
+#define _E1000_82571_H_
+
+#define ID_LED_RESERVED_F746 0xF746
+#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
+                              (ID_LED_OFF1_ON2  <<  8) | \
+                              (ID_LED_DEF1_DEF2 <<  4) | \
+                              (ID_LED_DEF1_DEF2))
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.c	2022-03-21 12:58:29.863884339 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_82571
+ * e1000_82572
+ * e1000_82573
+ * e1000_82574
+ */
+
+#include "e1000_api.h"
+#include "e1000_82571.h"
+
+static s32  e1000_init_phy_params_82571(struct e1000_hw *hw);
+static s32  e1000_init_nvm_params_82571(struct e1000_hw *hw);
+static s32  e1000_init_mac_params_82571(struct e1000_hw *hw);
+static s32  e1000_acquire_nvm_82571(struct e1000_hw *hw);
+static void e1000_release_nvm_82571(struct e1000_hw *hw);
+static s32  e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset,
+                                  u16 words, u16 *data);
+static s32  e1000_update_nvm_checksum_82571(struct e1000_hw *hw);
+static s32  e1000_validate_nvm_checksum_82571(struct e1000_hw *hw);
+static s32  e1000_get_cfg_done_82571(struct e1000_hw *hw);
+static s32  e1000_set_d0_lplu_state_82571(struct e1000_hw *hw,
+                                          bool active);
+static s32  e1000_reset_hw_82571(struct e1000_hw *hw);
+static s32  e1000_init_hw_82571(struct e1000_hw *hw);
+static void e1000_clear_vfta_82571(struct e1000_hw *hw);
+static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw,
+                                           u8 *mc_addr_list, u32 mc_addr_count,
+                                           u32 rar_used_count, u32 rar_count);
+static s32  e1000_setup_link_82571(struct e1000_hw *hw);
+static s32  e1000_setup_copper_link_82571(struct e1000_hw *hw);
+static s32  e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
+static s32  e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data);
+static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
+static s32  e1000_get_hw_semaphore_82571(struct e1000_hw *hw);
+static s32  e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
+static s32  e1000_get_phy_id_82571(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
+static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
+static s32  e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
+                                       u16 words, u16 *data);
+static s32  e1000_read_mac_addr_82571(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
+
+struct e1000_dev_spec_82571 {
+	bool laa_is_present;
+};
+
+/**
+ *  e1000_init_phy_params_82571 - Init PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_phy_params_82571");
+
+	if (hw->phy.media_type != e1000_media_type_copper) {
+		phy->type        = e1000_phy_none;
+		goto out;
+	}
+
+	phy->addr                        = 1;
+	phy->autoneg_mask                = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+	phy->reset_delay_us              = 100;
+
+	func->acquire_phy                = e1000_get_hw_semaphore_82571;
+	func->check_polarity             = e1000_check_polarity_igp;
+	func->check_reset_block          = e1000_check_reset_block_generic;
+	func->release_phy                = e1000_put_hw_semaphore_82571;
+	func->reset_phy                  = e1000_phy_hw_reset_generic;
+	func->set_d0_lplu_state          = e1000_set_d0_lplu_state_82571;
+	func->set_d3_lplu_state          = e1000_set_d3_lplu_state_generic;
+	func->power_up_phy               = e1000_power_up_phy_copper;
+	func->power_down_phy             = e1000_power_down_phy_copper_82571;
+
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		phy->type                = e1000_phy_igp_2;
+		func->get_cfg_done       = e1000_get_cfg_done_82571;
+		func->get_phy_info       = e1000_get_phy_info_igp;
+		func->force_speed_duplex = e1000_phy_force_speed_duplex_igp;
+		func->get_cable_length   = e1000_get_cable_length_igp_2;
+		func->read_phy_reg       = e1000_read_phy_reg_igp;
+		func->write_phy_reg      = e1000_write_phy_reg_igp;
+
+		/* This uses above function pointers */
+		ret_val = e1000_get_phy_id_82571(hw);
+
+		/* Verify PHY ID */
+		if (phy->id != IGP01E1000_I_PHY_ID) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+		break;
+	case e1000_82573:
+		phy->type                = e1000_phy_m88;
+		func->get_cfg_done       = e1000_get_cfg_done_generic;
+		func->get_phy_info       = e1000_get_phy_info_m88;
+		func->commit_phy         = e1000_phy_sw_reset_generic;
+		func->force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+		func->get_cable_length   = e1000_get_cable_length_m88;
+		func->read_phy_reg       = e1000_read_phy_reg_m88;
+		func->write_phy_reg      = e1000_write_phy_reg_m88;
+
+		/* This uses above function pointers */
+		ret_val = e1000_get_phy_id_82571(hw);
+
+		/* Verify PHY ID */
+		if (phy->id != M88E1111_I_PHY_ID) {
+			ret_val = -E1000_ERR_PHY;
+			DEBUGOUT1("PHY ID unknown: type = 0x%08x\n", phy->id);
+			goto out;
+		}
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+		break;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_82571 - Init NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_functions *func = &hw->func;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	u16 size;
+
+	DEBUGFUNC("e1000_init_nvm_params_82571");
+
+	nvm->opcode_bits          = 8;
+	nvm->delay_usec           = 1;
+	switch (nvm->override) {
+	case e1000_nvm_override_spi_large:
+		nvm->page_size    = 32;
+		nvm->address_bits = 16;
+		break;
+	case e1000_nvm_override_spi_small:
+		nvm->page_size    = 8;
+		nvm->address_bits = 8;
+		break;
+	default:
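+		/*
+		 * No override requested: EECD_ADDR_BITS set indicates a
+		 * large SPI EEPROM (16-bit addressing, 32-byte pages);
+		 * clear indicates a small one (8-bit addressing, 8-byte
+		 * pages).
+		 */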
+		nvm->page_size    = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+		break;
+	}
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+		if (((eecd >> 15) & 0x3) == 0x3) {
+			nvm->type = e1000_nvm_flash_hw;
+			nvm->word_size = 2048;
+			/*
+			 * Autonomous Flash update bit must be cleared due
+			 * to Flash update issue.
+			 */
+			eecd &= ~E1000_EECD_AUPDEN;
+			E1000_WRITE_REG(hw, E1000_EECD, eecd);
+			break;
+		}
+		/* Fall Through */
+	default:
+		nvm->type	= e1000_nvm_eeprom_spi;
+		size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+		                  E1000_EECD_SIZE_EX_SHIFT);
+		/*
+		 * Added to a constant, "size" becomes the left-shift value
+		 * for setting word_size.
+		 */
+		size += NVM_WORD_SIZE_BASE_SHIFT;
+
+		/* EEPROM access above 16k is unsupported */
+		if (size > 14)
+			size = 14;
+		nvm->word_size	= 1 << size;
+		break;
+	}
+
+	/* Function Pointers */
+	func->acquire_nvm       = e1000_acquire_nvm_82571;
+	func->read_nvm          = (hw->mac.type == e1000_82573)
+	                          ? e1000_read_nvm_eerd
+	                          : e1000_read_nvm_spi;
+	func->release_nvm       = e1000_release_nvm_82571;
+	func->update_nvm        = e1000_update_nvm_checksum_82571;
+	func->validate_nvm      = e1000_validate_nvm_checksum_82571;
+	func->valid_led_default = e1000_valid_led_default_82571;
+	func->write_nvm         = e1000_write_nvm_82571;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_mac_params_82571 - Init MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_mac_params_82571");
+
+	/* Set media type */
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82571EB_FIBER:
+	case E1000_DEV_ID_82572EI_FIBER:
+	case E1000_DEV_ID_82571EB_QUAD_FIBER:
+		hw->phy.media_type = e1000_media_type_fiber;
+		break;
+	case E1000_DEV_ID_82571EB_SERDES:
+	case E1000_DEV_ID_82571EB_SERDES_DUAL:
+	case E1000_DEV_ID_82571EB_SERDES_QUAD:
+	case E1000_DEV_ID_82572EI_SERDES:
+		hw->phy.media_type = e1000_media_type_internal_serdes;
+		break;
+	default:
+		hw->phy.media_type = e1000_media_type_copper;
+		break;
+	}
+
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_RAR_ENTRIES;
+	/* Set if part includes ASF firmware */
+	mac->asf_firmware_present = TRUE;
+	/* Set if manageability features are enabled. */
+	mac->arc_subsystem_valid =
+	        (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
+	                ? TRUE : FALSE;
+
+	/* Function pointers */
+
+	/* bus type/speed/width */
+	func->get_bus_info = e1000_get_bus_info_pcie_generic;
+	/* reset */
+	func->reset_hw = e1000_reset_hw_82571;
+	/* hw initialization */
+	func->init_hw = e1000_init_hw_82571;
+	/* link setup */
+	func->setup_link = e1000_setup_link_82571;
+	/* physical interface link setup */
+	func->setup_physical_interface =
+	        (hw->phy.media_type == e1000_media_type_copper)
+	                ? e1000_setup_copper_link_82571
+	                : e1000_setup_fiber_serdes_link_82571;
+	/* check for link */
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		func->check_for_link = e1000_check_for_copper_link_generic;
+		break;
+	case e1000_media_type_fiber:
+		func->check_for_link = e1000_check_for_fiber_link_generic;
+		break;
+	case e1000_media_type_internal_serdes:
+		func->check_for_link = e1000_check_for_serdes_link_generic;
+		break;
+	default:
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+		break;
+	}
+	/* check management mode */
+	func->check_mng_mode = e1000_check_mng_mode_generic;
+	/* multicast address update */
+	func->update_mc_addr_list = e1000_update_mc_addr_list_82571;
+	/* writing VFTA */
+	func->write_vfta = e1000_write_vfta_generic;
+	/* clearing VFTA */
+	func->clear_vfta = e1000_clear_vfta_82571;
+	/* setting MTA */
+	func->mta_set = e1000_mta_set_generic;
+	/* read mac address */
+	func->read_mac_addr = e1000_read_mac_addr_82571;
+	/* blink LED */
+	func->blink_led = e1000_blink_led_generic;
+	/* setup LED */
+	func->setup_led = e1000_setup_led_generic;
+	/* cleanup LED */
+	func->cleanup_led = e1000_cleanup_led_generic;
+	/* turn on/off LED */
+	func->led_on = e1000_led_on_generic;
+	func->led_off = e1000_led_off_generic;
+	/* remove device */
+	func->remove_device = e1000_remove_device_generic;
+	/* clear hardware counters */
+	func->clear_hw_cntrs = e1000_clear_hw_cntrs_82571;
+	/* link info */
+	func->get_link_up_info =
+	        (hw->phy.media_type == e1000_media_type_copper)
+	                ? e1000_get_speed_and_duplex_copper_generic
+	                : e1000_get_speed_and_duplex_fiber_serdes_generic;
+
+	hw->dev_spec_size = sizeof(struct e1000_dev_spec_82571);
+
+	/* Device-specific structure allocation */
+	ret_val = e1000_alloc_zeroed_dev_spec_struct(hw, hw->dev_spec_size);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_function_pointers_82571 - Init func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  The only function explicitly called by the api module to initialize
+ *  all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82571(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_init_function_pointers_82571");
+
+	hw->func.init_mac_params = e1000_init_mac_params_82571;
+	hw->func.init_nvm_params = e1000_init_nvm_params_82571;
+	hw->func.init_phy_params = e1000_init_phy_params_82571;
+}
+
+/**
+ *  e1000_get_phy_id_82571 - Retrieve the PHY ID and revision
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY registers and stores the PHY ID and possibly the PHY
+ *  revision in the hardware structure.
+ **/
+static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_get_phy_id_82571");
+
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		/*
+		 * The 82571 firmware may still be configuring the PHY.
+		 * In this case, we cannot access the PHY until the
+		 * configuration is done.  So we explicitly set the
+		 * PHY ID.
+		 */
+		phy->id = IGP01E1000_I_PHY_ID;
+		break;
+	case e1000_82573:
+		ret_val = e1000_get_phy_id(hw);
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		break;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_get_hw_semaphore_82571 - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM
+ **/
+static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
+{
+	u32 swsm;
+	s32 ret_val = E1000_SUCCESS;
+	s32 timeout = hw->nvm.word_size + 1;
+	s32 i = 0;
+
+	DEBUGFUNC("e1000_get_hw_semaphore_82571");
+
+	/* Get the FW semaphore. */
+	for (i = 0; i < timeout; i++) {
+		swsm = E1000_READ_REG(hw, E1000_SWSM);
+		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+		/* Semaphore acquired if bit latched */
+		if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+			break;
+
+		usec_delay(50);
+	}
+
+	if (i == timeout) {
+		/* Release semaphores */
+		e1000_put_hw_semaphore_generic(hw);
+		DEBUGOUT("Driver can't access the NVM\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_put_hw_semaphore_82571 - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used to access the PHY or NVM
+ **/
+static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
+{
+	u32 swsm;
+
+	DEBUGFUNC("e1000_put_hw_semaphore_82571");
+
+	swsm = E1000_READ_REG(hw, E1000_SWSM);
+
+	swsm &= ~E1000_SWSM_SWESMBI;
+
+	E1000_WRITE_REG(hw, E1000_SWSM, swsm);
+}
+
+/**
+ *  e1000_acquire_nvm_82571 - Request for access to the EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  To gain access to the EEPROM, first we must obtain a hardware semaphore.
+ *  Then for non-82573 hardware, set the EEPROM access request bit and wait
+ *  for EEPROM access grant bit.  If the access grant bit is not set, release
+ *  hardware semaphore.
+ **/
+static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_acquire_nvm_82571");
+
+	ret_val = e1000_get_hw_semaphore_82571(hw);
+	if (ret_val)
+		goto out;
+
+	if (hw->mac.type != e1000_82573)
+		ret_val = e1000_acquire_nvm_generic(hw);
+
+	if (ret_val)
+		e1000_put_hw_semaphore_82571(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_release_nvm_82571 - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+static void e1000_release_nvm_82571(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_release_nvm_82571");
+
+	e1000_release_nvm_generic(hw);
+	e1000_put_hw_semaphore_82571(hw);
+}
+
+/**
+ *  e1000_write_nvm_82571 - Write to EEPROM using appropriate interface
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  For non-82573 silicon, write data to EEPROM at offset using SPI interface.
+ *
+ *  If e1000_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
+                                 u16 *data)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_write_nvm_82571");
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+		ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
+		break;
+	case e1000_82571:
+	case e1000_82572:
+		ret_val = e1000_write_nvm_spi(hw, offset, words, data);
+		break;
+	default:
+		ret_val = -E1000_ERR_NVM;
+		break;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_update_nvm_checksum_82571 - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM.
+ **/
+static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
+{
+	u32 eecd;
+	s32 ret_val;
+	u16 i;
+
+	DEBUGFUNC("e1000_update_nvm_checksum_82571");
+
+	ret_val = e1000_update_nvm_checksum_generic(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * If our nvm is an EEPROM, then we're done
+	 * otherwise, commit the checksum to the flash NVM.
+	 */
+	if (hw->nvm.type != e1000_nvm_flash_hw)
+		goto out;
+
+	/* Check for pending operations. */
+	for (i = 0; i < E1000_FLASH_UPDATES; i++) {
+		msec_delay(1);
+		if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_FLUPD) == 0)
+			break;
+	}
+
+	if (i == E1000_FLASH_UPDATES) {
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	/* Reset the firmware if using STM opcode. */
+	if ((E1000_READ_REG(hw, E1000_FLOP) & 0xFF00) == E1000_STM_OPCODE) {
+		/*
+		 * The enabling of and the actual reset must be done
+		 * in two write cycles.
+		 */
+		E1000_WRITE_REG(hw, E1000_HICR, E1000_HICR_FW_RESET_ENABLE);
+		E1000_WRITE_FLUSH(hw);
+		E1000_WRITE_REG(hw, E1000_HICR, E1000_HICR_FW_RESET);
+	}
+
+	/* Commit the write to flash */
+	eecd = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD;
+	E1000_WRITE_REG(hw, E1000_EECD, eecd);
+
+	for (i = 0; i < E1000_FLASH_UPDATES; i++) {
+		msec_delay(1);
+		if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_FLUPD) == 0)
+			break;
+	}
+
+	if (i == E1000_FLASH_UPDATES) {
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_validate_nvm_checksum_82571");
+
+	if (hw->nvm.type == e1000_nvm_flash_hw)
+		e1000_fix_nvm_checksum_82571(hw);
+
+	return e1000_validate_nvm_checksum_generic(hw);
+}
+
+/**
+ *  e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  After checking for invalid values, poll the EEPROM to ensure the previous
+ *  command has completed before trying to write the next word.  After the
+ *  write, poll for completion.
+ *
+ *  If e1000_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
+                                      u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i, eewr = 0;
+	s32 ret_val = 0;
+
+	DEBUGFUNC("e1000_write_nvm_eewr_82571");
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
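+	/*
+	 * For each word, wait for any previous EEWR cycle to complete, then
+	 * start a new write with the data, word address and START bit, and
+	 * poll again until that write finishes.
+	 */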
+	for (i = 0; i < words; i++) {
+		eewr = (data[i] << E1000_NVM_RW_REG_DATA) |
+		       ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
+		       E1000_NVM_RW_REG_START;
+
+		ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
+		if (ret_val)
+			break;
+
+		E1000_WRITE_REG(hw, E1000_EEWR, eewr);
+
+		ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
+		if (ret_val)
+			break;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_cfg_done_82571 - Poll for configuration done
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the management control register for the config done bit to be set.
+ **/
+static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
+{
+	s32 timeout = PHY_CFG_TIMEOUT;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_get_cfg_done_82571");
+
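+	/* Poll EEMNGCTL until the firmware reports port 0 PHY config done. */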
+	while (timeout) {
+		if (E1000_READ_REG(hw, E1000_EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0)
+			break;
+		msec_delay(1);
+		timeout--;
+	}
+	if (!timeout) {
+		DEBUGOUT("MNG configuration cycle has not completed.\n");
+		ret_val = -E1000_ERR_RESET;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: TRUE to enable LPLU, FALSE to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.  When activating LPLU
+ *  this function also disables smart speed and vice versa.  LPLU will not be
+ *  activated unless the device autonegotiation advertisement meets standards
+ *  of either 10 or 10/100 or 10/100/1000 at all duplexes.  This is a function
+ *  pointer entry point only called by PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_set_d0_lplu_state_82571");
+
+	ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+	if (ret_val)
+		goto out;
+
+	if (active) {
+		data |= IGP02E1000_PM_D0_LPLU;
+		ret_val = e1000_write_phy_reg(hw,
+		                              IGP02E1000_PHY_POWER_MGMT,
+		                              data);
+		if (ret_val)
+			goto out;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1000_read_phy_reg(hw,
+		                             IGP01E1000_PHY_PORT_CONFIG,
+		                             &data);
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1000_write_phy_reg(hw,
+		                              IGP01E1000_PHY_PORT_CONFIG,
+		                              data);
+		if (ret_val)
+			goto out;
+	} else {
+		data &= ~IGP02E1000_PM_D0_LPLU;
+		ret_val = e1000_write_phy_reg(hw,
+		                              IGP02E1000_PHY_POWER_MGMT,
+		                              data);
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1000_read_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             &data);
+			if (ret_val)
+				goto out;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1000_read_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_reset_hw_82571 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
+{
+	u32 ctrl, extcnf_ctrl, ctrl_ext, icr;
+	s32 ret_val;
+	u16 i = 0;
+
+	DEBUGFUNC("e1000_reset_hw_82571");
+
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = e1000_disable_pcie_master_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("PCI-E Master disable polling has failed.\n");
+	}
+
+	DEBUGOUT("Masking off all interrupts\n");
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+	E1000_WRITE_REG(hw, E1000_RCTL, 0);
+	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+	E1000_WRITE_FLUSH(hw);
+
+	msec_delay(10);
+
+	/*
+	 * Must acquire the MDIO ownership before MAC reset.
+	 * Ownership defaults to firmware after a reset.
+	 */
+	if (hw->mac.type == e1000_82573) {
+		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+		extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+		do {
+			E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+			extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+
+			if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+				break;
+
+			extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+			msec_delay(2);
+			i++;
+		} while (i < MDIO_OWNERSHIP_TIMEOUT);
+	}
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	DEBUGOUT("Issuing a global reset to MAC\n");
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+	if (hw->nvm.type == e1000_nvm_flash_hw) {
+		usec_delay(10);
+		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+		ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	ret_val = e1000_get_auto_rd_done_generic(hw);
+	if (ret_val)
+		/* We don't want to continue accessing MAC registers. */
+		goto out;
+
+	/*
+	 * Phy configuration from NVM just starts after EECD_AUTO_RD is set.
+	 * Need to wait for Phy configuration completion before accessing
+	 * NVM and Phy.
+	 */
+	if (hw->mac.type == e1000_82573)
+		msec_delay(25);
+
+	/* Clear any pending interrupt events. */
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+	icr = E1000_READ_REG(hw, E1000_ICR);
+
+	if (!(e1000_check_alt_mac_addr_generic(hw)))
+		e1000_set_laa_state_82571(hw, TRUE);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_hw_82571 - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation.
+ **/
+static s32 e1000_init_hw_82571(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 reg_data;
+	s32 ret_val;
+	u16 i, rar_count = mac->rar_entry_count;
+
+	DEBUGFUNC("e1000_init_hw_82571");
+
+	e1000_initialize_hw_bits_82571(hw);
+
+	/* Initialize identification LED */
+	ret_val = e1000_id_led_init_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
+	}
+
+	/* Disabling VLAN filtering */
+	DEBUGOUT("Initializing the IEEE VLAN\n");
+	e1000_clear_vfta(hw);
+
+	/* Setup the receive address. */
+	/*
+	 * If, however, a locally administered address was assigned to the
+	 * 82571, we must reserve a RAR for it to work around an issue where
+	 * resetting one port will reload the MAC on the other port.
+	 */
+	if (e1000_get_laa_state_82571(hw))
+		rar_count--;
+	e1000_init_rx_addrs_generic(hw, rar_count);
+
+	/* Zero out the Multicast HASH table */
+	DEBUGOUT("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link(hw);
+
+	/* Set the transmit descriptor write-back policy */
+	reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0));
+	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+	           E1000_TXDCTL_FULL_TX_DESC_WB |
+	           E1000_TXDCTL_COUNT_DESC;
+	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg_data);
+
+	/* ...for both queues. */
+	if (mac->type != e1000_82573) {
+		reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1));
+		reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+		           E1000_TXDCTL_FULL_TX_DESC_WB |
+		           E1000_TXDCTL_COUNT_DESC;
+		E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data);
+	} else {
+		e1000_enable_tx_pkt_filtering(hw);
+		reg_data = E1000_READ_REG(hw, E1000_GCR);
+		reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
+		E1000_WRITE_REG(hw, E1000_GCR, reg_data);
+	}
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_82571(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes required hardware-dependent bits needed for normal operation.
+ **/
+static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
+{
+	u32 reg;
+
+	DEBUGFUNC("e1000_initialize_hw_bits_82571");
+
+	if (hw->mac.disable_hw_init_bits)
+		goto out;
+
+	/* Transmit Descriptor Control 0 */
+	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
+	reg |= (1 << 22);
+	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
+
+	/* Transmit Descriptor Control 1 */
+	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
+	reg |= (1 << 22);
+	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
+
+	/* Transmit Arbitration Control 0 */
+	reg = E1000_READ_REG(hw, E1000_TARC(0));
+	reg &= ~(0xF << 27); /* 30:27 */
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
+		break;
+	default:
+		break;
+	}
+	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
+
+	/* Transmit Arbitration Control 1 */
+	reg = E1000_READ_REG(hw, E1000_TARC(1));
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		reg &= ~((1 << 29) | (1 << 30));
+		reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
+		if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
+			reg &= ~(1 << 28);
+		else
+			reg |= (1 << 28);
+		E1000_WRITE_REG(hw, E1000_TARC(1), reg);
+		break;
+	default:
+		break;
+	}
+
+	/* Device Control */
+	if (hw->mac.type == e1000_82573) {
+		reg = E1000_READ_REG(hw, E1000_CTRL);
+		reg &= ~(1 << 29);
+		E1000_WRITE_REG(hw, E1000_CTRL, reg);
+	}
+
+	/* Extended Device Control */
+	if (hw->mac.type == e1000_82573) {
+		reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+		reg &= ~(1 << 23);
+		reg |= (1 << 22);
+		E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+	}
+
+out:
+	return;
+}
+
+/**
+ *  e1000_clear_vfta_82571 - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the register array which contains the VLAN filter table by
+ *  setting all the values to 0.
+ **/
+static void e1000_clear_vfta_82571(struct e1000_hw *hw)
+{
+	u32 offset;
+	u32 vfta_value = 0;
+	u32 vfta_offset = 0;
+	u32 vfta_bit_in_reg = 0;
+
+	DEBUGFUNC("e1000_clear_vfta_82571");
+
+	if (hw->mac.type == e1000_82573) {
+		if (hw->mng_cookie.vlan_id != 0) {
+			/*
+			 * The VFTA is a 4096-bit field, each bit identifying
+			 * a single VLAN ID.  The following operations
+			 * determine which 32-bit entry (i.e. offset) into the
+			 * array holds the manageability unit's VLAN ID
+			 * (i.e. bit).
+			 */
+			vfta_offset = (hw->mng_cookie.vlan_id >>
+			               E1000_VFTA_ENTRY_SHIFT) &
+			              E1000_VFTA_ENTRY_MASK;
+			vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
+			                       E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+		}
+	}
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		/*
+		 * If the offset we want to clear is the same offset of the
+		 * manageability VLAN ID, then clear all bits except that of
+		 * the manageability unit.
+		 */
+		vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value);
+		E1000_WRITE_FLUSH(hw);
+	}
+}
+
+/**
+ *  e1000_update_mc_addr_list_82571 - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *  @rar_used_count: the first RAR register free to program
+ *  @rar_count: total number of supported Receive Address Registers
+ *
+ *  Updates the Receive Address Registers and Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ *  The parameter rar_count will usually be hw->mac.rar_entry_count
+ *  unless there are workarounds that change this.
+ **/
+static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw,
+                                           u8 *mc_addr_list, u32 mc_addr_count,
+                                           u32 rar_used_count, u32 rar_count)
+{
+	DEBUGFUNC("e1000_update_mc_addr_list_82571");
+
+	if (e1000_get_laa_state_82571(hw))
+		rar_count--;
+
+	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count,
+	                                  rar_used_count, rar_count);
+}
+
+/**
+ *  e1000_setup_link_82571 - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+static s32 e1000_setup_link_82571(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_setup_link_82571");
+
+	/*
+	 * 82573 does not have a word in the NVM to determine
+	 * the default flow control setting, so we explicitly
+	 * set it to full.
+	 */
+	if (hw->mac.type == e1000_82573)
+		hw->fc.type = e1000_fc_full;
+
+	return e1000_setup_link_generic(hw);
+}
+
+/**
+ *  e1000_setup_copper_link_82571 - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the link for auto-neg or forced speed and duplex.  Then we check
+ *  for link; once link is established, the collision distance and flow
+ *  control are configured.
+ **/
+static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
+{
+	u32 ctrl, led_ctrl;
+	s32  ret_val;
+
+	DEBUGFUNC("e1000_setup_copper_link_82571");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	switch (hw->phy.type) {
+	case e1000_phy_m88:
+		ret_val = e1000_copper_link_setup_m88(hw);
+		break;
+	case e1000_phy_igp_2:
+		ret_val = e1000_copper_link_setup_igp(hw);
+		/* Setup activity LED */
+		led_ctrl = E1000_READ_REG(hw, E1000_LEDCTL);
+		led_ctrl &= IGP_ACTIVITY_LED_MASK;
+		led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+		E1000_WRITE_REG(hw, E1000_LEDCTL, led_ctrl);
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		break;
+	}
+
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_setup_copper_link_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures collision distance and flow control for fiber and serdes links.
+ *  Upon successful setup, poll for link.
+ **/
+static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_setup_fiber_serdes_link_82571");
+
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		/*
+		 * If SerDes loopback mode is entered, there is no form
+		 * of reset to take the adapter out of that mode.  So we
+		 * have to explicitly take the adapter out of loopback
+		 * mode.  This prevents drivers from twiddling their thumbs
+		 * if another tool failed to take it out of loopback mode.
+		 */
+		E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+		break;
+	default:
+		break;
+	}
+
+	return e1000_setup_fiber_serdes_link_generic(hw);
+}
+
+/**
+ *  e1000_valid_led_default_82571 - Verify a valid default LED config
+ *  @hw: pointer to the HW structure
+ *  @data: pointer to the NVM (EEPROM)
+ *
+ *  Read the EEPROM for the current default LED configuration.  If the
+ *  LED configuration is not valid, set to a valid LED configuration.
+ **/
+static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_valid_led_default_82571");
+
+	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		goto out;
+	}
+
+	if (hw->mac.type == e1000_82573 &&
+	    *data == ID_LED_RESERVED_F746)
+		*data = ID_LED_DEFAULT_82573;
+	else if (*data == ID_LED_RESERVED_0000 ||
+	         *data == ID_LED_RESERVED_FFFF)
+		*data = ID_LED_DEFAULT;
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_laa_state_82571 - Get locally administered address state
+ *  @hw: pointer to the HW structure
+ *
+ *  Retrieve and return the current locally administered address state.
+ **/
+bool e1000_get_laa_state_82571(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_82571 *dev_spec;
+	bool state = FALSE;
+
+	DEBUGFUNC("e1000_get_laa_state_82571");
+
+	if (hw->mac.type != e1000_82571)
+		goto out;
+
+	dev_spec = (struct e1000_dev_spec_82571 *)hw->dev_spec;
+
+	state = dev_spec->laa_is_present;
+
+out:
+	return state;
+}
+
+/**
+ *  e1000_set_laa_state_82571 - Set locally administered address state
+ *  @hw: pointer to the HW structure
+ *  @state: enable/disable locally administered address
+ *
+ *  Enable/Disable the current locally administered address state.
+ **/
+void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state)
+{
+	struct e1000_dev_spec_82571 *dev_spec;
+
+	DEBUGFUNC("e1000_set_laa_state_82571");
+
+	if (hw->mac.type != e1000_82571)
+		goto out;
+
+	dev_spec = (struct e1000_dev_spec_82571 *)hw->dev_spec;
+
+	dev_spec->laa_is_present = state;
+
+	/* If workaround is activated... */
+	if (state) {
+		/*
+		 * Hold a copy of the LAA in RAR[14].  This is done so that
+		 * between the time RAR[0] gets clobbered and the time it
+		 * gets fixed, the actual LAA is in one of the RARs and no
+		 * incoming packets directed to this port are dropped.
+		 * Eventually the LAA will be in RAR[0] and RAR[14].
+		 */
+		e1000_rar_set_generic(hw, hw->mac.addr,
+		                      hw->mac.rar_entry_count - 1);
+	}
+
+out:
+	return;
+}
+
+/**
+ *  e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Verifies that the EEPROM has completed the update.  After updating the
+ *  EEPROM, we need to check bit 15 in word 0x23 for the checksum fix.  If
+ *  the checksum fix is not implemented, we need to set the bit and update
+ *  the checksum.  Otherwise, if bit 15 is set and the checksum is incorrect,
+ *  we need to return bad checksum.
+ **/
+static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32 ret_val = E1000_SUCCESS;
+	u16 data;
+
+	DEBUGFUNC("e1000_fix_nvm_checksum_82571");
+
+	if (nvm->type != e1000_nvm_flash_hw)
+		goto out;
+
+	/*
+	 * Check bit 4 of word 10h.  If it is 0, firmware is done updating
+	 * 10h-12h.  Checksum may need to be fixed.
+	 */
+	ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
+	if (ret_val)
+		goto out;
+
+	if (!(data & 0x10)) {
+		/*
+		 * Read 0x23 and check bit 15.  This bit is a 1
+		 * when the checksum has already been fixed.  If
+		 * the checksum is still wrong and this bit is a
+		 * 1, we need to return bad checksum.  Otherwise,
+		 * we need to set this bit to a 1 and update the
+		 * checksum.
+		 */
+		ret_val = e1000_read_nvm(hw, 0x23, 1, &data);
+		if (ret_val)
+			goto out;
+
+		if (!(data & 0x8000)) {
+			data |= 0x8000;
+			ret_val = e1000_write_nvm(hw, 0x23, 1, &data);
+			if (ret_val)
+				goto out;
+			ret_val = e1000_update_nvm_checksum(hw);
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_mac_addr_82571 - Read device MAC address
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_read_mac_addr_82571");
+	if (e1000_check_alt_mac_addr_generic(hw))
+		ret_val = e1000_read_mac_addr_generic(hw);
+
+	return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_82571 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or when wake-on-LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(e1000_check_mng_mode(hw) || e1000_check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+
+	return;
+}
+
+/**
+ *  e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
+{
+	volatile u32 temp;
+
+	DEBUGFUNC("e1000_clear_hw_cntrs_82571");
+
+	e1000_clear_hw_cntrs_base_generic(hw);
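+	/* These counters are clear-on-read; reading them resets them to zero. */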
+	temp = E1000_READ_REG(hw, E1000_PRC64);
+	temp = E1000_READ_REG(hw, E1000_PRC127);
+	temp = E1000_READ_REG(hw, E1000_PRC255);
+	temp = E1000_READ_REG(hw, E1000_PRC511);
+	temp = E1000_READ_REG(hw, E1000_PRC1023);
+	temp = E1000_READ_REG(hw, E1000_PRC1522);
+	temp = E1000_READ_REG(hw, E1000_PTC64);
+	temp = E1000_READ_REG(hw, E1000_PTC127);
+	temp = E1000_READ_REG(hw, E1000_PTC255);
+	temp = E1000_READ_REG(hw, E1000_PTC511);
+	temp = E1000_READ_REG(hw, E1000_PTC1023);
+	temp = E1000_READ_REG(hw, E1000_PTC1522);
+
+	temp = E1000_READ_REG(hw, E1000_ALGNERRC);
+	temp = E1000_READ_REG(hw, E1000_RXERRC);
+	temp = E1000_READ_REG(hw, E1000_TNCRS);
+	temp = E1000_READ_REG(hw, E1000_CEXTERR);
+	temp = E1000_READ_REG(hw, E1000_TSCTC);
+	temp = E1000_READ_REG(hw, E1000_TSCTFC);
+
+	temp = E1000_READ_REG(hw, E1000_MGTPRC);
+	temp = E1000_READ_REG(hw, E1000_MGTPDC);
+	temp = E1000_READ_REG(hw, E1000_MGTPTC);
+
+	temp = E1000_READ_REG(hw, E1000_IAC);
+	temp = E1000_READ_REG(hw, E1000_ICRXOC);
+
+	temp = E1000_READ_REG(hw, E1000_ICRXPTC);
+	temp = E1000_READ_REG(hw, E1000_ICRXATC);
+	temp = E1000_READ_REG(hw, E1000_ICTXPTC);
+	temp = E1000_READ_REG(hw, E1000_ICTXATC);
+	temp = E1000_READ_REG(hw, E1000_ICTXQEC);
+	temp = E1000_READ_REG(hw, E1000_ICTXQMTC);
+	temp = E1000_READ_REG(hw, E1000_ICRXDMTC);
+}
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.c	2022-03-21 12:58:29.858884388 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_regs.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_82541
+ * e1000_82547
+ * e1000_82541_rev_2
+ * e1000_82547_rev_2
+ */
+
+#include "e1000_api.h"
+#include "e1000_82541.h"
+
+static s32  e1000_init_phy_params_82541(struct e1000_hw *hw);
+static s32  e1000_init_nvm_params_82541(struct e1000_hw *hw);
+static s32  e1000_init_mac_params_82541(struct e1000_hw *hw);
+static s32  e1000_reset_hw_82541(struct e1000_hw *hw);
+static s32  e1000_init_hw_82541(struct e1000_hw *hw);
+static s32  e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed,
+                                         u16 *duplex);
+static s32  e1000_phy_hw_reset_82541(struct e1000_hw *hw);
+static s32  e1000_setup_copper_link_82541(struct e1000_hw *hw);
+static s32  e1000_check_for_link_82541(struct e1000_hw *hw);
+static s32  e1000_get_cable_length_igp_82541(struct e1000_hw *hw);
+static s32  e1000_set_d3_lplu_state_82541(struct e1000_hw *hw,
+                                          bool active);
+static s32  e1000_setup_led_82541(struct e1000_hw *hw);
+static s32  e1000_cleanup_led_82541(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw);
+static s32  e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
+                                                     bool link_up);
+static s32  e1000_phy_init_script_82541(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_82541(struct e1000_hw *hw);
+
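+/* Approximate cable lengths (in meters) used when decoding the IGP01 AGC registers. */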
+static const u16 e1000_igp_cable_length_table[] =
+    { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+      5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
+      25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
+      40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60,
+      60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90,
+      90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+      100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
+      110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120};
+#define IGP01E1000_AGC_LENGTH_TABLE_SIZE \
+                (sizeof(e1000_igp_cable_length_table) / \
+                 sizeof(e1000_igp_cable_length_table[0]))
+
+struct e1000_dev_spec_82541 {
+	e1000_dsp_config dsp_config;
+	e1000_ffe_config ffe_config;
+	u16 spd_default;
+	bool phy_init_script;
+};
+
+/**
+ *  e1000_init_phy_params_82541 - Init PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_phy_params_82541(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_phy_params_82541");
+
+	phy->addr                       = 1;
+	phy->autoneg_mask               = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+	phy->reset_delay_us             = 10000;
+	phy->type                       = e1000_phy_igp;
+
+	/* Function Pointers */
+	func->check_polarity            = e1000_check_polarity_igp;
+	func->force_speed_duplex        = e1000_phy_force_speed_duplex_igp;
+	func->get_cable_length          = e1000_get_cable_length_igp_82541;
+	func->get_cfg_done              = e1000_get_cfg_done_generic;
+	func->get_phy_info              = e1000_get_phy_info_igp;
+	func->read_phy_reg              = e1000_read_phy_reg_igp;
+	func->reset_phy                 = e1000_phy_hw_reset_82541;
+	func->set_d3_lplu_state         = e1000_set_d3_lplu_state_82541;
+	func->write_phy_reg             = e1000_write_phy_reg_igp;
+	func->power_up_phy              = e1000_power_up_phy_copper;
+	func->power_down_phy            = e1000_power_down_phy_copper_82541;
+
+	ret_val = e1000_get_phy_id(hw);
+	if (ret_val)
+		goto out;
+
+	/* Verify phy id */
+	if (phy->id != IGP01E1000_I_PHY_ID) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_82541 - Init NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_nvm_params_82541(struct e1000_hw *hw)
+{
+	struct   e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_functions *func = &hw->func;
+	s32  ret_val = E1000_SUCCESS;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	u16 size;
+
+	DEBUGFUNC("e1000_init_nvm_params_82541");
+
+	switch (nvm->override) {
+	case e1000_nvm_override_spi_large:
+		nvm->type = e1000_nvm_eeprom_spi;
+		eecd |= E1000_EECD_ADDR_BITS;
+		break;
+	case e1000_nvm_override_spi_small:
+		nvm->type = e1000_nvm_eeprom_spi;
+		eecd &= ~E1000_EECD_ADDR_BITS;
+		break;
+	case e1000_nvm_override_microwire_large:
+		nvm->type = e1000_nvm_eeprom_microwire;
+		eecd |= E1000_EECD_SIZE;
+		break;
+	case e1000_nvm_override_microwire_small:
+		nvm->type = e1000_nvm_eeprom_microwire;
+		eecd &= ~E1000_EECD_SIZE;
+		break;
+	default:
+		nvm->type = eecd & E1000_EECD_TYPE
+		            ? e1000_nvm_eeprom_spi
+		            : e1000_nvm_eeprom_microwire;
+		break;
+	}
+
+	if (nvm->type == e1000_nvm_eeprom_spi) {
+		nvm->address_bits       = (eecd & E1000_EECD_ADDR_BITS)
+		                          ? 16 : 8;
+		nvm->delay_usec         = 1;
+		nvm->opcode_bits        = 8;
+		nvm->page_size          = (eecd & E1000_EECD_ADDR_BITS)
+		                          ? 32 : 8;
+
+		/* Function Pointers */
+		func->acquire_nvm       = e1000_acquire_nvm_generic;
+		func->read_nvm          = e1000_read_nvm_spi;
+		func->release_nvm       = e1000_release_nvm_generic;
+		func->update_nvm        = e1000_update_nvm_checksum_generic;
+		func->valid_led_default = e1000_valid_led_default_generic;
+		func->validate_nvm      = e1000_validate_nvm_checksum_generic;
+		func->write_nvm         = e1000_write_nvm_spi;
+
+		/*
+		 * nvm->word_size must be discovered after the pointers
+		 * are set so we can verify the size from the nvm image
+		 * itself.  Temporarily set it to a dummy value so the
+		 * read will work.
+		 */
+		nvm->word_size = 64;
+		ret_val = e1000_read_nvm(hw, NVM_CFG, 1, &size);
+		if (ret_val)
+			goto out;
+		size = (size & NVM_SIZE_MASK) >> NVM_SIZE_SHIFT;
+		/*
+		 * if size != 0, it can be added to a constant and become
+		 * the left-shift value to set the word_size.  Otherwise,
+		 * word_size stays at 64.
+		 */
+		if (size) {
+			size += NVM_WORD_SIZE_BASE_SHIFT_82541;
+			nvm->word_size = 1 << size;
+		}
+	} else {
+		nvm->address_bits       = (eecd & E1000_EECD_ADDR_BITS)
+		                          ? 8 : 6;
+		nvm->delay_usec         = 50;
+		nvm->opcode_bits        = 3;
+		nvm->word_size          = (eecd & E1000_EECD_ADDR_BITS)
+		                          ? 256 : 64;
+
+		/* Function Pointers */
+		func->acquire_nvm       = e1000_acquire_nvm_generic;
+		func->read_nvm          = e1000_read_nvm_microwire;
+		func->release_nvm       = e1000_release_nvm_generic;
+		func->update_nvm        = e1000_update_nvm_checksum_generic;
+		func->valid_led_default = e1000_valid_led_default_generic;
+		func->validate_nvm      = e1000_validate_nvm_checksum_generic;
+		func->write_nvm         = e1000_write_nvm_microwire;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_mac_params_82541 - Init MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_mac_params_82541(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_init_mac_params_82541");
+
+	/* Set media type */
+	hw->phy.media_type = e1000_media_type_copper;
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_RAR_ENTRIES;
+	/* Set if part includes ASF firmware */
+	mac->asf_firmware_present = TRUE;
+
+	/* Function Pointers */
+
+	/* bus type/speed/width */
+	func->get_bus_info = e1000_get_bus_info_pci_generic;
+	/* reset */
+	func->reset_hw = e1000_reset_hw_82541;
+	/* hw initialization */
+	func->init_hw = e1000_init_hw_82541;
+	/* link setup */
+	func->setup_link = e1000_setup_link_generic;
+	/* physical interface link setup */
+	func->setup_physical_interface = e1000_setup_copper_link_82541;
+	/* check for link */
+	func->check_for_link = e1000_check_for_link_82541;
+	/* link info */
+	func->get_link_up_info = e1000_get_link_up_info_82541;
+	/* multicast address update */
+	func->update_mc_addr_list = e1000_update_mc_addr_list_generic;
+	/* writing VFTA */
+	func->write_vfta = e1000_write_vfta_generic;
+	/* clearing VFTA */
+	func->clear_vfta = e1000_clear_vfta_generic;
+	/* setting MTA */
+	func->mta_set = e1000_mta_set_generic;
+	/* setup LED */
+	func->setup_led = e1000_setup_led_82541;
+	/* cleanup LED */
+	func->cleanup_led = e1000_cleanup_led_82541;
+	/* turn on/off LED */
+	func->led_on = e1000_led_on_generic;
+	func->led_off = e1000_led_off_generic;
+	/* remove device */
+	func->remove_device = e1000_remove_device_generic;
+	/* clear hardware counters */
+	func->clear_hw_cntrs = e1000_clear_hw_cntrs_82541;
+
+	hw->dev_spec_size = sizeof(struct e1000_dev_spec_82541);
+
+	/* Device-specific structure allocation */
+	ret_val = e1000_alloc_zeroed_dev_spec_struct(hw, hw->dev_spec_size);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_function_pointers_82541 - Init func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  The only function explicitly called by the api module to initialize
+ *  all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82541(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_init_function_pointers_82541");
+
+	hw->func.init_mac_params = e1000_init_mac_params_82541;
+	hw->func.init_nvm_params = e1000_init_nvm_params_82541;
+	hw->func.init_phy_params = e1000_init_phy_params_82541;
+}
+
+/**
+ *  e1000_reset_hw_82541 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_reset_hw_82541(struct e1000_hw *hw)
+{
+	u32 ledctl, ctrl, icr, manc;
+
+	DEBUGFUNC("e1000_reset_hw_82541");
+
+	DEBUGOUT("Masking off all interrupts\n");
+	E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
+
+	E1000_WRITE_REG(hw, E1000_RCTL, 0);
+	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+	E1000_WRITE_FLUSH(hw);
+
+	/*
+	 * Delay to allow any outstanding PCI transactions to complete
+	 * before resetting the device.
+	 */
+	msec_delay(10);
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	/* Must reset the Phy before resetting the MAC */
+	if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) {
+		E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_PHY_RST));
+		msec_delay(5);
+	}
+
+	DEBUGOUT("Issuing a global reset to 82541/82547 MAC\n");
+	switch (hw->mac.type) {
+	case e1000_82541:
+	case e1000_82541_rev_2:
+		/*
+		 * These controllers can't ack the 64-bit write when
+		 * issuing the reset, so we use IO-mapping as a
+		 * workaround to issue the reset.
+		 */
+		E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+		break;
+	default:
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+		break;
+	}
+
+	/* Wait for NVM reload */
+	msec_delay(20);
+
+	/* Disable HW ARPs on ASF enabled adapters */
+	manc = E1000_READ_REG(hw, E1000_MANC);
+	manc &= ~E1000_MANC_ARP_EN;
+	E1000_WRITE_REG(hw, E1000_MANC, manc);
+
+	if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) {
+		e1000_phy_init_script_82541(hw);
+
+		/* Configure activity LED after Phy reset */
+		ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+		ledctl &= IGP_ACTIVITY_LED_MASK;
+		ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+		E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+	}
+
+	/* Once again, mask the interrupts */
+	DEBUGOUT("Masking off all interrupts\n");
+	E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
+
+	/* Clear any pending interrupt events. */
+	icr = E1000_READ_REG(hw, E1000_ICR);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_hw_82541 - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_hw_82541(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 i, txdctl;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_init_hw_82541");
+
+	/* Initialize identification LED */
+	ret_val = e1000_id_led_init_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
+	}
+
+	/* Disabling VLAN filtering */
+	DEBUGOUT("Initializing the IEEE VLAN\n");
+	e1000_clear_vfta(hw);
+
+	/* Setup the receive address. */
+	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+	/* Zero out the Multicast HASH table */
+	DEBUGOUT("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+		/*
+		 * Avoid back to back register writes by adding the register
+		 * read (flush).  This is to protect against some strange
+		 * bridge configurations that may issue Memory Write Block
+		 * (MWB) to our register space.
+		 */
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link(hw);
+
+	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
+	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+	         E1000_TXDCTL_FULL_TX_DESC_WB;
+	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_82541(hw);
+
+	return ret_val;
+}
+
+/**
+ * e1000_get_link_up_info_82541 - Report speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to speed buffer
+ * @duplex: pointer to duplex buffer
+ *
+ * Retrieve the current speed and duplex configuration.
+ * This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed,
+                                        u16 *duplex)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_get_link_up_info_82541");
+
+	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
+	if (ret_val)
+		goto out;
+
+	if (!phy->speed_downgraded)
+		goto out;
+
+	/*
+	 * IGP01 PHY may advertise full duplex operation after speed
+	 * downgrade even if it is operating at half duplex.
+	 * Here we set the duplex settings to match the duplex in the
+	 * link partner's capabilities.
+	 */
+	ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &data);
+	if (ret_val)
+		goto out;
+
+	if (!(data & NWAY_ER_LP_NWAY_CAPS)) {
+		*duplex = HALF_DUPLEX;
+	} else {
+		ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &data);
+		if (ret_val)
+			goto out;
+
+		if (*speed == SPEED_100) {
+			if (!(data & NWAY_LPAR_100TX_FD_CAPS))
+				*duplex = HALF_DUPLEX;
+		} else if (*speed == SPEED_10) {
+			if (!(data & NWAY_LPAR_10T_FD_CAPS))
+				*duplex = HALF_DUPLEX;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_hw_reset_82541 - PHY hardware reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Verify the reset block is not blocking us from resetting.  Acquire
+ *  semaphore (if necessary) and read/set/write the device control reset
+ *  bit in the PHY.  Wait the appropriate delay time for the device to
+ *  reset and release the semaphore (if necessary).
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_phy_hw_reset_82541(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u32 ledctl;
+
+	DEBUGFUNC("e1000_phy_hw_reset_82541");
+
+	ret_val = e1000_phy_hw_reset_generic(hw);
+	if (ret_val)
+		goto out;
+
+	e1000_phy_init_script_82541(hw);
+
+	if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) {
+		/* Configure activity LED after PHY reset */
+		ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+		ledctl &= IGP_ACTIVITY_LED_MASK;
+		ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+		E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_copper_link_82541 - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the appropriate function to configure the link for auto-neg or forced
+ *  speed and duplex.  Then we check for link; once link is established, the
+ *  collision distance and flow control are configured.  If link is
+ *  not established, we return -E1000_ERR_PHY (-2).  This is a function
+ *  pointer entry point called by the api module.
+ **/
+static s32 e1000_setup_copper_link_82541(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_dev_spec_82541 *dev_spec;
+	s32  ret_val;
+	u32 ctrl, ledctl;
+
+	DEBUGFUNC("e1000_setup_copper_link_82541");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	hw->phy.reset_disable = FALSE;
+
+	dev_spec = (struct e1000_dev_spec_82541 *)hw->dev_spec;
+
+	/* Earlier revs of the IGP phy require us to force MDI. */
+	if (hw->mac.type == e1000_82541 || hw->mac.type == e1000_82547) {
+		dev_spec->dsp_config = e1000_dsp_config_disabled;
+		phy->mdix = 1;
+	} else {
+		dev_spec->dsp_config = e1000_dsp_config_enabled;
+	}
+
+	ret_val = e1000_copper_link_setup_igp(hw);
+	if (ret_val)
+		goto out;
+
+	if (hw->mac.autoneg) {
+		if (dev_spec->ffe_config == e1000_ffe_config_active)
+			dev_spec->ffe_config = e1000_ffe_config_enabled;
+	}
+
+	/* Configure activity LED after PHY reset */
+	ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+	ledctl &= IGP_ACTIVITY_LED_MASK;
+	ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+	E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+
+	ret_val = e1000_setup_copper_link_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_for_link_82541 - Check/Store link connection
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks the link condition of the adapter and stores the
+ *  results in the hw->mac structure. This is a function pointer entry
+ *  point called by the api module.
+ **/
+static s32 e1000_check_for_link_82541(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	bool link;
+
+	DEBUGFUNC("e1000_check_for_link_82541");
+
+	/*
+	 * We only want to go out to the PHY registers to see if Auto-Neg
+	 * has completed and/or if our link status has changed.  The
+	 * get_link_status flag is set upon receiving a Link Status
+	 * Change or Rx Sequence Error interrupt.
+	 */
+	if (!mac->get_link_status) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	/*
+	 * First we want to see if the MII Status Register reports
+	 * link.  If so, then we want to get the current speed/duplex
+	 * of the PHY.
+	 */
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		ret_val = e1000_config_dsp_after_link_change_82541(hw, FALSE);
+		goto out; /* No link detected */
+	}
+
+	mac->get_link_status = FALSE;
+
+	/*
+	 * Check if there was DownShift, must be checked
+	 * immediately after link-up
+	 */
+	e1000_check_downshift_generic(hw);
+
+	/*
+	 * If we are forcing speed/duplex, then we simply return since
+	 * we have already determined whether we have link or not.
+	 */
+	if (!mac->autoneg) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1000_config_dsp_after_link_change_82541(hw, TRUE);
+
+	/*
+	 * Auto-Neg is enabled.  Auto Speed Detection takes care
+	 * of MAC speed/duplex configuration.  So we only need to
+	 * configure Collision Distance in the MAC.
+	 */
+	e1000_config_collision_dist_generic(hw);
+
+	/*
+	 * Configure Flow Control now that Auto-Neg has completed.
+	 * First, we need to restore the desired flow control
+	 * settings because we may have had to re-autoneg with a
+	 * different link partner.
+	 */
+	ret_val = e1000_config_fc_after_link_up_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("Error configuring flow control\n");
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_config_dsp_after_link_change_82541 - Config DSP after link
+ *  @hw: pointer to the HW structure
+ *  @link_up: boolean flag for link up status
+ *
+ *  Returns E1000_ERR_PHY when reading or writing the PHY fails; otherwise
+ *  returns E1000_SUCCESS.
+ *
+ *  82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
+ *  gigabit link is achieved to improve link quality.
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
+                                                    bool link_up)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_dev_spec_82541 *dev_spec;
+	s32 ret_val;
+	u32 idle_errs = 0;
+	u16 phy_data, phy_saved_data, speed, duplex, i;
+	u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
+	u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+	                                           {IGP01E1000_PHY_AGC_PARAM_A,
+	                                            IGP01E1000_PHY_AGC_PARAM_B,
+	                                            IGP01E1000_PHY_AGC_PARAM_C,
+	                                            IGP01E1000_PHY_AGC_PARAM_D};
+
+	DEBUGFUNC("e1000_config_dsp_after_link_change_82541");
+
+	dev_spec = (struct e1000_dev_spec_82541 *)hw->dev_spec;
+
+	if (link_up) {
+		ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+		if (ret_val) {
+			DEBUGOUT("Error getting link speed and duplex\n");
+			goto out;
+		}
+
+		if (speed != SPEED_1000) {
+			ret_val = E1000_SUCCESS;
+			goto out;
+		}
+
+		ret_val = e1000_get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
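+		/*
+		 * Gigabit link with a cable of at least 50 meters: clear the
+		 * EDAC MU index in each DSP channel register to improve link
+		 * quality, then mark the DSP configuration as activated.
+		 */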
+		if ((dev_spec->dsp_config == e1000_dsp_config_enabled) &&
+		    phy->min_cable_length >= 50) {
+
+			for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+				ret_val = e1000_read_phy_reg(hw,
+				                            dsp_reg_array[i],
+				                            &phy_data);
+				if (ret_val)
+					goto out;
+
+				phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+
+				ret_val = e1000_write_phy_reg(hw,
+				                             dsp_reg_array[i],
+				                             phy_data);
+				if (ret_val)
+					goto out;
+			}
+			dev_spec->dsp_config = e1000_dsp_config_activated;
+		}
+
+		if ((dev_spec->ffe_config != e1000_ffe_config_enabled) ||
+		    (phy->min_cable_length >= 50)) {
+			ret_val = E1000_SUCCESS;
+			goto out;
+		}
+
+		/* clear previous idle error counts */
+		ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+		if (ret_val)
+			goto out;
+
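+		/*
+		 * Poll the idle error count; if it grows excessive, apply the
+		 * FFE CM/CP workaround and mark the FFE configuration active.
+		 */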
+		for (i = 0; i < ffe_idle_err_timeout; i++) {
+			usec_delay(1000);
+			ret_val = e1000_read_phy_reg(hw,
+			                            PHY_1000T_STATUS,
+			                            &phy_data);
+			if (ret_val)
+				goto out;
+
+			idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
+			if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
+				dev_spec->ffe_config = e1000_ffe_config_active;
+
+				ret_val = e1000_write_phy_reg(hw,
+				                  IGP01E1000_PHY_DSP_FFE,
+				                  IGP01E1000_PHY_DSP_FFE_CM_CP);
+				if (ret_val)
+					goto out;
+				break;
+			}
+
+			if (idle_errs)
+				ffe_idle_err_timeout =
+				                 FFE_IDLE_ERR_COUNT_TIMEOUT_100;
+		}
+	} else {
+		if (dev_spec->dsp_config == e1000_dsp_config_activated) {
+			/*
+			 * Save off the current value of register 0x2F5B
+			 * to be restored at the end of the routines.
+			 */
+			ret_val = e1000_read_phy_reg(hw,
+			                            0x2F5B,
+			                            &phy_saved_data);
+			if (ret_val)
+				goto out;
+
+			/* Disable the PHY transmitter */
+			ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+			if (ret_val)
+				goto out;
+
+			msec_delay_irq(20);
+
+			ret_val = e1000_write_phy_reg(hw,
+			                             0x0000,
+			                             IGP01E1000_IEEE_FORCE_GIG);
+			if (ret_val)
+				goto out;
+			for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+				ret_val = e1000_read_phy_reg(hw,
+				                            dsp_reg_array[i],
+				                            &phy_data);
+				if (ret_val)
+					goto out;
+
+				phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+				phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
+
+				ret_val = e1000_write_phy_reg(hw,
+				                             dsp_reg_array[i],
+				                             phy_data);
+				if (ret_val)
+					goto out;
+			}
+
+			ret_val = e1000_write_phy_reg(hw,
+			                       0x0000,
+			                       IGP01E1000_IEEE_RESTART_AUTONEG);
+			if (ret_val)
+				goto out;
+
+			msec_delay_irq(20);
+
+			/* Now enable the transmitter */
+			ret_val = e1000_write_phy_reg(hw,
+			                             0x2F5B,
+			                             phy_saved_data);
+			if (ret_val)
+				goto out;
+
+			dev_spec->dsp_config = e1000_dsp_config_enabled;
+		}
+
+		if (dev_spec->ffe_config != e1000_ffe_config_active) {
+			ret_val = E1000_SUCCESS;
+			goto out;
+		}
+
+		/*
+		 * Save off the current value of register 0x2F5B
+		 * to be restored at the end of the routines.
+		 */
+		ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+		if (ret_val)
+			goto out;
+
+		/* Disable the PHY transmitter */
+		ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+		if (ret_val)
+			goto out;
+
+		msec_delay_irq(20);
+
+		ret_val = e1000_write_phy_reg(hw,
+		                             0x0000,
+		                             IGP01E1000_IEEE_FORCE_GIG);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_write_phy_reg(hw,
+		                             IGP01E1000_PHY_DSP_FFE,
+		                             IGP01E1000_PHY_DSP_FFE_DEFAULT);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_write_phy_reg(hw,
+		                             0x0000,
+		                             IGP01E1000_IEEE_RESTART_AUTONEG);
+		if (ret_val)
+			goto out;
+
+		msec_delay_irq(20);
+
+		/* Now enable the transmitter */
+		ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+		if (ret_val)
+			goto out;
+
+		dev_spec->ffe_config = e1000_ffe_config_enabled;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_cable_length_igp_82541 - Determine cable length for igp PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  The automatic gain control (agc) normalizes the amplitude of the
+ *  received signal, adjusting for the attenuation produced by the
+ *  cable.  By reading the AGC registers, which represent the
+ *  combination of coarse and fine gain values, the value can be put
+ *  into a lookup table to obtain the approximate cable length
+ *  for each channel.  This is a function pointer entry point called by the
+ *  api module.
+ **/
+static s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = E1000_SUCCESS;
+	u16 i, data;
+	u16 cur_agc_value, agc_value = 0;
+	u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+	u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+	                                                 {IGP01E1000_PHY_AGC_A,
+	                                                  IGP01E1000_PHY_AGC_B,
+	                                                  IGP01E1000_PHY_AGC_C,
+	                                                  IGP01E1000_PHY_AGC_D};
+
+	DEBUGFUNC("e1000_get_cable_length_igp_82541");
+
+	/* Read the AGC registers for all channels */
+	for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+		ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &data);
+		if (ret_val)
+			goto out;
+
+		cur_agc_value = data >> IGP01E1000_AGC_LENGTH_SHIFT;
+
+		/* Bounds checking */
+		if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
+		    (cur_agc_value == 0)) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+
+		agc_value += cur_agc_value;
+
+		if (min_agc_value > cur_agc_value)
+			min_agc_value = cur_agc_value;
+	}
+
+	/* Remove the minimal AGC result for length < 50m */
+	if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * 50) {
+		agc_value -= min_agc_value;
+		/* Average the three remaining channels for the length. */
+		agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
+	} else {
+		/* Average the channels for the length. */
+		agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
+	}
+
+	phy->min_cable_length = (e1000_igp_cable_length_table[agc_value] >
+	                         IGP01E1000_AGC_RANGE)
+	                        ? (e1000_igp_cable_length_table[agc_value] -
+	                           IGP01E1000_AGC_RANGE)
+	                        : 0;
+	phy->max_cable_length = e1000_igp_cable_length_table[agc_value] +
+	                        IGP01E1000_AGC_RANGE;
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_d3_lplu_state_82541 - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D3
+ *  and enable SmartSpeed.  LPLU and SmartSpeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.  This is a function pointer entry point called by the
+ *  api module.
+ **/
+static s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_set_d3_lplu_state_82541");
+
+	switch (hw->mac.type) {
+	case e1000_82541_rev_2:
+	case e1000_82547_rev_2:
+		break;
+	default:
+		ret_val = e1000_set_d3_lplu_state_generic(hw, active);
+		goto out;
+		break;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &data);
+	if (ret_val)
+		goto out;
+
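+	/*
+	 * LPLU not requested: clear flexible speed and restore the
+	 * configured SmartSpeed setting.
+	 */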
+	if (!active) {
+		data &= ~IGP01E1000_GMII_FLEX_SPD;
+		ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, data);
+		if (ret_val)
+			goto out;
+
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1000_read_phy_reg(hw,
+			                            IGP01E1000_PHY_PORT_CONFIG,
+			                            &data);
+			if (ret_val)
+				goto out;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1000_read_phy_reg(hw,
+			                            IGP01E1000_PHY_PORT_CONFIG,
+			                            &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		}
+	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+	           (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+	           (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= IGP01E1000_GMII_FLEX_SPD;
+		ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, data);
+		if (ret_val)
+			goto out;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1000_read_phy_reg(hw,
+		                            IGP01E1000_PHY_PORT_CONFIG,
+		                            &data);
+		if (ret_val)
+			goto out;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1000_write_phy_reg(hw,
+		                             IGP01E1000_PHY_PORT_CONFIG,
+		                             data);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_led_82541 - Configures SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This prepares the SW controllable LED for use and saves the current state
+ *  of the LED so it can be later restored.  This is a function pointer entry
+ *  point called by the api module.
+ **/
+static s32 e1000_setup_led_82541(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_82541 *dev_spec;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_setup_led_82541");
+
+	dev_spec = (struct e1000_dev_spec_82541 *)hw->dev_spec;
+
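+	/*
+	 * Save the current GMII FIFO value so cleanup_led can restore it,
+	 * then clear the SPD bit while the LED is under software control.
+	 */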
+	ret_val = e1000_read_phy_reg(hw,
+	                            IGP01E1000_GMII_FIFO,
+	                            &dev_spec->spd_default);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_write_phy_reg(hw,
+	                             IGP01E1000_GMII_FIFO,
+	                             (u16)(dev_spec->spd_default &
+	                                        ~IGP01E1000_GMII_SPD));
+	if (ret_val)
+		goto out;
+
+	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_cleanup_led_82541 - Set LED config to default operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Remove the current LED configuration and set the LED configuration
+ *  to the default value, saved from the EEPROM.  This is a function pointer
+ *  entry point called by the api module.
+ **/
+static s32 e1000_cleanup_led_82541(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_82541 *dev_spec;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_cleanup_led_82541");
+
+	dev_spec = (struct e1000_dev_spec_82541 *)hw->dev_spec;
+
+	ret_val = e1000_write_phy_reg(hw,
+	                             IGP01E1000_GMII_FIFO,
+	                             dev_spec->spd_default);
+	if (ret_val)
+		goto out;
+
+	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_init_script_82541 - Initialize GbE PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the IGP PHY.
+ **/
+static s32 e1000_phy_init_script_82541(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_82541 *dev_spec;
+	s32 ret_val;
+	u16 phy_saved_data;
+
+	DEBUGFUNC("e1000_phy_init_script_82541");
+
+	dev_spec = (struct e1000_dev_spec_82541 *)hw->dev_spec;
+
+	if (!dev_spec->phy_init_script) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	/* Delay after phy reset to enable NVM configuration to load */
+	msec_delay(20);
+
+	/*
+	 * Save off the current value of register 0x2F5B to be restored at
+	 * the end of this routine.
+	 */
+	ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+	/* Disable the PHY transmitter */
+	e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+	msec_delay(20);
+
+	e1000_write_phy_reg(hw, 0x0000, 0x0140);
+
+	msec_delay(5);
+
+	switch (hw->mac.type) {
+	case e1000_82541:
+	case e1000_82547:
+		e1000_write_phy_reg(hw, 0x1F95, 0x0001);
+
+		e1000_write_phy_reg(hw, 0x1F71, 0xBD21);
+
+		e1000_write_phy_reg(hw, 0x1F79, 0x0018);
+
+		e1000_write_phy_reg(hw, 0x1F30, 0x1600);
+
+		e1000_write_phy_reg(hw, 0x1F31, 0x0014);
+
+		e1000_write_phy_reg(hw, 0x1F32, 0x161C);
+
+		e1000_write_phy_reg(hw, 0x1F94, 0x0003);
+
+		e1000_write_phy_reg(hw, 0x1F96, 0x003F);
+
+		e1000_write_phy_reg(hw, 0x2010, 0x0008);
+		break;
+	case e1000_82541_rev_2:
+	case e1000_82547_rev_2:
+		e1000_write_phy_reg(hw, 0x1F73, 0x0099);
+		break;
+	default:
+		break;
+	}
+
+	e1000_write_phy_reg(hw, 0x0000, 0x3300);
+
+	msec_delay(20);
+
+	/* Now enable the transmitter */
+	e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+	if (hw->mac.type == e1000_82547) {
+		u16 fused, fine, coarse;
+
+		/* Move to analog registers page */
+		e1000_read_phy_reg(hw,
+		                  IGP01E1000_ANALOG_SPARE_FUSE_STATUS,
+		                  &fused);
+
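+		/*
+		 * If the spare fuse is not in use, re-derive the coarse/fine
+		 * analog fuse values and hand them over to software control.
+		 */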
+		if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
+			e1000_read_phy_reg(hw,
+			                  IGP01E1000_ANALOG_FUSE_STATUS,
+			                  &fused);
+
+			fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
+			coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
+
+			if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
+				coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
+				fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
+			} else if (coarse ==
+			           IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+				fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
+
+			fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
+			        (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
+			        (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK);
+
+			e1000_write_phy_reg(hw,
+			                   IGP01E1000_ANALOG_FUSE_CONTROL,
+			                   fused);
+			e1000_write_phy_reg(hw,
+			              IGP01E1000_ANALOG_FUSE_BYPASS,
+			              IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_script_state_82541 - Enable/Disable PHY init script
+ *  @hw: pointer to the HW structure
+ *  @state: boolean value used to enable/disable PHY init script
+ *
+ *  Allows the driver to enable/disable the PHY init script, if the PHY is an
+ *  IGP PHY.  This is a function pointer entry point called by the api module.
+ **/
+void e1000_init_script_state_82541(struct e1000_hw *hw, bool state)
+{
+	struct e1000_dev_spec_82541 *dev_spec;
+
+	DEBUGFUNC("e1000_init_script_state_82541");
+
+	if (hw->phy.type != e1000_phy_igp) {
+		DEBUGOUT("Initialization script not necessary.\n");
+		goto out;
+	}
+
+	dev_spec = (struct e1000_dev_spec_82541 *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		goto out;
+	}
+
+	dev_spec->phy_init_script = state;
+
+out:
+	return;
+}
+
+/**
+ * e1000_power_down_phy_copper_82541 - Remove link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or when wake on LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_82541(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_SMBUS_EN))
+		e1000_power_down_phy_copper(hw);
+
+	return;
+}
+
+/**
+ *  e1000_clear_hw_cntrs_82541 - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw)
+{
+	volatile u32 temp;
+
+	DEBUGFUNC("e1000_clear_hw_cntrs_82541");
+
+	e1000_clear_hw_cntrs_base_generic(hw);
+
+	temp = E1000_READ_REG(hw, E1000_PRC64);
+	temp = E1000_READ_REG(hw, E1000_PRC127);
+	temp = E1000_READ_REG(hw, E1000_PRC255);
+	temp = E1000_READ_REG(hw, E1000_PRC511);
+	temp = E1000_READ_REG(hw, E1000_PRC1023);
+	temp = E1000_READ_REG(hw, E1000_PRC1522);
+	temp = E1000_READ_REG(hw, E1000_PTC64);
+	temp = E1000_READ_REG(hw, E1000_PTC127);
+	temp = E1000_READ_REG(hw, E1000_PTC255);
+	temp = E1000_READ_REG(hw, E1000_PTC511);
+	temp = E1000_READ_REG(hw, E1000_PTC1023);
+	temp = E1000_READ_REG(hw, E1000_PTC1522);
+
+	temp = E1000_READ_REG(hw, E1000_ALGNERRC);
+	temp = E1000_READ_REG(hw, E1000_RXERRC);
+	temp = E1000_READ_REG(hw, E1000_TNCRS);
+	temp = E1000_READ_REG(hw, E1000_CEXTERR);
+	temp = E1000_READ_REG(hw, E1000_TSCTC);
+	temp = E1000_READ_REG(hw, E1000_TSCTFC);
+
+	temp = E1000_READ_REG(hw, E1000_MGTPRC);
+	temp = E1000_READ_REG(hw, E1000_MGTPDC);
+	temp = E1000_READ_REG(hw, E1000_MGTPTC);
+}
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_regs.h	2022-03-21 12:58:29.852884446 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_REGS_H_
+#define _E1000_REGS_H_
+
+#define E1000_CTRL     0x00000  /* Device Control - RW */
+#define E1000_CTRL_DUP 0x00004  /* Device Control Duplicate (Shadow) - RW */
+#define E1000_STATUS   0x00008  /* Device Status - RO */
+#define E1000_EECD     0x00010  /* EEPROM/Flash Control - RW */
+#define E1000_EERD     0x00014  /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018  /* Extended Device Control - RW */
+#define E1000_FLA      0x0001C  /* Flash Access - RW */
+#define E1000_MDIC     0x00020  /* MDI Control - RW */
+#define E1000_SCTL     0x00024  /* SerDes Control - RW */
+#define E1000_FCAL     0x00028  /* Flow Control Address Low - RW */
+#define E1000_FCAH     0x0002C  /* Flow Control Address High -RW */
+#define E1000_FEXTNVM  0x00028  /* Future Extended NVM - RW */
+#define E1000_FCT      0x00030  /* Flow Control Type - RW */
+#define E1000_CONNSW   0x00034  /* Copper/Fiber switch control - RW */
+#define E1000_VET      0x00038  /* VLAN Ether Type - RW */
+#define E1000_ICR      0x000C0  /* Interrupt Cause Read - R/clr */
+#define E1000_ITR      0x000C4  /* Interrupt Throttling Rate - RW */
+#define E1000_ICS      0x000C8  /* Interrupt Cause Set - WO */
+#define E1000_IMS      0x000D0  /* Interrupt Mask Set - RW */
+#define E1000_IMC      0x000D8  /* Interrupt Mask Clear - WO */
+#define E1000_IAM      0x000E0  /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL     0x00100  /* Rx Control - RW */
+#define E1000_FCTTV    0x00170  /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW     0x00178  /* Tx Configuration Word - RW */
+#define E1000_RXCW     0x00180  /* Rx Configuration Word - RO */
+#define E1000_EICR     0x01580  /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
+#define E1000_EICS     0x01520  /* Ext. Interrupt Cause Set - WO */
+#define E1000_EIMS     0x01524  /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC     0x01528  /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC     0x0152C  /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM     0x01530  /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_TCTL     0x00400  /* Tx Control - RW */
+#define E1000_TCTL_EXT 0x00404  /* Extended Tx Control - RW */
+#define E1000_TIPG     0x00410  /* Tx Inter-packet gap -RW */
+#define E1000_TBT      0x00448  /* Tx Burst Timer - RW */
+#define E1000_AIT      0x00458  /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL   0x00E00  /* LED Control - RW */
+#define E1000_EXTCNF_CTRL  0x00F00  /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE  0x00F08  /* Extended Configuration Size */
+#define E1000_PHY_CTRL     0x00F10  /* PHY Control Register in CSR */
+#define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
+#define E1000_PBS      0x01008  /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
+#define E1000_EEARBC   0x01024  /* EEPROM Auto Read Bus Control */
+#define E1000_FLASHT   0x01028  /* FLASH Timer Register */
+#define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
+#define E1000_FLSWCTL  0x01030  /* FLASH control register */
+#define E1000_FLSWDATA 0x01034  /* FLASH data register */
+#define E1000_FLSWCNT  0x01038  /* FLASH Access Counter */
+#define E1000_FLOP     0x0103C  /* FLASH Opcode Register */
+#define E1000_I2CCMD   0x01028  /* SFPI2C Command Register - RW */
+#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
+#define E1000_WDSTP    0x01040  /* Watchdog Setup - RW */
+#define E1000_SWDSTS   0x01044  /* SW Device Status - RW */
+#define E1000_FRTIMER  0x01048  /* Free Running Timer - RW */
+#define E1000_TCPTIMER 0x0104C  /* TCP Timer - RW */
+#define E1000_ERT      0x02008  /* Early Rx Threshold - RW */
+#define E1000_FCRTL    0x02160  /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH    0x02168  /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL   0x02170  /* Packet Split Receive Control - RW */
+#define E1000_RDFPCQ(_n)  (0x02430 + (0x4 * (_n)))
+#define E1000_PBRTH    0x02458  /* PB Rx Arbitration Threshold - RW */
+#define E1000_FCRTV    0x02460  /* Flow Control Refresh Timer Value - RW */
+/* Split and Replication Rx Control - RW */
+#define E1000_RDPUMB   0x025CC  /* DMA Rx Descriptor uC Mailbox - RW */
+#define E1000_RDPUAD   0x025D0  /* DMA Rx Descriptor uC Addr Command - RW */
+#define E1000_RDPUWD   0x025D4  /* DMA Rx Descriptor uC Data Write - RW */
+#define E1000_RDPURD   0x025D8  /* DMA Rx Descriptor uC Data Read - RW */
+#define E1000_RDPUCTL  0x025DC  /* DMA Rx Descriptor uC Control - RW */
+#define E1000_RDTR     0x02820  /* Rx Delay Timer - RW */
+#define E1000_RADV     0x0282C  /* Rx Interrupt Absolute Delay Timer - RW */
+/*
+ * Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL(current_rx_queue)
+ */
+#define E1000_RDBAL(_n)   ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n)   ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n)   ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : (0x0C008 + ((_n) * 0x40)))
+#define E1000_SRRCTL(_n)  ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : (0x0C00C + ((_n) * 0x40)))
+#define E1000_RDH(_n)     ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : (0x0C010 + ((_n) * 0x40)))
+#define E1000_RDT(_n)     ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n)  ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : (0x0C028 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n)   ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n)   ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n)   ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n)     ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : (0x0E010 + ((_n) * 0x40)))
+#define E1000_TDT(_n)     ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n)  ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : (0x0E028 + ((_n) * 0x40)))
+#define E1000_TARC(_n)    (0x03840 + ((_n) << 8))
+#define E1000_DCA_TXCTRL(_n) (0x03814 + ((_n) << 8))
+#define E1000_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
+#define E1000_TDWBAL(_n)  ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : (0x0E038 + ((_n) * 0x40)))
+#define E1000_TDWBAH(_n)  ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : (0x0E03C + ((_n) * 0x40)))
+#define E1000_RSRPD    0x02C00  /* Rx Small Packet Detect - RW */
+#define E1000_RAID     0x02C08  /* Receive Ack Interrupt Delay - RW */
+#define E1000_TXDMAC   0x03000  /* Tx DMA Control - RW */
+#define E1000_KABGTXD  0x03004  /* AFE Band Gap Transmit Ref Data */
+#define E1000_PSRTYPE(_i)       (0x05480 + ((_i) * 4))
+#define E1000_RAL(_i)           (0x05400 + ((_i) * 8))
+#define E1000_RAH(_i)           (0x05404 + ((_i) * 8))
+#define E1000_IP4AT_REG(_i)     (0x05840 + ((_i) * 8))
+#define E1000_IP6AT_REG(_i)     (0x05880 + ((_i) * 4))
+#define E1000_WUPM_REG(_i)      (0x05A00 + ((_i) * 4))
+#define E1000_FFMT_REG(_i)      (0x09000 + ((_i) * 8))
+#define E1000_FFVT_REG(_i)      (0x09800 + ((_i) * 8))
+#define E1000_FFLT_REG(_i)      (0x05F00 + ((_i) * 8))
+#define E1000_TDFH     0x03410  /* Tx Data FIFO Head - RW */
+#define E1000_TDFT     0x03418  /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS    0x03420  /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS    0x03428  /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC    0x03430  /* Tx Data FIFO Packet Count - RW */
+#define E1000_TDPUMB   0x0357C  /* DMA Tx Descriptor uC Mail Box - RW */
+#define E1000_TDPUAD   0x03580  /* DMA Tx Descriptor uC Addr Command - RW */
+#define E1000_TDPUWD   0x03584  /* DMA Tx Descriptor uC Data Write - RW */
+#define E1000_TDPURD   0x03588  /* DMA Tx Descriptor uC Data  Read  - RW */
+#define E1000_TDPUCTL  0x0358C  /* DMA Tx Descriptor uC Control - RW */
+#define E1000_DTXCTL   0x03590  /* DMA Tx Control - RW */
+#define E1000_TIDV     0x03820  /* Tx Interrupt Delay Value - RW */
+#define E1000_TADV     0x0382C  /* Tx Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT    0x03830  /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_CRCERRS  0x04000  /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004  /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS  0x04008  /* Symbol Error Count - R/clr */
+#define E1000_RXERRC   0x0400C  /* Receive Error Count - R/clr */
+#define E1000_MPC      0x04010  /* Missed Packet Count - R/clr */
+#define E1000_SCC      0x04014  /* Single Collision Count - R/clr */
+#define E1000_ECOL     0x04018  /* Excessive Collision Count - R/clr */
+#define E1000_MCC      0x0401C  /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL  0x04020  /* Late Collision Count - R/clr */
+#define E1000_COLC     0x04028  /* Collision Count - R/clr */
+#define E1000_DC       0x04030  /* Defer Count - R/clr */
+#define E1000_TNCRS    0x04034  /* Tx-No CRS - R/clr */
+#define E1000_SEC      0x04038  /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR  0x0403C  /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC     0x04040  /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC   0x04048  /* XON Rx Count - R/clr */
+#define E1000_XONTXC   0x0404C  /* XON Tx Count - R/clr */
+#define E1000_XOFFRXC  0x04050  /* XOFF Rx Count - R/clr */
+#define E1000_XOFFTXC  0x04054  /* XOFF Tx Count - R/clr */
+#define E1000_FCRUC    0x04058  /* Flow Control Rx Unsupported Count- R/clr */
+#define E1000_PRC64    0x0405C  /* Packets Rx (64 bytes) - R/clr */
+#define E1000_PRC127   0x04060  /* Packets Rx (65-127 bytes) - R/clr */
+#define E1000_PRC255   0x04064  /* Packets Rx (128-255 bytes) - R/clr */
+#define E1000_PRC511   0x04068  /* Packets Rx (255-511 bytes) - R/clr */
+#define E1000_PRC1023  0x0406C  /* Packets Rx (512-1023 bytes) - R/clr */
+#define E1000_PRC1522  0x04070  /* Packets Rx (1024-1522 bytes) - R/clr */
+#define E1000_GPRC     0x04074  /* Good Packets Rx Count - R/clr */
+#define E1000_BPRC     0x04078  /* Broadcast Packets Rx Count - R/clr */
+#define E1000_MPRC     0x0407C  /* Multicast Packets Rx Count - R/clr */
+#define E1000_GPTC     0x04080  /* Good Packets Tx Count - R/clr */
+#define E1000_GORCL    0x04088  /* Good Octets Rx Count Low - R/clr */
+#define E1000_GORCH    0x0408C  /* Good Octets Rx Count High - R/clr */
+#define E1000_GOTCL    0x04090  /* Good Octets Tx Count Low - R/clr */
+#define E1000_GOTCH    0x04094  /* Good Octets Tx Count High - R/clr */
+#define E1000_RNBC     0x040A0  /* Rx No Buffers Count - R/clr */
+#define E1000_RUC      0x040A4  /* Rx Undersize Count - R/clr */
+#define E1000_RFC      0x040A8  /* Rx Fragment Count - R/clr */
+#define E1000_ROC      0x040AC  /* Rx Oversize Count - R/clr */
+#define E1000_RJC      0x040B0  /* Rx Jabber Count - R/clr */
+#define E1000_MGTPRC   0x040B4  /* Management Packets Rx Count - R/clr */
+#define E1000_MGTPDC   0x040B8  /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC   0x040BC  /* Management Packets Tx Count - R/clr */
+#define E1000_TORL     0x040C0  /* Total Octets Rx Low - R/clr */
+#define E1000_TORH     0x040C4  /* Total Octets Rx High - R/clr */
+#define E1000_TOTL     0x040C8  /* Total Octets Tx Low - R/clr */
+#define E1000_TOTH     0x040CC  /* Total Octets Tx High - R/clr */
+#define E1000_TPR      0x040D0  /* Total Packets Rx - R/clr */
+#define E1000_TPT      0x040D4  /* Total Packets Tx - R/clr */
+#define E1000_PTC64    0x040D8  /* Packets Tx (64 bytes) - R/clr */
+#define E1000_PTC127   0x040DC  /* Packets Tx (65-127 bytes) - R/clr */
+#define E1000_PTC255   0x040E0  /* Packets Tx (128-255 bytes) - R/clr */
+#define E1000_PTC511   0x040E4  /* Packets Tx (256-511 bytes) - R/clr */
+#define E1000_PTC1023  0x040E8  /* Packets Tx (512-1023 bytes) - R/clr */
+#define E1000_PTC1522  0x040EC  /* Packets Tx (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC     0x040F0  /* Multicast Packets Tx Count - R/clr */
+#define E1000_BPTC     0x040F4  /* Broadcast Packets Tx Count - R/clr */
+#define E1000_TSCTC    0x040F8  /* TCP Segmentation Context Tx - R/clr */
+#define E1000_TSCTFC   0x040FC  /* TCP Segmentation Context Tx Fail - R/clr */
+#define E1000_IAC      0x04100  /* Interrupt Assertion Count */
+#define E1000_ICRXPTC  0x04104  /* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXATC  0x04108  /* Interrupt Cause Rx Absolute Timer Expire Count */
+#define E1000_ICTXPTC  0x0410C  /* Interrupt Cause Tx Packet Timer Expire Count */
+#define E1000_ICTXATC  0x04110  /* Interrupt Cause Tx Absolute Timer Expire Count */
+#define E1000_ICTXQEC  0x04118  /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C  /* Interrupt Cause Tx Queue Minimum Threshold Count */
+#define E1000_ICRXDMTC 0x04120  /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+#define E1000_ICRXOC   0x04124  /* Interrupt Cause Receiver Overrun Count */
+
+#define E1000_PCS_CFG0    0x04200  /* PCS Configuration 0 - RW */
+#define E1000_PCS_LCTL    0x04208  /* PCS Link Control - RW */
+#define E1000_PCS_LSTAT   0x0420C  /* PCS Link Status - RO */
+#define E1000_CBTMPC      0x0402C  /* Circuit Breaker Tx Packet Count */
+#define E1000_HTDPMC      0x0403C  /* Host Transmit Discarded Packets */
+#define E1000_CBRDPC      0x04044  /* Circuit Breaker Rx Dropped Count */
+#define E1000_CBRMPC      0x040FC  /* Circuit Breaker Rx Packet Count */
+#define E1000_RPTHC       0x04104  /* Rx Packets To Host */
+#define E1000_HGPTC       0x04118  /* Host Good Packets Tx Count */
+#define E1000_HTCBDPC     0x04124  /* Host Tx Circuit Breaker Dropped Count */
+#define E1000_HGORCL      0x04128  /* Host Good Octets Received Count Low */
+#define E1000_HGORCH      0x0412C  /* Host Good Octets Received Count High */
+#define E1000_HGOTCL      0x04130  /* Host Good Octets Transmit Count Low */
+#define E1000_HGOTCH      0x04134  /* Host Good Octets Transmit Count High */
+#define E1000_LENERRS     0x04138  /* Length Errors Count */
+#define E1000_SCVPC       0x04228  /* SerDes/SGMII Code Violation Pkt Count */
+#define E1000_HRMPC       0x0A018  /* Header Redirection Missed Packet Count */
+#define E1000_PCS_ANADV   0x04218  /* AN advertisement - RW */
+#define E1000_PCS_LPAB    0x0421C  /* Link Partner Ability - RW */
+#define E1000_PCS_NPTX    0x04220  /* AN Next Page Transmit - RW */
+#define E1000_PCS_LPABNP  0x04224  /* Link Partner Ability Next Page - RW */
+#define E1000_1GSTAT_RCV  0x04228  /* 1GSTAT Code Violation Packet Count - RW */
+#define E1000_RXCSUM   0x05000  /* Rx Checksum Control - RW */
+#define E1000_RLPML    0x05004  /* Rx Long Packet Max Length */
+#define E1000_RFCTL    0x05008  /* Receive Filter Control*/
+#define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
+#define E1000_RA       0x05400  /* Receive Address - RW Array */
+#define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
+#define E1000_VMD_CTL  0x0581C  /* VMDq Control - RW */
+#define E1000_VFQA0    0x0B000  /* VLAN Filter Queue Array 0 - RW Array */
+#define E1000_VFQA1    0x0B200  /* VLAN Filter Queue Array 1 - RW Array */
+#define E1000_WUC      0x05800  /* Wakeup Control - RW */
+#define E1000_WUFC     0x05808  /* Wakeup Filter Control - RW */
+#define E1000_WUS      0x05810  /* Wakeup Status - RO */
+#define E1000_MANC     0x05820  /* Management Control - RW */
+#define E1000_IPAV     0x05838  /* IP Address Valid - RW */
+#define E1000_IP4AT    0x05840  /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT    0x05880  /* IPv6 Address Table - RW Array */
+#define E1000_WUPL     0x05900  /* Wakeup Packet Length - RW */
+#define E1000_WUPM     0x05A00  /* Wakeup Packet Memory - RO A */
+#define E1000_PBACL    0x05B68  /* MSIx PBA Clear - Read/Write 1's to clear */
+#define E1000_FFLT     0x05F00  /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF  0x08800  /* Host Interface */
+#define E1000_FFMT     0x09000  /* Flexible Filter Mask Table - RW Array */
+#define E1000_FFVT     0x09800  /* Flexible Filter Value Table - RW Array */
+
+#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MDPHYA      0x0003C /* PHY address - RW */
+#define E1000_MANC2H      0x05860 /* Management Control To Host - RW */
+#define E1000_SW_FW_SYNC  0x05B5C /* Software-Firmware Synchronization - RW */
+#define E1000_CCMCTL      0x05B48 /* CCM Control Register */
+#define E1000_GIOCTL      0x05B44 /* GIO Analog Control Register */
+#define E1000_SCCTL       0x05B4C /* PCIc PLL Configuration Register */
+#define E1000_GCR         0x05B00 /* PCI-Ex Control */
+#define E1000_GSCL_1    0x05B10 /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2    0x05B14 /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3    0x05B18 /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4    0x05B1C /* PCI-Ex Statistic Control #4 */
+#define E1000_FACTPS    0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM      0x05B50 /* SW Semaphore */
+#define E1000_FWSM      0x05B54 /* FW Semaphore */
+#define E1000_DCA_ID    0x05B70 /* DCA Requester ID Information - RO */
+#define E1000_DCA_CTRL  0x05B74 /* DCA Control - RW */
+#define E1000_FFLT_DBG  0x05F04 /* Debug Register */
+#define E1000_HICR      0x08F00 /* Host Interface Control */
+
+/* RSS registers */
+#define E1000_CPUVEC    0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC      0x05818 /* Multiple Receive Control - RW */
+#define E1000_IMIR(_i)      (0x05A80 + ((_i) * 4))  /* Immediate Interrupt */
+#define E1000_IMIREXT(_i)   (0x05AA0 + ((_i) * 4))  /* Immediate Interrupt Ext*/
+#define E1000_IMIRVP    0x05AC0 /* Immediate Interrupt Rx VLAN Priority - RW */
+#define E1000_MSIXBM(_i)    (0x01600 + ((_i) * 4)) /* MSI-X Allocation Register (_i) - RW */
+#define E1000_MSIXTADD(_i)  (0x0C000 + ((_i) * 0x10)) /* MSI-X Table entry addr low reg 0 - RW */
+#define E1000_MSIXTUADD(_i) (0x0C004 + ((_i) * 0x10)) /* MSI-X Table entry addr upper reg 0 - RW */
+#define E1000_MSIXTMSG(_i)  (0x0C008 + ((_i) * 0x10)) /* MSI-X Table entry message reg 0 - RW */
+#define E1000_MSIXVCTRL(_i) (0x0C00C + ((_i) * 0x10)) /* MSI-X Table entry vector ctrl reg 0 - RW */
+#define E1000_MSIXPBA    0x0E000 /* MSI-X Pending bit array */
+#define E1000_RETA(_i)  (0x05C00 + ((_i) * 4)) /* Redirection Table - RW Array */
+#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
+#define E1000_RSSIM     0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR     0x05868 /* RSS Interrupt Request */
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.h	2022-03-21 12:58:29.846884505 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_PHY_H_
+#define _E1000_PHY_H_
+
+typedef enum {
+	e1000_ms_hw_default = 0,
+	e1000_ms_force_master,
+	e1000_ms_force_slave,
+	e1000_ms_auto
+} e1000_ms_type;
+
+typedef enum {
+	e1000_smart_speed_default = 0,
+	e1000_smart_speed_on,
+	e1000_smart_speed_off
+} e1000_smart_speed;
+
+s32  e1000_check_downshift_generic(struct e1000_hw *hw);
+s32  e1000_check_polarity_m88(struct e1000_hw *hw);
+s32  e1000_check_polarity_igp(struct e1000_hw *hw);
+s32  e1000_check_reset_block_generic(struct e1000_hw *hw);
+s32  e1000_copper_link_autoneg(struct e1000_hw *hw);
+s32  e1000_phy_force_speed_duplex(struct e1000_hw *hw);
+s32  e1000_copper_link_setup_igp(struct e1000_hw *hw);
+s32  e1000_copper_link_setup_m88(struct e1000_hw *hw);
+s32  e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32  e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32  e1000_get_cable_length_m88(struct e1000_hw *hw);
+s32  e1000_get_cable_length_igp_2(struct e1000_hw *hw);
+s32  e1000_get_cfg_done_generic(struct e1000_hw *hw);
+s32  e1000_get_phy_id(struct e1000_hw *hw);
+s32  e1000_get_phy_info_igp(struct e1000_hw *hw);
+s32  e1000_get_phy_info_m88(struct e1000_hw *hw);
+s32  e1000_phy_sw_reset_generic(struct e1000_hw *hw);
+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
+s32  e1000_phy_hw_reset_generic(struct e1000_hw *hw);
+s32  e1000_phy_reset_dsp_generic(struct e1000_hw *hw);
+s32  e1000_phy_setup_autoneg(struct e1000_hw *hw);
+s32  e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active);
+s32  e1000_setup_copper_link_generic(struct e1000_hw *hw);
+s32  e1000_wait_autoneg_generic(struct e1000_hw *hw);
+s32  e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_phy_reset_dsp(struct e1000_hw *hw);
+s32  e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+                                u32 usec_interval, bool *success);
+s32  e1000_phy_init_script_igp3(struct e1000_hw *hw);
+e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id);
+void e1000_power_up_phy_copper(struct e1000_hw *hw);
+void e1000_power_down_phy_copper(struct e1000_hw *hw);
+s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+
+#define E1000_MAX_PHY_ADDR                4
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG        0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS        0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL          0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH        0x13 /* PHY Link Health */
+#define IGP01E1000_GMII_FIFO              0x14 /* GMII FIFO */
+#define IGP01E1000_PHY_CHANNEL_QUALITY    0x15 /* PHY Channel Quality */
+#define IGP02E1000_PHY_POWER_MGMT         0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT        0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT                22   /* Page Select for BM */
+#define IGP_PAGE_SHIFT                    5
+#define PHY_REG_MASK                      0x1F
+
+
+#define IGP01E1000_PHY_PCS_INIT_REG       0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK      0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX         0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX    0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED      0x0080
+
+/* Enable flexible speed on link-up */
+#define IGP01E1000_GMII_FLEX_SPD          0x0010
+#define IGP01E1000_GMII_SPD               0x0020 /* Enable SPD */
+
+#define IGP02E1000_PM_SPD                 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU             0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU             0x0004 /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE      0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX              0x0008
+#define IGP01E1000_PSSR_SPEED_MASK        0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS    0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM        4
+#define IGP02E1000_PHY_AGC_A              0x11B1
+#define IGP02E1000_PHY_AGC_B              0x12B1
+#define IGP02E1000_PHY_AGC_C              0x14B1
+#define IGP02E1000_PHY_AGC_D              0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT       9   /* Coarse - 15:13, Fine - 12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK        0x7F
+#define IGP02E1000_AGC_RANGE              15
+
+#define IGP03E1000_PHY_MISC_CTRL          0x1B
+#define IGP03E1000_PHY_MISC_DUPLEX_MANUAL_SET  0x1000 /* Manually Set Duplex */
+
+#define E1000_CABLE_LENGTH_UNDEFINED      0xFF
+
+#define E1000_KMRNCTRLSTA_OFFSET          0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT    16
+#define E1000_KMRNCTRLSTA_REN             0x00200000
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET     0x3    /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK     0x1000 /* Nearend Loopback mode */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
+#define IFE_PHY_SPECIAL_CONTROL     0x11 /* 100BaseTx PHY Special Control */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */
+#define IFE_PHY_MDIX_CONTROL        0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED    0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE      0x0010
+#define IFE_PSC_FORCE_POLARITY             0x0020
+#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE            0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF        0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON         0x0007 /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS      0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX       0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX        0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.h	2022-03-21 12:58:29.841884554 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_ICH8LAN_H_
+#define _E1000_ICH8LAN_H_
+
+#define ICH_FLASH_GFPREG                 0x0000
+#define ICH_FLASH_HSFSTS                 0x0004
+#define ICH_FLASH_HSFCTL                 0x0006
+#define ICH_FLASH_FADDR                  0x0008
+#define ICH_FLASH_FDATA0                 0x0010
+
+#define ICH_FLASH_READ_COMMAND_TIMEOUT   500
+#define ICH_FLASH_WRITE_COMMAND_TIMEOUT  500
+#define ICH_FLASH_ERASE_COMMAND_TIMEOUT  3000000
+#define ICH_FLASH_LINEAR_ADDR_MASK       0x00FFFFFF
+#define ICH_FLASH_CYCLE_REPEAT_COUNT     10
+
+#define ICH_CYCLE_READ                   0
+#define ICH_CYCLE_WRITE                  2
+#define ICH_CYCLE_ERASE                  3
+
+#define FLASH_GFPREG_BASE_MASK           0x1FFF
+#define FLASH_SECTOR_ADDR_SHIFT          12
+
+#define E1000_SHADOW_RAM_WORDS           2048
+
+#define ICH_FLASH_SEG_SIZE_256           256
+#define ICH_FLASH_SEG_SIZE_4K            4096
+#define ICH_FLASH_SEG_SIZE_8K            8192
+#define ICH_FLASH_SEG_SIZE_64K           65536
+#define ICH_FLASH_SECTOR_SIZE            4096
+
+#define ICH_FLASH_REG_MAPSIZE            0x00A0
+
+#define E1000_ICH_FWSM_RSPCIPHY          0x00000040 /* Reset PHY on PCI Reset */
+#define E1000_ICH_FWSM_DISSW             0x10000000 /* FW Disables SW Writes */
+/* FW established a valid mode */
+#define E1000_ICH_FWSM_FW_VALID          0x00008000
+
+#define E1000_ICH_MNG_IAMT_MODE          0x2
+
+#define ID_LED_DEFAULT_ICH8LAN  ((ID_LED_DEF1_DEF2 << 12) | \
+                                 (ID_LED_DEF1_OFF2 <<  8) | \
+                                 (ID_LED_DEF1_ON2  <<  4) | \
+                                 (ID_LED_DEF1_DEF2))
+
+#define E1000_ICH_NVM_SIG_WORD           0x13
+#define E1000_ICH_NVM_SIG_MASK           0xC000
+
+#define E1000_ICH8_LAN_INIT_TIMEOUT      1500
+
+#define E1000_FEXTNVM_SW_CONFIG        1
+#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M */
+
+#define PCIE_ICH8_SNOOP_ALL   PCIE_NO_SNOOP_ALL
+
+#define E1000_ICH_RAR_ENTRIES            7
+
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
+                           ((reg) & MAX_PHY_REG_ADDRESS))
+#define IGP3_KMRN_DIAG  PHY_REG(770, 19) /* KMRN Diagnostic */
+#define IGP3_VR_CTRL    PHY_REG(776, 18) /* Voltage Regulator Control */
+#define IGP3_CAPABILITY PHY_REG(776, 19) /* Capability */
+#define IGP3_PM_CTRL    PHY_REG(769, 20) /* Power Management Control */
+
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS         0x0002
+#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
+#define IGP3_VR_CTRL_MODE_SHUTDOWN           0x0200
+#define IGP3_PM_CTRL_FORCE_PWR_DOWN          0x0020
+
+/*
+ * Additional interrupts need to be handled for ICH family:
+ *  DSW = The FW changed the status of the DISSW bit in FWSM
+ *  PHYINT = The LAN connected device generates an interrupt
+ *  EPRST = Manageability reset event
+ */
+#define IMS_ICH_ENABLE_MASK (\
+    E1000_IMS_DSW   | \
+    E1000_IMS_PHYINT | \
+    E1000_IMS_EPRST)
+
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.h	2022-03-21 12:58:29.835884612 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_82540.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_MAC_H_
+#define _E1000_MAC_H_
+
+/*
+ * Functions that should not be called directly from drivers but can be used
+ * by other files in this 'shared code'
+ */
+s32  e1000_blink_led_generic(struct e1000_hw *hw);
+s32  e1000_check_for_copper_link_generic(struct e1000_hw *hw);
+s32  e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
+s32  e1000_check_for_serdes_link_generic(struct e1000_hw *hw);
+s32  e1000_cleanup_led_generic(struct e1000_hw *hw);
+s32  e1000_commit_fc_settings_generic(struct e1000_hw *hw);
+s32  e1000_config_fc_after_link_up_generic(struct e1000_hw *hw);
+s32  e1000_disable_pcie_master_generic(struct e1000_hw *hw);
+s32  e1000_force_mac_fc_generic(struct e1000_hw *hw);
+s32  e1000_get_auto_rd_done_generic(struct e1000_hw *hw);
+s32  e1000_get_bus_info_pci_generic(struct e1000_hw *hw);
+s32  e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
+s32  e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
+s32  e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+                                               u16 *duplex);
+s32  e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
+                                                     u16 *speed, u16 *duplex);
+s32  e1000_id_led_init_generic(struct e1000_hw *hw);
+s32  e1000_led_on_generic(struct e1000_hw *hw);
+s32  e1000_led_off_generic(struct e1000_hw *hw);
+void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
+	                               u8 *mc_addr_list, u32 mc_addr_count,
+	                               u32 rar_used_count, u32 rar_count);
+s32  e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw);
+s32  e1000_set_default_fc_generic(struct e1000_hw *hw);
+s32  e1000_set_fc_watermarks_generic(struct e1000_hw *hw);
+s32  e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);
+s32  e1000_setup_led_generic(struct e1000_hw *hw);
+s32  e1000_setup_link_generic(struct e1000_hw *hw);
+s32  e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
+s32  e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
+                                       u32 offset, u8 data);
+
+u32  e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr);
+
+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
+void e1000_clear_vfta_generic(struct e1000_hw *hw);
+void e1000_config_collision_dist_generic(struct e1000_hw *hw);
+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
+void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value);
+void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
+void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+s32  e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
+void e1000_remove_device_generic(struct e1000_hw *hw);
+void e1000_reset_adaptive_generic(struct e1000_hw *hw);
+void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
+void e1000_update_adaptive_generic(struct e1000_hw *hw);
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_82540.c	2022-03-21 12:58:29.830884661 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_82540
+ * e1000_82545
+ * e1000_82546
+ * e1000_82545_rev_3
+ * e1000_82546_rev_3
+ */
+
+#include "e1000_api.h"
+
+static s32  e1000_init_phy_params_82540(struct e1000_hw *hw);
+static s32  e1000_init_nvm_params_82540(struct e1000_hw *hw);
+static s32  e1000_init_mac_params_82540(struct e1000_hw *hw);
+static s32  e1000_adjust_serdes_amplitude_82540(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_82540(struct e1000_hw *hw);
+static s32  e1000_init_hw_82540(struct e1000_hw *hw);
+static s32  e1000_reset_hw_82540(struct e1000_hw *hw);
+static s32  e1000_set_phy_mode_82540(struct e1000_hw *hw);
+static s32  e1000_set_vco_speed_82540(struct e1000_hw *hw);
+static s32  e1000_setup_copper_link_82540(struct e1000_hw *hw);
+static s32  e1000_setup_fiber_serdes_link_82540(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_82540(struct e1000_hw *hw);
+
+/**
+ * e1000_init_phy_params_82540 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_phy_params_82540(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	phy->addr                       = 1;
+	phy->autoneg_mask               = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+	phy->reset_delay_us             = 10000;
+	phy->type                       = e1000_phy_m88;
+
+	/* Function Pointers */
+	func->check_polarity            = e1000_check_polarity_m88;
+	func->commit_phy                = e1000_phy_sw_reset_generic;
+	func->force_speed_duplex        = e1000_phy_force_speed_duplex_m88;
+	func->get_cable_length          = e1000_get_cable_length_m88;
+	func->get_cfg_done              = e1000_get_cfg_done_generic;
+	func->read_phy_reg              = e1000_read_phy_reg_m88;
+	func->reset_phy                 = e1000_phy_hw_reset_generic;
+	func->write_phy_reg             = e1000_write_phy_reg_m88;
+	func->get_phy_info              = e1000_get_phy_info_m88;
+	func->power_up_phy              = e1000_power_up_phy_copper;
+	func->power_down_phy            = e1000_power_down_phy_copper_82540;
+
+	ret_val = e1000_get_phy_id(hw);
+	if (ret_val)
+		goto out;
+
+	/* Verify phy id */
+	switch (hw->mac.type) {
+	case e1000_82540:
+	case e1000_82545:
+	case e1000_82545_rev_3:
+	case e1000_82546:
+	case e1000_82546_rev_3:
+		if (phy->id == M88E1011_I_PHY_ID)
+			break;
+		/* Fall Through */
+	default:
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+		break;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_82540 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_nvm_params_82540(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_functions *func = &hw->func;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+
+	DEBUGFUNC("e1000_init_nvm_params_82540");
+
+	nvm->type               = e1000_nvm_eeprom_microwire;
+	nvm->delay_usec         = 50;
+	nvm->opcode_bits        = 3;
+	switch (nvm->override) {
+	case e1000_nvm_override_microwire_large:
+		nvm->address_bits       = 8;
+		nvm->word_size          = 256;
+		break;
+	case e1000_nvm_override_microwire_small:
+		nvm->address_bits       = 6;
+		nvm->word_size          = 64;
+		break;
+	default:
+		nvm->address_bits       = eecd & E1000_EECD_SIZE ? 8 : 6;
+		nvm->word_size          = eecd & E1000_EECD_SIZE ? 256 : 64;
+		break;
+	}
+
+	/* Function Pointers */
+	func->acquire_nvm        = e1000_acquire_nvm_generic;
+	func->read_nvm           = e1000_read_nvm_microwire;
+	func->release_nvm        = e1000_release_nvm_generic;
+	func->update_nvm         = e1000_update_nvm_checksum_generic;
+	func->valid_led_default  = e1000_valid_led_default_generic;
+	func->validate_nvm       = e1000_validate_nvm_checksum_generic;
+	func->write_nvm          = e1000_write_nvm_microwire;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_82540 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_mac_params_82540(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_mac_params_82540");
+
+	/* Set media type */
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82545EM_FIBER:
+	case E1000_DEV_ID_82545GM_FIBER:
+	case E1000_DEV_ID_82546EB_FIBER:
+	case E1000_DEV_ID_82546GB_FIBER:
+		hw->phy.media_type = e1000_media_type_fiber;
+		break;
+	case E1000_DEV_ID_82545GM_SERDES:
+	case E1000_DEV_ID_82546GB_SERDES:
+		hw->phy.media_type = e1000_media_type_internal_serdes;
+		break;
+	default:
+		hw->phy.media_type = e1000_media_type_copper;
+		break;
+	}
+
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_RAR_ENTRIES;
+
+	/* Function pointers */
+
+	/* bus type/speed/width */
+	func->get_bus_info = e1000_get_bus_info_pci_generic;
+	/* reset */
+	func->reset_hw = e1000_reset_hw_82540;
+	/* hw initialization */
+	func->init_hw = e1000_init_hw_82540;
+	/* link setup */
+	func->setup_link = e1000_setup_link_generic;
+	/* physical interface setup */
+	func->setup_physical_interface =
+	        (hw->phy.media_type == e1000_media_type_copper)
+	                ? e1000_setup_copper_link_82540
+	                : e1000_setup_fiber_serdes_link_82540;
+	/* check for link */
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		func->check_for_link = e1000_check_for_copper_link_generic;
+		break;
+	case e1000_media_type_fiber:
+		func->check_for_link = e1000_check_for_fiber_link_generic;
+		break;
+	case e1000_media_type_internal_serdes:
+		func->check_for_link = e1000_check_for_serdes_link_generic;
+		break;
+	default:
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+		break;
+	}
+	/* link info */
+	func->get_link_up_info =
+	        (hw->phy.media_type == e1000_media_type_copper)
+	                ? e1000_get_speed_and_duplex_copper_generic
+	                : e1000_get_speed_and_duplex_fiber_serdes_generic;
+	/* multicast address update */
+	func->update_mc_addr_list = e1000_update_mc_addr_list_generic;
+	/* writing VFTA */
+	func->write_vfta = e1000_write_vfta_generic;
+	/* clearing VFTA */
+	func->clear_vfta = e1000_clear_vfta_generic;
+	/* setting MTA */
+	func->mta_set = e1000_mta_set_generic;
+	/* setup LED */
+	func->setup_led = e1000_setup_led_generic;
+	/* cleanup LED */
+	func->cleanup_led = e1000_cleanup_led_generic;
+	/* turn on/off LED */
+	func->led_on = e1000_led_on_generic;
+	func->led_off = e1000_led_off_generic;
+	/* clear hardware counters */
+	func->clear_hw_cntrs = e1000_clear_hw_cntrs_82540;
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_init_function_pointers_82540 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * The only function explicitly called by the api module to initialize
+ * all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82540(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_init_function_pointers_82540");
+
+	hw->func.init_mac_params = e1000_init_mac_params_82540;
+	hw->func.init_nvm_params = e1000_init_nvm_params_82540;
+	hw->func.init_phy_params = e1000_init_phy_params_82540;
+}
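+
+/*
+ * Illustrative sketch (an assumption based on the declarations in
+ * e1000_api.h, not code from this file): the api module first calls the
+ * per-MAC e1000_init_function_pointers_*() hook and then invokes the
+ * installed init_* callbacks, roughly:
+ *
+ *   e1000_init_function_pointers_82540(hw);
+ *   ret = hw->func.init_mac_params(hw);   // e1000_init_mac_params_82540
+ *   if (!ret)
+ *           ret = hw->func.init_nvm_params(hw);
+ *   if (!ret)
+ *           ret = hw->func.init_phy_params(hw);
+ */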
+
+/**
+ *  e1000_reset_hw_82540 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_reset_hw_82540(struct e1000_hw *hw)
+{
+	u32 ctrl, icr, manc;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_reset_hw_82540");
+
+	DEBUGOUT("Masking off all interrupts\n");
+	E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
+
+	E1000_WRITE_REG(hw, E1000_RCTL, 0);
+	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+	E1000_WRITE_FLUSH(hw);
+
+	/*
+	 * Delay to allow any outstanding PCI transactions to complete
+	 * before resetting the device.
+	 */
+	msec_delay(10);
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	DEBUGOUT("Issuing a global reset to 82540/82545/82546 MAC\n");
+	switch (hw->mac.type) {
+	case e1000_82545_rev_3:
+	case e1000_82546_rev_3:
+		E1000_WRITE_REG(hw, E1000_CTRL_DUP, ctrl | E1000_CTRL_RST);
+		break;
+	default:
+		/*
+		 * These controllers can't ack the 64-bit write when
+		 * issuing the reset, so we use IO-mapping as a
+		 * workaround to issue the reset.
+		 */
+		E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+		break;
+	}
+
+	/* Wait for EEPROM reload */
+	msec_delay(5);
+
+	/* Disable HW ARPs on ASF enabled adapters */
+	manc = E1000_READ_REG(hw, E1000_MANC);
+	manc &= ~E1000_MANC_ARP_EN;
+	E1000_WRITE_REG(hw, E1000_MANC, manc);
+
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+	icr = E1000_READ_REG(hw, E1000_ICR);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_hw_82540 - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This initializes the hardware, readying it for operation.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_hw_82540(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 txdctl, ctrl_ext;
+	s32 ret_val = E1000_SUCCESS;
+	u16 i;
+
+	DEBUGFUNC("e1000_init_hw_82540");
+
+	/* Initialize identification LED */
+	ret_val = e1000_id_led_init_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
+	}
+
+	/* Disabling VLAN filtering */
+	DEBUGOUT("Initializing the IEEE VLAN\n");
+	if (mac->type < e1000_82545_rev_3)
+		E1000_WRITE_REG(hw, E1000_VET, 0);
+
+	e1000_clear_vfta(hw);
+
+	/* Setup the receive address. */
+	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+	/* Zero out the Multicast HASH table */
+	DEBUGOUT("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+		/*
+		 * Avoid back to back register writes by adding the register
+		 * read (flush).  This is to protect against some strange
+		 * bridge configurations that may issue Memory Write Block
+		 * (MWB) to our register space.  The *_rev_3 hardware at
+		 * least doesn't respond correctly to every other dword in an
+		 * MWB to our register space.
+		 */
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	if (mac->type < e1000_82545_rev_3)
+		e1000_pcix_mmrbc_workaround_generic(hw);
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link(hw);
+
+	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
+	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+	         E1000_TXDCTL_FULL_TX_DESC_WB;
+	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_82540(hw);
+
+	if ((hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER) ||
+	    (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3)) {
+		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+		/*
+		 * Relaxed ordering must be disabled to avoid a parity
+		 * error crash in a PCI slot.
+		 */
+		ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_copper_link_82540 - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the appropriate function to configure the link for auto-neg or forced
+ *  speed and duplex.  Then we check for link; once link is established, the
+ *  collision distance and flow control are configured.  If link is
+ *  not established, we return -E1000_ERR_PHY (-2).  This is a function
+ *  pointer entry point called by the api module.
+ **/
+static s32 e1000_setup_copper_link_82540(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val = E1000_SUCCESS;
+	u16 data;
+
+	DEBUGFUNC("e1000_setup_copper_link_82540");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	ret_val = e1000_set_phy_mode_82540(hw);
+	if (ret_val)
+		goto out;
+
+	if (hw->mac.type == e1000_82545_rev_3 ||
+	    hw->mac.type == e1000_82546_rev_3) {
+		ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &data);
+		if (ret_val)
+			goto out;
+		data |= 0x00000008;
+		ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, data);
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000_copper_link_setup_m88(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_setup_copper_link_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_fiber_serdes_link_82540 - Setup link for fiber/serdes
+ *  @hw: pointer to the HW structure
+ *
+ *  Set the output amplitude to the value in the EEPROM and adjust the VCO
+ *  speed to improve Bit Error Rate (BER) performance.  Configures collision
+ *  distance and flow control for fiber and serdes links.  Upon successful
+ *  setup, poll for link.  This is a function pointer entry point called by
+ *  the api module.
+ **/
+static s32 e1000_setup_fiber_serdes_link_82540(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_setup_fiber_serdes_link_82540");
+
+	switch (mac->type) {
+	case e1000_82545_rev_3:
+	case e1000_82546_rev_3:
+		if (hw->phy.media_type == e1000_media_type_internal_serdes) {
+			/*
+			 * If we're on serdes media, adjust the output
+			 * amplitude to value set in the EEPROM.
+			 */
+			ret_val = e1000_adjust_serdes_amplitude_82540(hw);
+			if (ret_val)
+				goto out;
+		}
+		/* Adjust VCO speed to improve BER performance */
+		ret_val = e1000_set_vco_speed_82540(hw);
+		if (ret_val)
+			goto out;
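+		/* Fall through */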
+	default:
+		break;
+	}
+
+	ret_val = e1000_setup_fiber_serdes_link_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_adjust_serdes_amplitude_82540 - Adjust amplitude based on EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Adjust the SERDES output amplitude based on the EEPROM settings.
+ **/
+static s32 e1000_adjust_serdes_amplitude_82540(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 nvm_data;
+
+	DEBUGFUNC("e1000_adjust_serdes_amplitude_82540");
+
+	ret_val = e1000_read_nvm(hw, NVM_SERDES_AMPLITUDE, 1, &nvm_data);
+	if (ret_val)
+		goto out;
+
+	if (nvm_data != NVM_RESERVED_WORD) {
+		/* Adjust serdes output amplitude only. */
+		nvm_data &= NVM_SERDES_AMPLITUDE_MASK;
+		ret_val = e1000_write_phy_reg(hw,
+		                             M88E1000_PHY_EXT_CTRL,
+		                             nvm_data);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_vco_speed_82540 - Set VCO speed for better performance
+ *  @hw: pointer to the HW structure
+ *
+ *  Set the VCO speed to improve Bit Error Rate (BER) performance.
+ **/
+static s32 e1000_set_vco_speed_82540(struct e1000_hw *hw)
+{
+	s32  ret_val = E1000_SUCCESS;
+	u16 default_page = 0;
+	u16 phy_data;
+
+	DEBUGFUNC("e1000_set_vco_speed_82540");
+
+	/* Set PHY register 30, page 5, bit 8 to 0 */
+
+	ret_val = e1000_read_phy_reg(hw,
+	                            M88E1000_PHY_PAGE_SELECT,
+	                            &default_page);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Set PHY register 30, page 4, bit 11 to 1 */
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= M88E1000_PHY_VCO_REG_BIT11;
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT,
+	                              default_page);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_phy_mode_82540 - Set PHY to class A mode
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the PHY to class A mode and assumes the following operations will
+ *  follow to enable the new class mode:
+ *    1.  Do a PHY soft reset.
+ *    2.  Restart auto-negotiation or force link.
+ **/
+static s32 e1000_set_phy_mode_82540(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = E1000_SUCCESS;
+	u16 nvm_data;
+
+	DEBUGFUNC("e1000_set_phy_mode_82540");
+
+	if (hw->mac.type != e1000_82545_rev_3)
+		goto out;
+
+	ret_val = e1000_read_nvm(hw, NVM_PHY_CLASS_WORD, 1, &nvm_data);
+	if (ret_val) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+	if ((nvm_data != NVM_RESERVED_WORD) && (nvm_data & NVM_PHY_CLASS_A)) {
+		ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT,
+		                              0x000B);
+		if (ret_val) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+		ret_val = e1000_write_phy_reg(hw,
+		                              M88E1000_PHY_GEN_CONTROL,
+		                              0x8104);
+		if (ret_val) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+
+		phy->reset_disable = FALSE;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_82540 - Remove link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or when Wake on LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_82540(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_SMBUS_EN))
+		e1000_power_down_phy_copper(hw);
+
+	return;
+}
+
+/**
+ *  e1000_clear_hw_cntrs_82540 - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_82540(struct e1000_hw *hw)
+{
+	volatile u32 temp;
+
+	DEBUGFUNC("e1000_clear_hw_cntrs_82540");
+
+	e1000_clear_hw_cntrs_base_generic(hw);
+
+	temp = E1000_READ_REG(hw, E1000_PRC64);
+	temp = E1000_READ_REG(hw, E1000_PRC127);
+	temp = E1000_READ_REG(hw, E1000_PRC255);
+	temp = E1000_READ_REG(hw, E1000_PRC511);
+	temp = E1000_READ_REG(hw, E1000_PRC1023);
+	temp = E1000_READ_REG(hw, E1000_PRC1522);
+	temp = E1000_READ_REG(hw, E1000_PTC64);
+	temp = E1000_READ_REG(hw, E1000_PTC127);
+	temp = E1000_READ_REG(hw, E1000_PTC255);
+	temp = E1000_READ_REG(hw, E1000_PTC511);
+	temp = E1000_READ_REG(hw, E1000_PTC1023);
+	temp = E1000_READ_REG(hw, E1000_PTC1522);
+
+	temp = E1000_READ_REG(hw, E1000_ALGNERRC);
+	temp = E1000_READ_REG(hw, E1000_RXERRC);
+	temp = E1000_READ_REG(hw, E1000_TNCRS);
+	temp = E1000_READ_REG(hw, E1000_CEXTERR);
+	temp = E1000_READ_REG(hw, E1000_TSCTC);
+	temp = E1000_READ_REG(hw, E1000_TSCTFC);
+
+	temp = E1000_READ_REG(hw, E1000_MGTPRC);
+	temp = E1000_READ_REG(hw, E1000_MGTPDC);
+	temp = E1000_READ_REG(hw, E1000_MGTPTC);
+}
+
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.h	2022-03-21 12:58:29.824884720 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_82541_H_
+#define _E1000_82541_H_
+
+#define NVM_WORD_SIZE_BASE_SHIFT_82541 (NVM_WORD_SIZE_BASE_SHIFT + 1)
+
+#define IGP01E1000_PHY_CHANNEL_NUM                    4
+
+#define IGP01E1000_PHY_AGC_A                     0x1172
+#define IGP01E1000_PHY_AGC_B                     0x1272
+#define IGP01E1000_PHY_AGC_C                     0x1472
+#define IGP01E1000_PHY_AGC_D                     0x1872
+
+#define IGP01E1000_PHY_AGC_PARAM_A               0x1171
+#define IGP01E1000_PHY_AGC_PARAM_B               0x1271
+#define IGP01E1000_PHY_AGC_PARAM_C               0x1471
+#define IGP01E1000_PHY_AGC_PARAM_D               0x1871
+
+#define IGP01E1000_PHY_EDAC_MU_INDEX             0xC000
+#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS      0x8000
+
+#define IGP01E1000_PHY_DSP_RESET                 0x1F33
+
+#define IGP01E1000_PHY_DSP_FFE                   0x1F35
+#define IGP01E1000_PHY_DSP_FFE_CM_CP             0x0069
+#define IGP01E1000_PHY_DSP_FFE_DEFAULT           0x002A
+
+#define IGP01E1000_IEEE_FORCE_GIG                0x0140
+#define IGP01E1000_IEEE_RESTART_AUTONEG          0x3300
+
+#define IGP01E1000_AGC_LENGTH_SHIFT                   7
+#define IGP01E1000_AGC_RANGE                         10
+
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20                20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100              100
+
+#define IGP01E1000_ANALOG_FUSE_STATUS            0x20D0
+#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS      0x20D1
+#define IGP01E1000_ANALOG_FUSE_CONTROL           0x20DC
+#define IGP01E1000_ANALOG_FUSE_BYPASS            0x20DE
+
+#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED     0x0100
+#define IGP01E1000_ANALOG_FUSE_FINE_MASK         0x0F80
+#define IGP01E1000_ANALOG_FUSE_COARSE_MASK       0x0070
+#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH     0x0040
+#define IGP01E1000_ANALOG_FUSE_COARSE_10         0x0010
+#define IGP01E1000_ANALOG_FUSE_FINE_1            0x0080
+#define IGP01E1000_ANALOG_FUSE_FINE_10           0x0500
+#define IGP01E1000_ANALOG_FUSE_POLY_MASK         0xF000
+#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002
+
+#define IGP01E1000_MSE_CHANNEL_D                 0x000F
+#define IGP01E1000_MSE_CHANNEL_C                 0x00F0
+#define IGP01E1000_MSE_CHANNEL_B                 0x0F00
+#define IGP01E1000_MSE_CHANNEL_A                 0xF000
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.h	2022-03-21 12:58:29.819884768 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_API_H_
+#define _E1000_API_H_
+
+#include "e1000_hw.h"
+
+extern void    e1000_init_function_pointers_82542(struct e1000_hw *hw);
+extern void    e1000_init_function_pointers_82543(struct e1000_hw *hw);
+extern void    e1000_init_function_pointers_82540(struct e1000_hw *hw);
+extern void    e1000_init_function_pointers_82571(struct e1000_hw *hw);
+extern void    e1000_init_function_pointers_82541(struct e1000_hw *hw);
+extern void    e1000_init_function_pointers_80003es2lan(struct e1000_hw *hw);
+extern void    e1000_init_function_pointers_ich8lan(struct e1000_hw *hw);
+
+s32  e1000_set_mac_type(struct e1000_hw *hw);
+s32  e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
+s32  e1000_init_mac_params(struct e1000_hw *hw);
+s32  e1000_init_nvm_params(struct e1000_hw *hw);
+s32  e1000_init_phy_params(struct e1000_hw *hw);
+void e1000_remove_device(struct e1000_hw *hw);
+s32  e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_clear_vfta(struct e1000_hw *hw);
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+s32  e1000_force_mac_fc(struct e1000_hw *hw);
+s32  e1000_check_for_link(struct e1000_hw *hw);
+s32  e1000_reset_hw(struct e1000_hw *hw);
+s32  e1000_init_hw(struct e1000_hw *hw);
+s32  e1000_setup_link(struct e1000_hw *hw);
+s32  e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed,
+                                u16 *duplex);
+s32  e1000_disable_pcie_master(struct e1000_hw *hw);
+void e1000_config_collision_dist(struct e1000_hw *hw);
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
+u32  e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
+void e1000_update_mc_addr_list(struct e1000_hw *hw,
+                               u8 *mc_addr_list, u32 mc_addr_count,
+                               u32 rar_used_count, u32 rar_count);
+s32  e1000_setup_led(struct e1000_hw *hw);
+s32  e1000_cleanup_led(struct e1000_hw *hw);
+s32  e1000_check_reset_block(struct e1000_hw *hw);
+s32  e1000_blink_led(struct e1000_hw *hw);
+s32  e1000_led_on(struct e1000_hw *hw);
+s32  e1000_led_off(struct e1000_hw *hw);
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+s32  e1000_get_cable_length(struct e1000_hw *hw);
+s32  e1000_validate_mdi_setting(struct e1000_hw *hw);
+s32  e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
+                               u32 offset, u8 data);
+s32  e1000_get_phy_info(struct e1000_hw *hw);
+s32  e1000_phy_hw_reset(struct e1000_hw *hw);
+s32  e1000_phy_commit(struct e1000_hw *hw);
+void e1000_power_up_phy(struct e1000_hw *hw);
+void e1000_power_down_phy(struct e1000_hw *hw);
+s32  e1000_read_mac_addr(struct e1000_hw *hw);
+s32  e1000_read_pba_num(struct e1000_hw *hw, u32 *part_num);
+void e1000_reload_nvm(struct e1000_hw *hw);
+s32  e1000_update_nvm_checksum(struct e1000_hw *hw);
+s32  e1000_validate_nvm_checksum(struct e1000_hw *hw);
+s32  e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
+                     u16 *data);
+s32  e1000_wait_autoneg(struct e1000_hw *hw);
+s32  e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32  e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+bool e1000_check_mng_mode(struct e1000_hw *hw);
+bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32  e1000_mng_enable_host_if(struct e1000_hw *hw);
+s32  e1000_mng_host_if_write(struct e1000_hw *hw,
+                             u8 *buffer, u16 length, u16 offset, u8 *sum);
+s32  e1000_mng_write_cmd_header(struct e1000_hw *hw,
+                                struct e1000_host_mng_command_header *hdr);
+s32  e1000_mng_write_dhcp_info(struct e1000_hw * hw,
+                                    u8 *buffer, u16 length);
+void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw,
+                                  struct e1000_hw_stats *stats,
+                                  u32 frame_len, u8 *mac_addr,
+                                  u32 max_frame_size);
+void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw,
+                                       bool state);
+bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw);
+u32  e1000_translate_register_82542(u32 reg);
+void e1000_init_script_state_82541(struct e1000_hw *hw, bool state);
+bool e1000_get_laa_state_82571(struct e1000_hw *hw);
+void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state);
+void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+                                                 bool state);
+void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
+void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
+
+
+/*
+ * TBI_ACCEPT macro definition:
+ *
+ * This macro requires:
+ *      adapter = a pointer to struct e1000_hw
+ *      status = the 8 bit status field of the Rx descriptor with EOP set
+ *      error = the 8 bit error field of the Rx descriptor with EOP set
+ *      length = the sum of all the length fields of the Rx descriptors that
+ *               make up the current frame
+ *      last_byte = the last byte of the frame DMAed by the hardware
+ *      max_frame_length = the maximum frame length we want to accept.
+ *      min_frame_length = the minimum frame length we want to accept.
+ *
+ * This macro is a conditional that should be used in the interrupt
+ * handler's Rx processing routine when RxErrors have been detected.
+ *
+ * Typical use:
+ *  ...
+ *  if (TBI_ACCEPT) {
+ *      accept_frame = TRUE;
+ *      e1000_tbi_adjust_stats(adapter, MacAddress);
+ *      frame_length--;
+ *  } else {
+ *      accept_frame = FALSE;
+ *  }
+ *  ...
+ */
+
+/* The carrier extension symbol, as received by the NIC. */
+#define CARRIER_EXTENSION   0x0F
+
+#define TBI_ACCEPT(a, status, errors, length, last_byte, min_frame_size, max_frame_size) \
+    (e1000_tbi_sbp_enabled_82543(a) && \
+     (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
+     ((last_byte) == CARRIER_EXTENSION) && \
+     (((status) & E1000_RXD_STAT_VP) ? \
+          (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \
+           ((length) <= (max_frame_size + 1))) : \
+          (((length) > min_frame_size) && \
+           ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1)))))
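+
+/*
+ * Worked example (assumes the usual Ethernet limits min_frame_size = 64,
+ * max_frame_size = 1522 and VLAN_TAG_SIZE = 4; these values are not taken
+ * from this header).  For a carrier-extension error frame whose last DMAed
+ * byte is 0x0F, TBI_ACCEPT accepts lengths in the range
+ *
+ *   VLAN-tagged (status & E1000_RXD_STAT_VP):  60 < length <= 1523
+ *   untagged                                 :  64 < length <= 1527
+ *
+ * The extra byte beyond the maximum accounts for the carrier-extension
+ * symbol itself, which the caller strips again via frame_length--.
+ */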
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.c	2022-03-21 12:58:29.813884827 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+#include "e1000_nvm.h"
+
+/**
+ *  e1000_raise_eec_clk - Raise EEPROM clock
+ *  @hw: pointer to the HW structure
+ *  @eecd: pointer to the EECD register value
+ *
+ *  Enable/Raise the EEPROM clock bit.
+ **/
+static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd | E1000_EECD_SK;
+	E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+	E1000_WRITE_FLUSH(hw);
+	usec_delay(hw->nvm.delay_usec);
+}
+
+/**
+ *  e1000_lower_eec_clk - Lower EEPROM clock
+ *  @hw: pointer to the HW structure
+ *  @eecd: pointer to the EECD register value
+ *
+ *  Clear/Lower the EEPROM clock bit.
+ **/
+static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd & ~E1000_EECD_SK;
+	E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+	E1000_WRITE_FLUSH(hw);
+	usec_delay(hw->nvm.delay_usec);
+}
+
+/**
+ *  e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
+ *  @hw: pointer to the HW structure
+ *  @data: data to send to the EEPROM
+ *  @count: number of bits to shift out
+ *
+ *  We need to shift 'count' bits out to the EEPROM.  So, the value in the
+ *  "data" parameter will be shifted out to the EEPROM one bit at a time.
+ *  In order to do this, "data" must be broken down into bits.
+ **/
+static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	u32 mask;
+
+	DEBUGFUNC("e1000_shift_out_eec_bits");
+
+	mask = 0x01 << (count - 1);
+	if (nvm->type == e1000_nvm_eeprom_microwire)
+		eecd &= ~E1000_EECD_DO;
+	else if (nvm->type == e1000_nvm_eeprom_spi)
+		eecd |= E1000_EECD_DO;
+
+	do {
+		eecd &= ~E1000_EECD_DI;
+
+		if (data & mask)
+			eecd |= E1000_EECD_DI;
+
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		E1000_WRITE_FLUSH(hw);
+
+		usec_delay(nvm->delay_usec);
+
+		e1000_raise_eec_clk(hw, &eecd);
+		e1000_lower_eec_clk(hw, &eecd);
+
+		mask >>= 1;
+	} while (mask);
+
+	eecd &= ~E1000_EECD_DI;
+	E1000_WRITE_REG(hw, E1000_EECD, eecd);
+}
+
+/**
+ *  e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
+ *  @hw: pointer to the HW structure
+ *  @count: number of bits to shift in
+ *
+ *  In order to read a register from the EEPROM, we need to shift 'count' bits
+ *  in from the EEPROM.  Bits are "shifted in" by raising the clock input to
+ *  the EEPROM (setting the SK bit), and then reading the value of the data out
+ *  "DO" bit.  During this "shifting in" process the data in "DI" bit should
+ *  always be clear.
+ **/
+static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+{
+	u32 eecd;
+	u32 i;
+	u16 data;
+
+	DEBUGFUNC("e1000_shift_in_eec_bits");
+
+	eecd = E1000_READ_REG(hw, E1000_EECD);
+
+	eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+	data = 0;
+
+	for (i = 0; i < count; i++) {
+		data <<= 1;
+		e1000_raise_eec_clk(hw, &eecd);
+
+		eecd = E1000_READ_REG(hw, E1000_EECD);
+
+		eecd &= ~E1000_EECD_DI;
+		if (eecd & E1000_EECD_DO)
+			data |= 1;
+
+		e1000_lower_eec_clk(hw, &eecd);
+	}
+
+	return data;
+}
+
+/**
+ *  e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ *  @hw: pointer to the HW structure
+ *  @ee_reg: EEPROM flag for polling
+ *
+ *  Polls the EEPROM status bit for either read or write completion based
+ *  upon the value of 'ee_reg'.
+ **/
+s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+{
+	u32 attempts = 100000;
+	u32 i, reg = 0;
+	s32 ret_val = -E1000_ERR_NVM;
+
+	DEBUGFUNC("e1000_poll_eerd_eewr_done");
+
+	for (i = 0; i < attempts; i++) {
+		if (ee_reg == E1000_NVM_POLL_READ)
+			reg = E1000_READ_REG(hw, E1000_EERD);
+		else
+			reg = E1000_READ_REG(hw, E1000_EEWR);
+
+		if (reg & E1000_NVM_RW_REG_DONE) {
+			ret_val = E1000_SUCCESS;
+			break;
+		}
+
+		usec_delay(5);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_acquire_nvm_generic - Generic request for access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ *  Return successful if access grant bit set, else clear the request for
+ *  EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+s32 e1000_acquire_nvm_generic(struct e1000_hw *hw)
+{
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_acquire_nvm_generic");
+
+	E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ);
+	eecd = E1000_READ_REG(hw, E1000_EECD);
+
+	while (timeout) {
+		if (eecd & E1000_EECD_GNT)
+			break;
+		usec_delay(5);
+		eecd = E1000_READ_REG(hw, E1000_EECD);
+		timeout--;
+	}
+
+	if (!timeout) {
+		eecd &= ~E1000_EECD_REQ;
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		DEBUGOUT("Could not acquire NVM grant\n");
+		ret_val = -E1000_ERR_NVM;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_standby_nvm - Return EEPROM to standby state
+ *  @hw: pointer to the HW structure
+ *
+ *  Return the EEPROM to a standby state.
+ **/
+static void e1000_standby_nvm(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+
+	DEBUGFUNC("e1000_standby_nvm");
+
+	if (nvm->type == e1000_nvm_eeprom_microwire) {
+		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		E1000_WRITE_FLUSH(hw);
+		usec_delay(nvm->delay_usec);
+
+		e1000_raise_eec_clk(hw, &eecd);
+
+		/* Select EEPROM */
+		eecd |= E1000_EECD_CS;
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		E1000_WRITE_FLUSH(hw);
+		usec_delay(nvm->delay_usec);
+
+		e1000_lower_eec_clk(hw, &eecd);
+	} else if (nvm->type == e1000_nvm_eeprom_spi) {
+		/* Toggle CS to flush commands */
+		eecd |= E1000_EECD_CS;
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		E1000_WRITE_FLUSH(hw);
+		usec_delay(nvm->delay_usec);
+		eecd &= ~E1000_EECD_CS;
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		E1000_WRITE_FLUSH(hw);
+		usec_delay(nvm->delay_usec);
+	}
+}
+
+/**
+ *  e1000_stop_nvm - Terminate EEPROM command
+ *  @hw: pointer to the HW structure
+ *
+ *  Terminates the current command by inverting the EEPROM's chip select pin.
+ **/
+void e1000_stop_nvm(struct e1000_hw *hw)
+{
+	u32 eecd;
+
+	DEBUGFUNC("e1000_stop_nvm");
+
+	eecd = E1000_READ_REG(hw, E1000_EECD);
+	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+		/* Pull CS high */
+		eecd |= E1000_EECD_CS;
+		e1000_lower_eec_clk(hw, &eecd);
+	} else if (hw->nvm.type == e1000_nvm_eeprom_microwire) {
+		/* CS on Microwire is active-high */
+		eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		e1000_raise_eec_clk(hw, &eecd);
+		e1000_lower_eec_clk(hw, &eecd);
+	}
+}
+
+/**
+ *  e1000_release_nvm_generic - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void e1000_release_nvm_generic(struct e1000_hw *hw)
+{
+	u32 eecd;
+
+	DEBUGFUNC("e1000_release_nvm_generic");
+
+	e1000_stop_nvm(hw);
+
+	eecd = E1000_READ_REG(hw, E1000_EECD);
+	eecd &= ~E1000_EECD_REQ;
+	E1000_WRITE_REG(hw, E1000_EECD, eecd);
+}
+
+/**
+ *  e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up the EEPROM for reading and writing.
+ **/
+static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	s32 ret_val = E1000_SUCCESS;
+	u16 timeout = 0;
+	u8 spi_stat_reg;
+
+	DEBUGFUNC("e1000_ready_nvm_eeprom");
+
+	if (nvm->type == e1000_nvm_eeprom_microwire) {
+		/* Clear SK and DI */
+		eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		/* Set CS */
+		eecd |= E1000_EECD_CS;
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+	} else if (nvm->type == e1000_nvm_eeprom_spi) {
+		/* Clear SK and CS */
+		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		usec_delay(1);
+		timeout = NVM_MAX_RETRY_SPI;
+
+		/*
+		 * Read "Status Register" repeatedly until the LSB is cleared.
+		 * The EEPROM will signal that the command has been completed
+		 * by clearing bit 0 of the internal status register.  If it's
+		 * not cleared within 'timeout', then error out.
+		 */
+		while (timeout) {
+			e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+			                         hw->nvm.opcode_bits);
+			spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
+			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+				break;
+
+			usec_delay(5);
+			e1000_standby_nvm(hw);
+			timeout--;
+		}
+
+		if (!timeout) {
+			DEBUGOUT("SPI NVM Status error\n");
+			ret_val = -E1000_ERR_NVM;
+			goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_nvm_spi - Read EEPROM using SPI
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM.
+ **/
+s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i = 0;
+	s32 ret_val;
+	u16 word_in;
+	u8 read_opcode = NVM_READ_OPCODE_SPI;
+
+	DEBUGFUNC("e1000_read_nvm_spi");
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_nvm(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_ready_nvm_eeprom(hw);
+	if (ret_val)
+		goto release;
+
+	e1000_standby_nvm(hw);
+
+	if ((nvm->address_bits == 8) && (offset >= 128))
+		read_opcode |= NVM_A8_OPCODE_SPI;
+
+	/* Send the READ command (opcode + addr) */
+	e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+	e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
+
+	/*
+	 * Read the data.  SPI NVMs increment the address with each byte
+	 * read and will roll over if reading beyond the end.  This allows
+	 * us to read the whole NVM from any offset
+	 */
+	for (i = 0; i < words; i++) {
+		word_in = e1000_shift_in_eec_bits(hw, 16);
+		data[i] = (word_in >> 8) | (word_in << 8);
+	}
+
+release:
+	e1000_release_nvm(hw);
+
+out:
+	return ret_val;
+}
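+
+/*
+ * Note (illustration only): the 16-bit value shifted in from the SPI EEPROM
+ * arrives with its bytes swapped relative to the word order the caller
+ * expects, which is why e1000_read_nvm_spi() does
+ *
+ *   data[i] = (word_in >> 8) | (word_in << 8);
+ *
+ * e.g. a shifted-in value of 0xA1B2 is stored as 0xB2A1.
+ */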
+
+/**
+ *  e1000_read_nvm_microwire - Reads EEPROM using microwire
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM.
+ **/
+s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words,
+                             u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i = 0;
+	s32 ret_val;
+	u8 read_opcode = NVM_READ_OPCODE_MICROWIRE;
+
+	DEBUGFUNC("e1000_read_nvm_microwire");
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_nvm(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_ready_nvm_eeprom(hw);
+	if (ret_val)
+		goto release;
+
+	for (i = 0; i < words; i++) {
+		/* Send the READ command (opcode + addr) */
+		e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+		e1000_shift_out_eec_bits(hw, (u16)(offset + i),
+					nvm->address_bits);
+
+		/*
+		 * Read the data.  For microwire, each word requires the
+		 * overhead of setup and tear-down.
+		 */
+		data[i] = e1000_shift_in_eec_bits(hw, 16);
+		e1000_standby_nvm(hw);
+	}
+
+release:
+	e1000_release_nvm(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_nvm_eerd - Reads EEPROM using EERD register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i, eerd = 0;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_read_nvm_eerd");
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * too many words for the offset, and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	for (i = 0; i < words; i++) {
+		eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
+		       E1000_NVM_RW_REG_START;
+
+		E1000_WRITE_REG(hw, E1000_EERD, eerd);
+		ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+		if (ret_val)
+			break;
+
+		data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
+		           E1000_NVM_RW_REG_DATA);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_nvm_spi - Write to EEPROM using SPI
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  Writes data to EEPROM at offset using SPI interface.
+ *
+ *  If e1000_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32 ret_val;
+	u16 widx = 0;
+
+	DEBUGFUNC("e1000_write_nvm_spi");
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_nvm(hw);
+	if (ret_val)
+		goto out;
+
+	msec_delay(10);
+
+	while (widx < words) {
+		u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+		ret_val = e1000_ready_nvm_eeprom(hw);
+		if (ret_val)
+			goto release;
+
+		e1000_standby_nvm(hw);
+
+		/* Send the WRITE ENABLE command (8 bit opcode) */
+		e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+		                         nvm->opcode_bits);
+
+		e1000_standby_nvm(hw);
+
+		/*
+		 * Some SPI eeproms use the 8th address bit embedded in the
+		 * opcode
+		 */
+		if ((nvm->address_bits == 8) && (offset >= 128))
+			write_opcode |= NVM_A8_OPCODE_SPI;
+
+		/* Send the Write command (8-bit opcode + addr) */
+		e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+		e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+		                         nvm->address_bits);
+
+		/* Loop to allow for up to whole page write of eeprom */
+		while (widx < words) {
+			u16 word_out = data[widx];
+			word_out = (word_out >> 8) | (word_out << 8);
+			e1000_shift_out_eec_bits(hw, word_out, 16);
+			widx++;
+
+			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+				e1000_standby_nvm(hw);
+				break;
+			}
+		}
+	}
+
+	msec_delay(10);
+release:
+	e1000_release_nvm(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_nvm_microwire - Writes EEPROM using microwire
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  Writes data to EEPROM at offset using microwire interface.
+ *
+ *  If e1000_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words,
+                              u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32  ret_val;
+	u32 eecd;
+	u16 words_written = 0;
+	u16 widx = 0;
+
+	DEBUGFUNC("e1000_write_nvm_microwire");
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_nvm(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_ready_nvm_eeprom(hw);
+	if (ret_val)
+		goto release;
+
+	e1000_shift_out_eec_bits(hw, NVM_EWEN_OPCODE_MICROWIRE,
+	                         (u16)(nvm->opcode_bits + 2));
+
+	e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2));
+
+	e1000_standby_nvm(hw);
+
+	while (words_written < words) {
+		e1000_shift_out_eec_bits(hw, NVM_WRITE_OPCODE_MICROWIRE,
+		                         nvm->opcode_bits);
+
+		e1000_shift_out_eec_bits(hw, (u16)(offset + words_written),
+		                         nvm->address_bits);
+
+		e1000_shift_out_eec_bits(hw, data[words_written], 16);
+
+		e1000_standby_nvm(hw);
+
+		for (widx = 0; widx < 200; widx++) {
+			eecd = E1000_READ_REG(hw, E1000_EECD);
+			if (eecd & E1000_EECD_DO)
+				break;
+			usec_delay(50);
+		}
+
+		if (widx == 200) {
+			DEBUGOUT("NVM Write did not complete\n");
+			ret_val = -E1000_ERR_NVM;
+			goto release;
+		}
+
+		e1000_standby_nvm(hw);
+
+		words_written++;
+	}
+
+	e1000_shift_out_eec_bits(hw, NVM_EWDS_OPCODE_MICROWIRE,
+	                         (u16)(nvm->opcode_bits + 2));
+
+	e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2));
+
+release:
+	e1000_release_nvm(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_pba_num_generic - Read device part number
+ *  @hw: pointer to the HW structure
+ *  @pba_num: pointer to device part number
+ *
+ *  Reads the product board assembly (PBA) number from the EEPROM and stores
+ *  the value in pba_num.
+ **/
+s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num)
+{
+	s32  ret_val;
+	u16 nvm_data;
+
+	DEBUGFUNC("e1000_read_pba_num_generic");
+
+	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		goto out;
+	}
+	*pba_num = (u32)(nvm_data << 16);
+
+	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		goto out;
+	}
+	*pba_num |= nvm_data;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_mac_addr_generic - Read device MAC address
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the device MAC address from the EEPROM and stores the value.
+ *  Since devices with two ports use the same EEPROM, we flip the last bit
+ *  of the MAC address for the second port.
+ **/
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
+{
+	s32  ret_val = E1000_SUCCESS;
+	u16 offset, nvm_data, i;
+
+	DEBUGFUNC("e1000_read_mac_addr");
+
+	for (i = 0; i < ETH_ADDR_LEN; i += 2) {
+		offset = i >> 1;
+		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
+		if (ret_val) {
+			DEBUGOUT("NVM Read Error\n");
+			goto out;
+		}
+		hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
+		hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
+	}
+
+	/* Flip last bit of mac address if we're on second port */
+	if (hw->bus.func == E1000_FUNC_1)
+		hw->mac.perm_addr[5] ^= 1;
+
+	for (i = 0; i < ETH_ADDR_LEN; i++)
+		hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+out:
+	return ret_val;
+}
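+
+/*
+ * Example (hypothetical address, for illustration only): if the EEPROM holds
+ * the permanent address 00:1B:21:3C:4D:5E, the function above reports it
+ * unchanged for LAN function 0 and flips the least significant bit of the
+ * last octet for function 1, yielding 00:1B:21:3C:4D:5F.
+ */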
+
+/**
+ *  e1000_validate_nvm_checksum_generic - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	DEBUGFUNC("e1000_validate_nvm_checksum_generic");
+
+	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			DEBUGOUT("NVM Read Error\n");
+			goto out;
+		}
+		checksum += nvm_data;
+	}
+
+	if (checksum != (u16) NVM_SUM) {
+		DEBUGOUT("NVM Checksum Invalid\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_update_nvm_checksum_generic - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM.
+ **/
+s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
+{
+	s32  ret_val;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	DEBUGFUNC("e1000_update_nvm_checksum");
+
+	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			DEBUGOUT("NVM Read Error while updating checksum.\n");
+			goto out;
+		}
+		checksum += nvm_data;
+	}
+	checksum = (u16) NVM_SUM - checksum;
+	ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
+	if (ret_val) {
+		DEBUGOUT("NVM Write Error while updating checksum.\n");
+	}
+
+out:
+	return ret_val;
+}
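+
+/*
+ * Example (illustrative sketch; hw and ret_val are assumed caller-provided):
+ * the checksum word is chosen so that the first NVM_CHECKSUM_REG + 1 words
+ * sum to NVM_SUM (0xBABA), so a typical sequence after modifying any NVM
+ * word is to rewrite the checksum and then re-validate:
+ *
+ *     ret_val = e1000_update_nvm_checksum_generic(hw);
+ *     if (!ret_val)
+ *             ret_val = e1000_validate_nvm_checksum_generic(hw);
+ */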
+
+/**
+ *  e1000_reload_nvm_generic - Reloads EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ *  extended control register.
+ **/
+void e1000_reload_nvm_generic(struct e1000_hw *hw)
+{
+	u32 ctrl_ext;
+
+	DEBUGFUNC("e1000_reload_nvm_generic");
+
+	usec_delay(10);
+	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+	E1000_WRITE_FLUSH(hw);
+}
+
+/* Function pointers local to this file and not intended for public use */
+
+/**
+ *  e1000_acquire_nvm - Acquire exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  For those silicon families which have implemented a NVM acquire function,
+ *  run the defined function else return success.
+ **/
+s32 e1000_acquire_nvm(struct e1000_hw *hw)
+{
+	if (hw->func.acquire_nvm)
+		return hw->func.acquire_nvm(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_release_nvm - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  For those silicon families which have implemented a NVM release function,
+ *  run the defined function, else return success.
+ **/
+void e1000_release_nvm(struct e1000_hw *hw)
+{
+	if (hw->func.release_nvm)
+		hw->func.release_nvm(hw);
+}
+
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.c	2022-03-21 12:58:29.807884885 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_defines.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+#include "e1000_mac.h"
+#include "e1000_nvm.h"
+#include "e1000_phy.h"
+
+/**
+ *  e1000_init_mac_params - Initialize MAC function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function initializes the function pointers for the MAC
+ *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_mac_params(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	if (hw->func.init_mac_params) {
+		ret_val = hw->func.init_mac_params(hw);
+		if (ret_val) {
+			DEBUGOUT("MAC Initialization Error\n");
+			goto out;
+		}
+	} else {
+		DEBUGOUT("mac.init_mac_params was NULL\n");
+		ret_val = -E1000_ERR_CONFIG;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params - Initialize NVM function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function initializes the function pointers for the NVM
+ *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_nvm_params(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	if (hw->func.init_nvm_params) {
+		ret_val = hw->func.init_nvm_params(hw);
+		if (ret_val) {
+			DEBUGOUT("NVM Initialization Error\n");
+			goto out;
+		}
+	} else {
+		DEBUGOUT("nvm.init_nvm_params was NULL\n");
+		ret_val = -E1000_ERR_CONFIG;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_phy_params - Initialize PHY function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function initializes the function pointers for the PHY
+ *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_phy_params(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	if (hw->func.init_phy_params) {
+		ret_val = hw->func.init_phy_params(hw);
+		if (ret_val) {
+			DEBUGOUT("PHY Initialization Error\n");
+			goto out;
+		}
+	} else {
+		DEBUGOUT("phy.init_phy_params was NULL\n");
+		ret_val = -E1000_ERR_CONFIG;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_mac_type - Sets MAC type
+ *  @hw: pointer to the HW structure
+ *
+ *  This function sets the mac type of the adapter based on the
+ *  device ID stored in the hw structure.
+ *  MUST BE FIRST FUNCTION CALLED (explicitly or through
+ *  e1000_setup_init_funcs()).
+ **/
+s32 e1000_set_mac_type(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_set_mac_type");
+
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82542:
+		mac->type = e1000_82542;
+		break;
+	case E1000_DEV_ID_82543GC_FIBER:
+	case E1000_DEV_ID_82543GC_COPPER:
+		mac->type = e1000_82543;
+		break;
+	case E1000_DEV_ID_82544EI_COPPER:
+	case E1000_DEV_ID_82544EI_FIBER:
+	case E1000_DEV_ID_82544GC_COPPER:
+	case E1000_DEV_ID_82544GC_LOM:
+		mac->type = e1000_82544;
+		break;
+	case E1000_DEV_ID_82540EM:
+	case E1000_DEV_ID_82540EM_LOM:
+	case E1000_DEV_ID_82540EP:
+	case E1000_DEV_ID_82540EP_LOM:
+	case E1000_DEV_ID_82540EP_LP:
+		mac->type = e1000_82540;
+		break;
+	case E1000_DEV_ID_82545EM_COPPER:
+	case E1000_DEV_ID_82545EM_FIBER:
+		mac->type = e1000_82545;
+		break;
+	case E1000_DEV_ID_82545GM_COPPER:
+	case E1000_DEV_ID_82545GM_FIBER:
+	case E1000_DEV_ID_82545GM_SERDES:
+		mac->type = e1000_82545_rev_3;
+		break;
+	case E1000_DEV_ID_82546EB_COPPER:
+	case E1000_DEV_ID_82546EB_FIBER:
+	case E1000_DEV_ID_82546EB_QUAD_COPPER:
+		mac->type = e1000_82546;
+		break;
+	case E1000_DEV_ID_82546GB_COPPER:
+	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82546GB_SERDES:
+	case E1000_DEV_ID_82546GB_PCIE:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		mac->type = e1000_82546_rev_3;
+		break;
+	case E1000_DEV_ID_82541EI:
+	case E1000_DEV_ID_82541EI_MOBILE:
+	case E1000_DEV_ID_82541ER_LOM:
+		mac->type = e1000_82541;
+		break;
+	case E1000_DEV_ID_82541ER:
+	case E1000_DEV_ID_82541GI:
+	case E1000_DEV_ID_82541GI_LF:
+	case E1000_DEV_ID_82541GI_MOBILE:
+		mac->type = e1000_82541_rev_2;
+		break;
+	case E1000_DEV_ID_82547EI:
+	case E1000_DEV_ID_82547EI_MOBILE:
+		mac->type = e1000_82547;
+		break;
+	case E1000_DEV_ID_82547GI:
+		mac->type = e1000_82547_rev_2;
+		break;
+	case E1000_DEV_ID_82571EB_COPPER:
+	case E1000_DEV_ID_82571EB_FIBER:
+	case E1000_DEV_ID_82571EB_SERDES:
+	case E1000_DEV_ID_82571EB_SERDES_DUAL:
+	case E1000_DEV_ID_82571EB_SERDES_QUAD:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82571PT_QUAD_COPPER:
+	case E1000_DEV_ID_82571EB_QUAD_FIBER:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
+		mac->type = e1000_82571;
+		break;
+	case E1000_DEV_ID_82572EI:
+	case E1000_DEV_ID_82572EI_COPPER:
+	case E1000_DEV_ID_82572EI_FIBER:
+	case E1000_DEV_ID_82572EI_SERDES:
+		mac->type = e1000_82572;
+		break;
+	case E1000_DEV_ID_82573E:
+	case E1000_DEV_ID_82573E_IAMT:
+	case E1000_DEV_ID_82573L:
+		mac->type = e1000_82573;
+		break;
+	case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
+	case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+	case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
+	case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
+		mac->type = e1000_80003es2lan;
+		break;
+	case E1000_DEV_ID_ICH8_IFE:
+	case E1000_DEV_ID_ICH8_IFE_GT:
+	case E1000_DEV_ID_ICH8_IFE_G:
+	case E1000_DEV_ID_ICH8_IGP_M:
+	case E1000_DEV_ID_ICH8_IGP_M_AMT:
+	case E1000_DEV_ID_ICH8_IGP_AMT:
+	case E1000_DEV_ID_ICH8_IGP_C:
+		mac->type = e1000_ich8lan;
+		break;
+	case E1000_DEV_ID_ICH9_IFE:
+	case E1000_DEV_ID_ICH9_IFE_GT:
+	case E1000_DEV_ID_ICH9_IFE_G:
+	case E1000_DEV_ID_ICH9_IGP_AMT:
+	case E1000_DEV_ID_ICH9_IGP_C:
+		mac->type = e1000_ich9lan;
+		break;
+	default:
+		/* Should never have loaded on this device */
+		ret_val = -E1000_ERR_MAC_INIT;
+		break;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_init_funcs - Initializes function pointers
+ *  @hw: pointer to the HW structure
+ *  @init_device: TRUE will initialize the rest of the function pointers
+ *                 getting the device ready for use.  FALSE will only set
+ *                 MAC type and the function pointers for the other init
+ *                 functions.  Passing FALSE will not generate any hardware
+ *                 reads or writes.
+ *
+ *  This function must be called by a driver in order to use the rest
+ *  of the 'shared' code files. Called by drivers only.
+ **/
+s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
+{
+	s32 ret_val;
+
+	/* Can't do much good without knowing the MAC type. */
+	ret_val = e1000_set_mac_type(hw);
+	if (ret_val) {
+		DEBUGOUT("ERROR: MAC type could not be set properly.\n");
+		goto out;
+	}
+
+	if (!hw->hw_addr) {
+		DEBUGOUT("ERROR: Registers not mapped\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	/*
+	 * Init some generic function pointers that are currently all pointing
+	 * to generic implementations. We do this first allowing a driver
+	 * module to override it afterwards.
+	 */
+	hw->func.config_collision_dist = e1000_config_collision_dist_generic;
+	hw->func.rar_set = e1000_rar_set_generic;
+	hw->func.validate_mdi_setting = e1000_validate_mdi_setting_generic;
+	hw->func.mng_host_if_write = e1000_mng_host_if_write_generic;
+	hw->func.mng_write_cmd_header = e1000_mng_write_cmd_header_generic;
+	hw->func.mng_enable_host_if = e1000_mng_enable_host_if_generic;
+	hw->func.wait_autoneg = e1000_wait_autoneg_generic;
+	hw->func.reload_nvm = e1000_reload_nvm_generic;
+
+	/*
+	 * Set up the init function pointers. These are functions within the
+ * adapter family file that set up function pointers for the rest of
+	 * the functions in that family.
+	 */
+	switch (hw->mac.type) {
+	case e1000_82542:
+		e1000_init_function_pointers_82542(hw);
+		break;
+	case e1000_82543:
+	case e1000_82544:
+		e1000_init_function_pointers_82543(hw);
+		break;
+	case e1000_82540:
+	case e1000_82545:
+	case e1000_82545_rev_3:
+	case e1000_82546:
+	case e1000_82546_rev_3:
+		e1000_init_function_pointers_82540(hw);
+		break;
+	case e1000_82541:
+	case e1000_82541_rev_2:
+	case e1000_82547:
+	case e1000_82547_rev_2:
+		e1000_init_function_pointers_82541(hw);
+		break;
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_82573:
+		e1000_init_function_pointers_82571(hw);
+		break;
+	case e1000_80003es2lan:
+		e1000_init_function_pointers_80003es2lan(hw);
+		break;
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		e1000_init_function_pointers_ich8lan(hw);
+		break;
+	default:
+		DEBUGOUT("Hardware not supported\n");
+		ret_val = -E1000_ERR_CONFIG;
+		break;
+	}
+
+	/*
+	 * Initialize the rest of the function pointers. These require some
+	 * register reads/writes in some cases.
+	 */
+	if (!(ret_val) && init_device) {
+		ret_val = e1000_init_mac_params(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_init_nvm_params(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_init_phy_params(hw);
+		if (ret_val)
+			goto out;
+
+	}
+
+out:
+	return ret_val;
+}
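+
+/*
+ * Example (illustrative sketch): typical probe-time use of the shared code.
+ * The pdev/adapter variables and the err_hw_init label are hypothetical;
+ * the important points are that device_id and hw_addr must be filled in
+ * before the call, and that passing TRUE also runs the MAC/NVM/PHY
+ * parameter init (which may touch hardware):
+ *
+ *     hw->device_id = pdev->device;
+ *     hw->hw_addr = adapter->hw_addr;
+ *     if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS)
+ *             goto err_hw_init;
+ */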
+
+/**
+ *  e1000_remove_device - Free device specific structure
+ *  @hw: pointer to the HW structure
+ *
+ *  If a device specific structure was allocated, this function will
+ *  free it. This is a function pointer entry point called by drivers.
+ **/
+void e1000_remove_device(struct e1000_hw *hw)
+{
+	if (hw->func.remove_device)
+		hw->func.remove_device(hw);
+}
+
+/**
+ *  e1000_get_bus_info - Obtain bus information for adapter
+ *  @hw: pointer to the HW structure
+ *
+ *  This obtains information about the HW bus to which the
+ *  adapter is attached and stores it in the hw structure. This is a
+ *  function pointer entry point called by drivers.
+ **/
+s32 e1000_get_bus_info(struct e1000_hw *hw)
+{
+	if (hw->func.get_bus_info)
+		return hw->func.get_bus_info(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_clear_vfta - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  This clears the VLAN filter table on the adapter. This is a function
+ *  pointer entry point called by drivers.
+ **/
+void e1000_clear_vfta(struct e1000_hw *hw)
+{
+	if (hw->func.clear_vfta)
+		hw->func.clear_vfta(hw);
+}
+
+/**
+ *  e1000_write_vfta - Write value to VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @offset: the 32-bit offset in which to write the value to.
+ *  @value: the 32-bit value to write at location offset.
+ *
+ *  This writes a 32-bit value to a 32-bit offset in the VLAN filter
+ *  table. This is a function pointer entry point called by drivers.
+ **/
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	if (hw->func.write_vfta)
+		hw->func.write_vfta(hw, offset, value);
+}
+
+/**
+ *  e1000_update_mc_addr_list - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *  @rar_used_count: the first RAR register free to program
+ *  @rar_count: total number of supported Receive Address Registers
+ *
+ *  Updates the Receive Address Registers and Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ *  The parameter rar_count will usually be hw->mac.rar_entry_count
+ *  unless there are workarounds that change this.  This is a function
+ *  pointer entry point called by drivers.
+ **/
+void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
+                               u32 mc_addr_count, u32 rar_used_count,
+                               u32 rar_count)
+{
+	if (hw->func.update_mc_addr_list)
+		hw->func.update_mc_addr_list(hw,
+		                             mc_addr_list,
+		                             mc_addr_count,
+		                             rar_used_count,
+		                             rar_count);
+}
+
+/**
+ *  e1000_force_mac_fc - Force MAC flow control
+ *  @hw: pointer to the HW structure
+ *
+ *  Force the MAC's flow control settings. Currently no func pointer exists
+ *  and all implementations are handled in the generic version of this
+ *  function.
+ **/
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+	return e1000_force_mac_fc_generic(hw);
+}
+
+/**
+ *  e1000_check_for_link - Check/Store link connection
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks the link condition of the adapter and stores the
+ *  results in the hw->mac structure. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_check_for_link(struct e1000_hw *hw)
+{
+	if (hw->func.check_for_link)
+		return hw->func.check_for_link(hw);
+
+	return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_check_mng_mode - Check management mode
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks if the adapter has manageability enabled.
+ *  This is a function pointer entry point called by drivers.
+ **/
+bool e1000_check_mng_mode(struct e1000_hw *hw)
+{
+	if (hw->func.check_mng_mode)
+		return hw->func.check_mng_mode(hw);
+
+	return FALSE;
+}
+
+/**
+ *  e1000_mng_write_dhcp_info - Writes DHCP info to host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface
+ *  @length: size of the buffer
+ *
+ *  Writes the DHCP information to the host interface.
+ **/
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
+{
+	return e1000_mng_write_dhcp_info_generic(hw, buffer, length);
+}
+
+/**
+ *  e1000_reset_hw - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state. This is a function pointer
+ *  entry point called by drivers.
+ **/
+s32 e1000_reset_hw(struct e1000_hw *hw)
+{
+	if (hw->func.reset_hw)
+		return hw->func.reset_hw(hw);
+
+	return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_init_hw - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation. This is a function
+ *  pointer entry point called by drivers.
+ **/
+s32 e1000_init_hw(struct e1000_hw *hw)
+{
+	if (hw->func.init_hw)
+		return hw->func.init_hw(hw);
+
+	return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_setup_link - Configures link and flow control
+ *  @hw: pointer to the HW structure
+ *
+ *  This configures link and flow control settings for the adapter. This
+ *  is a function pointer entry point called by drivers. While modules can
+ *  also call this, they probably call their own version of this function.
+ **/
+s32 e1000_setup_link(struct e1000_hw *hw)
+{
+	if (hw->func.setup_link)
+		return hw->func.setup_link(hw);
+
+	return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_get_speed_and_duplex - Returns current speed and duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: pointer to a 16-bit value to store the speed
+ *  @duplex: pointer to a 16-bit value to store the duplex.
+ *
+ *  This returns the speed and duplex of the adapter in the two 'out'
+ *  variables passed in. This is a function pointer entry point called
+ *  by drivers.
+ **/
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+	if (hw->func.get_link_up_info)
+		return hw->func.get_link_up_info(hw, speed, duplex);
+
+	return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_setup_led - Configures SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This prepares the SW controllable LED for use and saves the current state
+ *  of the LED so it can be later restored. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_setup_led(struct e1000_hw *hw)
+{
+	if (hw->func.setup_led)
+		return hw->func.setup_led(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_cleanup_led - Restores SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This restores the SW controllable LED to the value saved off by
+ *  e1000_setup_led. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_cleanup_led(struct e1000_hw *hw)
+{
+	if (hw->func.cleanup_led)
+		return hw->func.cleanup_led(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_blink_led - Blink SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This starts the adapter LED blinking. Request the LED to be setup first
+ *  and cleaned up after. This is a function pointer entry point called by
+ *  drivers.
+ **/
+s32 e1000_blink_led(struct e1000_hw *hw)
+{
+	if (hw->func.blink_led)
+		return hw->func.blink_led(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_on - Turn on SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Turns the SW defined LED on. This is a function pointer entry point
+ *  called by drivers.
+ **/
+s32 e1000_led_on(struct e1000_hw *hw)
+{
+	if (hw->func.led_on)
+		return hw->func.led_on(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_off - Turn off SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Turns the SW defined LED off. This is a function pointer entry point
+ *  called by drivers.
+ **/
+s32 e1000_led_off(struct e1000_hw *hw)
+{
+	if (hw->func.led_off)
+		return hw->func.led_off(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_reset_adaptive - Reset adaptive IFS
+ *  @hw: pointer to the HW structure
+ *
+ *  Resets the adaptive IFS. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+void e1000_reset_adaptive(struct e1000_hw *hw)
+{
+	e1000_reset_adaptive_generic(hw);
+}
+
+/**
+ *  e1000_update_adaptive - Update adaptive IFS
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates adapter IFS. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+void e1000_update_adaptive(struct e1000_hw *hw)
+{
+	/* TODO: adaptive IFS updates are currently disabled in this driver. */
+	/* e1000_update_adaptive_generic(hw); */
+}
+
+/**
+ *  e1000_disable_pcie_master - Disable PCI-Express master access
+ *  @hw: pointer to the HW structure
+ *
+ *  Disables PCI-Express master access and verifies there are no pending
+ *  requests. Currently no func pointer exists and all implementations are
+ *  handled in the generic version of this function.
+ **/
+s32 e1000_disable_pcie_master(struct e1000_hw *hw)
+{
+	return e1000_disable_pcie_master_generic(hw);
+}
+
+/**
+ *  e1000_config_collision_dist - Configure collision distance
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the collision distance to the default value and is used
+ *  during link setup.
+ **/
+void e1000_config_collision_dist(struct e1000_hw *hw)
+{
+	if (hw->func.config_collision_dist)
+		hw->func.config_collision_dist(hw);
+}
+
+/**
+ *  e1000_rar_set - Sets a receive address register
+ *  @hw: pointer to the HW structure
+ *  @addr: address to set the RAR to
+ *  @index: the RAR to set
+ *
+ *  Sets a Receive Address Register (RAR) to the specified address.
+ **/
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+	if (hw->func.rar_set)
+		hw->func.rar_set(hw, addr, index);
+}
+
+/**
+ *  e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state
+ *  @hw: pointer to the HW structure
+ *
+ *  Ensures that the MDI/MDIX SW state is valid.
+ **/
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+	if (hw->func.validate_mdi_setting)
+		return hw->func.validate_mdi_setting(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_mta_set - Sets multicast table bit
+ *  @hw: pointer to the HW structure
+ *  @hash_value: Multicast hash value.
+ *
+ *  This sets the bit in the multicast table corresponding to the
+ *  hash value.  This is a function pointer entry point called by drivers.
+ **/
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
+{
+	if (hw->func.mta_set)
+		hw->func.mta_set(hw, hash_value);
+}
+
+/**
+ *  e1000_hash_mc_addr - Determines address location in multicast table
+ *  @hw: pointer to the HW structure
+ *  @mc_addr: Multicast address to hash.
+ *
+ *  This hashes an address to determine its location in the multicast
+ *  table. Currently no func pointer exists and all implementations
+ *  are handled in the generic version of this function.
+ **/
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+	return e1000_hash_mc_addr_generic(hw, mc_addr);
+}
+
+/**
+ *  e1000_enable_tx_pkt_filtering - Enable packet filtering on TX
+ *  @hw: pointer to the HW structure
+ *
+ *  Enables packet filtering on transmit packets if manageability is enabled
+ *  and host interface is enabled.
+ *  Currently no func pointer exists and all implementations are handled in the
+ *  generic version of this function.
+ **/
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+	return e1000_enable_tx_pkt_filtering_generic(hw);
+}
+
+/**
+ *  e1000_mng_host_if_write - Writes to the manageability host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface buffer
+ *  @length: size of the buffer
+ *  @offset: location in the buffer to write to
+ *  @sum: sum of the data (not checksum)
+ *
+ *  This function writes the buffer content at the given offset on the host
+ *  interface.  It takes alignment into account so the writes are done in the
+ *  most efficient way, and it accumulates the sum of the data written into
+ *  the *sum parameter.
+ **/
+s32 e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer, u16 length,
+                            u16 offset, u8 *sum)
+{
+	if (hw->func.mng_host_if_write)
+		return hw->func.mng_host_if_write(hw, buffer, length, offset,
+		                                  sum);
+
+	return E1000_NOT_IMPLEMENTED;
+}
+
+/**
+ *  e1000_mng_write_cmd_header - Writes manageability command header
+ *  @hw: pointer to the HW structure
+ *  @hdr: pointer to the host interface command header
+ *
+ *  Writes the command header after performing the checksum calculation.
+ **/
+s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+                               struct e1000_host_mng_command_header *hdr)
+{
+	if (hw->func.mng_write_cmd_header)
+		return hw->func.mng_write_cmd_header(hw, hdr);
+
+	return E1000_NOT_IMPLEMENTED;
+}
+
+/**
+ *  e1000_mng_enable_host_if - Checks host interface is enabled
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ *  This function checks whether the host interface is enabled for command
+ *  operation and whether the previous command has completed.  It busy-waits
+ *  if the previous command has not yet completed.
+ **/
+s32 e1000_mng_enable_host_if(struct e1000_hw * hw)
+{
+	if (hw->func.mng_enable_host_if)
+		return hw->func.mng_enable_host_if(hw);
+
+	return E1000_NOT_IMPLEMENTED;
+}
+
+/**
+ *  e1000_wait_autoneg - Waits for autonegotiation completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Waits for autonegotiation to complete. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+	if (hw->func.wait_autoneg)
+		return hw->func.wait_autoneg(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_check_reset_block - Verifies PHY can be reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks if the PHY is in a state that can be reset or if manageability
+ *  has it tied up. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_check_reset_block(struct e1000_hw *hw)
+{
+	if (hw->func.check_reset_block)
+		return hw->func.check_reset_block(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_phy_reg - Reads PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: the register to read
+ *  @data: the buffer to store the 16-bit read.
+ *
+ *  Reads the PHY register and returns the value in data.
+ *  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	if (hw->func.read_phy_reg)
+		return hw->func.read_phy_reg(hw, offset, data);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_write_phy_reg - Writes PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: the register to write
+ *  @data: the value to write.
+ *
+ *  Writes the PHY register at offset with the value in data.
+ *  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	if (hw->func.write_phy_reg)
+		return hw->func.write_phy_reg(hw, offset, data);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_kmrn_reg - Reads register using Kumeran interface
+ *  @hw: pointer to the HW structure
+ *  @offset: the register to read
+ *  @data: the location to store the 16-bit value read.
+ *
+ *  Reads a register out of the Kumeran interface. Currently no func pointer
+ *  exists and all implementations are handled in the generic version of
+ *  this function.
+ **/
+s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return e1000_read_kmrn_reg_generic(hw, offset, data);
+}
+
+/**
+ *  e1000_write_kmrn_reg - Writes register using Kumeran interface
+ *  @hw: pointer to the HW structure
+ *  @offset: the register to write
+ *  @data: the value to write.
+ *
+ *  Writes a register to the Kumeran interface. Currently no func pointer
+ *  exists and all implementations are handled in the generic version of
+ *  this function.
+ **/
+s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return e1000_write_kmrn_reg_generic(hw, offset, data);
+}
+
+/**
+ *  e1000_get_cable_length - Retrieves cable length estimation
+ *  @hw: pointer to the HW structure
+ *
+ *  This function estimates the cable length and stores the results in
+ *  hw->phy.min_length and hw->phy.max_length. This is a function pointer
+ *  entry point called by drivers.
+ **/
+s32 e1000_get_cable_length(struct e1000_hw *hw)
+{
+	if (hw->func.get_cable_length)
+		return hw->func.get_cable_length(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_phy_info - Retrieves PHY information from registers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function gets some information from various PHY registers and
+ *  populates hw->phy values with it. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_get_phy_info(struct e1000_hw *hw)
+{
+	if (hw->func.get_phy_info)
+		return hw->func.get_phy_info(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_phy_hw_reset - Hard PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs a hard PHY reset. This is a function pointer entry point called
+ *  by drivers.
+ **/
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+	if (hw->func.reset_phy)
+		return hw->func.reset_phy(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_phy_commit - Soft PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs a soft PHY reset on those that apply. This is a function pointer
+ *  entry point called by drivers.
+ **/
+s32 e1000_phy_commit(struct e1000_hw *hw)
+{
+	if (hw->func.commit_phy)
+		return hw->func.commit_phy(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_set_d0_lplu_state - Sets low power link up state for D0
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D0
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D0
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
+{
+	if (hw->func.set_d0_lplu_state)
+		return hw->func.set_d0_lplu_state(hw, active);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_set_d3_lplu_state - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D3
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+	if (hw->func.set_d3_lplu_state)
+		return hw->func.set_d3_lplu_state(hw, active);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_mac_addr - Reads MAC address
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the MAC address out of the adapter and stores it in the HW structure.
+ *  Falls back to the generic implementation when no function pointer has
+ *  been set up.
+ **/
+s32 e1000_read_mac_addr(struct e1000_hw *hw)
+{
+	if (hw->func.read_mac_addr)
+		return hw->func.read_mac_addr(hw);
+
+	return e1000_read_mac_addr_generic(hw);
+}
+
+/**
+ *  e1000_read_pba_num - Read device part number
+ *  @hw: pointer to the HW structure
+ *  @pba_num: pointer to device part number
+ *
+ *  Reads the product board assembly (PBA) number from the EEPROM and stores
+ *  the value in pba_num.
+ *  Currently no func pointer exists and all implementations are handled in the
+ *  generic version of this function.
+ **/
+s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
+{
+	return e1000_read_pba_num_generic(hw, pba_num);
+}
+
+/**
+ *  e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Validates the NVM checksum is correct. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
+{
+	if (hw->func.validate_nvm)
+		return hw->func.validate_nvm(hw);
+
+	return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the NVM checksum. This is a function pointer entry point called
+ *  by drivers.
+ **/
+s32 e1000_update_nvm_checksum(struct e1000_hw *hw)
+{
+	if (hw->func.update_nvm)
+		return hw->func.update_nvm(hw);
+
+	return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_reload_nvm - Reloads EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ *  extended control register.
+ **/
+void e1000_reload_nvm(struct e1000_hw *hw)
+{
+	if (hw->func.reload_nvm)
+		hw->func.reload_nvm(hw);
+}
+
+/**
+ *  e1000_read_nvm - Reads NVM (EEPROM)
+ *  @hw: pointer to the HW structure
+ *  @offset: the word offset to read
+ *  @words: number of 16-bit words to read
+ *  @data: pointer to the properly sized buffer for the data.
+ *
+ *  Reads 16-bit chunks of data from the NVM (EEPROM). This is a function
+ *  pointer entry point called by drivers.
+ **/
+s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	if (hw->func.read_nvm)
+		return hw->func.read_nvm(hw, offset, words, data);
+
+	return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_write_nvm - Writes to NVM (EEPROM)
+ *  @hw: pointer to the HW structure
+ *  @offset: the word offset to read
+ *  @words: number of 16-bit words to write
+ *  @data: pointer to the properly sized buffer for the data.
+ *
+ *  Writes 16-bit chunks of data to the NVM (EEPROM). This is a function
+ *  pointer entry point called by drivers.
+ **/
+s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	if (hw->func.write_nvm)
+		return hw->func.write_nvm(hw, offset, words, data);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_write_8bit_ctrl_reg - Writes 8bit Control register
+ *  @hw: pointer to the HW structure
+ *  @reg: 32bit register offset
+ *  @offset: the register to write
+ *  @data: the value to write.
+ *
+ *  Writes the 8-bit value in data to the given control register at offset.
+ *  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
+                              u8 data)
+{
+	return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data);
+}
+
+/**
+ * e1000_power_up_phy - Restores link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * The PHY may be powered down to save power, to turn off the link when the
+ * driver is unloaded, or when wake-on-LAN is not enabled (among other reasons).
+ **/
+void e1000_power_up_phy(struct e1000_hw *hw)
+{
+	if (hw->func.power_up_phy)
+		hw->func.power_up_phy(hw);
+
+	e1000_setup_link(hw);
+}
+
+/**
+ * e1000_power_down_phy - Power down PHY
+ * @hw: pointer to the HW structure
+ *
+ * The PHY may be powered down to save power, to turn off the link when the
+ * driver is unloaded, or when wake-on-LAN is not enabled (among other reasons).
+ **/
+void e1000_power_down_phy(struct e1000_hw *hw)
+{
+	if (hw->func.power_down_phy)
+		hw->func.power_down_phy(hw);
+}
+
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_defines.h	2022-03-21 12:58:29.802884934 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_DEFINES_H_
+#define _E1000_DEFINES_H_
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE  8
+#define REQ_RX_DESCRIPTOR_MULTIPLE  8
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME       0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN     0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME     0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_LSCWE      0x00000010 /* Link Status wake up enable */
+#define E1000_WUC_LSCWO      0x00000020 /* Link Status wake up override */
+#define E1000_WUC_SPM        0x80000000 /* Enable SPM */
+#define E1000_WUC_PHY_WAKE   0x00000100 /* if PHY supports wakeup */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP  0x00000020 /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define E1000_WUFC_IGNORE_TCO   0x00008000 /* Ignore WakeOn TCO packets */
+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define E1000_WUFC_ALL_FILTERS  0x000F00FF /* Mask for all wakeup filters */
+#define E1000_WUFC_FLX_OFFSET   16 /* Offset to the Flexible Filters bits */
+#define E1000_WUFC_FLX_FILTERS  0x000F0000 /* Mask for the 4 flexible filters */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC         E1000_WUFC_LNKC
+#define E1000_WUS_MAG          E1000_WUFC_MAG
+#define E1000_WUS_EX           E1000_WUFC_EX
+#define E1000_WUS_MC           E1000_WUFC_MC
+#define E1000_WUS_BC           E1000_WUFC_BC
+#define E1000_WUS_ARP          E1000_WUFC_ARP
+#define E1000_WUS_IPV4         E1000_WUFC_IPV4
+#define E1000_WUS_IPV6         E1000_WUFC_IPV6
+#define E1000_WUS_FLX0         E1000_WUFC_FLX0
+#define E1000_WUS_FLX1         E1000_WUFC_FLX1
+#define E1000_WUS_FLX2         E1000_WUFC_FLX2
+#define E1000_WUS_FLX3         E1000_WUFC_FLX3
+#define E1000_WUS_FLX_FILTERS  E1000_WUFC_FLX_FILTERS
+
+/* Wake Up Packet Length */
+#define E1000_WUPL_LENGTH_MASK 0x0FFF   /* Only the lower 12 bits are valid */
+
+/* Four Flexible Filters are supported */
+#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define E1000_FLEXIBLE_FILTER_SIZE_MAX  128
+
+#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
+#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_GPI0_EN   0x00000001 /* Maps SDP4 to GPI0 */
+#define E1000_CTRL_EXT_GPI1_EN   0x00000002 /* Maps SDP5 to GPI1 */
+#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
+#define E1000_CTRL_EXT_GPI2_EN   0x00000004 /* Maps SDP6 to GPI2 */
+#define E1000_CTRL_EXT_GPI3_EN   0x00000008 /* Maps SDP7 to GPI3 */
+/* Reserved (bits 4,5) in >= 82575 */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */
+#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
+#define E1000_CTRL_EXT_PHY_INT   E1000_CTRL_EXT_SDP5_DATA
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */
+#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */
+/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */
+#define E1000_CTRL_EXT_SDP4_DIR  0x00000100 /* Direction of SDP4 0=in 1=out */
+#define E1000_CTRL_EXT_SDP5_DIR  0x00000200 /* Direction of SDP5 0=in 1=out */
+#define E1000_CTRL_EXT_SDP6_DIR  0x00000400 /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP7_DIR  0x00000800 /* Direction of SDP7 0=in 1=out */
+#define E1000_CTRL_EXT_ASDCHK    0x00001000 /* Initiate an ASD sequence */
+#define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_IPS       0x00004000 /* Invert Power State */
+#define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_TBI  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_KMRN    0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIX_SERDES  0x00800000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII   0x00800000
+#define E1000_CTRL_EXT_EIAME          0x01000000
+#define E1000_CTRL_EXT_IRCA           0x00000001
+#define E1000_CTRL_EXT_WR_WMARK_MASK  0x03000000
+#define E1000_CTRL_EXT_WR_WMARK_256   0x00000000
+#define E1000_CTRL_EXT_WR_WMARK_320   0x01000000
+#define E1000_CTRL_EXT_WR_WMARK_384   0x02000000
+#define E1000_CTRL_EXT_WR_WMARK_448   0x03000000
+#define E1000_CTRL_EXT_CANC           0x04000000 /* Interrupt delay cancellation */
+#define E1000_CTRL_EXT_DRV_LOAD       0x10000000 /* Driver loaded bit for FW */
+/* IAME enable bit (27) was removed in >= 82575 */
+#define E1000_CTRL_EXT_IAME           0x08000000 /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_INT_TIMER_CLR  0x20000000 /* Clear Interrupt timers after IMS clear */
+#define E1000_CRTL_EXT_PB_PAREN       0x01000000 /* packet buffer parity error detection enabled */
+#define E1000_CTRL_EXT_DF_PAREN       0x02000000 /* descriptor FIFO parity error detection enable */
+#define E1000_CTRL_EXT_GHOST_PAREN    0x40000000
+#define E1000_CTRL_EXT_PBA_CLR        0x80000000 /* PBA Clear */
+#define E1000_I2CCMD_REG_ADDR_SHIFT   16
+#define E1000_I2CCMD_REG_ADDR         0x00FF0000
+#define E1000_I2CCMD_PHY_ADDR_SHIFT   24
+#define E1000_I2CCMD_PHY_ADDR         0x07000000
+#define E1000_I2CCMD_OPCODE_READ      0x08000000
+#define E1000_I2CCMD_OPCODE_WRITE     0x00000000
+#define E1000_I2CCMD_RESET            0x10000000
+#define E1000_I2CCMD_READY            0x20000000
+#define E1000_I2CCMD_INTERRUPT_ENA    0x40000000
+#define E1000_I2CCMD_ERROR            0x80000000
+#define E1000_MAX_SGMII_PHY_REG_ADDR  255
+#define E1000_I2CCMD_PHY_TIMEOUT      200
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
+#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
+#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
+#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF      0x80    /* passed in-exact filter */
+#define E1000_RXD_STAT_CRCV     0x100   /* Speculative CRC Valid */
+#define E1000_RXD_STAT_IPIDV    0x200   /* IP identification valid */
+#define E1000_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */
+#define E1000_RXD_STAT_DYNINT   0x800   /* Pkt caused INT via DYNINT */
+#define E1000_RXD_STAT_ACK      0x8000  /* ACK Packet indication */
+#define E1000_RXD_ERR_CE        0x01    /* CRC Error */
+#define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
+#define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
+#define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
+#define E1000_RXD_SPC_PRI_MASK  0xE000  /* Priority is in upper 3 bits */
+#define E1000_RXD_SPC_PRI_SHIFT 13
+#define E1000_RXD_SPC_CFI_MASK  0x1000  /* CFI is bit 12 */
+#define E1000_RXD_SPC_CFI_SHIFT 12
+
+#define E1000_RXDEXT_STATERR_CE    0x01000000
+#define E1000_RXDEXT_STATERR_SE    0x02000000
+#define E1000_RXDEXT_STATERR_SEQ   0x04000000
+#define E1000_RXDEXT_STATERR_CXE   0x10000000
+#define E1000_RXDEXT_STATERR_TCPE  0x20000000
+#define E1000_RXDEXT_STATERR_IPE   0x40000000
+#define E1000_RXDEXT_STATERR_RXE   0x80000000
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+    E1000_RXD_ERR_CE  |                \
+    E1000_RXD_ERR_SE  |                \
+    E1000_RXD_ERR_SEQ |                \
+    E1000_RXD_ERR_CXE |                \
+    E1000_RXD_ERR_RXE)
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+    E1000_RXDEXT_STATERR_CE  |            \
+    E1000_RXDEXT_STATERR_SE  |            \
+    E1000_RXDEXT_STATERR_SEQ |            \
+    E1000_RXDEXT_STATERR_CXE |            \
+    E1000_RXDEXT_STATERR_RXE)
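+
+/*
+ * Example (illustrative sketch; rx_desc and discard are hypothetical locals
+ * in a legacy-descriptor receive path): a driver would typically test the
+ * descriptor error byte against the mask above and drop the frame on any
+ * match:
+ *
+ *     if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)
+ *             discard = TRUE;
+ */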
+
+#define E1000_MRQC_ENABLE_MASK                 0x00000007
+#define E1000_MRQC_ENABLE_RSS_2Q               0x00000001
+#define E1000_MRQC_ENABLE_RSS_INT              0x00000004
+#define E1000_MRQC_RSS_FIELD_MASK              0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP          0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4              0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX       0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6_EX           0x00080000
+#define E1000_MRQC_RSS_FIELD_IPV6              0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP          0x00200000
+
+#define E1000_RXDPS_HDRSTAT_HDRSP              0x00008000
+#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK        0x000003FF
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN        0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_R_ON_FORCE    0x00000004 /* Reset on Force TCO - RO */
+#define E1000_MANC_RMCP_EN       0x00000100 /* Enable RCMP 026Fh Filtering */
+#define E1000_MANC_0298_EN       0x00000200 /* Enable RCMP 0298h Filtering */
+#define E1000_MANC_IPV4_EN       0x00000400 /* Enable IPv4 */
+#define E1000_MANC_IPV6_EN       0x00000800 /* Enable IPv6 */
+#define E1000_MANC_SNAP_EN       0x00001000 /* Accept LLC/SNAP */
+#define E1000_MANC_ARP_EN        0x00002000 /* Enable ARP Request Filtering */
+/* Enable Neighbor Discovery Filtering */
+#define E1000_MANC_NEIGHBOR_EN   0x00004000
+#define E1000_MANC_ARP_RES_EN    0x00008000 /* Enable ARP response Filtering */
+#define E1000_MANC_TCO_RESET     0x00010000 /* TCO Reset Occurred */
+#define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
+#define E1000_MANC_RCV_ALL       0x00080000 /* Receive All Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
+/* Enable MAC address filtering */
+#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000
+/* Enable MNG packets to host memory */
+#define E1000_MANC_EN_MNG2HOST   0x00200000
+/* Enable IP address filtering */
+#define E1000_MANC_EN_IP_ADDR_FILTER    0x00400000
+#define E1000_MANC_EN_XSUM_FILTER   0x00800000 /* Enable checksum filtering */
+#define E1000_MANC_BR_EN            0x01000000 /* Enable broadcast filtering */
+#define E1000_MANC_SMB_REQ       0x01000000 /* SMBus Request */
+#define E1000_MANC_SMB_GNT       0x02000000 /* SMBus Grant */
+#define E1000_MANC_SMB_CLK_IN    0x04000000 /* SMBus Clock In */
+#define E1000_MANC_SMB_DATA_IN   0x08000000 /* SMBus Data In */
+#define E1000_MANC_SMB_DATA_OUT  0x10000000 /* SMBus Data Out */
+#define E1000_MANC_SMB_CLK_OUT   0x20000000 /* SMBus Clock Out */
+
+#define E1000_MANC_SMB_DATA_OUT_SHIFT  28 /* SMBus Data Out Shift */
+#define E1000_MANC_SMB_CLK_OUT_SHIFT   29 /* SMBus Clock Out Shift */
+
+/* Receive Control */
+#define E1000_RCTL_RST            0x00000001    /* Software reset */
+#define E1000_RCTL_EN             0x00000002    /* enable */
+#define E1000_RCTL_SBP            0x00000004    /* store bad packet */
+#define E1000_RCTL_UPE            0x00000008    /* unicast promiscuous enable */
+#define E1000_RCTL_MPE            0x00000010    /* multicast promiscuous enab */
+#define E1000_RCTL_LPE            0x00000020    /* long packet enable */
+#define E1000_RCTL_LBM_NO         0x00000000    /* no loopback mode */
+#define E1000_RCTL_LBM_MAC        0x00000040    /* MAC loopback mode */
+#define E1000_RCTL_LBM_SLP        0x00000080    /* serial link loopback mode */
+#define E1000_RCTL_LBM_TCVR       0x000000C0    /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_MASK      0x00000C00    /* Descriptor type mask */
+#define E1000_RCTL_DTYP_PS        0x00000400    /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF     0x00000000    /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_QUAT     0x00000100    /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_EIGTH    0x00000200    /* rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT       12            /* multicast offset shift */
+#define E1000_RCTL_MO_0           0x00000000    /* multicast offset 11:0 */
+#define E1000_RCTL_MO_1           0x00001000    /* multicast offset 12:1 */
+#define E1000_RCTL_MO_2           0x00002000    /* multicast offset 13:2 */
+#define E1000_RCTL_MO_3           0x00003000    /* multicast offset 15:4 */
+#define E1000_RCTL_MDR            0x00004000    /* multicast desc ring 0 */
+#define E1000_RCTL_BAM            0x00008000    /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048        0x00000000    /* rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024        0x00010000    /* rx buffer size 1024 */
+#define E1000_RCTL_SZ_512         0x00020000    /* rx buffer size 512 */
+#define E1000_RCTL_SZ_256         0x00030000    /* rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384       0x00010000    /* rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192        0x00020000    /* rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096        0x00030000    /* rx buffer size 4096 */
+#define E1000_RCTL_VFE            0x00040000    /* vlan filter enable */
+#define E1000_RCTL_CFIEN          0x00080000    /* canonical form enable */
+#define E1000_RCTL_CFI            0x00100000    /* canonical form indicator */
+#define E1000_RCTL_DPF            0x00400000    /* discard pause frames */
+#define E1000_RCTL_PMCF           0x00800000    /* pass MAC control frames */
+#define E1000_RCTL_BSEX           0x02000000    /* Buffer size extension */
+#define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */
+#define E1000_RCTL_FLXBUF_MASK    0x78000000    /* Flexible buffer size */
+#define E1000_RCTL_FLXBUF_SHIFT   27            /* Flexible buffer shift */
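+
+/*
+ * Example (illustrative sketch; rctl is an assumed local holding the value
+ * to be written to the RCTL register): the SZ_* encodings are only
+ * meaningful together with the BSEX bit, e.g. 0x00010000 selects 1024-byte
+ * buffers with BSEX clear but 16384-byte buffers with BSEX set:
+ *
+ *     rctl &= ~(0x00030000 | E1000_RCTL_BSEX);
+ *     rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;   (4096-byte buffers)
+ */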
+
+/*
+ * Use byte values for the following shift parameters
+ * Usage:
+ *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE0_MASK) |
+ *                ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE1_MASK) |
+ *                ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE2_MASK) |
+ *                ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256],  default=256
+ *       value1 = [1024..64512], default=4096
+ *       value2 = [0..64512],    default=4096
+ *       value3 = [0..64512],    default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK   0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT  7            /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT  2            /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT  6            /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14            /* Shift _left_ 14 */
+
+/* SWFW_SYNC Definitions */
+#define E1000_SWFW_EEP_SM   0x1
+#define E1000_SWFW_PHY0_SM  0x2
+#define E1000_SWFW_PHY1_SM  0x4
+#define E1000_SWFW_CSR_SM   0x8
+
+/* FACTPS Definitions */
+#define E1000_FACTPS_LFS    0x40000000  /* LAN Function Select */
+/* Device Control */
+#define E1000_CTRL_FD       0x00000001  /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_BEM      0x00000002  /* Endian Mode.0=little,1=big */
+#define E1000_CTRL_PRIOR    0x00000004  /* Priority on PCI. 0=rx,1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
+#define E1000_CTRL_LRST     0x00000008  /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_TME      0x00000010  /* Test mode. 0=normal,1=test */
+#define E1000_CTRL_SLE      0x00000020  /* Serial Link on 0=dis,1=en */
+#define E1000_CTRL_ASDE     0x00000020  /* Auto-speed detect enable */
+#define E1000_CTRL_SLU      0x00000040  /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS     0x00000080  /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL  0x00000300  /* Speed Select Mask */
+#define E1000_CTRL_SPD_10   0x00000000  /* Force 10Mb */
+#define E1000_CTRL_SPD_100  0x00000100  /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200  /* Force 1Gb */
+#define E1000_CTRL_BEM32    0x00000400  /* Big Endian 32 mode */
+#define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
+#define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
+#define E1000_CTRL_D_UD_EN  0x00002000  /* Dock/Undock enable */
+#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
+#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */
+#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */
+#define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2  0x00100000  /* SWDPIN 2 value */
+#define E1000_CTRL_SWDPIN3  0x00200000  /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SWDPIO1  0x00800000  /* SWDPIN 1 input or output */
+#define E1000_CTRL_SWDPIO2  0x01000000  /* SWDPIN 2 input or output */
+#define E1000_CTRL_SWDPIO3  0x02000000  /* SWDPIN 3 input or output */
+#define E1000_CTRL_RST      0x04000000  /* Global reset */
+#define E1000_CTRL_RFCE     0x08000000  /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE     0x10000000  /* Transmit flow control enable */
+#define E1000_CTRL_RTE      0x20000000  /* Routing tag enable */
+#define E1000_CTRL_VME      0x40000000  /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST  0x80000000  /* PHY Reset */
+#define E1000_CTRL_SW2FW_INT 0x02000000  /* Initiate an interrupt to manageability engine */
+#define E1000_CTRL_I2C_ENA  0x02000000  /* I2C enable */
+
+/* Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+#define E1000_CTRL_PHY_RESET_DIR  E1000_CTRL_SWDPIO0
+#define E1000_CTRL_PHY_RESET      E1000_CTRL_SWDPIN0
+#define E1000_CTRL_MDIO_DIR       E1000_CTRL_SWDPIO2
+#define E1000_CTRL_MDIO           E1000_CTRL_SWDPIN2
+#define E1000_CTRL_MDC_DIR        E1000_CTRL_SWDPIO3
+#define E1000_CTRL_MDC            E1000_CTRL_SWDPIN3
+#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR
+#define E1000_CTRL_PHY_RESET4     E1000_CTRL_EXT_SDP4_DATA
+
+#define E1000_CONNSW_ENRGSRC             0x4
+#define E1000_PCS_LCTL_FLV_LINK_UP       1
+#define E1000_PCS_LCTL_FSV_10            0
+#define E1000_PCS_LCTL_FSV_100           2
+#define E1000_PCS_LCTL_FSV_1000          4
+#define E1000_PCS_LCTL_FDV_FULL          8
+#define E1000_PCS_LCTL_FSD               0x10
+#define E1000_PCS_LCTL_FORCE_LINK        0x20
+#define E1000_PCS_LCTL_LOW_LINK_LATCH    0x40
+#define E1000_PCS_LCTL_AN_ENABLE         0x10000
+#define E1000_PCS_LCTL_AN_RESTART        0x20000
+#define E1000_PCS_LCTL_AN_TIMEOUT        0x40000
+#define E1000_PCS_LCTL_AN_SGMII_BYPASS   0x80000
+#define E1000_PCS_LCTL_AN_SGMII_TRIGGER  0x100000
+#define E1000_PCS_LCTL_FAST_LINK_TIMER   0x1000000
+#define E1000_PCS_LCTL_LINK_OK_FIX       0x2000000
+#define E1000_PCS_LCTL_CRS_ON_NI         0x4000000
+#define E1000_ENABLE_SERDES_LOOPBACK     0x0410
+
+#define E1000_PCS_LSTS_LINK_OK           1
+#define E1000_PCS_LSTS_SPEED_10          0
+#define E1000_PCS_LSTS_SPEED_100         2
+#define E1000_PCS_LSTS_SPEED_1000        4
+#define E1000_PCS_LSTS_DUPLEX_FULL       8
+#define E1000_PCS_LSTS_SYNK_OK           0x10
+#define E1000_PCS_LSTS_AN_COMPLETE       0x10000
+#define E1000_PCS_LSTS_AN_PAGE_RX        0x20000
+#define E1000_PCS_LSTS_AN_TIMED_OUT      0x40000
+#define E1000_PCS_LSTS_AN_REMOTE_FAULT   0x80000
+#define E1000_PCS_LSTS_AN_ERROR_RWS      0x100000
+
+/* Device Status */
+#define E1000_STATUS_FD         0x00000001      /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU         0x00000002      /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK  0x0000000C      /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_0     0x00000000      /* Function 0 */
+#define E1000_STATUS_FUNC_1     0x00000004      /* Function 1 */
+#define E1000_STATUS_TXOFF      0x00000010      /* transmission paused */
+#define E1000_STATUS_TBIMODE    0x00000020      /* TBI mode */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_10   0x00000000      /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100  0x00000040      /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080      /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200   /* Lan Init Completion by NVM */
+#define E1000_STATUS_ASDV       0x00000300      /* Auto speed detect value */
+#define E1000_STATUS_DOCK_CI    0x00000800      /* Change in Dock/Undock state. Clear on write '0'. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+#define E1000_STATUS_MTXCKOK    0x00000400      /* MTX clock running OK */
+#define E1000_STATUS_PCI66      0x00000800      /* In 66Mhz slot */
+#define E1000_STATUS_BUS64      0x00001000      /* In 64 bit slot */
+#define E1000_STATUS_PCIX_MODE  0x00002000      /* PCI-X mode */
+#define E1000_STATUS_PCIX_SPEED 0x0000C000      /* PCI-X bus speed */
+#define E1000_STATUS_BMC_SKU_0  0x00100000 /* BMC USB redirect disabled */
+#define E1000_STATUS_BMC_SKU_1  0x00200000 /* BMC SRAM disabled */
+#define E1000_STATUS_BMC_SKU_2  0x00400000 /* BMC SDRAM disabled */
+#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
+#define E1000_STATUS_BMC_LITE   0x01000000 /* BMC external code execution disabled */
+#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
+#define E1000_STATUS_FUSE_8       0x04000000
+#define E1000_STATUS_FUSE_9       0x08000000
+#define E1000_STATUS_SERDES0_DIS  0x10000000 /* SERDES disabled on port 0 */
+#define E1000_STATUS_SERDES1_DIS  0x20000000 /* SERDES disabled on port 1 */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+#define E1000_STATUS_PCIX_SPEED_66  0x00000000 /* PCI-X bus speed  50-66 MHz */
+#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed  66-100 MHz */
+#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
+
+#define SPEED_10    10
+#define SPEED_100   100
+#define SPEED_1000  1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+#define PHY_FORCE_TIME   20
+
+#define ADVERTISE_10_HALF                 0x0001
+#define ADVERTISE_10_FULL                 0x0002
+#define ADVERTISE_100_HALF                0x0004
+#define ADVERTISE_100_FULL                0x0008
+#define ADVERTISE_1000_HALF               0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL               0x0020
+
+/* 1000/H is not supported, nor spec-compliant. */
+#define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF |   ADVERTISE_10_FULL | \
+                                ADVERTISE_100_HALF |  ADVERTISE_100_FULL | \
+                                                     ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG      ( ADVERTISE_10_HALF |   ADVERTISE_10_FULL | \
+                                ADVERTISE_100_HALF |  ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED    (ADVERTISE_100_HALF |  ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED      (ADVERTISE_10_HALF |   ADVERTISE_10_FULL)
+#define E1000_ALL_FULL_DUPLEX   (ADVERTISE_10_FULL |  ADVERTISE_100_FULL | \
+                                                     ADVERTISE_1000_FULL)
+#define E1000_ALL_HALF_DUPLEX   (ADVERTISE_10_HALF |  ADVERTISE_100_HALF)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT   E1000_ALL_SPEED_DUPLEX
+
+/* LED Control */
+#define E1000_LEDCTL_LED0_MODE_MASK       0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT      0
+#define E1000_LEDCTL_LED0_BLINK_RATE      0x00000020
+#define E1000_LEDCTL_LED0_IVRT            0x00000040
+#define E1000_LEDCTL_LED0_BLINK           0x00000080
+#define E1000_LEDCTL_LED1_MODE_MASK       0x00000F00
+#define E1000_LEDCTL_LED1_MODE_SHIFT      8
+#define E1000_LEDCTL_LED1_BLINK_RATE      0x00002000
+#define E1000_LEDCTL_LED1_IVRT            0x00004000
+#define E1000_LEDCTL_LED1_BLINK           0x00008000
+#define E1000_LEDCTL_LED2_MODE_MASK       0x000F0000
+#define E1000_LEDCTL_LED2_MODE_SHIFT      16
+#define E1000_LEDCTL_LED2_BLINK_RATE      0x00200000
+#define E1000_LEDCTL_LED2_IVRT            0x00400000
+#define E1000_LEDCTL_LED2_BLINK           0x00800000
+#define E1000_LEDCTL_LED3_MODE_MASK       0x0F000000
+#define E1000_LEDCTL_LED3_MODE_SHIFT      24
+#define E1000_LEDCTL_LED3_BLINK_RATE      0x20000000
+#define E1000_LEDCTL_LED3_IVRT            0x40000000
+#define E1000_LEDCTL_LED3_BLINK           0x80000000
+
+#define E1000_LEDCTL_MODE_LINK_10_1000  0x0
+#define E1000_LEDCTL_MODE_LINK_100_1000 0x1
+#define E1000_LEDCTL_MODE_LINK_UP       0x2
+#define E1000_LEDCTL_MODE_ACTIVITY      0x3
+#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4
+#define E1000_LEDCTL_MODE_LINK_10       0x5
+#define E1000_LEDCTL_MODE_LINK_100      0x6
+#define E1000_LEDCTL_MODE_LINK_1000     0x7
+#define E1000_LEDCTL_MODE_PCIX_MODE     0x8
+#define E1000_LEDCTL_MODE_FULL_DUPLEX   0x9
+#define E1000_LEDCTL_MODE_COLLISION     0xA
+#define E1000_LEDCTL_MODE_BUS_SPEED     0xB
+#define E1000_LEDCTL_MODE_BUS_SIZE      0xC
+#define E1000_LEDCTL_MODE_PAUSED        0xD
+#define E1000_LEDCTL_MODE_LED_ON        0xE
+#define E1000_LEDCTL_MODE_LED_OFF       0xF
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D     0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C     0x00000000 /* Context Descriptor */
+#define E1000_TXD_POPTS_SHIFT 8         /* POPTS shift */
+#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP    0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC     0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS     0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS    0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE    0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC    0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC    0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU    0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP    0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP     0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE    0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC    0x00000004 /* Tx Underrun */
+/* Extended desc bits for Linksec and timesync */
+
+/* Transmit Control */
+#define E1000_TCTL_RST    0x00000001    /* software reset */
+#define E1000_TCTL_EN     0x00000002    /* enable tx */
+#define E1000_TCTL_BCE    0x00000004    /* busy check enable */
+#define E1000_TCTL_PSP    0x00000008    /* pad short packets */
+#define E1000_TCTL_CT     0x00000ff0    /* collision threshold */
+#define E1000_TCTL_COLD   0x003ff000    /* collision distance */
+#define E1000_TCTL_SWXOFF 0x00400000    /* SW Xoff transmission */
+#define E1000_TCTL_PBE    0x00800000    /* Packet Burst Enable */
+#define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
+#define E1000_TCTL_NRTU   0x02000000    /* No Re-transmit on underrun */
+#define E1000_TCTL_MULR   0x10000000    /* Multiple request support */
+
+/* Transmit Arbitration Count */
+#define E1000_TARC0_ENABLE     0x00000400   /* Enable Tx Queue 0 */
+
+/* SerDes Control */
+#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_PCSS_MASK 0x000000FF   /* Packet Checksum Start */
+#define E1000_RXCSUM_IPOFL     0x00000100   /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL     0x00000200   /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPV6OFL   0x00000400   /* IPv6 checksum offload */
+#define E1000_RXCSUM_CRCOFL    0x00000800   /* CRC32 offload enable */
+#define E1000_RXCSUM_IPPCSE    0x00001000   /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD      0x00002000   /* packet checksum disabled */
+
+/* Header split receive */
+#define E1000_RFCTL_ISCSI_DIS           0x00000001
+#define E1000_RFCTL_ISCSI_DWC_MASK      0x0000003E
+#define E1000_RFCTL_ISCSI_DWC_SHIFT     1
+#define E1000_RFCTL_NFSW_DIS            0x00000040
+#define E1000_RFCTL_NFSR_DIS            0x00000080
+#define E1000_RFCTL_NFS_VER_MASK        0x00000300
+#define E1000_RFCTL_NFS_VER_SHIFT       8
+#define E1000_RFCTL_IPV6_DIS            0x00000400
+#define E1000_RFCTL_IPV6_XSUM_DIS       0x00000800
+#define E1000_RFCTL_ACK_DIS             0x00001000
+#define E1000_RFCTL_ACKD_DIS            0x00002000
+#define E1000_RFCTL_IPFRSP_DIS          0x00004000
+#define E1000_RFCTL_EXTEN               0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS         0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS    0x00020000
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD       15
+#define E1000_CT_SHIFT                  4
+#define E1000_COLLISION_DISTANCE        63
+#define E1000_COLD_SHIFT                12
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82542_TIPG_IPGT        10
+#define DEFAULT_82543_TIPG_IPGT_FIBER  9
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK  0x000003FF
+#define E1000_TIPG_IPGR1_MASK 0x000FFC00
+#define E1000_TIPG_IPGR2_MASK 0x3FF00000
+
+#define DEFAULT_82542_TIPG_IPGR1 2
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT  10
+
+#define DEFAULT_82542_TIPG_IPGR2 10
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT  20
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.3ac packet */
+
+#define ETHERNET_FCS_SIZE       4
+#define MAX_JUMBO_FRAME_SIZE    0x3F00
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP      0x00000020
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE       0x00000001
+#define E1000_EXTCNF_CTRL_SWFLAG                 0x00000020
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK   0x00FF0000
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT          16
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK   0x0FFF0000
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT          16
+
+#define E1000_PHY_CTRL_SPD_EN             0x00000001
+#define E1000_PHY_CTRL_D0A_LPLU           0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU        0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE        0x00000040
+
+#define E1000_KABGTXD_BGSQLBIAS           0x00050000
+
+/* PBA constants */
+#define E1000_PBA_8K  0x0008    /* 8KB */
+#define E1000_PBA_12K 0x000C    /* 12KB */
+#define E1000_PBA_16K 0x0010    /* 16KB */
+#define E1000_PBA_20K 0x0014
+#define E1000_PBA_22K 0x0016
+#define E1000_PBA_24K 0x0018
+#define E1000_PBA_30K 0x001E
+#define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_38K 0x0026
+#define E1000_PBA_40K 0x0028
+#define E1000_PBA_48K 0x0030    /* 48KB */
+#define E1000_PBA_64K 0x0040    /* 64KB */
+
+#define E1000_PBS_16K E1000_PBA_16K
+#define E1000_PBS_24K E1000_PBA_24K
+
+#define IFS_MAX       80
+#define IFS_MIN       40
+#define IFS_RATIO     4
+#define IFS_STEP      10
+#define MIN_NUM_XMITS 1000
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI         0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI      0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_WMNG         0x00000004 /* Wake MNG Clock */
+#define E1000_SWSM_DRV_LOAD     0x00000008 /* Driver Loaded Bit */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW          0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE          0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC           0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ         0x00000008 /* rx sequence error */
+#define E1000_ICR_RXDMT0        0x00000010 /* rx desc min. threshold (0) */
+#define E1000_ICR_RXO           0x00000040 /* rx overrun */
+#define E1000_ICR_RXT0          0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_MDAC          0x00000200 /* MDIO access complete */
+#define E1000_ICR_RXCFG         0x00000400 /* Rx /c/ ordered set */
+#define E1000_ICR_GPI_EN0       0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1       0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2       0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3       0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW       0x00008000
+#define E1000_ICR_SRPD          0x00010000
+#define E1000_ICR_ACK           0x00020000 /* Receive Ack frame */
+#define E1000_ICR_MNG           0x00040000 /* Manageability event */
+#define E1000_ICR_DOCK          0x00080000 /* Dock/Undock */
+#define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICR_HOST_ARB_PAR  0x00400000 /* host arb read buffer parity error */
+#define E1000_ICR_PB_PAR        0x00800000 /* packet buffer parity error */
+#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICR_ALL_PARITY    0x03F00000 /* all parity error bits */
+#define E1000_ICR_DSW           0x00000020 /* FW changed the status of DISSW bit in the FWSM */
+#define E1000_ICR_PHYINT        0x00001000 /* LAN connected device generates an interrupt */
+#define E1000_ICR_EPRST         0x00100000 /* ME hardware reset occurs */
+
+/* Extended Interrupt Cause Read */
+#define E1000_EICR_RX_QUEUE0    0x00000001 /* Rx Queue 0 Interrupt */
+#define E1000_EICR_RX_QUEUE1    0x00000002 /* Rx Queue 1 Interrupt */
+#define E1000_EICR_RX_QUEUE2    0x00000004 /* Rx Queue 2 Interrupt */
+#define E1000_EICR_RX_QUEUE3    0x00000008 /* Rx Queue 3 Interrupt */
+#define E1000_EICR_TX_QUEUE0    0x00000100 /* Tx Queue 0 Interrupt */
+#define E1000_EICR_TX_QUEUE1    0x00000200 /* Tx Queue 1 Interrupt */
+#define E1000_EICR_TX_QUEUE2    0x00000400 /* Tx Queue 2 Interrupt */
+#define E1000_EICR_TX_QUEUE3    0x00000800 /* Tx Queue 3 Interrupt */
+#define E1000_EICR_TCP_TIMER    0x40000000 /* TCP Timer */
+#define E1000_EICR_OTHER        0x80000000 /* Interrupt Cause Active */
+/* TCP Timer */
+#define E1000_TCPTIMER_KS       0x00000100 /* KickStart */
+#define E1000_TCPTIMER_COUNT_ENABLE       0x00000200 /* Count Enable */
+#define E1000_TCPTIMER_COUNT_FINISH       0x00000400 /* Count finish */
+#define E1000_TCPTIMER_LOOP     0x00000800 /* Loop */
+
+/*
+ * This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ */
+#define POLL_IMS_ENABLE_MASK ( \
+    E1000_IMS_RXDMT0 |    \
+    E1000_IMS_RXSEQ)
+
+/*
+ * This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXT0   = Receiver Timer Interrupt (ring 0)
+ *   o TXDW   = Transmit Descriptor Written Back
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ *   o LSC    = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+    E1000_IMS_RXT0   |    \
+    E1000_IMS_TXDW   |    \
+    E1000_IMS_RXDMT0 |    \
+    E1000_IMS_RXSEQ  |    \
+    E1000_IMS_LSC)
+
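+/*
+ * Illustrative sketch (not from the original sources): a driver would
+ * typically arm this set of interrupt causes by writing the mask to the
+ * Interrupt Mask Set register, e.g.
+ *
+ *     E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
+ *
+ * where E1000_IMS is assumed to be the usual register offset macro from
+ * the register definitions header.
+ */
+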
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_IMS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_IMS_RXO       E1000_ICR_RXO       /* rx overrun */
+#define E1000_IMS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_IMS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_IMS_RXCFG     E1000_ICR_RXCFG     /* Rx /c/ ordered set */
+#define E1000_IMS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_IMS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_IMS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_IMS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_IMS_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_IMS_SRPD      E1000_ICR_SRPD
+#define E1000_IMS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_IMS_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_IMS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
+#define E1000_IMS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
+#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMS_DSW       E1000_ICR_DSW
+#define E1000_IMS_PHYINT    E1000_ICR_PHYINT
+#define E1000_IMS_EPRST     E1000_ICR_EPRST
+
+/* Extended Interrupt Mask Set */
+#define E1000_EIMS_RX_QUEUE0    E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EIMS_RX_QUEUE1    E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EIMS_RX_QUEUE2    E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EIMS_RX_QUEUE3    E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EIMS_TX_QUEUE0    E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EIMS_TX_QUEUE1    E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EIMS_TX_QUEUE2    E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EIMS_TX_QUEUE3    E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EIMS_TCP_TIMER    E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EIMS_OTHER        E1000_EICR_OTHER   /* Interrupt Cause Active */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_ICS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_ICS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_ICS_RXO       E1000_ICR_RXO       /* rx overrun */
+#define E1000_ICS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_ICS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_ICS_RXCFG     E1000_ICR_RXCFG     /* Rx /c/ ordered set */
+#define E1000_ICS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_ICS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_ICS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_ICS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_ICS_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_ICS_SRPD      E1000_ICR_SRPD
+#define E1000_ICS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_ICS_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_ICS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
+#define E1000_ICS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
+#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICS_DSW       E1000_ICR_DSW
+#define E1000_ICS_PHYINT    E1000_ICR_PHYINT
+#define E1000_ICS_EPRST     E1000_ICR_EPRST
+
+/* Extended Interrupt Cause Set */
+#define E1000_EICS_RX_QUEUE0    E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EICS_RX_QUEUE1    E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EICS_RX_QUEUE2    E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EICS_RX_QUEUE3    E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EICS_TX_QUEUE0    E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EICS_TX_QUEUE1    E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EICS_TX_QUEUE2    E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EICS_TX_QUEUE3    E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EICS_TCP_TIMER    E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EICS_OTHER        E1000_EICR_OTHER   /* Interrupt Cause Active */
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN    0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
+/* Enable the counting of descriptors still to be processed. */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW  0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE         0x8808
+
+/* 802.1q VLAN Packet Size */
+#define VLAN_TAG_SIZE              4    /* 802.3ac tag (not DMA'd) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
+
+/* Receive Address */
+/*
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots.  However, we reserve one of these spots
+ * (RAR[15]) for our directed address used by controllers with
+ * manageability enabled, allowing us room for 15 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES     15
+#define E1000_RAH_AV  0x80000000        /* Receive descriptor valid */
+
+/* Error Codes */
+#define E1000_SUCCESS      0
+#define E1000_ERR_NVM      1
+#define E1000_ERR_PHY      2
+#define E1000_ERR_CONFIG   3
+#define E1000_ERR_PARAM    4
+#define E1000_ERR_MAC_INIT 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET   9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET   12
+#define E1000_ERR_SWFW_SYNC 13
+#define E1000_NOT_IMPLEMENTED 14
+
+/* Loop limit on how long we wait for auto-negotiation to complete */
+#define FIBER_LINK_UP_LIMIT               50
+#define COPPER_LINK_UP_LIMIT              10
+#define PHY_AUTO_NEG_LIMIT                45
+#define PHY_FORCE_LIMIT                   20
+/* Number of 100-microsecond intervals we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT      800
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT             100
+/* Number of 2-millisecond intervals we wait to acquire MDIO ownership. */
+#define MDIO_OWNERSHIP_TIMEOUT      10
+/* Number of milliseconds for NVM auto read done after MAC reset. */
+#define AUTO_READ_DONE_TIMEOUT      10
+
+/* Flow Control */
+#define E1000_FCRTH_RTH  0x0000FFF8     /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTH_XFCE 0x80000000     /* External Flow Control Enable */
+#define E1000_FCRTL_RTL  0x0000FFF8     /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000     /* Enable XON frame transmission */
+
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD         0x00000020        /* TXCW full duplex */
+#define E1000_TXCW_HD         0x00000040        /* TXCW half duplex */
+#define E1000_TXCW_PAUSE      0x00000080        /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR    0x00000100        /* TXCW astm pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180        /* TXCW pause request mask */
+#define E1000_TXCW_RF         0x00003000        /* TXCW remote fault */
+#define E1000_TXCW_NP         0x00008000        /* TXCW next page */
+#define E1000_TXCW_CW         0x0000ffff        /* TxConfigWord mask */
+#define E1000_TXCW_TXC        0x40000000        /* Transmit Config control */
+#define E1000_TXCW_ANE        0x80000000        /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW         0x0000ffff        /* RxConfigWord mask */
+#define E1000_RXCW_NC         0x04000000        /* Receive config no carrier */
+#define E1000_RXCW_IV         0x08000000        /* Receive config invalid */
+#define E1000_RXCW_CC         0x10000000        /* Receive config change */
+#define E1000_RXCW_C          0x20000000        /* Receive config */
+#define E1000_RXCW_SYNCH      0x40000000        /* Receive config synch */
+#define E1000_RXCW_ANC        0x80000000        /* Auto-neg complete */
+
+/* PCI Express Control */
+#define E1000_GCR_RXD_NO_SNOOP          0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP       0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP       0x00000004
+#define E1000_GCR_TXD_NO_SNOOP          0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP       0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP       0x00000020
+
+#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP         | \
+                           E1000_GCR_RXDSCW_NO_SNOOP      | \
+                           E1000_GCR_RXDSCR_NO_SNOOP      | \
+                           E1000_GCR_TXD_NO_SNOOP         | \
+                           E1000_GCR_TXDSCW_NO_SNOOP      | \
+                           E1000_GCR_TXDSCR_NO_SNOOP)
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080  /* Collision test enable */
+#define MII_CR_FULL_DUPLEX      0x0100  /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200  /* Restart auto negotiation */
+#define MII_CR_ISOLATE          0x0400  /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN       0x0800  /* Power down */
+#define MII_CR_AUTO_NEG_EN      0x1000  /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK         0x4000  /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET            0x8000  /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000       0x0040
+#define MII_CR_SPEED_100        0x2000
+#define MII_CR_SPEED_10         0x0000
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS     0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT     0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS       0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS      0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT      0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE  0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS   0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS     0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS     0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS       0x0800 /* 10T   Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS       0x1000 /* 10T   Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS      0x2000 /* 100X  Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS      0x4000 /* 100X  Full Duplex Capable */
+#define MII_SR_100T4_CAPS        0x8000 /* 100T4 Capable */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_SELECTOR_FIELD   0x0001   /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS      0x0020   /* 10T   Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS      0x0040   /* 10T   Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS    0x0080   /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS    0x0100   /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS       0x0200   /* 100T4 Capable */
+#define NWAY_AR_PAUSE            0x0400   /* Pause operation desired */
+#define NWAY_AR_ASM_DIR          0x0800   /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT     0x2000   /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE        0x8000   /* Next Page ability supported */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS    0x0020 /* LP is 10T   Half Duplex Capable */
+#define NWAY_LPAR_10T_FD_CAPS    0x0040 /* LP is 10T   Full Duplex Capable */
+#define NWAY_LPAR_100TX_HD_CAPS  0x0080 /* LP is 100TX Half Duplex Capable */
+#define NWAY_LPAR_100TX_FD_CAPS  0x0100 /* LP is 100TX Full Duplex Capable */
+#define NWAY_LPAR_100T4_CAPS     0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE          0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR        0x0800 /* LP Asymmetric Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT   0x2000 /* LP has detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE    0x4000 /* LP has rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE      0x8000 /* Next Page ability supported */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS      0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD          0x0002 /* LP sent a new Link Code Word page */
+#define NWAY_ER_NEXT_PAGE_CAPS    0x0004 /* Local device is Next Page able */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */
+#define NWAY_ER_PAR_DETECT_FAULT  0x0010 /* Parallel Detection Fault occurred */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE      0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS         0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS         0x0200 /* Advertise 1000T FD capability  */
+#define CR_1000T_REPEATER_DTE    0x0400 /* 1=Repeater/switch device port */
+                                        /* 0=DTE device */
+#define CR_1000T_MS_VALUE        0x0800 /* 1=Configure PHY as Master */
+                                        /* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE       0x1000 /* 1=Master/Slave manual config value */
+                                        /* 0=Automatic Master/Slave config */
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1     0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2     0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3     0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4     0x8000 /* Transmitter Distortion test */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_IDLE_ERROR_CNT   0x00FF /* Num idle errors since last read */
+#define SR_1000T_ASYM_PAUSE_DIR   0x0100 /* LP asymmetric pause direction bit */
+#define SR_1000T_LP_HD_CAPS       0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS       0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS  0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES    0x4000 /* 1=Local Tx is Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT  0x8000 /* Master/Slave config fault */
+
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL      0x00 /* Control Register */
+#define PHY_STATUS       0x01 /* Status Register */
+#define PHY_ID1          0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2          0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV  0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY   0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP  0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL   0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS   0x0F /* Extended Status Reg */
+
+/* NVM Control */
+#define E1000_EECD_SK        0x00000001 /* NVM Clock */
+#define E1000_EECD_CS        0x00000002 /* NVM Chip Select */
+#define E1000_EECD_DI        0x00000004 /* NVM Data In */
+#define E1000_EECD_DO        0x00000008 /* NVM Data Out */
+#define E1000_EECD_FWE_MASK  0x00000030
+#define E1000_EECD_FWE_DIS   0x00000010 /* Disable FLASH writes */
+#define E1000_EECD_FWE_EN    0x00000020 /* Enable FLASH writes */
+#define E1000_EECD_FWE_SHIFT 4
+#define E1000_EECD_REQ       0x00000040 /* NVM Access Request */
+#define E1000_EECD_GNT       0x00000080 /* NVM Access Grant */
+#define E1000_EECD_PRES      0x00000100 /* NVM Present */
+#define E1000_EECD_SIZE      0x00000200 /* NVM Size (0=64 word 1=256 word) */
+/* NVM Addressing bits based on type 0=small, 1=large */
+#define E1000_EECD_ADDR_BITS 0x00000400
+#define E1000_EECD_TYPE      0x00002000 /* NVM Type (1-SPI, 0-Microwire) */
+#define E1000_NVM_GRANT_ATTEMPTS   1000 /* NVM # attempts to gain grant */
+#define E1000_EECD_AUTO_RD          0x00000200  /* NVM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK     0x00007800  /* NVM Size */
+#define E1000_EECD_SIZE_EX_SHIFT     11
+#define E1000_EECD_NVADDS    0x00018000 /* NVM Address Size */
+#define E1000_EECD_SELSHAD   0x00020000 /* Select Shadow RAM */
+#define E1000_EECD_INITSRAM  0x00040000 /* Initialize Shadow RAM */
+#define E1000_EECD_FLUPD     0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN    0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SHADV     0x00200000 /* Shadow RAM Data Valid */
+#define E1000_EECD_SEC1VAL   0x00400000 /* Sector One Valid */
+#define E1000_EECD_SECVAL_SHIFT      22
+
+#define E1000_NVM_SWDPIN0   0x0001   /* SWDPIN 0 NVM Value */
+#define E1000_NVM_LED_LOGIC 0x0020   /* Led Logic Word */
+#define E1000_NVM_RW_REG_DATA   16   /* Offset to data in NVM read/write registers */
+#define E1000_NVM_RW_REG_DONE   2    /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START  1    /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2    /* Shift to the address bits */
+#define E1000_NVM_POLL_WRITE    1    /* Flag for polling for write complete */
+#define E1000_NVM_POLL_READ     0    /* Flag for polling for read complete */
+#define E1000_FLASH_UPDATES  2000
+
+/* NVM Word Offsets */
+#define NVM_COMPAT                 0x0003
+#define NVM_ID_LED_SETTINGS        0x0004
+#define NVM_VERSION                0x0005
+#define NVM_SERDES_AMPLITUDE       0x0006 /* For SERDES output amplitude adjustment. */
+#define NVM_PHY_CLASS_WORD         0x0007
+#define NVM_INIT_CONTROL1_REG      0x000A
+#define NVM_INIT_CONTROL2_REG      0x000F
+#define NVM_SWDEF_PINS_CTRL_PORT_1 0x0010
+#define NVM_INIT_CONTROL3_PORT_B   0x0014
+#define NVM_INIT_3GIO_3            0x001A
+#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define NVM_INIT_CONTROL3_PORT_A   0x0024
+#define NVM_CFG                    0x0012
+#define NVM_FLASH_VERSION          0x0032
+#define NVM_ALT_MAC_ADDR_PTR       0x0037
+#define NVM_CHECKSUM_REG           0x003F
+
+#define E1000_NVM_CFG_DONE_PORT_0  0x40000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1  0x80000 /* ...for second port */
+
+/* Mask bits for fields in Word 0x0f of the NVM */
+#define NVM_WORD0F_PAUSE_MASK       0x3000
+#define NVM_WORD0F_PAUSE            0x1000
+#define NVM_WORD0F_ASM_DIR          0x2000
+#define NVM_WORD0F_ANE              0x0800
+#define NVM_WORD0F_SWPDIO_EXT_MASK  0x00F0
+#define NVM_WORD0F_LPLU             0x0001
+
+/* Mask bits for fields in Word 0x1a of the NVM */
+#define NVM_WORD1A_ASPM_MASK  0x000C
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+#define NVM_SUM                    0xBABA
+
+#define NVM_MAC_ADDR_OFFSET        0
+#define NVM_PBA_OFFSET_0           8
+#define NVM_PBA_OFFSET_1           9
+#define NVM_RESERVED_WORD          0xFFFF
+#define NVM_PHY_CLASS_A            0x8000
+#define NVM_SERDES_AMPLITUDE_MASK  0x000F
+#define NVM_SIZE_MASK              0x1C00
+#define NVM_SIZE_SHIFT             10
+#define NVM_WORD_SIZE_BASE_SHIFT   6
+#define NVM_SWDPIO_EXT_SHIFT       4
+
+/* NVM Commands - Microwire */
+#define NVM_READ_OPCODE_MICROWIRE  0x6  /* NVM read opcode */
+#define NVM_WRITE_OPCODE_MICROWIRE 0x5  /* NVM write opcode */
+#define NVM_ERASE_OPCODE_MICROWIRE 0x7  /* NVM erase opcode */
+#define NVM_EWEN_OPCODE_MICROWIRE  0x13 /* NVM erase/write enable */
+#define NVM_EWDS_OPCODE_MICROWIRE  0x10 /* NVM erase/write disable */
+
+/* NVM Commands - SPI */
+#define NVM_MAX_RETRY_SPI          5000 /* Max wait of 5ms, for RDY signal */
+#define NVM_READ_OPCODE_SPI        0x03 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_SPI       0x02 /* NVM write opcode */
+#define NVM_A8_OPCODE_SPI          0x08 /* opcode bit-3 = address bit-8 */
+#define NVM_WREN_OPCODE_SPI        0x06 /* NVM set Write Enable latch */
+#define NVM_WRDI_OPCODE_SPI        0x04 /* NVM reset Write Enable latch */
+#define NVM_RDSR_OPCODE_SPI        0x05 /* NVM read Status register */
+#define NVM_WRSR_OPCODE_SPI        0x01 /* NVM write Status register */
+
+/* SPI NVM Status Register */
+#define NVM_STATUS_RDY_SPI         0x01
+#define NVM_STATUS_WEN_SPI         0x02
+#define NVM_STATUS_BP0_SPI         0x04
+#define NVM_STATUS_BP1_SPI         0x08
+#define NVM_STATUS_WPEN_SPI        0x80
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT       ((ID_LED_OFF1_ON2  << 12) | \
+                              (ID_LED_OFF1_OFF2 <<  8) | \
+                              (ID_LED_DEF1_DEF2 <<  4) | \
+                              (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2     0x1
+#define ID_LED_DEF1_ON2      0x2
+#define ID_LED_DEF1_OFF2     0x3
+#define ID_LED_ON1_DEF2      0x4
+#define ID_LED_ON1_ON2       0x5
+#define ID_LED_ON1_OFF2      0x6
+#define ID_LED_OFF1_DEF2     0x7
+#define ID_LED_OFF1_ON2      0x8
+#define ID_LED_OFF1_OFF2     0x9
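+
+/*
+ * For illustration: with the encodings above, ID_LED_DEFAULT expands to
+ * (0x8 << 12) | (0x9 << 8) | (0x1 << 4) | 0x1 = 0x8911.
+ */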
+
+#define IGP_ACTIVITY_LED_MASK   0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE           0x07000000
+
+/* PCI/PCI-X/PCI-EX Config space */
+#define PCIX_COMMAND_REGISTER        0xE6
+#define PCIX_STATUS_REGISTER_LO      0xE8
+#define PCIX_STATUS_REGISTER_HI      0xEA
+#define PCI_HEADER_TYPE_REGISTER     0x0E
+#define PCIE_LINK_STATUS             0x12
+
+#define PCIX_COMMAND_MMRBC_MASK      0x000C
+#define PCIX_COMMAND_MMRBC_SHIFT     0x2
+#define PCIX_STATUS_HI_MMRBC_MASK    0x0060
+#define PCIX_STATUS_HI_MMRBC_SHIFT   0x5
+#define PCIX_STATUS_HI_MMRBC_4K      0x3
+#define PCIX_STATUS_HI_MMRBC_2K      0x2
+#define PCIX_STATUS_LO_FUNC_MASK     0x7
+#define PCI_HEADER_TYPE_MULTIFUNC    0x80
+#define PCIE_LINK_WIDTH_MASK         0x3F0
+#define PCIE_LINK_WIDTH_SHIFT        4
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN                 6
+#endif
+
+#define PHY_REVISION_MASK      0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS    0x1F  /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+/* Bit definitions for valid PHY IDs. */
+/*
+ * I = Integrated
+ * E = External
+ */
+#define M88E1000_E_PHY_ID    0x01410C50
+#define M88E1000_I_PHY_ID    0x01410C30
+#define M88E1011_I_PHY_ID    0x01410C20
+#define IGP01E1000_I_PHY_ID  0x02A80380
+#define M88E1011_I_REV_4     0x04
+#define M88E1111_I_PHY_ID    0x01410CC0
+#define GG82563_E_PHY_ID     0x01410CA0
+#define IGP03E1000_E_PHY_ID  0x02A80390
+#define IFE_E_PHY_ID         0x02A80330
+#define IFE_PLUS_E_PHY_ID    0x02A80320
+#define IFE_C_E_PHY_ID       0x02A80310
+#define M88_VENDOR           0x0141
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS   0x11  /* PHY Specific Status Register */
+#define M88E1000_INT_ENABLE        0x12  /* Interrupt Enable Register */
+#define M88E1000_INT_STATUS        0x13  /* Interrupt Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14  /* Extended PHY Specific Control */
+#define M88E1000_RX_ERR_CNTR       0x15  /* Receive Error Counter */
+
+#define M88E1000_PHY_EXT_CTRL      0x1A  /* PHY extend control register */
+#define M88E1000_PHY_PAGE_SELECT   0x1D  /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL   0x1E  /* Its meaning depends on reg 29 */
+#define M88E1000_PHY_VCO_REG_BIT8  0x100 /* Bits 8 & 11 are adjusted for */
+#define M88E1000_PHY_VCO_REG_BIT11 0x800    /* improved BER performance */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_JABBER_DISABLE    0x0001 /* 1=Jabber Function disabled */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define M88E1000_PSCR_SQE_TEST          0x0004 /* 1=SQE Test enabled */
+/* 1=CLK125 low, 0=CLK125 toggling */
+#define M88E1000_PSCR_CLK125_DISABLE    0x0010
+#define M88E1000_PSCR_MDI_MANUAL_MODE  0x0000  /* MDI Crossover Mode bits 6:5 */
+                                               /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020  /* Manual MDIX configuration */
+/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+#define M88E1000_PSCR_AUTO_X_1000T     0x0040
+/* Auto crossover enabled all speeds */
+#define M88E1000_PSCR_AUTO_X_MODE      0x0060
+/*
+ * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
+ * 0=Normal 10BASE-T Rx Threshold
+ */
+#define M88E1000_PSCR_EN_10BT_EXT_DIST 0x0080
+/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
+#define M88E1000_PSCR_MII_5BIT_ENABLE      0x0100
+#define M88E1000_PSCR_SCRAMBLER_DISABLE    0x0200 /* 1=Scrambler disable */
+#define M88E1000_PSCR_FORCE_LINK_GOOD      0x0400 /* 1=Force link good */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX     0x0800 /* 1=Assert CRS on Transmit */
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_JABBER             0x0001 /* 1=Jabber */
+#define M88E1000_PSSR_REV_POLARITY       0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT          0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX               0x0040 /* 1=MDIX; 0=MDI */
+/*
+ * 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-110M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define M88E1000_PSSR_CABLE_LENGTH       0x0380
+#define M88E1000_PSSR_LINK               0x0400 /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED  0x0800 /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_PAGE_RCVD          0x1000 /* 1=Page received */
+#define M88E1000_PSSR_DPLX               0x2000 /* 1=Duplex 0=Half Duplex */
+#define M88E1000_PSSR_SPEED              0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_10MBS              0x0000 /* 00=10Mbs */
+#define M88E1000_PSSR_100MBS             0x4000 /* 01=100Mbs */
+#define M88E1000_PSSR_1000MBS            0x8000 /* 10=1000Mbs */
+
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
+/* M88E1000 Extended PHY Specific Control Register */
+#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
+/*
+ * 1 = Lost lock detect enabled.
+ * Will assert lost lock and bring
+ * link down if idle not seen
+ * within 1ms in 1000BASE-T
+ */
+#define M88E1000_EPSCR_DOWN_NO_IDLE   0x8000
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X   0x0000
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X   0x0400
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X   0x0800
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X   0x0C00
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave
+ */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK  0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS   0x0000
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X    0x0100
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X    0x0200
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X    0x0300
+#define M88E1000_EPSCR_TX_CLK_2_5     0x0060 /* 2.5 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_25      0x0070 /* 25  MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_0       0x0000 /* NO  TX_CLK */
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X    0x0000
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X    0x0200
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X    0x0400
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X    0x0600
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X    0x0A00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X    0x0C00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X    0x0E00
+
+/*
+ * Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT        5
+#define GG82563_REG(page, reg)    \
+        (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG       30
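+
+/*
+ * For illustration: GG82563_REG(193, 16) evaluates to
+ * (193 << GG82563_PAGE_SHIFT) | (16 & MAX_PHY_REG_ADDRESS) = 0x1830,
+ * i.e. page 193, register offset 16 (the Kumeran Mode Control register below).
+ */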
+
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL           \
+        GG82563_REG(0, 16) /* PHY Specific Control */
+#define GG82563_PHY_SPEC_STATUS         \
+        GG82563_REG(0, 17) /* PHY Specific Status */
+#define GG82563_PHY_INT_ENABLE          \
+        GG82563_REG(0, 18) /* Interrupt Enable */
+#define GG82563_PHY_SPEC_STATUS_2       \
+        GG82563_REG(0, 19) /* PHY Specific Status 2 */
+#define GG82563_PHY_RX_ERR_CNTR         \
+        GG82563_REG(0, 21) /* Receive Error Counter */
+#define GG82563_PHY_PAGE_SELECT         \
+        GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2         \
+        GG82563_REG(0, 26) /* PHY Specific Control 2 */
+#define GG82563_PHY_PAGE_SELECT_ALT     \
+        GG82563_REG(0, 29) /* Alternate Page Select */
+#define GG82563_PHY_TEST_CLK_CTRL       \
+        GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
+
+#define GG82563_PHY_MAC_SPEC_CTRL       \
+        GG82563_REG(2, 21) /* MAC Specific Control Register */
+#define GG82563_PHY_MAC_SPEC_CTRL_2     \
+        GG82563_REG(2, 26) /* MAC Specific Control 2 */
+
+#define GG82563_PHY_DSP_DISTANCE    \
+        GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+#define GG82563_PHY_KMRN_MODE_CTRL   \
+        GG82563_REG(193, 16) /* Kumeran Mode Control */
+#define GG82563_PHY_PORT_RESET          \
+        GG82563_REG(193, 17) /* Port Reset */
+#define GG82563_PHY_REVISION_ID         \
+        GG82563_REG(193, 18) /* Revision ID */
+#define GG82563_PHY_DEVICE_ID           \
+        GG82563_REG(193, 19) /* Device ID */
+#define GG82563_PHY_PWR_MGMT_CTRL       \
+        GG82563_REG(193, 20) /* Power Management Control */
+#define GG82563_PHY_RATE_ADAPT_CTRL     \
+        GG82563_REG(193, 25) /* Rate Adaptation Control */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
+        GG82563_REG(194, 16) /* FIFO's Control/Status */
+#define GG82563_PHY_KMRN_CTRL           \
+        GG82563_REG(194, 17) /* Control */
+#define GG82563_PHY_INBAND_CTRL         \
+        GG82563_REG(194, 18) /* Inband Control */
+#define GG82563_PHY_KMRN_DIAGNOSTIC     \
+        GG82563_REG(194, 19) /* Diagnostic */
+#define GG82563_PHY_ACK_TIMEOUTS        \
+        GG82563_REG(194, 20) /* Acknowledge Timeouts */
+#define GG82563_PHY_ADV_ABILITY         \
+        GG82563_REG(194, 21) /* Advertised Ability */
+#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
+        GG82563_REG(194, 23) /* Link Partner Advertised Ability */
+#define GG82563_PHY_ADV_NEXT_PAGE       \
+        GG82563_REG(194, 24) /* Advertised Next Page */
+#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
+        GG82563_REG(194, 25) /* Link Partner Advertised Next page */
+#define GG82563_PHY_KMRN_MISC           \
+        GG82563_REG(194, 26) /* Misc. */
+
+/* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK  0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK  0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE  0x04000000
+#define E1000_MDIC_OP_READ   0x08000000
+#define E1000_MDIC_READY     0x10000000
+#define E1000_MDIC_INT_EN    0x20000000
+#define E1000_MDIC_ERROR     0x40000000
+
+/* SerDes Control */
+#define E1000_GEN_CTL_READY             0x80000000
+#define E1000_GEN_CTL_ADDRESS_SHIFT     8
+#define E1000_GEN_POLL_TIMEOUT          640
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.c	2022-03-21 12:58:29.796884993 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+#include "e1000_manage.h"
+
+static u8 e1000_calculate_checksum(u8 *buffer, u32 length);
+
+/**
+ *  e1000_calculate_checksum - Calculate checksum for buffer
+ *  @buffer: pointer to EEPROM
+ *  @length: size of EEPROM to calculate a checksum for
+ *
+ *  Calculates the checksum over the given buffer for the specified length.
+ *  The calculated checksum is returned.
+ **/
+static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
+{
+	u32 i;
+	u8  sum = 0;
+
+	DEBUGFUNC("e1000_calculate_checksum");
+
+	if (!buffer)
+		return 0;
+
+	for (i = 0; i < length; i++)
+		sum += buffer[i];
+
+	return (u8) (0 - sum);
+}
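+
+/*
+ * Note (illustrative): the value returned above is the two's complement of
+ * the byte sum, so adding it back into the byte sum of the covered data
+ * yields 0 modulo 256.
+ */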
+
+/**
+ *  e1000_mng_enable_host_if_generic - Checks host interface is enabled
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ *  This function checks whether the HOST IF is enabled for command operation
+ *  and also checks whether the previous command has completed.  It busy waits
+ *  if the previous command has not completed.
+ **/
+s32 e1000_mng_enable_host_if_generic(struct e1000_hw * hw)
+{
+	u32 hicr;
+	s32 ret_val = E1000_SUCCESS;
+	u8  i;
+
+	DEBUGFUNC("e1000_mng_enable_host_if_generic");
+
+	/* Check that the host interface is enabled. */
+	hicr = E1000_READ_REG(hw, E1000_HICR);
+	if ((hicr & E1000_HICR_EN) == 0) {
+		DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+		ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+		goto out;
+	}
+	/* check the previous command is completed */
+	for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+		hicr = E1000_READ_REG(hw, E1000_HICR);
+		if (!(hicr & E1000_HICR_C))
+			break;
+		msec_delay_irq(1);
+	}
+
+	if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+		DEBUGOUT("Previous command timeout failed.\n");
+		ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_mng_mode_generic - Generic check for management mode
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the firmware semaphore register and returns true (>0) if
+ *  manageability is enabled, else false (0).
+ **/
+bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
+{
+	u32 fwsm;
+
+	DEBUGFUNC("e1000_check_mng_mode_generic");
+
+	fwsm = E1000_READ_REG(hw, E1000_FWSM);
+
+	return ((fwsm & E1000_FWSM_MODE_MASK) ==
+	        (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ *  e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on TX
+ *  @hw: pointer to the HW structure
+ *
+ *  Enables packet filtering on transmit packets if manageability is enabled
+ *  and host interface is enabled.
+ **/
+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
+{
+	struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
+	u32 *buffer = (u32 *)&hw->mng_cookie;
+	u32 offset;
+	s32 ret_val, hdr_csum, csum;
+	u8 i, len;
+	bool tx_filter = TRUE;
+
+	DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic");
+
+	/* No manageability, no filtering */
+	if (!e1000_check_mng_mode(hw)) {
+		tx_filter = FALSE;
+		goto out;
+	}
+
+	/*
+	 * If we can't read from the host interface for whatever
+	 * reason, disable filtering.
+	 */
+	ret_val = e1000_mng_enable_host_if(hw);
+	if (ret_val != E1000_SUCCESS) {
+		tx_filter = FALSE;
+		goto out;
+	}
+
+	/* Read in the header.  Length and offset are in dwords. */
+	len    = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
+	offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
+	for (i = 0; i < len; i++) {
+		*(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
+		                                           E1000_HOST_IF,
+		                                           offset + i);
+	}
+	hdr_csum = hdr->checksum;
+	hdr->checksum = 0;
+	csum = e1000_calculate_checksum((u8 *)hdr,
+	                                E1000_MNG_DHCP_COOKIE_LENGTH);
+	/*
+	 * If either the checksums or signature don't match, then
+	 * the cookie area isn't considered valid, in which case we
+	 * take the safe route of assuming Tx filtering is enabled.
+	 */
+	if (hdr_csum != csum)
+		goto out;
+	if (hdr->signature != E1000_IAMT_SIGNATURE)
+		goto out;
+
+	/* Cookie area is valid, make the final check for filtering. */
+	if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING))
+		tx_filter = FALSE;
+
+out:
+	hw->mac.tx_pkt_filtering = tx_filter;
+	return tx_filter;
+}
+
+/**
+ *  e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface
+ *  @length: size of the buffer
+ *
+ *  Writes the DHCP information to the host interface.
+ **/
+s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer,
+                                      u16 length)
+{
+	struct e1000_host_mng_command_header hdr;
+	s32 ret_val;
+	u32 hicr;
+
+	DEBUGFUNC("e1000_mng_write_dhcp_info_generic");
+
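+	/*
+	 * Sequence: build a command header, make sure the host interface is
+	 * available, copy the payload in after the header area, write the
+	 * checksummed header, then set HICR_C so the management firmware
+	 * (ARC) picks up the new command.
+	 */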
+	hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+	hdr.command_length = length;
+	hdr.reserved1 = 0;
+	hdr.reserved2 = 0;
+	hdr.checksum = 0;
+
+	/* Enable the host interface */
+	ret_val = e1000_mng_enable_host_if(hw);
+	if (ret_val)
+		goto out;
+
+	/* Populate the host interface with the contents of "buffer". */
+	ret_val = e1000_mng_host_if_write(hw, buffer, length,
+	                                  sizeof(hdr), &(hdr.checksum));
+	if (ret_val)
+		goto out;
+
+	/* Write the manageability command header */
+	ret_val = e1000_mng_write_cmd_header(hw, &hdr);
+	if (ret_val)
+		goto out;
+
+	/* Tell the ARC a new command is pending. */
+	hicr = E1000_READ_REG(hw, E1000_HICR);
+	E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_mng_write_cmd_header_generic - Writes manageability command header
+ *  @hw: pointer to the HW structure
+ *  @hdr: pointer to the host interface command header
+ *
+ *  Writes the command header after computing its checksum.
+ **/
+s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
+                                    struct e1000_host_mng_command_header *hdr)
+{
+	u16 i, length = sizeof(struct e1000_host_mng_command_header);
+
+	DEBUGFUNC("e1000_mng_write_cmd_header_generic");
+
+	/* Write the whole command header structure with new checksum. */
+
+	hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
+
+	length >>= 2;
+	/* Write the relevant command block into the ram area. */
+	for (i = 0; i < length; i++) {
+		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
+		                            *((u32 *) hdr + i));
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_mng_host_if_write_generic - Write to the manageability host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface buffer
+ *  @length: size of the buffer
+ *  @offset: location in the buffer to write to
+ *  @sum: sum of the data (not checksum)
+ *
+ *  This function writes the buffer content at the given offset on the host
+ *  interface.  The writes are aligned so they can be done as whole dwords,
+ *  and the running sum of the data is accumulated in *sum.
+ **/
+s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
+                                    u16 length, u16 offset, u8 *sum)
+{
+	u8 *tmp;
+	u8 *bufptr = buffer;
+	u32 data = 0;
+	s32 ret_val = E1000_SUCCESS;
+	u16 remaining, i, j, prev_bytes;
+
+	DEBUGFUNC("e1000_mng_host_if_write_generic");
+
+	/* sum is the plain sum of the data, not a checksum */
+
+	if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
+		ret_val = -E1000_ERR_PARAM;
+		goto out;
+	}
+
+	tmp = (u8 *)&data;
+	prev_bytes = offset & 0x3;
+	offset >>= 2;
+
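+	/*
+	 * If the starting offset is not dword-aligned, read back the dword
+	 * that is already partly in use, patch in the leading bytes from the
+	 * caller's buffer, and write it out again before switching to the
+	 * whole-dword writes below.
+	 */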
+	if (prev_bytes) {
+		data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset);
+		for (j = prev_bytes; j < sizeof(u32); j++) {
+			*(tmp + j) = *bufptr++;
+			*sum += *(tmp + j);
+		}
+		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data);
+		length -= j - prev_bytes;
+		offset++;
+	}
+
+	remaining = length & 0x3;
+	length -= remaining;
+
+	/* Calculate length in DWORDs */
+	length >>= 2;
+
+	/*
+	 * The device driver writes the relevant command block into the
+	 * ram area.
+	 */
+	for (i = 0; i < length; i++) {
+		for (j = 0; j < sizeof(u32); j++) {
+			*(tmp + j) = *bufptr++;
+			*sum += *(tmp + j);
+		}
+
+		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, data);
+	}
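+	/* Zero-pad and write out any trailing bytes that do not fill a dword. */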
+	if (remaining) {
+		for (j = 0; j < sizeof(u32); j++) {
+			if (j < remaining)
+				*(tmp + j) = *bufptr++;
+			else
+				*(tmp + j) = 0;
+
+			*sum += *(tmp + j);
+		}
+		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, data);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_enable_mng_pass_thru - Enable processing of ARPs
+ *  @hw: pointer to the HW structure
+ *
+ *  Verifies whether the hardware should allow ARPs to be processed by the host.
+ **/
+bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+	u32 manc;
+	u32 fwsm, factps;
+	bool ret_val = FALSE;
+
+	DEBUGFUNC("e1000_enable_mng_pass_thru");
+
+	if (!hw->mac.asf_firmware_present)
+		goto out;
+
+	manc = E1000_READ_REG(hw, E1000_MANC);
+
+	if (!(manc & E1000_MANC_RCV_TCO_EN) ||
+	    !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+		goto out;
+
+	if (hw->mac.arc_subsystem_valid) {
+		fwsm = E1000_READ_REG(hw, E1000_FWSM);
+		factps = E1000_READ_REG(hw, E1000_FACTPS);
+
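+		/*
+		 * Pass-through requires manageability clock gating to be off
+		 * (FACTPS_MNGCG clear) and the firmware to report pass-through
+		 * mode in FWSM.
+		 */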
+		if (!(factps & E1000_FACTPS_MNGCG) &&
+		    ((fwsm & E1000_FWSM_MODE_MASK) ==
+		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
+			ret_val = TRUE;
+			goto out;
+		}
+	} else {
+		if ((manc & E1000_MANC_SMBUS_EN) &&
+		    !(manc & E1000_MANC_ASF_EN)) {
+			ret_val = TRUE;
+			goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.c	2022-03-21 12:58:29.791885041 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_ethtool.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_80003es2lan */
+
+#include "e1000_api.h"
+#include "e1000_80003es2lan.h"
+
+static s32  e1000_init_phy_params_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_init_mac_params_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_acquire_phy_80003es2lan(struct e1000_hw *hw);
+static void e1000_release_phy_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw);
+static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+                                                   u32 offset,
+                                                   u16 *data);
+static s32  e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+                                                    u32 offset,
+                                                    u16 data);
+static s32  e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
+                                        u16 words, u16 *data);
+static s32  e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_get_cable_length_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
+                                               u16 *duplex);
+static s32  e1000_reset_hw_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_init_hw_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
+static s32  e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
+static s32  e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw);
+static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
+static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
+static s32  e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);
+
+/*
+ * A table for the GG82563 cable length where the range is defined
+ * with a lower bound at "index" and the upper bound at
+ * "index + 5".
+ */
+static const u16 e1000_gg82563_cable_length_table[] =
+         { 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
+#define GG82563_CABLE_LENGTH_TABLE_SIZE \
+                (sizeof(e1000_gg82563_cable_length_table) / \
+                 sizeof(e1000_gg82563_cable_length_table[0]))
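+
+/*
+ * Example (illustrative): a DSP distance reading of 1 selects a range from
+ * table[1] = 60 to table[1 + 5] = 115 (nominally meters), and
+ * e1000_get_cable_length_80003es2lan() below reports the midpoint of it.
+ */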
+
+/**
+ *  e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_phy_params_80003es2lan");
+
+	if (hw->phy.media_type != e1000_media_type_copper) {
+		phy->type        = e1000_phy_none;
+		goto out;
+	} else {
+		func->power_up_phy = e1000_power_up_phy_copper;
+		func->power_down_phy = e1000_power_down_phy_copper_80003es2lan;
+	}
+
+	phy->addr                = 1;
+	phy->autoneg_mask        = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+	phy->reset_delay_us      = 100;
+	phy->type                = e1000_phy_gg82563;
+
+	func->acquire_phy        = e1000_acquire_phy_80003es2lan;
+	func->check_polarity     = e1000_check_polarity_m88;
+	func->check_reset_block  = e1000_check_reset_block_generic;
+	func->commit_phy         = e1000_phy_sw_reset_generic;
+	func->get_cfg_done       = e1000_get_cfg_done_80003es2lan;
+	func->get_phy_info       = e1000_get_phy_info_m88;
+	func->release_phy        = e1000_release_phy_80003es2lan;
+	func->reset_phy          = e1000_phy_hw_reset_generic;
+	func->set_d3_lplu_state  = e1000_set_d3_lplu_state_generic;
+
+	func->force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan;
+	func->get_cable_length   = e1000_get_cable_length_80003es2lan;
+	func->read_phy_reg       = e1000_read_phy_reg_gg82563_80003es2lan;
+	func->write_phy_reg      = e1000_write_phy_reg_gg82563_80003es2lan;
+
+	/* This can only be done after all function pointers are setup. */
+	ret_val = e1000_get_phy_id(hw);
+
+	/* Verify phy id */
+	if (phy->id != GG82563_E_PHY_ID) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_functions *func = &hw->func;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	u16 size;
+
+	DEBUGFUNC("e1000_init_nvm_params_80003es2lan");
+
+	nvm->opcode_bits        = 8;
+	nvm->delay_usec         = 1;
+	switch (nvm->override) {
+	case e1000_nvm_override_spi_large:
+		nvm->page_size    = 32;
+		nvm->address_bits = 16;
+		break;
+	case e1000_nvm_override_spi_small:
+		nvm->page_size    = 8;
+		nvm->address_bits = 8;
+		break;
+	default:
+		nvm->page_size    = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+		break;
+	}
+
+	nvm->type               = e1000_nvm_eeprom_spi;
+
+	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+	                  E1000_EECD_SIZE_EX_SHIFT);
+
+	/*
+	 * Added to a constant, "size" becomes the left-shift value
+	 * for setting word_size.
+	 */
+	size += NVM_WORD_SIZE_BASE_SHIFT;
+
+	/* EEPROM access above 16k is unsupported */
+	if (size > 14)
+		size = 14;
+	nvm->word_size	= 1 << size;
+
+	/* Function Pointers */
+	func->acquire_nvm       = e1000_acquire_nvm_80003es2lan;
+	func->read_nvm          = e1000_read_nvm_eerd;
+	func->release_nvm       = e1000_release_nvm_80003es2lan;
+	func->update_nvm        = e1000_update_nvm_checksum_generic;
+	func->valid_led_default = e1000_valid_led_default_generic;
+	func->validate_nvm      = e1000_validate_nvm_checksum_generic;
+	func->write_nvm         = e1000_write_nvm_80003es2lan;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_mac_params_80003es2lan");
+
+	/* Set media type */
+	switch (hw->device_id) {
+	case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+		hw->phy.media_type = e1000_media_type_internal_serdes;
+		break;
+	default:
+		hw->phy.media_type = e1000_media_type_copper;
+		break;
+	}
+
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_RAR_ENTRIES;
+	/* Set if part includes ASF firmware */
+	mac->asf_firmware_present = TRUE;
+	/* Set if manageability features are enabled. */
+	mac->arc_subsystem_valid =
+	        (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
+	                ? TRUE : FALSE;
+
+	/* Function pointers */
+
+	/* bus type/speed/width */
+	func->get_bus_info = e1000_get_bus_info_pcie_generic;
+	/* reset */
+	func->reset_hw = e1000_reset_hw_80003es2lan;
+	/* hw initialization */
+	func->init_hw = e1000_init_hw_80003es2lan;
+	/* link setup */
+	func->setup_link = e1000_setup_link_generic;
+	/* physical interface link setup */
+	func->setup_physical_interface =
+	        (hw->phy.media_type == e1000_media_type_copper)
+	                ? e1000_setup_copper_link_80003es2lan
+	                : e1000_setup_fiber_serdes_link_generic;
+	/* check for link */
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		func->check_for_link = e1000_check_for_copper_link_generic;
+		break;
+	case e1000_media_type_fiber:
+		func->check_for_link = e1000_check_for_fiber_link_generic;
+		break;
+	case e1000_media_type_internal_serdes:
+		func->check_for_link = e1000_check_for_serdes_link_generic;
+		break;
+	default:
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+	/* check management mode */
+	func->check_mng_mode = e1000_check_mng_mode_generic;
+	/* multicast address update */
+	func->update_mc_addr_list = e1000_update_mc_addr_list_generic;
+	/* writing VFTA */
+	func->write_vfta = e1000_write_vfta_generic;
+	/* clearing VFTA */
+	func->clear_vfta = e1000_clear_vfta_generic;
+	/* setting MTA */
+	func->mta_set = e1000_mta_set_generic;
+	/* read mac address */
+	func->read_mac_addr = e1000_read_mac_addr_80003es2lan;
+	/* blink LED */
+	func->blink_led = e1000_blink_led_generic;
+	/* setup LED */
+	func->setup_led = e1000_setup_led_generic;
+	/* cleanup LED */
+	func->cleanup_led = e1000_cleanup_led_generic;
+	/* turn on/off LED */
+	func->led_on = e1000_led_on_generic;
+	func->led_off = e1000_led_off_generic;
+	/* remove device */
+	func->remove_device = e1000_remove_device_generic;
+	/* clear hardware counters */
+	func->clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan;
+	/* link info */
+	func->get_link_up_info = e1000_get_link_up_info_80003es2lan;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_function_pointers_80003es2lan - Init ESB2 func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  The only function explicitly called by the api module to initialize
+ *  all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_80003es2lan(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_init_function_pointers_80003es2lan");
+
+	hw->func.init_mac_params = e1000_init_mac_params_80003es2lan;
+	hw->func.init_nvm_params = e1000_init_nvm_params_80003es2lan;
+	hw->func.init_phy_params = e1000_init_phy_params_80003es2lan;
+}
+
+/**
+ *  e1000_acquire_phy_80003es2lan - Acquire rights to access PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  A wrapper to acquire access rights to the correct PHY.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
+{
+	u16 mask;
+
+	DEBUGFUNC("e1000_acquire_phy_80003es2lan");
+
+	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+	mask |= E1000_SWFW_CSR_SM;
+
+	return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ *  e1000_release_phy_80003es2lan - Release rights to access PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  A wrapper to release access rights to the correct PHY.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
+{
+	u16 mask;
+
+	DEBUGFUNC("e1000_release_phy_80003es2lan");
+
+	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+	mask |= E1000_SWFW_CSR_SM;
+
+	e1000_release_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ *  e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the semaphore to access the EEPROM.  This is a function
+ *  pointer entry point called by the api module.
+ **/
+static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_acquire_nvm_80003es2lan");
+
+	ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_acquire_nvm_generic(hw);
+
+	if (ret_val)
+		e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_release_nvm_80003es2lan - Relinquish rights to access NVM
+ *  @hw: pointer to the HW structure
+ *
+ *  Release the semaphore used to access the EEPROM.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_release_nvm_80003es2lan");
+
+	e1000_release_nvm_generic(hw);
+	e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ *  e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
+ *  will also specify which port we're acquiring the lock for.
+ **/
+static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+	u32 swmask = mask;
+	u32 fwmask = mask << 16;
+	s32 ret_val = E1000_SUCCESS;
+	s32 i = 0, timeout = 200;
+
+	DEBUGFUNC("e1000_acquire_swfw_sync_80003es2lan");
+
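+	/*
+	 * Protocol: take the inter-port HW semaphore, test the SW_FW_SYNC
+	 * bits for this resource, and claim the software bit only if neither
+	 * firmware nor the other software thread holds it; otherwise drop the
+	 * semaphore, sleep 5ms and retry (up to 200 attempts, roughly 1s).
+	 */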
+	while (i < timeout) {
+		if (e1000_get_hw_semaphore_generic(hw)) {
+			ret_val = -E1000_ERR_SWFW_SYNC;
+			goto out;
+		}
+
+		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+		if (!(swfw_sync & (fwmask | swmask)))
+			break;
+
+		/*
+		 * Firmware currently using resource (fwmask)
+		 * or other software thread using resource (swmask)
+		 */
+		e1000_put_hw_semaphore_generic(hw);
+		msec_delay_irq(5);
+		i++;
+	}
+
+	if (i == timeout) {
+		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+		ret_val = -E1000_ERR_SWFW_SYNC;
+		goto out;
+	}
+
+	swfw_sync |= swmask;
+	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+	e1000_put_hw_semaphore_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
+ *  will also specify which port we're releasing the lock for.
+ **/
+static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+
+	DEBUGFUNC("e1000_release_swfw_sync_80003es2lan");
+
+	while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
+		; /* Spin until the HW semaphore is acquired. */
+
+	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+	swfw_sync &= ~mask;
+	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+	e1000_put_hw_semaphore_generic(hw);
+}
+
+/**
+ *  e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of the register to read
+ *  @data: pointer to the data returned from the operation
+ *
+ *  Read the GG82563 PHY register.  This is a function pointer entry
+ *  point called by the api module.
+ **/
+static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+                                                  u32 offset, u16 *data)
+{
+	s32 ret_val;
+	u32 page_select;
+	u16 temp;
+
+	DEBUGFUNC("e1000_read_phy_reg_gg82563_80003es2lan");
+
+	ret_val = e1000_acquire_phy_80003es2lan(hw);
+	if (ret_val)
+		goto out;
+
+	/* Select Configuration Page */
+	if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+		page_select = GG82563_PHY_PAGE_SELECT;
+	} else {
+		/*
+		 * Use Alternative Page Select register to access
+		 * registers 30 and 31
+		 */
+		page_select = GG82563_PHY_PAGE_SELECT_ALT;
+	}
+
+	temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
+	ret_val = e1000_write_phy_reg_mdic(hw, page_select, temp);
+	if (ret_val) {
+		e1000_release_phy_80003es2lan(hw);
+		goto out;
+	}
+
+	/*
+	 * The "ready" bit in the MDIC register may be incorrectly set
+	 * before the device has completed the "Page Select" MDI
+	 * transaction.  So we wait 200us after each MDI command...
+	 */
+	usec_delay(200);
+
+	/* ...and verify the command was successful. */
+	ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp);
+
+	if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+		ret_val = -E1000_ERR_PHY;
+		e1000_release_phy_80003es2lan(hw);
+		goto out;
+	}
+
+	usec_delay(200);
+
+	ret_val = e1000_read_phy_reg_mdic(hw,
+	                                 MAX_PHY_REG_ADDRESS & offset,
+	                                 data);
+
+	usec_delay(200);
+	e1000_release_phy_80003es2lan(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of the register to read
+ *  @data: value to write to the register
+ *
+ *  Write to the GG82563 PHY register.  This is a function pointer entry
+ *  point called by the api module.
+ **/
+static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+                                                   u32 offset, u16 data)
+{
+	s32 ret_val;
+	u32 page_select;
+	u16 temp;
+
+	DEBUGFUNC("e1000_write_phy_reg_gg82563_80003es2lan");
+
+	ret_val = e1000_acquire_phy_80003es2lan(hw);
+	if (ret_val)
+		goto out;
+
+	/* Select Configuration Page */
+	if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+		page_select = GG82563_PHY_PAGE_SELECT;
+	} else {
+		/*
+		 * Use Alternative Page Select register to access
+		 * registers 30 and 31
+		 */
+		page_select = GG82563_PHY_PAGE_SELECT_ALT;
+	}
+
+	temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
+	ret_val = e1000_write_phy_reg_mdic(hw, page_select, temp);
+	if (ret_val) {
+		e1000_release_phy_80003es2lan(hw);
+		goto out;
+	}
+
+
+	/*
+	 * The "ready" bit in the MDIC register may be incorrectly set
+	 * before the device has completed the "Page Select" MDI
+	 * transaction.  So we wait 200us after each MDI command...
+	 */
+	usec_delay(200);
+
+	/* ...and verify the command was successful. */
+	ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp);
+
+	if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+		ret_val = -E1000_ERR_PHY;
+		e1000_release_phy_80003es2lan(hw);
+		goto out;
+	}
+
+	usec_delay(200);
+
+	ret_val = e1000_write_phy_reg_mdic(hw,
+	                                  MAX_PHY_REG_ADDRESS & offset,
+	                                  data);
+
+	usec_delay(200);
+	e1000_release_phy_80003es2lan(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_nvm_80003es2lan - Write to ESB2 NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of the register to read
+ *  @words: number of words to write
+ *  @data: buffer of data to write to the NVM
+ *
+ *  Write "words" of data to the ESB2 NVM.  This is a function
+ *  pointer entry point called by the api module.
+ **/
+static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
+                            u16 words, u16 *data)
+{
+	DEBUGFUNC("e1000_write_nvm_80003es2lan");
+
+	return e1000_write_nvm_spi(hw, offset, words, data);
+}
+
+/**
+ *  e1000_get_cfg_done_80003es2lan - Wait for configuration to complete
+ *  @hw: pointer to the HW structure
+ *
+ *  Wait a specific amount of time for manageability processes to complete.
+ *  This is a function pointer entry point called by the phy module.
+ **/
+static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
+{
+	s32 timeout = PHY_CFG_TIMEOUT;
+	s32 ret_val = E1000_SUCCESS;
+	u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+	DEBUGFUNC("e1000_get_cfg_done_80003es2lan");
+
+	if (hw->bus.func == 1)
+		mask = E1000_NVM_CFG_DONE_PORT_1;
+
+	while (timeout) {
+		if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
+			break;
+		msec_delay(1);
+		timeout--;
+	}
+	if (!timeout) {
+		DEBUGOUT("MNG configuration cycle has not completed.\n");
+		ret_val = -E1000_ERR_RESET;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex
+ *  @hw: pointer to the HW structure
+ *
+ *  Force the speed and duplex settings onto the PHY.  This is a
+ *  function pointer entry point called by the phy module.
+ **/
+static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex_80003es2lan");
+
+	/*
+	 * Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO;
+	ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	DEBUGOUT1("GG82563 PSCR: %X\n", phy_data);
+
+	ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	/* Reset the phy to commit changes. */
+	phy_data |= MII_CR_RESET;
+
+	ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	usec_delay(1);
+
+	if (hw->phy.autoneg_wait_to_complete) {
+		DEBUGOUT("Waiting for forced speed/duplex link "
+		         "on GG82563 phy.\n");
+
+		ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+		                                     100000, &link);
+		if (ret_val)
+			goto out;
+
+		if (!link) {
+			/*
+			 * We didn't get link.
+			 * Reset the DSP and cross our fingers.
+			 */
+			ret_val = e1000_phy_reset_dsp_generic(hw);
+			if (ret_val)
+				goto out;
+		}
+
+		/* Try once more */
+		ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+		                                     100000, &link);
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Resetting the phy means we need to verify the TX_CLK corresponds
+	 * to the link speed.  10Mbps -> 2.5MHz, else 25MHz.
+	 */
+	phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
+	if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED)
+		phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5;
+	else
+		phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
+
+	/*
+	 * In addition, we must re-enable CRS on Tx for both half and full
+	 * duplex.
+	 */
+	phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+	ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_cable_length_80003es2lan - Set approximate cable length
+ *  @hw: pointer to the HW structure
+ *
+ *  Find the approximate cable length as measured by the GG82563 PHY.
+ *  This is a function pointer entry point called by the phy module.
+ **/
+static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, index;
+
+	DEBUGFUNC("e1000_get_cable_length_80003es2lan");
+
+	ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
+	if (ret_val)
+		goto out;
+
+	index = phy_data & GG82563_DSPD_CABLE_LENGTH;
+	phy->min_cable_length = e1000_gg82563_cable_length_table[index];
+	phy->max_cable_length = e1000_gg82563_cable_length_table[index+5];
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_link_up_info_80003es2lan - Report speed and duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: pointer to speed buffer
+ *  @duplex: pointer to duplex buffer
+ *
+ *  Retrieve the current speed and duplex configuration.
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
+                                              u16 *duplex)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_get_link_up_info_80003es2lan");
+
+	if (hw->phy.media_type == e1000_media_type_copper) {
+		ret_val = e1000_get_speed_and_duplex_copper_generic(hw,
+		                                                    speed,
+		                                                    duplex);
+		if (ret_val)
+			goto out;
+		if (*speed == SPEED_1000)
+			ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
+		else
+			ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw,
+			                                      *duplex);
+	} else {
+		ret_val = e1000_get_speed_and_duplex_fiber_serdes_generic(hw,
+		                                                  speed,
+		                                                  duplex);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_reset_hw_80003es2lan - Reset the ESB2 controller
+ *  @hw: pointer to the HW structure
+ *
+ *  Perform a global reset to the ESB2 controller.
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
+{
+	u32 ctrl, icr;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_reset_hw_80003es2lan");
+
+	/*
+	 * Prevent the PCI-E bus from hanging if the last TLP read/write
+	 * transaction has not completed when the MAC is reset.
+	 */
+	ret_val = e1000_disable_pcie_master_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("PCI-E Master disable polling has failed.\n");
+	}
+
+	DEBUGOUT("Masking off all interrupts\n");
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+	E1000_WRITE_REG(hw, E1000_RCTL, 0);
+	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+	E1000_WRITE_FLUSH(hw);
+
+	msec_delay(10);
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	DEBUGOUT("Issuing a global reset to MAC\n");
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+	ret_val = e1000_get_auto_rd_done_generic(hw);
+	if (ret_val)
+		/* We don't want to continue accessing MAC registers. */
+		goto out;
+
+	/* Clear any pending interrupt events. */
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+	icr = E1000_READ_REG(hw, E1000_ICR);
+
+	ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_hw_80003es2lan - Initialize the ESB2 controller
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize the hw bits, LED, VFTA, MTA, link and hw counters.
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 reg_data;
+	s32 ret_val;
+	u16 i;
+
+	DEBUGFUNC("e1000_init_hw_80003es2lan");
+
+	e1000_initialize_hw_bits_80003es2lan(hw);
+
+	/* Initialize identification LED */
+	ret_val = e1000_id_led_init_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
+	}
+
+	/* Disabling VLAN filtering */
+	DEBUGOUT("Initializing the IEEE VLAN\n");
+	e1000_clear_vfta(hw);
+
+	/* Setup the receive address. */
+	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+	/* Zero out the Multicast HASH table */
+	DEBUGOUT("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link(hw);
+
+	/* Set the transmit descriptor write-back policy */
+	reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0));
+	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+	           E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg_data);
+
+	/* ...for both queues. */
+	reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1));
+	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+	           E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data);
+
+	/* Enable retransmit on late collisions */
+	reg_data = E1000_READ_REG(hw, E1000_TCTL);
+	reg_data |= E1000_TCTL_RTLC;
+	E1000_WRITE_REG(hw, E1000_TCTL, reg_data);
+
+	/* Configure Gigabit Carry Extend Padding */
+	reg_data = E1000_READ_REG(hw, E1000_TCTL_EXT);
+	reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
+	reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN;
+	E1000_WRITE_REG(hw, E1000_TCTL_EXT, reg_data);
+
+	/* Configure Transmit Inter-Packet Gap */
+	reg_data = E1000_READ_REG(hw, E1000_TIPG);
+	reg_data &= ~E1000_TIPG_IPGT_MASK;
+	reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+	E1000_WRITE_REG(hw, E1000_TIPG, reg_data);
+
+	reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001);
+	reg_data &= ~0x00100000;
+	E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_80003es2lan(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes required hardware-dependent bits needed for normal operation.
+ **/
+static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
+{
+	u32 reg;
+
+	DEBUGFUNC("e1000_initialize_hw_bits_80003es2lan");
+
+	if (hw->mac.disable_hw_init_bits)
+		goto out;
+
+	/* Transmit Descriptor Control 0 */
+	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
+	reg |= (1 << 22);
+	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
+
+	/* Transmit Descriptor Control 1 */
+	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
+	reg |= (1 << 22);
+	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
+
+	/* Transmit Arbitration Control 0 */
+	reg = E1000_READ_REG(hw, E1000_TARC(0));
+	reg &= ~(0xF << 27); /* 30:27 */
+	if (hw->phy.media_type != e1000_media_type_copper)
+		reg &= ~(1 << 20);
+	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
+
+	/* Transmit Arbitration Control 1 */
+	reg = E1000_READ_REG(hw, E1000_TARC(1));
+	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
+		reg &= ~(1 << 28);
+	else
+		reg |= (1 << 28);
+	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
+
+out:
+	return;
+}
+
+/**
+ *  e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link
+ *  @hw: pointer to the HW structure
+ *
+ *  Setup some GG82563 PHY registers for obtaining link
+ **/
+static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
+{
+	struct   e1000_phy_info *phy = &hw->phy;
+	s32  ret_val;
+	u32 ctrl_ext;
+	u16 data;
+
+	DEBUGFUNC("e1000_copper_link_setup_gg82563_80003es2lan");
+
+	if (!phy->reset_disable) {
+		ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+		                             &data);
+		if (ret_val)
+			goto out;
+
+		data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+		/* Use 25MHz for both link down and 1000Base-T for Tx clock. */
+		data |= GG82563_MSCR_TX_CLK_1000MBPS_25;
+
+		ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+		                              data);
+		if (ret_val)
+			goto out;
+
+		/*
+		 * Options:
+		 *   MDI/MDI-X = 0 (default)
+		 *   0 - Auto for all speeds
+		 *   1 - MDI mode
+		 *   2 - MDI-X mode
+		 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+		 */
+		ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &data);
+		if (ret_val)
+			goto out;
+
+		data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
+
+		switch (phy->mdix) {
+		case 1:
+			data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
+			break;
+		case 2:
+			data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
+			break;
+		case 0:
+		default:
+			data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
+			break;
+		}
+
+		/*
+		 * Options:
+		 *   disable_polarity_correction = 0 (default)
+		 *       Automatic correction for reversed cable polarity:
+		 *   0 - correction enabled
+		 *   1 - correction disabled
+		 */
+		data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+		if (phy->disable_polarity_correction)
+			data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+
+		ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, data);
+		if (ret_val)
+			goto out;
+
+		/* SW Reset the PHY so all changes take effect */
+		ret_val = e1000_phy_commit(hw);
+		if (ret_val) {
+			DEBUGOUT("Error Resetting the PHY\n");
+			goto out;
+		}
+
+	}
+
+	/* Bypass Rx and Tx FIFO's */
+	ret_val = e1000_write_kmrn_reg(hw,
+	                        E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
+	                        E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
+	                                E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_kmrn_reg(hw,
+	                              E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
+	                              &data);
+	if (ret_val)
+		goto out;
+	data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
+	ret_val = e1000_write_kmrn_reg(hw,
+	                               E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
+	                               data);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, &data);
+	if (ret_val)
+		goto out;
+
+	data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
+	ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, data);
+	if (ret_val)
+		goto out;
+
+	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+	ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Do not init these registers when the HW is in IAMT mode, since the
+	 * firmware will have already initialized them.  We only initialize
+	 * them if the HW is not in IAMT mode.
+	 */
+	if (!(e1000_check_mng_mode(hw))) {
+		/* Enable Electrical Idle on the PHY */
+		data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+		ret_val = e1000_write_phy_reg(hw,
+		                             GG82563_PHY_PWR_MGMT_CTRL,
+		                             data);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw,
+		                            GG82563_PHY_KMRN_MODE_CTRL,
+		                            &data);
+		if (ret_val)
+			goto out;
+
+		data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+		ret_val = e1000_write_phy_reg(hw,
+		                             GG82563_PHY_KMRN_MODE_CTRL,
+		                             data);
+
+		if (ret_val)
+			goto out;
+	}
+
+	/*
+	 * Workaround: Disable padding in Kumeran interface in the MAC
+	 * and in the PHY to avoid CRC errors.
+	 */
+	ret_val = e1000_read_phy_reg(hw, GG82563_PHY_INBAND_CTRL, &data);
+	if (ret_val)
+		goto out;
+
+	data |= GG82563_ICR_DIS_PADDING;
+	ret_val = e1000_write_phy_reg(hw, GG82563_PHY_INBAND_CTRL, data);
+	if (ret_val)
+		goto out;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
+ *  @hw: pointer to the HW structure
+ *
+ *  Essentially a wrapper for setting up all things "copper" related.
+ *  This is a function pointer entry point called by the mac module.
+ **/
+static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32  ret_val;
+	u16 reg_data;
+
+	DEBUGFUNC("e1000_setup_copper_link_80003es2lan");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	/*
+	 * Set the mac to wait the maximum time between each
+	 * iteration and increase the max iterations when
+	 * polling the phy; this fixes erroneous timeouts at 10Mbps.
+	 */
+	ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
+	if (ret_val)
+		goto out;
+	ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
+	if (ret_val)
+		goto out;
+	reg_data |= 0x3F;
+	ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
+	if (ret_val)
+		goto out;
+	ret_val = e1000_read_kmrn_reg(hw,
+	                              E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+	                              &reg_data);
+	if (ret_val)
+		goto out;
+	reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
+	ret_val = e1000_write_kmrn_reg(hw,
+	                               E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+	                               reg_data);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_setup_copper_link_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
+ *  @hw: pointer to the HW structure
+ *  @duplex: current duplex setting
+ *
+ *  Configure the KMRN interface by applying last minute quirks for
+ *  10/100 operation.
+ **/
+static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u32 tipg;
+	u32 i = 0;
+	u16 reg_data, reg_data2;
+
+	DEBUGFUNC("e1000_configure_kmrn_for_10_100");
+
+	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
+	ret_val = e1000_write_kmrn_reg(hw,
+	                               E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+	                               reg_data);
+	if (ret_val)
+		goto out;
+
+	/* Configure Transmit Inter-Packet Gap */
+	tipg = E1000_READ_REG(hw, E1000_TIPG);
+	tipg &= ~E1000_TIPG_IPGT_MASK;
+	tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
+	E1000_WRITE_REG(hw, E1000_TIPG, tipg);
+
+
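+	/*
+	 * Read the KMRN mode control register until two consecutive reads
+	 * return the same value, presumably to avoid acting on a value that
+	 * is still being updated by hardware.
+	 */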
+	do {
+		ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+		                             &reg_data);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+		                             &reg_data2);
+		if (ret_val)
+			goto out;
+		i++;
+	} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+	if (duplex == HALF_DUPLEX)
+		reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+	else
+		reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+	ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Configure the KMRN interface by applying last minute quirks for
+ *  gigabit operation.
+ **/
+static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 reg_data, reg_data2;
+	u32 tipg;
+	u32 i = 0;
+
+	DEBUGFUNC("e1000_configure_kmrn_for_1000");
+
+	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
+	ret_val = e1000_write_kmrn_reg(hw,
+	                               E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+	                               reg_data);
+	if (ret_val)
+		goto out;
+
+	/* Configure Transmit Inter-Packet Gap */
+	tipg = E1000_READ_REG(hw, E1000_TIPG);
+	tipg &= ~E1000_TIPG_IPGT_MASK;
+	tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+	E1000_WRITE_REG(hw, E1000_TIPG, tipg);
+
+
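+	/* As in the 10/100 case above: re-read until two consecutive reads of
+	 * the KMRN mode control register agree before modifying it.
+	 */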
+	do {
+		ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+		                             &reg_data);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+		                             &reg_data2);
+		if (ret_val)
+			goto out;
+		i++;
+	} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+	reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+	ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_mac_addr_80003es2lan - Read device MAC address
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_read_mac_addr_80003es2lan");
+	if (e1000_check_alt_mac_addr_generic(hw))
+		ret_val = e1000_read_mac_addr_generic(hw);
+
+	return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power-down to save power, or to turn off link during a
+ * driver unload, or when Wake-on-LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(e1000_check_mng_mode(hw) || e1000_check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+
+	return;
+}
+
+/**
+ *  e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
+{
+	volatile u32 temp;
+
+	DEBUGFUNC("e1000_clear_hw_cntrs_80003es2lan");
+
+	e1000_clear_hw_cntrs_base_generic(hw);
+
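+	/* These statistics registers are clear-on-read; the values read here
+	 * are intentionally discarded.
+	 */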
+	temp = E1000_READ_REG(hw, E1000_PRC64);
+	temp = E1000_READ_REG(hw, E1000_PRC127);
+	temp = E1000_READ_REG(hw, E1000_PRC255);
+	temp = E1000_READ_REG(hw, E1000_PRC511);
+	temp = E1000_READ_REG(hw, E1000_PRC1023);
+	temp = E1000_READ_REG(hw, E1000_PRC1522);
+	temp = E1000_READ_REG(hw, E1000_PTC64);
+	temp = E1000_READ_REG(hw, E1000_PTC127);
+	temp = E1000_READ_REG(hw, E1000_PTC255);
+	temp = E1000_READ_REG(hw, E1000_PTC511);
+	temp = E1000_READ_REG(hw, E1000_PTC1023);
+	temp = E1000_READ_REG(hw, E1000_PTC1522);
+
+	temp = E1000_READ_REG(hw, E1000_ALGNERRC);
+	temp = E1000_READ_REG(hw, E1000_RXERRC);
+	temp = E1000_READ_REG(hw, E1000_TNCRS);
+	temp = E1000_READ_REG(hw, E1000_CEXTERR);
+	temp = E1000_READ_REG(hw, E1000_TSCTC);
+	temp = E1000_READ_REG(hw, E1000_TSCTFC);
+
+	temp = E1000_READ_REG(hw, E1000_MGTPRC);
+	temp = E1000_READ_REG(hw, E1000_MGTPDC);
+	temp = E1000_READ_REG(hw, E1000_MGTPTC);
+
+	temp = E1000_READ_REG(hw, E1000_IAC);
+	temp = E1000_READ_REG(hw, E1000_ICRXOC);
+
+	temp = E1000_READ_REG(hw, E1000_ICRXPTC);
+	temp = E1000_READ_REG(hw, E1000_ICRXATC);
+	temp = E1000_READ_REG(hw, E1000_ICTXPTC);
+	temp = E1000_READ_REG(hw, E1000_ICTXATC);
+	temp = E1000_READ_REG(hw, E1000_ICTXQEC);
+	temp = E1000_READ_REG(hw, E1000_ICTXQMTC);
+	temp = E1000_READ_REG(hw, E1000_ICRXDMTC);
+}
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_ethtool.c	2022-03-21 12:58:29.785885100 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for e1000 */
+
+#include <linux/netdevice.h>
+
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+
+#include "e1000.h"
+#include "e1000_82541.h"
+#ifdef NETIF_F_HW_VLAN_TX
+#include <linux/if_vlan.h>
+#endif
+
+#ifdef ETHTOOL_OPS_COMPAT
+#include "kcompat_ethtool.c"
+#endif
+
+#ifdef ETHTOOL_GSTATS
+struct e1000_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \
+		      offsetof(struct e1000_adapter, m)
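+/*
+ * E1000_STAT(m) expands to the pair "sizeof(member), offsetof(member)", so
+ * each table entry below records how wide a counter is and where it lives
+ * inside struct e1000_adapter.
+ */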
+static const struct e1000_stats e1000_gstrings_stats[] = {
+	{ "rx_packets", E1000_STAT(stats.gprc) },
+	{ "tx_packets", E1000_STAT(stats.gptc) },
+	{ "rx_bytes", E1000_STAT(stats.gorc) },
+	{ "tx_bytes", E1000_STAT(stats.gotc) },
+	{ "rx_broadcast", E1000_STAT(stats.bprc) },
+	{ "tx_broadcast", E1000_STAT(stats.bptc) },
+	{ "rx_multicast", E1000_STAT(stats.mprc) },
+	{ "tx_multicast", E1000_STAT(stats.mptc) },
+	{ "rx_errors", E1000_STAT(net_stats.rx_errors) },
+	{ "tx_errors", E1000_STAT(net_stats.tx_errors) },
+	{ "tx_dropped", E1000_STAT(net_stats.tx_dropped) },
+	{ "multicast", E1000_STAT(stats.mprc) },
+	{ "collisions", E1000_STAT(stats.colc) },
+	{ "rx_length_errors", E1000_STAT(net_stats.rx_length_errors) },
+	{ "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) },
+	{ "rx_crc_errors", E1000_STAT(stats.crcerrs) },
+	{ "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
+	{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
+	{ "rx_missed_errors", E1000_STAT(stats.mpc) },
+	{ "tx_aborted_errors", E1000_STAT(stats.ecol) },
+	{ "tx_carrier_errors", E1000_STAT(stats.tncrs) },
+	{ "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) },
+	{ "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) },
+	{ "tx_window_errors", E1000_STAT(stats.latecol) },
+	{ "tx_abort_late_coll", E1000_STAT(stats.latecol) },
+	{ "tx_deferred_ok", E1000_STAT(stats.dc) },
+	{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
+	{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+	{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },
+	{ "tx_restart_queue", E1000_STAT(restart_queue) },
+	{ "rx_long_length_errors", E1000_STAT(stats.roc) },
+	{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
+	{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
+	{ "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
+	{ "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
+	{ "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
+	{ "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
+	{ "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
+	{ "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
+	{ "rx_long_byte_count", E1000_STAT(stats.gorc) },
+	{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
+	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
+	{ "rx_header_split", E1000_STAT(rx_hdr_split) },
+	{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
+	{ "tx_smbus", E1000_STAT(stats.mgptc) },
+	{ "rx_smbus", E1000_STAT(stats.mgprc) },
+	{ "dropped_smbus", E1000_STAT(stats.mgpdc) },
+};
+
+#ifdef CONFIG_E1000_MQ
+#define E1000_QUEUE_STATS_LEN \
+	((((((struct e1000_adapter *)netdev->priv)->num_rx_queues > 1) ? \
+	  ((struct e1000_adapter *)netdev->priv)->num_rx_queues : 0 ) + \
+	 (((((struct e1000_adapter *)netdev->priv)->num_tx_queues > 1) ? \
+	  ((struct e1000_adapter *)netdev->priv)->num_tx_queues : 0 ))) * \
+	(sizeof(struct e1000_queue_stats) / sizeof(u64)))
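+/*
+ * Per-queue statistics are only reported when more than one Rx or Tx queue
+ * is configured; each queue then contributes one u64 slot per field of
+ * struct e1000_queue_stats.
+ */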
+#else
+#define E1000_QUEUE_STATS_LEN 0
+#endif
+#define E1000_GLOBAL_STATS_LEN	\
+	sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
+static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test  (offline)", "Eeprom test    (offline)",
+	"Interrupt test (offline)", "Loopback test  (offline)",
+	"Link test   (on/offline)"
+};
+#define E1000_TEST_LEN sizeof(e1000_gstrings_test) / ETH_GSTRING_LEN
+#endif /* ETHTOOL_TEST */
+
+static int e1000_get_settings(struct net_device *netdev,
+                              struct ethtool_cmd *ecmd)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 status;
+
+	if (hw->phy.media_type == e1000_media_type_copper) {
+
+		ecmd->supported = (SUPPORTED_10baseT_Half |
+		                   SUPPORTED_10baseT_Full |
+		                   SUPPORTED_100baseT_Half |
+		                   SUPPORTED_100baseT_Full |
+		                   SUPPORTED_1000baseT_Full|
+		                   SUPPORTED_Autoneg |
+		                   SUPPORTED_TP);
+		if (hw->phy.type == e1000_phy_ife)
+			ecmd->supported &= ~SUPPORTED_1000baseT_Full;
+		ecmd->advertising = ADVERTISED_TP;
+
+		if (hw->mac.autoneg == 1) {
+			ecmd->advertising |= ADVERTISED_Autoneg;
+			/* the e1000 autoneg seems to match ethtool nicely */
+			ecmd->advertising |= hw->phy.autoneg_advertised;
+		}
+
+		ecmd->port = PORT_TP;
+		ecmd->phy_address = hw->phy.addr;
+
+		if (hw->mac.type == e1000_82543)
+			ecmd->transceiver = XCVR_EXTERNAL;
+		else
+			ecmd->transceiver = XCVR_INTERNAL;
+
+	} else {
+		ecmd->supported   = (SUPPORTED_1000baseT_Full |
+				     SUPPORTED_FIBRE |
+				     SUPPORTED_Autoneg);
+
+		ecmd->advertising = (ADVERTISED_1000baseT_Full |
+				     ADVERTISED_FIBRE |
+				     ADVERTISED_Autoneg);
+
+		ecmd->port = PORT_FIBRE;
+
+		if (hw->mac.type >= e1000_82545)
+			ecmd->transceiver = XCVR_INTERNAL;
+		else
+			ecmd->transceiver = XCVR_EXTERNAL;
+	}
+
+	status = E1000_READ_REG(&adapter->hw, E1000_STATUS);
+
+	if (status & E1000_STATUS_LU) {
+
+		if ((status & E1000_STATUS_SPEED_1000) ||
+		    hw->phy.media_type != e1000_media_type_copper)
+			ecmd->speed = SPEED_1000;
+		else if (status & E1000_STATUS_SPEED_100)
+			ecmd->speed = SPEED_100;
+		else
+			ecmd->speed = SPEED_10;
+
+		if ((status & E1000_STATUS_FD) ||
+		    hw->phy.media_type != e1000_media_type_copper)
+			ecmd->duplex = DUPLEX_FULL;
+		else
+			ecmd->duplex = DUPLEX_HALF;
+	} else {
+		ecmd->speed = -1;
+		ecmd->duplex = -1;
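+		/* -1 tells ethtool the speed/duplex are unknown (no link). */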
+	}
+
+	ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
+			 hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+	return 0;
+}
+
+static int e1000_set_settings(struct net_device *netdev,
+                              struct ethtool_cmd *ecmd)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* When SoL/IDER sessions are active, autoneg/speed/duplex
+	 * cannot be changed */
+	if (e1000_check_reset_block(hw)) {
+		DPRINTK(DRV, ERR, "Cannot change link characteristics "
+		        "when SoL/IDER is active.\n");
+		return -EINVAL;
+	}
+
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+		msleep(1);
+
+	if (ecmd->autoneg == AUTONEG_ENABLE) {
+		hw->mac.autoneg = 1;
+		if (hw->phy.media_type == e1000_media_type_fiber)
+			hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
+			                             ADVERTISED_FIBRE |
+			                             ADVERTISED_Autoneg;
+		else
+			hw->phy.autoneg_advertised = ecmd->advertising |
+			                             ADVERTISED_TP |
+			                             ADVERTISED_Autoneg;
+		ecmd->advertising = hw->phy.autoneg_advertised;
+		if (adapter->fc_autoneg)
+			hw->fc.original_type = e1000_fc_default;
+	} else {
+		if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
+			clear_bit(__E1000_RESETTING, &adapter->state);
+			return -EINVAL;
+		}
+	}
+
+	/* reset the link */
+
+	if (netif_running(adapter->netdev)) {
+		e1000_down(adapter);
+		e1000_up(adapter);
+	} else {
+		e1000_reset(adapter);
+	}
+
+	clear_bit(__E1000_RESETTING, &adapter->state);
+	return 0;
+}
+
+static void e1000_get_pauseparam(struct net_device *netdev,
+                                 struct ethtool_pauseparam *pause)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	pause->autoneg =
+		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+	if (hw->fc.type == e1000_fc_rx_pause)
+		pause->rx_pause = 1;
+	else if (hw->fc.type == e1000_fc_tx_pause)
+		pause->tx_pause = 1;
+	else if (hw->fc.type == e1000_fc_full) {
+		pause->rx_pause = 1;
+		pause->tx_pause = 1;
+	}
+}
+
+static int e1000_set_pauseparam(struct net_device *netdev,
+                                struct ethtool_pauseparam *pause)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	int retval = 0;
+
+	adapter->fc_autoneg = pause->autoneg;
+
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+		msleep(1);
+
+	if (pause->rx_pause && pause->tx_pause)
+		hw->fc.type = e1000_fc_full;
+	else if (pause->rx_pause && !pause->tx_pause)
+		hw->fc.type = e1000_fc_rx_pause;
+	else if (!pause->rx_pause && pause->tx_pause)
+		hw->fc.type = e1000_fc_tx_pause;
+	else if (!pause->rx_pause && !pause->tx_pause)
+		hw->fc.type = e1000_fc_none;
+
+	hw->fc.original_type = hw->fc.type;
+
+	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+		hw->fc.type = e1000_fc_default;
+		if (netif_running(adapter->netdev)) {
+			e1000_down(adapter);
+			e1000_up(adapter);
+		} else {
+			e1000_reset(adapter);
+		}
+	} else {
+		retval = ((hw->phy.media_type == e1000_media_type_fiber) ?
+			  e1000_setup_link(hw) : e1000_force_mac_fc(hw));
+	}
+
+	clear_bit(__E1000_RESETTING, &adapter->state);
+	return retval;
+}
+
+static u32 e1000_get_rx_csum(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	return adapter->rx_csum;
+}
+
+static int e1000_set_rx_csum(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	adapter->rx_csum = data;
+
+	if (netif_running(netdev))
+		e1000_reinit_locked(adapter);
+	else
+		e1000_reset(adapter);
+	return 0;
+}
+
+static u32 e1000_get_tx_csum(struct net_device *netdev)
+{
+	return (netdev->features & NETIF_F_HW_CSUM) != 0;
+}
+
+static int e1000_set_tx_csum(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (adapter->hw.mac.type < e1000_82543) {
+		if (!data)
+			return -EINVAL;
+		return 0;
+	}
+
+	if (data)
+		netdev->features |= NETIF_F_HW_CSUM;
+	else
+		netdev->features &= ~NETIF_F_HW_CSUM;
+
+	return 0;
+}
+
+#ifdef NETIF_F_TSO
+static int e1000_set_tso(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	int i;
+	struct net_device *v_netdev;
+	if (!(adapter->flags & E1000_FLAG_HAS_TSO))
+		return data ? -EINVAL : 0;
+
+	if (data) {
+		netdev->features |= NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+		if (adapter->flags & E1000_FLAG_HAS_TSO6)
+			netdev->features |= NETIF_F_TSO6;
+#endif
+	} else {
+		netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+		if (adapter->flags & E1000_FLAG_HAS_TSO6)
+			netdev->features &= ~NETIF_F_TSO6;
+#endif
+#ifdef NETIF_F_HW_VLAN_TX
+		/* disable TSO on all VLANs if they're present */
+		if (!adapter->vlgrp)
+			goto tso_out;
+		for (i = 0; i < VLAN_N_VID; i++) {
+			v_netdev = vlan_group_get_device(adapter->vlgrp, i);
+			if (!v_netdev)
+				continue;
+
+			v_netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+			if (adapter->flags & E1000_FLAG_HAS_TSO6)
+				v_netdev->features &= ~NETIF_F_TSO6;
+#endif
+			vlan_group_set_device(adapter->vlgrp, i, v_netdev);
+		}
+#endif
+	}
+
+tso_out:
+	DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
+	adapter->flags |= E1000_FLAG_TSO_FORCE;
+	return 0;
+}
+#endif /* NETIF_F_TSO */
+
+static u32 e1000_get_msglevel(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	return adapter->msg_enable;
+}
+
+static void e1000_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	adapter->msg_enable = data;
+}
+
+static int e1000_get_regs_len(struct net_device *netdev)
+{
+#define E1000_REGS_LEN 32
+	return E1000_REGS_LEN * sizeof(u32);
+}
+
+static void e1000_get_regs(struct net_device *netdev,
+                           struct ethtool_regs *regs, void *p)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 *regs_buff = p;
+	u16 phy_data;
+
+	memset(p, 0, E1000_REGS_LEN * sizeof(u32));
+
+	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+
+	regs_buff[0]  = E1000_READ_REG(hw, E1000_CTRL);
+	regs_buff[1]  = E1000_READ_REG(hw, E1000_STATUS);
+
+	regs_buff[2]  = E1000_READ_REG(hw, E1000_RCTL);
+	regs_buff[3]  = E1000_READ_REG(hw, E1000_RDLEN(0));
+	regs_buff[4]  = E1000_READ_REG(hw, E1000_RDH(0));
+	regs_buff[5]  = E1000_READ_REG(hw, E1000_RDT(0));
+	regs_buff[6]  = E1000_READ_REG(hw, E1000_RDTR);
+
+	regs_buff[7]  = E1000_READ_REG(hw, E1000_TCTL);
+	regs_buff[8]  = E1000_READ_REG(hw, E1000_TDLEN(0));
+	regs_buff[9]  = E1000_READ_REG(hw, E1000_TDH(0));
+	regs_buff[10] = E1000_READ_REG(hw, E1000_TDT(0));
+	regs_buff[11] = E1000_READ_REG(hw, E1000_TIDV);
+
+	regs_buff[12] = adapter->hw.phy.type;  /* PHY type (IGP=1, M88=0) */
+	if (hw->phy.type == e1000_phy_igp) {
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_AGC_A);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[13] = (u32)phy_data; /* cable length */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_AGC_B);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[14] = (u32)phy_data; /* cable length */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_AGC_C);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[15] = (u32)phy_data; /* cable length */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_AGC_D);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[16] = (u32)phy_data; /* cable length */
+		regs_buff[17] = 0; /* extended 10bt distance (not needed) */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[18] = (u32)phy_data; /* cable polarity */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_PCS_INIT_REG);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[19] = (u32)phy_data; /* cable polarity */
+		regs_buff[20] = 0; /* polarity correction enabled (always) */
+		regs_buff[22] = 0; /* phy receive errors (unavailable) */
+		regs_buff[23] = regs_buff[18]; /* mdix mode */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
+	} else {
+		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+		regs_buff[13] = (u32)phy_data; /* cable length */
+		regs_buff[14] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+		regs_buff[15] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+		regs_buff[16] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+		regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
+		regs_buff[18] = regs_buff[13]; /* cable polarity */
+		regs_buff[19] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+		regs_buff[20] = regs_buff[17]; /* polarity correction */
+		/* phy receive errors */
+		regs_buff[22] = adapter->phy_stats.receive_errors;
+		regs_buff[23] = regs_buff[13]; /* mdix mode */
+	}
+	regs_buff[21] = adapter->phy_stats.idle_errors;  /* phy idle errors */
+	e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+	regs_buff[24] = (u32)phy_data;  /* phy local receiver status */
+	regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
+	if (hw->mac.type >= e1000_82540 &&
+	    hw->mac.type < e1000_82571 &&
+	    hw->phy.media_type == e1000_media_type_copper) {
+		regs_buff[26] = E1000_READ_REG(hw, E1000_MANC);
+	}
+}
+
+static int e1000_get_eeprom_len(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	return adapter->hw.nvm.word_size * 2;
+}
+
+static int e1000_get_eeprom(struct net_device *netdev,
+                            struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u16 *eeprom_buff;
+	int first_word, last_word;
+	int ret_val = 0;
+	u16 i;
+
+	if (eeprom->len == 0)
+		return -EINVAL;
+
+	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+	first_word = eeprom->offset >> 1;
+	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+	eeprom_buff = kmalloc(sizeof(u16) *
+			(last_word - first_word + 1), GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
+	if (hw->nvm.type == e1000_nvm_eeprom_spi)
+		ret_val = e1000_read_nvm(hw, first_word,
+		                         last_word - first_word + 1,
+		                         eeprom_buff);
+	else {
+		for (i = 0; i < last_word - first_word + 1; i++)
+			if ((ret_val = e1000_read_nvm(hw, first_word + i, 1,
+			                              &eeprom_buff[i])))
+				break;
+	}
+
+	/* Device's eeprom is always little-endian, word addressable */
+	for (i = 0; i < last_word - first_word + 1; i++)
+		le16_to_cpus(&eeprom_buff[i]);
+
+	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
+			eeprom->len);
+	kfree(eeprom_buff);
+
+	return ret_val;
+}
+
+static int e1000_set_eeprom(struct net_device *netdev,
+                            struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u16 *eeprom_buff;
+	void *ptr;
+	int max_len, first_word, last_word, ret_val = 0;
+	u16 i;
+
+	if (eeprom->len == 0)
+		return -EOPNOTSUPP;
+
+	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+		return -EFAULT;
+
+	max_len = hw->nvm.word_size * 2;
+
+	first_word = eeprom->offset >> 1;
+	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
+	ptr = (void *)eeprom_buff;
+
+	if (eeprom->offset & 1) {
+		/* need read/modify/write of first changed EEPROM word */
+		/* only the second byte of the word is being modified */
+		ret_val = e1000_read_nvm(hw, first_word, 1,
+					    &eeprom_buff[0]);
+		ptr++;
+	}
+	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+		/* need read/modify/write of last changed EEPROM word */
+		/* only the first byte of the word is being modified */
+		ret_val = e1000_read_nvm(hw, last_word, 1,
+		                  &eeprom_buff[last_word - first_word]);
+	}
+
+	/* Device's eeprom is always little-endian, word addressable */
+	for (i = 0; i < last_word - first_word + 1; i++)
+		le16_to_cpus(&eeprom_buff[i]);
+
+	memcpy(ptr, bytes, eeprom->len);
+
+	for (i = 0; i < last_word - first_word + 1; i++)
+		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+	ret_val = e1000_write_nvm(hw, first_word,
+	                          last_word - first_word + 1, eeprom_buff);
+
+	/* Update the checksum over the first part of the EEPROM if needed
+	 * and flush shadow RAM for 82573 controllers */
+	if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) ||
+				(hw->mac.type == e1000_82573)))
+		e1000_update_nvm_checksum(hw);
+
+	kfree(eeprom_buff);
+	return ret_val;
+}
+
+static void e1000_get_drvinfo(struct net_device *netdev,
+                              struct ethtool_drvinfo *drvinfo)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	char firmware_version[32];
+	u16 eeprom_data;
+
+	strncpy(drvinfo->driver,  e1000_driver_name, 32);
+	strncpy(drvinfo->version, e1000_driver_version, 32);
+
+	/* EEPROM image version # is reported as firmware version # for
+	 * 8257{1|2|3} controllers */
+	e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data);
+	switch (adapter->hw.mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_82573:
+	case e1000_80003es2lan:
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		sprintf(firmware_version, "%d.%d-%d",
+			(eeprom_data & 0xF000) >> 12,
+			(eeprom_data & 0x0FF0) >> 4,
+			eeprom_data & 0x000F);
+		break;
+	default:
+		sprintf(firmware_version, "N/A");
+	}
+
+	strncpy(drvinfo->fw_version, firmware_version, 32);
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	drvinfo->n_stats = E1000_STATS_LEN;
+	drvinfo->testinfo_len = E1000_TEST_LEN;
+	drvinfo->regdump_len = e1000_get_regs_len(netdev);
+	drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
+}
+
+static void e1000_get_ringparam(struct net_device *netdev,
+                                struct ethtool_ringparam *ring)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	e1000_mac_type mac_type = adapter->hw.mac.type;
+	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
+
+	ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD :
+		E1000_MAX_82544_RXD;
+	ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD :
+		E1000_MAX_82544_TXD;
+	ring->rx_mini_max_pending = 0;
+	ring->rx_jumbo_max_pending = 0;
+	ring->rx_pending = rx_ring->count;
+	ring->tx_pending = tx_ring->count;
+	ring->rx_mini_pending = 0;
+	ring->rx_jumbo_pending = 0;
+}
+
+static int e1000_set_ringparam(struct net_device *netdev,
+                               struct ethtool_ringparam *ring)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	e1000_mac_type mac_type = adapter->hw.mac.type;
+	struct e1000_tx_ring *tx_ring, *tx_old;
+	struct e1000_rx_ring *rx_ring, *rx_old;
+	int i, err, tx_ring_size, rx_ring_size;
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
+	tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
+	rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
+
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+		msleep(1);
+
+	if (netif_running(adapter->netdev))
+		e1000_down(adapter);
+
+	tx_old = adapter->tx_ring;
+	rx_old = adapter->rx_ring;
+
+	err = -ENOMEM;
+	tx_ring = kzalloc(tx_ring_size, GFP_KERNEL);
+	if (!tx_ring)
+		goto err_alloc_tx;
+	/* use a memcpy to save any previously configured
+	 * items like napi structs from having to be
+	 * reinitialized */
+	memcpy(tx_ring, tx_old, tx_ring_size);
+
+	rx_ring = kzalloc(rx_ring_size, GFP_KERNEL);
+	if (!rx_ring)
+		goto err_alloc_rx;
+	memcpy(rx_ring, rx_old, rx_ring_size);
+
+	adapter->tx_ring = tx_ring;
+	adapter->rx_ring = rx_ring;
+
+	rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD);
+	rx_ring->count = min(rx_ring->count, (u32)(mac_type < e1000_82544 ?
+		E1000_MAX_RXD : E1000_MAX_82544_RXD));
+	rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+	tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD);
+	tx_ring->count = min(tx_ring->count, (u32)(mac_type < e1000_82544 ?
+		E1000_MAX_TXD : E1000_MAX_82544_TXD));
+	tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+	/* overwrite the counts with the new values */
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		tx_ring[i].count = tx_ring->count;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rx_ring[i].count = rx_ring->count;
+
+	if (netif_running(adapter->netdev)) {
+		/* Try to get new resources before deleting old */
+		if ((err = e1000_setup_all_rx_resources(adapter)))
+			goto err_setup_rx;
+		if ((err = e1000_setup_all_tx_resources(adapter)))
+			goto err_setup_tx;
+
+		/* restore the old in order to free it,
+		 * then add in the new */
+		adapter->rx_ring = rx_old;
+		adapter->tx_ring = tx_old;
+		e1000_free_all_rx_resources(adapter);
+		e1000_free_all_tx_resources(adapter);
+		kfree(tx_old);
+		kfree(rx_old);
+		adapter->rx_ring = rx_ring;
+		adapter->tx_ring = tx_ring;
+		if ((err = e1000_up(adapter)))
+			goto err_setup;
+	}
+
+	clear_bit(__E1000_RESETTING, &adapter->state);
+	return 0;
+err_setup_tx:
+	e1000_free_all_rx_resources(adapter);
+err_setup_rx:
+	adapter->rx_ring = rx_old;
+	adapter->tx_ring = tx_old;
+	kfree(rx_ring);
+err_alloc_rx:
+	kfree(tx_ring);
+err_alloc_tx:
+	e1000_up(adapter);
+err_setup:
+	clear_bit(__E1000_RESETTING, &adapter->state);
+	return err;
+}
+
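+/*
+ * Register diagnostics: reg_pattern_test() writes a set of bit patterns
+ * through the given mask and verifies the readback, while
+ * reg_set_and_check() performs a single masked write/read comparison.
+ */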
+static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
+			     int reg, int offset, u32 mask, u32 write)
+{
+	u32 pat, val;
+	static const u32 test[] =
+		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+	for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
+		E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
+		                      (test[pat] & write));
+		val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
+		if (val != (test[pat] & write & mask)) {
+			DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "
+			        "0x%08X expected 0x%08X\n",
+			        E1000_REGISTER(&adapter->hw, reg) + offset,
+			        val, (test[pat] & write & mask));
+			*data = E1000_REGISTER(&adapter->hw, reg);
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
+			      int reg, u32 mask, u32 write)
+{
+	u32 val;
+	E1000_WRITE_REG(&adapter->hw, reg, write & mask);
+	val = E1000_READ_REG(&adapter->hw, reg);
+	if ((write & mask) != (val & mask)) {
+		DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X"
+		        "expected 0x%08X\n", reg, (val & mask), (write & mask));
+		*data = E1000_REGISTER(&adapter->hw, reg);
+		return 1;
+	}
+	return 0;
+}
+#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write)                       \
+	do {                                                                   \
+		if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
+			return 1;                                              \
+	} while (0)
+#define REG_PATTERN_TEST(reg, mask, write)                                     \
+	REG_PATTERN_TEST_ARRAY(reg, 0, mask, write)
+
+#define REG_SET_AND_CHECK(reg, mask, write)                                    \
+	do {                                                                   \
+		if (reg_set_and_check(adapter, data, reg, mask, write))       \
+			return 1;                                              \
+	} while (0)
+
+static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
+{
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+	u32 value, before, after;
+	u32 i, toggle;
+
+	/* The status register is Read Only, so a write should fail.
+	 * Some bits that get toggled are ignored.
+	 */
+	switch (mac->type) {
+	/* there are several bits on newer hardware that are r/w */
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+		toggle = 0x7FFFF3FF;
+		break;
+	case e1000_82573:
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		toggle = 0x7FFFF033;
+		break;
+	default:
+		toggle = 0xFFFFF833;
+		break;
+	}
+
+	before = E1000_READ_REG(&adapter->hw, E1000_STATUS);
+	value = (E1000_READ_REG(&adapter->hw, E1000_STATUS) & toggle);
+	E1000_WRITE_REG(&adapter->hw, E1000_STATUS, toggle);
+	after = E1000_READ_REG(&adapter->hw, E1000_STATUS) & toggle;
+	if (value != after) {
+		DPRINTK(DRV, ERR, "failed STATUS register test got: "
+		        "0x%08X expected: 0x%08X\n", after, value);
+		*data = 1;
+		return 1;
+	}
+	/* restore previous status */
+	E1000_WRITE_REG(&adapter->hw, E1000_STATUS, before);
+
+	if ((mac->type != e1000_ich8lan) &&
+	    (mac->type != e1000_ich9lan)) {
+		REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
+		REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF);
+		REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF);
+		REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF);
+	}
+
+	REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
+	REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
+	REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF);
+	REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF);
+	REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF);
+	REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
+	REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
+	REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
+	REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
+	REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF);
+
+	REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);
+
+	before = (((mac->type == e1000_ich8lan) ||
+		   (mac->type == e1000_ich9lan)) ? 0x06C3B33E : 0x06DFB3FE);
+	REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB);
+	REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);
+
+	if (mac->type >= e1000_82543) {
+
+		REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
+		REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
+		if ((mac->type != e1000_ich8lan) &&
+		    (mac->type != e1000_ich9lan))
+			REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
+		REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
+		REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
+		for (i = 0; i < mac->rar_entry_count; i++) {
+			REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
+			                       0x8003FFFF, 0xFFFFFFFF);
+		}
+
+	} else {
+
+		REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x01FFFFFF);
+		REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFF000, 0xFFFFFFFF);
+		REG_PATTERN_TEST(E1000_TXCW, 0x0000FFFF, 0x0000FFFF);
+		REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFF000, 0xFFFFFFFF);
+
+	}
+
+	for (i = 0; i < mac->mta_reg_count; i++)
+		REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
+
+	*data = 0;
+	return 0;
+}
+
+static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
+{
+	u16 temp;
+	u16 checksum = 0;
+	u16 i;
+
+	*data = 0;
+	/* Read and add up the contents of the EEPROM */
+	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+		if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
+			*data = 1;
+			break;
+		}
+		checksum += temp;
+	}
+
+	/* If the computed checksum does not match NVM_SUM, report an error */
+	if ((checksum != (u16) NVM_SUM) && !(*data))
+		*data = 2;
+
+	return *data;
+}
+
+static irqreturn_t e1000_test_intr(int irq, void *data)
+{
+	struct net_device *netdev = (struct net_device *) data;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	adapter->test_icr |= E1000_READ_REG(&adapter->hw, E1000_ICR);
+
+	return IRQ_HANDLED;
+}
+
+static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
+{
+	struct net_device *netdev = adapter->netdev;
+	u32 mask, i = 0, shared_int = TRUE;
+	u32 irq = adapter->pdev->irq;
+
+	*data = 0;
+
+	/* NOTE: we don't test MSI interrupts here, yet */
+	/* Hook up test interrupt handler just for this test */
+	if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
+	                 netdev))
+		shared_int = FALSE;
+	else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
+	         netdev->name, netdev)) {
+		*data = 1;
+		return -1;
+	}
+	DPRINTK(HW, INFO, "testing %s interrupt\n",
+	        (shared_int ? "shared" : "unshared"));
+
+	/* Disable all the interrupts */
+	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xFFFFFFFF);
+	msleep(10);
+
+	/* Test each interrupt */
+	for (; i < 10; i++) {
+
+		if (((adapter->hw.mac.type == e1000_ich8lan) ||
+		     (adapter->hw.mac.type == e1000_ich9lan)) && i == 8)
+			continue;
+
+		/* Interrupt to test */
+		mask = 1 << i;
+
+		if (!shared_int) {
+			/* Disable the interrupt to be reported in
+			 * the cause register and then force the same
+			 * interrupt and see if one gets posted.  If
+			 * an interrupt was posted to the bus, the
+			 * test failed.
+			 */
+			adapter->test_icr = 0;
+			E1000_WRITE_REG(&adapter->hw, E1000_IMC, mask);
+			E1000_WRITE_REG(&adapter->hw, E1000_ICS, mask);
+			msleep(10);
+
+			if (adapter->test_icr & mask) {
+				*data = 3;
+				break;
+			}
+		}
+
+		/* Enable the interrupt to be reported in
+		 * the cause register and then force the same
+		 * interrupt and see if one gets posted.  If
+		 * an interrupt was not posted to the bus, the
+		 * test failed.
+		 */
+		adapter->test_icr = 0;
+		E1000_WRITE_REG(&adapter->hw, E1000_IMS, mask);
+		E1000_WRITE_REG(&adapter->hw, E1000_ICS, mask);
+		msleep(10);
+
+		if (!(adapter->test_icr & mask)) {
+			*data = 4;
+			break;
+		}
+
+		if (!shared_int) {
+			/* Disable the other interrupts to be reported in
+			 * the cause register and then force the other
+			 * interrupts and see if any get posted.  If
+			 * an interrupt was posted to the bus, the
+			 * test failed.
+			 */
+			adapter->test_icr = 0;
+			E1000_WRITE_REG(&adapter->hw, E1000_IMC,
+			                ~mask & 0x00007FFF);
+			E1000_WRITE_REG(&adapter->hw, E1000_ICS,
+			                ~mask & 0x00007FFF);
+			msleep(10);
+
+			if (adapter->test_icr) {
+				*data = 5;
+				break;
+			}
+		}
+	}
+
+	/* Disable all the interrupts */
+	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xFFFFFFFF);
+	msleep(10);
+
+	/* Unhook test interrupt handler */
+	free_irq(irq, netdev);
+
+	return *data;
+}
+
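+/*
+ * The loopback self-test uses dedicated descriptor rings
+ * (adapter->test_tx_ring / adapter->test_rx_ring) which are set up and
+ * torn down around each run, independently of the regular data path.
+ */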
+static void e1000_free_desc_rings(struct e1000_adapter *adapter)
+{
+	struct e1000_tx_ring *tx_ring = &adapter->test_tx_ring;
+	struct e1000_rx_ring *rx_ring = &adapter->test_rx_ring;
+	struct pci_dev *pdev = adapter->pdev;
+	int i;
+
+	if (tx_ring->desc && tx_ring->buffer_info) {
+		for (i = 0; i < tx_ring->count; i++) {
+			if (tx_ring->buffer_info[i].dma)
+				pci_unmap_single(pdev, tx_ring->buffer_info[i].dma,
+						 tx_ring->buffer_info[i].length,
+						 PCI_DMA_TODEVICE);
+			if (tx_ring->buffer_info[i].skb)
+				dev_kfree_skb(tx_ring->buffer_info[i].skb);
+		}
+	}
+
+	if (rx_ring->desc && rx_ring->buffer_info) {
+		for (i = 0; i < rx_ring->count; i++) {
+			if (rx_ring->buffer_info[i].dma)
+				pci_unmap_single(pdev, rx_ring->buffer_info[i].dma,
+						 E1000_RXBUFFER_2048,
+						 PCI_DMA_FROMDEVICE);
+			if (rx_ring->buffer_info[i].skb)
+				dev_kfree_skb(rx_ring->buffer_info[i].skb);
+		}
+	}
+
+	if (tx_ring->desc) {
+		pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+		tx_ring->desc = NULL;
+	}
+	if (rx_ring->desc) {
+		pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+		rx_ring->desc = NULL;
+	}
+
+	kfree(tx_ring->buffer_info);
+	tx_ring->buffer_info = NULL;
+	kfree(rx_ring->buffer_info);
+	rx_ring->buffer_info = NULL;
+
+	return;
+}
+
+static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
+{
+	struct e1000_tx_ring *tx_ring = &adapter->test_tx_ring;
+	struct e1000_rx_ring *rx_ring = &adapter->test_rx_ring;
+	struct pci_dev *pdev = adapter->pdev;
+	u32 rctl;
+	int i, ret_val;
+
+	/* Setup Tx descriptor ring and Tx buffers */
+
+	if (!tx_ring->count)
+		tx_ring->count = E1000_DEFAULT_TXD;
+
+	if (!(tx_ring->buffer_info = kcalloc(tx_ring->count,
+	                                     sizeof(struct e1000_buffer),
+	                                     GFP_KERNEL))) {
+		ret_val = 1;
+		goto err_nomem;
+	}
+
+	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+	if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+	                                           &tx_ring->dma))) {
+		ret_val = 2;
+		goto err_nomem;
+	}
+	tx_ring->next_to_use = tx_ring->next_to_clean = 0;
+
+	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
+			((u64) tx_ring->dma & 0x00000000FFFFFFFF));
+	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0), ((u64) tx_ring->dma >> 32));
+	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
+			tx_ring->count * sizeof(struct e1000_tx_desc));
+	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
+	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
+	E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
+			E1000_TCTL_MULR |
+			E1000_TCTL_PSP | E1000_TCTL_EN |
+			E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
+			E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+
+	for (i = 0; i < tx_ring->count; i++) {
+		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
+		struct sk_buff *skb;
+		unsigned int size = 1024;
+
+		if (!(skb = alloc_skb(size, GFP_KERNEL))) {
+			ret_val = 3;
+			goto err_nomem;
+		}
+		skb_put(skb, size);
+		tx_ring->buffer_info[i].skb = skb;
+		tx_ring->buffer_info[i].length = skb->len;
+		tx_ring->buffer_info[i].dma =
+			pci_map_single(pdev, skb->data, skb->len,
+				       PCI_DMA_TODEVICE);
+		tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
+		tx_desc->lower.data = cpu_to_le32(skb->len);
+		tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
+						   E1000_TXD_CMD_IFCS);
+		if (adapter->hw.mac.type < e1000_82543)
+			tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_RPS);
+		else
+			tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_RS);
+
+		tx_desc->upper.data = 0;
+	}
+
+	/* Setup Rx descriptor ring and Rx buffers */
+
+	if (!rx_ring->count)
+		rx_ring->count = E1000_DEFAULT_RXD;
+
+	if (!(rx_ring->buffer_info = kcalloc(rx_ring->count,
+	                                     sizeof(struct e1000_rx_buffer),
+	                                     GFP_KERNEL))) {
+		ret_val = 4;
+		goto err_nomem;
+	}
+
+	rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
+	if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
+	                                           &rx_ring->dma))) {
+		ret_val = 5;
+		goto err_nomem;
+	}
+	rx_ring->next_to_use = rx_ring->next_to_clean = 0;
+
+	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
+			((u64) rx_ring->dma & 0xFFFFFFFF));
+	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0), ((u64) rx_ring->dma >> 32));
+	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0), rx_ring->size);
+	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
+	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), 0);
+	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
+		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+
+	for (i = 0; i < rx_ring->count; i++) {
+		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
+		struct sk_buff *skb;
+
+		if (!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
+				GFP_KERNEL))) {
+			ret_val = 6;
+			goto err_nomem;
+		}
+		skb_reserve(skb, NET_IP_ALIGN);
+		rx_ring->buffer_info[i].skb = skb;
+		rx_ring->buffer_info[i].dma =
+			pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048,
+				       PCI_DMA_FROMDEVICE);
+		rx_desc->buffer_addr = cpu_to_le64(rx_ring->buffer_info[i].dma);
+		memset(skb->data, 0x00, skb->len);
+	}
+
+	return 0;
+
+err_nomem:
+	e1000_free_desc_rings(adapter);
+	return ret_val;
+}
+
+static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
+{
+	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
+	e1000_write_phy_reg(&adapter->hw, 29, 0x001F);
+	e1000_write_phy_reg(&adapter->hw, 30, 0x8FFC);
+	e1000_write_phy_reg(&adapter->hw, 29, 0x001A);
+	e1000_write_phy_reg(&adapter->hw, 30, 0x8FF0);
+}
+
+static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
+{
+	u16 phy_reg;
+
+	/* Because we reset the PHY above, we need to re-force TX_CLK in the
+	 * Extended PHY Specific Control Register to 25MHz clock.  This
+	 * value defaults back to a 2.5MHz clock when the PHY is reset.
+	 */
+	e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
+	phy_reg |= M88E1000_EPSCR_TX_CLK_25;
+	e1000_write_phy_reg(&adapter->hw,
+		M88E1000_EXT_PHY_SPEC_CTRL, phy_reg);
+
+	/* In addition, because of the s/w reset above, we need to enable
+	 * CRS on TX.  This must be set for both full and half duplex
+	 * operation.
+	 */
+	e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
+	phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+	e1000_write_phy_reg(&adapter->hw,
+		M88E1000_PHY_SPEC_CTRL, phy_reg);
+}
+
+static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
+{
+	u32 ctrl_reg;
+	u16 phy_reg;
+
+	/* Setup the Device Control Register for PHY loopback test. */
+
+	ctrl_reg = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+	ctrl_reg |= (E1000_CTRL_ILOS |		/* Invert Loss-Of-Signal */
+		     E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
+		     E1000_CTRL_FRCDPX |	/* Set the Force Duplex Bit */
+		     E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
+		     E1000_CTRL_FD);		/* Force Duplex to FULL */
+
+	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl_reg);
+
+	/* Read the PHY Specific Control Register (0x10) */
+	e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
+
+	/* Clear Auto-Crossover bits in PHY Specific Control Register
+	 * (bits 6:5).
+	 */
+	phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE;
+	e1000_write_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, phy_reg);
+
+	/* Perform software reset on the PHY */
+	e1000_phy_commit(&adapter->hw);
+
+	/* Have to setup TX_CLK and TX_CRS after software reset */
+	e1000_phy_reset_clk_and_crs(adapter);
+
+	e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x8100);
+
+	/* Wait for reset to complete. */
+	udelay(500);
+
+	/* Have to setup TX_CLK and TX_CRS after software reset */
+	e1000_phy_reset_clk_and_crs(adapter);
+
+	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
+	e1000_phy_disable_receiver(adapter);
+
+	/* Set the loopback bit in the PHY control register. */
+	e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_reg);
+	phy_reg |= MII_CR_LOOPBACK;
+	e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_reg);
+
+	/* Setup TX_CLK and TX_CRS one more time. */
+	e1000_phy_reset_clk_and_crs(adapter);
+
+	/* Check Phy Configuration */
+	e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_reg);
+	if (phy_reg != 0x4100)
+		 return 9;
+
+	e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
+	if (phy_reg != 0x0070)
+		return 10;
+
+	e1000_read_phy_reg(&adapter->hw, 29, &phy_reg);
+	if (phy_reg != 0x001A)
+		return 11;
+
+	return 0;
+}
+
+static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
+{
+	u32 ctrl_reg = 0;
+	u32 stat_reg = 0;
+
+	adapter->hw.mac.autoneg = FALSE;
+
+	if (adapter->hw.phy.type == e1000_phy_m88) {
+		/* Auto-MDI/MDIX Off */
+		e1000_write_phy_reg(&adapter->hw,
+				    M88E1000_PHY_SPEC_CTRL, 0x0808);
+		/* reset to update Auto-MDI/MDIX */
+		e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x9140);
+		/* autoneg off */
+		e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x8140);
+	} else if (adapter->hw.phy.type == e1000_phy_gg82563)
+		e1000_write_phy_reg(&adapter->hw,
+		                    GG82563_PHY_KMRN_MODE_CTRL,
+		                    0x1CC);
+
+	ctrl_reg = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+
+	if (adapter->hw.phy.type == e1000_phy_ife) {
+		/* force 100, set loopback */
+		e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x6100);
+
+		/* Now set up the MAC to the same speed/duplex as the PHY. */
+		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+			     E1000_CTRL_SPD_100 |/* Force Speed to 100 */
+			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
+	} else {
+		/* force 1000, set loopback */
+		e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x4140);
+
+		/* Now set up the MAC to the same speed/duplex as the PHY. */
+		ctrl_reg = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+			     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
+	}
+
+	if (adapter->hw.phy.media_type == e1000_media_type_copper &&
+	   adapter->hw.phy.type == e1000_phy_m88) {
+		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
+	} else {
+		/* Set the ILOS bit on the fiber Nic if half duplex link is
+		 * detected. */
+		stat_reg = E1000_READ_REG(&adapter->hw, E1000_STATUS);
+		if ((stat_reg & E1000_STATUS_FD) == 0)
+			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
+	}
+
+	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl_reg);
+
+	/* Disable the receiver on the PHY so that the PHY does not begin to
+	 * autonegotiate when a cable is reconnected to the NIC.
+	 */
+	if (adapter->hw.phy.type == e1000_phy_m88)
+		e1000_phy_disable_receiver(adapter);
+
+	udelay(500);
+
+	return 0;
+}
+
+static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	int link = 0;
+
+	/* special requirements for 82571/82572 fiber adapters */
+
+	/* jump through hoops to make sure link is up because serdes
+	 * link is hardwired up */
+	ctrl |= E1000_CTRL_SLU;
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	/* disable autoneg */
+	ctrl = E1000_READ_REG(hw, E1000_TXCW);
+	ctrl &= ~(1 << 31);
+	E1000_WRITE_REG(hw, E1000_TXCW, ctrl);
+
+	link = (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
+
+	if (!link) {
+		/* set invert loss of signal */
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		ctrl |= E1000_CTRL_ILOS;
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	}
+
+	/* special write to serdes control register to enable SerDes analog
+	 * loopback */
+#define E1000_SERDES_LB_ON 0x410
+	E1000_WRITE_REG(hw, E1000_SCTL, E1000_SERDES_LB_ON);
+	msleep(10);
+
+	return 0;
+}
+
+static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
+{
+	u16 phy_reg = 0;
+	u16 count = 0;
+
+	switch (adapter->hw.mac.type) {
+	case e1000_82543:
+		if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+			/* Attempt to setup Loopback mode on Non-integrated PHY.
+			 * Some PHY registers get corrupted at random, so
+			 * attempt this 10 times.
+			 */
+			while (e1000_nonintegrated_phy_loopback(adapter) &&
+			      count++ < 10);
+			if (count < 11)
+				return 0;
+		}
+		break;
+
+	case e1000_82544:
+	case e1000_82540:
+	case e1000_82545:
+	case e1000_82545_rev_3:
+	case e1000_82546:
+	case e1000_82546_rev_3:
+	case e1000_82541:
+	case e1000_82541_rev_2:
+	case e1000_82547:
+	case e1000_82547_rev_2:
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_82573:
+	case e1000_80003es2lan:
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		return e1000_integrated_phy_loopback(adapter);
+		break;
+
+	default:
+		/* Default PHY loopback work is to read the MII
+		 * control register and assert bit 14 (loopback mode).
+		 */
+		e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_reg);
+		phy_reg |= MII_CR_LOOPBACK;
+		e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_reg);
+		return 0;
+		break;
+	}
+
+	return 8;
+}
+
+/* only call this for fiber/serdes connections to es2lan */
+static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrlext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	/* save CTRL_EXT to restore later, reuse an empty variable (unused
+	   on mac_type 80003es2lan) */
+	adapter->tx_fifo_head = ctrlext;
+
+	/* clear the serdes mode bits, putting the device into mac loopback */
+	ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrlext);
+
+	/* force speed to 1000/FD, link up */
+	ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+	ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX |
+	         E1000_CTRL_SPD_1000 | E1000_CTRL_FD);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	/* set mac loopback */
+	ctrl = E1000_READ_REG(hw, E1000_RCTL);
+	ctrl |= E1000_RCTL_LBM_MAC;
+	E1000_WRITE_REG(hw, E1000_RCTL, ctrl);
+
+	/* set testing mode parameters (no need to reset later) */
+#define KMRNCTRLSTA_OPMODE (0x1F << 16)
+#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582
+	E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA,
+		(KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII));
+
+	return 0;
+}
+
+static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+
+	if (hw->phy.media_type == e1000_media_type_fiber ||
+	    hw->phy.media_type == e1000_media_type_internal_serdes) {
+		switch (hw->mac.type) {
+		case e1000_80003es2lan:
+			return e1000_set_es2lan_mac_loopback(adapter);
+			break;
+		case e1000_82545:
+		case e1000_82546:
+		case e1000_82545_rev_3:
+		case e1000_82546_rev_3:
+			return e1000_set_phy_loopback(adapter);
+			break;
+		case e1000_82571:
+		case e1000_82572:
+			return e1000_set_82571_fiber_loopback(adapter);
+			break;
+		default:
+			rctl = E1000_READ_REG(hw, E1000_RCTL);
+			rctl |= E1000_RCTL_LBM_TCVR;
+			E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+			return 0;
+		}
+	} else if (hw->phy.media_type == e1000_media_type_copper)
+		return e1000_set_phy_loopback(adapter);
+
+	return 7;
+}
+
+static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+	u16 phy_reg;
+
+	rctl = E1000_READ_REG(hw, E1000_RCTL);
+	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+	switch (hw->mac.type) {
+	case e1000_80003es2lan:
+		if (hw->phy.media_type == e1000_media_type_fiber ||
+		    hw->phy.media_type == e1000_media_type_internal_serdes) {
+			/* restore CTRL_EXT, stealing space from tx_fifo_head */
+			E1000_WRITE_REG(hw, E1000_CTRL_EXT, adapter->tx_fifo_head);
+			adapter->tx_fifo_head = 0;
+		}
+		fallthrough;
+	case e1000_82571:
+	case e1000_82572:
+		if (hw->phy.media_type == e1000_media_type_fiber ||
+		    hw->phy.media_type == e1000_media_type_internal_serdes) {
+#define E1000_SERDES_LB_OFF 0x400
+			E1000_WRITE_REG(hw, E1000_SCTL, E1000_SERDES_LB_OFF);
+			msleep(10);
+			break;
+		}
+		fallthrough;
+	case e1000_82545:
+	case e1000_82546:
+	case e1000_82545_rev_3:
+	case e1000_82546_rev_3:
+	default:
+		hw->mac.autoneg = TRUE;
+		if (hw->phy.type == e1000_phy_gg82563)
+			e1000_write_phy_reg(hw,
+					    GG82563_PHY_KMRN_MODE_CTRL,
+					    0x180);
+		e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
+		if (phy_reg & MII_CR_LOOPBACK) {
+			phy_reg &= ~MII_CR_LOOPBACK;
+			e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg);
+			e1000_phy_commit(hw);
+		}
+		break;
+	}
+}
+
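+/*
+ * Loopback test frames are filled with 0xFF, the back half is then
+ * overwritten with 0xAA, and two marker bytes (0xBE, 0xAF) are placed
+ * where e1000_check_lbtest_frame() looks for them on receive.
+ */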
+static void e1000_create_lbtest_frame(struct sk_buff *skb,
+                                      unsigned int frame_size)
+{
+	memset(skb->data, 0xFF, frame_size);
+	frame_size &= ~1;
+	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
+static int e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
+{
+	frame_size &= ~1;
+	if (*(skb->data + 3) == 0xFF) {
+		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+		   (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+			return 0;
+		}
+	}
+	return 13;
+}
+
+static int e1000_run_loopback_test(struct e1000_adapter *adapter)
+{
+	struct e1000_tx_ring *tx_ring = &adapter->test_tx_ring;
+	struct e1000_rx_ring *rx_ring = &adapter->test_rx_ring;
+	struct pci_dev *pdev = adapter->pdev;
+	int i, j, k, l, lc, good_cnt, ret_val = 0;
+	unsigned long time;
+
+	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rx_ring->count - 1);
+
+	/* Calculate the loop count based on the largest descriptor ring.
+	 * The idea is to wrap the largest ring a number of times using 64
+	 * send/receive pairs during each loop.
+	 */
+
+	if (rx_ring->count <= tx_ring->count)
+		lc = ((tx_ring->count / 64) * 2) + 1;
+	else
+		lc = ((rx_ring->count / 64) * 2) + 1;
+
+	k = l = 0;
+	for (j = 0; j <= lc; j++) { /* loop count loop */
+		for (i = 0; i < 64; i++) { /* send the packets */
+			e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
+					1024);
+			pci_dma_sync_single_for_device(pdev,
+					tx_ring->buffer_info[k].dma,
+				    	tx_ring->buffer_info[k].length,
+				    	PCI_DMA_TODEVICE);
+			if (unlikely(++k == tx_ring->count)) k = 0;
+		}
+		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), k);
+		msleep(200);
+		time = jiffies; /* set the start time for the receive */
+		good_cnt = 0;
+		do { /* receive the sent packets */
+			pci_dma_sync_single_for_cpu(pdev,
+			                rx_ring->buffer_info[l].dma,
+			                E1000_RXBUFFER_2048,
+			                PCI_DMA_FROMDEVICE);
+
+			ret_val = e1000_check_lbtest_frame(
+					rx_ring->buffer_info[l].skb,
+				   	1024);
+			if (!ret_val)
+				good_cnt++;
+			if (unlikely(++l == rx_ring->count)) l = 0;
+			/* time + 20 msecs (200 msecs on 2.4) is more than
+			 * enough time to complete the receives, if it's
+			 * exceeded, break and error off
+			 */
+		} while (good_cnt < 64 && jiffies < (time + 20));
+		if (good_cnt != 64) {
+			ret_val = 13; /* ret_val is the same as mis-compare */
+			break;
+		}
+		if (jiffies >= (time + 20)) {
+			ret_val = 14; /* error code for time out error */
+			break;
+		}
+	} /* end loop count loop */
+	return ret_val;
+}
+
+static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
+{
+	/* PHY loopback cannot be performed if SoL/IDER
+	 * sessions are active */
+	if (e1000_check_reset_block(&adapter->hw)) {
+		DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
+		        "when SoL/IDER is active.\n");
+		*data = 0;
+		goto out;
+	}
+
+	if ((*data = e1000_setup_desc_rings(adapter)))
+		goto out;
+	if ((*data = e1000_setup_loopback_test(adapter)))
+		goto err_loopback;
+	*data = e1000_run_loopback_test(adapter);
+	e1000_loopback_cleanup(adapter);
+
+err_loopback:
+	e1000_free_desc_rings(adapter);
+out:
+	return *data;
+}
+
+static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
+{
+	*data = 0;
+	if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
+		int i = 0;
+		adapter->hw.mac.serdes_has_link = FALSE;
+
+		/* On some blade server designs, link establishment
+		 * could take as long as 2-3 minutes */
+		do {
+			e1000_check_for_link(&adapter->hw);
+			if (adapter->hw.mac.serdes_has_link == TRUE)
+				return *data;
+			msleep(20);
+		} while (i++ < 3750);
+
+		*data = 1;
+	} else {
+		e1000_check_for_link(&adapter->hw);
+		if (adapter->hw.mac.autoneg)
+			msleep(4000);
+
+		if (!(E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
+			*data = 1;
+		}
+	}
+	return *data;
+}
+
+static int e1000_diag_test_count(struct net_device *netdev)
+{
+	return E1000_TEST_LEN;
+}
+
+static void e1000_diag_test(struct net_device *netdev,
+                            struct ethtool_test *eth_test, u64 *data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	u16 autoneg_advertised;
+	u8 forced_speed_duplex, autoneg;
+	bool if_running = netif_running(netdev);
+
+	set_bit(__E1000_TESTING, &adapter->state);
+	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+		/* Offline tests */
+
+		/* save speed, duplex, autoneg settings */
+		autoneg_advertised = adapter->hw.phy.autoneg_advertised;
+		forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
+		autoneg = adapter->hw.mac.autoneg;
+
+		DPRINTK(HW, INFO, "offline testing starting\n");
+
+		/* Link test performed before hardware reset so autoneg doesn't
+		 * interfere with test result */
+		if (e1000_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		if (if_running)
+			/* indicate we're in test mode */
+			dev_close(netdev);
+		else
+			e1000_reset(adapter);
+
+		if (e1000_reg_test(adapter, &data[0]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		e1000_reset(adapter);
+		if (e1000_eeprom_test(adapter, &data[1]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		e1000_reset(adapter);
+		if (e1000_intr_test(adapter, &data[2]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		e1000_reset(adapter);
+		/* make sure the phy is powered up */
+		if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+			e1000_power_up_phy(&adapter->hw);
+			e1000_setup_link(&adapter->hw);
+		}
+		if (e1000_loopback_test(adapter, &data[3]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* restore speed, duplex, autoneg settings */
+		adapter->hw.phy.autoneg_advertised = autoneg_advertised;
+		adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
+		adapter->hw.mac.autoneg = autoneg;
+
+		/* force this routine to wait until autoneg complete/timeout */
+		adapter->hw.phy.autoneg_wait_to_complete = TRUE;
+		e1000_reset(adapter);
+		adapter->hw.phy.autoneg_wait_to_complete = FALSE;
+
+		clear_bit(__E1000_TESTING, &adapter->state);
+		if (if_running)
+			dev_open(netdev);
+	} else {
+		DPRINTK(HW, INFO, "online testing starting\n");
+		/* Online tests */
+		if (e1000_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* Online tests aren't run; pass by default */
+		data[0] = 0;
+		data[1] = 0;
+		data[2] = 0;
+		data[3] = 0;
+
+		clear_bit(__E1000_TESTING, &adapter->state);
+	}
+	msleep_interruptible(4 * 1000);
+}
+
+static int e1000_wol_exclusion(struct e1000_adapter *adapter,
+                               struct ethtool_wolinfo *wol)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	int retval = 1; /* fail by default */
+
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82542:
+	case E1000_DEV_ID_82543GC_FIBER:
+	case E1000_DEV_ID_82543GC_COPPER:
+	case E1000_DEV_ID_82544EI_FIBER:
+	case E1000_DEV_ID_82546EB_QUAD_COPPER:
+	case E1000_DEV_ID_82545EM_FIBER:
+	case E1000_DEV_ID_82545EM_COPPER:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER:
+	case E1000_DEV_ID_82546GB_PCIE:
+	case E1000_DEV_ID_82571EB_SERDES_QUAD:
+		/* these don't support WoL at all */
+		wol->supported = 0;
+		break;
+	case E1000_DEV_ID_82546EB_FIBER:
+	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82571EB_FIBER:
+	case E1000_DEV_ID_82571EB_SERDES:
+	case E1000_DEV_ID_82571EB_COPPER:
+		/* Wake events not supported on port B */
+		if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1) {
+			wol->supported = 0;
+			break;
+		}
+		/* return success for non excluded adapter ports */
+		retval = 0;
+		break;
+	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82571EB_QUAD_FIBER:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
+	case E1000_DEV_ID_82571PT_QUAD_COPPER:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		/* quad port adapters only support WoL on port A */
+		if (!(adapter->flags & E1000_FLAG_QUAD_PORT_A)) {
+			wol->supported = 0;
+			break;
+		}
+		/* return success for non excluded adapter ports */
+		retval = 0;
+		break;
+	default:
+		/* From this generation onward, dual port cards only support
+		 * WoL on port A unless it was enabled in the EEPROM for
+		 * port B, so exclude FUNC_1 ports from having WoL enabled. */
+		if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1 &&
+		    !adapter->eeprom_wol) {
+			wol->supported = 0;
+			break;
+		}
+
+		retval = 0;
+	}
+
+	return retval;
+}
+
+static void e1000_get_wol(struct net_device *netdev,
+                          struct ethtool_wolinfo *wol)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	wol->supported = WAKE_UCAST | WAKE_MCAST |
+	                 WAKE_BCAST | WAKE_MAGIC;
+	wol->wolopts = 0;
+
+	/* this function will set ->supported = 0 and return 1 if wol is not
+	 * supported by this hardware */
+	if (e1000_wol_exclusion(adapter, wol))
+		return;
+
+	/* apply any specific unsupported masks here */
+	switch (adapter->hw.device_id) {
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		/* KSP3 does not support UCAST wake-ups */
+		wol->supported &= ~WAKE_UCAST;
+
+		if (adapter->wol & E1000_WUFC_EX)
+			DPRINTK(DRV, ERR, "Interface does not support "
+		        "directed (unicast) frame wake-up packets\n");
+		break;
+	default:
+		break;
+	}
+
+	if (adapter->wol & E1000_WUFC_EX)
+		wol->wolopts |= WAKE_UCAST;
+	if (adapter->wol & E1000_WUFC_MC)
+		wol->wolopts |= WAKE_MCAST;
+	if (adapter->wol & E1000_WUFC_BC)
+		wol->wolopts |= WAKE_BCAST;
+	if (adapter->wol & E1000_WUFC_MAG)
+		wol->wolopts |= WAKE_MAGIC;
+
+	return;
+}
+
+static int e1000_set_wol(struct net_device *netdev,
+                         struct ethtool_wolinfo *wol)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+		return -EOPNOTSUPP;
+
+	if (e1000_wol_exclusion(adapter, wol))
+		return wol->wolopts ? -EOPNOTSUPP : 0;
+
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		if (wol->wolopts & WAKE_UCAST) {
+			DPRINTK(DRV, ERR, "Interface does not support "
+		        "directed (unicast) frame wake-up packets\n");
+			return -EOPNOTSUPP;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* these settings will always override what we currently have */
+	adapter->wol = 0;
+
+	if (wol->wolopts & WAKE_UCAST)
+		adapter->wol |= E1000_WUFC_EX;
+	if (wol->wolopts & WAKE_MCAST)
+		adapter->wol |= E1000_WUFC_MC;
+	if (wol->wolopts & WAKE_BCAST)
+		adapter->wol |= E1000_WUFC_BC;
+	if (wol->wolopts & WAKE_MAGIC)
+		adapter->wol |= E1000_WUFC_MAG;
+
+	return 0;
+}
+
+/* toggle LED 4 times per second = 2 "blinks" per second */
+#define E1000_ID_INTERVAL	(HZ/4)
+
+/* bit defines for adapter->led_status */
+#define E1000_LED_ON		0
+
+static void e1000_led_blink_callback(unsigned long data)
+{
+	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+	if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
+		e1000_led_off(&adapter->hw);
+	else
+		e1000_led_on(&adapter->hw);
+
+	mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
+}
+
+static int e1000_phys_id(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (!data)
+		data = INT_MAX;
+
+	if (adapter->hw.mac.type < e1000_82571) {
+		if (!adapter->blink_timer.function) {
+			init_timer(&adapter->blink_timer);
+			adapter->blink_timer.function = e1000_led_blink_callback;
+			adapter->blink_timer.data = (unsigned long) adapter;
+		}
+		e1000_setup_led(&adapter->hw);
+		mod_timer(&adapter->blink_timer, jiffies);
+		msleep_interruptible(data * 1000);
+		del_timer_sync(&adapter->blink_timer);
+	} else if (adapter->hw.phy.type == e1000_phy_ife) {
+		if (!adapter->blink_timer.function) {
+			init_timer(&adapter->blink_timer);
+			adapter->blink_timer.function = e1000_led_blink_callback;
+			adapter->blink_timer.data = (unsigned long) adapter;
+		}
+		mod_timer(&adapter->blink_timer, jiffies);
+		msleep_interruptible(data * 1000);
+		del_timer_sync(&adapter->blink_timer);
+		e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0);
+	} else {
+		e1000_blink_led(&adapter->hw);
+		msleep_interruptible(data * 1000);
+	}
+
+	e1000_led_off(&adapter->hw);
+	clear_bit(E1000_LED_ON, &adapter->led_status);
+	e1000_cleanup_led(&adapter->hw);
+
+	return 0;
+}
+
+static int e1000_get_coalesce(struct net_device *netdev,
+			      struct ethtool_coalesce *ec)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (adapter->itr_setting <= 3)
+		ec->rx_coalesce_usecs = adapter->itr_setting;
+	else
+		ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
+
+	return 0;
+}
+
+static int e1000_set_coalesce(struct net_device *netdev,
+			      struct ethtool_coalesce *ec)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
+	    ((ec->rx_coalesce_usecs > 3) &&
+	     (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
+	    (ec->rx_coalesce_usecs == 2))
+		return -EINVAL;
+
+	if (!(adapter->flags & E1000_FLAG_HAS_INTR_MODERATION))
+		return -ENOTSUPP;
+
+	if (ec->rx_coalesce_usecs <= 3) {
+		adapter->itr = 20000;
+		adapter->itr_setting = ec->rx_coalesce_usecs;
+	} else {
+		adapter->itr = (1000000 / ec->rx_coalesce_usecs);
+		adapter->itr_setting = adapter->itr & ~3;
+	}
+
+	if (adapter->itr_setting != 0)
+		E1000_WRITE_REG(&adapter->hw, E1000_ITR,
+			1000000000 / (adapter->itr * 256));
+	else
+		E1000_WRITE_REG(&adapter->hw, E1000_ITR, 0);
+
+	return 0;
+}
+
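+/*
+ * Worked example for the conversion above (illustration only): the ITR
+ * register counts in 256 ns units, so a requested rx_coalesce_usecs of
+ * 100 us first becomes an interrupt rate of 1000000 / 100 = 10000
+ * ints/sec, and E1000_ITR is then programmed with
+ * 1000000000 / (10000 * 256) ~= 390, i.e. roughly one interrupt every
+ * 100 us. Values 0, 1 and 3 are control codes (off, dynamic, dynamic
+ * conservative) rather than microsecond counts, and 2 is rejected.
+ */
+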
+static int e1000_nway_reset(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	if (netif_running(netdev))
+		e1000_reinit_locked(adapter);
+	return 0;
+}
+
+static int e1000_get_stats_count(struct net_device *netdev)
+{
+	return E1000_STATS_LEN;
+}
+
+static void e1000_get_ethtool_stats(struct net_device *netdev,
+                                    struct ethtool_stats *stats, u64 *data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_E1000_MQ
+	u64 *queue_stat;
+	int stat_count = sizeof(struct e1000_queue_stats) / sizeof(u64);
+	int j, k;
+#endif
+	int i;
+
+	e1000_update_stats(adapter);
+	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+		char *p = (char *)adapter + e1000_gstrings_stats[i].stat_offset;
+		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
+			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+	}
+#ifdef CONFIG_E1000_MQ
+	if (adapter->num_tx_queues > 1) {
+		for (j = 0; j < adapter->num_tx_queues; j++) {
+			queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
+			for (k = 0; k < stat_count; k++)
+				data[i + k] = queue_stat[k];
+			i += k;
+		}
+	}
+	if (adapter->num_rx_queues > 1) {
+		for (j = 0; j < adapter->num_rx_queues; j++) {
+			queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
+			for (k = 0; k < stat_count; k++)
+				data[i + k] = queue_stat[k];
+			i += k;
+		}
+	}
+#endif
+/*	BUG_ON(i != E1000_STATS_LEN); */
+}
+
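+/*
+ * The loop above extracts each statistic from the adapter structure by
+ * byte offset. A typical entry of e1000_gstrings_stats (sketch only; the
+ * real table is defined earlier in this file and the field name below is
+ * an assumption) would look like
+ *
+ *   { "rx_packets", sizeof(u64),
+ *     offsetof(struct e1000_adapter, stats.gprc) },
+ *
+ * so data[i] is read as a u64 or a u32 depending on sizeof_stat.
+ */
+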
+static void e1000_get_strings(struct net_device *netdev, u32 stringset,
+                              u8 *data)
+{
+#ifdef CONFIG_E1000_MQ
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+#endif
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_TEST:
+		memcpy(data, *e1000_gstrings_test,
+			E1000_TEST_LEN*ETH_GSTRING_LEN);
+		break;
+	case ETH_SS_STATS:
+		for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, e1000_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+#ifdef CONFIG_E1000_MQ
+		if (adapter->num_tx_queues > 1) {
+			for (i = 0; i < adapter->num_tx_queues; i++) {
+				sprintf(p, "tx_queue_%u_packets", i);
+				p += ETH_GSTRING_LEN;
+				sprintf(p, "tx_queue_%u_bytes", i);
+				p += ETH_GSTRING_LEN;
+			}
+		}
+		if (adapter->num_rx_queues > 1) {
+			for (i = 0; i < adapter->num_rx_queues; i++) {
+				sprintf(p, "rx_queue_%u_packets", i);
+				p += ETH_GSTRING_LEN;
+				sprintf(p, "rx_queue_%u_bytes", i);
+				p += ETH_GSTRING_LEN;
+			}
+		}
+#endif
+/*		BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
+		break;
+	}
+}
+
+static struct ethtool_ops e1000_ethtool_ops = {
+	.get_settings           = e1000_get_settings,
+	.set_settings           = e1000_set_settings,
+	.get_drvinfo            = e1000_get_drvinfo,
+	.get_regs_len           = e1000_get_regs_len,
+	.get_regs               = e1000_get_regs,
+	.get_wol                = e1000_get_wol,
+	.set_wol                = e1000_set_wol,
+	.get_msglevel           = e1000_get_msglevel,
+	.set_msglevel           = e1000_set_msglevel,
+	.nway_reset             = e1000_nway_reset,
+	.get_link               = ethtool_op_get_link,
+	.get_eeprom_len         = e1000_get_eeprom_len,
+	.get_eeprom             = e1000_get_eeprom,
+	.set_eeprom             = e1000_set_eeprom,
+	.get_ringparam          = e1000_get_ringparam,
+	.set_ringparam          = e1000_set_ringparam,
+	.get_pauseparam         = e1000_get_pauseparam,
+	.set_pauseparam         = e1000_set_pauseparam,
+	.get_rx_csum            = e1000_get_rx_csum,
+	.set_rx_csum            = e1000_set_rx_csum,
+	.get_tx_csum            = e1000_get_tx_csum,
+	.set_tx_csum            = e1000_set_tx_csum,
+	.get_sg                 = ethtool_op_get_sg,
+	.set_sg                 = ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+	.get_tso                = ethtool_op_get_tso,
+	.set_tso                = e1000_set_tso,
+#endif
+	.self_test_count        = e1000_diag_test_count,
+	.self_test              = e1000_diag_test,
+	.get_strings            = e1000_get_strings,
+	.phys_id                = e1000_phys_id,
+	.get_stats_count        = e1000_get_stats_count,
+	.get_ethtool_stats      = e1000_get_ethtool_stats,
+#ifdef ETHTOOL_GPERMADDR
+	.get_perm_addr          = ethtool_op_get_perm_addr,
+#endif
+	.get_coalesce           = e1000_get_coalesce,
+	.set_coalesce           = e1000_set_coalesce,
+};
+
+void e1000_set_ethtool_ops(struct net_device *netdev)
+{
+	SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+}
+#endif	/* SIOCETHTOOL */
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/Makefile	2022-03-21 12:58:29.779885158 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_param.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_E1000_NEW) += rt_e1000_new.o
+
+rt_e1000_new-y := \
+	e1000_80003es2lan.o \
+	e1000_82540.o \
+	e1000_82541.o \
+	e1000_82542.o \
+	e1000_82543.o \
+	e1000_82571.o \
+	e1000_api.o \
+	e1000_ich8lan.o \
+	e1000_mac.o \
+	e1000_main.o \
+	e1000_manage.o \
+	e1000_nvm.o \
+	e1000_param.o \
+	e1000_phy.o
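+
+# Illustrative note (assumption): with CONFIG_XENO_DRIVERS_NET_DRV_E1000_NEW=m
+# set in the kernel configuration, the objects listed above are linked into a
+# single module named rt_e1000_new.ko.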
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_param.c	2022-03-21 12:58:29.774885207 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_hw.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+#include <linux/netdevice.h>
+
+#include "e1000.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define E1000_MAX_NIC 32
+
+#define OPTION_UNSET   -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED  1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
+#ifndef module_param_array
+/* Module Parameters are always initialized to -1, so that the driver
+ * can tell the difference between no user-specified value and the
+ * user asking for the default value.
+ * The true default values are loaded when e1000_check_options is called.
+ *
+ * This is a GCC extension to ANSI C.
+ * See the item "Labeled Elements in Initializers" in the section
+ * "Extensions to the C Language Family" of the GCC documentation.
+ */
+
+#define E1000_PARAM(X, desc) \
+	static const int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
+	MODULE_PARM(X, "1-" __MODULE_STRING(E1000_MAX_NIC) "i"); \
+	MODULE_PARM_DESC(X, desc);
+#else
+#define E1000_PARAM(X, desc) \
+	static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
+	static unsigned int num_##X = 0; \
+	module_param_array_named(X, X, int, &num_##X, 0); \
+	MODULE_PARM_DESC(X, desc);
+#endif
+
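+/*
+ * Example usage (assumption, for illustration): each parameter takes one
+ * comma-separated value per NIC, so loading the module as
+ *
+ *   modprobe rt_e1000_new TxDescriptors=4096,256 InterruptThrottleRate=0
+ *
+ * gives the first adapter 4096 transmit descriptors and the second 256,
+ * and turns interrupt throttling off on the first adapter only; adapters
+ * without an explicit value fall back to OPTION_UNSET and thus to opt.def
+ * in e1000_check_options().
+ */
+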
+/* Transmit Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
+
+/* Receive Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+E1000_PARAM(RxDescriptors, "Number of receive descriptors");
+
+/* User Specified Speed Override
+ *
+ * Valid Range: 0, 10, 100, 1000
+ *  - 0    - auto-negotiate at all supported speeds
+ *  - 10   - only link at 10 Mbps
+ *  - 100  - only link at 100 Mbps
+ *  - 1000 - only link at 1000 Mbps
+ *
+ * Default Value: 0
+ */
+E1000_PARAM(Speed, "Speed setting");
+
+/* User Specified Duplex Override
+ *
+ * Valid Range: 0-2
+ *  - 0 - auto-negotiate for duplex
+ *  - 1 - only link at half duplex
+ *  - 2 - only link at full duplex
+ *
+ * Default Value: 0
+ */
+E1000_PARAM(Duplex, "Duplex setting");
+
+/* Auto-negotiation Advertisement Override
+ *
+ * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber)
+ *
+ * The AutoNeg value is a bit mask describing which speed and duplex
+ * combinations should be advertised during auto-negotiation.
+ * The supported speed and duplex modes are listed below
+ *
+ * Bit           7     6     5      4      3     2     1      0
+ * Speed (Mbps)  N/A   N/A   1000   N/A    100   100   10     10
+ * Duplex                    Full          Full  Half  Full   Half
+ *
+ * Default Value: 0x2F (copper); 0x20 (fiber)
+ */
+E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
+#define AUTONEG_ADV_DEFAULT  0x2F
+#define AUTONEG_ADV_MASK     0x2F
+
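+/*
+ * Worked example: the default of 0x2F decomposes, using the bit table
+ * above, into
+ *   0x20 (1000/FD) | 0x08 (100/FD) | 0x04 (100/HD) | 0x02 (10/FD) | 0x01 (10/HD)
+ * i.e. every supported speed/duplex combination is advertised except
+ * 1000 Mbps half duplex, which the hardware does not support.
+ */
+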
+/* User Specified Flow Control Override
+ *
+ * Valid Range: 0-3
+ *  - 0 - No Flow Control
+ *  - 1 - Rx only, respond to PAUSE frames but do not generate them
+ *  - 2 - Tx only, generate PAUSE frames but ignore them on receive
+ *  - 3 - Full Flow Control Support
+ *
+ * Default Value: Read flow control settings from the EEPROM
+ */
+E1000_PARAM(FlowControl, "Flow Control setting");
+#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
+
+/* XsumRX - Receive Checksum Offload Enable/Disable
+ *
+ * Valid Range: 0, 1
+ *  - 0 - disables all checksum offload
+ *  - 1 - enables receive IP/TCP/UDP checksum offload
+ *        on NICs based on the 82543 and newer controllers
+ *
+ * Default Value: 1
+ */
+E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
+
+/* Transmit Interrupt Delay in units of 1.024 microseconds
+ *  Tx interrupt delay typically needs to be set to something non-zero
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+#define DEFAULT_TIDV                   0
+#define MAX_TXDELAY               0xFFFF
+#define MIN_TXDELAY                    0
+
+/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
+#define DEFAULT_TADV                   0
+#define MAX_TXABSDELAY            0xFFFF
+#define MIN_TXABSDELAY                 0
+
+/* Receive Interrupt Delay in units of 1.024 microseconds
+ *   The hardware will likely hang if you set this to anything but zero.
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+#define DEFAULT_RDTR                   0
+#define MAX_RXDELAY               0xFFFF
+#define MIN_RXDELAY                    0
+
+/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+#define DEFAULT_RADV                   0
+#define MAX_RXABSDELAY            0xFFFF
+#define MIN_RXABSDELAY                 0
+
+/* Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+ */
+E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+#define DEFAULT_ITR                    0
+#define MAX_ITR                   100000
+#define MIN_ITR                      100
+
+/* Enable Smart Power Down of the PHY
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled)
+ */
+E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
+
+/* Enable Kumeran Lock Loss workaround
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
+
+
+struct e1000_option {
+	enum { enable_option, range_option, list_option } type;
+	const char *name;
+	const char *err;
+	int def;
+	union {
+		struct { /* range_option info */
+			int min;
+			int max;
+		} r;
+		struct { /* list_option info */
+			int nr;
+			struct e1000_opt_list { int i; char *str; } *p;
+		} l;
+	} arg;
+};
+
+static int e1000_validate_option(unsigned int *value,
+                                 const struct e1000_option *opt,
+                                 struct e1000_adapter *adapter)
+{
+	if (*value == OPTION_UNSET) {
+		*value = opt->def;
+		return 0;
+	}
+
+	switch (opt->type) {
+	case enable_option:
+		switch (*value) {
+		case OPTION_ENABLED:
+			DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
+			return 0;
+		case OPTION_DISABLED:
+			DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
+			return 0;
+		}
+		break;
+	case range_option:
+		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+			DPRINTK(PROBE, INFO,
+					"%s set to %i\n", opt->name, *value);
+			return 0;
+		}
+		break;
+	case list_option: {
+		int i;
+		struct e1000_opt_list *ent;
+
+		for (i = 0; i < opt->arg.l.nr; i++) {
+			ent = &opt->arg.l.p[i];
+			if (*value == ent->i) {
+				if (ent->str[0] != '\0')
+					DPRINTK(PROBE, INFO, "%s\n", ent->str);
+				return 0;
+			}
+		}
+	}
+		break;
+	default:
+		BUG();
+	}
+
+	DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n",
+	       opt->name, *value, opt->err);
+	*value = opt->def;
+	return -1;
+}
+
+static void e1000_check_fiber_options(struct e1000_adapter *adapter);
+static void e1000_check_copper_options(struct e1000_adapter *adapter);
+
+/**
+ * e1000_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command-line parameters for valid user
+ * input.  If an invalid value is given, or if no user-specified
+ * value exists, a default value is used.  The final value is stored
+ * in a variable in the adapter structure.
+ **/
+void e1000_check_options(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	int bd = adapter->bd_number;
+	if (bd >= E1000_MAX_NIC) {
+		DPRINTK(PROBE, NOTICE,
+		       "Warning: no configuration for board #%i\n", bd);
+		DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
+#ifndef module_param_array
+		bd = E1000_MAX_NIC;
+#endif
+	}
+
+	{ /* Transmit Descriptor Count */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Transmit Descriptors",
+			.err  = "using default of "
+				__MODULE_STRING(E1000_DEFAULT_TXD),
+			.def  = E1000_DEFAULT_TXD,
+			.arg  = { .r = { .min = E1000_MIN_TXD }}
+		};
+		struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+		int i;
+		opt.arg.r.max = hw->mac.type < e1000_82544 ?
+			E1000_MAX_TXD : E1000_MAX_82544_TXD;
+
+#ifdef module_param_array
+		if (num_TxDescriptors > bd) {
+#endif
+			tx_ring->count = TxDescriptors[bd];
+			e1000_validate_option(&tx_ring->count, &opt, adapter);
+			tx_ring->count = ALIGN(tx_ring->count,
+			                       REQ_TX_DESCRIPTOR_MULTIPLE);
+#ifdef module_param_array
+		} else {
+			tx_ring->count = opt.def;
+		}
+#endif
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			tx_ring[i].count = tx_ring->count;
+	}
+	{ /* Receive Descriptor Count */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Receive Descriptors",
+			.err  = "using default of "
+				__MODULE_STRING(E1000_DEFAULT_RXD),
+			.def  = E1000_DEFAULT_RXD,
+			.arg  = { .r = { .min = E1000_MIN_RXD }}
+		};
+		struct e1000_rx_ring *rx_ring = adapter->rx_ring;
+		int i;
+		opt.arg.r.max = hw->mac.type < e1000_82544 ? E1000_MAX_RXD :
+			E1000_MAX_82544_RXD;
+
+#ifdef module_param_array
+		if (num_RxDescriptors > bd) {
+#endif
+			rx_ring->count = RxDescriptors[bd];
+			e1000_validate_option(&rx_ring->count, &opt, adapter);
+			rx_ring->count = ALIGN(rx_ring->count,
+			                       REQ_RX_DESCRIPTOR_MULTIPLE);
+#ifdef module_param_array
+		} else {
+			rx_ring->count = opt.def;
+		}
+#endif
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			rx_ring[i].count = rx_ring->count;
+	}
+	{ /* Checksum Offload Enable/Disable */
+		struct e1000_option opt = {
+			.type = enable_option,
+			.name = "Checksum Offload",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+#ifdef module_param_array
+		if (num_XsumRX > bd) {
+#endif
+			unsigned int rx_csum = XsumRX[bd];
+			e1000_validate_option(&rx_csum, &opt, adapter);
+			adapter->rx_csum = rx_csum;
+#ifdef module_param_array
+		} else {
+			adapter->rx_csum = opt.def;
+		}
+#endif
+	}
+	{ /* Flow Control */
+
+		struct e1000_opt_list fc_list[] =
+			{{ e1000_fc_none,    "Flow Control Disabled" },
+			 { e1000_fc_rx_pause,"Flow Control Receive Only" },
+			 { e1000_fc_tx_pause,"Flow Control Transmit Only" },
+			 { e1000_fc_full,    "Flow Control Enabled" },
+			 { e1000_fc_default, "Flow Control Hardware Default" }};
+
+		struct e1000_option opt = {
+			.type = list_option,
+			.name = "Flow Control",
+			.err  = "reading default settings from EEPROM",
+			.def  = e1000_fc_default,
+			.arg  = { .l = { .nr = ARRAY_SIZE(fc_list),
+					 .p = fc_list }}
+		};
+
+#ifdef module_param_array
+		if (num_FlowControl > bd) {
+#endif
+			unsigned int fc = FlowControl[bd];
+			e1000_validate_option(&fc, &opt, adapter);
+			hw->fc.original_type = fc;
+			hw->fc.type = fc;
+#ifdef module_param_array
+		} else {
+			hw->fc.original_type = opt.def;
+			hw->fc.type = opt.def;
+		}
+#endif
+	}
+	{ /* Transmit Interrupt Delay */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Transmit Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_TIDV),
+			.def  = DEFAULT_TIDV,
+			.arg  = { .r = { .min = MIN_TXDELAY,
+					 .max = MAX_TXDELAY }}
+		};
+
+#ifdef module_param_array
+		if (num_TxIntDelay > bd) {
+#endif
+			adapter->tx_int_delay = TxIntDelay[bd];
+			e1000_validate_option(&adapter->tx_int_delay, &opt,
+			                      adapter);
+#ifdef module_param_array
+		} else {
+			adapter->tx_int_delay = opt.def;
+		}
+#endif
+	}
+	{ /* Transmit Absolute Interrupt Delay */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Transmit Absolute Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_TADV),
+			.def  = DEFAULT_TADV,
+			.arg  = { .r = { .min = MIN_TXABSDELAY,
+					 .max = MAX_TXABSDELAY }}
+		};
+
+#ifdef module_param_array
+		if (num_TxAbsIntDelay > bd) {
+#endif
+			adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+			e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+			                      adapter);
+#ifdef module_param_array
+		} else {
+			adapter->tx_abs_int_delay = opt.def;
+		}
+#endif
+	}
+	{ /* Receive Interrupt Delay */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Receive Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_RDTR),
+			.def  = DEFAULT_RDTR,
+			.arg  = { .r = { .min = MIN_RXDELAY,
+					 .max = MAX_RXDELAY }}
+		};
+
+		/* Modify min and default for 82573 (slow ping workaround):
+		 * a value greater than 8 needs to be set for RDTR. */
+
+#ifdef module_param_array
+		if (num_RxIntDelay > bd) {
+#endif
+			adapter->rx_int_delay = RxIntDelay[bd];
+			e1000_validate_option(&adapter->rx_int_delay, &opt,
+			                      adapter);
+#ifdef module_param_array
+		} else {
+			adapter->rx_int_delay = opt.def;
+		}
+#endif
+	}
+	{ /* Receive Absolute Interrupt Delay */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Receive Absolute Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_RADV),
+			.def  = DEFAULT_RADV,
+			.arg  = { .r = { .min = MIN_RXABSDELAY,
+					 .max = MAX_RXABSDELAY }}
+		};
+
+#ifdef module_param_array
+		if (num_RxAbsIntDelay > bd) {
+#endif
+			adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+			e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+			                      adapter);
+#ifdef module_param_array
+		} else {
+			adapter->rx_abs_int_delay = opt.def;
+		}
+#endif
+	}
+	{ /* Interrupt Throttling Rate */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Interrupt Throttling Rate (ints/sec)",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_ITR),
+			.def  = DEFAULT_ITR,
+			.arg  = { .r = { .min = MIN_ITR,
+					 .max = MAX_ITR }}
+		};
+
+#ifdef module_param_array
+		if (num_InterruptThrottleRate > bd) {
+#endif
+			adapter->itr = InterruptThrottleRate[bd];
+			switch (adapter->itr) {
+			case 0:
+				DPRINTK(PROBE, INFO, "%s turned off\n",
+				        opt.name);
+				break;
+			case 1:
+				DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
+					opt.name);
+				adapter->itr_setting = adapter->itr;
+				adapter->itr = 20000;
+				break;
+			case 3:
+				DPRINTK(PROBE, INFO,
+				        "%s set to dynamic conservative mode\n",
+					opt.name);
+				adapter->itr_setting = adapter->itr;
+				adapter->itr = 20000;
+				break;
+			default:
+				e1000_validate_option(&adapter->itr, &opt,
+				        adapter);
+				/* save the setting, because the dynamic bits change itr */
+				/* clear the lower two bits because they are
+				 * used as control */
+				adapter->itr_setting = adapter->itr & ~3;
+				break;
+			}
+#ifdef module_param_array
+		} else {
+			adapter->itr_setting = opt.def;
+			adapter->itr = 20000;
+		}
+#endif
+	}
+	{ /* Smart Power Down */
+		struct e1000_option opt = {
+			.type = enable_option,
+			.name = "PHY Smart Power Down",
+			.err  = "defaulting to Disabled",
+			.def  = OPTION_DISABLED
+		};
+
+#ifdef module_param_array
+		if (num_SmartPowerDownEnable > bd) {
+#endif
+			unsigned int spd = SmartPowerDownEnable[bd];
+			e1000_validate_option(&spd, &opt, adapter);
+			adapter->flags |= spd ? E1000_FLAG_SMART_POWER_DOWN : 0;
+#ifdef module_param_array
+		} else {
+			adapter->flags &= ~E1000_FLAG_SMART_POWER_DOWN;
+		}
+#endif
+	}
+	{ /* Kumeran Lock Loss Workaround */
+		struct e1000_option opt = {
+			.type = enable_option,
+			.name = "Kumeran Lock Loss Workaround",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+#ifdef module_param_array
+		if (num_KumeranLockLoss > bd) {
+#endif
+			unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
+			e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
+			if (hw->mac.type == e1000_ich8lan)
+				e1000_set_kmrn_lock_loss_workaround_ich8lan(hw,
+				                                kmrn_lock_loss);
+#ifdef module_param_array
+		} else {
+			if (hw->mac.type == e1000_ich8lan)
+				e1000_set_kmrn_lock_loss_workaround_ich8lan(hw,
+				                                       opt.def);
+		}
+#endif
+	}
+
+	switch (hw->phy.media_type) {
+	case e1000_media_type_fiber:
+	case e1000_media_type_internal_serdes:
+		e1000_check_fiber_options(adapter);
+		break;
+	case e1000_media_type_copper:
+		e1000_check_copper_options(adapter);
+		break;
+	default:
+		BUG();
+	}
+
+}
+
+/**
+ * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on fiber adapters
+ **/
+static void e1000_check_fiber_options(struct e1000_adapter *adapter)
+{
+	int bd = adapter->bd_number;
+#ifndef module_param_array
+	bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
+	if ((Speed[bd] != OPTION_UNSET)) {
+#else
+	if (num_Speed > bd) {
+#endif
+		DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
+		       "parameter ignored\n");
+	}
+
+#ifndef module_param_array
+	if ((Duplex[bd] != OPTION_UNSET)) {
+#else
+	if (num_Duplex > bd) {
+#endif
+		DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
+		       "parameter ignored\n");
+	}
+
+#ifndef module_param_array
+	if ((AutoNeg[bd] != OPTION_UNSET) && (AutoNeg[bd] != 0x20)) {
+#else
+	if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
+#endif
+		DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
+				 "not valid for fiber adapters, "
+				 "parameter ignored\n");
+	}
+}
+
+/**
+ * e1000_check_copper_options - Range Checking for Link Options, Copper Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on copper adapters
+ **/
+static void e1000_check_copper_options(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned int speed, dplx, an;
+	int bd = adapter->bd_number;
+#ifndef module_param_array
+	bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
+#endif
+
+	{ /* Speed */
+		struct e1000_opt_list speed_list[] = {{          0, "" },
+						      {   SPEED_10, "" },
+						      {  SPEED_100, "" },
+						      { SPEED_1000, "" }};
+
+		struct e1000_option opt = {
+			.type = list_option,
+			.name = "Speed",
+			.err  = "parameter ignored",
+			.def  = 0,
+			.arg  = { .l = { .nr = ARRAY_SIZE(speed_list),
+					 .p = speed_list }}
+		};
+
+#ifdef module_param_array
+		if (num_Speed > bd) {
+#endif
+			speed = Speed[bd];
+			e1000_validate_option(&speed, &opt, adapter);
+#ifdef module_param_array
+		} else {
+			speed = opt.def;
+		}
+#endif
+	}
+	{ /* Duplex */
+		struct e1000_opt_list dplx_list[] = {{           0, "" },
+						     { HALF_DUPLEX, "" },
+						     { FULL_DUPLEX, "" }};
+
+		struct e1000_option opt = {
+			.type = list_option,
+			.name = "Duplex",
+			.err  = "parameter ignored",
+			.def  = 0,
+			.arg  = { .l = { .nr = ARRAY_SIZE(dplx_list),
+					 .p = dplx_list }}
+		};
+
+		if (e1000_check_reset_block(hw)) {
+			DPRINTK(PROBE, INFO,
+				"Link active due to SoL/IDER Session. "
+			        "Speed/Duplex/AutoNeg parameter ignored.\n");
+			return;
+		}
+#ifdef module_param_array
+		if (num_Duplex > bd) {
+#endif
+			dplx = Duplex[bd];
+			e1000_validate_option(&dplx, &opt, adapter);
+#ifdef module_param_array
+		} else {
+			dplx = opt.def;
+		}
+#endif
+	}
+
+#ifdef module_param_array
+	if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
+#else
+	if (AutoNeg[bd] != OPTION_UNSET && (speed != 0 || dplx != 0)) {
+#endif
+		DPRINTK(PROBE, INFO,
+		       "AutoNeg specified along with Speed or Duplex, "
+		       "parameter ignored\n");
+		hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+	} else { /* Autoneg */
+		struct e1000_opt_list an_list[] =
+			#define AA "AutoNeg advertising "
+			{{ 0x01, AA "10/HD" },
+			 { 0x02, AA "10/FD" },
+			 { 0x03, AA "10/FD, 10/HD" },
+			 { 0x04, AA "100/HD" },
+			 { 0x05, AA "100/HD, 10/HD" },
+			 { 0x06, AA "100/HD, 10/FD" },
+			 { 0x07, AA "100/HD, 10/FD, 10/HD" },
+			 { 0x08, AA "100/FD" },
+			 { 0x09, AA "100/FD, 10/HD" },
+			 { 0x0a, AA "100/FD, 10/FD" },
+			 { 0x0b, AA "100/FD, 10/FD, 10/HD" },
+			 { 0x0c, AA "100/FD, 100/HD" },
+			 { 0x0d, AA "100/FD, 100/HD, 10/HD" },
+			 { 0x0e, AA "100/FD, 100/HD, 10/FD" },
+			 { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
+			 { 0x20, AA "1000/FD" },
+			 { 0x21, AA "1000/FD, 10/HD" },
+			 { 0x22, AA "1000/FD, 10/FD" },
+			 { 0x23, AA "1000/FD, 10/FD, 10/HD" },
+			 { 0x24, AA "1000/FD, 100/HD" },
+			 { 0x25, AA "1000/FD, 100/HD, 10/HD" },
+			 { 0x26, AA "1000/FD, 100/HD, 10/FD" },
+			 { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
+			 { 0x28, AA "1000/FD, 100/FD" },
+			 { 0x29, AA "1000/FD, 100/FD, 10/HD" },
+			 { 0x2a, AA "1000/FD, 100/FD, 10/FD" },
+			 { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
+			 { 0x2c, AA "1000/FD, 100/FD, 100/HD" },
+			 { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
+			 { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
+			 { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
+
+		struct e1000_option opt = {
+			.type = list_option,
+			.name = "AutoNeg",
+			.err  = "parameter ignored",
+			.def  = AUTONEG_ADV_DEFAULT,
+			.arg  = { .l = { .nr = ARRAY_SIZE(an_list),
+					 .p = an_list }}
+		};
+
+#ifdef module_param_array
+		if (num_AutoNeg > bd) {
+#endif
+			an = AutoNeg[bd];
+			e1000_validate_option(&an, &opt, adapter);
+#ifdef module_param_array
+		} else {
+			an = opt.def;
+		}
+#endif
+		hw->phy.autoneg_advertised = an;
+	}
+
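+	/*
+	 * Note: speed and duplex are folded into a single switch value.
+	 * SPEED_10/100/1000 are 10, 100 and 1000, while HALF_DUPLEX and
+	 * FULL_DUPLEX are small non-zero constants (1 and 2 in this
+	 * driver's headers), so every valid combination yields a unique
+	 * sum, e.g. SPEED_100 + FULL_DUPLEX = 102.
+	 */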
+	switch (speed + dplx) {
+	case 0:
+		hw->mac.autoneg = adapter->fc_autoneg = TRUE;
+#ifdef module_param_array
+		if ((num_Speed > bd) && (speed != 0 || dplx != 0))
+#else
+		if (Speed[bd] != OPTION_UNSET || Duplex[bd] != OPTION_UNSET)
+#endif
+			DPRINTK(PROBE, INFO,
+			       "Speed and duplex autonegotiation enabled\n");
+		break;
+	case HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"Half Duplex only\n");
+		hw->mac.autoneg = adapter->fc_autoneg = TRUE;
+		hw->phy.autoneg_advertised = ADVERTISE_10_HALF |
+		                             ADVERTISE_100_HALF;
+		break;
+	case FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"Full Duplex only\n");
+		hw->mac.autoneg = adapter->fc_autoneg = TRUE;
+		hw->phy.autoneg_advertised = ADVERTISE_10_FULL |
+		                             ADVERTISE_100_FULL |
+		                             ADVERTISE_1000_FULL;
+		break;
+	case SPEED_10:
+		DPRINTK(PROBE, INFO, "10 Mbps Speed specified "
+			"without Duplex\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n");
+		hw->mac.autoneg = adapter->fc_autoneg = TRUE;
+		hw->phy.autoneg_advertised = ADVERTISE_10_HALF |
+		                             ADVERTISE_10_FULL;
+		break;
+	case SPEED_10 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Half Duplex\n");
+		hw->mac.autoneg = adapter->fc_autoneg = FALSE;
+		hw->mac.forced_speed_duplex = ADVERTISE_10_HALF;
+		hw->phy.autoneg_advertised = 0;
+		break;
+	case SPEED_10 + FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n");
+		hw->mac.autoneg = adapter->fc_autoneg = FALSE;
+		hw->mac.forced_speed_duplex = ADVERTISE_10_FULL;
+		hw->phy.autoneg_advertised = 0;
+		break;
+	case SPEED_100:
+		DPRINTK(PROBE, INFO, "100 Mbps Speed specified "
+			"without Duplex\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"100 Mbps only\n");
+		hw->mac.autoneg = adapter->fc_autoneg = TRUE;
+		hw->phy.autoneg_advertised = ADVERTISE_100_HALF |
+		                             ADVERTISE_100_FULL;
+		break;
+	case SPEED_100 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n");
+		hw->mac.autoneg = adapter->fc_autoneg = FALSE;
+		hw->mac.forced_speed_duplex = ADVERTISE_100_HALF;
+		hw->phy.autoneg_advertised = 0;
+		break;
+	case SPEED_100 + FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n");
+		hw->mac.autoneg = adapter->fc_autoneg = FALSE;
+		hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
+		hw->phy.autoneg_advertised = 0;
+		break;
+	case SPEED_1000:
+		DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
+			"Duplex\n");
+		goto full_duplex_only;
+	case SPEED_1000 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO,
+			"Half Duplex is not supported at 1000 Mbps\n");
+		fallthrough;
+	case SPEED_1000 + FULL_DUPLEX:
+full_duplex_only:
+		DPRINTK(PROBE, INFO,
+		       "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+		hw->mac.autoneg = adapter->fc_autoneg = TRUE;
+		hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	default:
+		BUG();
+	}
+
+	/* Speed, AutoNeg and MDI/MDI-X must all play nice */
+	if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
+		DPRINTK(PROBE, INFO,
+			"Speed, AutoNeg and MDI-X specifications are "
+			"incompatible. Setting MDI-X to a compatible value.\n");
+	}
+}
+
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_hw.h	2022-03-21 12:58:29.768885265 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include "e1000_osdep.h"
+#include "e1000_regs.h"
+#include "e1000_defines.h"
+
+struct e1000_hw;
+
+#define E1000_DEV_ID_82542                    0x1000
+#define E1000_DEV_ID_82543GC_FIBER            0x1001
+#define E1000_DEV_ID_82543GC_COPPER           0x1004
+#define E1000_DEV_ID_82544EI_COPPER           0x1008
+#define E1000_DEV_ID_82544EI_FIBER            0x1009
+#define E1000_DEV_ID_82544GC_COPPER           0x100C
+#define E1000_DEV_ID_82544GC_LOM              0x100D
+#define E1000_DEV_ID_82540EM                  0x100E
+#define E1000_DEV_ID_82540EM_LOM              0x1015
+#define E1000_DEV_ID_82540EP_LOM              0x1016
+#define E1000_DEV_ID_82540EP                  0x1017
+#define E1000_DEV_ID_82540EP_LP               0x101E
+#define E1000_DEV_ID_82545EM_COPPER           0x100F
+#define E1000_DEV_ID_82545EM_FIBER            0x1011
+#define E1000_DEV_ID_82545GM_COPPER           0x1026
+#define E1000_DEV_ID_82545GM_FIBER            0x1027
+#define E1000_DEV_ID_82545GM_SERDES           0x1028
+#define E1000_DEV_ID_82546EB_COPPER           0x1010
+#define E1000_DEV_ID_82546EB_FIBER            0x1012
+#define E1000_DEV_ID_82546EB_QUAD_COPPER      0x101D
+#define E1000_DEV_ID_82546GB_COPPER           0x1079
+#define E1000_DEV_ID_82546GB_FIBER            0x107A
+#define E1000_DEV_ID_82546GB_SERDES           0x107B
+#define E1000_DEV_ID_82546GB_PCIE             0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER      0x1099
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_82541EI                  0x1013
+#define E1000_DEV_ID_82541EI_MOBILE           0x1018
+#define E1000_DEV_ID_82541ER_LOM              0x1014
+#define E1000_DEV_ID_82541ER                  0x1078
+#define E1000_DEV_ID_82541GI                  0x1076
+#define E1000_DEV_ID_82541GI_LF               0x107C
+#define E1000_DEV_ID_82541GI_MOBILE           0x1077
+#define E1000_DEV_ID_82547EI                  0x1019
+#define E1000_DEV_ID_82547EI_MOBILE           0x101A
+#define E1000_DEV_ID_82547GI                  0x1075
+#define E1000_DEV_ID_82571EB_COPPER           0x105E
+#define E1000_DEV_ID_82571EB_FIBER            0x105F
+#define E1000_DEV_ID_82571EB_SERDES           0x1060
+#define E1000_DEV_ID_82571EB_SERDES_DUAL      0x10D9
+#define E1000_DEV_ID_82571EB_SERDES_QUAD      0x10DA
+#define E1000_DEV_ID_82571EB_QUAD_COPPER      0x10A4
+#define E1000_DEV_ID_82571PT_QUAD_COPPER      0x10D5
+#define E1000_DEV_ID_82571EB_QUAD_FIBER       0x10A5
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP   0x10BC
+#define E1000_DEV_ID_82572EI_COPPER           0x107D
+#define E1000_DEV_ID_82572EI_FIBER            0x107E
+#define E1000_DEV_ID_82572EI_SERDES           0x107F
+#define E1000_DEV_ID_82572EI                  0x10B9
+#define E1000_DEV_ID_82573E                   0x108B
+#define E1000_DEV_ID_82573E_IAMT              0x108C
+#define E1000_DEV_ID_82573L                   0x109A
+#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT   0x1096
+#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT   0x1098
+#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT   0x10BA
+#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT   0x10BB
+#define E1000_DEV_ID_ICH8_IGP_M_AMT           0x1049
+#define E1000_DEV_ID_ICH8_IGP_AMT             0x104A
+#define E1000_DEV_ID_ICH8_IGP_C               0x104B
+#define E1000_DEV_ID_ICH8_IFE                 0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT              0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G               0x10C5
+#define E1000_DEV_ID_ICH8_IGP_M               0x104D
+#define E1000_DEV_ID_ICH9_IGP_AMT             0x10BD
+#define E1000_DEV_ID_ICH9_IGP_C               0x294C
+#define E1000_DEV_ID_ICH9_IFE                 0x10C0
+#define E1000_DEV_ID_ICH9_IFE_GT              0x10C3
+#define E1000_DEV_ID_ICH9_IFE_G               0x10C2
+
+#define E1000_REVISION_0 0
+#define E1000_REVISION_1 1
+#define E1000_REVISION_2 2
+#define E1000_REVISION_3 3
+#define E1000_REVISION_4 4
+
+#define E1000_FUNC_0     0
+#define E1000_FUNC_1     1
+
+typedef enum {
+	e1000_undefined = 0,
+	e1000_82542,
+	e1000_82543,
+	e1000_82544,
+	e1000_82540,
+	e1000_82545,
+	e1000_82545_rev_3,
+	e1000_82546,
+	e1000_82546_rev_3,
+	e1000_82541,
+	e1000_82541_rev_2,
+	e1000_82547,
+	e1000_82547_rev_2,
+	e1000_82571,
+	e1000_82572,
+	e1000_82573,
+	e1000_80003es2lan,
+	e1000_ich8lan,
+	e1000_ich9lan,
+	e1000_num_macs  /* List is 1-based, so subtract 1 for true count. */
+} e1000_mac_type;
+
+typedef enum {
+	e1000_media_type_unknown = 0,
+	e1000_media_type_copper = 1,
+	e1000_media_type_fiber = 2,
+	e1000_media_type_internal_serdes = 3,
+	e1000_num_media_types
+} e1000_media_type;
+
+typedef enum {
+	e1000_nvm_unknown = 0,
+	e1000_nvm_none,
+	e1000_nvm_eeprom_spi,
+	e1000_nvm_eeprom_microwire,
+	e1000_nvm_flash_hw,
+	e1000_nvm_flash_sw
+} e1000_nvm_type;
+
+typedef enum {
+	e1000_nvm_override_none = 0,
+	e1000_nvm_override_spi_small,
+	e1000_nvm_override_spi_large,
+	e1000_nvm_override_microwire_small,
+	e1000_nvm_override_microwire_large
+} e1000_nvm_override;
+
+typedef enum {
+	e1000_phy_unknown = 0,
+	e1000_phy_none,
+	e1000_phy_m88,
+	e1000_phy_igp,
+	e1000_phy_igp_2,
+	e1000_phy_gg82563,
+	e1000_phy_igp_3,
+	e1000_phy_ife,
+} e1000_phy_type;
+
+typedef enum {
+	e1000_bus_type_unknown = 0,
+	e1000_bus_type_pci,
+	e1000_bus_type_pcix,
+	e1000_bus_type_pci_express,
+	e1000_bus_type_reserved
+} e1000_bus_type;
+
+typedef enum {
+	e1000_bus_speed_unknown = 0,
+	e1000_bus_speed_33,
+	e1000_bus_speed_66,
+	e1000_bus_speed_100,
+	e1000_bus_speed_120,
+	e1000_bus_speed_133,
+	e1000_bus_speed_2500,
+	e1000_bus_speed_5000,
+	e1000_bus_speed_reserved
+} e1000_bus_speed;
+
+typedef enum {
+	e1000_bus_width_unknown = 0,
+	e1000_bus_width_pcie_x1,
+	e1000_bus_width_pcie_x2,
+	e1000_bus_width_pcie_x4 = 4,
+	e1000_bus_width_pcie_x8 = 8,
+	e1000_bus_width_32,
+	e1000_bus_width_64,
+	e1000_bus_width_reserved
+} e1000_bus_width;
+
+typedef enum {
+	e1000_1000t_rx_status_not_ok = 0,
+	e1000_1000t_rx_status_ok,
+	e1000_1000t_rx_status_undefined = 0xFF
+} e1000_1000t_rx_status;
+
+typedef enum {
+	e1000_rev_polarity_normal = 0,
+	e1000_rev_polarity_reversed,
+	e1000_rev_polarity_undefined = 0xFF
+} e1000_rev_polarity;
+
+typedef enum {
+	e1000_fc_none = 0,
+	e1000_fc_rx_pause,
+	e1000_fc_tx_pause,
+	e1000_fc_full,
+	e1000_fc_default = 0xFF
+} e1000_fc_type;
+
+typedef enum {
+	e1000_ffe_config_enabled = 0,
+	e1000_ffe_config_active,
+	e1000_ffe_config_blocked
+} e1000_ffe_config;
+
+typedef enum {
+	e1000_dsp_config_disabled = 0,
+	e1000_dsp_config_enabled,
+	e1000_dsp_config_activated,
+	e1000_dsp_config_undefined = 0xFF
+} e1000_dsp_config;
+
+/* Receive Descriptor */
+struct e1000_rx_desc {
+	u64 buffer_addr; /* Address of the descriptor's data buffer */
+	u16 length;      /* Length of data DMAed into data buffer */
+	u16 csum;        /* Packet checksum */
+	u8  status;      /* Descriptor status */
+	u8  errors;      /* Descriptor Errors */
+	u16 special;
+};
+
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+	struct {
+		u64 buffer_addr;
+		u64 reserved;
+	} read;
+	struct {
+		struct {
+			u32 mrq;              /* Multiple Rx Queues */
+			union {
+				u32 rss;            /* RSS Hash */
+				struct {
+					u16 ip_id;  /* IP id */
+					u16 csum;   /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			u32 status_error;     /* ext status/error */
+			u16 length;
+			u16 vlan;             /* VLAN tag */
+		} upper;
+	} wb;  /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+	struct {
+		/* one buffer for protocol header(s), three data buffers */
+		u64 buffer_addr[MAX_PS_BUFFERS];
+	} read;
+	struct {
+		struct {
+			u32 mrq;              /* Multiple Rx Queues */
+			union {
+				u32 rss;              /* RSS Hash */
+				struct {
+					u16 ip_id;    /* IP id */
+					u16 csum;     /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			u32 status_error;     /* ext status/error */
+			u16 length0;          /* length of buffer 0 */
+			u16 vlan;             /* VLAN tag */
+		} middle;
+		struct {
+			u16 header_status;
+			u16 length[3];        /* length of buffers 1-3 */
+		} upper;
+		u64 reserved;
+	} wb; /* writeback */
+};
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+	u64 buffer_addr;      /* Address of the descriptor's data buffer */
+	union {
+		u32 data;
+		struct {
+			u16 length;    /* Data buffer length */
+			u8 cso;        /* Checksum offset */
+			u8 cmd;        /* Descriptor control */
+		} flags;
+	} lower;
+	union {
+		u32 data;
+		struct {
+			u8 status;     /* Descriptor status */
+			u8 css;        /* Checksum start */
+			u16 special;
+		} fields;
+	} upper;
+};
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+	union {
+		u32 ip_config;
+		struct {
+			u8 ipcss;      /* IP checksum start */
+			u8 ipcso;      /* IP checksum offset */
+			u16 ipcse;     /* IP checksum end */
+		} ip_fields;
+	} lower_setup;
+	union {
+		u32 tcp_config;
+		struct {
+			u8 tucss;      /* TCP checksum start */
+			u8 tucso;      /* TCP checksum offset */
+			u16 tucse;     /* TCP checksum end */
+		} tcp_fields;
+	} upper_setup;
+	u32 cmd_and_length;
+	union {
+		u32 data;
+		struct {
+			u8 status;     /* Descriptor status */
+			u8 hdr_len;    /* Header length */
+			u16 mss;       /* Maximum segment size */
+		} fields;
+	} tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+	u64 buffer_addr;   /* Address of the descriptor's buffer address */
+	union {
+		u32 data;
+		struct {
+			u16 length;    /* Data buffer length */
+			u8 typ_len_ext;
+			u8 cmd;
+		} flags;
+	} lower;
+	union {
+		u32 data;
+		struct {
+			u8 status;     /* Descriptor status */
+			u8 popts;      /* Packet Options */
+			u16 special;
+		} fields;
+	} upper;
+};
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+	u64 crcerrs;
+	u64 algnerrc;
+	u64 symerrs;
+	u64 rxerrc;
+	u64 mpc;
+	u64 scc;
+	u64 ecol;
+	u64 mcc;
+	u64 latecol;
+	u64 colc;
+	u64 dc;
+	u64 tncrs;
+	u64 sec;
+	u64 cexterr;
+	u64 rlec;
+	u64 xonrxc;
+	u64 xontxc;
+	u64 xoffrxc;
+	u64 xofftxc;
+	u64 fcruc;
+	u64 prc64;
+	u64 prc127;
+	u64 prc255;
+	u64 prc511;
+	u64 prc1023;
+	u64 prc1522;
+	u64 gprc;
+	u64 bprc;
+	u64 mprc;
+	u64 gptc;
+	u64 gorc;
+	u64 gotc;
+	u64 rnbc;
+	u64 ruc;
+	u64 rfc;
+	u64 roc;
+	u64 rjc;
+	u64 mgprc;
+	u64 mgpdc;
+	u64 mgptc;
+	u64 tor;
+	u64 tot;
+	u64 tpr;
+	u64 tpt;
+	u64 ptc64;
+	u64 ptc127;
+	u64 ptc255;
+	u64 ptc511;
+	u64 ptc1023;
+	u64 ptc1522;
+	u64 mptc;
+	u64 bptc;
+	u64 tsctc;
+	u64 tsctfc;
+	u64 iac;
+	u64 icrxptc;
+	u64 icrxatc;
+	u64 ictxptc;
+	u64 ictxatc;
+	u64 ictxqec;
+	u64 ictxqmtc;
+	u64 icrxdmtc;
+	u64 icrxoc;
+	u64 cbtmpc;
+	u64 htdpmc;
+	u64 cbrdpc;
+	u64 cbrmpc;
+	u64 rpthc;
+	u64 hgptc;
+	u64 htcbdpc;
+	u64 hgorc;
+	u64 hgotc;
+	u64 lenerrs;
+	u64 scvpc;
+	u64 hrmpc;
+};
+
+struct e1000_phy_stats {
+	u32 idle_errors;
+	u32 receive_errors;
+};
+
+struct e1000_host_mng_dhcp_cookie {
+	u32 signature;
+	u8  status;
+	u8  reserved0;
+	u16 vlan_id;
+	u32 reserved1;
+	u16 reserved2;
+	u8  reserved3;
+	u8  checksum;
+};
+
+/* Host Interface "Rev 1" */
+struct e1000_host_command_header {
+	u8 command_id;
+	u8 command_length;
+	u8 command_options;
+	u8 checksum;
+};
+
+#define E1000_HI_MAX_DATA_LENGTH     252
+struct e1000_host_command_info {
+	struct e1000_host_command_header command_header;
+	u8 command_data[E1000_HI_MAX_DATA_LENGTH];
+};
+
+/* Host Interface "Rev 2" */
+struct e1000_host_mng_command_header {
+	u8  command_id;
+	u8  checksum;
+	u16 reserved1;
+	u16 reserved2;
+	u16 command_length;
+};
+
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+struct e1000_host_mng_command_info {
+	struct e1000_host_mng_command_header command_header;
+	u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
+};
+
+#include "e1000_mac.h"
+#include "e1000_phy.h"
+#include "e1000_nvm.h"
+#include "e1000_manage.h"
+
+struct e1000_functions {
+	/* Function pointers for the MAC. */
+	s32  (*init_mac_params)(struct e1000_hw *);
+	s32  (*blink_led)(struct e1000_hw *);
+	s32  (*check_for_link)(struct e1000_hw *);
+	bool (*check_mng_mode)(struct e1000_hw *hw);
+	s32  (*cleanup_led)(struct e1000_hw *);
+	void (*clear_hw_cntrs)(struct e1000_hw *);
+	void (*clear_vfta)(struct e1000_hw *);
+	s32  (*get_bus_info)(struct e1000_hw *);
+	s32  (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
+	s32  (*led_on)(struct e1000_hw *);
+	s32  (*led_off)(struct e1000_hw *);
+	void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32,
+	                            u32);
+	void (*remove_device)(struct e1000_hw *);
+	s32  (*reset_hw)(struct e1000_hw *);
+	s32  (*init_hw)(struct e1000_hw *);
+	s32  (*setup_link)(struct e1000_hw *);
+	s32  (*setup_physical_interface)(struct e1000_hw *);
+	s32  (*setup_led)(struct e1000_hw *);
+	void (*write_vfta)(struct e1000_hw *, u32, u32);
+	void (*mta_set)(struct e1000_hw *, u32);
+	void (*config_collision_dist)(struct e1000_hw*);
+	void (*rar_set)(struct e1000_hw*, u8*, u32);
+	s32  (*read_mac_addr)(struct e1000_hw*);
+	s32  (*validate_mdi_setting)(struct e1000_hw*);
+	s32  (*mng_host_if_write)(struct e1000_hw*, u8*, u16, u16, u8*);
+	s32  (*mng_write_cmd_header)(struct e1000_hw *hw,
+                      struct e1000_host_mng_command_header*);
+	s32  (*mng_enable_host_if)(struct e1000_hw*);
+	s32  (*wait_autoneg)(struct e1000_hw*);
+
+	/* Function pointers for the PHY. */
+	s32  (*init_phy_params)(struct e1000_hw *);
+	s32  (*acquire_phy)(struct e1000_hw *);
+	s32  (*check_polarity)(struct e1000_hw *);
+	s32  (*check_reset_block)(struct e1000_hw *);
+	s32  (*commit_phy)(struct e1000_hw *);
+	s32  (*force_speed_duplex)(struct e1000_hw *);
+	s32  (*get_cfg_done)(struct e1000_hw *hw);
+	s32  (*get_cable_length)(struct e1000_hw *);
+	s32  (*get_phy_info)(struct e1000_hw *);
+	s32  (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
+	void (*release_phy)(struct e1000_hw *);
+	s32  (*reset_phy)(struct e1000_hw *);
+	s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
+	s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
+	s32  (*write_phy_reg)(struct e1000_hw *, u32, u16);
+	void (*power_up_phy)(struct e1000_hw *);
+	void (*power_down_phy)(struct e1000_hw *);
+
+	/* Function pointers for the NVM. */
+	s32  (*init_nvm_params)(struct e1000_hw *);
+	s32  (*acquire_nvm)(struct e1000_hw *);
+	s32  (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
+	void (*release_nvm)(struct e1000_hw *);
+	void (*reload_nvm)(struct e1000_hw *);
+	s32  (*update_nvm)(struct e1000_hw *);
+	s32  (*valid_led_default)(struct e1000_hw *, u16 *);
+	s32  (*validate_nvm)(struct e1000_hw *);
+	s32  (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
+};
+
+struct e1000_mac_info {
+	u8 addr[6];
+	u8 perm_addr[6];
+
+	e1000_mac_type type;
+
+	u32 collision_delta;
+	u32 ledctl_default;
+	u32 ledctl_mode1;
+	u32 ledctl_mode2;
+	u32 mc_filter_type;
+	u32 tx_packet_delta;
+	u32 txcw;
+
+	u16 current_ifs_val;
+	u16 ifs_max_val;
+	u16 ifs_min_val;
+	u16 ifs_ratio;
+	u16 ifs_step_size;
+	u16 mta_reg_count;
+	u16 rar_entry_count;
+
+	u8  forced_speed_duplex;
+
+	bool adaptive_ifs;
+	bool arc_subsystem_valid;
+	bool asf_firmware_present;
+	bool autoneg;
+	bool autoneg_failed;
+	bool disable_av;
+	bool disable_hw_init_bits;
+	bool get_link_status;
+	bool ifs_params_forced;
+	bool in_ifs_mode;
+	bool report_tx_early;
+	bool serdes_has_link;
+	bool tx_pkt_filtering;
+};
+
+struct e1000_phy_info {
+	e1000_phy_type type;
+
+	e1000_1000t_rx_status local_rx;
+	e1000_1000t_rx_status remote_rx;
+	e1000_ms_type ms_type;
+	e1000_ms_type original_ms_type;
+	e1000_rev_polarity cable_polarity;
+	e1000_smart_speed smart_speed;
+
+	u32 addr;
+	u32 id;
+	u32 reset_delay_us; /* in usec */
+	u32 revision;
+
+	e1000_media_type media_type;
+
+	u16 autoneg_advertised;
+	u16 autoneg_mask;
+	u16 cable_length;
+	u16 max_cable_length;
+	u16 min_cable_length;
+
+	u8 mdix;
+
+	bool disable_polarity_correction;
+	bool is_mdix;
+	bool polarity_correction;
+	bool reset_disable;
+	bool speed_downgraded;
+	bool autoneg_wait_to_complete;
+};
+
+struct e1000_nvm_info {
+	e1000_nvm_type type;
+	e1000_nvm_override override;
+
+	u32 flash_bank_size;
+	u32 flash_base_addr;
+
+	u16 word_size;
+	u16 delay_usec;
+	u16 address_bits;
+	u16 opcode_bits;
+	u16 page_size;
+};
+
+struct e1000_bus_info {
+	e1000_bus_type type;
+	e1000_bus_speed speed;
+	e1000_bus_width width;
+
+	u32 snoop;
+
+	u16 func;
+	u16 pci_cmd_word;
+};
+
+struct e1000_fc_info {
+	u32 high_water;     /* Flow control high-water mark */
+	u32 low_water;      /* Flow control low-water mark */
+	u16 pause_time;     /* Flow control pause timer */
+	bool send_xon;      /* Flow control send XON */
+	bool strict_ieee;   /* Strict IEEE mode */
+	e1000_fc_type type; /* Type of flow control */
+	e1000_fc_type original_type;
+};
+
+struct e1000_hw {
+	void *back;
+	void *dev_spec;
+
+	u8 __iomem *hw_addr;
+	u8 __iomem *flash_address;
+	unsigned long io_base;
+
+	struct e1000_functions func;
+	struct e1000_mac_info  mac;
+	struct e1000_fc_info   fc;
+	struct e1000_phy_info  phy;
+	struct e1000_nvm_info  nvm;
+	struct e1000_bus_info  bus;
+	struct e1000_host_mng_dhcp_cookie mng_cookie;
+
+	u32 dev_spec_size;
+
+	u16 device_id;
+	u16 subsystem_vendor_id;
+	u16 subsystem_device_id;
+	u16 vendor_id;
+
+	u8  revision_id;
+};
+
+/* These functions must be implemented by drivers */
+void e1000_pci_clear_mwi(struct e1000_hw *hw);
+void e1000_pci_set_mwi(struct e1000_hw *hw);
+s32  e1000_alloc_zeroed_dev_spec_struct(struct e1000_hw *hw, u32 size);
+s32  e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_free_dev_spec_struct(struct e1000_hw *hw);
+void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.c	2022-03-21 12:58:29.762885324 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/kcompat.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_82543
+ * e1000_82544
+ */
+
+#include "e1000_api.h"
+#include "e1000_82543.h"
+
+static s32  e1000_init_phy_params_82543(struct e1000_hw *hw);
+static s32  e1000_init_nvm_params_82543(struct e1000_hw *hw);
+static s32  e1000_init_mac_params_82543(struct e1000_hw *hw);
+static s32  e1000_read_phy_reg_82543(struct e1000_hw *hw, u32 offset,
+                                     u16 *data);
+static s32  e1000_write_phy_reg_82543(struct e1000_hw *hw, u32 offset,
+                                      u16 data);
+static s32  e1000_phy_force_speed_duplex_82543(struct e1000_hw *hw);
+static s32  e1000_phy_hw_reset_82543(struct e1000_hw *hw);
+static s32  e1000_reset_hw_82543(struct e1000_hw *hw);
+static s32  e1000_init_hw_82543(struct e1000_hw *hw);
+static s32  e1000_setup_link_82543(struct e1000_hw *hw);
+static s32  e1000_setup_copper_link_82543(struct e1000_hw *hw);
+static s32  e1000_setup_fiber_link_82543(struct e1000_hw *hw);
+static s32  e1000_check_for_copper_link_82543(struct e1000_hw *hw);
+static s32  e1000_check_for_fiber_link_82543(struct e1000_hw *hw);
+static s32  e1000_led_on_82543(struct e1000_hw *hw);
+static s32  e1000_led_off_82543(struct e1000_hw *hw);
+static void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset,
+                                   u32 value);
+static void e1000_mta_set_82543(struct e1000_hw *hw, u32 hash_value);
+static void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw);
+static s32  e1000_config_mac_to_phy_82543(struct e1000_hw *hw);
+static bool e1000_init_phy_disabled_82543(struct e1000_hw *hw);
+static void e1000_lower_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl);
+static s32  e1000_polarity_reversal_workaround_82543(struct e1000_hw *hw);
+static void e1000_raise_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl);
+static u16  e1000_shift_in_mdi_bits_82543(struct e1000_hw *hw);
+static void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data,
+                                           u16 count);
+static bool e1000_tbi_compatibility_enabled_82543(struct e1000_hw *hw);
+static void e1000_set_tbi_sbp_82543(struct e1000_hw *hw, bool state);
+
+struct e1000_dev_spec_82543 {
+	u32  tbi_compatibility;
+	bool dma_fairness;
+	bool init_phy_disabled;
+};
+
+/**
+ *  e1000_init_phy_params_82543 - Init PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_phy_params_82543(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_phy_params_82543");
+
+	if (hw->phy.media_type != e1000_media_type_copper) {
+		phy->type               = e1000_phy_none;
+		goto out;
+	} else {
+		func->power_up_phy      = e1000_power_up_phy_copper;
+		func->power_down_phy    = e1000_power_down_phy_copper;
+	}
+
+	phy->addr                       = 1;
+	phy->autoneg_mask               = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+	phy->reset_delay_us             = 10000;
+	phy->type                       = e1000_phy_m88;
+
+	/* Function Pointers */
+	func->check_polarity            = e1000_check_polarity_m88;
+	func->commit_phy                = e1000_phy_sw_reset_generic;
+	func->force_speed_duplex        = e1000_phy_force_speed_duplex_82543;
+	func->get_cable_length          = e1000_get_cable_length_m88;
+	func->get_cfg_done              = e1000_get_cfg_done_generic;
+	func->read_phy_reg              = (hw->mac.type == e1000_82543)
+	                                  ? e1000_read_phy_reg_82543
+	                                  : e1000_read_phy_reg_m88;
+	func->reset_phy                 = (hw->mac.type == e1000_82543)
+	                                  ? e1000_phy_hw_reset_82543
+	                                  : e1000_phy_hw_reset_generic;
+	func->write_phy_reg             = (hw->mac.type == e1000_82543)
+	                                  ? e1000_write_phy_reg_82543
+	                                  : e1000_write_phy_reg_m88;
+	func->get_phy_info              = e1000_get_phy_info_m88;
+
+	/*
+	 * The external PHY of the 82543 can be in a funky state.
+	 * Resetting helps us read the PHY registers for acquiring
+	 * the PHY ID.
+	 */
+	if (!e1000_init_phy_disabled_82543(hw)) {
+		ret_val = e1000_phy_hw_reset(hw);
+		if (ret_val) {
+			DEBUGOUT("Resetting PHY during init failed.\n");
+			goto out;
+		}
+		msec_delay(20);
+	}
+
+	ret_val = e1000_get_phy_id(hw);
+	if (ret_val)
+		goto out;
+
+	/* Verify phy id */
+	switch (hw->mac.type) {
+	case e1000_82543:
+		if (phy->id != M88E1000_E_PHY_ID) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+		break;
+	case e1000_82544:
+		if (phy->id != M88E1000_I_PHY_ID) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+		break;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_82543 - Init NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_nvm_params_82543(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_functions *func = &hw->func;
+
+	DEBUGFUNC("e1000_init_nvm_params_82543");
+
+	nvm->type               = e1000_nvm_eeprom_microwire;
+	nvm->word_size          = 64;
+	nvm->delay_usec         = 50;
+	nvm->address_bits       =  6;
+	nvm->opcode_bits        =  3;
+
+	/* Function Pointers */
+	func->read_nvm          = e1000_read_nvm_microwire;
+	func->update_nvm        = e1000_update_nvm_checksum_generic;
+	func->valid_led_default = e1000_valid_led_default_generic;
+	func->validate_nvm      = e1000_validate_nvm_checksum_generic;
+	func->write_nvm         = e1000_write_nvm_microwire;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_mac_params_82543 - Init MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_mac_params_82543(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_init_mac_params_82543");
+
+	/* Set media type */
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82543GC_FIBER:
+	case E1000_DEV_ID_82544EI_FIBER:
+		hw->phy.media_type = e1000_media_type_fiber;
+		break;
+	default:
+		hw->phy.media_type = e1000_media_type_copper;
+		break;
+	}
+
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_RAR_ENTRIES;
+
+	/* Function pointers */
+
+	/* bus type/speed/width */
+	func->get_bus_info = e1000_get_bus_info_pci_generic;
+	/* reset */
+	func->reset_hw = e1000_reset_hw_82543;
+	/* hw initialization */
+	func->init_hw = e1000_init_hw_82543;
+	/* link setup */
+	func->setup_link = e1000_setup_link_82543;
+	/* physical interface setup */
+	func->setup_physical_interface =
+	        (hw->phy.media_type == e1000_media_type_copper)
+	                ? e1000_setup_copper_link_82543
+	                : e1000_setup_fiber_link_82543;
+	/* check for link */
+	func->check_for_link =
+	        (hw->phy.media_type == e1000_media_type_copper)
+	                ? e1000_check_for_copper_link_82543
+	                : e1000_check_for_fiber_link_82543;
+	/* link info */
+	func->get_link_up_info =
+	        (hw->phy.media_type == e1000_media_type_copper)
+	                ? e1000_get_speed_and_duplex_copper_generic
+	                : e1000_get_speed_and_duplex_fiber_serdes_generic;
+	/* multicast address update */
+	func->update_mc_addr_list = e1000_update_mc_addr_list_generic;
+	/* writing VFTA */
+	func->write_vfta = e1000_write_vfta_82543;
+	/* clearing VFTA */
+	func->clear_vfta = e1000_clear_vfta_generic;
+	/* setting MTA */
+	func->mta_set = e1000_mta_set_82543;
+	/* turn on/off LED */
+	func->led_on = e1000_led_on_82543;
+	func->led_off = e1000_led_off_82543;
+	/* remove device */
+	func->remove_device = e1000_remove_device_generic;
+	/* clear hardware counters */
+	func->clear_hw_cntrs = e1000_clear_hw_cntrs_82543;
+
+	hw->dev_spec_size = sizeof(struct e1000_dev_spec_82543);
+
+	/* Device-specific structure allocation */
+	ret_val = e1000_alloc_zeroed_dev_spec_struct(hw, hw->dev_spec_size);
+	if (ret_val)
+		goto out;
+
+	/* Set tbi compatibility */
+	if ((hw->mac.type != e1000_82543) ||
+	    (hw->phy.media_type == e1000_media_type_fiber))
+		e1000_set_tbi_compatibility_82543(hw, FALSE);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_function_pointers_82543 - Init func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  The only function explicitly called by the api module to initialize
+ *  all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82543(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_init_function_pointers_82543");
+
+	hw->func.init_mac_params = e1000_init_mac_params_82543;
+	hw->func.init_nvm_params = e1000_init_nvm_params_82543;
+	hw->func.init_phy_params = e1000_init_phy_params_82543;
+}
+
+/**
+ *  e1000_tbi_compatibility_enabled_82543 - Returns TBI compat status
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns the current status of 10-bit Interface (TBI) compatibility
+ *  (enabled/disabled).
+ **/
+static bool e1000_tbi_compatibility_enabled_82543(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_82543 *dev_spec;
+	bool state = FALSE;
+
+	DEBUGFUNC("e1000_tbi_compatibility_enabled_82543");
+
+	if (hw->mac.type != e1000_82543) {
+		DEBUGOUT("TBI compatibility workaround for 82543 only.\n");
+		goto out;
+	}
+
+	dev_spec = (struct e1000_dev_spec_82543 *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		goto out;
+	}
+
+	state = (dev_spec->tbi_compatibility & TBI_COMPAT_ENABLED)
+	        ? TRUE : FALSE;
+
+out:
+	return state;
+}
+
+/**
+ *  e1000_set_tbi_compatibility_82543 - Set TBI compatibility
+ *  @hw: pointer to the HW structure
+ *  @state: enable/disable TBI compatibility
+ *
+ *  Enables or disables 10-bit Interface (TBI) compatibility.
+ **/
+void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw, bool state)
+{
+	struct e1000_dev_spec_82543 *dev_spec;
+
+	DEBUGFUNC("e1000_set_tbi_compatibility_82543");
+
+	if (hw->mac.type != e1000_82543) {
+		DEBUGOUT("TBI compatibility workaround for 82543 only.\n");
+		goto out;
+	}
+
+	dev_spec = (struct e1000_dev_spec_82543 *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		goto out;
+	}
+
+	if (state)
+		dev_spec->tbi_compatibility |= TBI_COMPAT_ENABLED;
+	else
+		dev_spec->tbi_compatibility &= ~TBI_COMPAT_ENABLED;
+
+out:
+	return;
+}
+
+/**
+ *  e1000_tbi_sbp_enabled_82543 - Returns TBI SBP status
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns the current status of 10-bit Interface (TBI) store bad packet (SBP)
+ *  (enabled/disabled).
+ **/
+bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_82543 *dev_spec;
+	bool state = FALSE;
+
+	DEBUGFUNC("e1000_tbi_sbp_enabled_82543");
+
+	if (hw->mac.type != e1000_82543) {
+		DEBUGOUT("TBI compatibility workaround for 82543 only.\n");
+		goto out;
+	}
+
+	dev_spec = (struct e1000_dev_spec_82543 *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		goto out;
+	}
+
+	state = (dev_spec->tbi_compatibility & TBI_SBP_ENABLED)
+	        ? TRUE : FALSE;
+
+out:
+	return state;
+}
+
+/**
+ *  e1000_set_tbi_sbp_82543 - Set TBI SBP
+ *  @hw: pointer to the HW structure
+ *  @state: enable/disable TBI store bad packet
+ *
+ *  Enables or disables 10-bit Interface (TBI) store bad packet (SBP).
+ **/
+static void e1000_set_tbi_sbp_82543(struct e1000_hw *hw, bool state)
+{
+	struct e1000_dev_spec_82543 *dev_spec;
+
+	DEBUGFUNC("e1000_set_tbi_sbp_82543");
+
+	dev_spec = (struct e1000_dev_spec_82543 *)hw->dev_spec;
+
+	if (state && e1000_tbi_compatibility_enabled_82543(hw))
+		dev_spec->tbi_compatibility |= TBI_SBP_ENABLED;
+	else
+		dev_spec->tbi_compatibility &= ~TBI_SBP_ENABLED;
+
+	return;
+}
+
+/**
+ *  e1000_init_phy_disabled_82543 - Returns init PHY status
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns the current status of whether PHY initialization is disabled.
+ *  True if PHY initialization is disabled else false.
+ **/
+static bool e1000_init_phy_disabled_82543(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_82543 *dev_spec;
+	bool ret_val;
+
+	DEBUGFUNC("e1000_init_phy_disabled_82543");
+
+	if (hw->mac.type != e1000_82543) {
+		ret_val = FALSE;
+		goto out;
+	}
+
+	dev_spec = (struct e1000_dev_spec_82543 *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		ret_val = FALSE;
+		goto out;
+	}
+
+	ret_val = dev_spec->init_phy_disabled;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_tbi_adjust_stats_82543 - Adjust stats when TBI enabled
+ *  @hw: pointer to the HW structure
+ *  @stats: Struct containing statistic register values
+ *  @frame_len: The length of the frame in question
+ *  @mac_addr: The Ethernet destination address of the frame in question
+ *  @max_frame_size: The maximum frame size
+ *
+ *  Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+ **/
+void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw,
+                                  struct e1000_hw_stats *stats, u32 frame_len,
+                                  u8 *mac_addr, u32 max_frame_size)
+{
+	if (!(e1000_tbi_sbp_enabled_82543(hw)))
+		goto out;
+
+	/* First adjust the frame length. */
+	frame_len--;
+	/*
+	 * We need to adjust the statistics counters, since the hardware
+	 * counters overcount this packet as a CRC error and undercount
+	 * the packet as a good packet
+	 */
+	/* This packet should not be counted as a CRC error.    */
+	stats->crcerrs--;
+	/* This packet does count as a Good Packet Received.    */
+	stats->gprc++;
+
+	/* Adjust the Good Octets received counters             */
+	stats->gorc += frame_len;
+
+	/*
+	 * Is this a broadcast or multicast?  Check broadcast first,
+	 * since the test for a multicast frame will test positive on
+	 * a broadcast frame.
+	 */
+	if ((mac_addr[0] == 0xff) && (mac_addr[1] == 0xff))
+		/* Broadcast packet */
+		stats->bprc++;
+	else if (*mac_addr & 0x01)
+		/* Multicast packet */
+		stats->mprc++;
+
+	/*
+	 * In this case, the hardware has overcounted the number of
+	 * oversize frames.
+	 */
+	if ((frame_len == max_frame_size) && (stats->roc > 0))
+		stats->roc--;
+
+	/*
+	 * Adjust the bin counters when the extra byte put the frame in the
+	 * wrong bin. Remember that the frame_len was adjusted above.
+	 */
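+	/*
+	 * For example, a frame of 64 bytes that arrived with the extra TBI
+	 * byte (65 bytes on the wire) was counted by hardware in the 65-127
+	 * bin; with frame_len already decremented above, the branch below
+	 * moves it back into the 64-byte bin.
+	 */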
+	if (frame_len == 64) {
+		stats->prc64++;
+		stats->prc127--;
+	} else if (frame_len == 127) {
+		stats->prc127++;
+		stats->prc255--;
+	} else if (frame_len == 255) {
+		stats->prc255++;
+		stats->prc511--;
+	} else if (frame_len == 511) {
+		stats->prc511++;
+		stats->prc1023--;
+	} else if (frame_len == 1023) {
+		stats->prc1023++;
+		stats->prc1522--;
+	} else if (frame_len == 1522) {
+		stats->prc1522++;
+	}
+
+out:
+	return;
+}
+
+/**
+ *  e1000_read_phy_reg_82543 - Read PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY at offset and stores the information read to data.
+ **/
+static s32 e1000_read_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	u32 mdic;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_read_phy_reg_82543");
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		DEBUGOUT1("PHY Address %d is out of range\n", offset);
+		ret_val = -E1000_ERR_PARAM;
+		goto out;
+	}
+
+	/*
+	 * We must first send a preamble through the MDIO pin to signal the
+	 * beginning of an MII instruction.  This is done by sending 32
+	 * consecutive "1" bits.
+	 */
+	e1000_shift_out_mdi_bits_82543(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+	/*
+	 * Now combine the next few fields that are required for a read
+	 * operation.  We use this method instead of calling the
+	 * e1000_shift_out_mdi_bits routine five different times.  The format
+	 * of an MII read instruction consists of a shift out of 14 bits and
+	 * is defined as follows:
+	 * 	<Preamble><SOF><Op Code><Phy Addr><Offset>
+	 * followed by a shift in of 18 bits.  The first two bits shifted in
+	 * are TurnAround bits used to avoid contention on the MDIO pin when a
+	 * READ operation is performed.  These two bits are thrown away,
+	 * followed by a shift in of 16 bits which contains the desired data.
+	 */
+	mdic = (offset | (hw->phy.addr << 5) |
+		(PHY_OP_READ << 10) | (PHY_SOF << 12));
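+	/*
+	 * Layout of the 14-bit command assembled above (MSB first on the
+	 * wire): bits 13:12 = SOF, bits 11:10 = read opcode, bits 9:5 = PHY
+	 * address, bits 4:0 = register offset.
+	 */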
+
+	e1000_shift_out_mdi_bits_82543(hw, mdic, 14);
+
+	/*
+	 * Now that we've shifted out the read command to the MII, we need to
+	 * "shift in" the 16 data bits (18 bits total, including the two
+	 * turnaround bits) of the requested PHY register.
+	 */
+	*data = e1000_shift_in_mdi_bits_82543(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_82543 - Write PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be written
+ *  @data: data to be written at offset
+ *
+ *  Writes data to the PHY at offset.
+ **/
+static s32 e1000_write_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	u32 mdic;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_write_phy_reg_82543");
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		DEBUGOUT1("PHY Address %d is out of range\n", offset);
+		ret_val = -E1000_ERR_PARAM;
+		goto out;
+	}
+
+	/*
+	 * We'll need to use the SW defined pins to shift the write command
+	 * out to the PHY. We first send a preamble to the PHY to signal the
+	 * beginning of the MII instruction.  This is done by sending 32
+	 * consecutive "1" bits.
+	 */
+	e1000_shift_out_mdi_bits_82543(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+	/*
+	 * Now combine the remaining required fields that will indicate a
+	 * write operation. We use this method instead of calling the
+	 * e1000_shift_out_mdi_bits routine for each field in the command. The
+	 * format of a MII write instruction is as follows:
+	 * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+	 */
+	mdic = ((PHY_TURNAROUND) | (offset << 2) | (hw->phy.addr << 7) |
+	        (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+	mdic <<= 16;
+	mdic |= (u32) data;
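+	/*
+	 * Layout of the 32-bit frame assembled above (MSB first on the
+	 * wire): bits 31:30 = SOF, bits 29:28 = write opcode, bits 27:23 =
+	 * PHY address, bits 22:18 = register offset, bits 17:16 = turnaround,
+	 * bits 15:0 = data.
+	 */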
+
+	e1000_shift_out_mdi_bits_82543(hw, mdic, 32);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_raise_mdi_clk_82543 - Raise Management Data Input clock
+ *  @hw: pointer to the HW structure
+ *  @ctrl: pointer to the control register
+ *
+ *  Raise the management data input clock by setting the MDC bit in the control
+ *  register.
+ **/
+static void e1000_raise_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl)
+{
+	/*
+	 * Raise the clock input to the Management Data Clock (by setting the
+	 * MDC bit), and then delay a sufficient amount of time.
+	 */
+	E1000_WRITE_REG(hw, E1000_CTRL, (*ctrl | E1000_CTRL_MDC));
+	E1000_WRITE_FLUSH(hw);
+	usec_delay(10);
+}
+
+/**
+ *  e1000_lower_mdi_clk_82543 - Lower Management Data Input clock
+ *  @hw: pointer to the HW structure
+ *  @ctrl: pointer to the control register
+ *
+ *  Lower the management data input clock by clearing the MDC bit in the
+ *  control register.
+ **/
+static void e1000_lower_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl)
+{
+	/*
+	 * Lower the clock input to the Management Data Clock (by clearing the
+	 * MDC bit), and then delay a sufficient amount of time.
+	 */
+	E1000_WRITE_REG(hw, E1000_CTRL, (*ctrl & ~E1000_CTRL_MDC));
+	E1000_WRITE_FLUSH(hw);
+	usec_delay(10);
+}
+
+/**
+ *  e1000_shift_out_mdi_bits_82543 - Shift data bits out to the PHY
+ *  @hw: pointer to the HW structure
+ *  @data: data to send to the PHY
+ *  @count: number of bits to shift out
+ *
+ *  We need to shift 'count' bits out to the PHY.  So, the value in the
+ *  "data" parameter will be shifted out to the PHY one bit at a time.
+ *  In order to do this, "data" must be broken down into bits.
+ **/
+static void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data,
+                                           u16 count)
+{
+	u32 ctrl, mask;
+
+	/*
+	 * We need to shift "count" number of bits out to the PHY.  So, the
+	 * value in the "data" parameter will be shifted out to the PHY one
+	 * bit at a time.  In order to do this, "data" must be broken down
+	 * into bits.
+	 */
+	mask = 0x01;
+	mask <<= (count - 1);
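+	/*
+	 * The mask starts at the most significant of the "count" bits, so
+	 * the data is clocked out to the PHY MSB first.
+	 */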
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	/* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
+	ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
+
+	while (mask) {
+		/*
+		 * A "1" is shifted out to the PHY by setting the MDIO bit to
+		 * "1" and then raising and lowering the Management Data Clock.
+		 * A "0" is shifted out to the PHY by setting the MDIO bit to
+		 * "0" and then raising and lowering the clock.
+		 */
+		if (data & mask)
+			ctrl |= E1000_CTRL_MDIO;
+		else
+			ctrl &= ~E1000_CTRL_MDIO;
+
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+		E1000_WRITE_FLUSH(hw);
+
+		usec_delay(10);
+
+		e1000_raise_mdi_clk_82543(hw, &ctrl);
+		e1000_lower_mdi_clk_82543(hw, &ctrl);
+
+		mask >>= 1;
+	}
+}
+
+/**
+ *  e1000_shift_in_mdi_bits_82543 - Shift data bits in from the PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  In order to read a register from the PHY, we need to shift 18 bits
+ *  in from the PHY.  Bits are "shifted in" by raising the clock input to
+ *  the PHY (setting the MDC bit), and then reading the value of the data out
+ *  MDIO bit.
+ **/
+static u16 e1000_shift_in_mdi_bits_82543(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	u16 data = 0;
+	u8 i;
+
+	/*
+	 * In order to read a register from the PHY, we need to shift in a
+	 * total of 18 bits from the PHY.  The first two bit (turnaround)
+	 * times are used to avoid contention on the MDIO pin when a read
+	 * operation is performed.  These two bits are ignored by us and
+	 * thrown away.  Bits are "shifted in" by raising the input to the
+	 * Management Data Clock (setting the MDC bit) and then reading the
+	 * value of the MDIO bit.
+	 */
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	/*
+	 * Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as
+	 * input.
+	 */
+	ctrl &= ~E1000_CTRL_MDIO_DIR;
+	ctrl &= ~E1000_CTRL_MDIO;
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	E1000_WRITE_FLUSH(hw);
+
+	/*
+	 * Raise and lower the clock before reading in the data.  This accounts
+	 * for the turnaround bits.  The first clock occurred when we clocked
+	 * out the last bit of the Register Address.
+	 */
+	e1000_raise_mdi_clk_82543(hw, &ctrl);
+	e1000_lower_mdi_clk_82543(hw, &ctrl);
+
+	for (data = 0, i = 0; i < 16; i++) {
+		data <<= 1;
+		e1000_raise_mdi_clk_82543(hw, &ctrl);
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		/* Check to see if we shifted in a "1". */
+		if (ctrl & E1000_CTRL_MDIO)
+			data |= 1;
+		e1000_lower_mdi_clk_82543(hw, &ctrl);
+	}
+
+	e1000_raise_mdi_clk_82543(hw, &ctrl);
+	e1000_lower_mdi_clk_82543(hw, &ctrl);
+
+	return data;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_82543 - Force speed/duplex for PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the function to force speed and duplex for the m88 PHY.  If
+ *  the PHY is not auto-negotiating and the speed is forced to 10Mbit,
+ *  then calls the function for the polarity reversal workaround.
+ **/
+static s32 e1000_phy_force_speed_duplex_82543(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex_82543");
+
+	ret_val = e1000_phy_force_speed_duplex_m88(hw);
+	if (ret_val)
+		goto out;
+
+	if (!hw->mac.autoneg &&
+	    (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED))
+		ret_val = e1000_polarity_reversal_workaround_82543(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_polarity_reversal_workaround_82543 - Workaround polarity reversal
+ *  @hw: pointer to the HW structure
+ *
+ *  When forcing link to 10 Full or 10 Half, the PHY can inadvertently reverse
+ *  the polarity.  To work around the issue, we disable the transmitter on
+ *  the PHY until we have established the link partner's link parameters.
+ **/
+static s32 e1000_polarity_reversal_workaround_82543(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 mii_status_reg;
+	u16 i;
+	bool link;
+
+	/* Polarity reversal workaround for forced 10F/10H links. */
+
+	/* Disable the transmitter on the PHY */
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+	if (ret_val)
+		goto out;
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * This loop will early-out if the NO link condition has been met.
+	 * In other words, DO NOT use e1000_phy_has_link_generic() here.
+	 */
+	for (i = PHY_FORCE_TIME; i > 0; i--) {
+		/*
+		 * Read the MII Status Register and wait for Link Status bit
+		 * to be clear.
+		 */
+
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			goto out;
+
+		if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0)
+			break;
+		msec_delay_irq(100);
+	}
+
+	/* Recommended delay time after link has been lost */
+	msec_delay_irq(1000);
+
+	/* Now we will re-enable the transmitter on the PHY */
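+	/*
+	 * The transmitter is brought back up in stages (0xFFF0, 0xFF00 and
+	 * finally 0x0000) with a 50 msec delay between register writes,
+	 * presumably to let the PHY settle between steps.
+	 */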
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+	if (ret_val)
+		goto out;
+	msec_delay_irq(50);
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
+	if (ret_val)
+		goto out;
+	msec_delay_irq(50);
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
+	if (ret_val)
+		goto out;
+	msec_delay_irq(50);
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Read the MII Status Register and wait for Link Status bit
+	 * to be set.
+	 */
+	ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_TIME, 100000, &link);
+	if (ret_val)
+		goto out;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_hw_reset_82543 - PHY hardware reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the PHY_RESET_DIR bit in the extended device control register
+ *  to put the PHY into a reset and waits for completion.  Once the reset
+ *  has been accomplished, clear the PHY_RESET_DIR bit to take the PHY out
+ *  of reset.  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_phy_hw_reset_82543(struct e1000_hw *hw)
+{
+	struct e1000_functions *func = &hw->func;
+	u32 ctrl_ext;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_phy_hw_reset_82543");
+
+	/*
+	 * Read the Extended Device Control Register, assert the PHY_RESET_DIR
+	 * bit to put the PHY into reset...
+	 */
+	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
+	ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+	E1000_WRITE_FLUSH(hw);
+
+	msec_delay(10);
+
+	/* ...then take it out of reset. */
+	ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+	E1000_WRITE_FLUSH(hw);
+
+	usec_delay(150);
+
+	ret_val = func->get_cfg_done(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_reset_hw_82543 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_reset_hw_82543(struct e1000_hw *hw)
+{
+	u32 ctrl, icr;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_reset_hw_82543");
+
+	DEBUGOUT("Masking off all interrupts\n");
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+	E1000_WRITE_REG(hw, E1000_RCTL, 0);
+	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+	E1000_WRITE_FLUSH(hw);
+
+	e1000_set_tbi_sbp_82543(hw, FALSE);
+
+	/*
+	 * Delay to allow any outstanding PCI transactions to complete before
+	 * resetting the device
+	 */
+	msec_delay(10);
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	DEBUGOUT("Issuing a global reset to 82543/82544 MAC\n");
+	if (hw->mac.type == e1000_82543) {
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+	} else {
+		/*
+		 * The 82544 can't ACK the 64-bit write when issuing the
+		 * reset, so use IO-mapping as a workaround.
+		 */
+		E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+	}
+
+	/*
+	 * After MAC reset, force reload of NVM to restore power-on
+	 * settings to device.
+	 */
+	e1000_reload_nvm(hw);
+	msec_delay(2);
+
+	/* Masking off and clearing any pending interrupts */
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+	icr = E1000_READ_REG(hw, E1000_ICR);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_hw_82543 - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation.
+ **/
+static s32 e1000_init_hw_82543(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_dev_spec_82543 *dev_spec;
+	u32 ctrl;
+	s32 ret_val;
+	u16 i;
+
+	DEBUGFUNC("e1000_init_hw_82543");
+
+	dev_spec = (struct e1000_dev_spec_82543 *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	/* Disabling VLAN filtering */
+	E1000_WRITE_REG(hw, E1000_VET, 0);
+	e1000_clear_vfta(hw);
+
+	/* Setup the receive address. */
+	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+	/* Zero out the Multicast HASH table */
+	DEBUGOUT("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	/*
+	 * Set the PCI priority bit correctly in the CTRL register.  This
+	 * determines if the adapter gives priority to receives, or if it
+	 * gives equal priority to transmits and receives.
+	 */
+	if (hw->mac.type == e1000_82543 && dev_spec->dma_fairness) {
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PRIOR);
+	}
+
+	e1000_pcix_mmrbc_workaround_generic(hw);
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link(hw);
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_82543(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_link_82543 - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the EEPROM to determine the initial polarity value and write the
+ *  extended device control register with the information before calling
+ *  the generic setup link function, which does the following:
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+static s32 e1000_setup_link_82543(struct e1000_hw *hw)
+{
+	u32 ctrl_ext;
+	s32  ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_setup_link_82543");
+
+	/*
+	 * Take the 4 bits from NVM word 0xF that determine the initial
+	 * polarity value for the SW controlled pins, and setup the
+	 * Extended Device Control reg with that info.
+	 * This is needed because one of the SW controlled pins is used for
+	 * signal detection.  So this should be done before phy setup.
+	 */
+	if (hw->mac.type == e1000_82543) {
+		ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+		if (ret_val) {
+			DEBUGOUT("NVM Read Error\n");
+			ret_val = -E1000_ERR_NVM;
+			goto out;
+		}
+		ctrl_ext = ((data & NVM_WORD0F_SWPDIO_EXT_MASK) <<
+		            NVM_SWDPIO_EXT_SHIFT);
+		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+	}
+
+	ret_val = e1000_setup_link_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_copper_link_82543 - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the link for auto-neg or forced speed and duplex.  Then we check
+ *  for link; once link is established, collision distance and flow control
+ *  are configured.
+ **/
+static s32 e1000_setup_copper_link_82543(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+	bool link;
+
+	DEBUGFUNC("e1000_setup_copper_link_82543");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL) | E1000_CTRL_SLU;
+	/*
+	 * With 82543, we need to force speed and duplex on the MAC
+	 * equal to what the PHY speed and duplex configuration is.
+	 * In addition, we need to perform a hardware reset on the
+	 * PHY to take it out of reset.
+	 */
+	if (hw->mac.type == e1000_82543) {
+		ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+		ret_val = e1000_phy_hw_reset(hw);
+		if (ret_val)
+			goto out;
+		hw->phy.reset_disable = FALSE;
+	} else {
+		ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	}
+
+	/* Set MDI/MDI-X, Polarity Reversal, and downshift settings */
+	ret_val = e1000_copper_link_setup_m88(hw);
+	if (ret_val)
+		goto out;
+
+	if (hw->mac.autoneg) {
+		/*
+		 * Setup autoneg and flow control advertisement and perform
+		 * autonegotiation.
+		 */
+		ret_val = e1000_copper_link_autoneg(hw);
+		if (ret_val)
+			goto out;
+	} else {
+		/*
+		 * PHY will be set to 10H, 10F, 100H or 100F
+		 * depending on user settings.
+		 */
+		DEBUGOUT("Forcing Speed and Duplex\n");
+		ret_val = e1000_phy_force_speed_duplex_82543(hw);
+		if (ret_val) {
+			DEBUGOUT("Error Forcing Speed and Duplex\n");
+			goto out;
+		}
+	}
+
+	/*
+	 * Check link status. Wait up to 100 microseconds for link to become
+	 * valid.
+	 */
+	ret_val = e1000_phy_has_link_generic(hw,
+	                                     COPPER_LINK_UP_LIMIT,
+	                                     10,
+	                                     &link);
+	if (ret_val)
+		goto out;
+
+
+	if (link) {
+		DEBUGOUT("Valid link established!!!\n");
+		/* Config the MAC and PHY after link is up */
+		if (hw->mac.type == e1000_82544) {
+			e1000_config_collision_dist_generic(hw);
+		} else {
+			ret_val = e1000_config_mac_to_phy_82543(hw);
+			if (ret_val)
+				goto out;
+		}
+		ret_val = e1000_config_fc_after_link_up_generic(hw);
+	} else {
+		DEBUGOUT("Unable to establish link!!!\n");
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_fiber_link_82543 - Setup link for fiber
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures collision distance and flow control for fiber links.  Upon
+ *  successful setup, poll for link.
+ **/
+static s32 e1000_setup_fiber_link_82543(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_setup_fiber_link_82543");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	/* Take the link out of reset */
+	ctrl &= ~E1000_CTRL_LRST;
+
+	e1000_config_collision_dist_generic(hw);
+
+	ret_val = e1000_commit_fc_settings_generic(hw);
+	if (ret_val)
+		goto out;
+
+	DEBUGOUT("Auto-negotiation enabled\n");
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	E1000_WRITE_FLUSH(hw);
+	msec_delay(1);
+
+	/*
+	 * For these adapters, the SW-definable pin 1 is cleared when the
+	 * optics detect a signal.  If we have a signal, then poll for a
+	 * "Link-Up" indication.
+	 */
+	if (!(E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
+		ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+	} else {
+		DEBUGOUT("No signal detected\n");
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_for_copper_link_82543 - Check for link (Copper)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the PHY for link; if link exists, does the following:
+ *   - check for downshift
+ *   - do polarity workaround (if necessary)
+ *   - configure collision distance
+ *   - configure flow control after link up
+ *   - configure tbi compatibility
+ **/
+static s32 e1000_check_for_copper_link_82543(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 icr, rctl;
+	s32 ret_val;
+	u16 speed, duplex;
+	bool link;
+
+	DEBUGFUNC("e1000_check_for_copper_link_82543");
+
+	if (!mac->get_link_status) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link)
+		goto out; /* No link detected */
+
+	mac->get_link_status = FALSE;
+
+	e1000_check_downshift_generic(hw);
+
+	/*
+	 * If we are forcing speed/duplex, then we can return since
+	 * we have already determined whether we have link or not.
+	 */
+	if (!mac->autoneg) {
+		/*
+		 * If speed and duplex are forced to 10H or 10F, then we will
+		 * implement the polarity reversal workaround.  We disable
+		 * interrupts first, and upon returning, restore the device's
+		 * interrupt state to its previous value, except for the link
+		 * status change interrupt, which will happen due to the
+		 * execution of this workaround.
+		 */
+		if (mac->forced_speed_duplex & E1000_ALL_10_SPEED) {
+			E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
+			ret_val = e1000_polarity_reversal_workaround_82543(hw);
+			icr = E1000_READ_REG(hw, E1000_ICR);
+			E1000_WRITE_REG(hw, E1000_ICS, (icr & ~E1000_ICS_LSC));
+			E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
+		}
+
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	/*
+	 * We have a M88E1000 PHY and Auto-Neg is enabled.  If we
+	 * have Si on board that is 82544 or newer, Auto
+	 * Speed Detection takes care of MAC speed/duplex
+	 * configuration.  So we only need to configure Collision
+	 * Distance in the MAC.  Otherwise, we need to force
+	 * speed/duplex on the MAC to the current PHY speed/duplex
+	 * settings.
+	 */
+	if (mac->type == e1000_82544)
+		e1000_config_collision_dist_generic(hw);
+	else {
+		ret_val = e1000_config_mac_to_phy_82543(hw);
+		if (ret_val) {
+			DEBUGOUT("Error configuring MAC to PHY settings\n");
+			goto out;
+		}
+	}
+
+	/*
+	 * Configure Flow Control now that Auto-Neg has completed.
+	 * First, we need to restore the desired flow control
+	 * settings because we may have had to re-autoneg with a
+	 * different link partner.
+	 */
+	ret_val = e1000_config_fc_after_link_up_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("Error configuring flow control\n");
+	}
+
+	/*
+	 * At this point we know that we are on copper and we have
+	 * auto-negotiated link.  These are conditions for checking the link
+	 * partner capability register.  We use the link speed to determine if
+	 * TBI compatibility needs to be turned on or off.  If the link is not
+	 * at gigabit speed, then TBI compatibility is not needed.  If we are
+	 * at gigabit speed, we turn on TBI compatibility.
+	 */
+	if (e1000_tbi_compatibility_enabled_82543(hw)) {
+		ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+		if (ret_val) {
+			DEBUGOUT("Error getting link speed and duplex\n");
+			return ret_val;
+		}
+		if (speed != SPEED_1000) {
+			/*
+			 * If link speed is not set to gigabit speed,
+			 * we do not need to enable TBI compatibility.
+			 */
+			if (e1000_tbi_sbp_enabled_82543(hw)) {
+				/*
+				 * If we previously were in the mode,
+				 * turn it off.
+				 */
+				e1000_set_tbi_sbp_82543(hw, FALSE);
+				rctl = E1000_READ_REG(hw, E1000_RCTL);
+				rctl &= ~E1000_RCTL_SBP;
+				E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+			}
+		} else {
+			/*
+			 * If TBI compatibility was previously off,
+			 * turn it on. For compatibility with a TBI link
+			 * partner, we will store bad packets. Some
+			 * frames have an additional byte on the end and
+			 * will look like CRC errors to the hardware.
+			 */
+			if (!e1000_tbi_sbp_enabled_82543(hw)) {
+				e1000_set_tbi_sbp_82543(hw, TRUE);
+				rctl = E1000_READ_REG(hw, E1000_RCTL);
+				rctl |= E1000_RCTL_SBP;
+				E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+			}
+		}
+	}
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_for_fiber_link_82543 - Check for link (Fiber)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks for link up on the hardware.  If link is not up and we have
+ *  a signal, then we need to force link up.
+ **/
+static s32 e1000_check_for_fiber_link_82543(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 rxcw, ctrl, status;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_check_for_fiber_link_82543");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	status = E1000_READ_REG(hw, E1000_STATUS);
+	rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+	/*
+	 * If we don't have link (auto-negotiation failed or link partner
+	 * cannot auto-negotiate), the cable is plugged in (we have signal),
+	 * and our link partner is not trying to auto-negotiate with us (we
+	 * are receiving idles or data), we need to force link up. We also
+	 * need to give auto-negotiation time to complete, in case the cable
+	 * was just plugged in. The autoneg_failed flag does this.
+	 */
+	/* (ctrl & E1000_CTRL_SWDPIN1) == 0 == have signal */
+	if ((!(ctrl & E1000_CTRL_SWDPIN1)) &&
+	    (!(status & E1000_STATUS_LU)) &&
+	    (!(rxcw & E1000_RXCW_C))) {
+		if (mac->autoneg_failed == 0) {
+			mac->autoneg_failed = 1;
+			ret_val = 0;
+			goto out;
+		}
+		DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+		/* Disable auto-negotiation in the TXCW register */
+		E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+		/* Force link-up and also force full-duplex. */
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+		/* Configure Flow Control after forcing link up. */
+		ret_val = e1000_config_fc_after_link_up_generic(hw);
+		if (ret_val) {
+			DEBUGOUT("Error configuring flow control\n");
+			goto out;
+		}
+	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+		/*
+		 * If we are forcing link and we are receiving /C/ ordered
+		 * sets, re-enable auto-negotiation in the TXCW register
+		 * and disable forced link in the Device Control register
+		 * in an attempt to auto-negotiate with our link partner.
+		 */
+		DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+		E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+		E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+		mac->serdes_has_link = TRUE;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_config_mac_to_phy_82543 - Configure MAC to PHY settings
+ *  @hw: pointer to the HW structure
+ *
+ *  For the 82543 silicon, we need to set the MAC to match the settings
+ *  of the PHY, even if the PHY is auto-negotiating.
+ **/
+static s32 e1000_config_mac_to_phy_82543(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+	u16 phy_data;
+
+	DEBUGFUNC("e1000_config_mac_to_phy_82543");
+
+	/* Set the bits to force speed and duplex */
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
+
+	/*
+	 * Set up duplex in the Device Control and Transmit Control
+	 * registers depending on negotiated values.
+	 */
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	ctrl &= ~E1000_CTRL_FD;
+	if (phy_data & M88E1000_PSSR_DPLX)
+		ctrl |= E1000_CTRL_FD;
+
+	e1000_config_collision_dist_generic(hw);
+
+	/*
+	 * Set up speed in the Device Control register depending on
+	 * negotiated values.
+	 */
+	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+		ctrl |= E1000_CTRL_SPD_1000;
+	else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
+		ctrl |= E1000_CTRL_SPD_100;
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_vfta_82543 - Write value to VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @offset: the 32-bit offset at which to write the value.
+ *  @value: the 32-bit value to write at the given offset.
+ *
+ *  This writes a 32-bit value to a 32-bit offset in the VLAN filter
+ *  table.
+ **/
+static void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	u32 temp;
+
+	DEBUGFUNC("e1000_write_vfta_82543");
+
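+	/*
+	 * On the 82544, writes to odd-numbered VFTA entries use the same
+	 * save/restore pattern as e1000_mta_set_82543() below: the preceding
+	 * even-numbered entry is read first and written back afterwards,
+	 * apparently to protect it from being corrupted by the odd write.
+	 */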
+	if ((hw->mac.type == e1000_82544) && (offset & 1)) {
+		temp = E1000_READ_REG_ARRAY(hw, E1000_VFTA, offset - 1);
+		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+		E1000_WRITE_FLUSH(hw);
+		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset - 1, temp);
+		E1000_WRITE_FLUSH(hw);
+	} else {
+		e1000_write_vfta_generic(hw, offset, value);
+	}
+}
+
+/**
+ *  e1000_mta_set_82543 - Set multicast filter table address
+ *  @hw: pointer to the HW structure
+ *  @hash_value: determines the MTA register and bit to set
+ *
+ *  The multicast table address is a register array of 32-bit registers.
+ *  The hash_value is used to determine what register the bit is in, the
+ *  current value is read, the new bit is OR'd in and the new value is
+ *  written back into the register.
+ **/
+static void e1000_mta_set_82543(struct e1000_hw *hw, u32 hash_value)
+{
+	u32 hash_bit, hash_reg, mta, temp;
+
+	DEBUGFUNC("e1000_mta_set_82543");
+
+	hash_reg = (hash_value >> 5);
+
+	/*
+	 * If we are on an 82544 and we are trying to write an odd offset
+	 * in the MTA, save off the previous entry before writing and
+	 * restore the old value after writing.
+	 */
+	if ((hw->mac.type == e1000_82544) && (hash_reg & 1)) {
+		hash_reg &= (hw->mac.mta_reg_count - 1);
+		hash_bit = hash_value & 0x1F;
+		mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
+		mta |= (1 << hash_bit);
+		temp = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg - 1);
+
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
+		E1000_WRITE_FLUSH(hw);
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg - 1, temp);
+		E1000_WRITE_FLUSH(hw);
+	} else {
+		e1000_mta_set_generic(hw, hash_value);
+	}
+}
+
+/**
+ *  e1000_led_on_82543 - Turn on SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Turns the SW defined LED on.  This is a function pointer entry point
+ *  called by the api module.
+ **/
+static s32 e1000_led_on_82543(struct e1000_hw *hw)
+{
+	u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	DEBUGFUNC("e1000_led_on_82543");
+
+	if (hw->mac.type == e1000_82544 &&
+	    hw->phy.media_type == e1000_media_type_copper) {
+		/* Clear SW-definable Pin 0 to turn on the LED */
+		ctrl &= ~E1000_CTRL_SWDPIN0;
+		ctrl |= E1000_CTRL_SWDPIO0;
+	} else {
+		/* Fiber 82544 and all 82543 use this method */
+		ctrl |= E1000_CTRL_SWDPIN0;
+		ctrl |= E1000_CTRL_SWDPIO0;
+	}
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_off_82543 - Turn off SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Turns the SW defined LED off.  This is a function pointer entry point
+ *  called by the api module.
+ **/
+static s32 e1000_led_off_82543(struct e1000_hw *hw)
+{
+	u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	DEBUGFUNC("e1000_led_off_82543");
+
+	if (hw->mac.type == e1000_82544 &&
+	    hw->phy.media_type == e1000_media_type_copper) {
+		/* Set SW-definable Pin 0 to turn off the LED */
+		ctrl |= E1000_CTRL_SWDPIN0;
+		ctrl |= E1000_CTRL_SWDPIO0;
+	} else {
+		ctrl &= ~E1000_CTRL_SWDPIN0;
+		ctrl |= E1000_CTRL_SWDPIO0;
+	}
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_clear_hw_cntrs_82543 - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw)
+{
+	volatile u32 temp;
+
+	DEBUGFUNC("e1000_clear_hw_cntrs_82543");
+
+	e1000_clear_hw_cntrs_base_generic(hw);
+
+	temp = E1000_READ_REG(hw, E1000_PRC64);
+	temp = E1000_READ_REG(hw, E1000_PRC127);
+	temp = E1000_READ_REG(hw, E1000_PRC255);
+	temp = E1000_READ_REG(hw, E1000_PRC511);
+	temp = E1000_READ_REG(hw, E1000_PRC1023);
+	temp = E1000_READ_REG(hw, E1000_PRC1522);
+	temp = E1000_READ_REG(hw, E1000_PTC64);
+	temp = E1000_READ_REG(hw, E1000_PTC127);
+	temp = E1000_READ_REG(hw, E1000_PTC255);
+	temp = E1000_READ_REG(hw, E1000_PTC511);
+	temp = E1000_READ_REG(hw, E1000_PTC1023);
+	temp = E1000_READ_REG(hw, E1000_PTC1522);
+
+	temp = E1000_READ_REG(hw, E1000_ALGNERRC);
+	temp = E1000_READ_REG(hw, E1000_RXERRC);
+	temp = E1000_READ_REG(hw, E1000_TNCRS);
+	temp = E1000_READ_REG(hw, E1000_CEXTERR);
+	temp = E1000_READ_REG(hw, E1000_TSCTC);
+	temp = E1000_READ_REG(hw, E1000_TSCTFC);
+}
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/kcompat.h	2022-03-21 12:58:29.757885373 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _KCOMPAT_H_
+#define _KCOMPAT_H_
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/mii.h>
+#include <asm/io.h>
+
+#include <rtnet_port.h>
+/* NAPI enable/disable flags here */
+
+
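+/*
+ * For the e1000 and ixgb blocks below, CONFIG_*_NAPI or *_NAPI turns NAPI
+ * on, while *_NO_NAPI, checked last, overrides both and forces it off.
+ */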
+#ifdef _E1000_H_
+#ifdef CONFIG_E1000_NAPI
+#define NAPI
+#endif
+#ifdef E1000_NAPI
+#undef NAPI
+#define NAPI
+#endif
+#ifdef E1000_NO_NAPI
+#undef NAPI
+#endif
+#endif
+
+#ifdef _IGB_H_
+#define NAPI
+#endif
+
+#ifdef _IXGB_H_
+#ifdef CONFIG_IXGB_NAPI
+#define NAPI
+#endif
+#ifdef IXGB_NAPI
+#undef NAPI
+#define NAPI
+#endif
+#ifdef IXGB_NO_NAPI
+#undef NAPI
+#endif
+#endif
+
+
+#ifdef DRIVER_E1000
+#define adapter_struct e1000_adapter
+#endif
+
+
+/* RTnet settings */
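+/*
+ * NAPI and the stack offload features below (TSO, VLAN TX offload,
+ * netpoll, permanent-address reporting) are forced off for this port,
+ * presumably because RTnet drives the NIC through its own deterministic
+ * packet path.
+ */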
+#ifdef NAPI
+#undef NAPI
+#endif
+
+#undef NETIF_F_TSO
+#undef NETIF_F_HW_VLAN_TX
+#undef CONFIG_NET_POLL_CONTROLLER
+#ifdef ETHTOOL_GPERMADDR
+#undef ETHTOOL_GPERMADDR
+#endif
+
+
+/* and finally set defines so that the code sees the changes */
+#ifdef NAPI
+#ifndef CONFIG_E1000_NAPI
+#define CONFIG_E1000_NAPI
+#endif
+#ifndef CONFIG_IXGB_NAPI
+#define CONFIG_IXGB_NAPI
+#endif
+#else
+#undef CONFIG_E1000_NAPI
+#undef CONFIG_IXGB_NAPI
+#endif
+
+/* packet split disable/enable */
+#ifdef DISABLE_PACKET_SPLIT
+#undef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT
+#endif
+
+/* MSI compatibility code for all kernels and drivers */
+#ifdef DISABLE_PCI_MSI
+#undef CONFIG_PCI_MSI
+#endif
+
+#ifdef DISABLE_PM
+#undef CONFIG_PM
+#endif
+
+#ifdef DISABLE_NET_POLL_CONTROLLER
+#undef CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef PMSG_SUSPEND
+#define PMSG_SUSPEND 3
+#endif
+
+/* generic boolean compatibility */
+#undef TRUE
+#undef FALSE
+#define TRUE true
+#define FALSE false
+#ifdef GCC_VERSION
+#if ( GCC_VERSION < 3000 )
+#define _Bool char
+#endif
+#endif
+#ifndef bool
+#define bool _Bool
+#define true 1
+#define false 0
+#endif
+
+
+#ifndef module_param
+#define module_param(v,t,p) MODULE_PARM(v, "i");
+#endif
+
+#ifndef DMA_64BIT_MASK
+#define DMA_64BIT_MASK  0xffffffffffffffffULL
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK  0x00000000ffffffffULL
+#endif
+
+#ifndef PCI_CAP_ID_EXP
+#define PCI_CAP_ID_EXP 0x10
+#endif
+
+#ifndef mmiowb
+#ifdef CONFIG_IA64
+#define mmiowb() asm volatile ("mf.a" ::: "memory")
+#else
+#define mmiowb()
+#endif
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)
+#endif
+
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(x)	kfree(x)
+#endif
+
+#ifdef HAVE_POLL_CONTROLLER
+#define CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#endif
+
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1
+#endif
+
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1
+#endif
+
+#ifndef SKB_DATAREF_SHIFT
+/* if we do not have the infrastructure to detect if skb_header is cloned
+   just return false in all cases */
+#define skb_header_cloned(x) 0
+#endif
+
+#ifndef NETIF_F_GSO
+#define gso_size tso_size
+#define gso_segs tso_segs
+#endif
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#define CHECKSUM_COMPLETE CHECKSUM_HW
+#endif
+
+#ifndef __read_mostly
+#define __read_mostly
+#endif
+
+#ifndef MII_RESV1
+#define MII_RESV1		0x17		/* Reserved...		*/
+#endif
+
+#ifndef unlikely
+#define unlikely(_x) _x
+#define likely(_x) _x
+#endif
+
+#ifndef WARN_ON
+#define WARN_ON(x)
+#endif
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+	.vendor = (vend), .device = (dev), \
+	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+#ifndef num_online_cpus
+#define num_online_cpus() smp_num_cpus
+#endif
+
+#ifndef _LINUX_RANDOM_H
+#include <linux/random.h>
+#endif
+
+#ifndef DECLARE_BITMAP
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#endif
+#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
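+/* e.g. DECLARE_BITMAP(foo, 64) declares "long foo[BITS_TO_LONGS(64)]" */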
+#endif
+
+#ifndef VLAN_HLEN
+#define VLAN_HLEN 4
+#endif
+
+#ifndef VLAN_ETH_HLEN
+#define VLAN_ETH_HLEN 18
+#endif
+
+#ifndef VLAN_ETH_FRAME_LEN
+#define VLAN_ETH_FRAME_LEN 1518
+#endif
+
+
+/*****************************************************************************/
+/* Installations with an ethtool version lacking eeprom, adapter id, or
+ * statistics support */
+
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+#endif
+
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x1d
+#undef ethtool_drvinfo
+#define ethtool_drvinfo k_ethtool_drvinfo
+struct k_ethtool_drvinfo {
+	u32 cmd;
+	char driver[32];
+	char version[32];
+	char fw_version[32];
+	char bus_info[32];
+	char reserved1[32];
+	char reserved2[16];
+	u32 n_stats;
+	u32 testinfo_len;
+	u32 eedump_len;
+	u32 regdump_len;
+};
+
+struct ethtool_stats {
+	u32 cmd;
+	u32 n_stats;
+	u64 data[0];
+};
+#endif /* ETHTOOL_GSTATS */
+
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x1c
+#endif /* ETHTOOL_PHYS_ID */
+
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x1b
+enum ethtool_stringset {
+	ETH_SS_TEST             = 0,
+	ETH_SS_STATS,
+};
+struct ethtool_gstrings {
+	u32 cmd;            /* ETHTOOL_GSTRINGS */
+	u32 string_set;     /* string set id, e.g. ETH_SS_TEST, etc. */
+	u32 len;            /* number of strings in the string set */
+	u8 data[0];
+};
+#endif /* ETHTOOL_GSTRINGS */
+
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x1a
+enum ethtool_test_flags {
+	ETH_TEST_FL_OFFLINE	= (1 << 0),
+	ETH_TEST_FL_FAILED	= (1 << 1),
+};
+struct ethtool_test {
+	u32 cmd;
+	u32 flags;
+	u32 reserved;
+	u32 len;
+	u64 data[0];
+};
+#endif /* ETHTOOL_TEST */
+
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0xb
+#undef ETHTOOL_GREGS
+struct ethtool_eeprom {
+	u32 cmd;
+	u32 magic;
+	u32 offset;
+	u32 len;
+	u8 data[0];
+};
+
+struct ethtool_value {
+	u32 cmd;
+	u32 data;
+};
+#endif /* ETHTOOL_GEEPROM */
+
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0xa
+#endif /* ETHTOOL_GLINK */
+
+#ifndef ETHTOOL_GREGS
+#define ETHTOOL_GREGS		0x00000004 /* Get NIC registers */
+#define ethtool_regs _kc_ethtool_regs
+/* for passing big chunks of data */
+struct _kc_ethtool_regs {
+	u32 cmd;
+	u32 version; /* driver-specific, indicates different chips/revs */
+	u32 len; /* bytes */
+	u8 data[0];
+};
+#endif /* ETHTOOL_GREGS */
+
+#ifndef ETHTOOL_GMSGLVL
+#define ETHTOOL_GMSGLVL		0x00000007 /* Get driver message level */
+#endif
+#ifndef ETHTOOL_SMSGLVL
+#define ETHTOOL_SMSGLVL		0x00000008 /* Set driver msg level, priv. */
+#endif
+#ifndef ETHTOOL_NWAY_RST
+#define ETHTOOL_NWAY_RST	0x00000009 /* Restart autonegotiation, priv */
+#endif
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK		0x0000000a /* Get link status */
+#endif
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM		0x0000000b /* Get EEPROM data */
+#endif
+#ifndef ETHTOOL_SEEPROM
+#define ETHTOOL_SEEPROM		0x0000000c /* Set EEPROM data */
+#endif
+#ifndef ETHTOOL_GCOALESCE
+#define ETHTOOL_GCOALESCE	0x0000000e /* Get coalesce config */
+/* for configuring coalescing parameters of chip */
+#define ethtool_coalesce _kc_ethtool_coalesce
+struct _kc_ethtool_coalesce {
+	u32	cmd;	/* ETHTOOL_{G,S}COALESCE */
+
+	/* How many usecs to delay an RX interrupt after
+	 * a packet arrives.  If 0, only rx_max_coalesced_frames
+	 * is used.
+	 */
+	u32	rx_coalesce_usecs;
+
+	/* How many packets to delay an RX interrupt after
+	 * a packet arrives.  If 0, only rx_coalesce_usecs is
+	 * used.  It is illegal to set both usecs and max frames
+	 * to zero as this would cause RX interrupts to never be
+	 * generated.
+	 */
+	u32	rx_max_coalesced_frames;
+
+	/* Same as above two parameters, except that these values
+	 * apply while an IRQ is being serviced by the host.  Not
+	 * all cards support this feature and the values are ignored
+	 * in that case.
+	 */
+	u32	rx_coalesce_usecs_irq;
+	u32	rx_max_coalesced_frames_irq;
+
+	/* How many usecs to delay a TX interrupt after
+	 * a packet is sent.  If 0, only tx_max_coalesced_frames
+	 * is used.
+	 */
+	u32	tx_coalesce_usecs;
+
+	/* How many packets to delay a TX interrupt after
+	 * a packet is sent.  If 0, only tx_coalesce_usecs is
+	 * used.  It is illegal to set both usecs and max frames
+	 * to zero as this would cause TX interrupts to never be
+	 * generated.
+	 */
+	u32	tx_max_coalesced_frames;
+
+	/* Same as above two parameters, except that these values
+	 * apply while an IRQ is being serviced by the host.  Not
+	 * all cards support this feature and the values are ignored
+	 * in that case.
+	 */
+	u32	tx_coalesce_usecs_irq;
+	u32	tx_max_coalesced_frames_irq;
+
+	/* How many usecs to delay in-memory statistics
+	 * block updates.  Some drivers do not have an in-memory
+	 * statistic block, and in such cases this value is ignored.
+	 * This value must not be zero.
+	 */
+	u32	stats_block_coalesce_usecs;
+
+	/* Adaptive RX/TX coalescing is an algorithm implemented by
+	 * some drivers to improve latency under low packet rates and
+	 * improve throughput under high packet rates.  Some drivers
+	 * only implement one of RX or TX adaptive coalescing.  Anything
+	 * not implemented by the driver causes these values to be
+	 * silently ignored.
+	 */
+	u32	use_adaptive_rx_coalesce;
+	u32	use_adaptive_tx_coalesce;
+
+	/* When the packet rate (measured in packets per second)
+	 * is below pkt_rate_low, the {rx,tx}_*_low parameters are
+	 * used.
+	 */
+	u32	pkt_rate_low;
+	u32	rx_coalesce_usecs_low;
+	u32	rx_max_coalesced_frames_low;
+	u32	tx_coalesce_usecs_low;
+	u32	tx_max_coalesced_frames_low;
+
+	/* When the packet rate is below pkt_rate_high but above
+	 * pkt_rate_low (both measured in packets per second) the
+	 * normal {rx,tx}_* coalescing parameters are used.
+	 */
+
+	/* When the packet rate (measured in packets per second)
+	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+	 * used.
+	 */
+	u32	pkt_rate_high;
+	u32	rx_coalesce_usecs_high;
+	u32	rx_max_coalesced_frames_high;
+	u32	tx_coalesce_usecs_high;
+	u32	tx_max_coalesced_frames_high;
+
+	/* How often to do adaptive coalescing packet rate sampling,
+	 * measured in seconds.  Must not be zero.
+	 */
+	u32	rate_sample_interval;
+};
+#endif /* ETHTOOL_GCOALESCE */
+
+#ifndef ETHTOOL_SCOALESCE
+#define ETHTOOL_SCOALESCE	0x0000000f /* Set coalesce config. */
+#endif
+#ifndef ETHTOOL_GRINGPARAM
+#define ETHTOOL_GRINGPARAM	0x00000010 /* Get ring parameters */
+/* for configuring RX/TX ring parameters */
+#define ethtool_ringparam _kc_ethtool_ringparam
+struct _kc_ethtool_ringparam {
+	u32	cmd;	/* ETHTOOL_{G,S}RINGPARAM */
+
+	/* Read only attributes.  These indicate the maximum number
+	 * of pending RX/TX ring entries the driver will allow the
+	 * user to set.
+	 */
+	u32	rx_max_pending;
+	u32	rx_mini_max_pending;
+	u32	rx_jumbo_max_pending;
+	u32	tx_max_pending;
+
+	/* Values changeable by the user.  The valid values are
+	 * in the range 1 to the "*_max_pending" counterpart above.
+	 */
+	u32	rx_pending;
+	u32	rx_mini_pending;
+	u32	rx_jumbo_pending;
+	u32	tx_pending;
+};
+#endif /* ETHTOOL_GRINGPARAM */
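+
+/*
+ * Example (illustrative numbers): if a NIC reports rx_max_pending = 4096 and
+ * tx_max_pending = 4096, then rx_pending = 256, tx_pending = 1024 is a valid
+ * set request, while rx_pending = 8192 would be out of range.
+ */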
+
+#ifndef ETHTOOL_SRINGPARAM
+#define ETHTOOL_SRINGPARAM	0x00000011 /* Set ring parameters, priv. */
+#endif
+#ifndef ETHTOOL_GPAUSEPARAM
+#define ETHTOOL_GPAUSEPARAM	0x00000012 /* Get pause parameters */
+/* for configuring link flow control parameters */
+#define ethtool_pauseparam _kc_ethtool_pauseparam
+struct _kc_ethtool_pauseparam {
+	u32	cmd;	/* ETHTOOL_{G,S}PAUSEPARAM */
+
+	/* If the link is being auto-negotiated (via ethtool_cmd.autoneg
+	 * being true) the user may set 'autoneg' here non-zero to have the
+	 * pause parameters be auto-negotiated too.  In such a case, the
+	 * {rx,tx}_pause values below determine what capabilities are
+	 * advertised.
+	 *
+	 * If 'autoneg' is zero or the link is not being auto-negotiated,
+	 * then {rx,tx}_pause force the driver to use/not-use pause
+	 * flow control.
+	 */
+	u32	autoneg;
+	u32	rx_pause;
+	u32	tx_pause;
+};
+#endif /* ETHTOOL_GPAUSEPARAM */
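+
+/*
+ * Example (illustrative): to force symmetric flow control regardless of
+ * auto-negotiation, a caller would set autoneg = 0, rx_pause = 1 and
+ * tx_pause = 1; with autoneg = 1 the same two fields instead select which
+ * pause capabilities are advertised.
+ */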
+
+#ifndef ETHTOOL_SPAUSEPARAM
+#define ETHTOOL_SPAUSEPARAM	0x00000013 /* Set pause parameters. */
+#endif
+#ifndef ETHTOOL_GRXCSUM
+#define ETHTOOL_GRXCSUM		0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SRXCSUM
+#define ETHTOOL_SRXCSUM		0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GTXCSUM
+#define ETHTOOL_GTXCSUM		0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STXCSUM
+#define ETHTOOL_STXCSUM		0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GSG
+#define ETHTOOL_GSG		0x00000018 /* Get scatter-gather enable
+					    * (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SSG
+#define ETHTOOL_SSG		0x00000019 /* Set scatter-gather enable
+					    * (ethtool_value). */
+#endif
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST		0x0000001a /* execute NIC self-test, priv. */
+#endif
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS	0x0000001b /* get specified string set */
+#endif
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID		0x0000001c /* identify the NIC */
+#endif
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS		0x0000001d /* get NIC-specific statistics */
+#endif
+#ifndef ETHTOOL_GTSO
+#define ETHTOOL_GTSO		0x0000001e /* Get TSO enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STSO
+#define ETHTOOL_STSO		0x0000001f /* Set TSO enable (ethtool_value) */
+#endif
+
+#ifndef ETHTOOL_BUSINFO_LEN
+#define ETHTOOL_BUSINFO_LEN	32
+#endif
+
+#ifndef HAVE_PCI_SET_MWI
+#define pci_set_mwi(X) pci_write_config_word(X, \
+			       PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
+			       PCI_COMMAND_INVALIDATE);
+#define pci_clear_mwi(X) pci_write_config_word(X, \
+			       PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
+			       ~PCI_COMMAND_INVALIDATE);
+#endif
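+/*
+ * Note: the fallback pci_set_mwi/pci_clear_mwi macros above rely on a local
+ * 'adapter' being in scope at the call site; they rewrite the PCI_COMMAND
+ * word from the stored pci_cmd_word with Memory Write Invalidate set or
+ * cleared.
+ */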
+
+
+#undef HAVE_PCI_ERS
+
+#endif /* _KCOMPAT_H_ */
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.h	2022-03-21 12:58:29.751885431 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_82542.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_MANAGE_H_
+#define _E1000_MANAGE_H_
+
+bool e1000_check_mng_mode_generic(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw);
+s32  e1000_mng_enable_host_if_generic(struct e1000_hw *hw);
+s32  e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
+                                     u16 length, u16 offset, u8 *sum);
+s32  e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
+                                    struct e1000_host_mng_command_header *hdr);
+s32  e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw,
+                                       u8 *buffer, u16 length);
+
+typedef enum {
+	e1000_mng_mode_none = 0,
+	e1000_mng_mode_asf,
+	e1000_mng_mode_pt,
+	e1000_mng_mode_ipmi,
+	e1000_mng_mode_host_if_only
+} e1000_mng_mode;
+
+#define E1000_FACTPS_MNGCG    0x20000000
+
+#define E1000_FWSM_MODE_MASK  0xE
+#define E1000_FWSM_MODE_SHIFT 1
+
+#define E1000_MNG_IAMT_MODE                  0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH         0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET         0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT       10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD        64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN    0x2
+
+#define E1000_VFTA_ENTRY_SHIFT               5
+#define E1000_VFTA_ENTRY_MASK                0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK      0x1F
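+/* Example (illustrative): VLAN ID 1234 selects VFTA register
+ * (1234 >> 5) & 0x7F = 38, bit 1234 & 0x1F = 18.
+ */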
+
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH       1792 /* Num of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH      448 /* Num of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT             500 /* Process HI command limit */
+
+#define E1000_HICR_EN              0x01  /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C               0x02
+#define E1000_HICR_SV              0x04  /* Status Validity */
+#define E1000_HICR_FW_RESET_ENABLE 0x40
+#define E1000_HICR_FW_RESET        0x80
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE  0x544D4149
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_82542.c	2022-03-21 12:58:29.746885480 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_main.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_82542 (rev 1 & 2)
+ */
+
+#include "e1000_api.h"
+
+static s32  e1000_init_phy_params_82542(struct e1000_hw *hw);
+static s32  e1000_init_nvm_params_82542(struct e1000_hw *hw);
+static s32  e1000_init_mac_params_82542(struct e1000_hw *hw);
+static s32  e1000_get_bus_info_82542(struct e1000_hw *hw);
+static s32  e1000_reset_hw_82542(struct e1000_hw *hw);
+static s32  e1000_init_hw_82542(struct e1000_hw *hw);
+static s32  e1000_setup_link_82542(struct e1000_hw *hw);
+static s32  e1000_led_on_82542(struct e1000_hw *hw);
+static s32  e1000_led_off_82542(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw);
+
+struct e1000_dev_spec_82542 {
+	bool dma_fairness;
+};
+
+/**
+ *  e1000_init_phy_params_82542 - Init PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_phy_params_82542(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_phy_params_82542");
+
+	phy->type               = e1000_phy_none;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_82542 - Init NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_nvm_params_82542(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_functions *func = &hw->func;
+
+	DEBUGFUNC("e1000_init_nvm_params_82542");
+
+	nvm->address_bits       =  6;
+	nvm->delay_usec         = 50;
+	nvm->opcode_bits        =  3;
+	nvm->type               = e1000_nvm_eeprom_microwire;
+	nvm->word_size          = 64;
+
+	/* Function Pointers */
+	func->read_nvm          = e1000_read_nvm_microwire;
+	func->release_nvm       = e1000_stop_nvm;
+	func->write_nvm         = e1000_write_nvm_microwire;
+	func->update_nvm        = e1000_update_nvm_checksum_generic;
+	func->validate_nvm      = e1000_validate_nvm_checksum_generic;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_mac_params_82542 - Init MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_mac_params_82542(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_mac_params_82542");
+
+	/* Set media type */
+	hw->phy.media_type = e1000_media_type_fiber;
+
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_RAR_ENTRIES;
+
+	/* Function pointers */
+
+	/* bus type/speed/width */
+	func->get_bus_info = e1000_get_bus_info_82542;
+	/* reset */
+	func->reset_hw = e1000_reset_hw_82542;
+	/* hw initialization */
+	func->init_hw = e1000_init_hw_82542;
+	/* link setup */
+	func->setup_link = e1000_setup_link_82542;
+	/* phy/fiber/serdes setup */
+	func->setup_physical_interface = e1000_setup_fiber_serdes_link_generic;
+	/* check for link */
+	func->check_for_link = e1000_check_for_fiber_link_generic;
+	/* multicast address update */
+	func->update_mc_addr_list = e1000_update_mc_addr_list_generic;
+	/* writing VFTA */
+	func->write_vfta = e1000_write_vfta_generic;
+	/* clearing VFTA */
+	func->clear_vfta = e1000_clear_vfta_generic;
+	/* setting MTA */
+	func->mta_set = e1000_mta_set_generic;
+	/* turn on/off LED */
+	func->led_on = e1000_led_on_82542;
+	func->led_off = e1000_led_off_82542;
+	/* remove device */
+	func->remove_device = e1000_remove_device_generic;
+	/* clear hardware counters */
+	func->clear_hw_cntrs = e1000_clear_hw_cntrs_82542;
+	/* link info */
+	func->get_link_up_info = e1000_get_speed_and_duplex_fiber_serdes_generic;
+
+	hw->dev_spec_size = sizeof(struct e1000_dev_spec_82542);
+
+	/* Device-specific structure allocation */
+	ret_val = e1000_alloc_zeroed_dev_spec_struct(hw, hw->dev_spec_size);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_function_pointers_82542 - Init func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  The only function explicitly called by the api module to initialize
+ *  all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82542(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_init_function_pointers_82542");
+
+	hw->func.init_mac_params = e1000_init_mac_params_82542;
+	hw->func.init_nvm_params = e1000_init_nvm_params_82542;
+	hw->func.init_phy_params = e1000_init_phy_params_82542;
+}
+
+/**
+ *  e1000_get_bus_info_82542 - Obtain bus information for adapter
+ *  @hw: pointer to the HW structure
+ *
+ *  This obtains information about the HW bus to which the
+ *  adapter is attached and stores it in the hw structure.  This is a function
+ *  pointer entry point called by the api module.
+ **/
+static s32 e1000_get_bus_info_82542(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_get_bus_info_82542");
+
+	hw->bus.type = e1000_bus_type_pci;
+	hw->bus.speed = e1000_bus_speed_unknown;
+	hw->bus.width = e1000_bus_width_unknown;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_reset_hw_82542 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_reset_hw_82542(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	s32 ret_val = E1000_SUCCESS;
+	u32 ctrl, icr;
+
+	DEBUGFUNC("e1000_reset_hw_82542");
+
+	if (hw->revision_id == E1000_REVISION_2) {
+		DEBUGOUT("Disabling MWI on 82542 rev 2\n");
+		e1000_pci_clear_mwi(hw);
+	}
+
+	DEBUGOUT("Masking off all interrupts\n");
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+	E1000_WRITE_REG(hw, E1000_RCTL, 0);
+	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+	E1000_WRITE_FLUSH(hw);
+
+	/*
+	 * Delay to allow any outstanding PCI transactions to complete before
+	 * resetting the device
+	 */
+	msec_delay(10);
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	DEBUGOUT("Issuing a global reset to 82542/82543 MAC\n");
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+	e1000_reload_nvm(hw);
+	msec_delay(2);
+
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+	icr = E1000_READ_REG(hw, E1000_ICR);
+
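+	/*
+	 * Restore MWI (disabled above on rev 2 parts) only if the saved PCI
+	 * command word had it enabled before the reset.
+	 */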
+	if (hw->revision_id == E1000_REVISION_2) {
+		if (bus->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+			e1000_pci_set_mwi(hw);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_hw_82542 - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_hw_82542(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_dev_spec_82542 *dev_spec;
+	s32 ret_val = E1000_SUCCESS;
+	u32 ctrl;
+	u16 i;
+
+	DEBUGFUNC("e1000_init_hw_82542");
+
+	dev_spec = (struct e1000_dev_spec_82542 *)hw->dev_spec;
+
+	/* Disabling VLAN filtering */
+	E1000_WRITE_REG(hw, E1000_VET, 0);
+	e1000_clear_vfta(hw);
+
+	/* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
+	if (hw->revision_id == E1000_REVISION_2) {
+		DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+		e1000_pci_clear_mwi(hw);
+		E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
+		E1000_WRITE_FLUSH(hw);
+		msec_delay(5);
+	}
+
+	/* Setup the receive address. */
+	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+	/* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
+	if (hw->revision_id == E1000_REVISION_2) {
+		E1000_WRITE_REG(hw, E1000_RCTL, 0);
+		E1000_WRITE_FLUSH(hw);
+		msec_delay(1);
+		if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+			e1000_pci_set_mwi(hw);
+	}
+
+	/* Zero out the Multicast HASH table */
+	DEBUGOUT("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+	/*
+	 * Set the PCI priority bit correctly in the CTRL register.  This
+	 * determines if the adapter gives priority to receives, or if it
+	 * gives equal priority to transmits and receives.
+	 */
+	if (dev_spec->dma_fairness) {
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PRIOR);
+	}
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link_82542(hw);
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_82542(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_link_82542 - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.  This is a function
+ *  pointer entry point called by the api module.
+ **/
+static s32 e1000_setup_link_82542(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_setup_link_82542");
+
+	ret_val = e1000_set_default_fc_generic(hw);
+	if (ret_val)
+		goto out;
+
+	hw->fc.type &= ~e1000_fc_tx_pause;
+
+	if (mac->report_tx_early == 1)
+		hw->fc.type &= ~e1000_fc_rx_pause;
+
+	/*
+	 * We want to save off the original Flow Control configuration just in
+	 * case we get disconnected and then reconnected into a different hub
+	 * or switch with different Flow Control capabilities.
+	 */
+	hw->fc.original_type = hw->fc.type;
+
+	DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc.type);
+
+	/* Call the necessary subroutine to configure the link. */
+	ret_val = func->setup_physical_interface(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Initialize the flow control address, type, and PAUSE timer
+	 * registers to their default values.  This is done even if flow
+	 * control is disabled, because it does not hurt anything to
+	 * initialize these registers.
+	 */
+	DEBUGOUT("Initializing Flow Control address, type and timer regs\n");
+
+	E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+	E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+	E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
+
+	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+
+	ret_val = e1000_set_fc_watermarks_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_led_on_82542 - Turn on SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Turns the SW defined LED on.  This is a function pointer entry point
+ *  called by the api module.
+ **/
+static s32 e1000_led_on_82542(struct e1000_hw *hw)
+{
+	u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	DEBUGFUNC("e1000_led_on_82542");
+
+	ctrl |= E1000_CTRL_SWDPIN0;
+	ctrl |= E1000_CTRL_SWDPIO0;
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_off_82542 - Turn off SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Turns the SW defined LED off.  This is a function pointer entry point
+ *  called by the api module.
+ **/
+static s32 e1000_led_off_82542(struct e1000_hw *hw)
+{
+	u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	DEBUGFUNC("e1000_led_off_82542");
+
+	ctrl &= ~E1000_CTRL_SWDPIN0;
+	ctrl |= E1000_CTRL_SWDPIO0;
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_translate_register_82542 - Translate to the proper register offset
+ *  @reg: e1000 register to be read
+ *
+ *  Registers in 82542 are located in different offsets than other adapters
+ *  even though they function in the same manner.  This function takes in
+ *  the name of the register to read and returns the correct offset for
+ *  82542 silicon.
+ **/
+u32 e1000_translate_register_82542(u32 reg)
+{
+	/*
+	 * Some of the 82542 registers are located at different
+	 * offsets than they are in newer adapters.
+	 * Despite the difference in location, the registers
+	 * function in the same manner.
+	 */
+	switch (reg) {
+	case E1000_RA:
+		reg = 0x00040;
+		break;
+	case E1000_RDTR:
+		reg = 0x00108;
+		break;
+	case E1000_RDBAL(0):
+		reg = 0x00110;
+		break;
+	case E1000_RDBAH(0):
+		reg = 0x00114;
+		break;
+	case E1000_RDLEN(0):
+		reg = 0x00118;
+		break;
+	case E1000_RDH(0):
+		reg = 0x00120;
+		break;
+	case E1000_RDT(0):
+		reg = 0x00128;
+		break;
+	case E1000_RDBAL(1):
+		reg = 0x00138;
+		break;
+	case E1000_RDBAH(1):
+		reg = 0x0013C;
+		break;
+	case E1000_RDLEN(1):
+		reg = 0x00140;
+		break;
+	case E1000_RDH(1):
+		reg = 0x00148;
+		break;
+	case E1000_RDT(1):
+		reg = 0x00150;
+		break;
+	case E1000_FCRTH:
+		reg = 0x00160;
+		break;
+	case E1000_FCRTL:
+		reg = 0x00168;
+		break;
+	case E1000_MTA:
+		reg = 0x00200;
+		break;
+	case E1000_TDBAL(0):
+		reg = 0x00420;
+		break;
+	case E1000_TDBAH(0):
+		reg = 0x00424;
+		break;
+	case E1000_TDLEN(0):
+		reg = 0x00428;
+		break;
+	case E1000_TDH(0):
+		reg = 0x00430;
+		break;
+	case E1000_TDT(0):
+		reg = 0x00438;
+		break;
+	case E1000_TIDV:
+		reg = 0x00440;
+		break;
+	case E1000_VFTA:
+		reg = 0x00600;
+		break;
+	case E1000_TDFH:
+		reg = 0x08010;
+		break;
+	case E1000_TDFT:
+		reg = 0x08018;
+		break;
+	default:
+		break;
+	}
+
+	return reg;
+}
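+
+/*
+ * Example (taken from the table above): e1000_translate_register_82542(E1000_RDT(0))
+ * returns 0x00128, the 82542 offset of the RX descriptor tail register.
+ */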
+
+/**
+ *  e1000_clear_hw_cntrs_82542 - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw)
+{
+	volatile u32 temp;
+
+	DEBUGFUNC("e1000_clear_hw_cntrs_82542");
+
+	e1000_clear_hw_cntrs_base_generic(hw);
+
+	temp = E1000_READ_REG(hw, E1000_PRC64);
+	temp = E1000_READ_REG(hw, E1000_PRC127);
+	temp = E1000_READ_REG(hw, E1000_PRC255);
+	temp = E1000_READ_REG(hw, E1000_PRC511);
+	temp = E1000_READ_REG(hw, E1000_PRC1023);
+	temp = E1000_READ_REG(hw, E1000_PRC1522);
+	temp = E1000_READ_REG(hw, E1000_PTC64);
+	temp = E1000_READ_REG(hw, E1000_PTC127);
+	temp = E1000_READ_REG(hw, E1000_PTC255);
+	temp = E1000_READ_REG(hw, E1000_PTC511);
+	temp = E1000_READ_REG(hw, E1000_PTC1023);
+	temp = E1000_READ_REG(hw, E1000_PTC1522);
+}
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_main.c	2022-03-21 12:58:29.740885539 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/netdevice.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+
+
+// RTNET defines...
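+// The undefs below force off kernel features (TSO, VLAN offload, NAPI,
+// multiqueue, netpoll, PM, PCI error recovery, tx timeout) that this
+// RTnet port does not use.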
+#ifdef NETIF_F_TSO
+#undef NETIF_F_TSO
+#endif
+
+#ifdef NETIF_F_TSO6
+#undef NETIF_F_TSO6
+#endif
+
+#ifdef NETIF_F_HW_VLAN_TX
+#undef NETIF_F_HW_VLAN_TX
+#endif
+
+#ifdef CONFIG_E1000_NAPI
+#undef CONFIG_E1000_NAPI
+#endif
+
+#ifdef MAX_SKB_FRAGS
+#undef MAX_SKB_FRAGS
+#endif
+
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT
+#endif
+
+#ifdef CONFIG_E1000_MQ
+#undef CONFIG_E1000_MQ
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+#undef CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifdef CONFIG_PM
+#undef CONFIG_PM
+#endif
+
+#ifdef HAVE_PCI_ERS
+#error "STOP it here"
+#undef HAVE_PCI_ERS
+#endif
+
+#ifdef USE_REBOOT_NOTIFIER
+#undef USE_REBOOT_NOTIFIER
+#endif
+
+#ifdef HAVE_TX_TIMEOUT
+#undef HAVE_TX_TIMEOUT
+#endif
+
+
+#ifdef NETIF_F_TSO
+#include <net/checksum.h>
+#ifdef NETIF_F_TSO6
+#include <net/ip6_checksum.h>
+#endif
+#endif
+#ifdef SIOCGMIIPHY
+#include <linux/mii.h>
+#endif
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+#endif
+#ifdef NETIF_F_HW_VLAN_TX
+#include <linux/if_vlan.h>
+#endif
+#ifdef CONFIG_E1000_MQ
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#endif
+
+#include "e1000.h"
+
+#ifdef HAVE_PCI_ERS
+#error "STOP it here"
+#endif
+
+
+
+char e1000_driver_name[MODULE_NAME_LEN] = "rt_e1000";
+static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
+
+#ifdef CONFIG_E1000_NAPI
+#define DRV_NAPI "-NAPI"
+#else
+#define DRV_NAPI
+#endif
+
+
+#define DRV_DEBUG
+
+#define DRV_HW_PERF
+
+/*
+ * Port to rtnet based on e1000 driver version 7.6.15.5 (22-Sep-2008 Mathias Koehrer)
+ *
+ */
+
+#define DRV_VERSION "7.6.15.5" DRV_NAPI DRV_DEBUG DRV_HW_PERF " ported to RTnet"
+const char e1000_driver_version[] = DRV_VERSION;
+static const char e1000_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
+
+// RTNET wrappers
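+// Map the Linux allocation and skb helpers used by the original driver to
+// their RTDM/rtskb counterparts so the ported code paths stay unchanged.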
+#define kmalloc(a,b) rtdm_malloc(a)
+#define vmalloc(a) rtdm_malloc(a)
+#define kfree(a) rtdm_free(a)
+#define vfree(a) rtdm_free(a)
+#define skb_reserve(a,b) rtskb_reserve(a,b)
+#define net_device rtnet_device
+#define sk_buff rtskb
+#define netdev_priv(a) a->priv
+// ----------------------
+
+
+
+/* e1000_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * Macro expands to...
+ *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+ */
+
+#define PCI_ID_LIST_PCI  \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82542), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82543GC_FIBER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82543GC_COPPER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82544EI_COPPER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82544EI_FIBER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82544GC_COPPER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82544GC_LOM), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82540EM), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82545EM_COPPER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546EB_COPPER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82545EM_FIBER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546EB_FIBER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541EI), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541ER_LOM), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82540EM_LOM), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82540EP_LOM), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82540EP), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541EI_MOBILE), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82547EI), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82547EI_MOBILE), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546EB_QUAD_COPPER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82540EP_LP), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82545GM_COPPER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82545GM_FIBER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82545GM_SERDES), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82547GI), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541GI), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541GI_MOBILE), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541ER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546GB_COPPER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546GB_FIBER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546GB_SERDES), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541GI_LF), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546GB_PCIE), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546GB_QUAD_COPPER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3)
+
+#define PCI_ID_LIST_PCIE  \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IGP_M_AMT), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IGP_AMT), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IGP_C), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IFE), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IGP_M), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_COPPER), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_FIBER), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_SERDES), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82572EI_COPPER), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82572EI_FIBER), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82572EI_SERDES), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82573E), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82573E_IAMT), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_80003ES2LAN_COPPER_DPT), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_80003ES2LAN_SERDES_DPT), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82573L), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_QUAD_COPPER), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_QUAD_FIBER), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_SERDES_DUAL), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_SERDES_QUAD), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82572EI), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_80003ES2LAN_COPPER_SPT), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_80003ES2LAN_SERDES_SPT), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_QUAD_COPPER_LP), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571PT_QUAD_COPPER), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IFE_GT), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IFE_G), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH9_IGP_AMT), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH9_IGP_C), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH9_IFE), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH9_IFE_G), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH9_IFE_GT)
+
+
+
+
+static struct pci_device_id e1000_pci_tbl[] = {
+	PCI_ID_LIST_PCI,
+	PCI_ID_LIST_PCIE,
+	/* required last entry */
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
+
+static struct pci_device_id e1000_pcipure_tbl[] = {
+	PCI_ID_LIST_PCI,
+	/* required last entry */
+	{0,}
+};
+
+static struct pci_device_id e1000_pcie_tbl[] = {
+	PCI_ID_LIST_PCIE,
+	/* required last entry */
+	{0,}
+};
+
+
+
+static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *tx_ring);
+static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring);
+static void e1000_free_tx_resources(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *tx_ring);
+static void e1000_free_rx_resources(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring);
+
+static int e1000_init_module(void);
+static void e1000_exit_module(void);
+static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void e1000_remove(struct pci_dev *pdev);
+static int e1000_alloc_queues(struct e1000_adapter *adapter);
+#ifdef CONFIG_E1000_MQ
+static void e1000_setup_queue_mapping(struct e1000_adapter *adapter);
+#endif
+static int e1000_sw_init(struct e1000_adapter *adapter);
+static int e1000_open(struct net_device *netdev);
+static int e1000_close(struct net_device *netdev);
+static void e1000_configure(struct e1000_adapter *adapter);
+static void e1000_configure_tx(struct e1000_adapter *adapter);
+static void e1000_configure_rx(struct e1000_adapter *adapter);
+static void e1000_setup_rctl(struct e1000_adapter *adapter);
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+				struct e1000_tx_ring *tx_ring);
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+				struct e1000_rx_ring *rx_ring);
+static void e1000_set_multi(struct net_device *netdev);
+static void e1000_update_phy_info_task(struct work_struct *work);
+static void e1000_watchdog_task(struct work_struct *work);
+static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
+static int e1000_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev,
+				 struct e1000_tx_ring *tx_ring);
+static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+#ifdef CONFIG_E1000_MQ
+static int e1000_subqueue_xmit_frame(struct sk_buff *skb,
+				     struct net_device *netdev, int queue);
+#endif
+static void e1000_phy_read_status(struct e1000_adapter *adapter);
+#if 0
+static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
+static int e1000_set_mac(struct net_device *netdev, void *p);
+#endif
+static int  e1000_intr(rtdm_irq_t *irq_handle);
+static int e1000_intr_msi(rtdm_irq_t *irq_handle);
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *tx_ring);
+#ifdef CONFIG_E1000_NAPI
+static int e1000_poll(struct napi_struct *napi, int budget);
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring,
+				    int *work_done, int work_to_do);
+static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+				       struct e1000_rx_ring *rx_ring,
+				       int *work_done, int work_to_do);
+static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+					  struct e1000_rx_ring *rx_ring,
+					  int *work_done, int work_to_do);
+static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
+					 struct e1000_rx_ring *rx_ring,
+					 int cleaned_count);
+#else
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring,
+					 nanosecs_abs_t *time_stamp);
+static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+				       struct e1000_rx_ring *rx_ring,
+					 nanosecs_abs_t *time_stamp);
+#endif
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+				   struct e1000_rx_ring *rx_ring,
+				   int cleaned_count);
+static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
+				      struct e1000_rx_ring *rx_ring,
+				      int cleaned_count);
+#if 0
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+#ifdef SIOCGMIIPHY
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+			   int cmd);
+static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
+static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
+static void e1000_tx_timeout(struct net_device *dev);
+#endif
+#endif
+static void e1000_reset_task(struct work_struct *work);
+static void e1000_smartspeed(struct e1000_adapter *adapter);
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+				       struct sk_buff *skb);
+
+#ifdef NETIF_F_HW_VLAN_TX
+static void e1000_vlan_rx_register(struct net_device *netdev,
+				   struct vlan_group *grp);
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static void e1000_restore_vlan(struct e1000_adapter *adapter);
+#endif
+
+// static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
+#ifdef CONFIG_PM
+static int e1000_resume(struct pci_dev *pdev);
+#endif
+#ifndef USE_REBOOT_NOTIFIER
+// static void e1000_shutdown(struct pci_dev *pdev);
+#else
+static int e1000_notify_reboot(struct notifier_block *, unsigned long event,
+			       void *ptr);
+static struct notifier_block e1000_notifier_reboot = {
+	.notifier_call	= e1000_notify_reboot,
+	.next		= NULL,
+	.priority	= 0
+};
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* for netdump / net console */
+static void e1000_netpoll (struct net_device *netdev);
+#endif
+
+#define COPYBREAK_DEFAULT 256
+static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+	"Maximum size of packet that is copied to a new buffer on receive");
+
+
+#ifdef HAVE_PCI_ERS
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+		     pci_channel_state_t state);
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
+static void e1000_io_resume(struct pci_dev *pdev);
+
+static struct pci_error_handlers e1000_err_handler = {
+	.error_detected = e1000_io_error_detected,
+	.slot_reset = e1000_io_slot_reset,
+	.resume = e1000_io_resume,
+};
+#endif
+
+static struct pci_driver e1000_driver = {
+	.name     = e1000_driver_name,
+	.id_table = e1000_pci_tbl,
+	.probe    = e1000_probe,
+	.remove   = e1000_remove,
+#ifdef HAVE_PCI_ERS
+	.err_handler = &e1000_err_handler
+#endif
+};
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define MAX_UNITS 8
+static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (eg. 1,0,1)");
+
+
+static int local_debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+module_param(local_debug, int, 0);
+MODULE_PARM_DESC(local_debug, "Debug level (0=none,...,16=all)");
+
+/* The 'pciif' parameter restricts this driver to PCI-only or PCIe-only NICs.
+ * It mirrors the situation in newer Linux kernels, which ship two separate
+ * (non real-time) drivers for this hardware:
+ *   e1000  for PCI devices only
+ *   e1000e for PCIe devices only
+ *
+ * Loading the driver with
+ *   modprobe rt_e1000 pciif=pci
+ * binds it to PCI devices only, while
+ *   modprobe rt_e1000 -o rt_e1000e pciif=pcie
+ * loads a second instance of this driver named 'rt_e1000e' that binds to
+ * PCIe devices only.
+ *
+ * If the 'pciif' parameter is not specified, all (PCI and PCIe) e1000
+ * NICs will be used.
+ */
+static char *pciif = "all";
+module_param(pciif, charp, 0);
+MODULE_PARM_DESC(pciif, "PCI Interface: 'all' (default), 'pci', 'pcie'");
+
+
+//#define register_netdev(a) rt_register_rtnetdev(a)
+//#define unregister_netdev(a) rt_unregister_rtnetdev(a)
+//#define free_netdev(a) rtdev_free(a)
+//#define netif_stop_queue(a) rtnetif_stop_queue(a)
+
+/**
+ * e1000_init_module - Driver Registration Routine
+ *
+ * e1000_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init e1000_init_module(void)
+{
+	int ret;
+	strcpy(e1000_driver_name, THIS_MODULE->name);
+	printk(KERN_INFO "%s - %s version %s (pciif: %s)\n",
+	       e1000_driver_string, e1000_driver_name, e1000_driver_version, pciif);
+
+	printk(KERN_INFO "%s\n", e1000_copyright);
+
+
+	if (0 == strcmp(pciif, "pcie")) {
+		// PCIe only
+		e1000_driver.id_table = e1000_pcie_tbl;
+	} else if (0 == strcmp(pciif, "pci")) {
+		// PCI only
+		e1000_driver.id_table = e1000_pcipure_tbl;
+	}
+
+	ret = pci_register_driver(&e1000_driver);
+#ifdef USE_REBOOT_NOTIFIER
+	if (ret >= 0) {
+		register_reboot_notifier(&e1000_notifier_reboot);
+	}
+#endif
+	if (copybreak != COPYBREAK_DEFAULT) {
+		if (copybreak == 0)
+			printk(KERN_INFO "e1000: copybreak disabled\n");
+		else
+			printk(KERN_INFO "e1000: copybreak enabled for "
+			       "packets <= %u bytes\n", copybreak);
+	}
+	return ret;
+}
+
+module_init(e1000_init_module);
+
+/**
+ * e1000_exit_module - Driver Exit Cleanup Routine
+ *
+ * e1000_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit e1000_exit_module(void)
+{
+#ifdef USE_REBOOT_NOTIFIER
+	unregister_reboot_notifier(&e1000_notifier_reboot);
+#endif
+	pci_unregister_driver(&e1000_driver);
+}
+
+module_exit(e1000_exit_module);
+
+static int e1000_request_irq(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err = 0;
+
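+	/*
+	 * Prefer MSI when the adapter advertises it; if enabling MSI or
+	 * requesting the RTDM interrupt fails, fall back to a shared
+	 * legacy IRQ line below.
+	 */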
+	if (adapter->flags & E1000_FLAG_HAS_MSI) {
+		err = pci_enable_msi(adapter->pdev);
+		if (!err)
+			adapter->flags |= E1000_FLAG_MSI_ENABLED;
+	}
+	rt_stack_connect(netdev, &STACK_manager);
+	if (adapter->flags & E1000_FLAG_MSI_ENABLED) {
+		err = rtdm_irq_request(&adapter->irq_handle, adapter->pdev->irq, e1000_intr_msi,
+				  0, netdev->name, netdev);
+		if (!err) {
+			return err;
+		} else {
+			adapter->flags &= ~E1000_FLAG_MSI_ENABLED;
+			pci_disable_msi(adapter->pdev);
+		}
+	}
+	err = rtdm_irq_request(&adapter->irq_handle, adapter->pdev->irq,
+			       e1000_intr, RTDM_IRQTYPE_SHARED, netdev->name,
+			       netdev);
+	if (err)
+		DPRINTK(PROBE, ERR, "Unable to allocate interrupt Error: %d\n",
+			err);
+
+	return err;
+}
+
+static void e1000_free_irq(struct e1000_adapter *adapter)
+{
+	// struct net_device *netdev = adapter->netdev;
+
+	rtdm_irq_free(&adapter->irq_handle);
+
+	if (adapter->flags & E1000_FLAG_MSI_ENABLED) {
+		pci_disable_msi(adapter->pdev);
+		adapter->flags &= ~E1000_FLAG_MSI_ENABLED;
+	}
+}
+
+/**
+ * e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void e1000_irq_disable(struct e1000_adapter *adapter)
+{
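+	/*
+	 * irq_sem counts nested disables; e1000_irq_enable() only unmasks
+	 * interrupts again once the count drops back to zero.
+	 */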
+	atomic_inc(&adapter->irq_sem);
+	E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0);
+	E1000_WRITE_FLUSH(&adapter->hw);
+	synchronize_irq(adapter->pdev->irq);
+}
+
+/**
+ * e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+
+static void e1000_irq_enable(struct e1000_adapter *adapter)
+{
+	if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
+		E1000_WRITE_REG(&adapter->hw, E1000_IMS, IMS_ENABLE_MASK);
+		E1000_WRITE_FLUSH(&adapter->hw);
+	}
+}
+#ifdef NETIF_F_HW_VLAN_TX
+
+static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u16 vid = adapter->hw.mng_cookie.vlan_id;
+	u16 old_vid = adapter->mng_vlan_id;
+	if (adapter->vlgrp) {
+		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
+			if (adapter->hw.mng_cookie.status &
+				E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+				e1000_vlan_rx_add_vid(netdev, vid);
+				adapter->mng_vlan_id = vid;
+			} else {
+				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+			}
+
+			if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
+					(vid != old_vid) &&
+			    !vlan_group_get_device(adapter->vlgrp, old_vid))
+				e1000_vlan_rx_kill_vid(netdev, old_vid);
+		} else {
+			adapter->mng_vlan_id = vid;
+		}
+	}
+}
+#endif
+
+/**
+ * e1000_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT versions of the f/w (only with
+ * 82573) this means that the network i/f is closed.
+ *
+ **/
+static void e1000_release_hw_control(struct e1000_adapter *adapter)
+{
+	u32 ctrl_ext;
+	u32 swsm;
+
+	/* Let firmware take over control of h/w */
+	switch (adapter->hw.mac.type) {
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
+		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
+				swsm & ~E1000_SWSM_DRV_LOAD);
+		break;
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
+				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * e1000_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ *
+ **/
+static void e1000_get_hw_control(struct e1000_adapter *adapter)
+{
+	u32 ctrl_ext;
+	u32 swsm;
+
+	/* Let firmware know the driver has taken over */
+	switch (adapter->hw.mac.type) {
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
+		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
+				swsm | E1000_SWSM_DRV_LOAD);
+		break;
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
+				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+}
+
+static void e1000_init_manageability(struct e1000_adapter *adapter)
+{
+}
+
+static void e1000_release_manageability(struct e1000_adapter *adapter)
+{
+}
+
+/**
+ * e1000_configure - configure the hardware for RX and TX
+ * @adapter: private board structure
+ **/
+static void e1000_configure(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i;
+
+	e1000_set_multi(netdev);
+
+#ifdef NETIF_F_HW_VLAN_TX
+	e1000_restore_vlan(adapter);
+#endif
+	e1000_init_manageability(adapter);
+
+	e1000_configure_tx(adapter);
+	e1000_setup_rctl(adapter);
+	e1000_configure_rx(adapter);
+	/* call E1000_DESC_UNUSED which always leaves
+	 * at least 1 descriptor unused to make sure
+	 * next_to_use != next_to_clean */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
+		adapter->alloc_rx_buf(adapter, ring,
+				      E1000_DESC_UNUSED(ring));
+	}
+
+#ifdef CONFIG_E1000_MQ
+	e1000_setup_queue_mapping(adapter);
+#endif
+
+	// adapter->tx_queue_len = netdev->tx_queue_len;
+}
+
+static void e1000_napi_enable_all(struct e1000_adapter *adapter)
+{
+#ifdef CONFIG_E1000_NAPI
+	int i;
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_enable(&adapter->rx_ring[i].napi);
+#endif
+}
+
+static void e1000_napi_disable_all(struct e1000_adapter *adapter)
+{
+#ifdef CONFIG_E1000_NAPI
+	int i;
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_disable(&adapter->rx_ring[i].napi);
+#endif
+}
+
+int e1000_up(struct e1000_adapter *adapter)
+{
+	/* hardware has been reset, we need to reload some things */
+	e1000_configure(adapter);
+
+	clear_bit(__E1000_DOWN, &adapter->state);
+
+	e1000_napi_enable_all(adapter);
+
+	e1000_irq_enable(adapter);
+
+	/* fire a link change interrupt to start the watchdog */
+	// E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
+	return 0;
+}
+
+static void e1000_down_and_stop(struct e1000_adapter *adapter)
+{
+	/* signal that we're down so the interrupt handler does not
+	 * reschedule our watchdog timer */
+	set_bit(__E1000_DOWN, &adapter->state);
+
+	cancel_work_sync(&adapter->reset_task);
+	cancel_delayed_work_sync(&adapter->watchdog_task);
+	cancel_delayed_work_sync(&adapter->phy_info_task);
+	cancel_delayed_work_sync(&adapter->fifo_stall_task);
+}
+
+void e1000_down(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u32 tctl, rctl;
+
+	e1000_down_and_stop(adapter);
+
+	/* disable receives in the hardware */
+	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+	/* flush and sleep below */
+
+#ifdef NETIF_F_LLTX
+	rtnetif_stop_queue(netdev);
+#else
+	rtnetif_tx_disable(netdev);
+#endif
+
+	/* disable transmits in the hardware */
+	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
+	tctl &= ~E1000_TCTL_EN;
+	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
+	/* flush both disables and wait for them to finish */
+	E1000_WRITE_FLUSH(&adapter->hw);
+	msleep(10);
+
+	e1000_napi_disable_all(adapter);
+
+	e1000_irq_disable(adapter);
+
+	// netdev->tx_queue_len = adapter->tx_queue_len;
+	rtnetif_carrier_off(netdev);
+	adapter->link_speed = 0;
+	adapter->link_duplex = 0;
+
+	e1000_reset(adapter);
+	e1000_clean_all_tx_rings(adapter);
+	e1000_clean_all_rx_rings(adapter);
+}
+
+void e1000_reinit_locked(struct e1000_adapter *adapter)
+{
+	WARN_ON(in_interrupt());
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+		msleep(1);
+	e1000_down(adapter);
+	e1000_up(adapter);
+	clear_bit(__E1000_RESETTING, &adapter->state);
+}
+
+void e1000_reset(struct e1000_adapter *adapter)
+{
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+	struct e1000_fc_info *fc = &adapter->hw.fc;
+	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
+	bool legacy_pba_adjust = FALSE;
+	u16 hwm;
+
+	/* Repartition Pba for greater than 9k mtu
+	 * To take effect CTRL.RST is required.
+	 */
+
+	switch (mac->type) {
+	case e1000_82542:
+	case e1000_82543:
+	case e1000_82544:
+	case e1000_82540:
+	case e1000_82541:
+	case e1000_82541_rev_2:
+		legacy_pba_adjust = TRUE;
+		pba = E1000_PBA_48K;
+		break;
+	case e1000_82545:
+	case e1000_82545_rev_3:
+	case e1000_82546:
+	case e1000_82546_rev_3:
+		pba = E1000_PBA_48K;
+		break;
+	case e1000_82547:
+	case e1000_82547_rev_2:
+		legacy_pba_adjust = TRUE;
+		pba = E1000_PBA_30K;
+		break;
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+		pba = E1000_PBA_38K;
+		break;
+	case e1000_82573:
+		pba = E1000_PBA_20K;
+		break;
+	case e1000_ich8lan:
+		pba = E1000_PBA_8K;
+		break;
+	case e1000_ich9lan:
+#define E1000_PBA_10K 0x000A
+		pba = E1000_PBA_10K;
+		break;
+	case e1000_undefined:
+	case e1000_num_macs:
+		break;
+	}
+
+	if (legacy_pba_adjust == TRUE) {
+		if (adapter->max_frame_size > E1000_RXBUFFER_8192)
+			pba -= 8; /* allocate more FIFO for Tx */
+
+		if (mac->type == e1000_82547) {
+			adapter->tx_fifo_head = 0;
+			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
+			adapter->tx_fifo_size =
+				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
+			atomic_set(&adapter->tx_fifo_stall, 0);
+		}
+	} else if (adapter->max_frame_size > ETH_FRAME_LEN + ETHERNET_FCS_SIZE) {
+		/* adjust PBA for jumbo frames */
+		E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
+
+		/* To maintain wire speed transmits, the Tx FIFO should be
+		 * large enough to accommodate two full transmit packets,
+		 * rounded up to the next 1KB and expressed in KB.  Likewise,
+		 * the Rx FIFO should be large enough to accommodate at least
+		 * one full receive packet and is similarly rounded up and
+		 * expressed in KB. */
+		pba = E1000_READ_REG(&adapter->hw, E1000_PBA);
+		/* upper 16 bits has Tx packet buffer allocation size in KB */
+		tx_space = pba >> 16;
+		/* lower 16 bits has Rx packet buffer allocation size in KB */
+		pba &= 0xffff;
+		/* the tx fifo also stores 16 bytes of information about the tx
+		 * but don't include ethernet FCS because hardware appends it */
+		min_tx_space = (adapter->max_frame_size +
+				sizeof(struct e1000_tx_desc) -
+				ETHERNET_FCS_SIZE) * 2;
+		min_tx_space = ALIGN(min_tx_space, 1024);
+		min_tx_space >>= 10;
+		/* software strips receive CRC, so leave room for it */
+		min_rx_space = adapter->max_frame_size;
+		min_rx_space = ALIGN(min_rx_space, 1024);
+		min_rx_space >>= 10;
+
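+		/*
+		 * Example (illustrative numbers): with a 9018-byte jumbo frame
+		 * and the 16-byte legacy Tx descriptor, min_tx_space =
+		 * (9018 + 16 - 4) * 2 = 18060 -> aligned to 18432 -> 18 KB,
+		 * and min_rx_space = 9018 -> aligned to 9216 -> 9 KB.
+		 */
+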
+		/* If current Tx allocation is less than the min Tx FIFO size,
+		 * and the min Tx FIFO size is less than the current Rx FIFO
+		 * allocation, take space away from current Rx allocation */
+		if (tx_space < min_tx_space &&
+		    ((min_tx_space - tx_space) < pba)) {
+			pba = pba - (min_tx_space - tx_space);
+
+			/* PCI/PCIx hardware has PBA alignment constraints */
+			switch (mac->type) {
+			case e1000_82545 ... e1000_82546_rev_3:
+				pba &= ~(E1000_PBA_8K - 1);
+				break;
+			default:
+				break;
+			}
+
+			/* if short on rx space, rx wins and must trump tx
+			 * adjustment or use Early Receive if available */
+			if (pba < min_rx_space) {
+				switch (mac->type) {
+				case e1000_82573:
+				case e1000_ich9lan:
+					/* ERT enabled in e1000_configure_rx */
+					break;
+				default:
+					pba = min_rx_space;
+					break;
+				}
+			}
+		}
+	}
+
+	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
+
+	/* flow control settings */
+	/* The high water mark must be low enough to fit one full frame
+	 * (or the size used for early receive) above it in the Rx FIFO.
+	 * Set it to the lower of:
+	 * - 90% of the Rx FIFO size, and
+	 * - the full Rx FIFO size minus the early receive size (for parts
+	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
+	 * - the full Rx FIFO size minus one full frame */
+	hwm = min(((pba << 10) * 9 / 10),
+		  ((mac->type == e1000_82573 || mac->type == e1000_ich9lan) ?
+		      (u16)((pba << 10) - (E1000_ERT_2048 << 3)) :
+		      ((pba << 10) - adapter->max_frame_size)));
+
+	fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
+	fc->low_water = fc->high_water - 8;
+
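+	/*
+	 * Example (illustrative numbers): with pba = 48 (KB) and a 1522-byte
+	 * frame, 90% of the FIFO is 44236 bytes and FIFO minus one frame is
+	 * 47630 bytes, so hwm = 44236, high_water = 44232 (8-byte aligned)
+	 * and low_water = 44224.
+	 */
+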
+	if (mac->type == e1000_80003es2lan)
+		fc->pause_time = 0xFFFF;
+	else
+		fc->pause_time = E1000_FC_PAUSE_TIME;
+	fc->send_xon = 1;
+	fc->type = fc->original_type;
+
+	/* Allow time for pending master requests to run */
+	e1000_reset_hw(&adapter->hw);
+
+	/* For 82573 and ICHx if AMT is enabled, let the firmware know
+	 * that the network interface is in control */
+	if (((adapter->hw.mac.type == e1000_82573) ||
+	     (adapter->hw.mac.type == e1000_ich8lan) ||
+	     (adapter->hw.mac.type == e1000_ich9lan)) &&
+	    e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
+
+	if (mac->type >= e1000_82544)
+		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
+
+	if (e1000_init_hw(&adapter->hw))
+		DPRINTK(PROBE, ERR, "Hardware Error\n");
+#ifdef NETIF_F_HW_VLAN_TX
+	e1000_update_mng_vlan(adapter);
+#endif
+	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
+	if (mac->type >= e1000_82544 &&
+	    mac->type <= e1000_82547_rev_2 &&
+	    mac->autoneg == 1 &&
+	    adapter->hw.phy.autoneg_advertised == ADVERTISE_1000_FULL) {
+		u32 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+		/* clear phy power management bit if we are in gig only mode,
+		 * which if enabled will attempt negotiation to 100Mb, which
+		 * can cause a loss of link at power off or driver unload */
+		ctrl &= ~E1000_CTRL_SWDPIN3;
+		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
+	}
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_PPC)
+#define E1000_GCR_DISABLE_TIMEOUT_MECHANISM 0x80000000
+	if (adapter->hw.mac.type == e1000_82571) {
+		/* work around pSeries hardware by disabling timeouts */
+		u32 gcr = E1000_READ_REG(&adapter->hw, E1000_GCR);
+		gcr |= E1000_GCR_DISABLE_TIMEOUT_MECHANISM;
+		E1000_WRITE_REG(&adapter->hw, E1000_GCR, gcr);
+	}
+#endif
+
+	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
+
+	e1000_reset_adaptive(&adapter->hw);
+	e1000_get_phy_info(&adapter->hw);
+
+	if (!(adapter->flags & E1000_FLAG_SMART_POWER_DOWN) &&
+	    (mac->type == e1000_82571 || mac->type == e1000_82572)) {
+		u16 phy_data = 0;
+		/* speed up time to link by disabling smart power down, ignore
+		 * the return value of this function because there is nothing
+		 * different we would do if it failed */
+		e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
+				   &phy_data);
+		phy_data &= ~IGP02E1000_PM_SPD;
+		e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
+				    phy_data);
+	}
+
+	e1000_release_manageability(adapter);
+}
+
+/**
+ * e1000_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in e1000_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * e1000_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int e1000_probe(struct pci_dev *pdev,
+				 const struct pci_device_id *ent)
+{
+	struct net_device *netdev;
+	struct e1000_adapter *adapter;
+
+	static int cards_found = 0;
+	static int global_quad_port_a = 0; /* global ksp3 port a indication */
+	int i, err, pci_using_dac;
+	u16 eeprom_data = 0;
+	u16 eeprom_apme_mask = E1000_EEPROM_APME;
+
+	if (cards[cards_found++] == 0)
+		return -ENODEV;
+
+	if ((err = pci_enable_device(pdev)))
+		return err;
+
+	if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
+	    !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
+		pci_using_dac = 1;
+	} else {
+		if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
+		    (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
+			E1000_ERR("No usable DMA configuration, aborting\n");
+			goto err_dma;
+		}
+		pci_using_dac = 0;
+	}
+
+	if ((err = pci_request_regions(pdev, e1000_driver_name)))
+		goto err_pci_reg;
+
+	pci_set_master(pdev);
+
+	err = -ENOMEM;
+#ifdef CONFIG_E1000_MQ
+	netdev = rt_alloc_etherdev(sizeof(struct e1000_adapter) +
+							(sizeof(struct net_device_subqueue) *
+								E1000_MAX_TX_QUEUES), 16);
+#else
+	netdev = rt_alloc_etherdev(sizeof(struct e1000_adapter),
+				2 * E1000_DEFAULT_RXD + E1000_DEFAULT_TXD);
+#endif
+	if (!netdev)
+		goto err_alloc_etherdev;
+
+	memset(netdev->priv, 0, sizeof(struct e1000_adapter));
+	rt_rtdev_connect(netdev, &RTDEV_manager);
+
+	/* SET_NETDEV_DEV(netdev, &pdev->dev); */
+	netdev->vers = RTDEV_VERS_2_0;
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = netdev->priv;
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	adapter->hw.back = adapter;
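+	/* Turn the numeric debug level (presumably the module's "local_debug"
+	 * parameter) into a bitmask enabling all message categories below it. */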
+	adapter->msg_enable = (1 << local_debug) - 1;
+
+	err = -EIO;
+	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
+				      pci_resource_len(pdev, BAR_0));
+	if (!adapter->hw.hw_addr)
+		goto err_ioremap;
+
+	for (i = BAR_1; i <= BAR_5; i++) {
+		if (pci_resource_len(pdev, i) == 0)
+			continue;
+		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+			adapter->hw.io_base = pci_resource_start(pdev, i);
+			break;
+		}
+	}
+
+	netdev->open = &e1000_open;
+	netdev->stop = &e1000_close;
+	netdev->hard_start_xmit = &e1000_xmit_frame;
+#ifdef CONFIG_E1000_MQ
+	netdev->hard_start_subqueue_xmit = &e1000_subqueue_xmit_frame;
+#endif
+#ifdef HAVE_TX_TIMEOUT
+	netdev->tx_timeout = &e1000_tx_timeout;
+	netdev->watchdog_timeo = 5 * HZ;
+#endif
+#ifdef NETIF_F_HW_VLAN_TX
+	netdev->vlan_rx_register = e1000_vlan_rx_register;
+	netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
+	netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	netdev->poll_controller = e1000_netpoll;
+#endif
+	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+	adapter->bd_number = cards_found;
+
+	/* setup the private structure */
+	if ((err = e1000_sw_init(adapter)))
+		goto err_sw_init;
+
+	err = -EIO;
+	/* Flash BAR mapping must happen after e1000_sw_init
+	 * because it depends on mac.type */
+	if (((adapter->hw.mac.type == e1000_ich8lan) ||
+	     (adapter->hw.mac.type == e1000_ich9lan)) &&
+	   (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+		adapter->hw.flash_address = ioremap(pci_resource_start(pdev, 1),
+						    pci_resource_len(pdev, 1));
+		if (!adapter->hw.flash_address)
+			goto err_flashmap;
+	}
+
+	if ((err = e1000_init_mac_params(&adapter->hw)))
+		goto err_hw_init;
+
+	if ((err = e1000_init_nvm_params(&adapter->hw)))
+		goto err_hw_init;
+
+	if ((err = e1000_init_phy_params(&adapter->hw)))
+		goto err_hw_init;
+
+	e1000_get_bus_info(&adapter->hw);
+
+	e1000_init_script_state_82541(&adapter->hw, TRUE);
+	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
+
+	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
+	adapter->hw.mac.adaptive_ifs = FALSE;
+
+	/* Copper options */
+
+	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+		adapter->hw.phy.mdix = AUTO_ALL_MODES;
+		adapter->hw.phy.disable_polarity_correction = FALSE;
+		adapter->hw.phy.ms_type = E1000_MASTER_SLAVE;
+	}
+
+	if (e1000_check_reset_block(&adapter->hw))
+		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
+
+#ifdef MAX_SKB_FRAGS
+	if (adapter->hw.mac.type >= e1000_82543) {
+#ifdef NETIF_F_HW_VLAN_TX
+		netdev->features = NETIF_F_SG |
+				   NETIF_F_HW_CSUM |
+				   NETIF_F_HW_VLAN_TX |
+				   NETIF_F_HW_VLAN_RX |
+				   NETIF_F_HW_VLAN_FILTER;
+		if ((adapter->hw.mac.type == e1000_ich8lan) ||
+		    (adapter->hw.mac.type == e1000_ich9lan))
+			netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
+#else
+		netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM;
+#endif
+	}
+
+#ifdef NETIF_F_TSO
+	if ((adapter->hw.mac.type >= e1000_82544) &&
+	   (adapter->hw.mac.type != e1000_82547)) {
+		adapter->flags |= E1000_FLAG_HAS_TSO;
+		netdev->features |= NETIF_F_TSO;
+	}
+
+#ifdef NETIF_F_TSO6
+	if (adapter->hw.mac.type > e1000_82547_rev_2) {
+		adapter->flags |= E1000_FLAG_HAS_TSO6;
+		netdev->features |= NETIF_F_TSO6;
+	}
+#endif
+#endif
+	if (pci_using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+#endif
+#ifdef NETIF_F_LLTX
+	netdev->features |= NETIF_F_LLTX;
+#endif
+
+	/* Hardware features, flags and workarounds */
+	if (adapter->hw.mac.type >= e1000_82571) {
+		adapter->flags |= E1000_FLAG_INT_ASSERT_AUTO_MASK;
+		adapter->flags |= E1000_FLAG_HAS_MSI;
+		adapter->flags |= E1000_FLAG_HAS_MANC2H;
+	}
+
+	if (adapter->hw.mac.type >= e1000_82540) {
+		adapter->flags |= E1000_FLAG_HAS_SMBUS;
+		adapter->flags |= E1000_FLAG_HAS_INTR_MODERATION;
+	}
+
+	if (adapter->hw.mac.type == e1000_82543)
+		adapter->flags |= E1000_FLAG_BAD_TX_CARRIER_STATS_FD;
+
+	/* On rare occasions, ESB2 systems would end up started without
+	 * the RX unit being turned on. */
+	if (adapter->hw.mac.type == e1000_80003es2lan)
+		adapter->flags |= E1000_FLAG_RX_NEEDS_RESTART;
+
+	adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
+
+	/* before reading the NVM, reset the controller to
+	 * put the device in a known good starting state */
+
+	e1000_reset_hw(&adapter->hw);
+
+	/* make sure we don't intercept ARP packets until we're up */
+	e1000_release_manageability(adapter);
+
+	/* make sure the NVM is good */
+
+	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
+		DPRINTK(PROBE, ERR, "The NVM Checksum Is Not Valid\n");
+		err = -EIO;
+		goto err_eeprom;
+	}
+
+	/* copy the MAC address out of the NVM */
+
+	if (e1000_read_mac_addr(&adapter->hw))
+		DPRINTK(PROBE, ERR, "NVM Read Error\n");
+	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+#ifdef ETHTOOL_GPERMADDR
+	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+	if (!is_valid_ether_addr(netdev->perm_addr)) {
+#else
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+#endif
+		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+		err = -EIO;
+		goto err_eeprom;
+	}
+
+	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task);
+	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
+			  e1000_82547_tx_fifo_stall_task);
+	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
+	INIT_WORK(&adapter->reset_task, e1000_reset_task);
+
+	e1000_check_options(adapter);
+
+	/* Initial Wake on LAN setting
+	 * If APM wake is enabled in the EEPROM,
+	 * enable the ACPI Magic Packet filter
+	 */
+
+	switch (adapter->hw.mac.type) {
+	case e1000_82542:
+	case e1000_82543:
+		break;
+	case e1000_82544:
+		e1000_read_nvm(&adapter->hw,
+			NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
+		eeprom_apme_mask = E1000_EEPROM_82544_APM;
+		break;
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		/* APME bit in EEPROM is mapped to WUC.APME */
+		eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
+		eeprom_apme_mask = E1000_WUC_APME;
+		break;
+	case e1000_82546:
+	case e1000_82546_rev_3:
+	case e1000_82571:
+	case e1000_80003es2lan:
+		if (adapter->hw.bus.func == 1) {
+			e1000_read_nvm(&adapter->hw,
+				NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+			break;
+		}
+		fallthrough;
+	default:
+		e1000_read_nvm(&adapter->hw,
+			NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+		break;
+	}
+	if (eeprom_data & eeprom_apme_mask)
+		adapter->eeprom_wol |= E1000_WUFC_MAG;
+
+	/* now that we have the eeprom settings, apply the special cases
+	 * where the eeprom may be wrong or the board simply won't support
+	 * wake on lan on a particular port */
+	switch (pdev->device) {
+	case E1000_DEV_ID_82546GB_PCIE:
+	case E1000_DEV_ID_82571EB_SERDES_QUAD:
+		adapter->eeprom_wol = 0;
+		break;
+	case E1000_DEV_ID_82546EB_FIBER:
+	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82571EB_FIBER:
+		/* Wake events only supported on port A for dual fiber
+		 * regardless of eeprom setting */
+		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
+		    E1000_STATUS_FUNC_1)
+			adapter->eeprom_wol = 0;
+		break;
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82571EB_QUAD_FIBER:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
+	case E1000_DEV_ID_82571PT_QUAD_COPPER:
+		/* if quad port adapter, disable WoL on all but port A */
+		if (global_quad_port_a != 0)
+			adapter->eeprom_wol = 0;
+		else
+			adapter->flags |= E1000_FLAG_QUAD_PORT_A;
+		/* Reset for multiple quad port adapters */
+		if (++global_quad_port_a == 4)
+			global_quad_port_a = 0;
+		break;
+	}
+
+	/* initialize the wol settings based on the eeprom settings */
+	adapter->wol = adapter->eeprom_wol;
+
+	/* print bus type/speed/width info */
+	{
+	struct e1000_hw *hw = &adapter->hw;
+	DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+		((hw->bus.type == e1000_bus_type_pcix) ? "-X" :
+		 (hw->bus.type == e1000_bus_type_pci_express ? " Express":"")),
+		((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+		 (hw->bus.speed == e1000_bus_speed_133) ? "133MHz" :
+		 (hw->bus.speed == e1000_bus_speed_120) ? "120MHz" :
+		 (hw->bus.speed == e1000_bus_speed_100) ? "100MHz" :
+		 (hw->bus.speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
+		((hw->bus.width == e1000_bus_width_64) ? "64-bit" :
+		 (hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+		 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
+		 "32-bit"));
+	}
+
+	for (i = 0; i < 6; i++)
+		printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
+
+	/* reset the hardware with the new settings */
+	e1000_reset(adapter);
+
+	/* If the controller is 82573 or ICH and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up.  For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (((adapter->hw.mac.type != e1000_82573) &&
+	     (adapter->hw.mac.type != e1000_ich8lan) &&
+	     (adapter->hw.mac.type != e1000_ich9lan)) ||
+	    !e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
+
+	/* tell the stack to leave us alone until e1000_open() is called */
+	rtnetif_carrier_off(netdev);
+	rtnetif_stop_queue(netdev);
+
+	strcpy(netdev->name, "rteth%d");
+	err = rt_register_rtnetdev(netdev);
+	if (err)
+		goto err_register;
+
+	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
+
+	cards_found++;
+	return 0;
+
+err_register:
+err_hw_init:
+	e1000_release_hw_control(adapter);
+err_eeprom:
+	if (!e1000_check_reset_block(&adapter->hw))
+		e1000_phy_hw_reset(&adapter->hw);
+
+	if (adapter->hw.flash_address)
+		iounmap(adapter->hw.flash_address);
+
+	e1000_remove_device(&adapter->hw);
+err_flashmap:
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+err_sw_init:
+	iounmap(adapter->hw.hw_addr);
+err_ioremap:
+	rtdev_free(netdev);
+err_alloc_etherdev:
+	pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ * e1000_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * e1000_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void e1000_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	e1000_down_and_stop(adapter);
+
+	e1000_release_manageability(adapter);
+
+	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant. */
+	e1000_release_hw_control(adapter);
+
+	rt_unregister_rtnetdev(netdev);
+
+	if (!e1000_check_reset_block(&adapter->hw))
+		e1000_phy_hw_reset(&adapter->hw);
+
+	e1000_remove_device(&adapter->hw);
+
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+
+	iounmap(adapter->hw.hw_addr);
+	if (adapter->hw.flash_address)
+		iounmap(adapter->hw.flash_address);
+	pci_release_regions(pdev);
+
+	rtdev_free(netdev);
+
+	pci_disable_device(pdev);
+}
+
+/**
+ * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * e1000_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int e1000_sw_init(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+#ifdef CONFIG_E1000_NAPI
+	int i;
+#endif
+
+	/* PCI config space info */
+
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+
+	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+
+	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+	adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
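+	/* Largest frame the hardware must accept: MTU plus the Ethernet
+	 * header and FCS; smallest: minimum frame length (ETH_ZLEN) plus FCS. */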
+	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETHERNET_FCS_SIZE;
+	adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
+
+	/* Initialize the hardware-specific values */
+	if (e1000_setup_init_funcs(hw, FALSE)) {
+		DPRINTK(PROBE, ERR, "Hardware Initialization Failure\n");
+		return -EIO;
+	}
+
+#ifdef CONFIG_E1000_MQ
+	/* Number of supported queues.
+	 * TODO: It's assumed num_rx_queues >= num_tx_queues, since multi-rx
+	 * queues are much more interesting.  Is it worth coding for the
+	 * possibility (however improbable) of num_tx_queues > num_rx_queues?
+	 */
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_82573:
+	case e1000_80003es2lan:
+		adapter->num_tx_queues = 2;
+		adapter->num_rx_queues = 2;
+		break;
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		if ((adapter->hw.device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
+		    (adapter->hw.device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
+		    (adapter->hw.device_id == E1000_DEV_ID_ICH9_IGP_AMT)) {
+			adapter->num_tx_queues = 2;
+			adapter->num_rx_queues = 2;
+			break;
+		}
+		fallthrough; /* remaining ICH SKUs do not support MQ */
+	default:
+		/* All hardware before 82571 only have 1 queue each for Rx/Tx.
+		 * However, the 82571 family does not have MSI-X, so multi-
+		 * queue isn't enabled.
+		 * It'd be wise not to mess with this default case. :) */
+		adapter->num_tx_queues = 1;
+		adapter->num_rx_queues = 1;
+		netdev->egress_subqueue_count = 0;
+		break;
+	}
+	adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus());
+	adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus());
+
+	if ((adapter->num_tx_queues > 1) || (adapter->num_rx_queues > 1)) {
+		netdev->egress_subqueue = (struct net_device_subqueue *)
+					   ((void *)adapter +
+					    sizeof(struct e1000_adapter));
+		netdev->egress_subqueue_count = adapter->num_tx_queues;
+		DPRINTK(DRV, INFO, "Multiqueue Enabled: RX queues = %u, "
+			"TX queues = %u\n", adapter->num_rx_queues,
+			adapter->num_tx_queues);
+	}
+#else
+	adapter->num_tx_queues = 1;
+	adapter->num_rx_queues = 1;
+#endif
+
+	if (e1000_alloc_queues(adapter)) {
+		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_E1000_NAPI
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct e1000_rx_ring *rx_ring = &adapter->rx_ring[i];
+		netif_napi_add(adapter->netdev, &rx_ring->napi, e1000_poll, 64);
+	}
+	rtdm_lock_init(&adapter->tx_queue_lock);
+#ifdef CONFIG_E1000_MQ
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		rtdm_lock_init(&adapter->tx_ring[i].tx_queue_lock);
+#endif
+#endif
+
+	/* Explicitly disable IRQ since the NIC can be in any state. */
+	atomic_set(&adapter->irq_sem, 0);
+	e1000_irq_disable(adapter);
+
+	rtdm_lock_init(&adapter->stats_lock);
+
+	set_bit(__E1000_DOWN, &adapter->state);
+	return 0;
+}
+
+/**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ **/
+static int e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
+	if (!adapter->tx_ring)
+		return -ENOMEM;
+
+	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
+	if (!adapter->rx_ring) {
+		kfree(adapter->tx_ring);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_E1000_MQ
+	adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
+#endif
+
+	return E1000_SUCCESS;
+}
+
+#ifdef CONFIG_E1000_MQ
+static void e1000_setup_queue_mapping(struct e1000_adapter *adapter)
+{
+	int i, cpu;
+
+	lock_cpu_hotplug();
+	i = 0;
+	for_each_online_cpu(cpu) {
+		*per_cpu_ptr(adapter->cpu_tx_ring, cpu) =
+			     &adapter->tx_ring[i % adapter->num_tx_queues];
+		i++;
+	}
+	unlock_cpu_hotplug();
+}
+#endif
+
+/**
+ * e1000_intr_msi_test - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t e1000_intr_msi_test(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	u32 icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
+	DPRINTK(HW, INFO, "icr is %08X\n", icr);
+	if (icr & E1000_ICR_RXSEQ) {
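+		/* Seeing the test interrupt proves MSI delivery works; the
+		 * wmb() below pairs with the rmb() in e1000_test_msi_interrupt(). */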
+		adapter->flags |= E1000_FLAG_HAS_MSI;
+		wmb();
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * e1000_test_msi_interrupt - Returns 0 for successful test
+ * @adapter: board private struct
+ *
+ * code flow taken from tg3.c
+ **/
+static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	/* poll_enable hasn't been called yet, so there is no need to disable */
+	/* clear any pending events */
+	E1000_READ_REG(&adapter->hw, E1000_ICR);
+
+	/* free the real vector and request a test handler */
+	e1000_free_irq(adapter);
+
+	err = pci_enable_msi(adapter->pdev);
+	err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0,
+			  netdev->name, netdev);
+	if (err) {
+		pci_disable_msi(adapter->pdev);
+		goto msi_test_failed;
+	}
+
+	/* our temporary test variable */
+	adapter->flags &= ~E1000_FLAG_HAS_MSI;
+	wmb();
+
+	e1000_irq_enable(adapter);
+
+	/* fire an unusual interrupt on the test handler */
+	E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_RXSEQ);
+	E1000_WRITE_FLUSH(&adapter->hw);
+	msleep(50);
+
+	e1000_irq_disable(adapter);
+
+	rmb();
+	if (!(adapter->flags & E1000_FLAG_HAS_MSI)) {
+		adapter->flags |= E1000_FLAG_HAS_MSI;
+		err = -EIO;
+		DPRINTK(HW, INFO, "MSI interrupt test failed!\n");
+	}
+
+	free_irq(adapter->pdev->irq, netdev);
+	pci_disable_msi(adapter->pdev);
+
+	if (err == -EIO)
+		goto msi_test_failed;
+
+	/* okay so the test worked, restore settings */
+	DPRINTK(HW, INFO, "MSI interrupt test succeeded!\n");
+msi_test_failed:
+	/* restore the original vector, even if it failed */
+	e1000_request_irq(adapter);
+	return err;
+}
+
+/**
+ * e1000_test_msi - Returns 0 if MSI test succeeds and INTx mode is restored
+ * @adapter: board private struct
+ *
+ * code flow taken from tg3.c, called with e1000 interrupts disabled.
+ **/
+static int e1000_test_msi(struct e1000_adapter *adapter)
+{
+	int err;
+	u16 pci_cmd;
+
+	if (!(adapter->flags & E1000_FLAG_MSI_ENABLED) ||
+	    !(adapter->flags & E1000_FLAG_HAS_MSI))
+		return 0;
+
+	/* disable SERR in case the MSI write causes a master abort */
+	pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+	pci_write_config_word(adapter->pdev, PCI_COMMAND,
+			      pci_cmd & ~PCI_COMMAND_SERR);
+
+	err = e1000_test_msi_interrupt(adapter);
+
+	/* restore previous setting of command word */
+	pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
+
+	/* success ! */
+	if (!err)
+		return 0;
+
+	/* EIO means MSI test failed */
+	if (err != -EIO)
+		return err;
+
+	/* back to INTx mode */
+	DPRINTK(PROBE, WARNING, "MSI interrupt test failed, using legacy "
+		"interrupt.\n");
+
+	e1000_free_irq(adapter);
+	adapter->flags &= ~E1000_FLAG_HAS_MSI;
+
+	err = e1000_request_irq(adapter);
+
+	return err;
+}
+
+/**
+ * e1000_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int e1000_open(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	int err;
+	/* disallow open during test */
+	if (test_bit(__E1000_TESTING, &adapter->state))
+		return -EBUSY;
+
+	/* allocate transmit descriptors */
+	err = e1000_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = e1000_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+		e1000_power_up_phy(&adapter->hw);
+		e1000_setup_link(&adapter->hw);
+	}
+
+#ifdef NETIF_F_HW_VLAN_TX
+	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) {
+		e1000_update_mng_vlan(adapter);
+	}
+#endif
+
+	/* For 82573 and ICHx if AMT is enabled, let the firmware know
+	 * that the network interface is now open */
+	if (((adapter->hw.mac.type == e1000_82573) ||
+	     (adapter->hw.mac.type == e1000_ich8lan) ||
+	     (adapter->hw.mac.type == e1000_ich9lan)) &&
+	    e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
+
+	/* before we allocate an interrupt, we must be ready to handle it.
+	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+	 * as soon as we call pci_request_irq, so we have to set up our
+	 * clean_rx handler before we do so.  */
+	e1000_configure(adapter);
+
+
+	err = e1000_request_irq(adapter);
+	if (err)
+		goto err_req_irq;
+
+	/* work around PCIe errata with MSI interrupts causing some chipsets to
+	 * ignore e1000 MSI messages, which means we need to test our MSI
+	 * interrupt now */
+	err = e1000_test_msi(adapter);
+	if (err) {
+		DPRINTK(PROBE, ERR, "Interrupt allocation failed\n");
+		goto err_req_irq;
+	}
+
+	/* From here on the code is the same as e1000_up() */
+	clear_bit(__E1000_DOWN, &adapter->state);
+
+	e1000_napi_enable_all(adapter);
+
+	schedule_delayed_work(&adapter->watchdog_task, 1);
+	e1000_irq_enable(adapter);
+
+	/* fire a link status change interrupt to start the watchdog */
+	E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
+
+	return E1000_SUCCESS;
+
+err_req_irq:
+	e1000_release_hw_control(adapter);
+	/* Power down the PHY so no link is implied when interface is down *
+	 * The PHY cannot be powered down if any of the following is TRUE *
+	 * (a) WoL is enabled
+	 * (b) AMT is active
+	 * (c) SoL/IDER session is active */
+	if (!adapter->wol && adapter->hw.mac.type >= e1000_82540 &&
+	   adapter->hw.phy.media_type == e1000_media_type_copper)
+		e1000_power_down_phy(&adapter->hw);
+	e1000_free_all_rx_resources(adapter);
+err_setup_rx:
+	e1000_free_all_tx_resources(adapter);
+err_setup_tx:
+	e1000_reset(adapter);
+
+	return err;
+}
+
+/**
+ * e1000_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the drivers control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int e1000_close(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
+	e1000_down(adapter);
+	/* Power down the PHY so no link is implied when interface is down *
+	 * The PHY cannot be powered down if any of the following is TRUE *
+	 * (a) WoL is enabled
+	 * (b) AMT is active
+	 * (c) SoL/IDER session is active */
+	if (!adapter->wol && adapter->hw.mac.type >= e1000_82540 &&
+	   adapter->hw.phy.media_type == e1000_media_type_copper)
+		e1000_power_down_phy(&adapter->hw);
+	e1000_free_irq(adapter);
+
+	e1000_free_all_tx_resources(adapter);
+	e1000_free_all_rx_resources(adapter);
+
+#ifdef NETIF_F_HW_VLAN_TX
+	/* kill manageability vlan ID if supported, but not if a vlan with
+	 * the same ID is registered on the host OS (let 8021q kill it) */
+	if ((adapter->hw.mng_cookie.status &
+			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+	     !(adapter->vlgrp &&
+	       vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
+		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+	}
+#endif
+
+	/* For 82573 and ICHx if AMT is enabled, let the firmware know
+	 * that the network interface is now closed */
+	if (((adapter->hw.mac.type == e1000_82573) ||
+	     (adapter->hw.mac.type == e1000_ich8lan) ||
+	     (adapter->hw.mac.type == e1000_ich9lan)) &&
+	    e1000_check_mng_mode(&adapter->hw))
+		e1000_release_hw_control(adapter);
+
+	return 0;
+}
+
+/**
+ * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
+ * @adapter: address of board private structure
+ * @start: address of beginning of memory
+ * @len: length of memory
+ **/
+static bool e1000_check_64k_bound(struct e1000_adapter *adapter,
+				       void *start, unsigned long len)
+{
+	unsigned long begin = (unsigned long) start;
+	unsigned long end = begin + len;
+
+	/* First revisions of 82545 and 82546 must not allow any memory
+	 * write location to cross a 64 KiB boundary due to errata 23 */
+	if (adapter->hw.mac.type == e1000_82545 ||
+	    adapter->hw.mac.type == e1000_82546) {
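+		/* begin and (end - 1) share the same upper address bits
+		 * (bit 16 and up) only if the buffer fits entirely within a
+		 * single 64 KiB region, hence the XOR/shift test below. */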
+		return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
+	}
+
+	return TRUE;
+}
+
+/**
+ * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @tx_ring:    tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *tx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size;
+
+	size = sizeof(struct e1000_buffer) * tx_ring->count;
+	tx_ring->buffer_info = vmalloc(size);
+	if (!tx_ring->buffer_info) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the transmit descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(tx_ring->buffer_info, 0, size);
+
+	/* round up to nearest 4K */
+
+	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+					     &tx_ring->dma);
+	if (!tx_ring->desc) {
+setup_tx_desc_die:
+		vfree(tx_ring->buffer_info);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the transmit descriptor ring\n");
+		return -ENOMEM;
+	}
+
+	/* Fix for errata 23, can't cross 64kB boundary */
+	if (!e1000_check_64k_bound(adapter, tx_ring->desc, tx_ring->size)) {
+		void *olddesc = tx_ring->desc;
+		dma_addr_t olddma = tx_ring->dma;
+		DPRINTK(TX_ERR, ERR, "tx_ring align check failed: %u bytes "
+				     "at %p\n", tx_ring->size, tx_ring->desc);
+		/* Try again, without freeing the previous allocation */
+		tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+						     &tx_ring->dma);
+		/* Failed allocation, critical failure */
+		if (!tx_ring->desc) {
+			pci_free_consistent(pdev, tx_ring->size, olddesc,
+					    olddma);
+			goto setup_tx_desc_die;
+		}
+
+		if (!e1000_check_64k_bound(adapter, tx_ring->desc,
+					   tx_ring->size)) {
+			/* give up */
+			pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
+					    tx_ring->dma);
+			pci_free_consistent(pdev, tx_ring->size, olddesc,
+					    olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate aligned memory "
+				"for the transmit descriptor ring\n");
+			vfree(tx_ring->buffer_info);
+			return -ENOMEM;
+		} else {
+			/* Free old allocation, new allocation was successful */
+			pci_free_consistent(pdev, tx_ring->size, olddesc,
+					    olddma);
+		}
+	}
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+	rtdm_lock_init(&tx_ring->tx_lock);
+
+	return 0;
+}
+
+/**
+ * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
+ * @adapter: board private structure
+ *
+ * Allocates Tx resources for all queues. Returns 0 on success, negative
+ * on failure.
+ **/
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Tx Queue %u failed\n", i);
+			for (i-- ; i >= 0; i--)
+				e1000_free_tx_resources(adapter,
+							&adapter->tx_ring[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void e1000_configure_tx(struct e1000_adapter *adapter)
+{
+	u64 tdba;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 tdlen, tctl, tipg, tarc;
+	u32 ipgr1, ipgr2;
+	int i;
+
+	/* Setup the HW Tx Head and Tail descriptor pointers */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		tdba = adapter->tx_ring[i].dma;
+		tdlen = adapter->tx_ring[i].count * sizeof(struct e1000_tx_desc);
+		E1000_WRITE_REG(hw, E1000_TDBAL(i), (tdba & 0x00000000ffffffffULL));
+		E1000_WRITE_REG(hw, E1000_TDBAH(i), (tdba >> 32));
+		E1000_WRITE_REG(hw, E1000_TDLEN(i), tdlen);
+		E1000_WRITE_REG(hw, E1000_TDH(i), 0);
+		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
+		adapter->tx_ring[i].tdh = E1000_REGISTER(hw, E1000_TDH(i));
+		adapter->tx_ring[i].tdt = E1000_REGISTER(hw, E1000_TDT(i));
+	}
+
+
+	/* Set the default values for the Tx Inter Packet Gap timer */
+	if (adapter->hw.mac.type <= e1000_82547_rev_2 &&
+	    (hw->phy.media_type == e1000_media_type_fiber ||
+	     hw->phy.media_type == e1000_media_type_internal_serdes))
+		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
+	else
+		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
+
+	switch (hw->mac.type) {
+	case e1000_82542:
+		tipg = DEFAULT_82542_TIPG_IPGT;
+		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
+		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
+		break;
+	case e1000_80003es2lan:
+		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
+		break;
+	default:
+		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
+		break;
+	}
+	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
+	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
+	E1000_WRITE_REG(hw, E1000_TIPG, tipg);
+
+	/* Set the Tx Interrupt Delay register */
+
+	E1000_WRITE_REG(hw, E1000_TIDV, adapter->tx_int_delay);
+	if (adapter->flags & E1000_FLAG_HAS_INTR_MODERATION)
+		E1000_WRITE_REG(hw, E1000_TADV, adapter->tx_abs_int_delay);
+
+	/* Program the Transmit Control Register */
+
+	tctl = E1000_READ_REG(hw, E1000_TCTL);
+	tctl &= ~E1000_TCTL_CT;
+	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+	if (hw->mac.type == e1000_82571 || hw->mac.type == e1000_82572) {
+		tarc = E1000_READ_REG(hw, E1000_TARC(0));
+		/* set the speed mode bit; we'll clear it later if we're not
+		 * at gigabit link */
+#define SPEED_MODE_BIT (1 << 21)
+		tarc |= SPEED_MODE_BIT;
+		E1000_WRITE_REG(hw, E1000_TARC(0), tarc);
+	} else if (hw->mac.type == e1000_80003es2lan) {
+		tarc = E1000_READ_REG(hw, E1000_TARC(0));
+		tarc |= 1;
+		E1000_WRITE_REG(hw, E1000_TARC(0), tarc);
+		tarc = E1000_READ_REG(hw, E1000_TARC(1));
+		tarc |= 1;
+		E1000_WRITE_REG(hw, E1000_TARC(1), tarc);
+	}
+
+	e1000_config_collision_dist(hw);
+
+	/* Setup Transmit Descriptor Settings for eop descriptor */
+	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+	/* only set IDE if we are delaying interrupts using the timers */
+	if (adapter->tx_int_delay)
+		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
+
+	if (hw->mac.type < e1000_82543)
+		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
+	else
+		adapter->txd_cmd |= E1000_TXD_CMD_RS;
+
+	/* Cache if we're 82544 running in PCI-X because we'll
+	 * need this to apply a workaround later in the send path. */
+	if (hw->mac.type == e1000_82544 &&
+	    hw->bus.type == e1000_bus_type_pcix)
+		adapter->pcix_82544 = 1;
+
+	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+
+}
+
+/**
+ * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rx_ring:    rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size, desc_len;
+
+	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
+	rx_ring->buffer_info = vmalloc(size);
+	if (!rx_ring->buffer_info) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(rx_ring->buffer_info, 0, size);
+
+	rx_ring->ps_page = kcalloc(rx_ring->count, sizeof(struct e1000_ps_page),
+				   GFP_KERNEL);
+	if (!rx_ring->ps_page) {
+		vfree(rx_ring->buffer_info);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+		return -ENOMEM;
+	}
+
+	rx_ring->ps_page_dma = kcalloc(rx_ring->count,
+				       sizeof(struct e1000_ps_page_dma),
+				       GFP_KERNEL);
+	if (!rx_ring->ps_page_dma) {
+		vfree(rx_ring->buffer_info);
+		kfree(rx_ring->ps_page);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+		return -ENOMEM;
+	}
+
+	if (adapter->hw.mac.type <= e1000_82547_rev_2)
+		desc_len = sizeof(struct e1000_rx_desc);
+	else
+		desc_len = sizeof(union e1000_rx_desc_packet_split);
+
+	/* Round up to nearest 4K */
+
+	rx_ring->size = rx_ring->count * desc_len;
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
+					     &rx_ring->dma);
+
+	if (!rx_ring->desc) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+setup_rx_desc_die:
+		vfree(rx_ring->buffer_info);
+		kfree(rx_ring->ps_page);
+		kfree(rx_ring->ps_page_dma);
+		return -ENOMEM;
+	}
+
+	/* Fix for errata 23, can't cross 64kB boundary */
+	if (!e1000_check_64k_bound(adapter, rx_ring->desc, rx_ring->size)) {
+		void *olddesc = rx_ring->desc;
+		dma_addr_t olddma = rx_ring->dma;
+		DPRINTK(RX_ERR, ERR, "rx_ring align check failed: %u bytes "
+				     "at %p\n", rx_ring->size, rx_ring->desc);
+		/* Try again, without freeing the previous allocation */
+		rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
+						     &rx_ring->dma);
+		/* Failed allocation, critical failure */
+		if (!rx_ring->desc) {
+			pci_free_consistent(pdev, rx_ring->size, olddesc,
+					    olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate memory "
+				"for the receive descriptor ring\n");
+			goto setup_rx_desc_die;
+		}
+
+		if (!e1000_check_64k_bound(adapter, rx_ring->desc,
+					   rx_ring->size)) {
+			/* give up */
+			pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
+					    rx_ring->dma);
+			pci_free_consistent(pdev, rx_ring->size, olddesc,
+					    olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate aligned memory "
+				"for the receive descriptor ring\n");
+			goto setup_rx_desc_die;
+		} else {
+			/* Free old allocation, new allocation was successful */
+			pci_free_consistent(pdev, rx_ring->size, olddesc,
+					    olddma);
+		}
+	}
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	/* set up ring defaults */
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+	rx_ring->rx_skb_top = NULL;
+	rx_ring->adapter = adapter;
+
+	return 0;
+}
+
+/**
+ * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
+ * @adapter: board private structure
+ *
+ * Allocates Rx resources for all queues. Returns 0 on success, negative
+ * on failure.
+ **/
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Rx Queue %u failed\n", i);
+			for (i-- ; i >= 0; i--)
+				e1000_free_rx_resources(adapter,
+							&adapter->rx_ring[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
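+/* PAGE_USE_COUNT(S) is a ceiling division: the number of whole pages
+ * needed to hold S bytes. */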
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+			(((S) & (PAGE_SIZE - 1)) ? 1 : 0))
+/**
+ * e1000_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+static void e1000_setup_rctl(struct e1000_adapter *adapter)
+{
+	u32 rctl, rfctl;
+	u32 psrctl = 0;
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+	u32 pages = 0;
+#endif
+
+	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+
+	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+
+	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
+		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+	/* disable the stripping of CRC because it breaks
+	 * BMC firmware connected over SMBUS
+	if (adapter->hw.mac.type > e1000_82543)
+		rctl |= E1000_RCTL_SECRC;
+	*/
+
+	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
+		rctl |= E1000_RCTL_SBP;
+	else
+		rctl &= ~E1000_RCTL_SBP;
+
+	if (adapter->netdev->mtu <= ETH_DATA_LEN)
+		rctl &= ~E1000_RCTL_LPE;
+	else
+		rctl |= E1000_RCTL_LPE;
+
+	/* Setup buffer sizes */
+	rctl &= ~E1000_RCTL_SZ_4096;
+	rctl |= E1000_RCTL_BSEX;
+	switch (adapter->rx_buffer_len) {
+		case E1000_RXBUFFER_256:
+			rctl |= E1000_RCTL_SZ_256;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_512:
+			rctl |= E1000_RCTL_SZ_512;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_1024:
+			rctl |= E1000_RCTL_SZ_1024;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_2048:
+		default:
+			rctl |= E1000_RCTL_SZ_2048;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_4096:
+			rctl |= E1000_RCTL_SZ_4096;
+			break;
+		case E1000_RXBUFFER_8192:
+			rctl |= E1000_RCTL_SZ_8192;
+			break;
+		case E1000_RXBUFFER_16384:
+			rctl |= E1000_RCTL_SZ_16384;
+			break;
+	}
+
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+	/* 82571 and greater support packet-split where the protocol
+	 * header is placed in skb->data and the packet data is
+	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
+	 * In the case of a non-split, skb->data is linearly filled,
+	 * followed by the page buffers.  Therefore, skb->data is
+	 * sized to hold the largest protocol header.
+	 */
+	/* allocations using alloc_page take too long for regular MTU
+	 * so only enable packet split for jumbo frames */
+	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
+	if ((adapter->hw.mac.type >= e1000_82571) && (pages <= 3) &&
+	    PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE))
+		adapter->rx_ps_pages = pages;
+	else
+		adapter->rx_ps_pages = 0;
+#endif
+
+	if (adapter->rx_ps_pages) {
+		/* Configure extra packet-split registers */
+		rfctl = E1000_READ_REG(&adapter->hw, E1000_RFCTL);
+		rfctl |= E1000_RFCTL_EXTEN;
+		/* disable packet split support for IPv6 extension headers,
+		 * because some malformed IPv6 headers can hang the RX */
+		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
+			  E1000_RFCTL_NEW_IPV6_EXT_DIS);
+
+		E1000_WRITE_REG(&adapter->hw, E1000_RFCTL, rfctl);
+
+		/* disable the stripping of CRC because it breaks
+		 * BMC firmware connected over SMBUS */
+		rctl |= E1000_RCTL_DTYP_PS /* | E1000_RCTL_SECRC */;
+
+		psrctl |= adapter->rx_ps_bsize0 >>
+			E1000_PSRCTL_BSIZE0_SHIFT;
+
+		switch (adapter->rx_ps_pages) {
+		case 3:
+			psrctl |= PAGE_SIZE <<
+				E1000_PSRCTL_BSIZE3_SHIFT;
+			fallthrough;
+		case 2:
+			psrctl |= PAGE_SIZE <<
+				E1000_PSRCTL_BSIZE2_SHIFT;
+			fallthrough;
+		case 1:
+			psrctl |= PAGE_SIZE >>
+				E1000_PSRCTL_BSIZE1_SHIFT;
+			break;
+		}
+
+		E1000_WRITE_REG(&adapter->hw, E1000_PSRCTL, psrctl);
+	}
+
+	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+	adapter->flags &= ~E1000_FLAG_RX_RESTART_NOW;
+}
+
+/**
+ * e1000_configure_rx - Configure 8254x Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void e1000_configure_rx(struct e1000_adapter *adapter)
+{
+	u64 rdba;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rdlen, rctl, rxcsum, ctrl_ext;
+	int i;
+
+	if (adapter->rx_ps_pages) {
+		/* this is a 32 byte descriptor */
+		rdlen = adapter->rx_ring[0].count *
+			sizeof(union e1000_rx_desc_packet_split);
+		adapter->clean_rx = e1000_clean_rx_irq_ps;
+		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
+#ifdef CONFIG_E1000_NAPI
+	} else if (adapter->netdev->mtu > MAXIMUM_ETHERNET_VLAN_SIZE) {
+		rdlen = adapter->rx_ring[0].count *
+			sizeof(struct e1000_rx_desc);
+		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
+		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
+#endif
+	} else {
+		rdlen = adapter->rx_ring[0].count *
+			sizeof(struct e1000_rx_desc);
+		adapter->clean_rx = e1000_clean_rx_irq;
+		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+	}
+
+	/* disable receives while setting up the descriptors */
+	rctl = E1000_READ_REG(hw, E1000_RCTL);
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+	E1000_WRITE_FLUSH(hw);
+	mdelay(10);
+
+	/* set the Receive Delay Timer Register */
+	E1000_WRITE_REG(hw, E1000_RDTR, adapter->rx_int_delay);
+
+	if (adapter->flags & E1000_FLAG_HAS_INTR_MODERATION) {
+		E1000_WRITE_REG(hw, E1000_RADV, adapter->rx_abs_int_delay);
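+		/* The ITR register counts in units of 256 ns, so convert the
+		 * requested interrupts/sec rate (adapter->itr) into that unit. */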
+		if (adapter->itr_setting != 0)
+			E1000_WRITE_REG(hw, E1000_ITR,
+				1000000000 / (adapter->itr * 256));
+	}
+
+	if (hw->mac.type >= e1000_82571) {
+		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+		/* Reset delay timers after every interrupt */
+		ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
+#ifdef CONFIG_E1000_NAPI
+		/* Auto-Mask interrupts upon ICR access */
+		ctrl_ext |= E1000_CTRL_EXT_IAME;
+		E1000_WRITE_REG(hw, E1000_IAM, 0xffffffff);
+#endif
+		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rdba = adapter->rx_ring[i].dma;
+		E1000_WRITE_REG(hw, E1000_RDBAL(i), (rdba & 0x00000000ffffffffULL));
+		E1000_WRITE_REG(hw, E1000_RDBAH(i), (rdba >> 32));
+		E1000_WRITE_REG(hw, E1000_RDLEN(i), rdlen);
+		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
+		E1000_WRITE_REG(hw, E1000_RDT(i), 0);
+		adapter->rx_ring[i].rdh = E1000_REGISTER(hw, E1000_RDH(i));
+		adapter->rx_ring[i].rdt = E1000_REGISTER(hw, E1000_RDT(i));
+	}
+
+#ifdef CONFIG_E1000_MQ
+	if (adapter->num_rx_queues > 1) {
+		u32 random[10];
+		u32 reta, mrqc;
+		int i;
+
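+		/* 40 random bytes fill the ten 32-bit RSS hash key (RSSRK)
+		 * registers programmed below. */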
+		get_random_bytes(&random[0], 40);
+
+		switch (adapter->num_rx_queues) {
+		default:
+			reta = 0x00800080;
+			mrqc = E1000_MRQC_ENABLE_RSS_2Q;
+			break;
+		}
+
+		/* Fill out redirection table */
+		for (i = 0; i < 32; i++)
+			E1000_WRITE_REG_ARRAY(hw, E1000_RETA, i, reta);
+		/* Fill out hash function seeds */
+		for (i = 0; i < 10; i++)
+			E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK, i, random[i]);
+
+		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
+			 E1000_MRQC_RSS_FIELD_IPV4_TCP);
+
+		E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+
+		/* Multiqueue and packet checksumming are mutually exclusive. */
+		rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
+		rxcsum |= E1000_RXCSUM_PCSD;
+		E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
+	} else if (hw->mac.type >= e1000_82543) {
+#else
+	if (hw->mac.type >= e1000_82543) {
+#endif /* CONFIG_E1000_MQ */
+		/* Enable 82543 Receive Checksum Offload for TCP and UDP */
+		rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
+		if (adapter->rx_csum == TRUE) {
+			rxcsum |= E1000_RXCSUM_TUOFL;
+
+			/* Enable 82571 IPv4 payload checksum for UDP fragments
+			 * Must be used in conjunction with packet-split. */
+			if ((hw->mac.type >= e1000_82571) &&
+			    (adapter->rx_ps_pages)) {
+				rxcsum |= E1000_RXCSUM_IPPCSE;
+			}
+		} else {
+			rxcsum &= ~E1000_RXCSUM_TUOFL;
+			/* don't need to clear IPPCSE as it defaults to 0 */
+		}
+		E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
+	}
+
+	/* Enable early receives on supported devices; this only takes effect
+	 * when the packet size is equal to or larger than the specified value
+	 * (in 8-byte units), e.g. when using jumbo frames with E1000_ERT_2048 */
+	if ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_ich9lan) &&
+	    (adapter->netdev->mtu > ETH_DATA_LEN))
+		E1000_WRITE_REG(hw, E1000_ERT, E1000_ERT_2048);
+
+	/* Enable Receives */
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+/**
+ * e1000_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+static void e1000_free_tx_resources(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *tx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	e1000_clean_tx_ring(adapter, tx_ring);
+
+	vfree(tx_ring->buffer_info);
+	tx_ring->buffer_info = NULL;
+
+	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+
+	tx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
+}
+
+static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
+					     struct e1000_buffer *buffer_info)
+{
+	if (buffer_info->dma) {
+		pci_unmap_page(adapter->pdev,
+				buffer_info->dma,
+				buffer_info->length,
+				PCI_DMA_TODEVICE);
+		buffer_info->dma = 0;
+	}
+	if (buffer_info->skb) {
+		kfree_rtskb(buffer_info->skb);
+		buffer_info->skb = NULL;
+	}
+	/* buffer_info must be completely set up in the transmit path */
+}
+
+/**
+ * e1000_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+				struct e1000_tx_ring *tx_ring)
+{
+	struct e1000_buffer *buffer_info;
+	unsigned long size;
+	unsigned int i;
+
+	/* Free all the Tx ring sk_buffs */
+
+	for (i = 0; i < tx_ring->count; i++) {
+		buffer_info = &tx_ring->buffer_info[i];
+		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+	}
+
+	size = sizeof(struct e1000_buffer) * tx_ring->count;
+	memset(tx_ring->buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+	tx_ring->last_tx_tso = 0;
+
+	writel(0, adapter->hw.hw_addr + tx_ring->tdh);
+	writel(0, adapter->hw.hw_addr + tx_ring->tdt);
+}
+
+/**
+ * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+/**
+ * e1000_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+static void e1000_free_rx_resources(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	e1000_clean_rx_ring(adapter, rx_ring);
+
+	vfree(rx_ring->buffer_info);
+	rx_ring->buffer_info = NULL;
+	kfree(rx_ring->ps_page);
+	rx_ring->ps_page = NULL;
+	kfree(rx_ring->ps_page_dma);
+	rx_ring->ps_page_dma = NULL;
+
+	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+
+	rx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+				struct e1000_rx_ring *rx_ring)
+{
+	struct e1000_rx_buffer *buffer_info;
+	struct e1000_ps_page *ps_page;
+	struct e1000_ps_page_dma *ps_page_dma;
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned long size;
+	unsigned int i, j;
+
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rx_ring->count; i++) {
+		buffer_info = &rx_ring->buffer_info[i];
+		if (buffer_info->dma &&
+		    adapter->clean_rx == e1000_clean_rx_irq) {
+			pci_unmap_single(pdev, buffer_info->dma,
+					 adapter->rx_buffer_len,
+					 PCI_DMA_FROMDEVICE);
+#ifdef CONFIG_E1000_NAPI
+		} else if (buffer_info->dma &&
+			   adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
+			pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
+				       PCI_DMA_FROMDEVICE);
+#endif
+		} else if (buffer_info->dma &&
+			   adapter->clean_rx == e1000_clean_rx_irq_ps) {
+			pci_unmap_single(pdev, buffer_info->dma,
+					 adapter->rx_ps_bsize0,
+					 PCI_DMA_FROMDEVICE);
+		}
+		buffer_info->dma = 0;
+		if (buffer_info->page) {
+			put_page(buffer_info->page);
+			buffer_info->page = NULL;
+		}
+		if (buffer_info->skb) {
+			kfree_rtskb(buffer_info->skb);
+			buffer_info->skb = NULL;
+		}
+		ps_page = &rx_ring->ps_page[i];
+		ps_page_dma = &rx_ring->ps_page_dma[i];
+		for (j = 0; j < adapter->rx_ps_pages; j++) {
+			if (!ps_page->ps_page[j]) break;
+			pci_unmap_page(pdev,
+				       ps_page_dma->ps_page_dma[j],
+				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+			ps_page_dma->ps_page_dma[j] = 0;
+			put_page(ps_page->ps_page[j]);
+			ps_page->ps_page[j] = NULL;
+		}
+	}
+
+#ifdef CONFIG_E1000_NAPI
+	/* there also may be some cached data from a chained receive */
+	if (rx_ring->rx_skb_top) {
+		kfree_rtskb(rx_ring->rx_skb_top);
+		rx_ring->rx_skb_top = NULL;
+	}
+#endif
+
+	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
+	memset(rx_ring->buffer_info, 0, size);
+	size = sizeof(struct e1000_ps_page) * rx_ring->count;
+	memset(rx_ring->ps_page, 0, size);
+	size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
+	memset(rx_ring->ps_page_dma, 0, size);
+
+	/* Zero out the descriptor ring */
+
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	writel(0, adapter->hw.hw_addr + rx_ring->rdh);
+	writel(0, adapter->hw.hw_addr + rx_ring->rdt);
+}
+
+/**
+ * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
+ * and memory write and invalidate disabled for certain operations
+ */
+#if 0
+static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u32 rctl;
+
+	if (adapter->hw.mac.type != e1000_82542)
+		return;
+	if (adapter->hw.revision_id != E1000_REVISION_2)
+		return;
+
+	e1000_pci_clear_mwi(&adapter->hw);
+
+	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+	rctl |= E1000_RCTL_RST;
+	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+	E1000_WRITE_FLUSH(&adapter->hw);
+	mdelay(5);
+
+	if (rtnetif_running(netdev))
+		e1000_clean_all_rx_rings(adapter);
+}
+
+static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u32 rctl;
+
+	if (adapter->hw.mac.type != e1000_82542)
+		return;
+	if (adapter->hw.revision_id != E1000_REVISION_2)
+		return;
+
+	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+	rctl &= ~E1000_RCTL_RST;
+	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+	E1000_WRITE_FLUSH(&adapter->hw);
+	mdelay(5);
+
+	if (adapter->hw.bus.pci_cmd_word & PCI_COMMAND_INVALIDATE)
+		e1000_pci_set_mwi(&adapter->hw);
+
+	if (rtnetif_running(netdev)) {
+		/* No need to loop, because 82542 supports only 1 queue */
+		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
+		e1000_configure_rx(adapter);
+		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
+	}
+}
+
+/**
+ * e1000_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int e1000_set_mac(struct net_device *netdev, void *p)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	/* 82542 2.0 needs to be in reset to write receive address registers */
+
+	if (adapter->hw.mac.type == e1000_82542)
+		e1000_enter_82542_rst(adapter);
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
+
+	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+
+	/* With 82571 controllers, LAA may be overwritten (with the default)
+	 * due to controller reset from the other port. */
+	if (adapter->hw.mac.type == e1000_82571) {
+		/* activate the work around */
+		e1000_set_laa_state_82571(&adapter->hw, TRUE);
+
+		/* Hold a copy of the LAA in RAR[14] This is done so that
+		 * between the time RAR[0] gets clobbered  and the time it
+		 * gets fixed (in e1000_watchdog), the actual LAA is in one
+		 * of the RARs and no incoming packets directed to this port
+		 * are dropped. Eventually the LAA will be in RAR[0] and
+		 * RAR[14] */
+		e1000_rar_set(&adapter->hw,
+			      adapter->hw.mac.addr,
+			      adapter->hw.mac.rar_entry_count - 1);
+	}
+
+	if (adapter->hw.mac.type == e1000_82542)
+		e1000_leave_82542_rst(adapter);
+
+	return 0;
+}
+#endif
+
+/**
+ * e1000_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void e1000_set_multi(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+
+	/* Check for Promiscuous and All Multicast modes */
+
+	rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+	if (netdev->flags & IFF_PROMISC) {
+		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+	} else if (netdev->flags & IFF_ALLMULTI) {
+		rctl |= E1000_RCTL_MPE;
+		rctl &= ~E1000_RCTL_UPE;
+	} else {
+		rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+	}
+
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+/* Need to wait a few seconds after link up to get diagnostic information from
+ * the phy */
+static void e1000_update_phy_info_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+						     struct e1000_adapter,
+						     phy_info_task.work);
+	e1000_get_phy_info(&adapter->hw);
+}
+
+/**
+ * e1000_82547_tx_fifo_stall_task - task to complete work
+ * @work: work struct contained inside adapter struct
+ **/
+static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+						     struct e1000_adapter,
+						     fifo_stall_task.work);
+	struct net_device *netdev = adapter->netdev;
+	u32 tctl;
+
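+	/* 82547 Tx FIFO stall workaround: once the descriptor ring and the
+	 * on-chip Tx FIFO have fully drained (head == tail for each), reset
+	 * the FIFO pointers to the saved head address (adapter->tx_head_addr)
+	 * and wake the queue. */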
+	if (atomic_read(&adapter->tx_fifo_stall)) {
+		if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
+		    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
+		   (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
+		    E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
+		   (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
+		    E1000_READ_REG(&adapter->hw, E1000_TDFHS))) {
+			tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
+			E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
+					tctl & ~E1000_TCTL_EN);
+			E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
+					adapter->tx_head_addr);
+			E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
+					adapter->tx_head_addr);
+			E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
+					adapter->tx_head_addr);
+			E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
+					adapter->tx_head_addr);
+			E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
+			E1000_WRITE_FLUSH(&adapter->hw);
+
+			adapter->tx_fifo_head = 0;
+			atomic_set(&adapter->tx_fifo_stall, 0);
+			rtnetif_wake_queue(netdev);
+		} else if (!test_bit(__E1000_DOWN, &adapter->state))
+			schedule_delayed_work(&adapter->fifo_stall_task, 1);
+	}
+}
+
+static bool e1000_has_link(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	bool link_active = FALSE;
+	s32 ret_val = 0;
+
+	/* get_link_status is set on LSC (link status) interrupt or
+	 * rx sequence error interrupt.  get_link_status will stay
+	 * false until the e1000_check_for_link establishes link
+	 * for copper adapters ONLY
+	 */
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		if (hw->mac.get_link_status) {
+			ret_val = e1000_check_for_link(hw);
+			link_active = !hw->mac.get_link_status;
+		} else {
+			link_active = TRUE;
+		}
+		break;
+	case e1000_media_type_fiber:
+		ret_val = e1000_check_for_link(hw);
+		link_active = !!(E1000_READ_REG(hw, E1000_STATUS) &
+				 E1000_STATUS_LU);
+		break;
+	case e1000_media_type_internal_serdes:
+		ret_val = e1000_check_for_link(hw);
+		link_active = adapter->hw.mac.serdes_has_link;
+		break;
+	default:
+	case e1000_media_type_unknown:
+		break;
+	}
+
+	if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
+	    (E1000_READ_REG(&adapter->hw, E1000_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+		/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
+		DPRINTK(LINK, INFO,
+			"Gigabit has been disabled, downgrading speed\n");
+	}
+
+	return link_active;
+}
+
+static void e1000_enable_receives(struct e1000_adapter *adapter)
+{
+	/* make sure the receive unit is started */
+	if ((adapter->flags & E1000_FLAG_RX_NEEDS_RESTART) &&
+	    (adapter->flags & E1000_FLAG_RX_RESTART_NOW)) {
+		struct e1000_hw *hw = &adapter->hw;
+		u32 rctl = E1000_READ_REG(hw, E1000_RCTL);
+		E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
+		adapter->flags &= ~E1000_FLAG_RX_RESTART_NOW;
+	}
+}
+
+static void e1000_watchdog_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+						     struct e1000_adapter,
+						     watchdog_task.work);
+
+	struct net_device *netdev = adapter->netdev;
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+	struct e1000_tx_ring *tx_ring;
+	u32 link, tctl;
+	int i, tx_pending = 0;
+
+	link = e1000_has_link(adapter);
+	if ((rtnetif_carrier_ok(netdev)) && link) {
+		e1000_enable_receives(adapter);
+		goto link_up;
+	}
+
+	if (mac->type == e1000_82573) {
+		e1000_enable_tx_pkt_filtering(&adapter->hw);
+#ifdef NETIF_F_HW_VLAN_TX
+		if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
+			e1000_update_mng_vlan(adapter);
+#endif
+	}
+
+	if (link) {
+		if (!rtnetif_carrier_ok(netdev)) {
+			u32 ctrl;
+			bool txb2b = 1;
+#ifdef SIOCGMIIPHY
+			/* update snapshot of PHY registers on LSC */
+			e1000_phy_read_status(adapter);
+#endif
+			e1000_get_speed_and_duplex(&adapter->hw,
+						   &adapter->link_speed,
+						   &adapter->link_duplex);
+
+			ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+			DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
+				"Flow Control: %s\n",
+				adapter->link_speed,
+				adapter->link_duplex == FULL_DUPLEX ?
+				"Full Duplex" : "Half Duplex",
+				((ctrl & E1000_CTRL_TFCE) && (ctrl &
+				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
+				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
+				E1000_CTRL_TFCE) ? "TX" : "None" )));
+
+			/* tweak tx_queue_len according to speed/duplex
+			 * and adjust the timeout factor */
+			//netdev->tx_queue_len = adapter->tx_queue_len;
+			adapter->tx_timeout_factor = 1;
+			switch (adapter->link_speed) {
+			case SPEED_10:
+				txb2b = 0;
+				//netdev->tx_queue_len = 10;
+				adapter->tx_timeout_factor = 16;
+				break;
+			case SPEED_100:
+				txb2b = 0;
+				//netdev->tx_queue_len = 100;
+				/* maybe add some timeout factor ? */
+				break;
+			}
+
+			if ((mac->type == e1000_82571 ||
+			     mac->type == e1000_82572) &&
+			    txb2b == 0) {
+				u32 tarc0;
+				tarc0 = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
+				tarc0 &= ~SPEED_MODE_BIT;
+				E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc0);
+			}
+
+#ifdef NETIF_F_TSO
+			/* disable TSO for pcie and 10/100 speeds, to avoid
+			 * some hardware issues */
+			if (!(adapter->flags & E1000_FLAG_TSO_FORCE) &&
+			    adapter->hw.bus.type == e1000_bus_type_pci_express) {
+				switch (adapter->link_speed) {
+				case SPEED_10:
+				case SPEED_100:
+					DPRINTK(PROBE, INFO,
+						"10/100 speed: disabling TSO\n");
+					netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+					netdev->features &= ~NETIF_F_TSO6;
+#endif
+					break;
+				case SPEED_1000:
+					netdev->features |= NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+					netdev->features |= NETIF_F_TSO6;
+#endif
+					break;
+				default:
+					/* oops */
+					break;
+				}
+			}
+#endif
+
+			/* enable transmits in the hardware, need to do this
+			 * after setting TARC0 */
+			tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
+			tctl |= E1000_TCTL_EN;
+			E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
+
+			rtnetif_carrier_on(netdev);
+			rtnetif_wake_queue(netdev);
+#ifdef CONFIG_E1000_MQ
+			if (netif_is_multiqueue(netdev))
+				for (i = 0; i < adapter->num_tx_queues; i++)
+					netif_wake_subqueue(netdev, i);
+#endif
+
+			if (!test_bit(__E1000_DOWN, &adapter->state))
+				schedule_delayed_work(&adapter->phy_info_task,
+						      2 * HZ);
+			adapter->smartspeed = 0;
+		}
+	} else {
+		if (rtnetif_carrier_ok(netdev)) {
+			adapter->link_speed = 0;
+			adapter->link_duplex = 0;
+			DPRINTK(LINK, INFO, "NIC Link is Down\n");
+			rtnetif_carrier_off(netdev);
+			rtnetif_stop_queue(netdev);
+			if (!test_bit(__E1000_DOWN, &adapter->state))
+				schedule_delayed_work(&adapter->phy_info_task,
+						      2 * HZ);
+
+			/* 80003ES2LAN workaround--
+			 * For packet buffer work-around on link down event;
+			 * disable receives in the ISR and
+			 * reset device here in the watchdog
+			 */
+			if (adapter->flags & E1000_FLAG_RX_NEEDS_RESTART)
+				/* reset device */
+				schedule_work(&adapter->reset_task);
+		}
+
+		e1000_smartspeed(adapter);
+	}
+
+link_up:
+	e1000_update_stats(adapter);
+
+	mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+	adapter->tpt_old = adapter->stats.tpt;
+	mac->collision_delta = adapter->stats.colc - adapter->colc_old;
+	adapter->colc_old = adapter->stats.colc;
+
+	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
+	adapter->gorc_old = adapter->stats.gorc;
+	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
+	adapter->gotc_old = adapter->stats.gotc;
+
+	e1000_update_adaptive(&adapter->hw);
+
+	if (!rtnetif_carrier_ok(netdev)) {
+		for (i = 0 ; i < adapter->num_tx_queues ; i++) {
+			tx_ring = &adapter->tx_ring[i];
+			tx_pending |= (E1000_DESC_UNUSED(tx_ring) + 1 <
+							       tx_ring->count);
+		}
+		if (tx_pending) {
+			/* We've lost link, so the controller stops DMA,
+			 * but we've got queued Tx work that's never going
+			 * to get done, so reset controller to flush Tx.
+			 * (Do the reset outside of interrupt context). */
+			adapter->tx_timeout_count++;
+			schedule_work(&adapter->reset_task);
+		}
+	}
+
+	/* Cause software interrupt to ensure rx ring is cleaned */
+	E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_RXDMT0);
+
+	/* Force detection of hung controller every watchdog period */
+	adapter->detect_tx_hung = TRUE;
+
+	/* With 82571 controllers, LAA may be overwritten due to controller
+	 * reset from the other port. Set the appropriate LAA in RAR[0] */
+	if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
+		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+
+	/* Reschedule the task */
+	if (!test_bit(__E1000_DOWN, &adapter->state))
+		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
+}
+
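+/* Coarse traffic classes used by the adaptive interrupt throttling (ITR)
+ * heuristics below. */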
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+/**
+ * e1000_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ *      Stores a new ITR value based on packets and byte
+ *      counts during the last interrupt.  The advantage of per interrupt
+ *      computation is faster updates and more accurate ITR for the current
+ *      traffic pattern.  Constants in this function were computed
+ *      based on theoretical maximum wire speed and thresholds were set based
+ *      on testing data as well as attempting to minimize response time
+ *      while increasing bulk throughput.
+ *      this functionality is controlled by the InterruptThrottleRate module
+ *      parameter (see e1000_param.c)
+ **/
+#if 0
+static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
+				     u16 itr_setting, int packets,
+				     int bytes)
+{
+	unsigned int retval = itr_setting;
+
+	if (unlikely(!(adapter->flags & E1000_FLAG_HAS_INTR_MODERATION)))
+		goto update_itr_done;
+
+	if (packets == 0)
+		goto update_itr_done;
+
+	switch (itr_setting) {
+	case lowest_latency:
+		/* handle TSO and jumbo frames */
+		if (bytes/packets > 8000)
+			retval = bulk_latency;
+		else if ((packets < 5) && (bytes > 512)) {
+			retval = low_latency;
+		}
+		break;
+	case low_latency:  /* 50 usec aka 20000 ints/s */
+		if (bytes > 10000) {
+			/* this if handles the TSO accounting */
+			if (bytes/packets > 8000) {
+				retval = bulk_latency;
+			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
+				retval = bulk_latency;
+			} else if ((packets > 35)) {
+				retval = lowest_latency;
+			}
+		} else if (bytes/packets > 2000) {
+			retval = bulk_latency;
+		} else if (packets <= 2 && bytes < 512) {
+			retval = lowest_latency;
+		}
+		break;
+	case bulk_latency: /* 250 usec aka 4000 ints/s */
+		if (bytes > 25000) {
+			if (packets > 35) {
+				retval = low_latency;
+			}
+		} else if (bytes < 6000) {
+			retval = low_latency;
+		}
+		break;
+	}
+
+update_itr_done:
+	return retval;
+}
+#endif
+
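+/* Dynamic ITR adjustment is not performed here; the empty stub keeps the
+ * call sites in the interrupt handlers unchanged. */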
+static void e1000_set_itr(struct e1000_adapter *adapter)
+{
+}
+
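+/* Per-packet transmit flags passed from e1000_xmit_frame_ring() down to
+ * e1000_tx_queue() */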
+#define E1000_TX_FLAGS_CSUM		0x00000001
+#define E1000_TX_FLAGS_VLAN		0x00000002
+#define E1000_TX_FLAGS_TSO		0x00000004
+#define E1000_TX_FLAGS_IPV4		0x00000008
+#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
+#define E1000_TX_FLAGS_VLAN_SHIFT	16
+
+static int e1000_tso(struct e1000_adapter *adapter,
+		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+{
+#ifdef NETIF_F_TSO
+	struct e1000_context_desc *context_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i;
+	u32 cmd_length = 0;
+	u16 ipcse = 0, tucse, mss;
+	u8 ipcss, ipcso, tucss, tucso, hdr_len;
+	int err;
+
+	if (skb_is_gso(skb)) {
+		if (skb_header_cloned(skb)) {
+			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+			if (err)
+				return err;
+		}
+
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		mss = skb_shinfo(skb)->gso_size;
+		if (skb->protocol == htons(ETH_P_IP)) {
+			struct iphdr *iph = ip_hdr(skb);
+			iph->tot_len = 0;
+			iph->check = 0;
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								 iph->daddr, 0,
+								 IPPROTO_TCP,
+								 0);
+			cmd_length = E1000_TXD_CMD_IP;
+			ipcse = skb_transport_offset(skb) - 1;
+#ifdef NETIF_F_TSO6
+		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+			ipv6_hdr(skb)->payload_len = 0;
+			tcp_hdr(skb)->check =
+				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						 &ipv6_hdr(skb)->daddr,
+						 0, IPPROTO_TCP, 0);
+			ipcse = 0;
+#endif
+		}
+		ipcss = skb_network_offset(skb);
+		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
+		tucss = skb_transport_offset(skb);
+		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
+		tucse = 0;
+
+		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
+			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
+
+		i = tx_ring->next_to_use;
+		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+		buffer_info = &tx_ring->buffer_info[i];
+
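+		/* Program a TSO context descriptor: IP/TCP checksum field
+		 * offsets, MSS and total header length for the hardware
+		 * segmentation engine. */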
+		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
+		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
+		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
+		context_desc->upper_setup.tcp_fields.tucss = tucss;
+		context_desc->upper_setup.tcp_fields.tucso = tucso;
+		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
+		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
+		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
+
+		buffer_info->time_stamp = jiffies;
+		buffer_info->next_to_watch = i;
+
+		if (++i == tx_ring->count) i = 0;
+		tx_ring->next_to_use = i;
+
+		return TRUE;
+	}
+#endif
+
+	return FALSE;
+}
+
+static bool e1000_tx_csum(struct e1000_adapter *adapter,
+			       struct e1000_tx_ring *tx_ring,
+			       struct sk_buff *skb)
+{
+	struct e1000_context_desc *context_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i;
+	// u8 css;
+	u32 cmd_len = E1000_TXD_CMD_DEXT;
+
+	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
+		return FALSE;
+
+	switch (skb->protocol) {
+	case __constant_htons(ETH_P_IP):
+		break;
+	default:
+		if (unlikely(net_ratelimit())) {
+			DPRINTK(PROBE, WARNING, "checksum_partial proto=%x!\n",
+				skb->protocol);
+		}
+		break;
+	}
+
+	// css = skb_transport_offset(skb);
+
+	i = tx_ring->next_to_use;
+	buffer_info = &tx_ring->buffer_info[i];
+	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+
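+	/* Minimal checksum context descriptor: only the extended-descriptor
+	 * command is set up here. */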
+	context_desc->lower_setup.ip_config = 0;
+	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
+
+	buffer_info->time_stamp = jiffies;
+	buffer_info->next_to_watch = i;
+
+	if (unlikely(++i == tx_ring->count)) i = 0;
+	tx_ring->next_to_use = i;
+
+	return TRUE;
+}
+
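+/* A single data descriptor may carry at most 2^12 = 4096 bytes of payload. */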
+#define E1000_MAX_TXD_PWR	12
+#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
+
+static int e1000_tx_map(struct e1000_adapter *adapter,
+			struct e1000_tx_ring *tx_ring,
+			struct sk_buff *skb, unsigned int first,
+			unsigned int max_per_txd, unsigned int nr_frags,
+			unsigned int mss)
+{
+	struct e1000_buffer *buffer_info;
+	unsigned int len = skb->len;
+	unsigned int offset = 0, size, count = 0, i;
+#ifdef MAX_SKB_FRAGS
+	unsigned int f;
+	len -= skb->data_len;
+#endif
+
+	i = tx_ring->next_to_use;
+
+	while (len) {
+		buffer_info = &tx_ring->buffer_info[i];
+		size = min(len, max_per_txd);
+#ifdef NETIF_F_TSO
+		/* Workaround for Controller erratum --
+		 * descriptor for non-tso packet in a linear SKB that follows a
+		 * tso gets written back prematurely before the data is fully
+		 * DMA'd to the controller */
+		if (tx_ring->last_tx_tso && !skb_is_gso(skb)) {
+			tx_ring->last_tx_tso = 0;
+			if (!skb->data_len)
+				size -= 4;
+		}
+
+		/* Workaround for premature desc write-backs
+		 * in TSO mode.  Append 4-byte sentinel desc */
+		if (unlikely(mss && !nr_frags && size == len && size > 8))
+			size -= 4;
+#endif
+		/* work-around for errata 10 and it applies
+		 * to all controllers in PCI-X mode
+		 * The fix is to make sure that the first descriptor of a
+		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
+		 */
+		if (unlikely((adapter->hw.bus.type == e1000_bus_type_pcix) &&
+				(size > 2015) && count == 0))
+			size = 2015;
+
+		/* Workaround for potential 82544 hang in PCI-X.  Avoid
+		 * terminating buffers within evenly-aligned dwords. */
+		if (unlikely(adapter->pcix_82544 &&
+		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
+		   size > 4))
+			size -= 4;
+
+		buffer_info->length = size;
+		/* set time_stamp *before* dma to help avoid a possible race */
+		buffer_info->time_stamp = jiffies;
+		buffer_info->dma =
+			pci_map_single(adapter->pdev,
+				skb->data + offset,
+				size,
+				PCI_DMA_TODEVICE);
+		buffer_info->next_to_watch = i;
+
+		len -= size;
+		offset += size;
+		count++;
+		if (unlikely(++i == tx_ring->count)) i = 0;
+	}
+
+#ifdef MAX_SKB_FRAGS
+	for (f = 0; f < nr_frags; f++) {
+		struct skb_frag_struct *frag;
+
+		frag = &skb_shinfo(skb)->frags[f];
+		len = frag->size;
+		offset = frag->page_offset;
+
+		while (len) {
+			buffer_info = &tx_ring->buffer_info[i];
+			size = min(len, max_per_txd);
+#ifdef NETIF_F_TSO
+			/* Workaround for premature desc write-backs
+			 * in TSO mode.  Append 4-byte sentinel desc */
+			if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+				size -= 4;
+#endif
+			/* Workaround for potential 82544 hang in PCI-X.
+			 * Avoid terminating buffers within evenly-aligned
+			 * dwords. */
+			if (unlikely(adapter->pcix_82544 &&
+			   !((unsigned long)(frag->page+offset+size-1) & 4) &&
+			   size > 4))
+				size -= 4;
+
+			buffer_info->length = size;
+			buffer_info->time_stamp = jiffies;
+			buffer_info->dma =
+				pci_map_page(adapter->pdev,
+					frag->page,
+					offset,
+					size,
+					PCI_DMA_TODEVICE);
+			buffer_info->next_to_watch = i;
+
+			len -= size;
+			offset += size;
+			count++;
+			if (unlikely(++i == tx_ring->count)) i = 0;
+		}
+	}
+#endif
+
+	i = (i == 0) ? tx_ring->count - 1 : i - 1;
+	tx_ring->buffer_info[i].skb = skb;
+	tx_ring->buffer_info[first].next_to_watch = i;
+
+	return count;
+}
+
+static void e1000_tx_queue(struct e1000_adapter *adapter,
+			   struct e1000_tx_ring *tx_ring,
+			   int tx_flags, int count, nanosecs_abs_t *xmit_stamp)
+{
+	struct e1000_tx_desc *tx_desc = NULL;
+	struct e1000_buffer *buffer_info;
+	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
+	unsigned int i;
+	rtdm_lockctx_t context;
+
+	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
+		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
+			     E1000_TXD_CMD_TSE;
+		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+
+		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
+			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
+	}
+
+	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
+		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+	}
+
+	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
+		txd_lower |= E1000_TXD_CMD_VLE;
+		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
+	}
+
+	i = tx_ring->next_to_use;
+
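+	/* Write one data descriptor per mapped buffer; the last descriptor
+	 * additionally gets the adapter's per-packet command bits
+	 * (adapter->txd_cmd). */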
+	while (count--) {
+		buffer_info = &tx_ring->buffer_info[i];
+		tx_desc = E1000_TX_DESC(*tx_ring, i);
+		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+		tx_desc->lower.data =
+			cpu_to_le32(txd_lower | buffer_info->length);
+		tx_desc->upper.data = cpu_to_le32(txd_upper);
+		if (unlikely(++i == tx_ring->count)) i = 0;
+	}
+
+	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
+
+	rtdm_lock_irqsave(context);
+
+	if (xmit_stamp)
+		*xmit_stamp = cpu_to_be64(rtdm_clock_read() + *xmit_stamp);
+
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64). */
+	wmb();
+
+	tx_ring->next_to_use = i;
+	writel(i, adapter->hw.hw_addr + tx_ring->tdt);
+
+	rtdm_lock_irqrestore(context);
+
+	/* we need this if more than one processor can write to our tail
+	 * at a time, it synchronizes IO on IA64/Altix systems */
+	mmiowb();
+}
+
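+/* Every frame occupies its own length plus a 16-byte header, rounded up to a
+ * 16-byte boundary, in the 82547 Tx FIFO; E1000_82547_PAD_LEN (0x3E0 = 992
+ * bytes) is the guard threshold used in the check below. */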
+#define E1000_FIFO_HDR			0x10
+#define E1000_82547_PAD_LEN		0x3E0
+
+/**
+ * e1000_82547_fifo_workaround - avoid a controller hang in half-duplex mode
+ * @adapter: board private structure
+ * @skb: packet about to be queued for transmission
+ *
+ * The workaround is to avoid queuing a large packet that would span
+ * the internal Tx FIFO ring boundary by notifying the stack to resend
+ * the packet at a later time.  This gives the Tx FIFO an opportunity to
+ * flush all packets.  When that occurs, we reset the Tx FIFO pointers
+ * to the beginning of the Tx FIFO.
+ **/
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+				       struct sk_buff *skb)
+{
+	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
+	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
+
+	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
+
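+	/* The hang can only occur in half duplex; skip the check otherwise. */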
+	if (adapter->link_duplex != HALF_DUPLEX)
+		goto no_fifo_stall_required;
+
+	if (atomic_read(&adapter->tx_fifo_stall))
+		return 1;
+
+	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
+		atomic_set(&adapter->tx_fifo_stall, 1);
+		return 1;
+	}
+
+no_fifo_stall_required:
+	adapter->tx_fifo_head += skb_fifo_len;
+	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
+		adapter->tx_fifo_head -= adapter->tx_fifo_size;
+	return 0;
+}
+
+#define MINIMUM_DHCP_PACKET_SIZE 282
+static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
+				    struct sk_buff *skb)
+{
+	struct e1000_hw *hw =  &adapter->hw;
+	u16 length, offset;
+#ifdef NETIF_F_HW_VLAN_TX
+	if (vlan_tx_tag_present(skb)) {
+		if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id)
+		    && (adapter->hw.mng_cookie.status &
+			E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
+			return 0;
+	}
+#endif
+	if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
+		struct ethhdr *eth = (struct ethhdr *) skb->data;
+		if ((htons(ETH_P_IP) == eth->h_proto)) {
+			const struct iphdr *ip =
+				(struct iphdr *)((u8 *)skb->data+14);
+			if (IPPROTO_UDP == ip->protocol) {
+				struct udphdr *udp =
+					(struct udphdr *)((u8 *)ip +
+						(ip->ihl << 2));
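+				/* Destination port 67 (BOOTP/DHCP server):
+				 * hand the payload to the management firmware. */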
+				if (ntohs(udp->dest) == 67) {
+					offset = (u8 *)udp + 8 - skb->data;
+					length = skb->len - offset;
+
+					return e1000_mng_write_dhcp_info(hw,
+							(u8 *)udp + 8,
+							length);
+				}
+			}
+		}
+	}
+	return 0;
+}
+
+static int __e1000_maybe_stop_tx(struct net_device *netdev,
+				 struct e1000_tx_ring *tx_ring, int size)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	rtnetif_stop_queue(netdev);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it. */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available. */
+	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! */
+	rtnetif_start_queue(netdev);
+	++adapter->restart_queue;
+	return 0;
+}
+
+static int e1000_maybe_stop_tx(struct net_device *netdev,
+			       struct e1000_tx_ring *tx_ring, int size)
+{
+	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __e1000_maybe_stop_tx(netdev, tx_ring, size);
+}
+
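+/* Conservative upper bound on the number of 2^X-byte descriptors needed for S
+ * bytes: rounds up, and over-reserves one descriptor when S is an exact
+ * multiple of 2^X. */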
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
+static int e1000_xmit_frame_ring(struct sk_buff *skb,
+				 struct net_device *netdev,
+				 struct e1000_tx_ring *tx_ring)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
+	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
+	unsigned int tx_flags = 0;
+	unsigned int len = skb->len;
+	unsigned long irq_flags;
+	unsigned int nr_frags = 0;
+	unsigned int mss = 0;
+	int count = 0;
+	int tso;
+#ifdef MAX_SKB_FRAGS
+	unsigned int f;
+	len -= skb->data_len;
+#endif
+
+	if (test_bit(__E1000_DOWN, &adapter->state)) {
+		kfree_rtskb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (unlikely(skb->len <= 0)) {
+		kfree_rtskb(skb);
+		return NETDEV_TX_OK;
+	}
+
+
+	/* 82571 and newer controllers don't need the workaround that limited
+	 * the descriptor length to 4kB */
+	if (adapter->hw.mac.type >= e1000_82571)
+		max_per_txd = 8192;
+
+#ifdef NETIF_F_TSO
+	mss = skb_shinfo(skb)->gso_size;
+	/* The controller does a simple calculation to
+	 * make sure there is enough room in the FIFO before
+	 * initiating the DMA for each buffer.  The calc is:
+	 * 4 = ceil(buffer len/mss).  To make sure we don't
+	 * overrun the FIFO, adjust the max buffer len if mss
+	 * drops. */
+	if (mss) {
+		u8 hdr_len;
+		max_per_txd = min(mss << 2, max_per_txd);
+		max_txd_pwr = fls(max_per_txd) - 1;
+
+		/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
+		* points to just header, pull a few bytes of payload from
+		* frags into skb->data */
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
+			switch (adapter->hw.mac.type) {
+				unsigned int pull_size;
+			case e1000_82544:
+				/* Make sure we have room to chop off 4 bytes,
+				 * and that the end alignment will work out to
+				 * this hardware's requirements
+				 * NOTE: this is a TSO only workaround
+				 * if end byte alignment not correct move us
+				 * into the next dword */
+				if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
+					break;
+				fallthrough;
+			case e1000_82571:
+			case e1000_82572:
+			case e1000_82573:
+			case e1000_ich8lan:
+			case e1000_ich9lan:
+				pull_size = min((unsigned int)4, skb->data_len);
+				if (!__pskb_pull_tail(skb, pull_size)) {
+					DPRINTK(DRV, ERR,
+						"__pskb_pull_tail failed.\n");
+					kfree_rtskb(skb);
+					return NETDEV_TX_OK;
+				}
+				len = skb->len - skb->data_len;
+				break;
+			default:
+				/* do nothing */
+				break;
+			}
+		}
+	}
+
+	/* reserve a descriptor for the offload context */
+	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
+		count++;
+	count++;
+#else
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		count++;
+#endif
+
+#ifdef NETIF_F_TSO
+	/* Controller Erratum workaround */
+	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
+		count++;
+#endif
+
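+	/* Descriptors needed for the linear part of the skb. */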
+	count += TXD_USE_COUNT(len, max_txd_pwr);
+
+	if (adapter->pcix_82544)
+		count++;
+
+	/* work-around for errata 10 and it applies to all controllers
+	 * in PCI-X mode, so add one more descriptor to the count
+	 */
+	if (unlikely((adapter->hw.bus.type == e1000_bus_type_pcix) &&
+			(len > 2015)))
+		count++;
+
+#ifdef MAX_SKB_FRAGS
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	for (f = 0; f < nr_frags; f++)
+		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
+				       max_txd_pwr);
+	if (adapter->pcix_82544)
+		count += nr_frags;
+
+#endif
+
+	if (adapter->hw.mac.tx_pkt_filtering &&
+	    (adapter->hw.mac.type == e1000_82573))
+		e1000_transfer_dhcp_info(adapter, skb);
+
+	rtdm_lock_get_irqsave(&tx_ring->tx_lock, irq_flags);
+
+	/* need: count + 2 desc gap to keep tail from touching
+	 * head, otherwise try next time */
+	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) {
+		rtdm_lock_put_irqrestore(&tx_ring->tx_lock, irq_flags);
+		rtdm_printk("FATAL: rt_e1000 ran into tail close to head situation!\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	if (unlikely(adapter->hw.mac.type == e1000_82547)) {
+		if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
+			rtnetif_stop_queue(netdev);
+			rtdm_lock_put_irqrestore(&tx_ring->tx_lock, irq_flags);
+			if (!test_bit(__E1000_DOWN, &adapter->state))
+				schedule_delayed_work(&adapter->fifo_stall_task,
+						      1);
+			rtdm_printk("FATAL: rt_e1000 ran into the 82547 controller bug!\n");
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+#ifndef NETIF_F_LLTX
+	rtdm_lock_put_irqrestore(&tx_ring->tx_lock, irq_flags);
+
+#endif
+#ifdef NETIF_F_HW_VLAN_TX
+	if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
+		tx_flags |= E1000_TX_FLAGS_VLAN;
+		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+	}
+#endif
+
+	first = tx_ring->next_to_use;
+
+	tso = e1000_tso(adapter, tx_ring, skb);
+	if (tso < 0) {
+		kfree_rtskb(skb);
+#ifdef NETIF_F_LLTX
+		rtdm_lock_put_irqrestore(&tx_ring->tx_lock, irq_flags);
+#endif
+		return NETDEV_TX_OK;
+	}
+
+	if (likely(tso)) {
+		tx_ring->last_tx_tso = 1;
+		tx_flags |= E1000_TX_FLAGS_TSO;
+	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
+		tx_flags |= E1000_TX_FLAGS_CSUM;
+
+	/* The old method assumed an IPv4 packet by default whenever TSO was
+	 * enabled.  82571 hardware supports TSO for IPv6 as well, so we can
+	 * no longer make that assumption. */
+	if (likely(skb->protocol == htons(ETH_P_IP)))
+		tx_flags |= E1000_TX_FLAGS_IPV4;
+
+	e1000_tx_queue(adapter, tx_ring, tx_flags,
+		       e1000_tx_map(adapter, tx_ring, skb, first,
+				    max_per_txd, nr_frags, mss),
+		   skb->xmit_stamp);
+
+	// netdev->trans_start = jiffies;
+
+	/* Make sure there is space in the ring for the next send. */
+	// e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
+
+#ifdef NETIF_F_LLTX
+	rtdm_lock_put_irqrestore(&tx_ring->tx_lock, irq_flags);
+#endif
+	return NETDEV_TX_OK;
+}
+
+static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+
+	/* This goes back to the question of how to logically map a tx queue
+	 * to a flow.  Right now, performance is impacted slightly negatively
+	 * if using multiple tx queues.  If the stack breaks away from a
+	 * single qdisc implementation, we can look at this again. */
+	return (e1000_xmit_frame_ring(skb, netdev, tx_ring));
+}
+
+#ifdef CONFIG_E1000_MQ
+static int e1000_subqueue_xmit_frame(struct sk_buff *skb,
+				     struct net_device *netdev, int queue)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_tx_ring *tx_ring = &adapter->tx_ring[queue];
+
+	return (e1000_xmit_frame_ring(skb, netdev, tx_ring));
+}
+#endif
+
+
+/**
+ * e1000_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+#if 0
+static void e1000_tx_timeout(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	/* Do the reset outside of interrupt context */
+	adapter->tx_timeout_count++;
+	schedule_work(&adapter->reset_task);
+}
+#endif
+
+static void e1000_reset_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter;
+	adapter = container_of(work, struct e1000_adapter, reset_task);
+
+	e1000_reinit_locked(adapter);
+}
+
+#if 0
+/**
+ * e1000_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats * e1000_get_stats(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	/* only return the current stats */
+	return &adapter->net_stats;
+}
+
+/**
+ * e1000_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	int max_frame = new_mtu + ETH_HLEN + ETHERNET_FCS_SIZE;
+	u16 eeprom_data = 0;
+
+	if ((max_frame < ETH_ZLEN + ETHERNET_FCS_SIZE) ||
+	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+		DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
+		return -EINVAL;
+	}
+
+	/* Adapter-specific max frame size limits. */
+	switch (adapter->hw.mac.type) {
+	case e1000_undefined:
+	case e1000_82542:
+	case e1000_ich8lan:
+		if (max_frame > ETH_FRAME_LEN + ETHERNET_FCS_SIZE) {
+			DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
+			return -EINVAL;
+		}
+		break;
+	case e1000_82573:
+		/* Jumbo Frames not supported if:
+		 * - this is not an 82573L device
+		 * - ASPM is enabled in any way (0x1A bits 3:2) */
+		e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1, &eeprom_data);
+		if ((adapter->hw.device_id != E1000_DEV_ID_82573L) ||
+		    (eeprom_data & NVM_WORD1A_ASPM_MASK)) {
+			if (max_frame > ETH_FRAME_LEN + ETHERNET_FCS_SIZE) {
+				DPRINTK(PROBE, ERR,
+					"Jumbo Frames not supported.\n");
+				return -EINVAL;
+			}
+			break;
+		}
+		/* ERT will be enabled later to enable wire speed receives */
+
+		/* fall through to get support */
+	case e1000_ich9lan:
+		if ((adapter->hw.phy.type == e1000_phy_ife) &&
+		    (max_frame > ETH_FRAME_LEN + ETHERNET_FCS_SIZE)) {
+			DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
+			return -EINVAL;
+		}
+		/* fall through to get support */
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
+		if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+			DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
+		break;
+	}
+
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+		msleep(1);
+	/* e1000_down has a dependency on max_frame_size */
+	adapter->max_frame_size = max_frame;
+	if (rtnetif_running(netdev))
+		e1000_down(adapter);
+
+	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+	 * means we reserve 2 more, this pushes us to allocate from the next
+	 * larger slab size.
+	 * i.e. RXBUFFER_2048 --> size-4096 slab
+	 *  however with the new *_jumbo_rx* routines, jumbo receives will use
+	 *  fragmented skbs */
+
+	if (max_frame <= E1000_RXBUFFER_256)
+		adapter->rx_buffer_len = E1000_RXBUFFER_256;
+	else if (max_frame <= E1000_RXBUFFER_512)
+		adapter->rx_buffer_len = E1000_RXBUFFER_512;
+	else if (max_frame <= E1000_RXBUFFER_1024)
+		adapter->rx_buffer_len = E1000_RXBUFFER_1024;
+	else if (max_frame <= E1000_RXBUFFER_2048)
+		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+#ifdef CONFIG_E1000_NAPI
+	else
+		adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+#else
+	else if (max_frame <= E1000_RXBUFFER_4096)
+		adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+	else if (max_frame <= E1000_RXBUFFER_8192)
+		adapter->rx_buffer_len = E1000_RXBUFFER_8192;
+	else if (max_frame <= E1000_RXBUFFER_16384)
+		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+#endif
+
+	/* adjust allocation if LPE protects us, and we aren't using SBP */
+	if (!e1000_tbi_sbp_enabled_82543(&adapter->hw) &&
+	    ((max_frame == ETH_FRAME_LEN + ETHERNET_FCS_SIZE) ||
+	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
+		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+
+	DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
+		netdev->mtu, new_mtu);
+	netdev->mtu = new_mtu;
+
+	if (rtnetif_running(netdev))
+		e1000_up(adapter);
+	else
+		e1000_reset(adapter);
+
+	clear_bit(__E1000_RESETTING, &adapter->state);
+
+	return 0;
+}
+#endif
+
+/**
+ * e1000_update_stats - Update the board statistics counters
+ * @adapter: board private structure
+ **/
+void e1000_update_stats(struct e1000_adapter *adapter)
+{
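+	/* Stubbed out: byte/packet counters are updated directly in the
+	 * Tx/Rx cleanup paths instead. */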
+}
+#ifdef SIOCGMIIPHY
+
+/**
+ * e1000_phy_read_status - Update the PHY register status snapshot
+ * @adapter: board private structure
+ **/
+static void e1000_phy_read_status(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_phy_regs *phy = &adapter->phy_regs;
+	int ret_val = E1000_SUCCESS;
+	unsigned long irq_flags;
+
+
+	rtdm_lock_get_irqsave(&adapter->stats_lock, irq_flags);
+
+	if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU) {
+		ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy->bmcr);
+		ret_val |= e1000_read_phy_reg(hw, PHY_STATUS, &phy->bmsr);
+		ret_val |= e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
+					      &phy->advertise);
+		ret_val |= e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy->lpa);
+		ret_val |= e1000_read_phy_reg(hw, PHY_AUTONEG_EXP,
+					      &phy->expansion);
+		ret_val |= e1000_read_phy_reg(hw, PHY_1000T_CTRL,
+					      &phy->ctrl1000);
+		ret_val |= e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+					      &phy->stat1000);
+		ret_val |= e1000_read_phy_reg(hw, PHY_EXT_STATUS,
+					      &phy->estatus);
+		if (ret_val)
+			DPRINTK(DRV, WARNING, "Error reading PHY register\n");
+	} else {
+		/* Do not read PHY registers if link is not up
+		 * Set values to typical power-on defaults */
+		phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
+		phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
+			     BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
+			     BMSR_ERCAP);
+		phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
+				  ADVERTISE_ALL | ADVERTISE_CSMA);
+		phy->lpa = 0;
+		phy->expansion = EXPANSION_ENABLENPAGE;
+		phy->ctrl1000 = ADVERTISE_1000FULL;
+		phy->stat1000 = 0;
+		phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
+	}
+
+	rtdm_lock_put_irqrestore(&adapter->stats_lock, irq_flags);
+}
+#endif
+
+
+/**
+ * e1000_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static int e1000_intr_msi(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *netdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+#ifndef CONFIG_E1000_NAPI
+	int i, j;
+	int rx_cleaned, tx_cleaned;
+#endif
+	u32 icr = E1000_READ_REG(hw, E1000_ICR);
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+#ifdef CONFIG_E1000_NAPI
+	/* read ICR disables interrupts using IAM, so keep up with our
+	 * enable/disable accounting */
+	atomic_inc(&adapter->irq_sem);
+#endif
+	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+		hw->mac.get_link_status = 1;
+		/* ICH8 workaround-- Call gig speed drop workaround on cable
+		 * disconnect (LSC) before accessing any PHY registers */
+		if ((hw->mac.type == e1000_ich8lan) &&
+		    (hw->phy.type == e1000_phy_igp_3) &&
+		    (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)))
+			e1000_gig_downshift_workaround_ich8lan(hw);
+
+		/* 80003ES2LAN workaround-- For packet buffer work-around on
+		 * link down event; disable receives here in the ISR and reset
+		 * adapter in watchdog */
+		if (rtnetif_carrier_ok(netdev) &&
+		    (adapter->flags & E1000_FLAG_RX_NEEDS_RESTART)) {
+			/* disable receives */
+			u32 rctl = E1000_READ_REG(hw, E1000_RCTL);
+			E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+			adapter->flags |= E1000_FLAG_RX_RESTART_NOW;
+		}
+		/* guard against interrupt when we're going down */
+		//if (!test_bit(__E1000_DOWN, &adapter->state))
+		//	mod_timer(&adapter->watchdog_timer, jiffies + 1);
+	}
+
+#ifdef CONFIG_E1000_NAPI
+	/* XXX only using ring 0 for napi */
+	if (likely(netif_rx_schedule_prep(netdev, &adapter->rx_ring[0].napi))) {
+		adapter->total_tx_bytes = 0;
+		adapter->total_tx_packets = 0;
+		adapter->total_rx_bytes = 0;
+		adapter->total_rx_packets = 0;
+		__netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);
+	} else {
+		atomic_dec(&adapter->irq_sem);
+	}
+#else
+	adapter->total_tx_bytes = 0;
+	adapter->total_rx_bytes = 0;
+	adapter->total_tx_packets = 0;
+	adapter->total_rx_packets = 0;
+	adapter->data_received = 0;
+
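+	/* Service the Rx and Tx rings for a bounded number of passes from the
+	 * ISR, stopping early once a pass cleans nothing. */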
+	for (i = 0; i < E1000_MAX_INTR; i++) {
+		rx_cleaned = 0;
+		for (j = 0; j < adapter->num_rx_queues; j++)
+			rx_cleaned |= adapter->clean_rx(adapter,
+							&adapter->rx_ring[j], &time_stamp);
+
+		tx_cleaned = 0;
+		for (j = 0 ; j < adapter->num_tx_queues ; j++)
+			tx_cleaned |= e1000_clean_tx_irq(adapter,
+							 &adapter->tx_ring[j]);
+
+		if (!rx_cleaned && !tx_cleaned)
+			break;
+	}
+
+	if (likely(adapter->itr_setting & 3))
+		e1000_set_itr(adapter);
+#endif
+
+	if (adapter->data_received)
+		rt_mark_stack_mgr(netdev);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+/**
+ * e1000_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static int e1000_intr(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *netdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl, icr = E1000_READ_REG(hw, E1000_ICR);
+#ifndef CONFIG_E1000_NAPI
+	int i, j;
+	int rx_cleaned, tx_cleaned;
+#endif
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+	if (unlikely(!icr))
+		return RTDM_IRQ_NONE;  /* Not our interrupt */
+
+#ifdef CONFIG_E1000_NAPI
+	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+	 * not set, then the adapter didn't send an interrupt */
+	if ((adapter->flags & E1000_FLAG_INT_ASSERT_AUTO_MASK) &&
+	    !(icr & E1000_ICR_INT_ASSERTED))
+		return RTDM_IRQ_NONE;
+
+	/* Interrupt Auto-Mask...upon reading ICR,
+	 * interrupts are masked.  No need for the
+	 * IMC write, but it does mean we should
+	 * account for it ASAP. */
+	if (likely(hw->mac.type >= e1000_82571))
+		atomic_inc(&adapter->irq_sem);
+#endif
+
+	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
+		hw->mac.get_link_status = 1;
+		/* ICH8 workaround-- Call gig speed drop workaround on cable
+		 * disconnect (LSC) before accessing any PHY registers */
+		if ((hw->mac.type == e1000_ich8lan) &&
+		    (hw->phy.type == e1000_phy_igp_3) &&
+		    (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)))
+			e1000_gig_downshift_workaround_ich8lan(hw);
+
+		/* 80003ES2LAN workaround--
+		 * For packet buffer work-around on link down event;
+		 * disable receives here in the ISR and
+		 * reset adapter in watchdog
+		 */
+		if (rtnetif_carrier_ok(netdev) &&
+		    (adapter->flags & E1000_FLAG_RX_NEEDS_RESTART)) {
+			/* disable receives */
+			rctl = E1000_READ_REG(hw, E1000_RCTL);
+			E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+			adapter->flags |= E1000_FLAG_RX_RESTART_NOW;
+		}
+		/* guard against interrupt when we're going down */
+		//if (!test_bit(__E1000_DOWN, &adapter->state))
+		//	mod_timer(&adapter->watchdog_timer, jiffies + 1);
+	}
+
+#ifdef CONFIG_E1000_NAPI
+	if (hw->mac.type < e1000_82571) {
+		/* disable interrupts, without the synchronize_irq bit */
+		atomic_inc(&adapter->irq_sem);
+		E1000_WRITE_REG(hw, E1000_IMC, ~0);
+		E1000_WRITE_FLUSH(hw);
+	}
+	/* XXX only using ring 0 for napi */
+	if (likely(netif_rx_schedule_prep(netdev, &adapter->rx_ring[0].napi))) {
+		adapter->total_tx_bytes = 0;
+		adapter->total_tx_packets = 0;
+		adapter->total_rx_bytes = 0;
+		adapter->total_rx_packets = 0;
+		__netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);
+	} else {
+		atomic_dec(&adapter->irq_sem);
+	}
+#else
+	/* Writing IMC and IMS is needed for 82547.
+	 * Due to Hub Link bus being occupied, an interrupt
+	 * de-assertion message is not able to be sent.
+	 * When an interrupt assertion message is generated later,
+	 * two messages are re-ordered and sent out.
+	 * That causes APIC to think 82547 is in de-assertion
+	 * state, while 82547 is in assertion state, resulting
+	 * in dead lock. Writing IMC forces 82547 into
+	 * de-assertion state.
+	 */
+	if (hw->mac.type == e1000_82547 || hw->mac.type == e1000_82547_rev_2) {
+		atomic_inc(&adapter->irq_sem);
+		E1000_WRITE_REG(hw, E1000_IMC, ~0);
+	}
+
+	adapter->data_received = 0;
+	adapter->total_tx_bytes = 0;
+	adapter->total_rx_bytes = 0;
+	adapter->total_tx_packets = 0;
+	adapter->total_rx_packets = 0;
+
+	for (i = 0; i < E1000_MAX_INTR; i++) {
+		rx_cleaned = 0;
+		for (j = 0; j < adapter->num_rx_queues; j++)
+			rx_cleaned |= adapter->clean_rx(adapter,
+							&adapter->rx_ring[j], &time_stamp);
+
+		tx_cleaned = 0;
+		for (j = 0 ; j < adapter->num_tx_queues ; j++)
+			tx_cleaned |= e1000_clean_tx_irq(adapter,
+							 &adapter->tx_ring[j]);
+
+		if (!rx_cleaned && !tx_cleaned)
+			break;
+	}
+
+	if (likely(adapter->itr_setting & 3))
+		e1000_set_itr(adapter);
+
+	if (hw->mac.type == e1000_82547 || hw->mac.type == e1000_82547_rev_2)
+		e1000_irq_enable(adapter);
+
+#endif
+
+	if (adapter->data_received)
+		rt_mark_stack_mgr(netdev);
+	return RTDM_IRQ_HANDLED;
+}
+
+#ifdef CONFIG_E1000_NAPI
+/**
+ * e1000_poll - NAPI Rx polling callback
+ * @napi: struct associated with this polling callback
+ * @budget: amount of packets driver is allowed to process this poll
+ **/
+static int e1000_poll(struct napi_struct *napi, int budget)
+{
+	struct e1000_rx_ring *rx_ring = container_of(napi, struct e1000_rx_ring,
+						     napi);
+	struct e1000_adapter *adapter = rx_ring->adapter;
+	struct net_device *netdev = adapter->netdev;
+	int tx_clean_complete = 1, work_done = 0;
+	int i;
+
+	/* FIXME: i think this code is un-necessary when using base netdev */
+	/* Keep link state information with original netdev */
+	if (!rtnetif_carrier_ok(netdev))
+		goto quit_polling;
+
+	/* e1000_poll is called per-cpu.  This lock protects
+	 * tx_ring[i] from being cleaned by multiple cpus
+	 * simultaneously.  A failure obtaining the lock means
+	 * tx_ring[i] is currently being cleaned anyway. */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+#ifdef CONFIG_E1000_MQ
+		if (spin_trylock(&adapter->tx_ring[i].tx_queue_lock)) {
+			tx_clean_complete &= e1000_clean_tx_irq(adapter,
+							&adapter->tx_ring[i]);
+			spin_unlock(&adapter->tx_ring[i].tx_queue_lock);
+		}
+#else
+		if (spin_trylock(&adapter->tx_queue_lock)) {
+			tx_clean_complete &= e1000_clean_tx_irq(adapter,
+							&adapter->tx_ring[i]);
+			spin_unlock(&adapter->tx_queue_lock);
+		}
+#endif
+	}
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		adapter->clean_rx(adapter, &adapter->rx_ring[i],
+				  &work_done, budget);
+	}
+
+	/* If no Tx and not enough Rx work done, exit the polling mode */
+	if ((tx_clean_complete && (work_done == 0)) ||
+	   !rtnetif_running(netdev)) {
+quit_polling:
+		if (likely(adapter->itr_setting & 3))
+			e1000_set_itr(adapter);
+		netif_rx_complete(netdev, napi);
+		if (test_bit(__E1000_DOWN, &adapter->state))
+			atomic_dec(&adapter->irq_sem);
+		else
+			e1000_irq_enable(adapter);
+		return 0;
+	}
+
+	/* need to make sure the stack is aware of a tx-only poll loop */
+	if (!tx_clean_complete)
+		work_done = budget;
+
+	return work_done;
+}
+
+#endif
+/**
+ * e1000_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ *
+ * the return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned
+ **/
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *tx_ring)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct e1000_tx_desc *tx_desc, *eop_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i, eop;
+#ifdef CONFIG_E1000_NAPI
+	unsigned int count = 0;
+#endif
+	bool cleaned = FALSE;
+	bool retval = TRUE;
+	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+
+
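+	/* Walk the ring from next_to_clean, releasing buffers one packet at a
+	 * time (up to each packet's end-of-packet descriptor), until we hit a
+	 * descriptor the hardware has not written back yet. */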
+	i = tx_ring->next_to_clean;
+	eop = tx_ring->buffer_info[i].next_to_watch;
+	eop_desc = E1000_TX_DESC(*tx_ring, eop);
+
+	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
+		for (cleaned = FALSE; !cleaned; ) {
+			tx_desc = E1000_TX_DESC(*tx_ring, i);
+			buffer_info = &tx_ring->buffer_info[i];
+			cleaned = (i == eop);
+
+#ifdef CONFIG_E1000_MQ
+			tx_ring->tx_stats.bytes += buffer_info->length;
+#endif
+			if (cleaned) {
+				struct sk_buff *skb = buffer_info->skb;
+#ifdef NETIF_F_TSO
+				unsigned int segs, bytecount;
+				segs = skb_shinfo(skb)->gso_segs ?: 1;
+				/* multiply data chunks by size of headers */
+				bytecount = ((segs - 1) * skb_headlen(skb)) +
+					    skb->len;
+				total_tx_packets += segs;
+				total_tx_bytes += bytecount;
+#else
+				total_tx_packets++;
+				total_tx_bytes += skb->len;
+#endif
+			}
+			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+			tx_desc->upper.data = 0;
+
+			if (unlikely(++i == tx_ring->count)) i = 0;
+		}
+
+#ifdef CONFIG_E1000_MQ
+		tx_ring->tx_stats.packets++;
+#endif
+		eop = tx_ring->buffer_info[i].next_to_watch;
+		eop_desc = E1000_TX_DESC(*tx_ring, eop);
+#ifdef CONFIG_E1000_NAPI
+#define E1000_TX_WEIGHT 64
+		/* weight of a sort for tx, to avoid endless transmit cleanup */
+		if (count++ == E1000_TX_WEIGHT) {
+			retval = FALSE;
+			break;
+		}
+#endif
+	}
+
+	tx_ring->next_to_clean = i;
+
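+/* Wake a stopped queue once at least 32 descriptors are free again. */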
+#define TX_WAKE_THRESHOLD 32
+	if (unlikely(cleaned && rtnetif_carrier_ok(netdev) &&
+		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+
+		if (rtnetif_queue_stopped(netdev) &&
+		    !(test_bit(__E1000_DOWN, &adapter->state))) {
+			rtnetif_wake_queue(netdev);
+			++adapter->restart_queue;
+		}
+	}
+
+	if (adapter->detect_tx_hung) {
+		/* Detect a transmit hang in hardware, this serializes the
+		 * check with the clearing of time_stamp and movement of i */
+		adapter->detect_tx_hung = FALSE;
+		if (tx_ring->buffer_info[eop].dma &&
+		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
+			       (adapter->tx_timeout_factor * HZ))
+		    && !(E1000_READ_REG(&adapter->hw, E1000_STATUS) &
+			 E1000_STATUS_TXOFF)) {
+
+			/* detected Tx unit hang */
+			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+					"  Tx Queue             <%lu>\n"
+					"  TDH                  <%x>\n"
+					"  TDT                  <%x>\n"
+					"  next_to_use          <%x>\n"
+					"  next_to_clean        <%x>\n"
+					"buffer_info[next_to_clean]\n"
+					"  time_stamp           <%lx>\n"
+					"  next_to_watch        <%x>\n"
+					"  jiffies              <%lx>\n"
+					"  next_to_watch.status <%x>\n",
+				(unsigned long)((tx_ring - adapter->tx_ring) /
+					sizeof(struct e1000_tx_ring)),
+				readl(adapter->hw.hw_addr + tx_ring->tdh),
+				readl(adapter->hw.hw_addr + tx_ring->tdt),
+				tx_ring->next_to_use,
+				tx_ring->next_to_clean,
+				tx_ring->buffer_info[eop].time_stamp,
+				eop,
+				jiffies,
+				eop_desc->upper.fields.status);
+			rtnetif_stop_queue(netdev);
+		}
+	}
+	adapter->total_tx_bytes += total_tx_bytes;
+	adapter->total_tx_packets += total_tx_packets;
+	adapter->net_stats.tx_bytes += total_tx_bytes;
+	adapter->net_stats.tx_packets += total_tx_packets;
+	return retval;
+}
+
+/**
+ * e1000_rx_checksum - Receive Checksum Offload for 82543
+ * @adapter:     board private structure
+ * @status_err:  receive descriptor status and error fields
+ * @csum:        receive descriptor csum field
+ * @sk_buff:     socket buffer with received data
+ **/
+static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
+			      u32 csum, struct sk_buff *skb)
+{
+	u16 status = (u16)status_err;
+	u8 errors = (u8)(status_err >> 24);
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* 82543 or newer only */
+	if (unlikely(adapter->hw.mac.type < e1000_82543)) return;
+	/* Ignore Checksum bit is set */
+	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
+	/* TCP/UDP checksum error bit is set */
+	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
+		/* let the stack verify checksum errors */
+		adapter->hw_csum_err++;
+		return;
+	}
+	/* TCP/UDP Checksum has not been calculated */
+	if (adapter->hw.mac.type <= e1000_82547_rev_2) {
+		if (!(status & E1000_RXD_STAT_TCPCS))
+			return;
+	} else {
+		if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+			return;
+	}
+	/* It must be a TCP or UDP packet with a valid checksum */
+	if (likely(status & E1000_RXD_STAT_TCPCS)) {
+		/* TCP checksum is good */
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else if (adapter->hw.mac.type > e1000_82547_rev_2) {
+		/* IP fragment with UDP payload */
+		/* Hardware complements the payload checksum, so we undo it
+		 * and then put the value in host order for further stack use.
+		 */
+		csum = ntohl(csum ^ 0xFFFF);
+		skb->csum = csum;
+		skb->ip_summed = CHECKSUM_COMPLETE;
+	}
+	adapter->hw_csum_good++;
+}
+
+/**
+ * e1000_receive_skb - helper function to handle rx indications
+ * @adapter: board private structure
+ * @status: descriptor status field as written by hardware
+ * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
+ * @skb: pointer to sk_buff to be indicated to stack
+ **/
+static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
+			      u16 vlan, struct sk_buff *skb)
+{
+#ifdef CONFIG_E1000_NAPI
+#ifdef NETIF_F_HW_VLAN_TX
+	if (unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))) {
+		vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
+					 le16_to_cpu(vlan) &
+					 E1000_RXD_SPC_VLAN_MASK);
+	} else {
+		netif_receive_skb(skb);
+	}
+#else
+	netif_receive_skb(skb);
+#endif
+#else /* CONFIG_E1000_NAPI */
+#ifdef NETIF_F_HW_VLAN_TX
+	if (unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))) {
+		vlan_hwaccel_rx(skb, adapter->vlgrp,
+				le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK);
+	} else {
+		netif_rx(skb);
+	}
+#else
+	rtnetif_rx(skb);
+#endif
+#endif /* CONFIG_E1000_NAPI */
+}
+
+#ifdef CONFIG_E1000_NAPI
+/* NOTE: these new jumbo frame routines rely on NAPI because of the
+ * pskb_may_pull call, which eventually must call kmap_atomic which you cannot
+ * call from hard irq context */
+
+/**
+ * e1000_consume_page - helper function
+ **/
+static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
+			       u16 length)
+{
+	bi->page = NULL;
+	skb->len += length;
+	skb->data_len += length;
+	skb->truesize += length;
+}
+
+/**
+ * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ *
+ * the return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned
+ **/
+static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+					  struct e1000_rx_ring *rx_ring,
+					  int *work_done, int work_to_do)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc, *next_rxd;
+	struct e1000_rx_buffer *buffer_info, *next_buffer;
+	unsigned long irq_flags;
+	u32 length;
+	unsigned int i;
+	int cleaned_count = 0;
+	bool cleaned = FALSE;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (rx_desc->status & E1000_RXD_STAT_DD) {
+		struct sk_buff *skb;
+		u8 status;
+
+		if (*work_done >= work_to_do)
+			break;
+		(*work_done)++;
+
+		status = rx_desc->status;
+		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
+
+		if (++i == rx_ring->count) i = 0;
+		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		prefetch(next_rxd);
+
+		next_buffer = &rx_ring->buffer_info[i];
+
+		cleaned = TRUE;
+		cleaned_count++;
+		pci_unmap_page(pdev,
+			       buffer_info->dma,
+			       PAGE_SIZE,
+			       PCI_DMA_FROMDEVICE);
+		buffer_info->dma = 0;
+
+		length = le16_to_cpu(rx_desc->length);
+
+		/* errors is only valid for DD + EOP descriptors */
+		if (unlikely((status & E1000_RXD_STAT_EOP) &&
+		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
+			u8 last_byte = *(skb->data + length - 1);
+			if (TBI_ACCEPT(&adapter->hw, status,
+				      rx_desc->errors, length, last_byte,
+				      adapter->min_frame_size,
+				      adapter->max_frame_size)) {
+				rtdm_lock_get_irqsave(&adapter->stats_lock,
+						  irq_flags);
+				e1000_tbi_adjust_stats_82543(&adapter->hw,
+						      &adapter->stats,
+						      length, skb->data,
+						      adapter->max_frame_size);
+				rtdm_lock_put_irqrestore(&adapter->stats_lock,
+						       irq_flags);
+				length--;
+			} else {
+				/* recycle both page and skb */
+				buffer_info->skb = skb;
+				/* an error means any chain goes out the window
+				 * too */
+				if (rx_ring->rx_skb_top)
+					kfree_rtskb(rx_ring->rx_skb_top);
+				rx_ring->rx_skb_top = NULL;
+				goto next_desc;
+			}
+		}
+
+#define rxtop rx_ring->rx_skb_top
+		if (!(status & E1000_RXD_STAT_EOP)) {
+			/* this descriptor is only the beginning (or middle) */
+			if (!rxtop) {
+				/* this is the beginning of a chain */
+				rxtop = skb;
+				skb_fill_page_desc(rxtop, 0, buffer_info->page,
+						   0, length);
+			} else {
+				/* this is the middle of a chain */
+				skb_fill_page_desc(rxtop,
+				    skb_shinfo(rxtop)->nr_frags,
+				    buffer_info->page, 0, length);
+				/* re-use the skb, only consumed the page */
+				buffer_info->skb = skb;
+			}
+			e1000_consume_page(buffer_info, rxtop, length);
+			goto next_desc;
+		} else {
+			if (rxtop) {
+				/* end of the chain */
+				skb_fill_page_desc(rxtop,
+				    skb_shinfo(rxtop)->nr_frags,
+				    buffer_info->page, 0, length);
+				/* re-use the current skb, we only consumed the
+				 * page */
+				buffer_info->skb = skb;
+				skb = rxtop;
+				rxtop = NULL;
+				e1000_consume_page(buffer_info, skb, length);
+			} else {
+				/* no chain, got EOP, this buf is the packet
+				 * copybreak to save the put_page/alloc_page */
+				if (length <= copybreak &&
+				    skb_tailroom(skb) >= length) {
+					u8 *vaddr;
+					vaddr = kmap_atomic(buffer_info->page,
+							   KM_SKB_DATA_SOFTIRQ);
+					memcpy(skb_tail_pointer(skb), vaddr, length);
+					kunmap_atomic(vaddr,
+						      KM_SKB_DATA_SOFTIRQ);
+					/* re-use the page, so don't erase
+					 * buffer_info->page */
+					rtskb_put(skb, length);
+				} else {
+					skb_fill_page_desc(skb, 0,
+							   buffer_info->page, 0,
+							   length);
+					e1000_consume_page(buffer_info, skb,
+							   length);
+				}
+			}
+		}
+
+		/* Receive Checksum Offload XXX recompute due to CRC strip? */
+		e1000_rx_checksum(adapter,
+				  (u32)(status) |
+				  ((u32)(rx_desc->errors) << 24),
+				  le16_to_cpu(rx_desc->csum), skb);
+
+		pskb_trim(skb, skb->len - 4);
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+		total_rx_packets++;
+
+		/* eth type trans needs skb->data to point to something */
+		if (!pskb_may_pull(skb, ETH_HLEN)) {
+			DPRINTK(DRV, ERR, "__pskb_pull_tail failed.\n");
+			kfree_rtskb(skb);
+			goto next_desc;
+		}
+
+		skb->protocol = rt_eth_type_trans(skb, netdev);
+
+		e1000_receive_skb(adapter, status, rx_desc->special, skb);
+		adapter->data_received = 1; /* set flag for the main interrupt routine */
+
+		netdev->last_rx = jiffies;
+#ifdef CONFIG_E1000_MQ
+		rx_ring->rx_stats.packets++;
+		rx_ring->rx_stats.bytes += length;
+#endif
+
+next_desc:
+		rx_desc->status = 0;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+	}
+	rx_ring->next_to_clean = i;
+
+	cleaned_count = E1000_DESC_UNUSED(rx_ring);
+	if (cleaned_count)
+		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+
+	adapter->total_rx_packets += total_rx_packets;
+	adapter->total_rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_packets += total_rx_packets;
+	return cleaned;
+}
+#endif /* NAPI */
+
+
+/**
+ * e1000_clean_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ *
+ * the return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned
+ **/
+#ifdef CONFIG_E1000_NAPI
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring,
+				    int *work_done, int work_to_do)
+#else
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring,
+				    nanosecs_abs_t *time_stamp)
+#endif
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc, *next_rxd;
+	struct e1000_rx_buffer *buffer_info, *next_buffer;
+	u32 length;
+	unsigned int i;
+	int cleaned_count = 0;
+	bool cleaned = FALSE;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+	// rtdm_printk("<2> e1000_clean_rx_irq %i\n", __LINE__);
+
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (rx_desc->status & E1000_RXD_STAT_DD) {
+		struct sk_buff *skb;
+		u8 status;
+
+#ifdef CONFIG_E1000_NAPI
+		if (*work_done >= work_to_do)
+			break;
+		(*work_done)++;
+#endif
+		status = rx_desc->status;
+		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
+
+		prefetch(skb->data - NET_IP_ALIGN);
+
+		if (++i == rx_ring->count) i = 0;
+		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		prefetch(next_rxd);
+
+		next_buffer = &rx_ring->buffer_info[i];
+
+		cleaned = TRUE;
+		cleaned_count++;
+		pci_unmap_single(pdev,
+				 buffer_info->dma,
+				 adapter->rx_buffer_len,
+				 PCI_DMA_FROMDEVICE);
+		buffer_info->dma = 0;
+
+		length = le16_to_cpu(rx_desc->length);
+
+		/* !EOP means multiple descriptors were used to store a single
+		 * packet, also make sure the frame isn't just CRC only */
+		if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
+			/* All receives must fit into a single buffer */
+			E1000_DBG("%s: Receive packet consumed multiple"
+				  " buffers\n", netdev->name);
+			/* recycle */
+			buffer_info->skb = skb;
+			goto next_desc;
+		}
+
+		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
+			u8 last_byte = *(skb->data + length - 1);
+			if (TBI_ACCEPT(&adapter->hw, status,
+				      rx_desc->errors, length, last_byte,
+				      adapter->min_frame_size,
+				      adapter->max_frame_size)) {
+				length--;
+			} else {
+				/* recycle */
+				buffer_info->skb = skb;
+				goto next_desc;
+			}
+		}
+
+		/* adjust length to remove Ethernet CRC, this must be
+		 * done after the TBI_ACCEPT workaround above */
+		length -= 4;
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += length;
+		total_rx_packets++;
+
+		rtskb_put(skb, length);
+
+		/* Receive Checksum Offload */
+		e1000_rx_checksum(adapter,
+				  (u32)(status) |
+				  ((u32)(rx_desc->errors) << 24),
+				  le16_to_cpu(rx_desc->csum), skb);
+
+		skb->protocol = rt_eth_type_trans(skb, netdev);
+		skb->time_stamp = *time_stamp;
+
+		e1000_receive_skb(adapter, status, rx_desc->special, skb);
+		adapter->data_received = 1; // Set flag for the main interrupt routine
+
+		// netdev->last_rx = jiffies;
+#ifdef CONFIG_E1000_MQ
+		rx_ring->rx_stats.packets++;
+		rx_ring->rx_stats.bytes += length;
+#endif
+
+next_desc:
+		rx_desc->status = 0;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+	}
+	rx_ring->next_to_clean = i;
+
+	cleaned_count = E1000_DESC_UNUSED(rx_ring);
+	if (cleaned_count)
+		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+
+	adapter->total_rx_packets += total_rx_packets;
+	adapter->total_rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_packets += total_rx_packets;
+	return cleaned;
+}
+
+/**
+ * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
+ * @adapter: board private structure
+ *
+ * the return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned
+ **/
+#ifdef CONFIG_E1000_NAPI
+static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+				       struct e1000_rx_ring *rx_ring,
+				       int *work_done, int work_to_do)
+#else
+static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+				       struct e1000_rx_ring *rx_ring,
+				       nanosecs_abs_t *time_stamp)
+#endif
+{
+#ifdef CONFIG_E1000_DISABLE_PACKET_SPLIT
+	return true;
+
+#else
+
+	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_buffer *buffer_info, *next_buffer;
+	struct e1000_ps_page *ps_page;
+	struct e1000_ps_page_dma *ps_page_dma;
+	struct sk_buff *skb;
+	unsigned int i, j;
+	u32 length, staterr;
+	int cleaned_count = 0;
+	bool cleaned = FALSE;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (staterr & E1000_RXD_STAT_DD) {
+		ps_page = &rx_ring->ps_page[i];
+		ps_page_dma = &rx_ring->ps_page_dma[i];
+#ifdef CONFIG_E1000_NAPI
+		if (unlikely(*work_done >= work_to_do))
+			break;
+		(*work_done)++;
+#endif
+		skb = buffer_info->skb;
+
+		/* in the packet split case this is header only */
+		prefetch(skb->data - NET_IP_ALIGN);
+
+		if (++i == rx_ring->count) i = 0;
+		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
+		prefetch(next_rxd);
+
+		next_buffer = &rx_ring->buffer_info[i];
+
+		cleaned = TRUE;
+		cleaned_count++;
+		pci_unmap_single(pdev, buffer_info->dma,
+				 adapter->rx_ps_bsize0,
+				 PCI_DMA_FROMDEVICE);
+		buffer_info->dma = 0;
+
+		if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
+			E1000_DBG("%s: Packet Split buffers didn't pick up"
+				  " the full packet\n", netdev->name);
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
+		if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
+		length = le16_to_cpu(rx_desc->wb.middle.length0);
+
+		if (unlikely(!length)) {
+			E1000_DBG("%s: Last part of the packet spanning"
+				  " multiple descriptors\n", netdev->name);
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
+		/* Good Receive */
+		rtskb_put(skb, length);
+#ifdef CONFIG_E1000_MQ
+		rx_ring->rx_stats.packets++;
+		rx_ring->rx_stats.bytes += skb->len;
+#endif
+
+#ifdef CONFIG_E1000_NAPI
+		{
+		/* this looks ugly, but it seems compiler issues make it
+		   more efficient than reusing j */
+		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
+
+		/* page alloc/put takes too long and affects small packet
+		 * throughput, so unsplit small packets and save the alloc/put;
+		 * kmap_* may only be called in softirq (napi) context */
+		if (l1 && (l1 <= copybreak) &&
+		    ((length + l1) <= adapter->rx_ps_bsize0)) {
+			u8 *vaddr;
+			/* there is no documentation about how to call
+			 * kmap_atomic, so we can't hold the mapping
+			 * very long */
+			pci_dma_sync_single_for_cpu(pdev,
+				ps_page_dma->ps_page_dma[0],
+				PAGE_SIZE,
+				PCI_DMA_FROMDEVICE);
+			vaddr = kmap_atomic(ps_page->ps_page[0],
+					    KM_SKB_DATA_SOFTIRQ);
+			memcpy(skb_tail_pointer(skb), vaddr, l1);
+			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+			pci_dma_sync_single_for_device(pdev,
+				ps_page_dma->ps_page_dma[0],
+				PAGE_SIZE, PCI_DMA_FROMDEVICE);
+			/* remove the CRC */
+			l1 -= 4;
+			rtskb_put(skb, l1);
+			goto copydone;
+		} /* if */
+		}
+#endif
+
+		for (j = 0; j < adapter->rx_ps_pages; j++) {
+			if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
+				break;
+			pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
+					PAGE_SIZE, PCI_DMA_FROMDEVICE);
+			ps_page_dma->ps_page_dma[j] = 0;
+			skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0,
+					   length);
+			ps_page->ps_page[j] = NULL;
+			skb->len += length;
+			skb->data_len += length;
+			skb->truesize += length;
+		}
+
+		/* strip the ethernet crc, problem is we're using pages now so
+		 * this whole operation can get a little cpu intensive */
+		pskb_trim(skb, skb->len - 4);
+
+#ifdef CONFIG_E1000_NAPI
+copydone:
+#endif
+		total_rx_bytes += skb->len;
+		total_rx_packets++;
+
+		e1000_rx_checksum(adapter, staterr,
+				  le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
+		skb->protocol = rt_eth_type_trans(skb, netdev);
+
+		if (likely(rx_desc->wb.upper.header_status &
+			   cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)))
+			adapter->rx_hdr_split++;
+
+		e1000_receive_skb(adapter, staterr, rx_desc->wb.middle.vlan,
+				  skb);
+		netdev->last_rx = jiffies;
+
+next_desc:
+		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
+		buffer_info->skb = NULL;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+
+		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+	}
+	rx_ring->next_to_clean = i;
+
+	cleaned_count = E1000_DESC_UNUSED(rx_ring);
+	if (cleaned_count)
+		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+
+	adapter->total_rx_packets += total_rx_packets;
+	adapter->total_rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_packets += total_rx_packets;
+	return cleaned;
+#endif
+}
+
+#ifdef CONFIG_E1000_NAPI
+/**
+ * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
+ * @adapter: address of board private structure
+ * @rx_ring: pointer to receive ring structure
+ * @cleaned_count: number of buffers to allocate this pass
+ **/
+static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
+					 struct e1000_rx_ring *rx_ring,
+					 int cleaned_count)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc;
+	struct e1000_rx_buffer *buffer_info;
+	struct sk_buff *skb;
+	unsigned int i;
+	unsigned int bufsz = 256 -
+			     16 /* for skb_reserve */ -
+			     NET_IP_ALIGN;
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (cleaned_count--) {
+		skb = buffer_info->skb;
+		if (skb) {
+			skb_trim(skb, 0);
+			goto check_page;
+		}
+
+		skb = rtnetdev_alloc_rtskb(netdev, bufsz);
+		if (unlikely(!skb)) {
+			/* Better luck next round */
+			adapter->alloc_rx_buff_failed++;
+			break;
+		}
+
+		/* Fix for errata 23, can't cross 64kB boundary */
+		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+			struct sk_buff *oldskb = skb;
+			DPRINTK(PROBE, ERR, "skb align check failed: %u bytes "
+					     "at %p\n", bufsz, skb->data);
+			/* Try again, without freeing the previous */
+			skb = rtnetdev_alloc_rtskb(netdev, bufsz);
+			/* Failed allocation, critical failure */
+			if (!skb) {
+				kfree_rtskb(oldskb);
+				adapter->alloc_rx_buff_failed++;
+				break;
+			}
+
+			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+				/* give up */
+				kfree_rtskb(skb);
+				kfree_rtskb(oldskb);
+				adapter->alloc_rx_buff_failed++;
+				break; /* while !buffer_info->skb */
+			}
+
+			/* Use new allocation */
+			kfree_rtskb(oldskb);
+		}
+		/* Make buffer alignment 2 beyond a 16 byte boundary
+		 * this will result in a 16 byte aligned IP header after
+		 * the 14 byte MAC header is removed
+		 */
+		skb_reserve(skb, NET_IP_ALIGN);
+
+		buffer_info->skb = skb;
+check_page:
+		/* allocate a new page if necessary */
+		if (!buffer_info->page) {
+			buffer_info->page = alloc_page(GFP_ATOMIC);
+			if (unlikely(!buffer_info->page)) {
+				adapter->alloc_rx_buff_failed++;
+				break;
+			}
+		}
+
+		if (!buffer_info->dma)
+			buffer_info->dma = pci_map_page(pdev,
+							buffer_info->page, 0,
+							PAGE_SIZE,
+							PCI_DMA_FROMDEVICE);
+
+		rx_desc = E1000_RX_DESC(*rx_ring, i);
+		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+		if (unlikely(++i == rx_ring->count))
+			i = 0;
+		buffer_info = &rx_ring->buffer_info[i];
+	}
+
+	if (likely(rx_ring->next_to_use != i)) {
+		rx_ring->next_to_use = i;
+		if (unlikely(i-- == 0))
+			i = (rx_ring->count - 1);
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64). */
+		wmb();
+		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
+	}
+}
+#endif /* NAPI */
+
+/**
+ * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
+ * @adapter: address of board private structure
+ **/
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+				   struct e1000_rx_ring *rx_ring,
+				   int cleaned_count)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc;
+	struct e1000_rx_buffer *buffer_info;
+	struct sk_buff *skb;
+	unsigned int i;
+	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (cleaned_count--) {
+		skb = buffer_info->skb;
+		if (skb) {
+			rtskb_trim(skb, 0);
+			goto map_skb;
+		}
+
+		skb = rtnetdev_alloc_rtskb(netdev, bufsz);
+		if (unlikely(!skb)) {
+			/* Better luck next round */
+			adapter->alloc_rx_buff_failed++;
+			break;
+		}
+
+		/* Fix for errata 23, can't cross 64kB boundary */
+		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+			struct sk_buff *oldskb = skb;
+			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
+					     "at %p\n", bufsz, skb->data);
+			/* Try again, without freeing the previous */
+			skb = rtnetdev_alloc_rtskb(netdev, bufsz);
+			/* Failed allocation, critical failure */
+			if (!skb) {
+				kfree_rtskb(oldskb);
+				adapter->alloc_rx_buff_failed++;
+				break;
+			}
+
+			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+				/* give up */
+				kfree_rtskb(skb);
+				kfree_rtskb(oldskb);
+				adapter->alloc_rx_buff_failed++;
+				break; /* while !buffer_info->skb */
+			}
+
+			/* Use new allocation */
+			kfree_rtskb(oldskb);
+		}
+		/* Make buffer alignment 2 beyond a 16 byte boundary
+		 * this will result in a 16 byte aligned IP header after
+		 * the 14 byte MAC header is removed
+		 */
+		skb_reserve(skb, NET_IP_ALIGN);
+
+		buffer_info->skb = skb;
+map_skb:
+		buffer_info->dma = pci_map_single(pdev,
+						  skb->data,
+						  adapter->rx_buffer_len,
+						  PCI_DMA_FROMDEVICE);
+
+		/* Fix for errata 23, can't cross 64kB boundary */
+		if (!e1000_check_64k_bound(adapter,
+					(void *)(unsigned long)buffer_info->dma,
+					adapter->rx_buffer_len)) {
+			DPRINTK(RX_ERR, ERR,
+				"dma align check failed: %u bytes at %p\n",
+				adapter->rx_buffer_len,
+				(void *)(unsigned long)buffer_info->dma);
+			kfree_rtskb(skb);
+			buffer_info->skb = NULL;
+
+			pci_unmap_single(pdev, buffer_info->dma,
+					 adapter->rx_buffer_len,
+					 PCI_DMA_FROMDEVICE);
+			buffer_info->dma = 0;
+
+			adapter->alloc_rx_buff_failed++;
+			break; /* while !buffer_info->skb */
+		}
+		rx_desc = E1000_RX_DESC(*rx_ring, i);
+		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+		if (unlikely(++i == rx_ring->count))
+			i = 0;
+		buffer_info = &rx_ring->buffer_info[i];
+	}
+
+	if (likely(rx_ring->next_to_use != i)) {
+		rx_ring->next_to_use = i;
+		if (unlikely(i-- == 0))
+			i = (rx_ring->count - 1);
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64). */
+		wmb();
+		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
+	}
+}
+
+/**
+ * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
+ * @adapter: address of board private structure
+ **/
+static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
+				      struct e1000_rx_ring *rx_ring,
+				      int cleaned_count)
+{
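+	/*
+	 * Left as a stub in this driver: no packet-split receive buffers
+	 * are allocated here, so there is nothing to replenish.
+	 */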
+}
+
+/**
+ * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
+ * @adapter: board private structure
+ **/
+static void e1000_smartspeed(struct e1000_adapter *adapter)
+{
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+	struct e1000_phy_info *phy = &adapter->hw.phy;
+	u16 phy_status;
+	u16 phy_ctrl;
+
+	if ((phy->type != e1000_phy_igp) || !mac->autoneg ||
+	    !(phy->autoneg_advertised & ADVERTISE_1000_FULL))
+		return;
+
+	if (adapter->smartspeed == 0) {
+		/* If Master/Slave config fault is asserted twice,
+		 * we assume back-to-back */
+		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
+		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
+		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
+		if (phy_ctrl & CR_1000T_MS_ENABLE) {
+			phy_ctrl &= ~CR_1000T_MS_ENABLE;
+			e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
+					    phy_ctrl);
+			adapter->smartspeed++;
+			if (!e1000_phy_setup_autoneg(&adapter->hw) &&
+			   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL,
+					       &phy_ctrl)) {
+				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+					     MII_CR_RESTART_AUTO_NEG);
+				e1000_write_phy_reg(&adapter->hw, PHY_CONTROL,
+						    phy_ctrl);
+			}
+		}
+		return;
+	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
+		/* If still no link, perhaps using 2/3 pair cable */
+		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
+		phy_ctrl |= CR_1000T_MS_ENABLE;
+		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
+		if (!e1000_phy_setup_autoneg(&adapter->hw) &&
+		   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_ctrl)) {
+			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+				     MII_CR_RESTART_AUTO_NEG);
+			e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_ctrl);
+		}
+	}
+	/* Restart process after E1000_SMARTSPEED_MAX iterations */
+	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
+		adapter->smartspeed = 0;
+}
+
+/**
+ * e1000_ioctl - handle device ioctl requests
+ * @netdev: network interface device structure
+ * @ifr: interface request structure
+ * @cmd: ioctl command
+ **/
+#if 0
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	switch (cmd) {
+#ifdef SIOCGMIIPHY
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return e1000_mii_ioctl(netdev, ifr, cmd);
+#endif
+#ifdef ETHTOOL_OPS_COMPAT
+	case SIOCETHTOOL:
+		return ethtool_ioctl(ifr);
+#endif
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+#ifdef SIOCGMIIPHY
+/**
+ * e1000_mii_ioctl - handle MII register ioctl requests
+ * @netdev: network interface device structure
+ * @ifr: interface request structure
+ * @cmd: ioctl command
+ **/
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+			   int cmd)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct mii_ioctl_data *data = if_mii(ifr);
+
+	if (adapter->hw.phy.media_type != e1000_media_type_copper)
+		return -EOPNOTSUPP;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = adapter->hw.phy.addr;
+		break;
+	case SIOCGMIIREG:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		switch (data->reg_num & 0x1F) {
+		case MII_BMCR:
+			data->val_out = adapter->phy_regs.bmcr;
+			break;
+		case MII_BMSR:
+			data->val_out = adapter->phy_regs.bmsr;
+			break;
+		case MII_PHYSID1:
+			data->val_out = (adapter->hw.phy.id >> 16);
+			break;
+		case MII_PHYSID2:
+			data->val_out = (adapter->hw.phy.id & 0xFFFF);
+			break;
+		case MII_ADVERTISE:
+			data->val_out = adapter->phy_regs.advertise;
+			break;
+		case MII_LPA:
+			data->val_out = adapter->phy_regs.lpa;
+			break;
+		case MII_EXPANSION:
+			data->val_out = adapter->phy_regs.expansion;
+			break;
+		case MII_CTRL1000:
+			data->val_out = adapter->phy_regs.ctrl1000;
+			break;
+		case MII_STAT1000:
+			data->val_out = adapter->phy_regs.stat1000;
+			break;
+		case MII_ESTATUS:
+			data->val_out = adapter->phy_regs.estatus;
+			break;
+		default:
+			return -EIO;
+		}
+		break;
+	case SIOCSMIIREG:
+	default:
+		return -EOPNOTSUPP;
+	}
+	return E1000_SUCCESS;
+}
+#endif
+#endif
+
+void e1000_pci_set_mwi(struct e1000_hw *hw)
+{
+	struct e1000_adapter *adapter = hw->back;
+	int ret_val = pci_set_mwi(adapter->pdev);
+
+	if (ret_val)
+		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
+}
+
+void e1000_pci_clear_mwi(struct e1000_hw *hw)
+{
+	struct e1000_adapter *adapter = hw->back;
+
+	pci_clear_mwi(adapter->pdev);
+}
+
+void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct e1000_adapter *adapter = hw->back;
+
+	pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct e1000_adapter *adapter = hw->back;
+
+	pci_write_config_word(adapter->pdev, reg, *value);
+}
+
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct e1000_adapter *adapter = hw->back;
+	u16 cap_offset;
+
+	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+	if (!cap_offset)
+		return -E1000_ERR_CONFIG;
+
+	pci_read_config_word(adapter->pdev, cap_offset + reg, value);
+
+	return E1000_SUCCESS;
+}
+
+#ifdef NETIF_F_HW_VLAN_TX
+static void e1000_vlan_rx_register(struct net_device *netdev,
+				   struct vlan_group *grp)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	u32 ctrl, rctl;
+
+	e1000_irq_disable(adapter);
+	adapter->vlgrp = grp;
+
+	if (grp) {
+		/* enable VLAN tag insert/strip */
+		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+		ctrl |= E1000_CTRL_VME;
+		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
+
+		if ((adapter->hw.mac.type != e1000_ich8lan) &&
+		    (adapter->hw.mac.type != e1000_ich9lan)) {
+			/* enable VLAN receive filtering */
+			rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+			rctl |= E1000_RCTL_VFE;
+			rctl &= ~E1000_RCTL_CFIEN;
+			E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+			e1000_update_mng_vlan(adapter);
+		}
+	} else {
+		/* disable VLAN tag insert/strip */
+		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+		ctrl &= ~E1000_CTRL_VME;
+		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
+
+		if ((adapter->hw.mac.type != e1000_ich8lan) &&
+		    (adapter->hw.mac.type != e1000_ich9lan)) {
+			/* disable VLAN filtering */
+			rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+			rctl &= ~E1000_RCTL_VFE;
+			E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+			if (adapter->mng_vlan_id !=
+			    (u16)E1000_MNG_VLAN_NONE) {
+				e1000_vlan_rx_kill_vid(netdev,
+						       adapter->mng_vlan_id);
+				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+			}
+		}
+	}
+
+	e1000_irq_enable(adapter);
+}
+
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	u32 vfta, index;
+	struct net_device *v_netdev;
+
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+	    (vid == adapter->mng_vlan_id))
+		return;
+	/* add VID to filter table */
+	index = (vid >> 5) & 0x7F;
+	vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
+	vfta |= (1 << (vid & 0x1F));
+	e1000_write_vfta(&adapter->hw, index, vfta);
+	/* Copy feature flags from netdev to the vlan netdev for this vid.
+	 * This allows things like TSO to bubble down to our vlan device.
+	 */
+	v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
+	v_netdev->features |= adapter->netdev->features;
+	vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
+}
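+
+/*
+ * Illustrative example: for vid = 100 the code above computes
+ * index = (100 >> 5) & 0x7F = 3 and sets bit 100 & 0x1F = 4, i.e. bit 4 of
+ * VFTA register 3; e1000_vlan_rx_kill_vid() below clears the same bit.
+ */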
+
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	u32 vfta, index;
+
+	e1000_irq_disable(adapter);
+	vlan_group_set_device(adapter->vlgrp, vid, NULL);
+	e1000_irq_enable(adapter);
+
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+	    (vid == adapter->mng_vlan_id)) {
+		/* release control to f/w */
+		e1000_release_hw_control(adapter);
+		return;
+	}
+
+	/* remove VID from filter table */
+	index = (vid >> 5) & 0x7F;
+	vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
+	vfta &= ~(1 << (vid & 0x1F));
+	e1000_write_vfta(&adapter->hw, index, vfta);
+}
+
+static void e1000_restore_vlan(struct e1000_adapter *adapter)
+{
+	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+
+	if (adapter->vlgrp) {
+		u16 vid;
+		for (vid = 0; vid < VLAN_N_VID; vid++) {
+			if (!vlan_group_get_device(adapter->vlgrp, vid))
+				continue;
+			e1000_vlan_rx_add_vid(adapter->netdev, vid);
+		}
+	}
+}
+#endif
+
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
+{
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+
+	mac->autoneg = 0;
+
+	/* Fiber NICs only allow 1000 Mbps full duplex */
+	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
+		spddplx != (SPEED_1000 + DUPLEX_FULL)) {
+		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		return -EINVAL;
+	}
+
+	switch (spddplx) {
+	case SPEED_10 + DUPLEX_HALF:
+		mac->forced_speed_duplex = ADVERTISE_10_HALF;
+		break;
+	case SPEED_10 + DUPLEX_FULL:
+		mac->forced_speed_duplex = ADVERTISE_10_FULL;
+		break;
+	case SPEED_100 + DUPLEX_HALF:
+		mac->forced_speed_duplex = ADVERTISE_100_HALF;
+		break;
+	case SPEED_100 + DUPLEX_FULL:
+		mac->forced_speed_duplex = ADVERTISE_100_FULL;
+		break;
+	case SPEED_1000 + DUPLEX_FULL:
+		mac->autoneg = 1;
+		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	case SPEED_1000 + DUPLEX_HALF: /* not supported */
+	default:
+		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		return -EINVAL;
+	}
+	return 0;
+}
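+
+/*
+ * Usage sketch (illustrative): callers pass the requested setting as the
+ * sum of an ethtool SPEED_* and DUPLEX_* constant, for example
+ *
+ *	if (e1000_set_spd_dplx(adapter, SPEED_100 + DUPLEX_FULL))
+ *		goto err_inval;
+ *
+ * where err_inval is a hypothetical error label.  Gigabit is only accepted
+ * as SPEED_1000 + DUPLEX_FULL, in which case autonegotiation is re-enabled
+ * advertising 1000BASE-T full duplex only.
+ */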
+
+#ifdef USE_REBOOT_NOTIFIER
+/* only want to do this for 2.4 kernels? */
+static int e1000_notify_reboot(struct notifier_block *nb,
+			       unsigned long event, void *p)
+{
+	struct pci_dev *pdev = NULL;
+
+	switch (event) {
+	case SYS_DOWN:
+	case SYS_HALT:
+	case SYS_POWER_OFF:
+		while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
+			if (pci_dev_driver(pdev) == &e1000_driver)
+				e1000_suspend(pdev, PMSG_SUSPEND);
+		}
+	}
+	return NOTIFY_DONE;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int e1000_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	u32 err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	if ((err = pci_enable_device(pdev))) {
+		printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	if (rtnetif_running(netdev) && (err = e1000_request_irq(adapter)))
+		return err;
+
+	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+		e1000_power_up_phy(&adapter->hw);
+		e1000_setup_link(&adapter->hw);
+	}
+	e1000_reset(adapter);
+	E1000_WRITE_REG(&adapter->hw, E1000_WUS, ~0);
+
+	e1000_init_manageability(adapter);
+
+	if (rtnetif_running(netdev))
+		e1000_up(adapter);
+
+	netif_device_attach(netdev);
+
+	/* If the controller is 82573 or ICHx and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up.  For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (((adapter->hw.mac.type != e1000_82573) &&
+	     (adapter->hw.mac.type != e1000_ich8lan) &&
+	     (adapter->hw.mac.type != e1000_ich9lan)) ||
+	    !e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void e1000_netpoll(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	disable_irq(adapter->pdev->irq);
+	e1000_intr(adapter->pdev->irq, netdev);
+
+	for (i = 0; i < adapter->num_tx_queues ; i++ )
+		e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
+#ifndef CONFIG_E1000_NAPI
+	for (i = 0; i < adapter->num_rx_queues ; i++ )
+		adapter->clean_rx(adapter, &adapter->rx_ring[i], NULL);
+#endif
+	enable_irq(adapter->pdev->irq);
+}
+#endif
+
+#ifdef HAVE_PCI_ERS
+/**
+ * e1000_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+
+	netif_device_detach(netdev);
+
+	if (rtnetif_running(netdev))
+		e1000_down(adapter);
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the e1000_resume routine.
+ */
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+
+	if (pci_enable_device(pdev)) {
+		printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	e1000_reset(adapter);
+	E1000_WRITE_REG(&adapter->hw, E1000_WUS, ~0);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * e1000_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the e1000_resume routine.
+ */
+static void e1000_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+
+	e1000_init_manageability(adapter);
+
+	if (rtnetif_running(netdev)) {
+		if (e1000_up(adapter)) {
+			printk(KERN_ERR "e1000: can't bring device back up after reset\n");
+			return;
+		}
+	}
+
+	netif_device_attach(netdev);
+
+	/* If the controller is 82573 or ICHx and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up.  For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (((adapter->hw.mac.type != e1000_82573) &&
+	     (adapter->hw.mac.type != e1000_ich8lan) &&
+	     (adapter->hw.mac.type != e1000_ich9lan)) ||
+	    !e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
+
+}
+#endif /* HAVE_PCI_ERS */
+
+s32 e1000_alloc_zeroed_dev_spec_struct(struct e1000_hw *hw, u32 size)
+{
+	hw->dev_spec = kmalloc(size, GFP_KERNEL);
+
+	if (!hw->dev_spec)
+		return -ENOMEM;
+
+	memset(hw->dev_spec, 0, size);
+
+	return E1000_SUCCESS;
+}
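+
+/*
+ * Note: the kmalloc() + memset() pair above is equivalent to a single
+ * kzalloc(size, GFP_KERNEL); it is presumably kept split to stay close to
+ * the original driver code.
+ */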
+
+void e1000_free_dev_spec_struct(struct e1000_hw *hw)
+{
+	if (!hw->dev_spec)
+		return;
+
+	kfree(hw->dev_spec);
+}
+
+/* vim: set ts=4: */
+/* e1000_main.c */
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.h	2022-03-21 12:58:29.735885587 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_NVM_H_
+#define _E1000_NVM_H_
+
+s32  e1000_acquire_nvm_generic(struct e1000_hw *hw);
+
+s32  e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+s32  e1000_read_mac_addr_generic(struct e1000_hw *hw);
+s32  e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num);
+s32  e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset,
+                              u16 words, u16 *data);
+s32  e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words,
+                         u16 *data);
+s32  e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data);
+s32  e1000_validate_nvm_checksum_generic(struct e1000_hw *hw);
+s32  e1000_write_nvm_eewr(struct e1000_hw *hw, u16 offset,
+                          u16 words, u16 *data);
+s32  e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset,
+                               u16 words, u16 *data);
+s32  e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words,
+                         u16 *data);
+s32  e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
+void e1000_stop_nvm(struct e1000_hw *hw);
+void e1000_release_nvm_generic(struct e1000_hw *hw);
+void e1000_reload_nvm_generic(struct e1000_hw *hw);
+
+/* Function pointers */
+s32  e1000_acquire_nvm(struct e1000_hw *hw);
+void e1000_release_nvm(struct e1000_hw *hw);
+
+#define E1000_STM_OPCODE  0xDB00
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.c	2022-03-21 12:58:29.729885646 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_osdep.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+#include "e1000_mac.h"
+
+/**
+ *  e1000_remove_device_generic - Free device specific structure
+ *  @hw: pointer to the HW structure
+ *
+ *  If a device specific structure was allocated, this function will
+ *  free it.
+ **/
+void e1000_remove_device_generic(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_remove_device_generic");
+
+	/* Freeing the dev_spec member of the e1000_hw structure */
+	e1000_free_dev_spec_struct(hw);
+}
+
+/**
+ *  e1000_get_bus_info_pci_generic - Get PCI(x) bus information
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines and stores the system bus information for a particular
+ *  network interface.  The following bus information is determined and stored:
+ *  bus speed, bus width, type (PCI/PCIx), and PCI(-x) function.
+ **/
+s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	u32 status = E1000_READ_REG(hw, E1000_STATUS);
+	s32 ret_val = E1000_SUCCESS;
+	u16 pci_header_type;
+
+	DEBUGFUNC("e1000_get_bus_info_pci_generic");
+
+	/* PCI or PCI-X? */
+	bus->type = (status & E1000_STATUS_PCIX_MODE)
+			? e1000_bus_type_pcix
+			: e1000_bus_type_pci;
+
+	/* Bus speed */
+	if (bus->type == e1000_bus_type_pci) {
+		bus->speed = (status & E1000_STATUS_PCI66)
+		             ? e1000_bus_speed_66
+		             : e1000_bus_speed_33;
+	} else {
+		switch (status & E1000_STATUS_PCIX_SPEED) {
+		case E1000_STATUS_PCIX_SPEED_66:
+			bus->speed = e1000_bus_speed_66;
+			break;
+		case E1000_STATUS_PCIX_SPEED_100:
+			bus->speed = e1000_bus_speed_100;
+			break;
+		case E1000_STATUS_PCIX_SPEED_133:
+			bus->speed = e1000_bus_speed_133;
+			break;
+		default:
+			bus->speed = e1000_bus_speed_reserved;
+			break;
+		}
+	}
+
+	/* Bus width */
+	bus->width = (status & E1000_STATUS_BUS64)
+	             ? e1000_bus_width_64
+	             : e1000_bus_width_32;
+
+	/* Which PCI(-X) function? */
+	e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
+	if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC)
+		bus->func = (status & E1000_STATUS_FUNC_MASK)
+		            >> E1000_STATUS_FUNC_SHIFT;
+	else
+		bus->func = 0;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_get_bus_info_pcie_generic - Get PCIe bus information
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines and stores the system bus information for a particular
+ *  network interface.  The following bus information is determined and stored:
+ *  bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	s32 ret_val;
+	u32 status;
+	u16 pcie_link_status, pci_header_type;
+
+	DEBUGFUNC("e1000_get_bus_info_pcie_generic");
+
+	bus->type = e1000_bus_type_pci_express;
+	bus->speed = e1000_bus_speed_2500;
+
+	ret_val = e1000_read_pcie_cap_reg(hw,
+	                                  PCIE_LINK_STATUS,
+	                                  &pcie_link_status);
+	if (ret_val)
+		bus->width = e1000_bus_width_unknown;
+	else
+		bus->width = (e1000_bus_width)((pcie_link_status &
+		                                PCIE_LINK_WIDTH_MASK) >>
+		                               PCIE_LINK_WIDTH_SHIFT);
+
+	e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
+	if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
+		status = E1000_READ_REG(hw, E1000_STATUS);
+		bus->func = (status & E1000_STATUS_FUNC_MASK)
+		            >> E1000_STATUS_FUNC_SHIFT;
+	} else {
+		bus->func = 0;
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_clear_vfta_generic - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the register array which contains the VLAN filter table by
+ *  setting all the values to 0.
+ **/
+void e1000_clear_vfta_generic(struct e1000_hw *hw)
+{
+	u32 offset;
+
+	DEBUGFUNC("e1000_clear_vfta_generic");
+
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+		E1000_WRITE_FLUSH(hw);
+	}
+}
+
+/**
+ *  e1000_write_vfta_generic - Write value to VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset in VLAN filter table
+ *  @value: register value written to VLAN filter table
+ *
+ *  Writes value at the given offset in the register array which stores
+ *  the VLAN filter table.
+ **/
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	DEBUGFUNC("e1000_write_vfta_generic");
+
+	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+	E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ *  e1000_init_rx_addrs_generic - Initialize receive addresses
+ *  @hw: pointer to the HW structure
+ *  @rar_count: receive address registers
+ *
+ *  Sets up the receive address registers by setting the base receive address
+ *  register to the device's MAC address and clearing all the other receive
+ *  address registers to 0.
+ **/
+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
+{
+	u32 i;
+
+	DEBUGFUNC("e1000_init_rx_addrs_generic");
+
+	/* Setup the receive address */
+	DEBUGOUT("Programming MAC Address into RAR[0]\n");
+
+	e1000_rar_set_generic(hw, hw->mac.addr, 0);
+
+	/* Zero out the other (rar_entry_count - 1) receive addresses */
+	DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
+	for (i = 1; i < rar_count; i++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
+		E1000_WRITE_FLUSH(hw);
+		E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
+		E1000_WRITE_FLUSH(hw);
+	}
+}
+
+/**
+ *  e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the NVM for an alternate MAC address.  An alternate MAC address
+ *  can be set up by pre-boot software and must be treated like a permanent
+ *  address, overriding the actual permanent MAC address.  If an
+ *  alternate MAC address is found it is saved in the hw struct and
+ *  programmed into RAR0 and the function returns success, otherwise the
+ *  function returns an error.
+ **/
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+{
+	u32 i;
+	s32 ret_val = E1000_SUCCESS;
+	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+	u8 alt_mac_addr[ETH_ADDR_LEN];
+
+	DEBUGFUNC("e1000_check_alt_mac_addr_generic");
+
+	ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+	                         &nvm_alt_mac_addr_offset);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		goto out;
+	}
+
+	if (nvm_alt_mac_addr_offset == 0xFFFF) {
+		ret_val = -(E1000_NOT_IMPLEMENTED);
+		goto out;
+	}
+
+	if (hw->bus.func == E1000_FUNC_1)
+		nvm_alt_mac_addr_offset += ETH_ADDR_LEN/sizeof(u16);
+
+	for (i = 0; i < ETH_ADDR_LEN; i += 2) {
+		offset = nvm_alt_mac_addr_offset + (i >> 1);
+		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
+		if (ret_val) {
+			DEBUGOUT("NVM Read Error\n");
+			goto out;
+		}
+
+		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+	}
+
+	/* if multicast bit is set, the alternate address will not be used */
+	if (alt_mac_addr[0] & 0x01) {
+		ret_val = -(E1000_NOT_IMPLEMENTED);
+		goto out;
+	}
+
+	for (i = 0; i < ETH_ADDR_LEN; i++)
+		hw->mac.addr[i] = hw->mac.perm_addr[i] = alt_mac_addr[i];
+
+	e1000_rar_set(hw, hw->mac.perm_addr, 0);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_rar_set_generic - Set receive address register
+ *  @hw: pointer to the HW structure
+ *  @addr: pointer to the receive address
+ *  @index: receive address array register
+ *
+ *  Sets the receive address array register at index to the address passed
+ *  in by addr.
+ **/
+void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+	u32 rar_low, rar_high;
+
+	DEBUGFUNC("e1000_rar_set_generic");
+
+	/*
+	 * HW expects these in little endian so we reverse the byte order
+	 * from network order (big endian) to little endian
+	 */
+	rar_low = ((u32) addr[0] |
+	           ((u32) addr[1] << 8) |
+	           ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+	/* If MAC address zero, no need to set the AV bit */
+	if (rar_low || rar_high) {
+		if (!hw->mac.disable_av)
+			rar_high |= E1000_RAH_AV;
+	}
+
+	E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
+	E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
+}
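+
+/*
+ * Worked example (illustrative): for addr = 00:1B:21:3A:4C:5D the packing
+ * above yields rar_low = 0x3A211B00 and rar_high = 0x5D4C; since the address
+ * is non-zero, E1000_RAH_AV is also OR'd into rar_high unless disable_av is
+ * set.
+ */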
+
+/**
+ *  e1000_mta_set_generic - Set multicast filter table address
+ *  @hw: pointer to the HW structure
+ *  @hash_value: determines the MTA register and bit to set
+ *
+ *  The multicast table address is a register array of 32-bit registers.
+ *  The hash_value is used to determine what register the bit is in, the
+ *  current value is read, the new bit is OR'd in and the new value is
+ *  written back into the register.
+ **/
+void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value)
+{
+	u32 hash_bit, hash_reg, mta;
+
+	DEBUGFUNC("e1000_mta_set_generic");
+	/*
+	 * The MTA is a register array of 32-bit registers. It is
+	 * treated like an array of (32*mta_reg_count) bits.  We want to
+	 * set bit BitArray[hash_value]. So we figure out what register
+	 * the bit is in, read it, OR in the new bit, then write
+	 * back the new value.  The (hw->mac.mta_reg_count - 1) serves as a
+	 * mask to bits 31:5 of the hash value which gives us the
+	 * register we're modifying.  The hash bit within that register
+	 * is determined by the lower 5 bits of the hash value.
+	 */
+	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+	hash_bit = hash_value & 0x1F;
+
+	mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
+
+	mta |= (1 << hash_bit);
+
+	E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
+	E1000_WRITE_FLUSH(hw);
+}
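+
+/*
+ * Worked example (illustrative): with mta_reg_count = 128 and
+ * hash_value = 0x563, hash_reg = (0x563 >> 5) & 0x7F = 0x2B and
+ * hash_bit = 0x563 & 0x1F = 0x3, so bit 3 of MTA register 43 is set.
+ */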
+
+/**
+ *  e1000_update_mc_addr_list_generic - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *  @rar_used_count: the first RAR register free to program
+ *  @rar_count: total number of supported Receive Address Registers
+ *
+ *  Updates the Receive Address Registers and Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ *  The parameter rar_count will usually be hw->mac.rar_entry_count
+ *  unless there are workarounds that change this.
+ **/
+void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
+                                       u8 *mc_addr_list, u32 mc_addr_count,
+                                       u32 rar_used_count, u32 rar_count)
+{
+	u32 hash_value;
+	u32 i;
+
+	DEBUGFUNC("e1000_update_mc_addr_list_generic");
+
+	/*
+	 * Load the first set of multicast addresses into the exact
+	 * filters (RAR).  If there are not enough to fill the RAR
+	 * array, clear the filters.
+	 */
+	for (i = rar_used_count; i < rar_count; i++) {
+		if (mc_addr_count) {
+			e1000_rar_set(hw, mc_addr_list, i);
+			mc_addr_count--;
+			mc_addr_list += ETH_ADDR_LEN;
+		} else {
+			E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
+			E1000_WRITE_FLUSH(hw);
+			E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
+			E1000_WRITE_FLUSH(hw);
+		}
+	}
+
+	/* Clear the old settings from the MTA */
+	DEBUGOUT("Clearing MTA\n");
+	for (i = 0; i < hw->mac.mta_reg_count; i++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	/* Load any remaining multicast addresses into the hash table. */
+	for (; mc_addr_count > 0; mc_addr_count--) {
+		hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
+		DEBUGOUT1("Hash value = 0x%03X\n", hash_value);
+		e1000_mta_set(hw, hash_value);
+		mc_addr_list += ETH_ADDR_LEN;
+	}
+}
+
+/**
+ *  e1000_hash_mc_addr_generic - Generate a multicast hash value
+ *  @hw: pointer to the HW structure
+ *  @mc_addr: pointer to a multicast address
+ *
+ *  Generates a multicast address hash value which is used to determine
+ *  the multicast filter table array address and new table value.  See
+ *  e1000_mta_set_generic()
+ **/
+u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
+{
+	u32 hash_value, hash_mask;
+	u8 bit_shift = 0;
+
+	DEBUGFUNC("e1000_hash_mc_addr_generic");
+
+	/* Register count multiplied by bits per register */
+	hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+	/*
+	 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
+	 * where 0xFF would still fall within the hash mask.
+	 */
+	while (hash_mask >> bit_shift != 0xFF)
+		bit_shift++;
+
+	/*
+	 * The portion of the address that is used for the hash table
+	 * is determined by the mc_filter_type setting.
+	 * The algorithm is such that there is a total of 8 bits of shifting.
+	 * The bit_shift for a mc_filter_type of 0 represents the number of
+	 * left-shifts where the MSB of mc_addr[5] would still fall within
+	 * the hash_mask.  Case 0 does this exactly.  Since there are a total
+	 * of 8 bits of shifting, then mc_addr[4] will shift right the
+	 * remaining number of bits. Thus 8 - bit_shift.  The rest of the
+	 * cases are a variation of this algorithm...essentially raising the
+	 * number of bits to shift mc_addr[5] left, while still keeping the
+	 * 8-bit shifting total.
+	 *
+	 * For example, given the following Destination MAC Address and an
+	 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+	 * we can see that the bit_shift for case 0 is 4.  These are the hash
+	 * values resulting from each mc_filter_type...
+	 * [0] [1] [2] [3] [4] [5]
+	 * 01  AA  00  12  34  56
+	 * LSB                 MSB
+	 *
+	 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+	 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+	 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+	 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+	 */
+	switch (hw->mac.mc_filter_type) {
+		default:
+		case 0:
+			break;
+		case 1:
+			bit_shift += 1;
+			break;
+		case 2:
+			bit_shift += 2;
+			break;
+		case 3:
+			bit_shift += 4;
+			break;
+	}
+
+	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+	                          (((u16) mc_addr[5]) << bit_shift)));
+
+	return hash_value;
+}
+
+/**
+ *  e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value
+ *  @hw: pointer to the HW structure
+ *
+ *  In certain situations, a system BIOS may report that the PCI-X maximum
+ *  memory read byte count (MMRBC) value is higher than the actual
+ *  value. We check the PCI-X command register against the current PCI-X
+ *  status register.
+ **/
+void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw)
+{
+	u16 cmd_mmrbc;
+	u16 pcix_cmd;
+	u16 pcix_stat_hi_word;
+	u16 stat_mmrbc;
+
+	DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic");
+
+	/* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */
+	if (hw->bus.type != e1000_bus_type_pcix)
+		return;
+
+	e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
+	e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word);
+	cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >>
+	             PCIX_COMMAND_MMRBC_SHIFT;
+	stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >>
+	              PCIX_STATUS_HI_MMRBC_SHIFT;
+	if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
+		stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K;
+	if (cmd_mmrbc > stat_mmrbc) {
+		pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK;
+		pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT;
+		e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
+	}
+}
+
+/**
+ *  e1000_clear_hw_cntrs_base_generic - Clear base hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the base hardware counters by reading the counter registers.
+ **/
+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw)
+{
+	volatile u32 temp;
+
+	DEBUGFUNC("e1000_clear_hw_cntrs_base_generic");
+
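+	/*
+	 * These statistics registers are clear-on-read, so reading each one
+	 * into a throwaway variable is enough to zero the hardware counters.
+	 */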
+	temp = E1000_READ_REG(hw, E1000_CRCERRS);
+	temp = E1000_READ_REG(hw, E1000_SYMERRS);
+	temp = E1000_READ_REG(hw, E1000_MPC);
+	temp = E1000_READ_REG(hw, E1000_SCC);
+	temp = E1000_READ_REG(hw, E1000_ECOL);
+	temp = E1000_READ_REG(hw, E1000_MCC);
+	temp = E1000_READ_REG(hw, E1000_LATECOL);
+	temp = E1000_READ_REG(hw, E1000_COLC);
+	temp = E1000_READ_REG(hw, E1000_DC);
+	temp = E1000_READ_REG(hw, E1000_SEC);
+	temp = E1000_READ_REG(hw, E1000_RLEC);
+	temp = E1000_READ_REG(hw, E1000_XONRXC);
+	temp = E1000_READ_REG(hw, E1000_XONTXC);
+	temp = E1000_READ_REG(hw, E1000_XOFFRXC);
+	temp = E1000_READ_REG(hw, E1000_XOFFTXC);
+	temp = E1000_READ_REG(hw, E1000_FCRUC);
+	temp = E1000_READ_REG(hw, E1000_GPRC);
+	temp = E1000_READ_REG(hw, E1000_BPRC);
+	temp = E1000_READ_REG(hw, E1000_MPRC);
+	temp = E1000_READ_REG(hw, E1000_GPTC);
+	temp = E1000_READ_REG(hw, E1000_GORCL);
+	temp = E1000_READ_REG(hw, E1000_GORCH);
+	temp = E1000_READ_REG(hw, E1000_GOTCL);
+	temp = E1000_READ_REG(hw, E1000_GOTCH);
+	temp = E1000_READ_REG(hw, E1000_RNBC);
+	temp = E1000_READ_REG(hw, E1000_RUC);
+	temp = E1000_READ_REG(hw, E1000_RFC);
+	temp = E1000_READ_REG(hw, E1000_ROC);
+	temp = E1000_READ_REG(hw, E1000_RJC);
+	temp = E1000_READ_REG(hw, E1000_TORL);
+	temp = E1000_READ_REG(hw, E1000_TORH);
+	temp = E1000_READ_REG(hw, E1000_TOTL);
+	temp = E1000_READ_REG(hw, E1000_TOTH);
+	temp = E1000_READ_REG(hw, E1000_TPR);
+	temp = E1000_READ_REG(hw, E1000_TPT);
+	temp = E1000_READ_REG(hw, E1000_MPTC);
+	temp = E1000_READ_REG(hw, E1000_BPTC);
+}
+
+/**
+ *  e1000_check_for_copper_link_generic - Check for link (Copper)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks to see if the link status of the hardware has changed.  If a
+ *  change in link status has been detected, then we read the PHY registers
+ *  to get the current speed/duplex if link exists.
+ **/
+s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	bool link;
+
+	DEBUGFUNC("e1000_check_for_copper_link");
+
+	/*
+	 * We only want to go out to the PHY registers to see if Auto-Neg
+	 * has completed and/or if our link status has changed.  The
+	 * get_link_status flag is set upon receiving a Link Status
+	 * Change or Rx Sequence Error interrupt.
+	 */
+	if (!mac->get_link_status) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	/*
+	 * First we want to see if the MII Status Register reports
+	 * link.  If so, then we want to get the current speed/duplex
+	 * of the PHY.
+	 */
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link)
+		goto out; /* No link detected */
+
+	mac->get_link_status = FALSE;
+
+	/*
+	 * Check if there was DownShift, must be checked
+	 * immediately after link-up
+	 */
+	e1000_check_downshift_generic(hw);
+
+	/*
+	 * If we are forcing speed/duplex, then we simply return since
+	 * we have already determined whether we have link or not.
+	 */
+	if (!mac->autoneg) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	/*
+	 * Auto-Neg is enabled.  Auto Speed Detection takes care
+	 * of MAC speed/duplex configuration.  So we only need to
+	 * configure Collision Distance in the MAC.
+	 */
+	e1000_config_collision_dist_generic(hw);
+
+	/*
+	 * Configure Flow Control now that Auto-Neg has completed.
+	 * First, we need to restore the desired flow control
+	 * settings because we may have had to re-autoneg with a
+	 * different link partner.
+	 */
+	ret_val = e1000_config_fc_after_link_up_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("Error configuring flow control\n");
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_for_fiber_link_generic - Check for link (Fiber)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks for link up on the hardware.  If link is not up and we have
+ *  a signal, then we need to force link up.
+ **/
+s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 rxcw;
+	u32 ctrl;
+	u32 status;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_check_for_fiber_link_generic");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	status = E1000_READ_REG(hw, E1000_STATUS);
+	rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+	/*
+	 * If we don't have link (auto-negotiation failed or link partner
+	 * cannot auto-negotiate), the cable is plugged in (we have signal),
+	 * and our link partner is not trying to auto-negotiate with us (we
+	 * are receiving idles or data), we need to force link up. We also
+	 * need to give auto-negotiation time to complete, in case the cable
+	 * was just plugged in. The autoneg_failed flag does this.
+	 */
+	/* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+	if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
+	    (!(rxcw & E1000_RXCW_C))) {
+		if (mac->autoneg_failed == 0) {
+			mac->autoneg_failed = 1;
+			goto out;
+		}
+		DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+		/* Disable auto-negotiation in the TXCW register */
+		E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+		/* Force link-up and also force full-duplex. */
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+		/* Configure Flow Control after forcing link up. */
+		ret_val = e1000_config_fc_after_link_up_generic(hw);
+		if (ret_val) {
+			DEBUGOUT("Error configuring flow control\n");
+			goto out;
+		}
+	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+		/*
+		 * If we are forcing link and we are receiving /C/ ordered
+		 * sets, re-enable auto-negotiation in the TXCW register
+		 * and disable forced link in the Device Control register
+		 * in an attempt to auto-negotiate with our link partner.
+		 */
+		DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+		E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+		E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+		mac->serdes_has_link = TRUE;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_for_serdes_link_generic - Check for link (Serdes)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks for link up on the hardware.  If link is not up and we have
+ *  a signal, then we need to force link up.
+ **/
+s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 rxcw;
+	u32 ctrl;
+	u32 status;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_check_for_serdes_link_generic");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	status = E1000_READ_REG(hw, E1000_STATUS);
+	rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+	/*
+	 * If we don't have link (auto-negotiation failed or link partner
+	 * cannot auto-negotiate), and our link partner is not trying to
+	 * auto-negotiate with us (we are receiving idles or data),
+	 * we need to force link up. We also need to give auto-negotiation
+	 * time to complete.
+	 */
+	/* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+	if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
+		if (mac->autoneg_failed == 0) {
+			mac->autoneg_failed = 1;
+			goto out;
+		}
+		DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+		/* Disable auto-negotiation in the TXCW register */
+		E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+		/* Force link-up and also force full-duplex. */
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+		/* Configure Flow Control after forcing link up. */
+		ret_val = e1000_config_fc_after_link_up_generic(hw);
+		if (ret_val) {
+			DEBUGOUT("Error configuring flow control\n");
+			goto out;
+		}
+	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+		/*
+		 * If we are forcing link and we are receiving /C/ ordered
+		 * sets, re-enable auto-negotiation in the TXCW register
+		 * and disable forced link in the Device Control register
+		 * in an attempt to auto-negotiate with our link partner.
+		 */
+		DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+		E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+		E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+		mac->serdes_has_link = TRUE;
+	} else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) {
+		/*
+		 * If we force link for non-auto-negotiation switch, check
+		 * link status based on MAC synchronization for internal
+		 * serdes media type.
+		 */
+		/* SYNCH bit and IV bit are sticky. */
+		usec_delay(10);
+		if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, E1000_RXCW)) {
+			if (!(rxcw & E1000_RXCW_IV)) {
+				mac->serdes_has_link = TRUE;
+				DEBUGOUT("SERDES: Link is up.\n");
+			}
+		} else {
+			mac->serdes_has_link = FALSE;
+			DEBUGOUT("SERDES: Link is down.\n");
+		}
+	}
+
+	if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) {
+		status = E1000_READ_REG(hw, E1000_STATUS);
+		mac->serdes_has_link = (status & E1000_STATUS_LU)
+					? TRUE
+					: FALSE;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_link_generic - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+s32 e1000_setup_link_generic(struct e1000_hw *hw)
+{
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_setup_link_generic");
+
+	/*
+	 * In the case of the phy reset being blocked, we already have a link.
+	 * We do not need to set it up again.
+	 */
+	if (e1000_check_reset_block(hw))
+		goto out;
+
+	/*
+	 * If flow control is set to default, set flow control based on
+	 * the EEPROM flow control settings.
+	 */
+	if (hw->fc.type == e1000_fc_default) {
+		ret_val = e1000_set_default_fc_generic(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	/*
+	 * We want to save off the original Flow Control configuration just
+	 * in case we get disconnected and then reconnected into a different
+	 * hub or switch with different Flow Control capabilities.
+	 */
+	hw->fc.original_type = hw->fc.type;
+
+	DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc.type);
+
+	/* Call the necessary media_type subroutine to configure the link. */
+	ret_val = func->setup_physical_interface(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Initialize the flow control address, type, and PAUSE timer
+	 * registers to their default values.  This is done even if flow
+	 * control is disabled, because it does not hurt anything to
+	 * initialize these registers.
+	 */
+	DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
+	E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
+	E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+	E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+
+	ret_val = e1000_set_fc_watermarks_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures collision distance and flow control for fiber and serdes
+ *  links.  Upon successful setup, poll for link.
+ **/
+s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_setup_fiber_serdes_link_generic");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	/* Take the link out of reset */
+	ctrl &= ~E1000_CTRL_LRST;
+
+	e1000_config_collision_dist_generic(hw);
+
+	ret_val = e1000_commit_fc_settings_generic(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Since auto-negotiation is enabled, take the link out of reset (the
+	 * link will be in reset, because we previously reset the chip). This
+	 * will restart auto-negotiation.  If auto-negotiation is successful
+	 * then the link-up status bit will be set and the flow control enable
+	 * bits (RFCE and TFCE) will be set according to their negotiated value.
+	 */
+	DEBUGOUT("Auto-negotiation enabled\n");
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	E1000_WRITE_FLUSH(hw);
+	msec_delay(1);
+
+	/*
+	 * For these adapters, the SW definable pin 1 is set when the optics
+	 * detect a signal.  If we have a signal, then poll for a "Link-Up"
+	 * indication.
+	 */
+	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
+	    (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
+		ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+	} else {
+		DEBUGOUT("No signal detected\n");
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_config_collision_dist_generic - Configure collision distance
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the collision distance to the default value and is used
+ *  during link setup. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+void e1000_config_collision_dist_generic(struct e1000_hw *hw)
+{
+	u32 tctl;
+
+	DEBUGFUNC("e1000_config_collision_dist_generic");
+
+	tctl = E1000_READ_REG(hw, E1000_TCTL);
+
+	tctl &= ~E1000_TCTL_COLD;
+	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+	E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ *  e1000_poll_fiber_serdes_link_generic - Poll for link up
+ *  @hw: pointer to the HW structure
+ *
 *  Polls for link up by reading the status register.  If link fails to come
+ *  up with auto-negotiation, then the link is forced if a signal is detected.
+ **/
+s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 i, status;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_poll_fiber_serdes_link_generic");
+
+	/*
+	 * If we have a signal (the cable is plugged in, or assumed true for
+	 * serdes media) then poll for a "Link-Up" indication in the Device
+	 * Status Register.  Time out if a link isn't seen in 500 milliseconds
+	 * (auto-negotiation should complete in less than 500 milliseconds even
+	 * if the other end is doing it in SW).
+	 */
+	for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
+		msec_delay(10);
+		status = E1000_READ_REG(hw, E1000_STATUS);
+		if (status & E1000_STATUS_LU)
+			break;
+	}
+	if (i == FIBER_LINK_UP_LIMIT) {
+		DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+		mac->autoneg_failed = 1;
+		/*
+		 * AutoNeg failed to achieve a link, so we'll call
+		 * mac->check_for_link. This routine will force the
+		 * link up if we detect a signal. This will allow us to
+		 * communicate with non-autonegotiating link partners.
+		 */
+		ret_val = e1000_check_for_link(hw);
+		if (ret_val) {
+			DEBUGOUT("Error while checking for link\n");
+			goto out;
+		}
+		mac->autoneg_failed = 0;
+	} else {
+		mac->autoneg_failed = 0;
+		DEBUGOUT("Valid Link Found\n");
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_commit_fc_settings_generic - Configure flow control
+ *  @hw: pointer to the HW structure
+ *
+ *  Write the flow control settings to the Transmit Config Word Register (TXCW)
+ *  based on the flow control settings in e1000_mac_info.
+ **/
+s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 txcw;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_commit_fc_settings_generic");
+
+	/*
+	 * Check for a software override of the flow control settings, and
+	 * setup the device accordingly.  If auto-negotiation is enabled, then
+	 * software will have to set the "PAUSE" bits to the correct value in
+	 * the Transmit Config Word Register (TXCW) and re-start auto-
+	 * negotiation.  However, if auto-negotiation is disabled, then
+	 * software will have to manually configure the two flow control enable
+	 * bits in the CTRL register.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause frames,
+	 *          but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames but we
+	 *          do not support receiving pause frames).
+	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+	 */
+	switch (hw->fc.type) {
+	case e1000_fc_none:
+		/* Flow control completely disabled by a software over-ride. */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+		break;
+	case e1000_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled and Tx Flow control is disabled
+		 * by a software over-ride. Since there really isn't a way to
+		 * advertise that we are capable of Rx Pause ONLY, we will
+		 * advertise that we support both symmetric and asymmetric RX
+		 * PAUSE.  Later, we will disable the adapter's ability to send
+		 * PAUSE frames.
+		 */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+		break;
+	case e1000_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is disabled,
+		 * by a software over-ride.
+		 */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+		break;
+	case e1000_fc_full:
+		/*
+		 * Flow control (both Rx and Tx) is enabled by a software
+		 * over-ride.
+		 */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+		break;
+	default:
+		DEBUGOUT("Flow control param set incorrectly\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+		break;
+	}
+
+	E1000_WRITE_REG(hw, E1000_TXCW, txcw);
+	mac->txcw = txcw;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the flow control high/low threshold (watermark) registers.  If
+ *  flow control XON frame transmission is enabled, then set XON frame
+ *  transmission as well.
+ **/
+s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u32 fcrtl = 0, fcrth = 0;
+
+	DEBUGFUNC("e1000_set_fc_watermarks_generic");
+
+	/*
+	 * Set the flow control receive threshold registers.  Normally,
+	 * these registers will be set to a default threshold that may be
+	 * adjusted later by the driver's runtime code.  However, if the
+	 * ability to transmit pause frames is not enabled, then these
+	 * registers will be set to 0.
+	 */
+	if (hw->fc.type & e1000_fc_tx_pause) {
+		/*
+		 * We need to set up the Receive Threshold high and low water
+		 * marks as well as (optionally) enabling the transmission of
+		 * XON frames.
+		 */
+		fcrtl = hw->fc.low_water;
+		if (hw->fc.send_xon)
+			fcrtl |= E1000_FCRTL_XONE;
+
+		fcrth = hw->fc.high_water;
+	}
+	E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
+	E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_set_default_fc_generic - Set flow control default values
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the EEPROM for the default values for flow control and store the
+ *  values.
+ **/
+s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 nvm_data;
+
+	DEBUGFUNC("e1000_set_default_fc_generic");
+
+	/*
+	 * Read and store word 0x0F of the EEPROM. This word contains bits
+	 * that determine the hardware's default PAUSE (flow control) mode,
+	 * a bit that determines whether the HW defaults to enabling or
+	 * disabling auto-negotiation, and the direction of the
+	 * SW defined pins. If there is no SW over-ride of the flow
+	 * control setting, then the variable hw->fc will
+	 * be initialized based on a value in the EEPROM.
+	 */
+	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
+
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		goto out;
+	}
+
+	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
+		hw->fc.type = e1000_fc_none;
+	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
+		 NVM_WORD0F_ASM_DIR)
+		hw->fc.type = e1000_fc_tx_pause;
+	else
+		hw->fc.type = e1000_fc_full;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_force_mac_fc_generic - Force the MAC's flow control settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
+ *  device control register to reflect the adapter settings.  TFCE and RFCE
+ *  need to be explicitly set by software when a copper PHY is used because
+ *  autonegotiation is managed by the PHY rather than the MAC.  Software must
+ *  also configure these bits when link is forced on a fiber connection.
+ **/
+s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_force_mac_fc_generic");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	/*
+	 * Because we didn't get link via the internal auto-negotiation
+	 * mechanism (we either forced link or we got link via PHY
+	 * auto-neg), we have to manually enable/disable transmit and
+	 * receive flow control.
+	 *
+	 * The "Case" statement below enables/disable flow control
+	 * according to the "hw->fc.type" parameter.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause
+	 *          frames but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *          but we do not receive pause frames).
+	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
+	 *  other:  No other values should be possible at this point.
+	 */
+	DEBUGOUT1("hw->fc.type = %u\n", hw->fc.type);
+
+	switch (hw->fc.type) {
+	case e1000_fc_none:
+		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+		break;
+	case e1000_fc_rx_pause:
+		ctrl &= (~E1000_CTRL_TFCE);
+		ctrl |= E1000_CTRL_RFCE;
+		break;
+	case e1000_fc_tx_pause:
+		ctrl &= (~E1000_CTRL_RFCE);
+		ctrl |= E1000_CTRL_TFCE;
+		break;
+	case e1000_fc_full:
+		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+		break;
+	default:
+		DEBUGOUT("Flow control param set incorrectly\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_config_fc_after_link_up_generic - Configures flow control after link
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the status of auto-negotiation after link up to ensure that the
+ *  speed and duplex were not forced.  If the link needed to be forced, then
+ *  flow control needs to be forced also.  If auto-negotiation is enabled
+ *  and did not fail, then we configure flow control based on our link
+ *  partner.
+ **/
+s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = E1000_SUCCESS;
+	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+	u16 speed, duplex;
+
+	DEBUGFUNC("e1000_config_fc_after_link_up_generic");
+
+	/*
+	 * Check for the case where we have fiber media and auto-neg failed
+	 * so we had to force link.  In this case, we need to force the
+	 * configuration of the MAC to match the "fc" parameter.
+	 */
+	if (mac->autoneg_failed) {
+		if (hw->phy.media_type == e1000_media_type_fiber ||
+		    hw->phy.media_type == e1000_media_type_internal_serdes)
+			ret_val = e1000_force_mac_fc_generic(hw);
+	} else {
+		if (hw->phy.media_type == e1000_media_type_copper)
+			ret_val = e1000_force_mac_fc_generic(hw);
+	}
+
+	if (ret_val) {
+		DEBUGOUT("Error forcing flow control settings\n");
+		goto out;
+	}
+
+	/*
+	 * Check for the case where we have copper media and auto-neg is
+	 * enabled.  In this case, we need to check and see if Auto-Neg
+	 * has completed, and if so, how the PHY and link partner have
+	 * flow control configured.
+	 */
+	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
+		/*
+		 * Read the MII Status Register and check to see if AutoNeg
+		 * has completed.  We read this twice because this reg has
+		 * some "sticky" (latched) bits.
+		 */
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			goto out;
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			goto out;
+
+		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+			DEBUGOUT("Copper PHY and Auto Neg "
+			         "has not completed.\n");
+			goto out;
+		}
+
+		/*
+		 * The AutoNeg process has completed, so we now need to
+		 * read both the Auto Negotiation Advertisement
+		 * Register (Address 4) and the Auto_Negotiation Base
+		 * Page Ability Register (Address 5) to determine how
+		 * flow control was negotiated.
+		 */
+		ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
+		                             &mii_nway_adv_reg);
+		if (ret_val)
+			goto out;
+		ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
+		                             &mii_nway_lp_ability_reg);
+		if (ret_val)
+			goto out;
+
+		/*
+		 * Two bits in the Auto Negotiation Advertisement Register
+		 * (Address 4) and two bits in the Auto Negotiation Base
+		 * Page Ability Register (Address 5) determine flow control
+		 * for both the PHY and the link partner.  The following
+		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+		 * 1999, describes these PAUSE resolution bits and how flow
+		 * control is determined based upon these settings.
+		 * NOTE:  DC = Don't Care
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+		 *-------|---------|-------|---------|--------------------
+		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
+		 *   0   |    1    |   0   |   DC    | e1000_fc_none
+		 *   0   |    1    |   1   |    0    | e1000_fc_none
+		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+		 *   1   |    0    |   0   |   DC    | e1000_fc_none
+		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+		 *   1   |    1    |   0   |    0    | e1000_fc_none
+		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+		 *
+		 * Are both PAUSE bits set to 1?  If so, this implies
+		 * Symmetric Flow Control is enabled at both ends.  The
+		 * ASM_DIR bits are irrelevant per the spec.
+		 *
+		 * For Symmetric Flow Control:
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   1   |   DC    |   1   |   DC    | E1000_fc_full
+		 *
+		 */
+		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+			/*
+			 * Now we need to check if the user selected Rx-only
+			 * pause frames.  In this case, we had to advertise
+			 * FULL flow control because we could not advertise RX
+			 * ONLY. Hence, we must now check to see if we need to
+			 * turn OFF  the TRANSMISSION of PAUSE frames.
+			 */
+			if (hw->fc.original_type == e1000_fc_full) {
+				hw->fc.type = e1000_fc_full;
+				DEBUGOUT("Flow Control = FULL.\r\n");
+			} else {
+				hw->fc.type = e1000_fc_rx_pause;
+				DEBUGOUT("Flow Control = "
+				         "RX PAUSE frames only.\r\n");
+			}
+		}
+		/*
+		 * For receiving PAUSE frames ONLY.
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+		 */
+		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+		          (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+		          (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+		          (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+			hw->fc.type = e1000_fc_tx_pause;
+			DEBUGOUT("Flow Control = TX PAUSE frames only.\r\n");
+		}
+		/*
+		 * For transmitting PAUSE frames ONLY.
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+		 */
+		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+		         (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+		         !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+		         (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+			hw->fc.type = e1000_fc_rx_pause;
+			DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
+		} else {
+			/*
+			 * Per the IEEE spec, at this point flow control
+			 * should be disabled.
+			 */
+			hw->fc.type = e1000_fc_none;
+			DEBUGOUT("Flow Control = NONE.\r\n");
+		}
+
+		/*
+		 * Now we need to do one last check...  If we auto-
+		 * negotiated to HALF DUPLEX, flow control should not be
+		 * enabled per IEEE 802.3 spec.
+		 */
+		ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+		if (ret_val) {
+			DEBUGOUT("Error getting link speed and duplex\n");
+			goto out;
+		}
+
+		if (duplex == HALF_DUPLEX)
+			hw->fc.type = e1000_fc_none;
+
+		/*
+		 * Now we call a subroutine to actually force the MAC
+		 * controller to use the correct flow control settings.
+		 */
+		ret_val = e1000_force_mac_fc_generic(hw);
+		if (ret_val) {
+			DEBUGOUT("Error forcing flow control settings\n");
+			goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Read the status register for the current speed/duplex and store the current
+ *  speed and duplex for copper connections.
+ **/
+s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+                                              u16 *duplex)
+{
+	u32 status;
+
+	DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic");
+
+	status = E1000_READ_REG(hw, E1000_STATUS);
+	if (status & E1000_STATUS_SPEED_1000) {
+		*speed = SPEED_1000;
+		DEBUGOUT("1000 Mbs, ");
+	} else if (status & E1000_STATUS_SPEED_100) {
+		*speed = SPEED_100;
+		DEBUGOUT("100 Mbs, ");
+	} else {
+		*speed = SPEED_10;
+		DEBUGOUT("10 Mbs, ");
+	}
+
+	if (status & E1000_STATUS_FD) {
+		*duplex = FULL_DUPLEX;
+		DEBUGOUT("Full Duplex\n");
+	} else {
+		*duplex = HALF_DUPLEX;
+		DEBUGOUT("Half Duplex\n");
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_speed_and_duplex_fiber_serdes_generic - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Sets the speed and duplex to gigabit full duplex (the only possible option)
+ *  for fiber/serdes links.
+ **/
+s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
+                                                    u16 *speed, u16 *duplex)
+{
+	DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic");
+
+	*speed = SPEED_1000;
+	*duplex = FULL_DUPLEX;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_hw_semaphore_generic - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM
+ **/
+s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw)
+{
+	u32 swsm;
+	s32 ret_val = E1000_SUCCESS;
+	s32 timeout = hw->nvm.word_size + 1;
+	s32 i = 0;
+
+	DEBUGFUNC("e1000_get_hw_semaphore_generic");
+
+	/* Get the SW semaphore */
+	while (i < timeout) {
+		swsm = E1000_READ_REG(hw, E1000_SWSM);
+		if (!(swsm & E1000_SWSM_SMBI))
+			break;
+
+		usec_delay(50);
+		i++;
+	}
+
+	if (i == timeout) {
+		DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	/* Get the FW semaphore. */
+	for (i = 0; i < timeout; i++) {
+		swsm = E1000_READ_REG(hw, E1000_SWSM);
+		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+		/* Semaphore acquired if bit latched */
+		if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+			break;
+
+		usec_delay(50);
+	}
+
+	if (i == timeout) {
+		/* Release semaphores */
+		e1000_put_hw_semaphore_generic(hw);
+		DEBUGOUT("Driver can't access the NVM\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_put_hw_semaphore_generic - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used to access the PHY or NVM
+ **/
+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw)
+{
+	u32 swsm;
+
+	DEBUGFUNC("e1000_put_hw_semaphore_generic");
+
+	swsm = E1000_READ_REG(hw, E1000_SWSM);
+
+	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+
+	E1000_WRITE_REG(hw, E1000_SWSM, swsm);
+}
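+
+/*
+ * Illustrative usage only (not part of the driver flow): callers are
+ * expected to bracket a PHY/NVM transaction with the semaphore helpers,
+ * for example:
+ *
+ *	ret_val = e1000_get_hw_semaphore_generic(hw);
+ *	if (ret_val)
+ *		return ret_val;
+ *	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
+ *	e1000_put_hw_semaphore_generic(hw);
+ *
+ * The exact acquire/release points are device specific; this sketch only
+ * shows the intended pairing of get/put around a single NVM access.
+ */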
+
+/**
+ *  e1000_get_auto_rd_done_generic - Check for auto read completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Check EEPROM for Auto Read done bit.
+ **/
+s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw)
+{
+	s32 i = 0;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_get_auto_rd_done_generic");
+
+	while (i < AUTO_READ_DONE_TIMEOUT) {
+		if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
+			break;
+		msec_delay(1);
+		i++;
+	}
+
+	if (i == AUTO_READ_DONE_TIMEOUT) {
+		DEBUGOUT("Auto read by HW from NVM has not completed.\n");
+		ret_val = -E1000_ERR_RESET;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_valid_led_default_generic - Verify a valid default LED config
+ *  @hw: pointer to the HW structure
+ *  @data: pointer to the NVM (EEPROM)
+ *
+ *  Read the EEPROM for the current default LED configuration.  If the
+ *  LED configuration is not valid, set to a valid LED configuration.
+ **/
+s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_valid_led_default_generic");
+
+	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		goto out;
+	}
+
+	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
+		*data = ID_LED_DEFAULT;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_id_led_init_generic - Initialize ID LED settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the default LED configuration from the NVM and derives the LEDCTL
+ *  values used to drive the identification LED in its "on" and "off" modes.
+ **/
+s32 e1000_id_led_init_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	const u32 ledctl_mask = 0x000000FF;
+	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+	u16 data, i, temp;
+	const u16 led_mask = 0x0F;
+
+	DEBUGFUNC("e1000_id_led_init_generic");
+
+	ret_val = hw->func.valid_led_default(hw, &data);
+	if (ret_val)
+		goto out;
+
+	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
+	mac->ledctl_mode1 = mac->ledctl_default;
+	mac->ledctl_mode2 = mac->ledctl_default;
+
+	for (i = 0; i < 4; i++) {
+		temp = (data >> (i << 2)) & led_mask;
+		switch (temp) {
+		case ID_LED_ON1_DEF2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_ON1_OFF2:
+			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode1 |= ledctl_on << (i << 3);
+			break;
+		case ID_LED_OFF1_DEF2:
+		case ID_LED_OFF1_ON2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode1 |= ledctl_off << (i << 3);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		switch (temp) {
+		case ID_LED_DEF1_ON2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_OFF1_ON2:
+			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode2 |= ledctl_on << (i << 3);
+			break;
+		case ID_LED_DEF1_OFF2:
+		case ID_LED_ON1_OFF2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode2 |= ledctl_off << (i << 3);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_led_generic - Configures SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This prepares the SW controllable LED for use and saves the current state
+ *  of the LED so it can be later restored.
+ **/
+s32 e1000_setup_led_generic(struct e1000_hw *hw)
+{
+	u32 ledctl;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_setup_led_generic");
+
+	if (hw->func.setup_led != e1000_setup_led_generic) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	if (hw->phy.media_type == e1000_media_type_fiber) {
+		ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+		hw->mac.ledctl_default = ledctl;
+		/* Turn off LED0 */
+		ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+		            E1000_LEDCTL_LED0_BLINK |
+		            E1000_LEDCTL_LED0_MODE_MASK);
+		ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+		           E1000_LEDCTL_LED0_MODE_SHIFT);
+		E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+	} else if (hw->phy.media_type == e1000_media_type_copper) {
+		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_cleanup_led_generic - Set LED config to default operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Remove the current LED configuration and set the LED configuration
+ *  to the default value, saved from the EEPROM.
+ **/
+s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_cleanup_led_generic");
+
+	if (hw->func.cleanup_led != e1000_cleanup_led_generic) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_blink_led_generic - Blink LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Blink the LEDs which are set to be on.
+ **/
+s32 e1000_blink_led_generic(struct e1000_hw *hw)
+{
+	u32 ledctl_blink = 0;
+	u32 i;
+
+	DEBUGFUNC("e1000_blink_led_generic");
+
+	if (hw->phy.media_type == e1000_media_type_fiber) {
+		/* always blink LED0 for PCI-E fiber */
+		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+	} else {
+		/*
+		 * set the blink bit for each LED that's "on" (0x0E)
+		 * in ledctl_mode2
+		 */
+		ledctl_blink = hw->mac.ledctl_mode2;
+		for (i = 0; i < 4; i++)
+			if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+			    E1000_LEDCTL_MODE_LED_ON)
+				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
+				                 (i * 8));
+	}
+
+	E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_on_generic - Turn LED on
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED on.
+ **/
+s32 e1000_led_on_generic(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	DEBUGFUNC("e1000_led_on_generic");
+
+	switch (hw->phy.media_type) {
+	case e1000_media_type_fiber:
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		ctrl &= ~E1000_CTRL_SWDPIN0;
+		ctrl |= E1000_CTRL_SWDPIO0;
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+		break;
+	case e1000_media_type_copper:
+		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
+		break;
+	default:
+		break;
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_off_generic - Turn LED off
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED off.
+ **/
+s32 e1000_led_off_generic(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	DEBUGFUNC("e1000_led_off_generic");
+
+	switch (hw->phy.media_type) {
+	case e1000_media_type_fiber:
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		ctrl |= E1000_CTRL_SWDPIN0;
+		ctrl |= E1000_CTRL_SWDPIO0;
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+		break;
+	case e1000_media_type_copper:
+		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+		break;
+	default:
+		break;
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities
+ *  @hw: pointer to the HW structure
+ *  @no_snoop: bitmap of snoop events
+ *
+ *  Set the PCI-Express no-snoop bits for the events enabled in 'no_snoop'.
+ **/
+void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop)
+{
+	u32 gcr;
+
+	DEBUGFUNC("e1000_set_pcie_no_snoop_generic");
+
+	if (hw->bus.type != e1000_bus_type_pci_express)
+		goto out;
+
+	if (no_snoop) {
+		gcr = E1000_READ_REG(hw, E1000_GCR);
+		gcr &= ~(PCIE_NO_SNOOP_ALL);
+		gcr |= no_snoop;
+		E1000_WRITE_REG(hw, E1000_GCR, gcr);
+	}
+out:
+	return;
+}
+
+/**
+ *  e1000_disable_pcie_master_generic - Disables PCI-express master access
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns 0 (E1000_SUCCESS) if successful, else returns -10
+ *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not caused
+ *  the master requests to be disabled.
+ *
+ *  Disables PCI-Express master access and verifies there are no pending
+ *  requests.
+ **/
+s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 timeout = MASTER_DISABLE_TIMEOUT;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_disable_pcie_master_generic");
+
+	if (hw->bus.type != e1000_bus_type_pci_express)
+		goto out;
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	while (timeout) {
+		if (!(E1000_READ_REG(hw, E1000_STATUS) &
+		      E1000_STATUS_GIO_MASTER_ENABLE))
+			break;
+		usec_delay(100);
+		timeout--;
+	}
+
+	if (!timeout) {
+		DEBUGOUT("Master requests are pending.\n");
+		ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing
+ *  @hw: pointer to the HW structure
+ *
+ *  Reset the Adaptive Interframe Spacing throttle to default values.
+ **/
+void e1000_reset_adaptive_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+
+	DEBUGFUNC("e1000_reset_adaptive_generic");
+
+	if (!mac->adaptive_ifs) {
+		DEBUGOUT("Not in Adaptive IFS mode!\n");
+		goto out;
+	}
+
+	if (!mac->ifs_params_forced) {
+		mac->current_ifs_val = 0;
+		mac->ifs_min_val = IFS_MIN;
+		mac->ifs_max_val = IFS_MAX;
+		mac->ifs_step_size = IFS_STEP;
+		mac->ifs_ratio = IFS_RATIO;
+	}
+
+	mac->in_ifs_mode = FALSE;
+	E1000_WRITE_REG(hw, E1000_AIT, 0);
+out:
+	return;
+}
+
+/**
+ *  e1000_update_adaptive_generic - Update Adaptive Interframe Spacing
+ *  @hw: pointer to the HW structure
+ *
+ *  Update the Adaptive Interframe Spacing Throttle value based on the
+ *  time between transmitted packets and time between collisions.
+ **/
+void e1000_update_adaptive_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+
+	DEBUGFUNC("e1000_update_adaptive_generic");
+
+	if (!mac->adaptive_ifs) {
+		DEBUGOUT("Not in Adaptive IFS mode!\n");
+		goto out;
+	}
+
+	if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
+		if (mac->tx_packet_delta > MIN_NUM_XMITS) {
+			mac->in_ifs_mode = TRUE;
+			if (mac->current_ifs_val < mac->ifs_max_val) {
+				if (!mac->current_ifs_val)
+					mac->current_ifs_val = mac->ifs_min_val;
+				else
+					mac->current_ifs_val +=
+						mac->ifs_step_size;
+				E1000_WRITE_REG(hw, E1000_AIT, mac->current_ifs_val);
+			}
+		}
+	} else {
+		if (mac->in_ifs_mode &&
+		    (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
+			mac->current_ifs_val = 0;
+			mac->in_ifs_mode = FALSE;
+			E1000_WRITE_REG(hw, E1000_AIT, 0);
+		}
+	}
+out:
+	return;
+}
+
+/**
+ *  e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Verify that when not using auto-negotiation, MDI/MDIx is correctly
+ *  set, which is forced to MDI mode only.
+ **/
+s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_validate_mdi_setting_generic");
+
+	if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
+		DEBUGOUT("Invalid MDI setting detected\n");
+		hw->phy.mdix = 1;
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_8bit_ctrl_reg_generic - Write an 8-bit CTRL register
+ *  @hw: pointer to the HW structure
+ *  @reg: 32bit register offset such as E1000_SCTL
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes an address/data control type register.  There are several of these
+ *  and they all have the format address << 8 | data and bit 31 is polled for
+ *  completion.
+ **/
+s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
+                                      u32 offset, u8 data)
+{
+	u32 i, regvalue = 0;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic");
+
+	/* Set up the address and data */
+	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
+	E1000_WRITE_REG(hw, reg, regvalue);
+
+	/* Poll the ready bit to see if the write completed */
+	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
+		usec_delay(5);
+		regvalue = E1000_READ_REG(hw, reg);
+		if (regvalue & E1000_GEN_CTL_READY)
+			break;
+	}
+	if (!(regvalue & E1000_GEN_CTL_READY)) {
+		DEBUGOUT1("Reg %08x did not indicate ready\n", reg);
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
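+
+/*
+ * Illustrative usage only: this helper is meant for address/data style
+ * control registers such as E1000_SCTL mentioned above.  The offset and
+ * data values below are placeholders, not a real configuration sequence:
+ *
+ *	ret_val = e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL,
+ *						    0x10, 0x04);
+ */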
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_osdep.h	2022-03-21 12:58:29.723885704 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* glue for the OS-dependent part of e1000
+ * includes register access macros
+ */
+
+#ifndef _E1000_OSDEP_H_
+#define _E1000_OSDEP_H_
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+
+#include "kcompat.h"
+
+#define usec_delay(x) udelay(x)
+#ifndef msec_delay
+#define msec_delay(x)	do { if (in_interrupt()) { \
+				/* Don't sleep in interrupt context! */ \
+				BUG(); \
+			} else { \
+				msleep(x); \
+			} } while (0)
+
+/* Some workarounds require millisecond delays and are run during interrupt
+ * context.  Most notably, when establishing link, the phy may need tweaking
+ * but cannot process phy register reads/writes faster than millisecond
+ * intervals...and we establish link due to a "link status change" interrupt.
+ */
+#define msec_delay_irq(x) mdelay(x)
+#endif
+
+#define PCI_COMMAND_REGISTER   PCI_COMMAND
+#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
+#define ETH_ADDR_LEN           ETH_ALEN
+
+#ifdef __BIG_ENDIAN
+#define E1000_BIG_ENDIAN __BIG_ENDIAN
+#endif
+
+
+#define DEBUGOUT(S)
+#define DEBUGOUT1(S, A...)
+
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+#define DEBUGOUT2 DEBUGOUT1
+#define DEBUGOUT3 DEBUGOUT2
+#define DEBUGOUT7 DEBUGOUT3
+
+#define E1000_REGISTER(a, reg) (((a)->mac.type >= e1000_82543) \
+			       ? reg                           \
+			       : e1000_translate_register_82542(reg))
+
+#define E1000_WRITE_REG(a, reg, value) ( \
+    writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg))))
+
+#define E1000_READ_REG(a, reg) (readl((a)->hw_addr + E1000_REGISTER(a, reg)))
+
+#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+    writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2))))
+
+#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
+    readl((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2)))
+
+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
+
+#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
+    writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1))))
+
+#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
+    readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1)))
+
+#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
+    writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))))
+
+#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
+    readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))
+
+#define E1000_WRITE_REG_IO(a, reg, offset) do { \
+    outl(reg, ((a)->io_base));                  \
+    outl(offset, ((a)->io_base + 4));      } while(0)
+
+#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
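+
+/*
+ * Illustrative usage only: E1000_WRITE_FLUSH() forces a posted write out to
+ * the device by reading STATUS.  It is typically placed between a register
+ * write and a delay that depends on that write having reached the hardware:
+ *
+ *	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ *	E1000_WRITE_FLUSH(hw);
+ *	msec_delay(1);
+ */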
+
+#define E1000_WRITE_FLASH_REG(a, reg, value) ( \
+    writel((value), ((a)->flash_address + reg)))
+
+#define E1000_WRITE_FLASH_REG16(a, reg, value) ( \
+    writew((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg))
+
+#define E1000_READ_FLASH_REG16(a, reg) (readw((a)->flash_address + reg))
+
+#endif /* _E1000_OSDEP_H_ */
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000.h	2022-03-21 12:58:29.718885753 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _E1000_H_
+#define _E1000_H_
+
+#include "kcompat.h"
+
+#include "e1000_api.h"
+
+#define BAR_0		0
+#define BAR_1		1
+#define BAR_5		5
+
+#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
+	PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+struct e1000_adapter;
+
+#define E1000_DBG(args...)
+
+#define E1000_ERR(args...) printk(KERN_ERR "e1000: " args)
+
+#define PFX "e1000: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+	(void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+		__FUNCTION__ , ## args))
+
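+/*
+ * Illustrative usage only: DPRINTK() filters messages against the
+ * adapter's msg_enable mask and prefixes them with the device name.
+ * From a context where 'adapter' is in scope, a link-up report could
+ * look like:
+ *
+ *	DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s Duplex\n",
+ *	        adapter->link_speed,
+ *	        adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
+ */
+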
+#define E1000_MAX_INTR 10
+
+/* TX/RX descriptor defines */
+#define E1000_DEFAULT_TXD                  256
+#define E1000_MAX_TXD                      256
+#define E1000_MIN_TXD                       80
+#define E1000_MAX_82544_TXD               4096
+
+#define E1000_DEFAULT_RXD                  256
+#define E1000_MAX_RXD                      256
+
+#define E1000_MIN_RXD                       80
+#define E1000_MAX_82544_RXD               4096
+
+#define E1000_MIN_ITR_USECS                 10 /* 100000 irq/sec */
+#define E1000_MAX_ITR_USECS              10000 /* 100    irq/sec */
+
+#ifdef CONFIG_E1000_MQ
+#define E1000_MAX_TX_QUEUES                  4
+#endif
+
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
+/* Supported Rx Buffer Sizes */
+#define E1000_RXBUFFER_128   128    /* Used for packet split */
+#define E1000_RXBUFFER_256   256    /* Used for packet split */
+#define E1000_RXBUFFER_512   512
+#define E1000_RXBUFFER_1024  1024
+#define E1000_RXBUFFER_2048  2048
+#define E1000_RXBUFFER_4096  4096
+#define E1000_RXBUFFER_8192  8192
+#define E1000_RXBUFFER_16384 16384
+
+/* SmartSpeed delimiters */
+#define E1000_SMARTSPEED_DOWNSHIFT 3
+#define E1000_SMARTSPEED_MAX       15
+
+/* Packet Buffer allocations */
+#define E1000_PBA_BYTES_SHIFT 0xA
+#define E1000_TX_HEAD_ADDR_SHIFT 7
+#define E1000_PBA_TX_MASK 0xFFFF0000
+
+/* Early Receive defines */
+#define E1000_ERT_2048 0x100
+
+#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue? */
+#define E1000_TX_QUEUE_WAKE	16
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define E1000_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+
+#define AUTO_ALL_MODES            0
+#define E1000_EEPROM_82544_APM    0x0004
+#define E1000_EEPROM_APME         0x0400
+
+#ifndef E1000_MASTER_SLAVE
+/* Switch to override PHY master/slave setting */
+#define E1000_MASTER_SLAVE	e1000_ms_hw_default
+#endif
+
+#ifdef NETIF_F_HW_VLAN_TX
+#define E1000_MNG_VLAN_NONE -1
+#endif
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct e1000_buffer {
+	struct rtskb *skb;
+	dma_addr_t dma;
+	unsigned long time_stamp;
+	u16 length;
+	u16 next_to_watch;
+};
+
+struct e1000_rx_buffer {
+	struct rtskb *skb;
+	dma_addr_t dma;
+	struct page *page;
+};
+
+#ifdef CONFIG_E1000_MQ
+struct e1000_queue_stats {
+	u64 packets;
+	u64 bytes;
+};
+#endif
+
+struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
+struct e1000_ps_page_dma { u64 ps_page_dma[PS_PAGE_BUFFERS]; };
+
+struct e1000_tx_ring {
+	/* pointer to the descriptor ring memory */
+	void *desc;
+	/* physical address of the descriptor ring */
+	dma_addr_t dma;
+	/* length of descriptor ring in bytes */
+	unsigned int size;
+	/* number of descriptors in the ring */
+	unsigned int count;
+	/* next descriptor to associate a buffer with */
+	unsigned int next_to_use;
+	/* next descriptor to check for DD status bit */
+	unsigned int next_to_clean;
+	/* array of buffer information structs */
+	struct e1000_buffer *buffer_info;
+
+#ifdef CONFIG_E1000_MQ
+	/* for tx ring cleanup - needed for multiqueue */
+	spinlock_t tx_queue_lock;
+#endif
+	rtdm_lock_t tx_lock;
+	u16 tdh;
+	u16 tdt;
+#ifdef CONFIG_E1000_MQ
+	struct e1000_queue_stats tx_stats;
+#endif
+	bool last_tx_tso;
+};
+
+struct e1000_rx_ring {
+	struct e1000_adapter *adapter; /* back link */
+	/* pointer to the descriptor ring memory */
+	void *desc;
+	/* physical address of the descriptor ring */
+	dma_addr_t dma;
+	/* length of descriptor ring in bytes */
+	unsigned int size;
+	/* number of descriptors in the ring */
+	unsigned int count;
+	/* next descriptor to associate a buffer with */
+	unsigned int next_to_use;
+	/* next descriptor to check for DD status bit */
+	unsigned int next_to_clean;
+#ifdef CONFIG_E1000_NAPI
+	struct napi_struct napi;
+#endif
+	/* array of buffer information structs */
+	struct e1000_rx_buffer *buffer_info;
+	/* arrays of page information for packet split */
+	struct e1000_ps_page *ps_page;
+	struct e1000_ps_page_dma *ps_page_dma;
+	struct sk_buff *rx_skb_top;
+
+	/* cpu for rx queue */
+	int cpu;
+
+	u16 rdh;
+	u16 rdt;
+#ifdef CONFIG_E1000_MQ
+	struct e1000_queue_stats rx_stats;
+#endif
+};
+
+#define E1000_DESC_UNUSED(R) \
+	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+	(R)->next_to_clean - (R)->next_to_use - 1)
+
+#define E1000_RX_DESC_PS(R, i)	    \
+	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+#define E1000_RX_DESC_EXT(R, i)	    \
+	(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
+#define E1000_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
+#define E1000_RX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_rx_desc)
+#define E1000_TX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_tx_desc)
+#define E1000_CONTEXT_DESC(R, i)	E1000_GET_DESC(R, i, e1000_context_desc)
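+
+/*
+ * Illustrative only: with count = 256, next_to_clean = 10 and
+ * next_to_use = 250, E1000_DESC_UNUSED() evaluates to
+ * 256 + 10 - 250 - 1 = 15 free descriptors.  A transmit path would
+ * typically combine it with the accessors above (local names here are
+ * hypothetical):
+ *
+ *	struct e1000_tx_desc *tx_desc;
+ *	unsigned int i = tx_ring->next_to_use;
+ *
+ *	if (E1000_DESC_UNUSED(tx_ring) < 1)
+ *		return -EBUSY;
+ *	tx_desc = E1000_TX_DESC(*tx_ring, i);
+ */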
+
+#ifdef SIOCGMIIPHY
+/* PHY register snapshot values */
+struct e1000_phy_regs {
+	u16 bmcr;		/* basic mode control register    */
+	u16 bmsr;		/* basic mode status register     */
+	u16 advertise;		/* auto-negotiation advertisement */
+	u16 lpa;		/* link partner ability register  */
+	u16 expansion;		/* auto-negotiation expansion reg */
+	u16 ctrl1000;		/* 1000BASE-T control register    */
+	u16 stat1000;		/* 1000BASE-T status register     */
+	u16 estatus;		/* extended status register       */
+};
+#endif
+
+/* board specific private data structure */
+
+struct e1000_adapter {
+#ifdef NETIF_F_HW_VLAN_TX
+	struct vlan_group *vlgrp;
+	u16 mng_vlan_id;
+#endif
+	u32 bd_number;
+	u32 rx_buffer_len;
+	u32 wol;
+	u32 smartspeed;
+	u32 en_mng_pt;
+	u16 link_speed;
+	u16 link_duplex;
+	rtdm_lock_t  stats_lock;
+#ifdef CONFIG_E1000_NAPI
+	spinlock_t tx_queue_lock;
+#endif
+	atomic_t irq_sem;
+	unsigned int total_tx_bytes;
+	unsigned int total_tx_packets;
+	unsigned int total_rx_bytes;
+	unsigned int total_rx_packets;
+	/* Interrupt Throttle Rate */
+	u32 itr;
+	u32 itr_setting;
+	u16 tx_itr;
+	u16 rx_itr;
+
+	bool fc_autoneg;
+
+#ifdef ETHTOOL_PHYS_ID
+	struct timer_list blink_timer;
+	unsigned long led_status;
+#endif
+
+	/* TX */
+	struct e1000_tx_ring *tx_ring;      /* One per active queue */
+#ifdef CONFIG_E1000_MQ
+	struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
+#endif
+	unsigned int restart_queue;
+	unsigned long tx_queue_len;
+	u32 txd_cmd;
+	u32 tx_int_delay;
+	u32 tx_abs_int_delay;
+	u32 gotc;
+	u64 gotc_old;
+	u64 tpt_old;
+	u64 colc_old;
+	u32 tx_timeout_count;
+	u32 tx_fifo_head;
+	u32 tx_head_addr;
+	u32 tx_fifo_size;
+	u8 tx_timeout_factor;
+	atomic_t tx_fifo_stall;
+	bool pcix_82544;
+	bool detect_tx_hung;
+
+	/* RX */
+#ifdef CONFIG_E1000_NAPI
+	bool (*clean_rx) (struct e1000_adapter *adapter,
+			       struct e1000_rx_ring *rx_ring,
+			       int *work_done, int work_to_do);
+#else
+	bool (*clean_rx) (struct e1000_adapter *adapter,
+			       struct e1000_rx_ring *rx_ring,
+			       nanosecs_abs_t *time_stamp);
+#endif
+	void (*alloc_rx_buf) (struct e1000_adapter *adapter,
+			      struct e1000_rx_ring *rx_ring,
+				int cleaned_count);
+	struct e1000_rx_ring *rx_ring;      /* One per active queue */
+#ifdef CONFIG_E1000_NAPI
+	//struct napi_struct napi;
+#endif
+	int num_tx_queues;
+	int num_rx_queues;
+
+	u64 hw_csum_err;
+	u64 hw_csum_good;
+	u64 rx_hdr_split;
+	u32 alloc_rx_buff_failed;
+	u32 rx_int_delay;
+	u32 rx_abs_int_delay;
+	bool rx_csum;
+	unsigned int rx_ps_pages;
+	u32 gorc;
+	u64 gorc_old;
+	u16 rx_ps_bsize0;
+	u32 max_frame_size;
+	u32 min_frame_size;
+
+
+	/* OS defined structs */
+	struct rtnet_device *netdev;
+	struct pci_dev *pdev;
+	struct net_device_stats net_stats;
+
+	rtdm_irq_t irq_handle;
+	char  data_received;
+
+	/* structs defined in e1000_hw.h */
+	struct e1000_hw hw;
+	struct e1000_hw_stats stats;
+	struct e1000_phy_info phy_info;
+	struct e1000_phy_stats phy_stats;
+
+#ifdef SIOCGMIIPHY
+	/* Snapshot of PHY registers */
+	struct e1000_phy_regs phy_regs;
+#endif
+
+#ifdef ETHTOOL_TEST
+	u32 test_icr;
+	struct e1000_tx_ring test_tx_ring;
+	struct e1000_rx_ring test_rx_ring;
+#endif
+
+
+	int msg_enable;
+	/* to not mess up cache alignment, always add to the bottom */
+	unsigned long state;
+	u32 eeprom_wol;
+
+	u32 *config_space;
+
+	/* hardware capability, feature, and workaround flags */
+	unsigned int flags;
+
+	struct work_struct reset_task;
+	struct delayed_work watchdog_task;
+	struct delayed_work fifo_stall_task;
+	struct delayed_work phy_info_task;
+};
+
+#define E1000_FLAG_HAS_SMBUS                (1 << 0)
+#define E1000_FLAG_HAS_MANC2H               (1 << 1)
+#define E1000_FLAG_HAS_MSI                  (1 << 2)
+#define E1000_FLAG_MSI_ENABLED              (1 << 3)
+#define E1000_FLAG_HAS_INTR_MODERATION      (1 << 4)
+#define E1000_FLAG_RX_NEEDS_RESTART         (1 << 5)
+#define E1000_FLAG_BAD_TX_CARRIER_STATS_FD  (1 << 6)
+#define E1000_FLAG_INT_ASSERT_AUTO_MASK     (1 << 7)
+#define E1000_FLAG_QUAD_PORT_A              (1 << 8)
+#define E1000_FLAG_SMART_POWER_DOWN         (1 << 9)
+#ifdef NETIF_F_TSO
+#define E1000_FLAG_HAS_TSO                  (1 << 10)
+#ifdef NETIF_F_TSO6
+#define E1000_FLAG_HAS_TSO6                 (1 << 11)
+#endif
+#define E1000_FLAG_TSO_FORCE                (1 << 12)
+#endif
+#define E1000_FLAG_RX_RESTART_NOW           (1 << 13)
+
+enum e1000_state_t {
+	__E1000_TESTING,
+	__E1000_RESETTING,
+	__E1000_DOWN
+};
+
+extern char e1000_driver_name[];
+extern const char e1000_driver_version[];
+
+extern void e1000_power_up_phy(struct e1000_hw *hw);
+
+extern void e1000_set_ethtool_ops(struct net_device *netdev);
+extern void e1000_check_options(struct e1000_adapter *adapter);
+
+extern int e1000_up(struct e1000_adapter *adapter);
+extern void e1000_down(struct e1000_adapter *adapter);
+extern void e1000_reinit_locked(struct e1000_adapter *adapter);
+extern void e1000_reset(struct e1000_adapter *adapter);
+extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
+extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_update_stats(struct e1000_adapter *adapter);
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+#endif
+
+#endif /* _E1000_H_ */
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.h	2022-03-21 12:58:29.712885812 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_80003ES2LAN_H_
+#define _E1000_80003ES2LAN_H_
+
+#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL       0x00
+#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL        0x02
+#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL         0x10
+#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE  0x1F
+
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS    0x0008
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS    0x0800
+#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING   0x0010
+
+#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
+#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT   0x0000
+#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE          0x2000
+
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
+#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN        0x00010000
+
+#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN       0x8
+#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN     0x9
+
+/* GG82563 PHY Specific Status Register (Page 0, Register 16) */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE  0x0002 /* 1=Reversal Disabled */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK        0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI         0x0000 /* 00=Manual MDI */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX        0x0020 /* 01=Manual MDIX */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO        0x0060 /* 11=Auto crossover */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG          0x2000
+                                               /* 1=Reverse Auto-Negotiation */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK                0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5          0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25          0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_2_5        0x0006
+#define GG82563_MSCR_TX_CLK_1000MBPS_25         0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX           0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26) */
+/*
+ * 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-100M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define GG82563_DSPD_CABLE_LENGTH               0x0007
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PASS_FALSE_CARRIER         0x0800
+
+/* Max number of times Kumeran read/write should be validated */
+#define GG82563_MAX_KMRN_RETRY                  0x5
+
+/* Power Management Control Register (Page 193, Register 20) */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE     0x0001
+                                          /* 1=Enable SERDES Electrical Idle */
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING                 0x0010 /* Disable Padding */
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.h	2022-03-21 12:58:29.707885860 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/mpc8xx_fec.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_82543_H_
+#define _E1000_82543_H_
+
+#define PHY_PREAMBLE      0xFFFFFFFF
+#define PHY_PREAMBLE_SIZE 32
+#define PHY_SOF           0x1
+#define PHY_OP_READ       0x2
+#define PHY_OP_WRITE      0x1
+#define PHY_TURNAROUND    0x2
+
+#define TBI_COMPAT_ENABLED 0x1 /* Global "knob" for the workaround */
+/* If TBI_COMPAT_ENABLED, then this is the current state (on/off) */
+#define TBI_SBP_ENABLED    0x2
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/mpc8xx_fec.c	2022-03-21 12:58:29.701885919 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/rt_smc91111.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * BK Id: SCCS/s.fec.c 1.30 09/11/02 14:55:08 paulus
+ */
+/*
+ * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ *
+ * This version of the driver is specific to the FADS implementation,
+ * since the board contains control registers external to the processor
+ * for the control of the LevelOne LXT970 transceiver.  The MPC860T manual
+ * describes connections using the internal parallel port I/O, which
+ * is basically all of Port D.
+ *
+ * Includes support for the following PHYs: QS6612, LXT970, LXT971/2.
+ *
+ * Right now, I am very wasteful with the buffers.  I allocate memory
+ * pages and then divide them into 2K frame buffers.  This way I know I
+ * have buffers large enough to hold one frame within one buffer descriptor.
+ * Once I get this working, I will use 64 or 128 byte CPM buffers, which
+ * will be much more memory efficient and will easily handle lots of
+ * small packets.
+ *
+ * Much better multiple PHY support by Magnus Damm.
+ * Copyright (c) 2000 Ericsson Radio Systems AB.
+ *
+ * Make use of MII for PHY control configurable.
+ * Some fixes.
+ * Copyright (c) 2000-2002 Wolfgang Denk, DENX Software Engineering.
+ *
+ * Fixes for tx_full condition and relink when using MII.
+ * Support for AMD AM79C874 added.
+ * Thomas Lange, thomas@corelatus.com
+ *
+ * Added code for Multicast support, Frederic Goddeeris, Paul Geerinckx
+ * Copyright (c) 2002 Siemens Atea
+ *
+ * Ported to RTnet from "linuxppc_2_4_devel/arch/ppc/8xx_io/fec.c".
+ * Copyright (c) 2003 Wolfgang Grandegger (wg@denx.de)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/uaccess.h>
+
+#include <asm/8xx_immap.h>
+#include <asm/pgtable.h>
+#include <asm/mpc8xx.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <asm/commproc.h>
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+#error "MDIO for PHY configuration is not yet supported!"
+#endif
+
+#include <rtnet_port.h>
+
+MODULE_AUTHOR("Maintainer: Wolfgang Grandegger <wg@denx.de>");
+MODULE_DESCRIPTION("RTnet driver for the MPC8xx FEC Ethernet");
+MODULE_LICENSE("GPL");
+
+static unsigned int rx_pool_size =  0;
+MODULE_PARM(rx_pool_size, "i");
+MODULE_PARM_DESC(rx_pool_size, "Receive buffer pool size");
+
+#define RT_DEBUG(fmt,args...)
+
+/* multicast support
+ */
+/* #define DEBUG_MULTICAST */
+
+/* CRC polynomial (standard Ethernet CRC-32) used by the FEC for multicast group filtering
+ */
+#define FEC_CRC_POLY   0x04C11DB7
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+/* Forward declarations of some structures to support different PHYs
+*/
+
+typedef struct {
+	uint mii_data;
+	void (*funct)(uint mii_reg, struct net_device *dev, uint data);
+} phy_cmd_t;
+
+typedef struct {
+	uint id;
+	char *name;
+
+	const phy_cmd_t *config;
+	const phy_cmd_t *startup;
+	const phy_cmd_t *ack_int;
+	const phy_cmd_t *shutdown;
+} phy_info_t;
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+/* The number of Tx and Rx buffers.  These are allocated from the page
+ * pool.  The code may assume these are powers of two, so it is best
+ * to keep them that size.
+ * We don't need to allocate pages for the transmitter.  We just use
+ * the skbuffer directly.
+ */
+#define FEC_ENET_RX_PAGES	4
+#define FEC_ENET_RX_FRSIZE	2048
+#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
+#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
+#define TX_RING_SIZE		8	/* Must be power of two */
+#define TX_RING_MOD_MASK	7	/*   for this to work */
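+
+/*
+ * Ring indices are advanced modulo TX_RING_SIZE by masking, e.g.
+ *
+ *	fep->skb_cur = (fep->skb_cur + 1) & TX_RING_MOD_MASK;
+ *
+ * which is why TX_RING_SIZE must be a power of two (size 8 gives
+ * mask 7, so index 7 wraps back to 0).
+ */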
+
+/* Interrupt events/masks.
+*/
+#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
+#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
+#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
+#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
+#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
+#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
+#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
+#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
+#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
+#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */
+
+/* Bit definitions for the FEC ECNTRL, RCNTRL and TCNTRL registers.
+*/
+#define FEC_ECNTRL_PINMUX	0x00000004
+#define FEC_ECNTRL_ETHER_EN	0x00000002
+#define FEC_ECNTRL_RESET	0x00000001
+
+#define FEC_RCNTRL_BC_REJ	0x00000010
+#define FEC_RCNTRL_PROM		0x00000008
+#define FEC_RCNTRL_MII_MODE	0x00000004
+#define FEC_RCNTRL_DRT		0x00000002
+#define FEC_RCNTRL_LOOP		0x00000001
+
+#define FEC_TCNTRL_FDEN		0x00000004
+#define FEC_TCNTRL_HBC		0x00000002
+#define FEC_TCNTRL_GTS		0x00000001
+
+/* Delay to wait for FEC reset command to complete (in us)
+*/
+#define FEC_RESET_DELAY		50
+
+/* The FEC stores dest/src/type, data, and checksum for receive packets.
+ */
+#define PKT_MAXBUF_SIZE		1518
+#define PKT_MINBUF_SIZE		64
+#define PKT_MAXBLR_SIZE		1520
+
+/* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors.  The
+ * cur_rx and cur_tx point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller.  The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions.  The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
+struct fec_enet_private {
+	/* The addresses of the Tx/Rx-in-place packets/buffers. */
+	struct	rtskb *tx_skbuff[TX_RING_SIZE];
+	ushort	skb_cur;
+	ushort	skb_dirty;
+
+	/* CPM dual port RAM relative addresses.
+	*/
+	cbd_t	*rx_bd_base;		/* Address of Rx and Tx buffers. */
+	cbd_t	*tx_bd_base;
+	cbd_t	*cur_rx, *cur_tx;		/* The next free ring entry */
+	cbd_t	*dirty_tx;	/* The ring entries to be free()ed. */
+
+	/* Virtual addresses for the receive buffers because we can't
+	 * do a __va() on them anymore.
+	 */
+	unsigned char *rx_vaddr[RX_RING_SIZE];
+
+	struct	net_device_stats stats;
+	uint	tx_full;
+	rtdm_lock_t lock;
+	rtdm_irq_t irq_handle;
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	uint	phy_id;
+	uint	phy_id_done;
+	uint	phy_status;
+	uint	phy_speed;
+	phy_info_t	*phy;
+	struct tq_struct phy_task;
+
+	uint	sequence_done;
+
+	uint	phy_addr;
+
+	struct timer_list phy_timer_list;
+	u16 old_status;
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+	int	link;
+	int	old_link;
+	int	full_duplex;
+
+};
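+
+/*
+ * Note on the descriptor rings above: after wrapping, cur_tx == dirty_tx
+ * both when the ring is completely empty and when it is completely full.
+ * The driver tells the two cases apart with the tx_full flag and the
+ * BD_ENET_TX_READY bit (see fec_enet_start_xmit() and fec_enet_tx()).
+ */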
+
+static int  fec_enet_open(struct rtnet_device *rtdev);
+static int  fec_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev);
+static void fec_enet_tx(struct rtnet_device *rtdev);
+static void fec_enet_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp);
+static int fec_enet_interrupt(rtdm_irq_t *irq_handle);
+static int  fec_enet_close(struct rtnet_device *dev);
+static void fec_restart(struct rtnet_device *rtdev, int duplex);
+static void fec_stop(struct rtnet_device *rtdev);
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+static void fec_enet_mii(struct net_device *dev);
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+static struct net_device_stats *fec_enet_get_stats(struct rtnet_device *rtdev);
+#ifdef ORIGINAL_VERSION
+static void set_multicast_list(struct net_device *dev);
+#endif /* ORIGINAL_VERSION */
+
+static struct rtnet_device *rtdev_root = NULL; /* for cleanup */
+
+static	ushort	my_enet_addr[3];
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr);
+
+static void mdio_callback(uint regval, struct net_device *dev, uint data);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+
+#if defined(CONFIG_FEC_DP83846A)
+static void mdio_timer_callback(unsigned long data);
+#endif /* CONFIG_FEC_DP83846A */
+
+/* MII processing.  We keep this as simple as possible.  Requests are
+ * placed on the list (if there is room).  When the request is finished
+ * by the MII, an optional function may be called.
+ */
+typedef struct mii_list {
+	uint	mii_regval;
+	void	(*mii_func)(uint val, struct net_device *dev, uint data);
+	struct	mii_list *mii_next;
+	uint	mii_data;
+} mii_list_t;
+
+#define		NMII	20
+mii_list_t	mii_cmds[NMII];
+mii_list_t	*mii_free;
+mii_list_t	*mii_head;
+mii_list_t	*mii_tail;
+
+typedef struct mdio_read_data {
+	u16 regval;
+	struct task_struct *sleeping_task;
+} mdio_read_data_t;
+
+static int	mii_queue(struct net_device *dev, int request,
+				void (*func)(uint, struct net_device *, uint), uint data);
+static void mii_queue_relink(uint mii_reg, struct net_device *dev, uint data);
+
+/* Make MII read/write commands for the FEC.
+*/
+#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | \
+						(VAL & 0xffff))
+#define mk_mii_end	0
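+
+/*
+ * These constants encode an MII management frame as expected by the
+ * FEC MII data register: start (01) and opcode (10 = read, 01 = write)
+ * in bits 31-28, the register address in bits 22-18, turnaround (10)
+ * in bits 17-16 and the data in bits 15-0.  The PHY address
+ * (bits 27-23) is patched in later by mii_queue() via
+ * "regval |= fep->phy_addr << 23".
+ */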
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+/* Transmitter timeout.
+*/
+#define TX_TIMEOUT (2*HZ)
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+/* Register definitions for the PHY.
+*/
+
+#define MII_REG_CR          0  /* Control Register                         */
+#define MII_REG_SR          1  /* Status Register                          */
+#define MII_REG_PHYIR1      2  /* PHY Identification Register 1            */
+#define MII_REG_PHYIR2      3  /* PHY Identification Register 2            */
+#define MII_REG_ANAR        4  /* A-N Advertisement Register               */
+#define MII_REG_ANLPAR      5  /* A-N Link Partner Ability Register        */
+#define MII_REG_ANER        6  /* A-N Expansion Register                   */
+#define MII_REG_ANNPTR      7  /* A-N Next Page Transmit Register          */
+#define MII_REG_ANLPRNPR    8  /* A-N Link Partner Received Next Page Reg. */
+
+/* values for phy_status */
+
+#define PHY_CONF_ANE	0x0001  /* 1 auto-negotiation enabled */
+#define PHY_CONF_LOOP	0x0002  /* 1 loopback mode enabled */
+#define PHY_CONF_SPMASK	0x00f0  /* mask for speed */
+#define PHY_CONF_10HDX	0x0010  /* 10 Mbit half duplex supported */
+#define PHY_CONF_10FDX	0x0020  /* 10 Mbit full duplex supported */
+#define PHY_CONF_100HDX	0x0040  /* 100 Mbit half duplex supported */
+#define PHY_CONF_100FDX	0x0080  /* 100 Mbit full duplex supported */
+
+#define PHY_STAT_LINK	0x0100  /* 1 up - 0 down */
+#define PHY_STAT_FAULT	0x0200  /* 1 remote fault */
+#define PHY_STAT_ANC	0x0400  /* 1 auto-negotiation complete	*/
+#define PHY_STAT_SPMASK	0xf000  /* mask for speed */
+#define PHY_STAT_10HDX	0x1000  /* 10 Mbit half duplex selected	*/
+#define PHY_STAT_10FDX	0x2000  /* 10 Mbit full duplex selected	*/
+#define PHY_STAT_100HDX	0x4000  /* 100 Mbit half duplex selected */
+#define PHY_STAT_100FDX	0x8000  /* 100 Mbit full duplex selected */
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+
+static int
+fec_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct fec_enet_private *fep;
+	volatile fec_t	*fecp;
+	volatile cbd_t	*bdp;
+	rtdm_lockctx_t	context;
+
+
+	RT_DEBUG(__FUNCTION__": ...\n");
+
+	fep = rtdev->priv;
+	fecp = (volatile fec_t*)rtdev->base_addr;
+
+	if (!fep->link) {
+		/* Link is down or autonegotiation is in progress. */
+		return 1;
+	}
+
+	/* Fill in a Tx ring entry */
+	bdp = fep->cur_tx;
+
+#ifndef final_version
+	if (bdp->cbd_sc & BD_ENET_TX_READY) {
+		/* Ooops.  All transmit buffers are full.  Bail out.
+		 * This should not happen, since dev->tbusy should be set.
+		 */
+		rtdm_printk("%s: tx queue full!.\n", rtdev->name);
+		return 1;
+	}
+#endif
+
+	/* Clear all of the status flags.
+	 */
+	bdp->cbd_sc &= ~BD_ENET_TX_STATS;
+
+	/* Set buffer length and buffer pointer.
+	*/
+	bdp->cbd_bufaddr = __pa(skb->data);
+	bdp->cbd_datlen = skb->len;
+
+	/* Save skb pointer.
+	*/
+	fep->tx_skbuff[fep->skb_cur] = skb;
+
+	fep->stats.tx_bytes += skb->len;
+	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
+
+	rtdm_lock_get_irqsave(&fep->lock, context);
+
+	/* Get and patch time stamp just before the transmission */
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
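+	/*
+	 * The value found in *xmit_stamp is treated as an offset: the
+	 * field ends up holding the absolute transmission time plus that
+	 * offset, stored in big-endian (network) byte order.
+	 */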
+
+	/* Push the data cache so the CPM does not get stale memory
+	 * data.
+	 */
+	flush_dcache_range((unsigned long)skb->data,
+			   (unsigned long)skb->data + skb->len);
+
+	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
+	 * it's the last BD of the frame, and to put the CRC on the end.
+	 */
+
+	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
+			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
+
+	//rtdev->trans_start = jiffies;
+
+	/* Trigger transmission start */
+	fecp->fec_x_des_active = 0x01000000;
+
+	/* If this was the last BD in the ring, start at the beginning again.
+	*/
+	if (bdp->cbd_sc & BD_ENET_TX_WRAP) {
+		bdp = fep->tx_bd_base;
+	} else {
+		bdp++;
+	}
+
+	if (bdp->cbd_sc & BD_ENET_TX_READY) {
+		rtnetif_stop_queue(rtdev);
+		fep->tx_full = 1;
+	}
+
+	fep->cur_tx = (cbd_t *)bdp;
+
+	rtdm_lock_put_irqrestore(&fep->lock, context);
+
+	return 0;
+}
+
+#ifdef ORIGINAL_VERSION
+static void
+fec_timeout(struct net_device *dev)
+{
+	struct fec_enet_private *fep = dev->priv;
+
+	if (fep->link || fep->old_link) {
+		/* Link status changed - print timeout message */
+		printk("%s: transmit timed out.\n", dev->name);
+	}
+
+	fep->stats.tx_errors++;
+#ifndef final_version
+	if (fep->link) {
+		int	i;
+		cbd_t	*bdp;
+
+		printk ("Ring data dump: "
+			"cur_tx %p%s dirty_tx %p cur_rx %p\n",
+		       fep->cur_tx,
+		       fep->tx_full ? " (full)" : "",
+		       fep->dirty_tx,
+		       fep->cur_rx);
+
+		bdp = fep->tx_bd_base;
+		printk(" tx: %u buffers\n",  TX_RING_SIZE);
+		for (i = 0 ; i < TX_RING_SIZE; i++) {
+			printk("  %08x: %04x %04x %08x\n",
+			       (uint) bdp,
+			       bdp->cbd_sc,
+			       bdp->cbd_datlen,
+			       bdp->cbd_bufaddr);
+			bdp++;
+		}
+
+		bdp = fep->rx_bd_base;
+		printk(" rx: %lu buffers\n",  RX_RING_SIZE);
+		for (i = 0 ; i < RX_RING_SIZE; i++) {
+			printk("  %08x: %04x %04x %08x\n",
+			       (uint) bdp,
+			       bdp->cbd_sc,
+			       bdp->cbd_datlen,
+			       bdp->cbd_bufaddr);
+			bdp++;
+		}
+	}
+#endif
+	if (!fep->tx_full) {
+		netif_wake_queue(dev);
+	}
+}
+#endif /* ORIGINAL_VERSION */
+
+/* The interrupt handler.
+ * This is called from the MPC core interrupt.
+ */
+static int fec_enet_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	int packets = 0;
+	volatile fec_t	*fecp;
+	uint	int_events;
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+
+	fecp = (volatile fec_t*)rtdev->base_addr;
+
+	/* Get the interrupt events that caused us to be here.
+	*/
+	while ((int_events = fecp->fec_ievent) != 0) {
+		fecp->fec_ievent = int_events;
+		if ((int_events & (FEC_ENET_HBERR | FEC_ENET_BABR |
+				   FEC_ENET_BABT | FEC_ENET_EBERR)) != 0) {
+			rtdm_printk("FEC ERROR %x\n", int_events);
+		}
+
+		/* Handle receive event in its own function.
+		 */
+		if (int_events & FEC_ENET_RXF) {
+			fec_enet_rx(rtdev, &packets, &time_stamp);
+		}
+
+		/* Transmit OK, or non-fatal error. Update the buffer
+		   descriptors. FEC handles all errors, we just discover
+		   them as part of the transmit process.
+		*/
+		if (int_events & FEC_ENET_TXF) {
+			fec_enet_tx(rtdev);
+		}
+
+		if (int_events & FEC_ENET_MII) {
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+			fec_enet_mii(dev);
+#else
+			rtdm_printk("%s[%d] %s: unexpected FEC_ENET_MII event\n",
+				    __FILE__, __LINE__, __FUNCTION__);
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+		}
+
+	}
+
+	if (packets > 0)
+		rt_mark_stack_mgr(rtdev);
+	return RTDM_IRQ_HANDLED;
+}
+
+
+static void
+fec_enet_tx(struct rtnet_device *rtdev)
+{
+	struct rtskb *skb;
+	struct	fec_enet_private *fep = rtdev->priv;
+	volatile cbd_t	*bdp;
+
+	rtdm_lock_get(&fep->lock);
+	bdp = fep->dirty_tx;
+
+	while ((bdp->cbd_sc&BD_ENET_TX_READY) == 0) {
+		if (bdp == fep->cur_tx && fep->tx_full == 0) break;
+
+		skb = fep->tx_skbuff[fep->skb_dirty];
+		/* Check for errors. */
+		if (bdp->cbd_sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+				   BD_ENET_TX_RL | BD_ENET_TX_UN |
+				   BD_ENET_TX_CSL)) {
+			fep->stats.tx_errors++;
+			if (bdp->cbd_sc & BD_ENET_TX_HB)  /* No heartbeat */
+				fep->stats.tx_heartbeat_errors++;
+			if (bdp->cbd_sc & BD_ENET_TX_LC)  /* Late collision */
+				fep->stats.tx_window_errors++;
+			if (bdp->cbd_sc & BD_ENET_TX_RL)  /* Retrans limit */
+				fep->stats.tx_aborted_errors++;
+			if (bdp->cbd_sc & BD_ENET_TX_UN)  /* Underrun */
+				fep->stats.tx_fifo_errors++;
+			if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */
+				fep->stats.tx_carrier_errors++;
+		} else {
+			fep->stats.tx_packets++;
+		}
+
+#ifndef final_version
+		if (bdp->cbd_sc & BD_ENET_TX_READY)
+			rtdm_printk("HEY! Enet xmit interrupt and TX_READY.\n");
+#endif
+		/* Deferred means some collisions occurred during transmit,
+		 * but we eventually sent the packet OK.
+		 */
+		if (bdp->cbd_sc & BD_ENET_TX_DEF)
+			fep->stats.collisions++;
+
+		/* Free the sk buffer associated with this last transmit.
+		 */
+		dev_kfree_rtskb(skb);
+		fep->tx_skbuff[fep->skb_dirty] = NULL;
+		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
+
+		/* Update pointer to next buffer descriptor to be transmitted.
+		 */
+		if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+			bdp = fep->tx_bd_base;
+		else
+			bdp++;
+
+		/* Since we have freed up a buffer, the ring is no longer
+		 * full.
+		 */
+		if (fep->tx_full) {
+			fep->tx_full = 0;
+			if (rtnetif_queue_stopped(rtdev))
+				rtnetif_wake_queue(rtdev);
+		}
+	}
+	fep->dirty_tx = (cbd_t *)bdp;
+	rtdm_lock_put(&fep->lock);
+}
+
+
+/* During a receive, the cur_rx points to the current incoming buffer.
+ * When we update through the ring, if the next incoming buffer has
+ * not been given to the system, we just set the empty indicator,
+ * effectively tossing the packet.
+ */
+static void
+fec_enet_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp)
+{
+	struct	fec_enet_private *fep;
+	volatile fec_t	*fecp;
+	volatile cbd_t *bdp;
+	struct	rtskb *skb;
+	ushort	pkt_len;
+	__u8 *data;
+
+	fep = rtdev->priv;
+	fecp = (volatile fec_t*)rtdev->base_addr;
+
+	/* First, grab all of the stats for the incoming packet.
+	 * These get messed up if we get called due to a busy condition.
+	 */
+	bdp = fep->cur_rx;
+
+while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
+
+#ifndef final_version
+	/* Since we have allocated space to hold a complete frame,
+	 * the last indicator should be set.
+	 */
+	if ((bdp->cbd_sc & BD_ENET_RX_LAST) == 0)
+		rtdm_printk("FEC ENET: rcv is not +last\n");
+#endif
+
+	/* Check for errors. */
+	if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+		fep->stats.rx_errors++;
+		if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
+		/* Frame too long or too short. */
+			fep->stats.rx_length_errors++;
+		}
+		if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
+			fep->stats.rx_frame_errors++;
+		if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
+			fep->stats.rx_crc_errors++;
+		if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
+			fep->stats.rx_crc_errors++;
+	}
+
+	/* Report late collisions as a frame error.
+	 * On this error, the BD is closed, but we don't know what we
+	 * have in the buffer.  So, just drop this frame on the floor.
+	 */
+	if (bdp->cbd_sc & BD_ENET_RX_CL) {
+		fep->stats.rx_errors++;
+		fep->stats.rx_frame_errors++;
+		goto rx_processing_done;
+	}
+
+	/* Process the incoming frame.
+	 */
+	fep->stats.rx_packets++;
+	pkt_len = bdp->cbd_datlen;
+	fep->stats.rx_bytes += pkt_len;
+	data = fep->rx_vaddr[bdp - fep->rx_bd_base];
+
+	/* This does 16 byte alignment, exactly what we need.
+	 * The packet length includes FCS, but we don't want to
+	 * include that when passing upstream as it messes up
+	 * bridging applications.
+	 */
+	skb = rtnetdev_alloc_rtskb(rtdev, pkt_len-4);
+
+	if (skb == NULL) {
+		rtdm_printk("%s: Memory squeeze, dropping packet.\n", rtdev->name);
+		fep->stats.rx_dropped++;
+	} else {
+		rtskb_put(skb,pkt_len-4); /* Make room */
+		memcpy(skb->data, data, pkt_len-4);
+		skb->protocol=rt_eth_type_trans(skb,rtdev);
+		skb->time_stamp = *time_stamp;
+		rtnetif_rx(skb);
+		(*packets)++;
+	}
+rx_processing_done:
+
+	/* Clear the status flags for this buffer.
+	*/
+	bdp->cbd_sc &= ~BD_ENET_RX_STATS;
+
+	/* Mark the buffer empty.
+	*/
+	bdp->cbd_sc |= BD_ENET_RX_EMPTY;
+
+	/* Update BD pointer to next entry.
+	*/
+	if (bdp->cbd_sc & BD_ENET_RX_WRAP)
+		bdp = fep->rx_bd_base;
+	else
+		bdp++;
+
+	/* Doing this here will keep the FEC running while we process
+	 * incoming frames.  On a heavily loaded network, we should be
+	 * able to keep up at the expense of system resources.
+	 */
+	fecp->fec_r_des_active = 0x01000000;
+   } /* while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) */
+	fep->cur_rx = (cbd_t *)bdp;
+
+}
+
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+static void
+fec_enet_mii(struct net_device *dev)
+{
+	struct	fec_enet_private *fep;
+	volatile fec_t	*ep;
+	mii_list_t	*mip;
+	uint		mii_reg;
+
+	fep = (struct fec_enet_private *)dev->priv;
+	ep = &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec);
+	mii_reg = ep->fec_mii_data;
+
+	if ((mip = mii_head) == NULL) {
+		printk("MII and no head!\n");
+		return;
+	}
+
+	if (mip->mii_func != NULL)
+		(*(mip->mii_func))(mii_reg, dev, mip->mii_data);
+
+	mii_head = mip->mii_next;
+	mip->mii_next = mii_free;
+	mii_free = mip;
+
+	if ((mip = mii_head) != NULL) {
+		ep->fec_mii_data = mip->mii_regval;
+	}
+}
+
+static int
+mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *, uint), uint data)
+{
+	struct fec_enet_private *fep;
+	unsigned long	flags;
+	mii_list_t	*mip;
+	int		retval;
+
+	/* Add PHY address to register command.
+	*/
+	fep = dev->priv;
+	regval |= fep->phy_addr << 23;
+
+	retval = 0;
+
+	save_flags(flags);
+	cli();
+
+	if ((mip = mii_free) != NULL) {
+		mii_free = mip->mii_next;
+		mip->mii_regval = regval;
+		mip->mii_func = func;
+		mip->mii_next = NULL;
+		mip->mii_data = data;
+		if (mii_head) {
+			mii_tail->mii_next = mip;
+			mii_tail = mip;
+		} else {
+			mii_head = mii_tail = mip;
+			(&(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec))->fec_mii_data = regval;
+		}
+	} else {
+		retval = 1;
+	}
+
+	restore_flags(flags);
+
+	return(retval);
+}
+
+static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
+{
+	int k;
+
+	if(!c)
+		return;
+
+	for(k = 0; (c+k)->mii_data != mk_mii_end; k++)
+		mii_queue(dev, (c+k)->mii_data, (c+k)->funct, 0);
+}
+
+static void mii_parse_sr(uint mii_reg, struct net_device *dev, uint data)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);
+
+	if (mii_reg & 0x0004)
+		s |= PHY_STAT_LINK;
+	if (mii_reg & 0x0010)
+		s |= PHY_STAT_FAULT;
+	if (mii_reg & 0x0020)
+		s |= PHY_STAT_ANC;
+
+	fep->phy_status = s;
+	fep->link = (s & PHY_STAT_LINK) ? 1 : 0;
+}
+
+static void mii_parse_cr(uint mii_reg, struct net_device *dev, uint data)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_CONF_ANE | PHY_CONF_LOOP);
+
+	if (mii_reg & 0x1000)
+		s |= PHY_CONF_ANE;
+	if (mii_reg & 0x4000)
+		s |= PHY_CONF_LOOP;
+
+	fep->phy_status = s;
+}
+
+static void mii_parse_anar(uint mii_reg, struct net_device *dev, uint data)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_CONF_SPMASK);
+
+	if (mii_reg & 0x0020)
+		s |= PHY_CONF_10HDX;
+	if (mii_reg & 0x0040)
+		s |= PHY_CONF_10FDX;
+	if (mii_reg & 0x0080)
+		s |= PHY_CONF_100HDX;
+	if (mii_reg & 0x0100)
+		s |= PHY_CONF_100FDX;
+
+	fep->phy_status = s;
+}
+
+/* ------------------------------------------------------------------------- */
+/* The Level one LXT970 is used by many boards				     */
+
+#ifdef CONFIG_FEC_LXT970
+
+#define MII_LXT970_MIRROR    16  /* Mirror register           */
+#define MII_LXT970_IER       17  /* Interrupt Enable Register */
+#define MII_LXT970_ISR       18  /* Interrupt Status Register */
+#define MII_LXT970_CONFIG    19  /* Configuration Register    */
+#define MII_LXT970_CSR       20  /* Chip Status Register      */
+
+static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev, uint data)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	if (mii_reg & 0x0800) {
+		if (mii_reg & 0x1000)
+			s |= PHY_STAT_100FDX;
+		else
+			s |= PHY_STAT_100HDX;
+	}
+	else {
+		if (mii_reg & 0x1000)
+			s |= PHY_STAT_10FDX;
+		else
+			s |= PHY_STAT_10HDX;
+	}
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_lxt970 = {
+	0x07810000,
+	"LXT970",
+
+	(const phy_cmd_t []) {  /* config */
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup - enable interrupts */
+		{ mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		/* read SR and ISR to acknowledge */
+
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_LXT970_ISR), NULL },
+
+		/* find out the current status */
+
+		{ mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+#endif /* CONFIG_FEC_LXT970 */
+
+/* ------------------------------------------------------------------------- */
+/* The Level one LXT971 is used on some of my custom boards                  */
+
+#ifdef CONFIG_FEC_LXT971
+
+/* register definitions for the 971 */
+
+#define MII_LXT971_PCR       16  /* Port Control Register     */
+#define MII_LXT971_SR2       17  /* Status Register 2         */
+#define MII_LXT971_IER       18  /* Interrupt Enable Register */
+#define MII_LXT971_ISR       19  /* Interrupt Status Register */
+#define MII_LXT971_LCR       20  /* LED Control Register      */
+#define MII_LXT971_TCR       30  /* Transmit Control Register */
+
+/*
+ * I had some nice ideas of running the MDIO faster...
+ * The 971 should support 8MHz and I tried it, but things acted really
+ * weird, so 2.5 MHz ought to be enough for anyone...
+ */
+
+static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev, uint data)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	if (mii_reg & 0x4000) {
+		if (mii_reg & 0x0200)
+			s |= PHY_STAT_100FDX;
+		else
+			s |= PHY_STAT_100HDX;
+	}
+	else {
+		if (mii_reg & 0x0200)
+			s |= PHY_STAT_10FDX;
+		else
+			s |= PHY_STAT_10HDX;
+	}
+	if (mii_reg & 0x0008)
+		s |= PHY_STAT_FAULT;
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_lxt971 = {
+	0x0001378e,
+	"LXT971",
+
+	(const phy_cmd_t []) {  /* config */
+//		{ mk_mii_write(MII_REG_ANAR, 0x021), NULL }, /* 10  Mbps, HD */
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup - enable interrupts */
+		{ mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+
+		/* Somehow the 971 reports the link as down on the
+		 * first read after power-up.
+		 * Read here to get a valid value in ack_int. */
+
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		/* find out the current status */
+
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
+
+		/* we only need to read ISR to acknowledge */
+
+		{ mk_mii_read(MII_LXT971_ISR), NULL },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+#endif /* CONFIG_FEC_LXT971 */
+
+
+/* ------------------------------------------------------------------------- */
+/* The Quality Semiconductor QS6612 is used on the RPX CLLF                  */
+
+#ifdef CONFIG_FEC_QS6612
+
+/* register definitions */
+
+#define MII_QS6612_MCR       17  /* Mode Control Register      */
+#define MII_QS6612_FTR       27  /* Factory Test Register      */
+#define MII_QS6612_MCO       28  /* Misc. Control Register     */
+#define MII_QS6612_ISR       29  /* Interrupt Source Register  */
+#define MII_QS6612_IMR       30  /* Interrupt Mask Register    */
+#define MII_QS6612_PCR       31  /* 100BaseTx PHY Control Reg. */
+
+static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev, uint data)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	switch((mii_reg >> 2) & 7) {
+	case 1: s |= PHY_STAT_10HDX;  break;
+	case 2: s |= PHY_STAT_100HDX; break;
+	case 5: s |= PHY_STAT_10FDX;  break;
+	case 6: s |= PHY_STAT_100FDX; break;
+	}
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_qs6612 = {
+	0x00181440,
+	"QS6612",
+
+	(const phy_cmd_t []) {  /* config */
+//	{ mk_mii_write(MII_REG_ANAR, 0x061), NULL }, /* 10  Mbps */
+
+		/* The PHY powers up isolated on the RPX,
+		 * so send a command to allow operation.
+		 */
+
+		{ mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },
+
+		/* parse cr and anar to get some info */
+
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup - enable interrupts */
+		{ mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+
+		/* we need to read ISR, SR and ANER to acknowledge */
+
+		{ mk_mii_read(MII_QS6612_ISR), NULL },
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_REG_ANER), NULL },
+
+		/* read pcr to get info */
+
+		{ mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+#endif /* CONFIG_FEC_QS6612 */
+
+/* ------------------------------------------------------------------------- */
+/* The Advanced Micro Devices AM79C874 is used on the ICU862		     */
+
+#ifdef CONFIG_FEC_AM79C874
+
+/* register definitions for the 79C874 */
+
+#define MII_AM79C874_MFR	16  /* Miscellaneous Features Register      */
+#define MII_AM79C874_ICSR	17  /* Interrupt Control/Status Register    */
+#define MII_AM79C874_DR		18  /* Diagnostic Register		    */
+#define MII_AM79C874_PMLR	19  /* Power Management & Loopback Register */
+#define MII_AM79C874_MCR	21  /* Mode Control Register		    */
+#define MII_AM79C874_DC		23  /* Disconnect Counter		    */
+#define MII_AM79C874_REC	24  /* Receiver Error Counter		    */
+
+static void mii_parse_amd79c874_dr(uint mii_reg, struct net_device *dev, uint data)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	/* Register 18: Bit 10 is data rate, 11 is Duplex */
+	switch ((mii_reg >> 10) & 3) {
+	case 0:	s |= PHY_STAT_10HDX;	break;
+	case 1:	s |= PHY_STAT_100HDX;	break;
+	case 2:	s |= PHY_STAT_10FDX;	break;
+	case 3:	s |= PHY_STAT_100FDX;	break;
+	}
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_amd79c874 = {
+	0x00022561,
+	"AM79C874",
+
+	(const phy_cmd_t []) {  /* config */
+//		{ mk_mii_write(MII_REG_ANAR, 0x021), NULL }, /* 10  Mbps, HD */
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup - enable interrupts */
+		{ mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		/* find out the current status */
+
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_AM79C874_DR), mii_parse_amd79c874_dr },
+
+		/* we only need to read ICSR to acknowledge */
+
+		{ mk_mii_read(MII_AM79C874_ICSR), NULL },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+#endif /* CONFIG_FEC_AM79C874 */
+
+/* -------------------------------------------------------------------- */
+/* The National Semiconductor DP83843BVJE is used on a Mediatrix board  */
+/* -------------------------------------------------------------------- */
+
+#ifdef CONFIG_FEC_DP83843
+
+/* Register definitions */
+#define MII_DP83843_PHYSTS 0x10  /* PHY Status Register */
+#define MII_DP83843_MIPSCR 0x11  /* Specific Status Register */
+#define MII_DP83843_MIPGSR 0x12  /* Generic Status Register */
+
+static void mii_parse_dp83843_physts(uint mii_reg, struct net_device *dev, uint data)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	if (mii_reg & 0x0002)
+	{
+		if (mii_reg & 0x0004)
+			s |= PHY_STAT_10FDX;
+		else
+			s |= PHY_STAT_10HDX;
+	}
+	else
+	{
+		if (mii_reg & 0x0004)
+			s |= PHY_STAT_100FDX;
+		else
+			s |= PHY_STAT_100HDX;
+	}
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_dp83843 = {
+	0x020005c1,
+	"DP83843BVJE",
+
+	(const phy_cmd_t []) {  /* config */
+		{ mk_mii_write(MII_REG_ANAR, 0x01E1), NULL  }, /* Auto-Negotiation Register Control set to    */
+							       /* auto-negotiate 10/100 Mbps, Half/Full duplex */
+		{ mk_mii_read(MII_REG_CR),   mii_parse_cr   },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup */
+		{ mk_mii_write(MII_DP83843_MIPSCR, 0x0002), NULL }, /* Enable interrupts */
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL         }, /* Enable and Restart Auto-Negotiation */
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr		 },
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_DP83843_PHYSTS), mii_parse_dp83843_physts },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		{ mk_mii_read(MII_DP83843_MIPGSR), NULL },  /* Acknowledge interrupts */
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },  /* Find out the current status */
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_DP83843_PHYSTS), mii_parse_dp83843_physts },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_end, }
+	}
+};
+
+#endif /* CONFIG_FEC_DP83843 */
+
+
+/* ----------------------------------------------------------------- */
+/* The National Semiconductor DP83846A is used on a Mediatrix board  */
+/* ----------------------------------------------------------------- */
+
+#ifdef CONFIG_FEC_DP83846A
+
+/* Register definitions */
+#define MII_DP83846A_PHYSTS 0x10  /* PHY Status Register */
+
+static void mii_parse_dp83846a_physts(uint mii_reg, struct net_device *dev, uint data)
+{
+	volatile struct fec_enet_private *fep = (struct fec_enet_private *)dev->priv;
+	uint s = fep->phy_status;
+	int link_change_mask;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	if (mii_reg & 0x0002) {
+		if (mii_reg & 0x0004)
+			s |= PHY_STAT_10FDX;
+		else
+			s |= PHY_STAT_10HDX;
+	}
+	else {
+		if (mii_reg & 0x0004)
+			s |= PHY_STAT_100FDX;
+		else
+			s |= PHY_STAT_100HDX;
+	}
+
+	fep->phy_status = s;
+
+	link_change_mask = PHY_STAT_LINK | PHY_STAT_10FDX | PHY_STAT_10HDX | PHY_STAT_100FDX | PHY_STAT_100HDX;
+	if(fep->old_status != (link_change_mask & s))
+	{
+		fep->old_status = (link_change_mask & s);
+		mii_queue_relink(mii_reg, dev, 0);
+	}
+}
+
+static phy_info_t phy_info_dp83846a = {
+	0x020005c2,
+	"DP83846A",
+
+	(const phy_cmd_t []) {  /* config */
+		{ mk_mii_write(MII_REG_ANAR, 0x01E1), NULL  }, /* Auto-Negotiation Register Control set to    */
+							       /* auto-negotiate 10/100 Mbps, Half/Full duplex */
+		{ mk_mii_read(MII_REG_CR),   mii_parse_cr   },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup */
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* Enable and Restart Auto-Negotiation */
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr   },
+		{ mk_mii_read(MII_DP83846A_PHYSTS), mii_parse_dp83846a_physts },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr   },
+		{ mk_mii_read(MII_DP83846A_PHYSTS), mii_parse_dp83846a_physts },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_end, }
+	}
+};
+
+#endif /* CONFIG_FEC_DP83846A */
+
+
+static phy_info_t *phy_info[] = {
+
+#ifdef CONFIG_FEC_LXT970
+	&phy_info_lxt970,
+#endif /* CONFIG_FEC_LXT970 */
+
+#ifdef CONFIG_FEC_LXT971
+	&phy_info_lxt971,
+#endif /* CONFIG_FEC_LXT971 */
+
+#ifdef CONFIG_FEC_QS6612
+	&phy_info_qs6612,
+#endif /* CONFIG_FEC_QS6612 */
+
+#ifdef CONFIG_FEC_AM79C874
+	&phy_info_amd79c874,
+#endif /* CONFIG_FEC_AM79C874 */
+
+#ifdef CONFIG_FEC_DP83843
+	&phy_info_dp83843,
+#endif /* CONFIG_FEC_DP83843 */
+
+#ifdef CONFIG_FEC_DP83846A
+	&phy_info_dp83846a,
+#endif /* CONFIG_FEC_DP83846A */
+
+	NULL
+};
+
+static void mii_display_status(struct net_device *dev)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	if (!fep->link && !fep->old_link) {
+		/* Link is still down - don't print anything */
+		return;
+	}
+
+	printk("%s: status: ", dev->name);
+
+	if (!fep->link) {
+		printk("link down");
+	} else {
+		printk("link up");
+
+		switch(s & PHY_STAT_SPMASK) {
+		case PHY_STAT_100FDX: printk(", 100 Mbps Full Duplex"); break;
+		case PHY_STAT_100HDX: printk(", 100 Mbps Half Duplex"); break;
+		case PHY_STAT_10FDX:  printk(", 10 Mbps Full Duplex");  break;
+		case PHY_STAT_10HDX:  printk(", 10 Mbps Half Duplex");  break;
+		default:
+			printk(", Unknown speed/duplex");
+		}
+
+		if (s & PHY_STAT_ANC)
+			printk(", auto-negotiation complete");
+	}
+
+	if (s & PHY_STAT_FAULT)
+		printk(", remote fault");
+
+	printk(".\n");
+}
+
+static void mii_display_config(struct net_device *dev)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	printk("%s: config: auto-negotiation ", dev->name);
+
+	if (s & PHY_CONF_ANE)
+		printk("on");
+	else
+		printk("off");
+
+	if (s & PHY_CONF_100FDX)
+		printk(", 100FDX");
+	if (s & PHY_CONF_100HDX)
+		printk(", 100HDX");
+	if (s & PHY_CONF_10FDX)
+		printk(", 10FDX");
+	if (s & PHY_CONF_10HDX)
+		printk(", 10HDX");
+	if (!(s & PHY_CONF_SPMASK))
+		printk(", No speed/duplex selected?");
+
+	if (s & PHY_CONF_LOOP)
+		printk(", loopback enabled");
+
+	printk(".\n");
+
+	fep->sequence_done = 1;
+}
+
+static void mii_relink(struct net_device *dev)
+{
+	struct fec_enet_private *fep = dev->priv;
+	int duplex;
+
+	fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
+	mii_display_status(dev);
+	fep->old_link = fep->link;
+
+	if (fep->link) {
+		duplex = 0;
+		if (fep->phy_status
+		    & (PHY_STAT_100FDX | PHY_STAT_10FDX))
+			duplex = 1;
+		fec_restart(dev, duplex);
+
+		if (netif_queue_stopped(dev)) {
+			netif_wake_queue(dev);
+		}
+	} else {
+		netif_stop_queue(dev);
+		fec_stop(dev);
+	}
+}
+
+static void mii_queue_relink(uint mii_reg, struct net_device *dev, uint data)
+{
+	struct fec_enet_private *fep = dev->priv;
+
+	fep->phy_task.routine = (void *)mii_relink;
+	fep->phy_task.data = dev;
+	schedule_task(&fep->phy_task);
+}
+
+static void mii_queue_config(uint mii_reg, struct net_device *dev, uint data)
+{
+	struct fec_enet_private *fep = dev->priv;
+
+	fep->phy_task.routine = (void *)mii_display_config;
+	fep->phy_task.data = dev;
+	schedule_task(&fep->phy_task);
+}
+
+
+
+phy_cmd_t phy_cmd_relink[] = { { mk_mii_read(MII_REG_CR), mii_queue_relink },
+			       { mk_mii_end, } };
+phy_cmd_t phy_cmd_config[] = { { mk_mii_read(MII_REG_CR), mii_queue_config },
+			       { mk_mii_end, } };
+
+
+
+/* Read remainder of PHY ID.
+*/
+static void
+mii_discover_phy3(uint mii_reg, struct net_device *dev, uint data)
+{
+	struct fec_enet_private *fep;
+	int	i;
+
+	fep = dev->priv;
+	fep->phy_id |= (mii_reg & 0xffff);
+
+	for(i = 0; phy_info[i]; i++)
+		if(phy_info[i]->id == (fep->phy_id >> 4))
+			break;
+
+	if(!phy_info[i])
+		panic("%s: PHY id 0x%08x is not supported!\n",
+		      dev->name, fep->phy_id);
+
+	fep->phy = phy_info[i];
+	fep->phy_id_done = 1;
+
+	printk("%s: Phy @ 0x%x, type %s (0x%08x)\n",
+		dev->name, fep->phy_addr, fep->phy->name, fep->phy_id);
+}
+
+/* Scan all of the MII PHY addresses looking for someone to respond
+ * with a valid ID.  This usually happens quickly.
+ */
+static void
+mii_discover_phy(uint mii_reg, struct net_device *dev, uint data)
+{
+	struct fec_enet_private *fep;
+	uint	phytype;
+
+	fep = dev->priv;
+
+	if ((phytype = (mii_reg & 0xffff)) != 0xffff) {
+
+		/* Got first part of ID, now get remainder.
+		*/
+		fep->phy_id = phytype << 16;
+		mii_queue(dev, mk_mii_read(MII_REG_PHYIR2), mii_discover_phy3, 0);
+	} else {
+		fep->phy_addr++;
+		if (fep->phy_addr < 32) {
+			mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
+							mii_discover_phy, 0);
+		} else {
+			printk("fec: No PHY device found.\n");
+		}
+	}
+}
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+/* This interrupt occurs when the PHY detects a link change.
+*/
+static void
+#ifdef CONFIG_RPXCLASSIC
+mii_link_interrupt(void *dev_id)
+#else
+mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs)
+#endif
+{
+	struct	net_device *dev = dev_id;
+	struct fec_enet_private *fep = dev->priv;
+	volatile immap_t *immap = (immap_t *)IMAP_ADDR;
+	volatile fec_t *fecp = &(immap->im_cpm.cp_fec);
+	unsigned int ecntrl = fecp->fec_ecntrl;
+
+	/*
+	 * Acknowledge the interrupt if possible. If we have not
+	 * found the PHY yet we can't process or acknowledge the
+	 * interrupt now. Instead we ignore this interrupt for now,
+	 * which we can do since it is edge triggered. It will be
+	 * acknowledged later by fec_enet_open().
+	 */
+	if (fep->phy) {
+		/*
+		 * We need the FEC enabled to access the MII
+		 */
+		if ((ecntrl & FEC_ECNTRL_ETHER_EN) == 0) {
+			fecp->fec_ecntrl |= FEC_ECNTRL_ETHER_EN;
+		}
+
+		mii_do_cmd(dev, fep->phy->ack_int);
+		mii_do_cmd(dev, phy_cmd_relink);  /* restart and display status */
+
+		if ((ecntrl & FEC_ECNTRL_ETHER_EN) == 0) {
+			fecp->fec_ecntrl = ecntrl;	/* restore old settings */
+		}
+	}
+
+}
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+static int
+fec_enet_open(struct rtnet_device *rtdev)
+{
+	struct fec_enet_private *fep = rtdev->priv;
+
+	/* I should reset the ring buffers here, but I don't yet know
+	 * a simple way to do that.
+	 */
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	fep->sequence_done = 0;
+	fep->link = 0;
+
+	if (fep->phy) {
+		mii_do_cmd(dev, fep->phy->config);
+		mii_do_cmd(dev, phy_cmd_config);  /* display configuration */
+		while(!fep->sequence_done)
+			schedule();
+
+		mii_do_cmd(dev, fep->phy->startup);
+
+#if defined(CONFIG_XENO_DRIVERS_NET_USE_MDIO) && defined(CONFIG_FEC_DP83846A)
+		if(fep->phy == &phy_info_dp83846a)
+		{
+			/* Initializing timers
+			 */
+			init_timer( &fep->phy_timer_list );
+
+			/* Start the timer for periodic link status checks.
+			 * After 100 milliseconds, the mdio_timer_callback function is called.
+			 */
+			fep->phy_timer_list.expires  = jiffies + (100 * HZ / 1000);
+			fep->phy_timer_list.data     = (unsigned long)dev;
+			fep->phy_timer_list.function = mdio_timer_callback;
+			add_timer( &fep->phy_timer_list );
+		}
+
+#if defined(CONFIG_IP_PNP)
+	rtdm_printk("%s: Waiting for the link to be up...\n", rtdev->name);
+
+	while(fep->link == 0 || ((((volatile fec_t*)rtdev->base_addr)->fec_ecntrl & FEC_ECNTRL_ETHER_EN) == 0))
+	{
+	    schedule();
+	}
+#endif /* CONFIG_IP_PNP */
+
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO && CONFIG_FEC_DP83846A */
+
+		netif_start_queue(dev);
+		return 0;		/* Success */
+	}
+	return -ENODEV;		/* No PHY we understand */
+#else	/* !CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+	fep->link = 1;
+	rtnetif_start_queue(rtdev);
+
+	return 0;	/* Success */
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+}
+
+static int
+fec_enet_close(struct rtnet_device *rtdev)
+{
+	/* Don't know what to do yet.
+	*/
+	rtnetif_stop_queue(rtdev);
+
+	fec_stop(rtdev);
+
+	return 0;
+}
+
+static struct net_device_stats *fec_enet_get_stats(struct rtnet_device *rtdev)
+{
+	struct fec_enet_private *fep = (struct fec_enet_private *)rtdev->priv;
+
+	return &fep->stats;
+}
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+
+#if defined(CONFIG_FEC_DP83846A)
+/* Execute the ack_int command set and schedule the next timer callback.  */
+static void mdio_timer_callback(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct fec_enet_private *fep = (struct fec_enet_private *)(dev->priv);
+	mii_do_cmd(dev, fep->phy->ack_int);
+
+	if(fep->link == 0)
+	{
+		fep->phy_timer_list.expires  = jiffies + (100 * HZ / 1000); /* Sleep for 100ms */
+	}
+	else
+	{
+		fep->phy_timer_list.expires  = jiffies + (1 * HZ); /* Sleep for 1 sec. */
+	}
+	add_timer( &fep->phy_timer_list );
+}
+#endif /* CONFIG_FEC_DP83846A */
+
+static void mdio_callback(uint regval, struct net_device *dev, uint data)
+{
+	mdio_read_data_t* mrd = (mdio_read_data_t *)data;
+	mrd->regval = 0xFFFF & regval;
+	wake_up_process(mrd->sleeping_task);
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+	uint retval;
+	mdio_read_data_t* mrd = (mdio_read_data_t *)kmalloc(sizeof(*mrd), GFP_KERNEL);
+
+	mrd->sleeping_task = current;
+	set_current_state(TASK_INTERRUPTIBLE);
+	mii_queue(dev, mk_mii_read(location), mdio_callback, (unsigned int) mrd);
+	schedule();
+
+	retval = mrd->regval;
+
+	kfree(mrd);
+
+	return retval;
+}
+
+void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+	mii_queue(dev, mk_mii_write(location, value), NULL, 0);
+}
+
+static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct fec_enet_private *cep = (struct fec_enet_private *)dev->priv;
+	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
+
+	int phy = cep->phy_addr & 0x1f;
+	int retval;
+
+	if (data == NULL)
+	{
+		retval = -EINVAL;
+	}
+	else
+	{
+		switch(cmd)
+		{
+		case SIOCETHTOOL:
+			return netdev_ethtool_ioctl(dev, (void*)rq->ifr_data);
+			break;
+
+		case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
+		case SIOCDEVPRIVATE:		/* for binary compat, remove in 2.5 */
+			data->phy_id = phy;
+
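+			/* falls through to the register read below */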
+		case SIOCGMIIREG:		/* Read MII PHY register.	*/
+		case SIOCDEVPRIVATE+1:		/* for binary compat, remove in 2.5 */
+			data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
+			retval = 0;
+			break;
+
+		case SIOCSMIIREG:		/* Write MII PHY register.	*/
+		case SIOCDEVPRIVATE+2:		/* for binary compat, remove in 2.5 */
+			if (!capable(CAP_NET_ADMIN))
+			{
+				retval = -EPERM;
+			}
+			else
+			{
+				mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
+				retval = 0;
+			}
+			break;
+
+		default:
+			retval = -EOPNOTSUPP;
+			break;
+		}
+	}
+	return retval;
+}
+
+
+static int netdev_ethtool_ioctl (struct net_device *dev, void *useraddr)
+{
+	u32 ethcmd;
+
+	/* dev_ioctl() in ../../net/core/dev.c has already checked
+	   capable(CAP_NET_ADMIN), so don't bother with that here.  */
+
+	if (copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
+		return -EFAULT;
+
+	switch (ethcmd) {
+	case ETHTOOL_GDRVINFO:
+		{
+			struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
+			strcpy (info.driver, dev->name);
+			strcpy (info.version, "0.3");
+			strcpy (info.bus_info, "");
+			if (copy_to_user (useraddr, &info, sizeof (info)))
+				return -EFAULT;
+			return 0;
+		}
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+
+#ifdef ORIGINAL_VERSION
+
+/* Returns the CRC needed when filling in the hash table for
+ * multicast group filtering
+ * pAddr must point to a MAC address (6 bytes)
+ */
+static u32 fec_mulicast_calc_crc(char *pAddr)
+{
+	u8	byte;
+	int	byte_count;
+	int	bit_count;
+	u32	crc = 0xffffffff;
+	u8	msb;
+
+	for (byte_count=0; byte_count<6; byte_count++) {
+		byte = pAddr[byte_count];
+		for (bit_count=0; bit_count<8; bit_count++) {
+			msb = crc >> 31;
+			crc <<= 1;
+			if (msb ^ (byte & 0x1)) {
+				crc ^= FEC_CRC_POLY;
+			}
+			byte >>= 1;
+		}
+	}
+	return (crc);
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ * Skeleton taken from sunlance driver.
+ * The CPM Ethernet implementation allows Multicast as well as individual
+ * MAC address filtering.  Some of the drivers check to make sure it is
+ * a group multicast address, and discard those that are not.  I guess I
+ * will do the same for now, but just remove the test if you want
+ * individual filtering as well (do the upper net layers want or support
+ * this kind of feature?).
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+	struct	fec_enet_private *fep;
+	volatile fec_t *ep;
+
+	fep = (struct fec_enet_private *)dev->priv;
+	ep = &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec);
+
+	if (dev->flags&IFF_PROMISC) {
+
+		/* Log any net taps. */
+		printk("%s: Promiscuous mode enabled.\n", dev->name);
+		ep->fec_r_cntrl |= FEC_RCNTRL_PROM;
+	} else {
+
+		ep->fec_r_cntrl &= ~FEC_RCNTRL_PROM;
+
+		if (dev->flags & IFF_ALLMULTI) {
+			/* Catch all multicast addresses, so set the
+			 * filter to all 1's.
+			 */
+			ep->fec_hash_table_high = 0xffffffff;
+			ep->fec_hash_table_low = 0xffffffff;
+		} else {
+			struct dev_mc_list *pmc = dev->mc_list;
+
+			/* Clear Hash-Table
+			*/
+			ep->fec_hash_table_high = 0;
+			ep->fec_hash_table_low = 0;
+
+			/* Now populate the hash table
+			*/
+#ifdef DEBUG_MULTICAST
+			if (pmc) {
+				printk ("%s: Recalculating hash-table:\n",
+					dev->name);
+				printk (" MAC Address         high     low\n");
+			}
+#endif
+
+			while (pmc) {
+				u32	crc;
+				int	temp;
+				u32	csrVal;
+				int	hash_index;
+
+				crc = fec_mulicast_calc_crc(pmc->dmi_addr);
+				temp = (crc & 0x3f) >> 1;
+				hash_index = ((temp & 0x01) << 4) |
+					     ((temp & 0x02) << 2) |
+					     ((temp & 0x04))      |
+					     ((temp & 0x08) >> 2) |
+					     ((temp & 0x10) >> 4);
+				csrVal = (1 << hash_index);
+				if (crc & 1) {
+					ep->fec_hash_table_high	|= csrVal;
+				}
+				else {
+					ep->fec_hash_table_low	|= csrVal;
+				}
+#ifdef DEBUG_MULTICAST
+				printk (" %02x:%02x:%02x:%02x:%02x:%02x   %08x %08x\n",
+					(int)pmc->dmi_addr[0],
+					(int)pmc->dmi_addr[1],
+					(int)pmc->dmi_addr[2],
+					(int)pmc->dmi_addr[3],
+					(int)pmc->dmi_addr[4],
+					(int)pmc->dmi_addr[5],
+					ep->fec_hash_table_high,
+					ep->fec_hash_table_low
+				);
+#endif
+				pmc = pmc->next;
+			}
+		}
+	}
+}
+#endif /* ORIGINAL_VERSION */
+
+/* Initialize the FEC Ethernet on 860T.
+ */
+int __init fec_enet_init(void)
+{
+	struct rtnet_device *rtdev = NULL;
+	struct fec_enet_private *fep;
+	int i, j, k;
+	unsigned char	*eap, *iap, *ba;
+	unsigned long	mem_addr;
+	volatile	cbd_t	*bdp;
+	cbd_t		*cbd_base;
+	volatile	immap_t	*immap;
+	volatile	fec_t	*fecp;
+	bd_t		*bd;
+
+	immap = (immap_t *)IMAP_ADDR;	/* pointer to internal registers */
+
+	bd = (bd_t *)__res;
+
+	if (!rx_pool_size)
+		rx_pool_size = RX_RING_SIZE * 2;
+
+	rtdev = rtdev_root = rt_alloc_etherdev(sizeof(struct fec_enet_private),
+					rx_pool_size + TX_RING_SIZE);
+	if (rtdev == NULL) {
+		printk(KERN_ERR "enet: Could not allocate ethernet device.\n");
+		return -1;
+	}
+	rtdev_alloc_name(rtdev, "rteth%d");
+	rt_rtdev_connect(rtdev, &RTDEV_manager);
+	rtdev->vers = RTDEV_VERS_2_0;
+
+	fep = (struct fec_enet_private *)rtdev->priv;
+	fecp = &(immap->im_cpm.cp_fec);
+
+	/* Whack a reset.  We should wait for this.
+	*/
+	fecp->fec_ecntrl = FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET;
+	for (i = 0;
+	     (fecp->fec_ecntrl & FEC_ECNTRL_RESET) && (i < FEC_RESET_DELAY);
+	     ++i) {
+		udelay(1);
+	}
+	if (i == FEC_RESET_DELAY) {
+		printk ("FEC Reset timeout!\n");
+	}
+
+	/* Set the Ethernet address.  If using multiple Enets on the 8xx,
+	 * this needs some work to get unique addresses.
+	 */
+	eap = (unsigned char *)my_enet_addr;
+	iap = bd->bi_enetaddr;
+
+#if defined(CONFIG_SCC_ENET) && !defined(ORIGINAL_VERSION)
+	/*
+	 * If a board has Ethernet configured both on a SCC and the
+	 * FEC, it needs (at least) 2 MAC addresses (we know that Sun
+	 * disagrees, but anyway). For the FEC port, we create
+	 * another address by setting one of the address bits above
+	 * something that would have (up to now) been allocated.
+	 */
+	{
+		unsigned char	tmpaddr[6];
+		for (i=0; i<6; i++)
+			tmpaddr[i] = *iap++;
+		tmpaddr[3] |= 0x80;
+		iap = tmpaddr;
+	}
+#endif
+
+	for (i=0; i<6; i++) {
+		rtdev->dev_addr[i] = *eap++ = *iap++;
+	}
+
+	/* Allocate memory for buffer descriptors.
+	*/
+	if (((RX_RING_SIZE + TX_RING_SIZE) * sizeof(cbd_t)) > PAGE_SIZE) {
+		printk("FEC init error.  Need more space.\n");
+		printk("FEC initialization failed.\n");
+		return 1;
+	}
+	cbd_base = (cbd_t *)consistent_alloc(GFP_KERNEL, PAGE_SIZE, (void *)&mem_addr);
+
+	/* Set receive and transmit descriptor base.
+	*/
+	fep->rx_bd_base = cbd_base;
+	fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+
+	fep->skb_cur = fep->skb_dirty = 0;
+
+	/* Initialize the receive buffer descriptors.
+	*/
+	bdp = fep->rx_bd_base;
+	k = 0;
+	for (i=0; i<FEC_ENET_RX_PAGES; i++) {
+
+		/* Allocate a page.
+		*/
+		ba = (unsigned char *)consistent_alloc(GFP_KERNEL, PAGE_SIZE, (void *)&mem_addr);
+
+		/* Initialize the BD for every fragment in the page.
+		*/
+		for (j=0; j<FEC_ENET_RX_FRPPG; j++) {
+			bdp->cbd_sc = BD_ENET_RX_EMPTY;
+			bdp->cbd_bufaddr = mem_addr;
+			fep->rx_vaddr[k++] = ba;
+			mem_addr += FEC_ENET_RX_FRSIZE;
+			ba += FEC_ENET_RX_FRSIZE;
+			bdp++;
+		}
+	}
+
+	rtdm_lock_init(&fep->lock);
+
+	/* Set the last buffer to wrap.
+	*/
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	/* Install our interrupt handler.
+	*/
+	rt_stack_connect(rtdev, &STACK_manager);
+	if ((i = rtdm_irq_request(&fep->irq_handle, FEC_INTERRUPT,
+				  fec_enet_interrupt, 0, "rt_mpc8xx_fec", rtdev))) {
+		printk(KERN_ERR "Couldn't request IRQ %d\n", rtdev->irq);
+		rtdev_free(rtdev);
+		return i;
+	}
+
+	rtdev->base_addr = (unsigned long)fecp;
+
+#ifdef CONFIG_RPXCLASSIC
+/* If MDIO is disabled the PHY should not be allowed to
+ * generate interrupts telling us to read the PHY.
+ */
+# ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	/* Make Port C, bit 15 an input that causes interrupts.
+	*/
+	immap->im_ioport.iop_pcpar &= ~0x0001;
+	immap->im_ioport.iop_pcdir &= ~0x0001;
+	immap->im_ioport.iop_pcso  &= ~0x0001;
+	immap->im_ioport.iop_pcint |=  0x0001;
+	cpm_install_handler(CPMVEC_PIO_PC15, mii_link_interrupt, dev);
+# endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+	/* Make LEDS reflect Link status.
+	*/
+	*((uint *) RPX_CSR_ADDR) &= ~BCSR2_FETHLEDMODE;
+#endif	/* CONFIG_RPXCLASSIC */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+# ifndef PHY_INTERRUPT
+#  error Want to use MII, but PHY_INTERRUPT not defined!
+# endif
+	((immap_t *)IMAP_ADDR)->im_siu_conf.sc_siel |=
+		(0x80000000 >> PHY_INTERRUPT);
+
+	if (request_8xxirq(PHY_INTERRUPT, mii_link_interrupt, 0, "mii", dev) != 0)
+		panic("Could not allocate MII IRQ!");
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+	rtdev->base_addr = (unsigned long)fecp;
+
+	/* The FEC Ethernet specific entries in the device structure. */
+	rtdev->open = fec_enet_open;
+	rtdev->hard_start_xmit = fec_enet_start_xmit;
+	rtdev->stop = fec_enet_close;
+	rtdev->hard_header = &rt_eth_header;
+	rtdev->get_stats = fec_enet_get_stats;
+
+	if ((i = rt_register_rtnetdev(rtdev))) {
+		rtdm_irq_disable(&fep->irq_handle);
+		rtdm_irq_free(&fep->irq_handle);
+		rtdev_free(rtdev);
+		return i;
+	}
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	dev->do_ioctl = fec_enet_ioctl;
+
+	for (i=0; i<NMII-1; i++)
+		mii_cmds[i].mii_next = &mii_cmds[i+1];
+	mii_free = mii_cmds;
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+#ifndef CONFIG_ICU862
+	/* Configure all of port D for MII.
+	*/
+	immap->im_ioport.iop_pdpar = 0x1fff;
+
+#else	/* CONFIG_ICU862 */
+	/* Configure port A for MII.
+	*/
+
+	/* Has Utopia been configured? */
+	if (immap->im_ioport.iop_pdpar & (0x8000 >> 1)) {
+		/*
+		 * YES - Use MUXED mode for UTOPIA bus.
+		 * This frees Port A for use by MII (see 862UM table 41-6).
+		 */
+		immap->im_ioport.utmode &= ~0x80;
+	} else {
+		/*
+		 * NO - set SPLIT mode for UTOPIA bus.
+		 *
+		 * This doesn't really affect UTOPIA (which isn't
+		 * enabled anyway) but just tells the 862
+		 * to use port A for MII (see 862UM table 41-6).
+		 */
+		immap->im_ioport.utmode |= 0x80;
+	}
+
+# ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	/* Now configure MII_MDC pin */
+	immap->im_ioport.iop_pdpar |= (0x8000 >> 8);
+# endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+#endif  /* CONFIG_ICU862 */
+
+	/* Bits moved from Rev. D onward.
+	*/
+	if ((mfspr(IMMR) & 0xffff) < 0x0501)
+		immap->im_ioport.iop_pddir = 0x1c58;	/* Pre rev. D */
+	else
+		immap->im_ioport.iop_pddir = 0x1fff;	/* Rev. D and later */
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	/* Set MII speed to 2.5 MHz
+	*/
+	fecp->fec_mii_speed = fep->phy_speed =
+	    ((((bd->bi_intfreq + 4999999) / 2500000) / 2 ) & 0x3F ) << 1;
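+	/*
+	 * Worked example (assuming bd->bi_intfreq == 50000000, i.e. a
+	 * 50 MHz bus clock): (50000000 + 4999999) / 2500000 = 21,
+	 * 21 / 2 = 10, 10 & 0x3F = 10, and 10 << 1 = 20 is the value
+	 * written to fec_mii_speed for the roughly 2.5 MHz MII clock
+	 * mentioned above.
+	 */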
+#else
+	fecp->fec_mii_speed = 0;	/* turn off MDIO */
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+#ifndef ORIGINAL_VERSION
+	printk("%s: FEC ENET Version 0.3, irq %d, addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+	       rtdev->name, FEC_INTERRUPT,
+	       rtdev->dev_addr[0], rtdev->dev_addr[1], rtdev->dev_addr[2],
+	       rtdev->dev_addr[3], rtdev->dev_addr[4], rtdev->dev_addr[5]);
+#else
+	printk ("%s: FEC ENET Version 0.3, FEC irq %d"
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+		", with MDIO"
+#endif
+#ifdef PHY_INTERRUPT
+		", MII irq %d"
+#endif
+		", addr ",
+		dev->name, FEC_INTERRUPT
+#ifdef PHY_INTERRUPT
+		, PHY_INTERRUPT
+#endif
+	);
+	for (i=0; i<6; i++)
+		printk("%02x%c", rtdev->dev_addr[i], (i==5) ? '\n' : ':');
+#endif /* ORIGINAL_VERSION */
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO	/* start in full duplex mode, and negotiate speed */
+	fec_restart (dev, 1);
+#else			/* always use half duplex mode only */
+	fec_restart (rtdev, 0);
+#endif
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	/* Queue up command to detect the PHY and initialize the
+	 * remainder of the interface.
+	 */
+	fep->phy_id_done = 0;
+	fep->phy_addr = 0;
+	mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy, 0);
+
+	fep->old_status = 0;
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+	return 0;
+}
+
+/* This function is called to start or restart the FEC during a link
+ * change.  This only happens when switching between half and full
+ * duplex.
+ */
+static void
+fec_restart(struct rtnet_device *rtdev, int duplex)
+{
+	struct fec_enet_private *fep;
+	int i;
+	volatile	cbd_t	*bdp;
+	volatile	immap_t	*immap;
+	volatile	fec_t	*fecp;
+
+	immap = (immap_t *)IMAP_ADDR;	/* pointer to internal registers */
+
+	fecp = &(immap->im_cpm.cp_fec);
+
+	fep = rtdev->priv;
+
+	/* Whack a reset.  We should wait for this.
+	*/
+	fecp->fec_ecntrl = FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET;
+	for (i = 0;
+	     (fecp->fec_ecntrl & FEC_ECNTRL_RESET) && (i < FEC_RESET_DELAY);
+	     ++i) {
+		udelay(1);
+	}
+	if (i == FEC_RESET_DELAY) {
+		printk ("FEC Reset timeout!\n");
+	}
+
+	/* Set station address.
+	*/
+	fecp->fec_addr_low  = (my_enet_addr[0] << 16) | my_enet_addr[1];
+	fecp->fec_addr_high =  my_enet_addr[2];
+
+	/* Reset all multicast.
+	*/
+	fecp->fec_hash_table_high = 0;
+	fecp->fec_hash_table_low  = 0;
+
+	/* Set maximum receive buffer size.
+	*/
+	fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
+	fecp->fec_r_hash = PKT_MAXBUF_SIZE;
+
+	/* Set receive and transmit descriptor base.
+	*/
+	fecp->fec_r_des_start = iopa((uint)(fep->rx_bd_base));
+	fecp->fec_x_des_start = iopa((uint)(fep->tx_bd_base));
+
+	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+	fep->cur_rx = fep->rx_bd_base;
+
+	/* Reset SKB transmit buffers.
+	*/
+	fep->skb_cur = fep->skb_dirty = 0;
+	for (i=0; i<=TX_RING_MOD_MASK; i++) {
+		if (fep->tx_skbuff[i] != NULL) {
+			dev_kfree_rtskb(fep->tx_skbuff[i]);
+			fep->tx_skbuff[i] = NULL;
+		}
+	}
+
+	/* Initialize the receive buffer descriptors.
+	*/
+	bdp = fep->rx_bd_base;
+	for (i=0; i<RX_RING_SIZE; i++) {
+
+		/* Initialize the BD for every fragment in the page.
+		*/
+		bdp->cbd_sc = BD_ENET_RX_EMPTY;
+		bdp++;
+	}
+
+	/* Set the last buffer to wrap.
+	*/
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	/* ...and the same for transmit.
+	*/
+	bdp = fep->tx_bd_base;
+	for (i=0; i<TX_RING_SIZE; i++) {
+
+		/* Initialize the BD for every fragment in the page.
+		*/
+		bdp->cbd_sc = 0;
+		bdp->cbd_bufaddr = 0;
+		bdp++;
+	}
+
+	/* Set the last buffer to wrap.
+	*/
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	/* Enable MII mode.
+	*/
+	if (duplex) {
+		fecp->fec_r_cntrl = FEC_RCNTRL_MII_MODE;	/* MII enable */
+		fecp->fec_x_cntrl = FEC_TCNTRL_FDEN;		/* FD enable */
+	}
+	else {
+		fecp->fec_r_cntrl = FEC_RCNTRL_MII_MODE | FEC_RCNTRL_DRT;
+		fecp->fec_x_cntrl = 0;
+	}
+
+	fep->full_duplex = duplex;
+
+	/* Enable big endian and don't care about SDMA FC.
+	*/
+	fecp->fec_fun_code = 0x78000000;
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	/* Set MII speed.
+	*/
+	fecp->fec_mii_speed = fep->phy_speed;
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+	/* Clear any outstanding interrupt.
+	*/
+	fecp->fec_ievent = 0xffc0;
+
+	fecp->fec_ivec = (FEC_INTERRUPT/2) << 29;
+
+	/* Enable interrupts we wish to service.
+	*/
+	fecp->fec_imask = ( FEC_ENET_TXF | FEC_ENET_TXB |
+			    FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII );
+
+	/* And last, enable the transmit and receive processing.
+	*/
+	fecp->fec_ecntrl = FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN;
+	fecp->fec_r_des_active = 0x01000000;
+
+	/* The tx ring is no longer full. */
+	if(fep->tx_full)
+	{
+		fep->tx_full = 0;
+		rtnetif_wake_queue(rtdev);
+	}
+}
+
+static void
+fec_stop(struct rtnet_device *rtdev)
+{
+	volatile	immap_t	*immap;
+	volatile	fec_t	*fecp;
+	int i;
+	struct fec_enet_private *fep;
+
+	immap = (immap_t *)IMAP_ADDR;	/* pointer to internal registers */
+
+	fecp = &(immap->im_cpm.cp_fec);
+
+	if ((fecp->fec_ecntrl & FEC_ECNTRL_ETHER_EN) == 0)
+		return;	/* already down */
+
+	fep = rtdev->priv;
+
+
+	fecp->fec_x_cntrl = 0x01;	/* Graceful transmit stop */
+
+	for (i = 0;
+	     ((fecp->fec_ievent & 0x10000000) == 0) && (i < FEC_RESET_DELAY);
+	     ++i) {
+		udelay(1);
+	}
+	if (i == FEC_RESET_DELAY) {
+		printk ("FEC timeout on graceful transmit stop\n");
+	}
+
+	/* Clear outstanding MII command interrupts.
+	*/
+	fecp->fec_ievent = FEC_ENET_MII;
+
+	/* Enable MII command finished interrupt
+	*/
+	fecp->fec_ivec = (FEC_INTERRUPT/2) << 29;
+	fecp->fec_imask = FEC_ENET_MII;
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	/* Set MII speed.
+	*/
+	fecp->fec_mii_speed = fep->phy_speed;
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+	/* Disable FEC
+	*/
+	fecp->fec_ecntrl &= ~(FEC_ECNTRL_ETHER_EN);
+}
+
+static void __exit fec_enet_cleanup(void)
+{
+	struct rtnet_device *rtdev = rtdev_root;
+
+	if (rtdev) {
+		struct fec_enet_private *fep = rtdev->priv;
+
+		rtdm_irq_disable(&fep->irq_handle);
+		rtdm_irq_free(&fep->irq_handle);
+
+		consistent_free(fep->rx_bd_base);
+
+		rt_stack_disconnect(rtdev);
+		rt_unregister_rtnetdev(rtdev);
+		rt_rtdev_disconnect(rtdev);
+
+		printk("%s: unloaded\n", rtdev->name);
+		rtdev_free(rtdev);
+		rtdev_root = NULL;
+	}
+}
+
+module_init(fec_enet_init);
+module_exit(fec_enet_cleanup);
+++ linux-patched/drivers/xenomai/net/drivers/rt_smc91111.h	2022-03-21 12:58:29.695885977 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/rt_fec.h	1970-01-01 01:00:00.000000000 +0100
+/*------------------------------------------------------------------------
+ . smc91111.h - macros for the LAN91C111 Ethernet Driver
+ .
+ . Copyright (C) 2001 Standard Microsystems Corporation (SMSC)
+ .       Developed by Simple Network Magic Corporation (SNMC)
+ . Copyright (C) 1996 by Erik Stahlman (ES)
+ .
+ . This program is free software; you can redistribute it and/or modify
+ . it under the terms of the GNU General Public License as published by
+ . the Free Software Foundation; either version 2 of the License, or
+ . (at your option) any later version.
+ .
+ . This program is distributed in the hope that it will be useful,
+ . but WITHOUT ANY WARRANTY; without even the implied warranty of
+ . MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ . GNU General Public License for more details.
+ .
+ . You should have received a copy of the GNU General Public License
+ . along with this program; if not, write to the Free Software
+ . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ .
+ . This file contains register information and access macros for 
+ . the LAN91C111 single chip ethernet controller.  It is a modified
+ . version of the smc9194.h file.
+ . 
+ . Information contained in this file was obtained from the LAN91C111
+ . manual from SMC.  To get a copy, if you really want one, you can find 
+ . information under www.smsc.com.
+ . 
+ . Authors
+ . 	Erik Stahlman				( erik@vt.edu )
+ .	Daris A Nevil				( dnevil@snmc.com )
+ .
+ . History
+ . 03/16/01		Daris A Nevil	Modified for use with LAN91C111 device
+ .
+ ---------------------------------------------------------------------------*/
+#ifndef _SMC91111_H_
+#define _SMC91111_H_
+
+/* I want some simple types */
+
+typedef unsigned char			byte;
+typedef unsigned short			word;
+typedef unsigned long int 		dword;
+
+
+/* Because of bank switching, the LAN91xxx uses only 16 I/O ports */
+
+#define SMC_IO_EXTENT	16
+
+
+/*---------------------------------------------------------------
+ .  
+ . A description of the SMSC registers is probably in order here,
+ . although for details, the SMC datasheet is invaluable.  
+ . 
+ . Basically, the chip has 4 banks of registers ( 0 to 3 ), which
+ . are accessed by writing a number into the BANK_SELECT register
+ . ( I also use a SMC_SELECT_BANK macro for this ).
+ . 
+ . The banks are configured so that for most purposes, bank 2 is all
+ . that is needed for simple run time tasks.  
+ -----------------------------------------------------------------------*/
+
+/*
+ . Bank Select Register: 
+ .
+ .		yyyy yyyy 0000 00xx  
+ .		xx 		= bank number
+ .		yyyy yyyy	= 0x33, for identification purposes.
+*/
+#define	BANK_SELECT		14
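+
+/*
+ * Illustrative helper, not part of the original header: accessing a
+ * register in another bank always starts by writing the bank number to
+ * BANK_SELECT.  'ioaddr' is assumed to be the adapter's I/O base; the
+ * driver itself wraps the bank switch in the SMC_SELECT_BANK() macro
+ * defined further down.
+ */
+static inline word smc_read_banked_reg(unsigned long ioaddr, int bank, int reg)
+{
+	outw(bank, ioaddr + BANK_SELECT);	/* select the register bank */
+	return inw(ioaddr + reg);		/* 16-bit register read */
+}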
+
+// Transmit Control Register
+/* BANK 0  */
+#define	TCR_REG 	0x0000 	// transmit control register 
+#define TCR_ENABLE	0x0001	// When 1 we can transmit
+#define TCR_LOOP	0x0002	// Controls output pin LBK
+#define TCR_FORCOL	0x0004	// When 1 will force a collision
+#define TCR_PAD_EN	0x0080	// When 1 will pad tx frames < 64 bytes w/0
+#define TCR_NOCRC	0x0100	// When 1 will not append CRC to tx frames
+#define TCR_MON_CSN	0x0400	// When 1 tx monitors carrier
+#define TCR_FDUPLX    	0x0800  // When 1 enables full duplex operation
+#define TCR_STP_SQET	0x1000	// When 1 stops tx if Signal Quality Error
+#define	TCR_EPH_LOOP	0x2000	// When 1 enables EPH block loopback
+#define	TCR_SWFDUP	0x8000	// When 1 enables Switched Full Duplex mode
+
+#define	TCR_CLEAR	0	/* do NOTHING */
+/* the default settings for the TCR register : */ 
+/* QUESTION: do I want to enable padding of short packets ? */
+#define	TCR_DEFAULT  	TCR_ENABLE 
+
+
+// EPH Status Register
+/* BANK 0  */
+#define EPH_STATUS_REG	0x0002
+#define ES_TX_SUC	0x0001	// Last TX was successful
+#define ES_SNGL_COL	0x0002	// Single collision detected for last tx
+#define ES_MUL_COL	0x0004	// Multiple collisions detected for last tx
+#define ES_LTX_MULT	0x0008	// Last tx was a multicast
+#define ES_16COL	0x0010	// 16 Collisions Reached
+#define ES_SQET		0x0020	// Signal Quality Error Test
+#define ES_LTXBRD	0x0040	// Last tx was a broadcast
+#define ES_TXDEFR	0x0080	// Transmit Deferred
+#define ES_LATCOL	0x0200	// Late collision detected on last tx
+#define ES_LOSTCARR	0x0400	// Lost Carrier Sense
+#define ES_EXC_DEF	0x0800	// Excessive Deferral
+#define ES_CTR_ROL	0x1000	// Counter Roll Over indication
+#define ES_LINK_OK	0x4000	// Driven by inverted value of nLNK pin
+#define ES_TXUNRN	0x8000	// Tx Underrun
+
+
+// Receive Control Register
+/* BANK 0  */
+#define	RCR_REG		0x0004
+#define	RCR_RX_ABORT	0x0001	// Set if a rx frame was aborted
+#define	RCR_PRMS	0x0002	// Enable promiscuous mode
+#define	RCR_ALMUL	0x0004	// When set accepts all multicast frames
+#define RCR_RXEN	0x0100	// IFF this is set, we can receive packets
+#define	RCR_STRIP_CRC	0x0200	// When set strips CRC from rx packets
+#define	RCR_ABORT_ENB	0x0200	// When set will abort rx on collision 
+#define	RCR_FILT_CAR	0x0400	// When set filters leading 12 bits of carrier
+#define RCR_SOFTRST	0x8000 	// resets the chip
+
+/* the normal settings for the RCR register : */
+#define	RCR_DEFAULT	(RCR_STRIP_CRC | RCR_RXEN)
+#define RCR_CLEAR	0x0	// set it to a base state
+
+// Counter Register
+/* BANK 0  */
+#define	COUNTER_REG	0x0006
+
+// Memory Information Register
+/* BANK 0  */
+#define	MIR_REG		0x0008
+
+// Receive/Phy Control Register
+/* BANK 0  */
+#define	RPC_REG		0x000A
+#define	RPC_SPEED	0x2000	// When 1 PHY is in 100Mbps mode.
+#define	RPC_DPLX	0x1000	// When 1 PHY is in Full-Duplex Mode
+#define	RPC_ANEG	0x0800	// When 1 PHY is in Auto-Negotiate Mode
+#define	RPC_LSXA_SHFT	5	// Bits to shift LS2A,LS1A,LS0A to lsb
+#define	RPC_LSXB_SHFT	2	// Bits to get LS2B,LS1B,LS0B to lsb
+#define RPC_LED_100_10	(0x00)	// LED = 100Mbps OR's with 10Mbps link detect
+#define RPC_LED_RES	(0x01)	// LED = Reserved
+#define RPC_LED_10	(0x02)	// LED = 10Mbps link detect
+#define RPC_LED_FD	(0x03)	// LED = Full Duplex Mode
+#define RPC_LED_TX_RX	(0x04)	// LED = TX or RX packet occurred
+#define RPC_LED_100	(0x05)	// LED = 100Mbps link detect
+#define RPC_LED_TX	(0x06)	// LED = TX packet occurred
+#define RPC_LED_RX	(0x07)	// LED = RX packet occurred
+#define RPC_DEFAULT (RPC_ANEG | (RPC_LED_100 << RPC_LSXA_SHFT) | (RPC_LED_FD << RPC_LSXB_SHFT) | RPC_SPEED | RPC_DPLX)
+
+/* Bank 0 0x000C is reserved */
+
+// Bank Select Register
+/* All Banks */
+#define BSR_REG	0x000E
+
+
+// Configuration Reg
+/* BANK 1 */
+#define CONFIG_REG	0x0000
+#define CONFIG_EXT_PHY	0x0200	// 1=external MII, 0=internal Phy
+#define CONFIG_GPCNTRL	0x0400	// Inverse value drives pin nCNTRL
+#define CONFIG_NO_WAIT	0x1000	// When 1 no extra wait states on ISA bus
+#define CONFIG_EPH_POWER_EN 0x8000 // When 0 EPH is placed into low power mode.
+
+// Default is powered-up, Internal Phy, Wait States, and pin nCNTRL=low
+#define CONFIG_DEFAULT	(CONFIG_EPH_POWER_EN)
+
+
+// Base Address Register
+/* BANK 1 */
+#define	BASE_REG	0x0002
+
+
+// Individual Address Registers
+/* BANK 1 */
+#define	ADDR0_REG	0x0004
+#define	ADDR1_REG	0x0006
+#define	ADDR2_REG	0x0008
+
+
+// General Purpose Register
+/* BANK 1 */
+#define	GP_REG		0x000A
+
+
+// Control Register
+/* BANK 1 */
+#define	CTL_REG		0x000C
+#define CTL_RCV_BAD	0x4000 // When 1 bad CRC packets are received
+#define CTL_AUTO_RELEASE 0x0800 // When 1 tx pages are released automatically
+#define	CTL_LE_ENABLE	0x0080 // When 1 enables Link Error interrupt
+#define	CTL_CR_ENABLE	0x0040 // When 1 enables Counter Rollover interrupt
+#define	CTL_TE_ENABLE	0x0020 // When 1 enables Transmit Error interrupt
+#define	CTL_EEPROM_SELECT 0x0004 // Controls EEPROM reload & store
+#define	CTL_RELOAD	0x0002 // When set reads EEPROM into registers
+#define	CTL_STORE	0x0001 // When set stores registers into EEPROM
+
+
+// MMU Command Register
+/* BANK 2 */
+#define MMU_CMD_REG	0x0000
+#define MC_BUSY		1	// When 1 the last release has not completed
+#define MC_NOP		(0<<5)	// No Op
+#define	MC_ALLOC	(1<<5) 	// OR with number of 256 byte packets
+#define	MC_RESET	(2<<5)	// Reset MMU to initial state
+#define	MC_REMOVE	(3<<5) 	// Remove the current rx packet
+#define MC_RELEASE  	(4<<5) 	// Remove and release the current rx packet
+#define MC_FREEPKT  	(5<<5) 	// Release packet in PNR register
+#define MC_ENQUEUE	(6<<5)	// Enqueue the packet for transmit
+#define MC_RSTTXFIFO	(7<<5)	// Reset the TX FIFOs
+
+
+// Packet Number Register
+/* BANK 2 */
+#define	PN_REG		0x0002
+
+
+// Allocation Result Register
+/* BANK 2 */
+#define	AR_REG		0x0003
+#define AR_FAILED	0x80	// Allocation Failed
+
+
+// RX FIFO Ports Register
+/* BANK 2 */
+#define RXFIFO_REG	0x0004	// Must be read as a word
+#define RXFIFO_REMPTY	0x8000	// RX FIFO Empty
+
+
+// TX FIFO Ports Register
+/* BANK 2 */
+#define TXFIFO_REG	RXFIFO_REG	// Must be read as a word
+#define TXFIFO_TEMPTY	0x80	// TX FIFO Empty
+
+
+// Pointer Register
+/* BANK 2 */
+#define PTR_REG		0x0006
+#define	PTR_RCV		0x8000 // 1=Receive area, 0=Transmit area
+#define	PTR_AUTOINC 	0x4000 // Auto increment the pointer on each access
+#define PTR_READ	0x2000 // When 1 the operation is a read
+
+
+// Data Register
+/* BANK 2 */
+#define	DATA_REG	0x0008
+
+
+// Interrupt Status/Acknowledge Register
+/* BANK 2 */
+#define	INT_REG		0x000C
+
+
+// Interrupt Mask Register
+/* BANK 2 */
+#define IM_REG		0x000D
+#define	IM_MDINT	0x80 // PHY MI Register 18 Interrupt
+#define	IM_ERCV_INT	0x40 // Early Receive Interrupt
+#define	IM_EPH_INT	0x20 // Set by Ethernet Protocol Handler section
+#define	IM_RX_OVRN_INT	0x10 // Set by Receiver Overruns
+#define	IM_ALLOC_INT	0x08 // Set when allocation request is completed
+#define	IM_TX_EMPTY_INT	0x04 // Set if the TX FIFO goes empty
+#define	IM_TX_INT	0x02 // Transmit Interrupt
+#define IM_RCV_INT	0x01 // Receive Interrupt
+
+
+// Multicast Table Registers
+/* BANK 3 */
+#define	MCAST_REG1	0x0000
+#define	MCAST_REG2	0x0002
+#define	MCAST_REG3	0x0004
+#define	MCAST_REG4	0x0006
+
+
+// Management Interface Register (MII)
+/* BANK 3 */
+#define	MII_REG		0x0008
+#define MII_MSK_CRS100	0x4000 // Disables CRS100 detection during tx half dup
+#define MII_MDOE	0x0008 // MII Output Enable
+#define MII_MCLK	0x0004 // MII Clock, pin MDCLK
+#define MII_MDI		0x0002 // MII Input, pin MDI
+#define MII_MDO		0x0001 // MII Output, pin MDO
+
+
+// Revision Register
+/* BANK 3 */
+#define	REV_REG		0x000A /* ( hi: chip id   low: rev # ) */
+
+
+// Early RCV Register
+/* BANK 3 */
+/* this is NOT on SMC9192 */
+#define	ERCV_REG	0x000C
+#define ERCV_RCV_DISCRD	0x0080 // When 1 discards a packet being received
+#define ERCV_THRESHOLD	0x001F // ERCV Threshold Mask
+
+// External Register
+/* BANK 7 */
+#define	EXT_REG		0x0000
+
+
+#define CHIP_9192	3
+#define CHIP_9194	4
+#define CHIP_9195	5
+#define CHIP_9196	6
+#define CHIP_91100	7
+#define CHIP_91100FD	8
+#define CHIP_91111FD	9
+
+static const char * chip_ids[ 15 ] =  { 
+	NULL, NULL, NULL, 
+	/* 3 */ "SMC91C90/91C92",
+	/* 4 */ "SMC91C94",
+	/* 5 */ "SMC91C95",
+	/* 6 */ "SMC91C96",
+	/* 7 */ "SMC91C100", 
+	/* 8 */ "SMC91C100FD", 
+	/* 9 */ "SMC91C11xFD", 
+	NULL, NULL, 
+	NULL, NULL, NULL};  
+
+/* 
+ . Transmit status bits 
+*/
+#define TS_SUCCESS 0x0001
+#define TS_LOSTCAR 0x0400
+#define TS_LATCOL  0x0200
+#define TS_16COL   0x0010
+
+/*
+ . Receive status bits
+*/
+#define RS_ALGNERR	0x8000
+#define RS_BRODCAST	0x4000
+#define RS_BADCRC	0x2000
+#define RS_ODDFRAME	0x1000	// bug: the LAN91C111 never sets this on receive
+#define RS_TOOLONG	0x0800
+#define RS_TOOSHORT	0x0400
+#define RS_MULTICAST	0x0001
+#define RS_ERRORS	(RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT) 
+
+
+// PHY Types
+enum {
+	PHY_LAN83C183 = 1,	// LAN91C111 Internal PHY
+	PHY_LAN83C180
+};
+
+
+// PHY Register Addresses (LAN91C111 Internal PHY)
+
+// PHY Control Register
+#define PHY_CNTL_REG		0x00
+#define PHY_CNTL_RST		0x8000	// 1=PHY Reset
+#define PHY_CNTL_LPBK		0x4000	// 1=PHY Loopback
+#define PHY_CNTL_SPEED		0x2000	// 1=100Mbps, 0=10Mbps
+#define PHY_CNTL_ANEG_EN	0x1000 // 1=Enable Auto negotiation
+#define PHY_CNTL_PDN		0x0800	// 1=PHY Power Down mode
+#define PHY_CNTL_MII_DIS	0x0400	// 1=MII 4 bit interface disabled
+#define PHY_CNTL_ANEG_RST	0x0200 // 1=Reset Auto negotiate
+#define PHY_CNTL_DPLX		0x0100	// 1=Full Duplex, 0=Half Duplex
+#define PHY_CNTL_COLTST		0x0080	// 1=MII Collision Test
+
+// PHY Status Register
+#define PHY_STAT_REG		0x01
+#define PHY_STAT_CAP_T4		0x8000	// 1=100Base-T4 capable
+#define PHY_STAT_CAP_TXF	0x4000	// 1=100Base-X full duplex capable
+#define PHY_STAT_CAP_TXH	0x2000	// 1=100Base-X half duplex capable
+#define PHY_STAT_CAP_TF		0x1000	// 1=10Mbps full duplex capable
+#define PHY_STAT_CAP_TH		0x0800	// 1=10Mbps half duplex capable
+#define PHY_STAT_CAP_SUPR	0x0040	// 1=recv mgmt frames with no preamble
+#define PHY_STAT_ANEG_ACK	0x0020	// 1=ANEG has completed
+#define PHY_STAT_REM_FLT	0x0010	// 1=Remote Fault detected
+#define PHY_STAT_CAP_ANEG	0x0008	// 1=Auto negotiate capable
+#define PHY_STAT_LINK		0x0004	// 1=valid link
+#define PHY_STAT_JAB		0x0002	// 1=10Mbps jabber condition
+#define PHY_STAT_EXREG		0x0001	// 1=extended registers implemented
+
+// PHY Identifier Registers
+#define PHY_ID1_REG		0x02	// PHY Identifier 1
+#define PHY_ID2_REG		0x03	// PHY Identifier 2
+
+// PHY Auto-Negotiation Advertisement Register
+#define PHY_AD_REG		0x04
+#define PHY_AD_NP		0x8000	// 1=PHY requests exchange of Next Page
+#define PHY_AD_ACK		0x4000	// 1=got link code word from remote
+#define PHY_AD_RF		0x2000	// 1=advertise remote fault
+#define PHY_AD_T4		0x0200	// 1=PHY is capable of 100Base-T4
+#define PHY_AD_TX_FDX		0x0100	// 1=PHY is capable of 100Base-TX FDPLX
+#define PHY_AD_TX_HDX		0x0080	// 1=PHY is capable of 100Base-TX HDPLX
+#define PHY_AD_10_FDX		0x0040	// 1=PHY is capable of 10Base-T FDPLX
+#define PHY_AD_10_HDX		0x0020	// 1=PHY is capable of 10Base-T HDPLX
+#define PHY_AD_CSMA		0x0001	// 1=PHY is capable of 802.3 CSMA
+
+// PHY Auto-negotiation Remote End Capability Register
+#define PHY_RMT_REG		0x05
+// Uses same bit definitions as PHY_AD_REG
+
+// PHY Configuration Register 1
+#define PHY_CFG1_REG		0x10
+#define PHY_CFG1_LNKDIS		0x8000	// 1=Rx Link Detect Function disabled
+#define PHY_CFG1_XMTDIS		0x4000	// 1=TP Transmitter Disabled
+#define PHY_CFG1_XMTPDN		0x2000	// 1=TP Transmitter Powered Down
+#define PHY_CFG1_BYPSCR		0x0400	// 1=Bypass scrambler/descrambler
+#define PHY_CFG1_UNSCDS		0x0200	// 1=Unscramble Idle Reception Disable
+#define PHY_CFG1_EQLZR		0x0100	// 1=Rx Equalizer Disabled
+#define PHY_CFG1_CABLE		0x0080	// 1=STP(150ohm), 0=UTP(100ohm)
+#define PHY_CFG1_RLVL0		0x0040	// 1=Rx Squelch level reduced by 4.5db
+#define PHY_CFG1_TLVL_SHIFT	2	// Transmit Output Level Adjust
+#define PHY_CFG1_TLVL_MASK	0x003C
+#define PHY_CFG1_TRF_MASK	0x0003	// Transmitter Rise/Fall time
+
+
+// PHY Configuration Register 2
+#define PHY_CFG2_REG		0x11
+#define PHY_CFG2_APOLDIS	0x0020	// 1=Auto Polarity Correction disabled
+#define PHY_CFG2_JABDIS		0x0010	// 1=Jabber disabled
+#define PHY_CFG2_MREG		0x0008	// 1=Multiple register access (MII mgt)
+#define PHY_CFG2_INTMDIO	0x0004	// 1=Interrupt signaled with MDIO pulse
+
+// PHY Status Output (and Interrupt status) Register
+#define PHY_INT_REG		0x12	// Status Output (Interrupt Status)
+#define PHY_INT_INT		0x8000	// 1=bits have changed since last read
+#define	PHY_INT_LNKFAIL		0x4000	// 1=Link Not detected
+#define PHY_INT_LOSSSYNC	0x2000	// 1=Descrambler has lost sync
+#define PHY_INT_CWRD		0x1000	// 1=Invalid 4B5B code detected on rx
+#define PHY_INT_SSD		0x0800	// 1=No Start Of Stream detected on rx
+#define PHY_INT_ESD		0x0400	// 1=No End Of Stream detected on rx
+#define PHY_INT_RPOL		0x0200	// 1=Reverse Polarity detected
+#define PHY_INT_JAB		0x0100	// 1=Jabber detected
+#define PHY_INT_SPDDET		0x0080	// 1=100Base-TX mode, 0=10Base-T mode
+#define PHY_INT_DPLXDET		0x0040	// 1=Device in Full Duplex
+
+// PHY Interrupt/Status Mask Register
+#define PHY_MASK_REG		0x13	// Interrupt Mask
+// Uses the same bit definitions as PHY_INT_REG
+
+
+
+/*-------------------------------------------------------------------------
+ .  I define some macros to make it easier to do somewhat common
+ . or slightly complicated, repeated tasks. 
+ --------------------------------------------------------------------------*/
+
+/* select a register bank, 0 to 3  */
+
+#define SMC_SELECT_BANK(x)  { outw( x, ioaddr + BANK_SELECT ); } 
+
+/* this enables an interrupt in the interrupt mask register */
+#define SMC_ENABLE_INT(x) {\
+		unsigned char mask;\
+		SMC_SELECT_BANK(2);\
+		mask = inb( ioaddr + IM_REG );\
+		mask |= (x);\
+		outb( mask, ioaddr + IM_REG ); \
+}
+
+/* this disables an interrupt from the interrupt mask register */
+
+#define SMC_DISABLE_INT(x) {\
+		unsigned char mask;\
+		SMC_SELECT_BANK(2);\
+		mask = inb( ioaddr + IM_REG );\
+		mask &= ~(x);\
+		outb( mask, ioaddr + IM_REG ); \
+}
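+
+/*
+ * Usage sketch (illustrative only): with 'ioaddr' in scope, a source of
+ * interrupts is unmasked or masked with the helpers above, e.g.
+ *
+ *	SMC_ENABLE_INT(IM_RCV_INT);
+ *	...
+ *	SMC_DISABLE_INT(IM_RCV_INT);
+ *
+ * Note that both macros leave bank 2 selected when they return.
+ */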
+
+/*----------------------------------------------------------------------
+ . Define the interrupts that I want to receive from the card
+ . 
+ . I want: 
+ .  IM_EPH_INT, for nasty errors
+ .  IM_RCV_INT, for happy received packets
+ .  IM_RX_OVRN_INT, because I have to kick the receiver
+ .  IM_MDINT, for PHY Register 18 Status Changes
+ --------------------------------------------------------------------------*/
+#define SMC_INTERRUPT_MASK   (IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT | \
+	IM_MDINT) 
+
+
+#ifdef CONFIG_SYSCTL
+
+
+/*
+ * Declarations for the sysctl interface, which allows users to control
+ * the finer aspects of the LAN91C111 chip.  Since the smc
+ * module currently registers its sysctl table dynamically, the sysctl path
+ * for module FOO is /proc/sys/dev/ethX/FOO
+ */
+#define CTL_SMC         (CTL_BUS+1389)      // arbitrary and hopefully unused
+
+enum {
+	CTL_SMC_INFO = 1,	// Sysctl files information
+	CTL_SMC_SWVER,		// Driver Software Version Info
+	CTL_SMC_SWFDUP,		// Switched Full Duplex Mode
+	CTL_SMC_EPHLOOP,	// EPH Block Internal Loopback
+	CTL_SMC_MIIOP,		// MII Operation
+	CTL_SMC_AUTONEG,	// Auto-negotiate Mode
+	CTL_SMC_RFDUPLX,	// Request Full Duplex Mode
+	CTL_SMC_RSPEED,		// Request Speed Selection
+	CTL_SMC_AFDUPLX,	// Actual Full Duplex Mode
+	CTL_SMC_ASPEED,		// Actual Speed Selection
+	CTL_SMC_LNKFAIL,	// Link Failed
+	CTL_SMC_FORCOL,		// Force a Collision
+	CTL_SMC_FILTCAR,	// Filter Carrier
+	CTL_SMC_FREEMEM,	// Free Buffer Memory
+	CTL_SMC_TOTMEM,		// Total Buffer Memory
+	CTL_SMC_LEDA,		// Output of LED-A
+	CTL_SMC_LEDB,		// Output of LED-B
+	CTL_SMC_CHIPREV,	// LAN91C111 Chip Revision ID
+#ifdef SMC_DEBUG
+	// Register access for debugging
+	CTL_SMC_REG_BSR,	// Bank Select
+	CTL_SMC_REG_TCR,	// Transmit Control
+	CTL_SMC_REG_ESR,	// EPH Status
+	CTL_SMC_REG_RCR,	// Receive Control
+	CTL_SMC_REG_CTRR,	// Counter
+	CTL_SMC_REG_MIR,	// Memory Information
+	CTL_SMC_REG_RPCR,	// Receive/Phy Control
+	CTL_SMC_REG_CFGR,	// Configuration
+	CTL_SMC_REG_BAR,	// Base Address
+	CTL_SMC_REG_IAR0,	// Individual Address 0
+	CTL_SMC_REG_IAR1,	// Individual Address 1 
+	CTL_SMC_REG_IAR2,	// Individual Address 2
+	CTL_SMC_REG_GPR,	// General Purpose
+	CTL_SMC_REG_CTLR,	// Control
+	CTL_SMC_REG_MCR,	// MMU Command
+	CTL_SMC_REG_PNR,	// Packet Number
+	CTL_SMC_REG_FPR,	// FIFO Ports
+	CTL_SMC_REG_PTR,	// Pointer
+	CTL_SMC_REG_DR,		// Data 
+	CTL_SMC_REG_ISR,	// Interrupt Status
+	CTL_SMC_REG_MTR1,	// Multicast Table Entry 1
+	CTL_SMC_REG_MTR2,	// Multicast Table Entry 2
+	CTL_SMC_REG_MTR3,	// Multicast Table Entry 3
+	CTL_SMC_REG_MTR4,	// Multicast Table Entry 4
+	CTL_SMC_REG_MIIR,	// Management Interface
+	CTL_SMC_REG_REVR,	// Revision
+	CTL_SMC_REG_ERCVR,	// Early RCV
+	CTL_SMC_REG_EXTR,	// External
+	CTL_SMC_PHY_CTRL,	// PHY Control
+	CTL_SMC_PHY_STAT,	// PHY Status
+	CTL_SMC_PHY_ID1,	// PHY ID1
+	CTL_SMC_PHY_ID2,	// PHY ID2
+	CTL_SMC_PHY_ADC,	// PHY Advertise Capability
+	CTL_SMC_PHY_REMC,	// PHY Remote End Capability
+	CTL_SMC_PHY_CFG1,	// PHY Configuration 1
+	CTL_SMC_PHY_CFG2,	// PHY Configuration 2
+	CTL_SMC_PHY_INT,	// PHY Interrupt/Status Output
+	CTL_SMC_PHY_MASK,	// PHY Interrupt/Status Mask
+#endif
+	// ---------------------------------------------------
+	CTL_SMC_LAST_ENTRY	// Add new entries above the line
+};
+
+#endif // CONFIG_SYSCTL
+ 
+#endif  /* _SMC91111_H_ */
+
+
+++ linux-patched/drivers/xenomai/net/drivers/rt_fec.h	2022-03-21 12:58:29.690886026 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/****************************************************************************/
+
+/*
+ *	fec.h  --  Fast Ethernet Controller for Motorola ColdFire SoC
+ *		   processors.
+ *
+ *	(C) Copyright 2000-2005, Greg Ungerer (gerg@snapgear.com)
+ *	(C) Copyright 2000-2001, Lineo (www.lineo.com)
+ */
+
+/****************************************************************************/
+#ifndef RT_FEC_H
+#define	RT_FEC_H
+/****************************************************************************/
+
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+/*
+ *	Just figures, Motorola would have to change the offsets for
+ *	registers in the same peripheral device on different models
+ *	of the ColdFire!
+ */
+#define FEC_IEVENT		0x004 /* Interrupt event reg */
+#define FEC_IMASK		0x008 /* Interrupt mask reg */
+#define FEC_R_DES_ACTIVE	0x010 /* Receive descriptor reg */
+#define FEC_X_DES_ACTIVE	0x014 /* Transmit descriptor reg */
+#define FEC_ECNTRL		0x024 /* Ethernet control reg */
+#define FEC_MII_DATA		0x040 /* MII manage frame reg */
+#define FEC_MII_SPEED		0x044 /* MII speed control reg */
+#define FEC_MIB_CTRLSTAT	0x064 /* MIB control/status reg */
+#define FEC_R_CNTRL		0x084 /* Receive control reg */
+#define FEC_X_CNTRL		0x0c4 /* Transmit Control reg */
+#define FEC_ADDR_LOW		0x0e4 /* Low 32bits MAC address */
+#define FEC_ADDR_HIGH		0x0e8 /* High 16bits MAC address */
+#define FEC_OPD			0x0ec /* Opcode + Pause duration */
+#define FEC_HASH_TABLE_HIGH	0x118 /* High 32bits hash table */
+#define FEC_HASH_TABLE_LOW	0x11c /* Low 32bits hash table */
+#define FEC_GRP_HASH_TABLE_HIGH	0x120 /* High 32bits hash table */
+#define FEC_GRP_HASH_TABLE_LOW	0x124 /* Low 32bits hash table */
+#define FEC_X_WMRK		0x144 /* FIFO transmit water mark */
+#define FEC_R_BOUND		0x14c /* FIFO receive bound reg */
+#define FEC_R_FSTART		0x150 /* FIFO receive start reg */
+#define FEC_R_DES_START		0x180 /* Receive descriptor ring */
+#define FEC_X_DES_START		0x184 /* Transmit descriptor ring */
+#define FEC_R_BUFF_SIZE		0x188 /* Maximum receive buff size */
+#define FEC_TACC		0x1c0 /* Transmit accelerator reg */
+#define FEC_MIIGSK_CFGR		0x300 /* MIIGSK Configuration reg */
+#define FEC_MIIGSK_ENR		0x308 /* MIIGSK Enable reg */
+
+#define BM_MIIGSK_CFGR_MII		0x00
+#define BM_MIIGSK_CFGR_RMII		0x01
+#define BM_MIIGSK_CFGR_FRCONT_10M	0x40
+
+#else
+
+#define FEC_ECNTRL		0x000 /* Ethernet control reg */
+#define FEC_IEVENT		0x004 /* Interrupt event reg */
+#define FEC_IMASK		0x008 /* Interrupt mask reg */
+#define FEC_IVEC		0x00c /* Interrupt vec status reg */
+#define FEC_R_DES_ACTIVE	0x010 /* Receive descriptor reg */
+#define FEC_X_DES_ACTIVE	0x014 /* Transmit descriptor reg */
+#define FEC_MII_DATA		0x040 /* MII manage frame reg */
+#define FEC_MII_SPEED		0x044 /* MII speed control reg */
+#define FEC_R_BOUND		0x08c /* FIFO receive bound reg */
+#define FEC_R_FSTART		0x090 /* FIFO receive start reg */
+#define FEC_X_WMRK		0x0a4 /* FIFO transmit water mark */
+#define FEC_X_FSTART		0x0ac /* FIFO transmit start reg */
+#define FEC_R_CNTRL		0x104 /* Receive control reg */
+#define FEC_MAX_FRM_LEN		0x108 /* Maximum frame length reg */
+#define FEC_X_CNTRL		0x144 /* Transmit Control reg */
+#define FEC_ADDR_LOW		0x3c0 /* Low 32bits MAC address */
+#define FEC_ADDR_HIGH		0x3c4 /* High 16bits MAC address */
+#define FEC_GRP_HASH_TABLE_HIGH	0x3c8 /* High 32bits hash table */
+#define FEC_GRP_HASH_TABLE_LOW	0x3cc /* Low 32bits hash table */
+#define FEC_R_DES_START		0x3d0 /* Receive descriptor ring */
+#define FEC_X_DES_START		0x3d4 /* Transmit descriptor ring */
+#define FEC_R_BUFF_SIZE		0x3d8 /* Maximum receive buff size */
+#define FEC_FIFO_RAM		0x400 /* FIFO RAM buffer */
+
+#endif /* CONFIG_M523x/M527x/M528x/M520x/M532x/ARCH_MXC/SOC_IMX28 */
+
+
+/*
+ *	Define the buffer descriptor structure.
+ */
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+struct bufdesc {
+	unsigned short cbd_datlen;	/* Data length */
+	unsigned short cbd_sc;	/* Control and status info */
+	unsigned long cbd_bufaddr;	/* Buffer address */
+};
+#else
+struct bufdesc {
+	unsigned short	cbd_sc;			/* Control and status info */
+	unsigned short	cbd_datlen;		/* Data length */
+	unsigned long	cbd_bufaddr;		/* Buffer address */
+};
+#endif
+
+/*
+ *	The following definitions courtesy of commproc.h, which were
+ *	Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
+ */
+#define BD_SC_EMPTY     ((ushort)0x8000)        /* Receive is empty */
+#define BD_SC_READY     ((ushort)0x8000)        /* Transmit is ready */
+#define BD_SC_WRAP      ((ushort)0x2000)        /* Last buffer descriptor */
+#define BD_SC_INTRPT    ((ushort)0x1000)        /* Interrupt on change */
+#define BD_SC_CM        ((ushort)0x0200)        /* Continuous mode */
+#define BD_SC_ID        ((ushort)0x0100)        /* Rec'd too many idles */
+#define BD_SC_P         ((ushort)0x0100)        /* xmt preamble */
+#define BD_SC_BR        ((ushort)0x0020)        /* Break received */
+#define BD_SC_FR        ((ushort)0x0010)        /* Framing error */
+#define BD_SC_PR        ((ushort)0x0008)        /* Parity error */
+#define BD_SC_OV        ((ushort)0x0002)        /* Overrun */
+#define BD_SC_CD        ((ushort)0x0001)        /* ?? */
+
+/* Buffer descriptor control/status used by Ethernet receive.
+*/
+#define BD_ENET_RX_EMPTY        ((ushort)0x8000)
+#define BD_ENET_RX_WRAP         ((ushort)0x2000)
+#define BD_ENET_RX_INTR         ((ushort)0x1000)
+#define BD_ENET_RX_LAST         ((ushort)0x0800)
+#define BD_ENET_RX_FIRST        ((ushort)0x0400)
+#define BD_ENET_RX_MISS         ((ushort)0x0100)
+#define BD_ENET_RX_LG           ((ushort)0x0020)
+#define BD_ENET_RX_NO           ((ushort)0x0010)
+#define BD_ENET_RX_SH           ((ushort)0x0008)
+#define BD_ENET_RX_CR           ((ushort)0x0004)
+#define BD_ENET_RX_OV           ((ushort)0x0002)
+#define BD_ENET_RX_CL           ((ushort)0x0001)
+#define BD_ENET_RX_STATS        ((ushort)0x013f)        /* All status bits */
+
+/* Buffer descriptor control/status used by Ethernet transmit.
+*/
+#define BD_ENET_TX_READY        ((ushort)0x8000)
+#define BD_ENET_TX_PAD          ((ushort)0x4000)
+#define BD_ENET_TX_WRAP         ((ushort)0x2000)
+#define BD_ENET_TX_INTR         ((ushort)0x1000)
+#define BD_ENET_TX_LAST         ((ushort)0x0800)
+#define BD_ENET_TX_TC           ((ushort)0x0400)
+#define BD_ENET_TX_DEF          ((ushort)0x0200)
+#define BD_ENET_TX_HB           ((ushort)0x0100)
+#define BD_ENET_TX_LC           ((ushort)0x0080)
+#define BD_ENET_TX_RL           ((ushort)0x0040)
+#define BD_ENET_TX_RCMASK       ((ushort)0x003c)
+#define BD_ENET_TX_UN           ((ushort)0x0002)
+#define BD_ENET_TX_CSL          ((ushort)0x0001)
+#define BD_ENET_TX_STATS        ((ushort)0x03ff)        /* All status bits */
+
+
+/****************************************************************************/
+#endif /* RT_FEC_H */
+++ linux-patched/drivers/xenomai/net/drivers/Kconfig	2022-03-21 12:58:29.684886085 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/via-rhine.c	1970-01-01 01:00:00.000000000 +0100
+menu "Drivers"
+    depends on XENO_DRIVERS_NET
+
+comment "Common PCI Drivers"
+    depends on PCI
+
+config XENO_DRIVERS_NET_DRV_PCNET32
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "AMD PCnet32"
+
+
+config XENO_DRIVERS_NET_DRV_TULIP
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "DEC Tulip"
+
+
+config XENO_DRIVERS_NET_DRV_EEPRO100
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "Intel EtherExpress PRO/100"
+    default y
+
+config XENO_DRIVERS_NET_DRV_EEPRO100_CMDTIMEOUT
+    depends on XENO_DRIVERS_NET && PCI
+    int "Command Timeout"
+    depends on XENO_DRIVERS_NET_DRV_EEPRO100
+    default 20
+    help
+    Timeout in microseconds of transmission or configuration commands that
+    are issued in real-time contexts.
+
+config XENO_DRIVERS_NET_DRV_EEPRO100_DBG
+    depends on XENO_DRIVERS_NET && PCI
+    bool "Enable debugging and instrumentation"
+    depends on XENO_DRIVERS_NET_DRV_EEPRO100
+    help
+    This option switches on internal debugging code of the EEPRO/100 driver.
+    It also enables the collection of worst-case command delays in real-time
+    contexts in order to reduce the command timeout (which, effectively, will
+    also reduce the worst-case transmission latency).
+
+
+config XENO_DRIVERS_NET_DRV_E1000
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "Intel(R) PRO/1000 (Gigabit)"
+    default y
+
+config XENO_DRIVERS_NET_DRV_E1000E
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "New Intel(R) PRO/1000 PCIe (Gigabit)"
+
+
+config XENO_DRIVERS_NET_DRV_NATSEMI
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "NatSemi"
+
+
+config XENO_DRIVERS_NET_DRV_8139
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "Realtek 8139"
+    default y
+
+
+config XENO_DRIVERS_NET_DRV_VIA_RHINE
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "VIA Rhine"
+
+
+config XENO_DRIVERS_NET_DRV_IGB
+    select I2C
+    select I2C_ALGOBIT
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "Intel(R) 82575 (Gigabit)"
+
+
+config XENO_DRIVERS_NET_DRV_R8169
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "Realtek 8169 (Gigabit)"
+
+
+if PPC
+
+comment "Embedded MPC Drivers"
+    depends on XENO_DRIVERS_NET
+
+config XENO_DRIVERS_NET_DRV_FCC_ENET
+    depends on XENO_DRIVERS_NET
+    tristate "MPC8260 FCC Ethernet"
+
+
+config XENO_DRIVERS_NET_DRV_FEC_ENET
+    depends on XENO_DRIVERS_NET
+    tristate "MPC8xx FEC Ethernet"
+
+
+config XENO_DRIVERS_NET_DRV_SCC_ENET
+    depends on XENO_DRIVERS_NET
+    tristate "MPC8xx SCC Ethernet"
+
+
+config XENO_DRIVERS_NET_DRV_MPC52XX_FEC
+    depends on XENO_DRIVERS_NET
+    tristate "MPC52xx FEC Ethernet"
+
+endif
+
+
+comment "Misc Drivers"
+
+config XENO_DRIVERS_NET_DRV_LOOPBACK
+    depends on XENO_DRIVERS_NET
+    tristate "Loopback"
+    default y
+
+
+config XENO_DRIVERS_NET_DRV_SMC91111
+    depends on XENO_DRIVERS_NET
+    tristate "SMSC LAN91C111"
+
+if ARM
+
+config XENO_DRIVERS_NET_DRV_AT91_ETHER
+    depends on XENO_DRIVERS_NET && SOC_AT91RM9200
+    select XENO_DRIVERS_NET_DRV_MACB
+    tristate "AT91RM9200 Board Ethernet Driver"
+
+config XENO_DRIVERS_NET_DRV_MACB
+    depends on XENO_DRIVERS_NET
+    select AT91_PROGRAMMABLE_CLOCKS if ARCH_AT91
+    tristate "Cadence MACB/GEM devices"
+    help
+    Driver for internal MAC-controller on AT91SAM926x microcontrollers.
+    Porting by Cristiano Mantovani and Stefano Banzi (Marposs SpA).
+
+endif
+
+source "drivers/xenomai/net/drivers/experimental/Kconfig"
+
+endmenu
+++ linux-patched/drivers/xenomai/net/drivers/via-rhine.c	2022-03-21 12:58:29.679886133 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/tulip/interrupt.c	1970-01-01 01:00:00.000000000 +0100
+/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
+/*
+	Written 1998-2001 by Donald Becker.
+
+	This software may be used and distributed according to the terms of
+	the GNU General Public License (GPL), incorporated herein by reference.
+	Drivers based on or derived from this code fall under the GPL and must
+	retain the authorship, copyright and license notice.  This file is not
+	a complete program and may only be used when the entire operating
+	system is licensed under the GPL.
+
+	This driver is designed for the VIA VT86C100A Rhine-I.
+	It also works with the 6102 Rhine-II, and 6105/6105M Rhine-III.
+
+	The author may be reached as becker@scyld.com, or C/O
+	Scyld Computing Corporation
+	410 Severn Ave., Suite 210
+	Annapolis MD 21403
+
+
+	This driver contains some changes from the original Donald Becker
+	version. He may or may not be interested in bug reports on this
+	code. You can find his versions at:
+	http://www.scyld.com/network/via-rhine.html
+
+
+	Linux kernel version history:
+
+	LK1.1.0:
+	- Jeff Garzik: softnet 'n stuff
+
+	LK1.1.1:
+	- Justin Guyett: softnet and locking fixes
+	- Jeff Garzik: use PCI interface
+
+	LK1.1.2:
+	- Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions
+
+	LK1.1.3:
+	- Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
+			 code) update "Theory of Operation" with
+			 softnet/locking changes
+	- Dave Miller: PCI DMA and endian fixups
+	- Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation
+
+	LK1.1.4:
+	- Urban Widmark: fix gcc 2.95.2 problem and
+			 remove writel's to fixed address 0x7c
+
+	LK1.1.5:
+	- Urban Widmark: mdio locking, bounce buffer changes
+			 merges from Beckers 1.05 version
+			 added netif_running_on/off support
+
+	LK1.1.6:
+	- Urban Widmark: merges from Beckers 1.08b version (VT6102 + mdio)
+			 set netif_running_on/off on startup, del_timer_sync
+
+	LK1.1.7:
+	- Manfred Spraul: added reset into tx_timeout
+
+	LK1.1.9:
+	- Urban Widmark: merges from Beckers 1.10 version
+			 (media selection + eeprom reload)
+	- David Vrabel:  merges from D-Link "1.11" version
+			 (disable WOL and PME on startup)
+
+	LK1.1.10:
+	- Manfred Spraul: use "singlecopy" for unaligned buffers
+			  don't allocate bounce buffers for !ReqTxAlign cards
+
+	LK1.1.11:
+	- David Woodhouse: Set dev->base_addr before the first time we call
+					   wait_for_reset(). It's a lot happier that way.
+					   Free np->tx_bufs only if we actually allocated it.
+
+	LK1.1.12:
+	- Martin Eriksson: Allow Memory-Mapped IO to be enabled.
+
+	LK1.1.13 (jgarzik):
+	- Add ethtool support
+	- Replace some MII-related magic numbers with constants
+
+	LK1.1.14 (Ivan G.):
+	- fixes comments for Rhine-III
+	- removes W_MAX_TIMEOUT (unused)
+	- adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
+	  is R-I and has Davicom chip, flag is referenced in kernel driver)
+	- sends chip_id as a parameter to wait_for_reset since np is not
+	  initialized on first call
+	- changes mmio "else if (chip_id==VT6102)" to "else" so it will work
+	  for Rhine-III's (documentation says same bit is correct)
+	- transmit frame queue message is off by one - fixed
+	- adds IntrNormalSummary to "Something Wicked" exclusion list
+	  so normal interrupts will not trigger the message (src: Donald Becker)
+	(Roger Luethi)
+	- show confused chip where to continue after Tx error
+	- location of collision counter is chip specific
+	- allow selecting backoff algorithm (module parameter)
+
+	LK1.1.15 (jgarzik):
+	- Use new MII lib helper generic_mii_ioctl
+
+	LK1.1.16 (Roger Luethi)
+	- Etherleak fix
+	- Handle Tx buffer underrun
+	- Fix bugs in full duplex handling
+	- New reset code uses "force reset" cmd on Rhine-II
+	- Various clean ups
+
+	LK1.1.17 (Roger Luethi)
+	- Fix race in via_rhine_start_tx()
+	- On errors, wait for Tx engine to turn off before scavenging
+	- Handle Tx descriptor write-back race on Rhine-II
+	- Force flushing for PCI posted writes
+	- More reset code changes
+
+	Ported to RTnet: October 2003, Jan Kiszka <Jan.Kiszka@web.de>
+*/
+
+#define DRV_NAME	"via-rhine-rt"
+#define DRV_VERSION	"1.1.17-RTnet-0.1"
+#define DRV_RELDATE	"2003-10-05"
+
+
+/* A few user-configurable values.
+   These may be modified when a driver module is loaded. */
+
+static int local_debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
+static int max_interrupt_work = 20;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+   Setting to > 1518 effectively disables this feature. */
+/*** RTnet ***
+static int rx_copybreak;
+ *** RTnet ***/
+
+/* Select a backoff algorithm (Ethernet capture effect) */
+static int backoff;
+
+/* Used to pass the media type, etc.
+   Both 'options[]' and 'full_duplex[]' should exist for driver
+   interoperability.
+   The media type is usually passed in 'options[]'.
+   The default is autonegotiation for speed and duplex.
+     This should rarely be overridden.
+   Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+   Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+   Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8		/* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+   The Rhine has a 64 element 8390-like hash table.  */
+static const int multicast_filter_limit = 32;
+
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+   Making the Tx ring too large decreases the effectiveness of channel
+   bonding and packet priority.
+   There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE	16
+#define TX_QUEUE_LEN	10		/* Limit ring entries actually used.  */
+#define RX_RING_SIZE	8 /*** RTnet ***/
+
+
+/* Operational parameters that usually are not changed. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (2*HZ)
+
+#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
+
+#if !defined(__OPTIMIZE__)  ||  !defined(__KERNEL__)
+#warning  You must compile this file with the correct options!
+#warning  See the last lines of the source file.
+#error  You must compile this driver with "-O".
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/uaccess.h>
+#include <asm/processor.h>		/* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+/*** RTnet ***/
+#include <rtnet_port.h>
+
+#define DEFAULT_RX_POOL_SIZE    16
+
+static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+/*** RTnet ***/
+
+/* These identify the driver base version and may not be removed. */
+static char version[] =
+KERN_INFO DRV_NAME ".c:" DRV_VERSION "  " DRV_RELDATE "  Jan.Kiszka@web.de\n";
+
+static char shortname[] = DRV_NAME;
+
+
+/* This driver was written to use PCI memory space, however most versions
+   of the Rhine only work correctly with I/O space accesses. */
+/*#ifdef CONFIG_VIA_RHINE_MMIO
+#define USE_MEM
+#else*/
+#define USE_IO
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb(addr) inb((unsigned long)(addr))
+#define readw(addr) inw((unsigned long)(addr))
+#define readl(addr) inl((unsigned long)(addr))
+#define writeb(val,addr) outb((val),(unsigned long)(addr))
+#define writew(val,addr) outw((val),(unsigned long)(addr))
+#define writel(val,addr) outl((val),(unsigned long)(addr))
+/*#endif*/
+
+MODULE_AUTHOR("Jan Kiszka");
+MODULE_DESCRIPTION("RTnet VIA Rhine PCI Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(max_interrupt_work, int, 0444);
+module_param_named(debug, local_debug, int, 0444);
+/*** RTnet ***
+MODULE_PARM(rx_copybreak, "i");
+ *** RTnet ***/
+module_param(backoff, int, 0444);
+module_param_array(options, int, NULL, 0444);
+module_param_array(full_duplex, int, NULL, 0444);
+MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
+/*** RTnet ***
+MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
+ *** RTnet ***/
+MODULE_PARM_DESC(backoff, "VIA Rhine: Bits 0-3: backoff algorithm");
+MODULE_PARM_DESC(options, "VIA Rhine: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "VIA Rhine full duplex setting(s) (1)");
+
+/*
+				Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
+controller.
+
+II. Board-specific settings
+
+Boards with this chip are functional only in a bus-master PCI slot.
+
+Many operational settings are loaded from the EEPROM to the Config word at
+offset 0x78. For most of these settings, this driver assumes that they are
+correct.
+If this driver is compiled to use PCI memory space operations the EEPROM
+must be configured to enable memory ops.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver attempts to use a zero-copy receive and transmit scheme.
+
+Alas, all data buffers are required to start on a 32 bit boundary, so
+the driver must often copy transmit packets into bounce buffers.
+
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack.  Buffers consumed this way are replaced by newly allocated
+skbuffs in the last phase of via_rhine_rx().
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames.  New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets.  When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine.  Copying also preloads the cache, which is
+most useful with small frames.
+
+Since the VIA chips are only able to transfer data to buffers on 32 bit
+boundaries, the IP header at offset 14 in an ethernet frame isn't
+longword aligned for further processing.  Copying these unaligned buffers
+has the beneficial effect of 16-byte aligning the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control.  One
+is the send-packet routine, which enforces single-threaded use by the
+dev->priv->lock spinlock. The other thread is the interrupt handler, which
+is single threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring. It locks the
+dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
+is not available it stops the transmit queue by calling netif_stop_queue.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. If enough entries in the Tx ring
+become available again, the transmit queue is woken up if it was stopped.
+
+IV. Notes
+
+IVb. References
+
+Preliminary VT86C100A manual from http://www.via.com.tw/
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
+ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
+
+
+IVc. Errata
+
+The VT86C100A manual is not a reliable source of information.
+The 3043 chip does not handle unaligned transmit or receive buffers, resulting
+in significant performance degradation for bounce buffer copies on transmit
+and unaligned IP headers on receive.
+The chip does not pad to minimum transmit length.
+
+*/
+
+
+/* This table drives the PCI probe routines.  It's mostly boilerplate in all
+   of the drivers, and will likely be provided by some future kernel.
+   Note the matching code -- the first table entry matches all 56** cards but
+   the second only the 1234 card.
+*/
+
+enum pci_flags_bit {
+	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+	PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+};
+
+enum via_rhine_chips {
+	VT86C100A = 0,
+	VT6102,
+	VT6105,
+	VT6105M
+};
+
+struct via_rhine_chip_info {
+	const char *name;
+	u16 pci_flags;
+	int io_size;
+	int drv_flags;
+};
+
+
+enum chip_capability_flags {
+	CanHaveMII=1, HasESIPhy=2, HasDavicomPhy=4,
+	ReqTxAlign=0x10, HasWOL=0x20, };
+
+#ifdef USE_MEM
+#define RHINE_IOTYPE (PCI_USES_MEM | PCI_USES_MASTER | PCI_ADDR1)
+#else
+#define RHINE_IOTYPE (PCI_USES_IO  | PCI_USES_MASTER | PCI_ADDR0)
+#endif
+/* Beware of PCI posted writes */
+#define IOSYNC	do { readb((void *)dev->base_addr + StationAddr); } while (0)
+
+/* directly indexed by enum via_rhine_chips, above */
+static struct via_rhine_chip_info via_rhine_chip_info[] =
+{
+	{ "VIA VT86C100A Rhine", RHINE_IOTYPE, 128,
+	  CanHaveMII | ReqTxAlign | HasDavicomPhy },
+	{ "VIA VT6102 Rhine-II", RHINE_IOTYPE, 256,
+	  CanHaveMII | HasWOL },
+	{ "VIA VT6105 Rhine-III", RHINE_IOTYPE, 256,
+	  CanHaveMII | HasWOL },
+	{ "VIA VT6105M Rhine-III", RHINE_IOTYPE, 256,
+	  CanHaveMII | HasWOL },
+};
+
+static struct pci_device_id via_rhine_pci_tbl[] =
+{
+	{0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT86C100A},
+	{0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6102},
+	{0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6105},
+	{0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6105M},
+	{0,}			/* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, via_rhine_pci_tbl);
+
+
+/* Offsets to the device registers. */
+enum register_offsets {
+	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
+	IntrStatus=0x0C, IntrEnable=0x0E,
+	MulticastFilter0=0x10, MulticastFilter1=0x14,
+	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
+	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
+	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
+	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
+	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
+	StickyHW=0x83, IntrStatus2=0x84, WOLcrClr=0xA4, WOLcgClr=0xA7,
+	PwrcsrClr=0xAC,
+};
+
+/* Bits in ConfigD */
+enum backoff_bits {
+	BackOptional=0x01, BackModify=0x02,
+	BackCaptureEffect=0x04, BackRandom=0x08
+};
+
+#ifdef USE_MEM
+/* Registers we check to verify that MMIO and PIO accesses read back the same values. */
+int mmio_verify_registers[] = {
+	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
+	0
+};
+#endif
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
+	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
+	IntrPCIErr=0x0040,
+	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
+	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
+	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
+	IntrRxWakeUp=0x8000,
+	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
+	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
+	IntrTxErrSummary=0x082218,
+};
+
+/* The Rx and Tx buffer descriptors. */
+struct rx_desc {
+	s32 rx_status;
+	u32 desc_length; /* Chain flag, Buffer/frame length */
+	u32 addr;
+	u32 next_desc;
+};
+struct tx_desc {
+	s32 tx_status;
+	u32 desc_length; /* Chain flag, Tx Config, Frame length */
+	u32 addr;
+	u32 next_desc;
+};
+
+/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
+#define TXDESC 0x00e08000
+
+enum rx_status_bits {
+	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
+};
+
+/* Bits in *_desc.*_status */
+enum desc_status_bits {
+	DescOwn=0x80000000
+};
+
+/* Bits in ChipCmd. */
+enum chip_cmd_bits {
+	CmdInit=0x0001, CmdStart=0x0002, CmdStop=0x0004, CmdRxOn=0x0008,
+	CmdTxOn=0x0010, CmdTxDemand=0x0020, CmdRxDemand=0x0040,
+	CmdEarlyRx=0x0100, CmdEarlyTx=0x0200, CmdFDuplex=0x0400,
+	CmdNoTxPoll=0x0800, CmdReset=0x8000,
+};
+
+#define MAX_MII_CNT	4
+struct netdev_private {
+	/* Descriptor rings */
+	struct rx_desc *rx_ring;
+	struct tx_desc *tx_ring;
+	dma_addr_t rx_ring_dma;
+	dma_addr_t tx_ring_dma;
+
+	/* The addresses of receive-in-place skbuffs. */
+	struct rtskb *rx_skbuff[RX_RING_SIZE]; /*** RTnet ***/
+	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
+
+	/* The saved address of a sent-in-place packet/buffer, for later free(). */
+	struct rtskb *tx_skbuff[TX_RING_SIZE]; /*** RTnet ***/
+	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
+
+	/* Tx bounce buffers */
+	unsigned char *tx_buf[TX_RING_SIZE];
+	unsigned char *tx_bufs;
+	dma_addr_t tx_bufs_dma;
+
+	struct pci_dev *pdev;
+	struct net_device_stats stats;
+	struct timer_list timer;	/* Media monitoring timer. */
+	rtdm_lock_t lock;
+
+	/* Frequently used values: keep some adjacent for cache effect. */
+	int chip_id, drv_flags;
+	struct rx_desc *rx_head_desc;
+	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
+	unsigned int cur_tx, dirty_tx;
+	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
+	u16 chip_cmd;						/* Current setting for ChipCmd */
+
+	/* These values keep track of the transceiver/media in use. */
+	unsigned int default_port:4;		/* Last dev->if_port value. */
+	u8 tx_thresh, rx_thresh;
+
+	/* MII transceiver section. */
+	unsigned char phys[MAX_MII_CNT];			/* MII device addresses. */
+	unsigned int mii_cnt;			/* number of MIIs found, but only the first one is used */
+	u16 mii_status;						/* last read MII status */
+	struct mii_if_info mii_if;
+	unsigned int mii_if_force_media; /*** RTnet, support for older kernels (e.g. 2.4.19) ***/
+
+	rtdm_irq_t irq_handle;
+};
+
+/*** RTnet ***/
+static int  mdio_read(struct rtnet_device *dev, int phy_id, int location);
+static void mdio_write(struct rtnet_device *dev, int phy_id, int location, int value);
+static int  via_rhine_open(struct rtnet_device *dev);
+static void via_rhine_check_duplex(struct rtnet_device *dev);
+/*static void via_rhine_timer(unsigned long data);
+static void via_rhine_tx_timeout(struct net_device *dev);*/
+static int  via_rhine_start_tx(struct rtskb *skb, struct rtnet_device *dev);
+static int via_rhine_interrupt(rtdm_irq_t *irq_handle);
+static void via_rhine_tx(struct rtnet_device *dev);
+static void via_rhine_rx(struct rtnet_device *dev, nanosecs_abs_t *time_stamp);
+static void via_rhine_error(struct rtnet_device *dev, int intr_status);
+static void via_rhine_set_rx_mode(struct rtnet_device *dev);
+static struct net_device_stats *via_rhine_get_stats(struct rtnet_device *rtdev);
+/*static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);*/
+static int  via_rhine_close(struct rtnet_device *dev);
+/*** RTnet ***/
+
+static inline u32 get_intr_status(struct rtnet_device *dev) /*** RTnet ***/
+{
+	void *ioaddr = (void *)dev->base_addr;
+	struct netdev_private *np = dev->priv;
+	u32 intr_status;
+
+	intr_status = readw(ioaddr + IntrStatus);
+	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
+	if (np->chip_id == VT6102)
+		intr_status |= readb(ioaddr + IntrStatus2) << 16;
+	return intr_status;
+}
+
+static void wait_for_reset(struct rtnet_device *dev, int chip_id, char *name) /*** RTnet ***/
+{
+	void *ioaddr = (void *)dev->base_addr;
+	int boguscnt = 20;
+
+	IOSYNC;
+
+	if (readw(ioaddr + ChipCmd) & CmdReset) {
+		printk(KERN_INFO "%s: Reset not complete yet. "
+			"Trying harder.\n", name);
+
+		/* Rhine-II needs to be forced sometimes */
+		if (chip_id == VT6102)
+			writeb(0x40, ioaddr + MiscCmd);
+
+		/* VT86C100A may need long delay after reset (dlink) */
+		/* Seen on Rhine-II as well (rl) */
+		while ((readw(ioaddr + ChipCmd) & CmdReset) && --boguscnt)
+			udelay(5);
+
+	}
+
+	if (local_debug > 1)
+		printk(KERN_INFO "%s: Reset %s.\n", name,
+			boguscnt ? "succeeded" : "failed");
+}
+
+#ifdef USE_MEM
+static void enable_mmio(long ioaddr, int chip_id)
+{
+	int n;
+	if (chip_id == VT86C100A) {
+		/* More recent docs say that this bit is reserved ... */
+		n = inb(ioaddr + ConfigA) | 0x20;
+		outb(n, ioaddr + ConfigA);
+	} else {
+		n = inb(ioaddr + ConfigD) | 0x80;
+		outb(n, ioaddr + ConfigD);
+	}
+}
+#endif
+
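+/* Ask the chip to reload its station address and configuration registers from
+   the serial EEPROM, then poll until the reload bit clears again. */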
+static void reload_eeprom(long ioaddr)
+{
+	int i;
+	outb(0x20, ioaddr + MACRegEEcsr);
+	/* Typically 2 cycles to reload. */
+	for (i = 0; i < 150; i++)
+		if (! (inb(ioaddr + MACRegEEcsr) & 0x20))
+			break;
+}
+
+static int via_rhine_init_one (struct pci_dev *pdev,
+					 const struct pci_device_id *ent)
+{
+	struct rtnet_device *dev; /*** RTnet ***/
+	struct netdev_private *np;
+	int i, option;
+	int chip_id = (int) ent->driver_data;
+	static int card_idx = -1;
+	void *ioaddr;
+	long memaddr;
+	unsigned int io_size;
+	int pci_flags;
+#ifdef USE_MEM
+	long ioaddr0;
+#endif
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+	static int printed_version;
+	if (!printed_version++)
+		printk(version);
+#endif
+
+	card_idx++;
+	option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+	io_size = via_rhine_chip_info[chip_id].io_size;
+	pci_flags = via_rhine_chip_info[chip_id].pci_flags;
+
+/*** RTnet ***/
+	if (cards[card_idx] == 0)
+		goto err_out;
+/*** RTnet ***/
+
+	if (pci_enable_device (pdev))
+		goto err_out;
+
+	/* this should always be supported */
+	if (pci_set_dma_mask(pdev, 0xffffffff)) {
+		printk(KERN_ERR "32-bit PCI DMA addresses not supported by the card!?\n");
+		goto err_out;
+	}
+
+	/* sanity check */
+	if ((pci_resource_len (pdev, 0) < io_size) ||
+	    (pci_resource_len (pdev, 1) < io_size)) {
+		printk (KERN_ERR "Insufficient PCI resources, aborting\n");
+		goto err_out;
+	}
+
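+	/* BAR 0 exposes the chip registers as an I/O port window, BAR 1 as a
+	   memory-mapped window; which one is used depends on USE_IO/USE_MEM. */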
+	ioaddr = (void *)pci_resource_start (pdev, 0);
+	memaddr = pci_resource_start (pdev, 1);
+
+	if (pci_flags & PCI_USES_MASTER)
+		pci_set_master (pdev);
+
+/*** RTnet ***/
+	dev = rt_alloc_etherdev(sizeof(struct netdev_private),
+							RX_RING_SIZE * 2 + TX_RING_SIZE);
+	if (dev == NULL) {
+		printk (KERN_ERR "init_ethernet failed for card #%d\n", card_idx);
+		goto err_out;
+	}
+	rtdev_alloc_name(dev, "rteth%d");
+	rt_rtdev_connect(dev, &RTDEV_manager);
+	dev->vers = RTDEV_VERS_2_0;
+	dev->sysbind = &pdev->dev;
+/*** RTnet ***/
+
+	if (pci_request_regions(pdev, shortname))
+		goto err_out_free_netdev;
+
+#ifdef USE_MEM
+	ioaddr0 = (long)ioaddr;
+	enable_mmio(ioaddr0, chip_id);
+
+	ioaddr = ioremap (memaddr, io_size);
+	if (!ioaddr) {
+		printk (KERN_ERR "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
+				pci_name(pdev), io_size, memaddr);
+		goto err_out_free_res;
+	}
+
+	/* Check that selected MMIO registers match the PIO ones */
+	i = 0;
+	while (mmio_verify_registers[i]) {
+		int reg = mmio_verify_registers[i++];
+		unsigned char a = inb(ioaddr0+reg);
+		unsigned char b = readb(ioaddr+reg);
+		if (a != b) {
+			printk (KERN_ERR "MMIO does not match PIO [%02x] (%02x != %02x)\n",
+					reg, a, b);
+			goto err_out_unmap;
+		}
+	}
+#endif
+
+	/* D-Link provided reset code (with comment additions) */
+	if (via_rhine_chip_info[chip_id].drv_flags & HasWOL) {
+		unsigned char byOrgValue;
+
+		/* clear sticky bit before reset & read ethernet address */
+		byOrgValue = readb(ioaddr + StickyHW);
+		byOrgValue = byOrgValue & 0xFC;
+		writeb(byOrgValue, ioaddr + StickyHW);
+
+		/* (bits written are cleared?) */
+		/* disable force PME-enable */
+		writeb(0x80, ioaddr + WOLcgClr);
+		/* disable power-event config bit */
+		writeb(0xFF, ioaddr + WOLcrClr);
+		/* clear power status (undocumented in vt6102 docs?) */
+		writeb(0xFF, ioaddr + PwrcsrClr);
+	}
+
+	/* Reset the chip to erase previous misconfiguration. */
+	writew(CmdReset, ioaddr + ChipCmd);
+
+	dev->base_addr = (long)ioaddr;
+	wait_for_reset(dev, chip_id, shortname);
+
+	/* Reload the station address from the EEPROM. */
+#ifdef USE_IO
+	reload_eeprom((long)ioaddr);
+#else
+	reload_eeprom(ioaddr0);
+	/* Reloading from eeprom overwrites cfgA-D, so we must re-enable MMIO.
+	   If reload_eeprom() was done first this could be avoided, but it is
+	   not known if that still works with the "win98-reboot" problem. */
+	enable_mmio(ioaddr0, chip_id);
+#endif
+
+	for (i = 0; i < 6; i++)
+		dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
+
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		printk(KERN_ERR "Invalid MAC address for card #%d\n", card_idx);
+		goto err_out_unmap;
+	}
+
+	if (chip_id == VT6102) {
+		/*
+		 * On the 3065D, reloading the EEPROM turns on bit 0 in
+		 * MAC_REG_CFGA, which makes the MAC receive magic packets
+		 * automatically.  So we turn it off.  (D-Link)
+		 */
+		writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA);
+	}
+
+	/* Select backoff algorithm */
+	if (backoff)
+		writeb(readb(ioaddr + ConfigD) & (0xF0 | backoff),
+			ioaddr + ConfigD);
+
+	dev->irq = pdev->irq;
+
+	np = dev->priv;
+	rtdm_lock_init (&np->lock);
+	np->chip_id = chip_id;
+	np->drv_flags = via_rhine_chip_info[chip_id].drv_flags;
+	np->pdev = pdev;
+/*** RTnet ***
+	np->mii_if.dev = dev;
+	np->mii_if.mdio_read = mdio_read;
+	np->mii_if.mdio_write = mdio_write;
+	np->mii_if.phy_id_mask = 0x1f;
+	np->mii_if.reg_num_mask = 0x1f;
+ *** RTnet ***/
+
+	if (dev->mem_start)
+		option = dev->mem_start;
+
+	/* The chip-specific entries in the device structure. */
+	dev->open = via_rhine_open;
+	dev->hard_start_xmit = via_rhine_start_tx;
+	dev->stop = via_rhine_close;
+	dev->get_stats = via_rhine_get_stats;
+/*** RTnet ***
+	dev->set_multicast_list = via_rhine_set_rx_mode;
+	dev->do_ioctl = netdev_ioctl;
+	dev->tx_timeout = via_rhine_tx_timeout;
+	dev->watchdog_timeo = TX_TIMEOUT;
+ *** RTnet ***/
+	if (np->drv_flags & ReqTxAlign)
+		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
+
+	/* dev->name not defined before register_netdev()! */
+/*** RTnet ***/
+	i = rt_register_rtnetdev(dev);
+	if (i) {
+		goto err_out_unmap;
+	}
+/*** RTnet ***/
+
+	/* The lower four bits are the media type. */
+	if (option > 0) {
+		if (option & 0x220)
+			np->mii_if.full_duplex = 1;
+		np->default_port = option & 15;
+	}
+	if (card_idx < MAX_UNITS  &&  full_duplex[card_idx] > 0)
+		np->mii_if.full_duplex = 1;
+
+	if (np->mii_if.full_duplex) {
+		printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+			   " disabled.\n", dev->name);
+		np->mii_if_force_media = 1; /*** RTnet ***/
+	}
+
+	printk(KERN_INFO "%s: %s at 0x%lx, ",
+		   dev->name, via_rhine_chip_info[chip_id].name,
+		   (pci_flags & PCI_USES_IO) ? (long)ioaddr : memaddr);
+
+	for (i = 0; i < 5; i++)
+			printk("%2.2x:", dev->dev_addr[i]);
+	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);
+
+	pci_set_drvdata(pdev, dev);
+
+	if (np->drv_flags & CanHaveMII) {
+		int phy, phy_idx = 0;
+		np->phys[0] = 1;		/* Standard for this chip. */
+		for (phy = 1; phy < 32 && phy_idx < MAX_MII_CNT; phy++) {
+			int mii_status = mdio_read(dev, phy, 1);
+			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
+				np->phys[phy_idx++] = phy;
+				np->mii_if.advertising = mdio_read(dev, phy, 4);
+				printk(KERN_INFO "%s: MII PHY found at address %d, status "
+					   "0x%4.4x advertising %4.4x Link %4.4x.\n",
+					   dev->name, phy, mii_status, np->mii_if.advertising,
+					   mdio_read(dev, phy, 5));
+
+				/* set IFF_RUNNING */
+				if (mii_status & BMSR_LSTATUS)
+					rtnetif_carrier_on(dev); /*** RTnet ***/
+				else
+					rtnetif_carrier_off(dev); /*** RTnet ***/
+			}
+		}
+		np->mii_cnt = phy_idx;
+		np->mii_if.phy_id = np->phys[0];
+	}
+
+	/* Allow forcing the media type. */
+	if (option > 0) {
+		if (option & 0x220)
+			np->mii_if.full_duplex = 1;
+		np->default_port = option & 0x3ff;
+		if (np->default_port & 0x330) {
+			/* FIXME: shouldn't someone check this variable? */
+			/* np->medialock = 1; */
+			printk(KERN_INFO "  Forcing %dMbs %s-duplex operation.\n",
+				   (option & 0x300 ? 100 : 10),
+				   (option & 0x220 ? "full" : "half"));
+			if (np->mii_cnt)
+				mdio_write(dev, np->phys[0], MII_BMCR,
+						   ((option & 0x300) ? 0x2000 : 0) |  /* 100mbps? */
+						   ((option & 0x220) ? 0x0100 : 0));  /* Full duplex? */
+		}
+	}
+
+	return 0;
+
+err_out_unmap:
+#ifdef USE_MEM
+	iounmap((void *)ioaddr);
+err_out_free_res:
+#endif
+	pci_release_regions(pdev);
+err_out_free_netdev:
+/*** RTnet ***/
+	rt_rtdev_disconnect(dev);
+	rtdev_free(dev);
+/*** RTnet ***/
+err_out:
+	return -ENODEV;
+}
+
+static int alloc_ring(struct rtnet_device* dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	void *ring;
+	dma_addr_t ring_dma;
+
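+	/* Both descriptor rings come from a single coherent DMA allocation:
+	   the Rx descriptors first, immediately followed by the Tx descriptors. */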
+	ring = pci_alloc_consistent(np->pdev,
+				    RX_RING_SIZE * sizeof(struct rx_desc) +
+				    TX_RING_SIZE * sizeof(struct tx_desc),
+				    &ring_dma);
+	if (!ring) {
+		printk(KERN_ERR "Could not allocate DMA memory.\n");
+		return -ENOMEM;
+	}
+	if (np->drv_flags & ReqTxAlign) {
+		np->tx_bufs = pci_alloc_consistent(np->pdev, PKT_BUF_SZ * TX_RING_SIZE,
+								   &np->tx_bufs_dma);
+		if (np->tx_bufs == NULL) {
+			pci_free_consistent(np->pdev,
+				    RX_RING_SIZE * sizeof(struct rx_desc) +
+				    TX_RING_SIZE * sizeof(struct tx_desc),
+				    ring, ring_dma);
+			return -ENOMEM;
+		}
+	}
+
+	np->rx_ring = ring;
+	np->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
+	np->rx_ring_dma = ring_dma;
+	np->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
+
+	return 0;
+}
+
+void free_ring(struct rtnet_device* dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+
+	pci_free_consistent(np->pdev,
+			    RX_RING_SIZE * sizeof(struct rx_desc) +
+			    TX_RING_SIZE * sizeof(struct tx_desc),
+			    np->rx_ring, np->rx_ring_dma);
+	np->tx_ring = NULL;
+
+	if (np->tx_bufs)
+		pci_free_consistent(np->pdev, PKT_BUF_SZ * TX_RING_SIZE,
+							np->tx_bufs, np->tx_bufs_dma);
+
+	np->tx_bufs = NULL;
+
+}
+
+static void alloc_rbufs(struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	dma_addr_t next;
+	int i;
+
+	np->dirty_rx = np->cur_rx = 0;
+
+	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+	np->rx_head_desc = &np->rx_ring[0];
+	next = np->rx_ring_dma;
+
+	/* Init the ring entries */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].rx_status = 0;
+		np->rx_ring[i].desc_length = cpu_to_le32(np->rx_buf_sz);
+		next += sizeof(struct rx_desc);
+		np->rx_ring[i].next_desc = cpu_to_le32(next);
+		np->rx_skbuff[i] = 0;
+	}
+	/* Mark the last entry as wrapping the ring. */
+	np->rx_ring[i-1].next_desc = cpu_to_le32(np->rx_ring_dma);
+
+	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct rtskb *skb = rtnetdev_alloc_rtskb(dev, np->rx_buf_sz); /*** RTnet ***/
+		np->rx_skbuff[i] = skb;
+		if (skb == NULL)
+			break;
+		np->rx_skbuff_dma[i] =
+			pci_map_single(np->pdev, skb->tail, np->rx_buf_sz,
+						   PCI_DMA_FROMDEVICE);
+
+		np->rx_ring[i].addr = cpu_to_le32(np->rx_skbuff_dma[i]);
+		np->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
+	}
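+	/* If an allocation failed above, dirty_rx ends up "behind" cur_rx, so
+	   the refill loop in via_rhine_rx() will retry the missing buffers. */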
+	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+}
+
+static void free_rbufs(struct rtnet_device* dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	int i;
+
+	/* Free all the skbuffs in the Rx queue. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].rx_status = 0;
+		np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
+		if (np->rx_skbuff[i]) {
+			pci_unmap_single(np->pdev,
+							 np->rx_skbuff_dma[i],
+							 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+			dev_kfree_rtskb(np->rx_skbuff[i]); /*** RTnet ***/
+		}
+		np->rx_skbuff[i] = 0;
+	}
+}
+
+static void alloc_tbufs(struct rtnet_device* dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	dma_addr_t next;
+	int i;
+
+	np->dirty_tx = np->cur_tx = 0;
+	next = np->tx_ring_dma;
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		np->tx_skbuff[i] = 0;
+		np->tx_ring[i].tx_status = 0;
+		np->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
+		next += sizeof(struct tx_desc);
+		np->tx_ring[i].next_desc = cpu_to_le32(next);
+		np->tx_buf[i] = &np->tx_bufs[i * PKT_BUF_SZ];
+	}
+	np->tx_ring[i-1].next_desc = cpu_to_le32(np->tx_ring_dma);
+
+}
+
+static void free_tbufs(struct rtnet_device* dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	int i;
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		np->tx_ring[i].tx_status = 0;
+		np->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
+		np->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
+		if (np->tx_skbuff[i]) {
+			if (np->tx_skbuff_dma[i]) {
+				pci_unmap_single(np->pdev,
+								 np->tx_skbuff_dma[i],
+								 np->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
+			}
+			dev_kfree_rtskb(np->tx_skbuff[i]); /*** RTnet ***/
+		}
+		np->tx_skbuff[i] = 0;
+		np->tx_buf[i] = 0;
+	}
+}
+
+static void init_registers(struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	void *ioaddr = (void *)dev->base_addr;
+	int i;
+
+	for (i = 0; i < 6; i++)
+		writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
+
+	/* Initialize other registers. */
+	writew(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
+	/* Configure initial FIFO thresholds. */
+	writeb(0x20, ioaddr + TxConfig);
+	np->tx_thresh = 0x20;
+	np->rx_thresh = 0x60;			/* Written in via_rhine_set_rx_mode(). */
+	np->mii_if.full_duplex = 0;
+
+	if (dev->if_port == 0)
+		dev->if_port = np->default_port;
+
+	writel(np->rx_ring_dma, ioaddr + RxRingPtr);
+	writel(np->tx_ring_dma, ioaddr + TxRingPtr);
+
+	via_rhine_set_rx_mode(dev);
+
+	/* Enable interrupts by setting the interrupt mask. */
+	writew(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
+		   IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
+		   IntrTxDone | IntrTxError | IntrTxUnderrun |
+		   IntrPCIErr | IntrStatsMax | IntrLinkChange,
+		   ioaddr + IntrEnable);
+
+	np->chip_cmd = CmdStart|CmdTxOn|CmdRxOn|CmdNoTxPoll;
+	if (np->mii_if_force_media) /*** RTnet ***/
+		np->chip_cmd |= CmdFDuplex;
+	writew(np->chip_cmd, ioaddr + ChipCmd);
+
+	via_rhine_check_duplex(dev);
+
+	/* The LED outputs of various MII xcvrs should be configured.  */
+	/* For NS or Mison phys, turn on bit 1 in register 0x17 */
+	/* For ESI phys, turn on bit 7 in register 0x17. */
+	mdio_write(dev, np->phys[0], 0x17, mdio_read(dev, np->phys[0], 0x17) |
+			   ((np->drv_flags & HasESIPhy) ? 0x0080 : 0x0001));
+}
+/* Read and write over the MII Management Data I/O (MDIO) interface. */
+
+static int mdio_read(struct rtnet_device *dev, int phy_id, int regnum) /*** RTnet ***/
+{
+	void *ioaddr = (void *)dev->base_addr;
+	int boguscnt = 1024;
+
+	/* Wait for a previous command to complete. */
+	while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
+		;
+	writeb(0x00, ioaddr + MIICmd);
+	writeb(phy_id, ioaddr + MIIPhyAddr);
+	writeb(regnum, ioaddr + MIIRegAddr);
+	writeb(0x40, ioaddr + MIICmd);			/* Trigger read */
+	boguscnt = 1024;
+	while ((readb(ioaddr + MIICmd) & 0x40) && --boguscnt > 0)
+		;
+	return readw(ioaddr + MIIData);
+}
+
+static void mdio_write(struct rtnet_device *dev, int phy_id, int regnum, int value) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	void *ioaddr = (void *)dev->base_addr;
+	int boguscnt = 1024;
+
+	if (phy_id == np->phys[0]) {
+		switch (regnum) {
+		case MII_BMCR:					/* Is user forcing speed/duplex? */
+			if (value & 0x9000)			/* Autonegotiation. */
+				np->mii_if_force_media = 0; /*** RTnet ***/
+			else
+				np->mii_if.full_duplex = (value & 0x0100) ? 1 : 0;
+			break;
+		case MII_ADVERTISE:
+			np->mii_if.advertising = value;
+			break;
+		}
+	}
+
+	/* Wait for a previous command to complete. */
+	while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
+		;
+	writeb(0x00, ioaddr + MIICmd);
+	writeb(phy_id, ioaddr + MIIPhyAddr);
+	writeb(regnum, ioaddr + MIIRegAddr);
+	writew(value, ioaddr + MIIData);
+	writeb(0x20, ioaddr + MIICmd);			/* Trigger write. */
+}
+
+
+static int via_rhine_open(struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	void *ioaddr = (void *)dev->base_addr;
+	int i;
+
+	/* Reset the chip. */
+	writew(CmdReset, ioaddr + ChipCmd);
+
+/*** RTnet ***/
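+	/* Attach the device to the RTnet stack and install the real-time
+	   (shared) interrupt handler. */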
+	rt_stack_connect(dev, &STACK_manager);
+	i = rtdm_irq_request(&np->irq_handle, dev->irq, via_rhine_interrupt,
+			     RTDM_IRQTYPE_SHARED, "rt_via-rhine", dev);
+/*** RTnet ***/
+	if (i) {
+		return i;
+	}
+
+	if (local_debug > 1)
+		printk(KERN_DEBUG "%s: via_rhine_open() irq %d.\n",
+			   dev->name, np->pdev->irq);
+
+	i = alloc_ring(dev);
+	if (i) {
+		return i;
+	}
+	alloc_rbufs(dev);
+	alloc_tbufs(dev);
+	wait_for_reset(dev, np->chip_id, dev->name);
+	init_registers(dev);
+	if (local_debug > 2)
+		printk(KERN_DEBUG "%s: Done via_rhine_open(), status %4.4x "
+			   "MII status: %4.4x.\n",
+			   dev->name, readw(ioaddr + ChipCmd),
+			   mdio_read(dev, np->phys[0], MII_BMSR));
+
+	rtnetif_start_queue(dev); /*** RTnet ***/
+
+/*** RTnet ***/
+	/* Set the timer to check for link beat. */
+/*** RTnet ***/
+
+	return 0;
+}
+
+static void via_rhine_check_duplex(struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	void *ioaddr = (void *)dev->base_addr;
+	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
+	int negotiated = mii_lpa & np->mii_if.advertising;
+	int duplex;
+
+	if (np->mii_if_force_media  ||  mii_lpa == 0xffff) /*** RTnet ***/
+		return;
+	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+	if (np->mii_if.full_duplex != duplex) {
+		np->mii_if.full_duplex = duplex;
+		if (local_debug)
+			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
+				   " partner capability of %4.4x.\n", dev->name,
+				   duplex ? "full" : "half", np->phys[0], mii_lpa);
+		if (duplex)
+			np->chip_cmd |= CmdFDuplex;
+		else
+			np->chip_cmd &= ~CmdFDuplex;
+		writew(np->chip_cmd, ioaddr + ChipCmd);
+	}
+}
+
+
+/*** RTnet ***/
+/*** RTnet ***/
+
+static int via_rhine_start_tx(struct rtskb *skb, struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	unsigned entry;
+	u32 intr_status;
+/*** RTnet ***/
+	rtdm_lockctx_t context;
+/*** RTnet ***/
+
+	/* Caution: the write order is important here, set the field
+	   with the "ownership" bits last. */
+
+	/* Calculate the next Tx descriptor entry. */
+	entry = np->cur_tx % TX_RING_SIZE;
+
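+	/* The Rhine does not pad short frames in hardware (see the errata note
+	   above), so pad to the minimum Ethernet length here. */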
+	if (skb->len < ETH_ZLEN) {
+		skb = rtskb_padto(skb, ETH_ZLEN);
+		if(skb == NULL)
+			return 0;
+	}
+
+	np->tx_skbuff[entry] = skb;
+
+	if ((np->drv_flags & ReqTxAlign) &&
+		(((long)skb->data & 3) || /*** RTnet skb_shinfo(skb)->nr_frags != 0 || RTnet ***/ skb->ip_summed == CHECKSUM_PARTIAL)
+		) {
+		/* Must use alignment buffer. */
+		if (skb->len > PKT_BUF_SZ) {
+			/* packet too long, drop it */
+			dev_kfree_rtskb(skb); /*** RTnet ***/
+			np->tx_skbuff[entry] = NULL;
+			np->stats.tx_dropped++;
+			return 0;
+		}
+
+/*** RTnet ***/
+		/* get and patch time stamp just before the transmission */
+		if (skb->xmit_stamp) {
+			rtdm_lock_get_irqsave(&np->lock, context);
+
+			*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() +
+				*skb->xmit_stamp);
+
+			rtskb_copy_and_csum_dev(skb, np->tx_buf[entry]);
+		} else {
+			 /* no need to block the interrupts during copy */
+			rtskb_copy_and_csum_dev(skb, np->tx_buf[entry]);
+
+			rtdm_lock_get_irqsave(&np->lock, context);
+		}
+/*** RTnet ***/
+
+		np->tx_skbuff_dma[entry] = 0;
+		np->tx_ring[entry].addr = cpu_to_le32(np->tx_bufs_dma +
+										  (np->tx_buf[entry] - np->tx_bufs));
+	} else {
+		np->tx_skbuff_dma[entry] =
+			pci_map_single(np->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+		np->tx_ring[entry].addr = cpu_to_le32(np->tx_skbuff_dma[entry]);
+
+/*** RTnet ***/
+		rtdm_lock_get_irqsave(&np->lock, context);
+
+		/* get and patch time stamp just before the transmission */
+		if (skb->xmit_stamp)
+			*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() +
+				*skb->xmit_stamp);
+/*** RTnet ***/
+	}
+
+	np->tx_ring[entry].desc_length =
+		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
+
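+	/* Make sure the rest of the descriptor is visible before handing
+	   ownership to the chip, and flush the owner bit before kicking Tx. */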
+	wmb();
+	np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+	wmb();
+
+	np->cur_tx++;
+
+	/* Non-x86 Todo: explicitly flush cache lines here. */
+
+	/*
+	 * Wake the potentially-idle transmit channel unless errors are
+	 * pending (the ISR must sort them out first).
+	 */
+	intr_status = get_intr_status(dev);
+	if ((intr_status & IntrTxErrSummary) == 0) {
+		writew(CmdTxDemand | np->chip_cmd, (void *)dev->base_addr + ChipCmd);
+	}
+	IOSYNC;
+
+	if (np->cur_tx == np->dirty_tx + TX_QUEUE_LEN)
+		rtnetif_stop_queue(dev); /*** RTnet ***/
+
+	/*dev->trans_start = jiffies; *** RTnet ***/
+
+/*** RTnet ***/
+	rtdm_lock_put_irqrestore(&np->lock, context);
+/*** RTnet ***/
+
+	if (local_debug > 4) {
+		rtdm_printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n", /*** RTnet ***/
+			   dev->name, np->cur_tx-1, entry);
+	}
+	return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread. */
+static int via_rhine_interrupt(rtdm_irq_t *irq_handle) /*** RTnet ***/
+{
+	nanosecs_abs_t time_stamp = rtdm_clock_read(); /*** RTnet ***/
+	struct rtnet_device *dev =
+	    rtdm_irq_get_arg(irq_handle, struct rtnet_device); /*** RTnet ***/
+	long ioaddr;
+	u32 intr_status;
+	int boguscnt = max_interrupt_work;
+	struct netdev_private *np = dev->priv; /*** RTnet ***/
+	unsigned int old_packet_cnt = np->stats.rx_packets; /*** RTnet ***/
+	int ret = RTDM_IRQ_NONE;
+
+	ioaddr = dev->base_addr;
+
+	while ((intr_status = get_intr_status(dev))) {
+		/* Acknowledge all of the current interrupt sources ASAP. */
+		if (intr_status & IntrTxDescRace)
+			writeb(0x08, (void *)ioaddr + IntrStatus2);
+		writew(intr_status & 0xffff, (void *)ioaddr + IntrStatus);
+		IOSYNC;
+
+		ret = RTDM_IRQ_HANDLED;
+
+		if (local_debug > 4)
+			rtdm_printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n", /*** RTnet ***/
+				   dev->name, intr_status);
+
+		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
+						   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
+			via_rhine_rx(dev, &time_stamp);
+
+		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
+			if (intr_status & IntrTxErrSummary) {
+/*** RTnet ***/
+				rtdm_printk(KERN_ERR "%s: via_rhine_interrupt(), Transmission error\n", dev->name);
+/*** RTnet ***/
+			}
+			via_rhine_tx(dev);
+		}
+
+		/* Abnormal error summary/uncommon events handlers. */
+		if (intr_status & (IntrPCIErr | IntrLinkChange |
+				   IntrStatsMax | IntrTxError | IntrTxAborted |
+				   IntrTxUnderrun | IntrTxDescRace))
+			via_rhine_error(dev, intr_status);
+
+		if (--boguscnt < 0) {
+			rtdm_printk(KERN_WARNING "%s: Too much work at interrupt, " /*** RTnet ***/
+				   "status=%#8.8x.\n",
+				   dev->name, intr_status);
+			break;
+		}
+	}
+
+	if (local_debug > 3)
+		rtdm_printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n", /*** RTnet ***/
+			   dev->name, readw((void *)ioaddr + IntrStatus));
+
+/*** RTnet ***/
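+	/* Kick the RTnet stack manager only if this IRQ delivered new packets. */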
+	if (old_packet_cnt != np->stats.rx_packets)
+		rt_mark_stack_mgr(dev);
+	return ret;
+}
+
+/* This routine is logically part of the interrupt handler, but isolated
+   for clarity. */
+static void via_rhine_tx(struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	int txstatus = 0, entry = np->dirty_tx % TX_RING_SIZE;
+
+	rtdm_lock_get(&np->lock); /*** RTnet ***/
+
+	/* find and cleanup dirty tx descriptors */
+	while (np->dirty_tx != np->cur_tx) {
+		txstatus = le32_to_cpu(np->tx_ring[entry].tx_status);
+		if (local_debug > 6)
+			rtdm_printk(KERN_DEBUG " Tx scavenge %d status %8.8x.\n", /*** RTnet ***/
+				   entry, txstatus);
+		if (txstatus & DescOwn)
+			break;
+		if (txstatus & 0x8000) {
+			if (local_debug > 1)
+				rtdm_printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n", /*** RTnet ***/
+					   dev->name, txstatus);
+			np->stats.tx_errors++;
+			if (txstatus & 0x0400) np->stats.tx_carrier_errors++;
+			if (txstatus & 0x0200) np->stats.tx_window_errors++;
+			if (txstatus & 0x0100) np->stats.tx_aborted_errors++;
+			if (txstatus & 0x0080) np->stats.tx_heartbeat_errors++;
+			if (((np->chip_id == VT86C100A) && txstatus & 0x0002) ||
+				(txstatus & 0x0800) || (txstatus & 0x1000)) {
+				np->stats.tx_fifo_errors++;
+				np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+				break; /* Keep the skb - we try again */
+			}
+			/* Transmitter restarted in 'abnormal' handler. */
+		} else {
+			if (np->chip_id == VT86C100A)
+				np->stats.collisions += (txstatus >> 3) & 0x0F;
+			else
+				np->stats.collisions += txstatus & 0x0F;
+			if (local_debug > 6)
+				rtdm_printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n", /*** RTnet ***/
+					(txstatus >> 3) & 0xF,
+					txstatus & 0xF);
+			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+			np->stats.tx_packets++;
+		}
+		/* Free the original skb. */
+		if (np->tx_skbuff_dma[entry]) {
+			pci_unmap_single(np->pdev,
+							 np->tx_skbuff_dma[entry],
+							 np->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
+		}
+		dev_kfree_rtskb(np->tx_skbuff[entry]); /*** RTnet ***/
+		np->tx_skbuff[entry] = NULL;
+		entry = (++np->dirty_tx) % TX_RING_SIZE;
+	}
+	if ((np->cur_tx - np->dirty_tx) < TX_QUEUE_LEN - 4)
+		rtnetif_wake_queue (dev); /*** RTnet ***/
+
+	rtdm_lock_put(&np->lock); /*** RTnet ***/
+}
+
+/* This routine is logically part of the interrupt handler, but isolated
+   for clarity and better register allocation. */
+static void via_rhine_rx(struct rtnet_device *dev, nanosecs_abs_t *time_stamp) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	int entry = np->cur_rx % RX_RING_SIZE;
+	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+
+	if (local_debug > 4) {
+		rtdm_printk(KERN_DEBUG "%s: via_rhine_rx(), entry %d status %8.8x.\n", /*** RTnet ***/
+			   dev->name, entry, le32_to_cpu(np->rx_head_desc->rx_status));
+	}
+
+	/* If EOP is set on the next entry, it's a new packet. Send it up. */
+	while ( ! (np->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
+		struct rx_desc *desc = np->rx_head_desc;
+		u32 desc_status = le32_to_cpu(desc->rx_status);
+		int data_size = desc_status >> 16;
+
+		if (local_debug > 4)
+			rtdm_printk(KERN_DEBUG "  via_rhine_rx() status is %8.8x.\n", /*** RTnet ***/
+				   desc_status);
+		if (--boguscnt < 0)
+			break;
+		if ( (desc_status & (RxWholePkt | RxErr)) !=  RxWholePkt) {
+			if ((desc_status & RxWholePkt) !=  RxWholePkt) {
+				rtdm_printk(KERN_WARNING "%s: Oversized Ethernet frame spanned " /*** RTnet ***/
+					   "multiple buffers, entry %#x length %d status %8.8x!\n",
+					   dev->name, entry, data_size, desc_status);
+				rtdm_printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n", /*** RTnet ***/
+					   dev->name, np->rx_head_desc, &np->rx_ring[entry]);
+				np->stats.rx_length_errors++;
+			} else if (desc_status & RxErr) {
+				/* There was an error. */
+				if (local_debug > 2)
+					rtdm_printk(KERN_DEBUG "  via_rhine_rx() Rx error was %8.8x.\n", /*** RTnet ***/
+						   desc_status);
+				np->stats.rx_errors++;
+				if (desc_status & 0x0030) np->stats.rx_length_errors++;
+				if (desc_status & 0x0048) np->stats.rx_fifo_errors++;
+				if (desc_status & 0x0004) np->stats.rx_frame_errors++;
+				if (desc_status & 0x0002)
+					/* RTnet: this is only updated in the interrupt handler */
+					np->stats.rx_crc_errors++;
+			}
+		} else {
+			struct rtskb *skb; /*** RTnet ***/
+			/* Length should omit the CRC */
+			int pkt_len = data_size - 4;
+
+			/* Check if the packet is long enough to accept without copying
+			   to a minimally-sized skbuff. */
+/*** RTnet ***/
+			{
+/*** RTnet ***/
+				skb = np->rx_skbuff[entry];
+				if (skb == NULL) {
+					rtdm_printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n", /*** RTnet ***/
+						   dev->name);
+					break;
+				}
+				np->rx_skbuff[entry] = NULL;
+				rtskb_put(skb, pkt_len); /*** RTnet ***/
+				pci_unmap_single(np->pdev, np->rx_skbuff_dma[entry],
+								 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+			}
+/*** RTnet ***/
+			skb->protocol = rt_eth_type_trans(skb, dev);
+			skb->time_stamp = *time_stamp;
+			rtnetif_rx(skb);
+			/*dev->last_rx = jiffies;*/
+/*** RTnet ***/
+			np->stats.rx_bytes += pkt_len;
+			np->stats.rx_packets++;
+		}
+		entry = (++np->cur_rx) % RX_RING_SIZE;
+		np->rx_head_desc = &np->rx_ring[entry];
+	}
+
+	/* Refill the Rx ring buffers. */
+	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+		struct rtskb *skb; /*** RTnet ***/
+		entry = np->dirty_rx % RX_RING_SIZE;
+		if (np->rx_skbuff[entry] == NULL) {
+			skb = rtnetdev_alloc_rtskb(dev, np->rx_buf_sz); /*** RTnet ***/
+			np->rx_skbuff[entry] = skb;
+			if (skb == NULL)
+				break;			/* Better luck next round. */
+			np->rx_skbuff_dma[entry] =
+				pci_map_single(np->pdev, skb->tail, np->rx_buf_sz,
+							   PCI_DMA_FROMDEVICE);
+			np->rx_ring[entry].addr = cpu_to_le32(np->rx_skbuff_dma[entry]);
+		}
+		np->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
+	}
+
+	/* Pre-emptively restart Rx engine. */
+	writew(readw((void *)dev->base_addr + ChipCmd) | CmdRxOn | CmdRxDemand,
+		   (void *)dev->base_addr + ChipCmd);
+}
+
+/* Clears the "tally counters" for CRC errors and missed frames(?).
+   It has been reported that some chips need a write of 0 to clear
+   these, for others the counters are set to 1 when written to and
+   instead cleared when read. So we clear them both ways ... */
+static inline void clear_tally_counters(void *ioaddr)
+{
+	writel(0, ioaddr + RxMissed);
+	readw(ioaddr + RxCRCErrs);
+	readw(ioaddr + RxMissed);
+}
+
+static void via_rhine_restart_tx(struct rtnet_device *dev) { /*** RTnet ***/
+	struct netdev_private *np = dev->priv;
+	void *ioaddr = (void *)dev->base_addr;
+	int entry = np->dirty_tx % TX_RING_SIZE;
+	u32 intr_status;
+
+	/*
+	 * If new errors occurred, we need to sort them out before doing Tx.
+	 * In that case the ISR will be back here RSN anyway.
+	 */
+	intr_status = get_intr_status(dev);
+
+	if ((intr_status & IntrTxErrSummary) == 0) {
+
+		/* We know better than the chip where it should continue. */
+		writel(np->tx_ring_dma + entry * sizeof(struct tx_desc),
+			   ioaddr + TxRingPtr);
+
+		writew(CmdTxDemand | np->chip_cmd, ioaddr + ChipCmd);
+		IOSYNC;
+	}
+	else {
+		/* This should never happen */
+		if (local_debug > 1)
+			rtdm_printk(KERN_WARNING "%s: via_rhine_restart_tx() " /*** RTnet ***/
+				   "Another error occurred %8.8x.\n",
+				   dev->name, intr_status);
+	}
+
+}
+
+static void via_rhine_error(struct rtnet_device *dev, int intr_status) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	void *ioaddr = (void *)dev->base_addr;
+
+	rtdm_lock_get(&np->lock); /*** RTnet ***/
+
+	if (intr_status & (IntrLinkChange)) {
+		if (readb(ioaddr + MIIStatus) & 0x02) {
+			/* Link failed, restart autonegotiation. */
+			if (np->drv_flags & HasDavicomPhy)
+				mdio_write(dev, np->phys[0], MII_BMCR, 0x3300);
+		} else
+			via_rhine_check_duplex(dev);
+		if (local_debug)
+			rtdm_printk(KERN_ERR "%s: MII status changed: Autonegotiation " /*** RTnet ***/
+				   "advertising %4.4x  partner %4.4x.\n", dev->name,
+			   mdio_read(dev, np->phys[0], MII_ADVERTISE),
+			   mdio_read(dev, np->phys[0], MII_LPA));
+	}
+	if (intr_status & IntrStatsMax) {
+		np->stats.rx_crc_errors	+= readw(ioaddr + RxCRCErrs);
+		np->stats.rx_missed_errors	+= readw(ioaddr + RxMissed);
+		clear_tally_counters(ioaddr);
+	}
+	if (intr_status & IntrTxAborted) {
+		if (local_debug > 1)
+			rtdm_printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n", /*** RTnet ***/
+				   dev->name, intr_status);
+	}
+	if (intr_status & IntrTxUnderrun) {
+		if (np->tx_thresh < 0xE0)
+			writeb(np->tx_thresh += 0x20, ioaddr + TxConfig);
+		if (local_debug > 1)
+			rtdm_printk(KERN_INFO "%s: Transmitter underrun, Tx " /*** RTnet ***/
+				   "threshold now %2.2x.\n",
+				   dev->name, np->tx_thresh);
+	}
+	if (intr_status & IntrTxDescRace) {
+		if (local_debug > 2)
+			rtdm_printk(KERN_INFO "%s: Tx descriptor write-back race.\n", /*** RTnet ***/
+				   dev->name);
+	}
+	if ((intr_status & IntrTxError) && !(intr_status & ( IntrTxAborted |
+		IntrTxUnderrun | IntrTxDescRace ))) {
+		if (np->tx_thresh < 0xE0) {
+			writeb(np->tx_thresh += 0x20, ioaddr + TxConfig);
+		}
+		if (local_debug > 1)
+			rtdm_printk(KERN_INFO "%s: Unspecified error. Tx " /*** RTnet ***/
+				"threshold now %2.2x.\n",
+				dev->name, np->tx_thresh);
+	}
+	if (intr_status & ( IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
+		IntrTxError ))
+		via_rhine_restart_tx(dev);
+
+	if (intr_status & ~( IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
+						 IntrTxError | IntrTxAborted | IntrNormalSummary |
+						 IntrTxDescRace )) {
+		if (local_debug > 1)
+			rtdm_printk(KERN_ERR "%s: Something Wicked happened! %8.8x.\n", /*** RTnet ***/
+				   dev->name, intr_status);
+	}
+
+	rtdm_lock_put(&np->lock); /*** RTnet ***/
+}
+
+static struct net_device_stats *via_rhine_get_stats(struct rtnet_device *rtdev)
+{
+	struct netdev_private *np = rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&np->lock, context);
+	np->stats.rx_crc_errors	+= readw(ioaddr + RxCRCErrs);
+	np->stats.rx_missed_errors	+= readw(ioaddr + RxMissed);
+	clear_tally_counters((void *)ioaddr);
+	rtdm_lock_put_irqrestore(&np->lock, context);
+
+	return &np->stats;
+}
+
+static void via_rhine_set_rx_mode(struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	void *ioaddr = (void *)dev->base_addr;
+	u32 mc_filter[2];			/* Multicast hash filter */
+	u8 rx_mode;					/* Note: 0x02=accept runt, 0x01=accept errs */
+
+	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
+		/* Unconditionally log net taps. */
+		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+		rx_mode = 0x1C;
+		writel(0xffffffff, (void *)ioaddr + MulticastFilter0);
+		writel(0xffffffff, (void *)ioaddr + MulticastFilter1);
+	} else if (dev->flags & IFF_ALLMULTI) {
+		/* Too many to match, or accept all multicasts. */
+		writel(0xffffffff, (void *)ioaddr + MulticastFilter0);
+		writel(0xffffffff, (void *)ioaddr + MulticastFilter1);
+		rx_mode = 0x0C;
+	} else {
+		memset(mc_filter, 0, sizeof(mc_filter));
+		writel(mc_filter[0], (void *)ioaddr + MulticastFilter0);
+		writel(mc_filter[1], (void *)ioaddr + MulticastFilter1);
+		rx_mode = 0x0C;
+	}
+	writeb(np->rx_thresh | rx_mode, (void *)ioaddr + RxConfig);
+}
+
+/*** RTnet ***/
+/*** RTnet ***/
+
+static int via_rhine_close(struct rtnet_device *dev) /*** RTnet ***/
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = dev->priv;
+	int i; /*** RTnet ***/
+	rtdm_lockctx_t context;
+
+/*** RTnet ***
+	del_timer_sync(&np->timer);
+ *** RTnet ***/
+
+	rtdm_lock_get_irqsave(&np->lock, context); /*** RTnet ***/
+
+	rtnetif_stop_queue(dev); /*** RTnet ***/
+
+	if (local_debug > 1)
+		rtdm_printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n", /*** RTnet ***/
+			   dev->name, readw((void *)ioaddr + ChipCmd));
+
+	/* Switch to loopback mode to avoid hardware races. */
+	writeb(np->tx_thresh | 0x02, (void *)ioaddr + TxConfig);
+
+	/* Disable interrupts by clearing the interrupt mask. */
+	writew(0x0000, (void *)ioaddr + IntrEnable);
+
+	/* Stop the chip's Tx and Rx processes. */
+	writew(CmdStop, (void *)ioaddr + ChipCmd);
+
+	rtdm_lock_put_irqrestore(&np->lock, context); /*** RTnet ***/
+
+/*** RTnet ***/
+	if ( (i=rtdm_irq_free(&np->irq_handle))<0 )
+		return i;
+
+	rt_stack_disconnect(dev);
+/*** RTnet ***/
+
+	free_rbufs(dev);
+	free_tbufs(dev);
+	free_ring(dev);
+
+	return 0;
+}
+
+
+static void via_rhine_remove_one (struct pci_dev *pdev)
+{
+ /*** RTnet ***/
+	struct rtnet_device *dev = pci_get_drvdata(pdev);
+
+	rt_unregister_rtnetdev(dev);
+	rt_rtdev_disconnect(dev);
+/*** RTnet ***/
+
+	pci_release_regions(pdev);
+
+#ifdef USE_MEM
+	iounmap((char *)(dev->base_addr));
+#endif
+
+	rtdev_free(dev); /*** RTnet ***/
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+
+static struct pci_driver via_rhine_driver = {
+	.name		= DRV_NAME,
+	.id_table	= via_rhine_pci_tbl,
+	.probe		= via_rhine_init_one,
+	.remove		= via_rhine_remove_one,
+};
+
+
+static int __init via_rhine_init (void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+	printk(version);
+#endif
+	return pci_register_driver (&via_rhine_driver);
+}
+
+
+static void __exit via_rhine_cleanup (void)
+{
+	pci_unregister_driver (&via_rhine_driver);
+}
+
+
+module_init(via_rhine_init);
+module_exit(via_rhine_cleanup);
+
+
+/*
+ * Local variables:
+ *  compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c via-rhine.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 4
+ * End:
+ */
+++ linux-patched/drivers/xenomai/net/drivers/tulip/interrupt.c	2022-03-21 12:58:29.673886192 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/tulip/21142.c	1970-01-01 01:00:00.000000000 +0100
+/*
+	drivers/net/tulip/interrupt.c
+
+	Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
+	Copyright 2000,2001  The Linux Kernel Team
+	Written/copyright 1994-2001 by Donald Becker.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+	for more information on this driver, or visit the project
+	Web page at http://sourceforge.net/projects/tulip/
+
+*/
+/* Ported to RTnet by Wittawat Yamwong <wittawat@web.de> */
+
+#include "tulip.h"
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+
+int tulip_rx_copybreak;
+unsigned int tulip_max_interrupt_work;
+
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+
+#define MIT_SIZE 15
+unsigned int mit_table[MIT_SIZE+1] =
+{
+	/*  CSR11 21143 hardware Mitigation Control Interrupt
+	    We use only RX mitigation; other techniques are used for
+	    TX interrupt mitigation.
+
+	   31    Cycle Size (timer control)
+	   30:27 TX timer in 16 * Cycle size
+	   26:24 TX No pkts before Int.
+	   23:20 RX timer in Cycle size
+	   19:17 RX No pkts before Int.
+	   16       Continues Mode (CM)
+	*/
+
+	0x0,             /* IM disabled */
+	0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
+	0x80150000,
+	0x80270000,
+	0x80370000,
+	0x80490000,
+	0x80590000,
+	0x80690000,
+	0x807B0000,
+	0x808B0000,
+	0x809D0000,
+	0x80AD0000,
+	0x80BD0000,
+	0x80CF0000,
+	0x80DF0000,
+//       0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
+	0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
+};
+#endif
+
+
+int tulip_refill_rx(/*RTnet*/struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	int entry;
+	int refilled = 0;
+
+	/* Refill the Rx ring buffers. */
+	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
+		entry = tp->dirty_rx % RX_RING_SIZE;
+		if (tp->rx_buffers[entry].skb == NULL) {
+			struct /*RTnet*/rtskb *skb;
+			dma_addr_t mapping;
+
+			skb = tp->rx_buffers[entry].skb = /*RTnet*/rtnetdev_alloc_rtskb(rtdev, PKT_BUF_SZ);
+			if (skb == NULL)
+				break;
+
+			mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
+						 PCI_DMA_FROMDEVICE);
+			tp->rx_buffers[entry].mapping = mapping;
+
+			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
+			refilled++;
+		}
+		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
+	}
+	if(tp->chip_id == LC82C168) {
+		if(((inl(rtdev->base_addr + CSR5)>>17)&0x07) == 4) {
+			/* Rx stopped because it ran out of buffers;
+			 * restart it
+			 */
+			outl(0x01, rtdev->base_addr + CSR2);
+		}
+	}
+	return refilled;
+}
+
+
+static int tulip_rx(/*RTnet*/struct rtnet_device *rtdev, nanosecs_abs_t *time_stamp)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	int entry = tp->cur_rx % RX_RING_SIZE;
+	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
+	int received = 0;
+
+	if (tulip_debug > 4)
+		/*RTnet*/rtdm_printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
+			   tp->rx_ring[entry].status);
+	/* If we own the next entry, it is a new packet. Send it up. */
+	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
+		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
+
+		if (tulip_debug > 5)
+			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
+				   rtdev->name, entry, status);
+		if (--rx_work_limit < 0)
+			break;
+		if ((status & 0x38008300) != 0x0300) {
+			if ((status & 0x38000300) != 0x0300) {
+				/* Ignore earlier buffers. */
+				if ((status & 0xffff) != 0x7fff) {
+					if (tulip_debug > 1)
+						/*RTnet*/rtdm_printk(KERN_WARNING "%s: Oversized Ethernet frame "
+							   "spanned multiple buffers, status %8.8x!\n",
+							   rtdev->name, status);
+					tp->stats.rx_length_errors++;
+				}
+			} else if (status & RxDescFatalErr) {
+				/* There was a fatal error. */
+				if (tulip_debug > 2)
+					/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+						   rtdev->name, status);
+				tp->stats.rx_errors++; /* end of a packet.*/
+				if (status & 0x0890) tp->stats.rx_length_errors++;
+				if (status & 0x0004) tp->stats.rx_frame_errors++;
+				if (status & 0x0002) tp->stats.rx_crc_errors++;
+				if (status & 0x0001) tp->stats.rx_fifo_errors++;
+			}
+		} else {
+			/* Omit the four octet CRC from the length. */
+			short pkt_len = ((status >> 16) & 0x7ff) - 4;
+			struct /*RTnet*/rtskb *skb;
+
+#ifndef final_version
+			if (pkt_len > 1518) {
+				/*RTnet*/rtdm_printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
+					   rtdev->name, pkt_len, pkt_len);
+				pkt_len = 1518;
+				tp->stats.rx_length_errors++;
+			}
+#endif
+
+			{
+				unsigned char *temp = /*RTnet*/rtskb_put(skb = tp->rx_buffers[entry].skb, pkt_len);
+
+#ifndef final_version
+				if (tp->rx_buffers[entry].mapping !=
+				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
+					/*RTnet*/rtdm_printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+					       "do not match in tulip_rx: %08x vs. %08llx ? / %p.\n",
+					       rtdev->name,
+					       le32_to_cpu(tp->rx_ring[entry].buffer1),
+					       (unsigned long long)tp->rx_buffers[entry].mapping,
+					       temp);/*RTnet*/
+				}
+#endif
+
+				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
+						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+
+				tp->rx_buffers[entry].skb = NULL;
+				tp->rx_buffers[entry].mapping = 0;
+			}
+			skb->protocol = /*RTnet*/rt_eth_type_trans(skb, rtdev);
+			skb->time_stamp = *time_stamp;
+			/*RTnet*/rtnetif_rx(skb);
+
+			tp->stats.rx_packets++;
+			tp->stats.rx_bytes += pkt_len;
+		}
+		received++;
+		entry = (++tp->cur_rx) % RX_RING_SIZE;
+	}
+	return received;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread. */
+int tulip_interrupt(rtdm_irq_t *irq_handle)
+{
+	nanosecs_abs_t time_stamp = rtdm_clock_read();/*RTnet*/
+	struct rtnet_device *rtdev =
+	    rtdm_irq_get_arg(irq_handle, struct rtnet_device);/*RTnet*/
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	unsigned int csr5;
+	int entry;
+	int missed;
+	int rx = 0;
+	int tx = 0;
+	int oi = 0;
+	int maxrx = RX_RING_SIZE;
+	int maxtx = TX_RING_SIZE;
+	int maxoi = TX_RING_SIZE;
+	unsigned int work_count = tulip_max_interrupt_work;
+
+	/* Let's see whether the interrupt really is for us */
+	csr5 = inl(ioaddr + CSR5);
+
+	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) {
+		rtdm_printk("%s: unexpected IRQ!\n",rtdev->name);
+		return RTDM_IRQ_NONE;
+	}
+
+	tp->nir++;
+
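+	/* Service interrupt causes until CSR5 reports no further normal or
+	   abnormal events, or until the work budget is exhausted. */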
+	do {
+		/* Acknowledge all of the current interrupt sources ASAP. */
+		outl(csr5 & 0x0001ffff, ioaddr + CSR5);
+
+		if (tulip_debug > 4)
+			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
+				   rtdev->name, csr5, inl(rtdev->base_addr + CSR5));
+
+		if (csr5 & (RxIntr | RxNoBuf)) {
+			rx += tulip_rx(rtdev, &time_stamp);
+			tulip_refill_rx(rtdev);
+		}
+
+		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
+			unsigned int dirty_tx;
+
+			rtdm_lock_get(&tp->lock);
+
+			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
+				 dirty_tx++) {
+				int entry = dirty_tx % TX_RING_SIZE;
+				int status = le32_to_cpu(tp->tx_ring[entry].status);
+
+				if (status < 0)
+					break;			/* It still has not been Txed */
+
+				/* Check for Rx filter setup frames. */
+				if (tp->tx_buffers[entry].skb == NULL) {
+					/* test because dummy frames not mapped */
+					if (tp->tx_buffers[entry].mapping)
+						pci_unmap_single(tp->pdev,
+							 tp->tx_buffers[entry].mapping,
+							 sizeof(tp->setup_frame),
+							 PCI_DMA_TODEVICE);
+					continue;
+				}
+
+				if (status & 0x8000) {
+					/* There was a major error, log it. */
+#ifndef final_version
+					if (tulip_debug > 1)
+						/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+							   rtdev->name, status);
+#endif
+					tp->stats.tx_errors++;
+					if (status & 0x4104) tp->stats.tx_aborted_errors++;
+					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
+					if (status & 0x0200) tp->stats.tx_window_errors++;
+					if (status & 0x0002) tp->stats.tx_fifo_errors++;
+					if ((status & 0x0080) && tp->full_duplex == 0)
+						tp->stats.tx_heartbeat_errors++;
+				} else {
+					tp->stats.tx_bytes +=
+						tp->tx_buffers[entry].skb->len;
+					tp->stats.collisions += (status >> 3) & 15;
+					tp->stats.tx_packets++;
+				}
+
+				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
+						 tp->tx_buffers[entry].skb->len,
+						 PCI_DMA_TODEVICE);
+
+				/* Free the original skb. */
+				/*RTnet*/dev_kfree_rtskb(tp->tx_buffers[entry].skb);
+				tp->tx_buffers[entry].skb = NULL;
+				tp->tx_buffers[entry].mapping = 0;
+				tx++;
+				rtnetif_tx(rtdev);
+			}
+
+#ifndef final_version
+			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
+				/*RTnet*/rtdm_printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
+					   rtdev->name, dirty_tx, tp->cur_tx);
+				dirty_tx += TX_RING_SIZE;
+			}
+#endif
+
+			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
+				/*RTnet*/rtnetif_wake_queue(rtdev);
+
+			tp->dirty_tx = dirty_tx;
+			if (csr5 & TxDied) {
+				if (tulip_debug > 2)
+					/*RTnet*/rtdm_printk(KERN_WARNING "%s: The transmitter stopped."
+						   "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
+						   rtdev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
+				tulip_restart_rxtx(tp);
+			}
+			rtdm_lock_put(&tp->lock);
+		}
+
+		/* Log errors. */
+		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
+			if (csr5 == 0xffffffff)
+				break;
+			/*RTnet*/rtdm_printk(KERN_ERR "%s: Error detected, "
+			    "device may not work any more (csr5=%08x)!\n", rtdev->name, csr5);
+			/* Clear all error sources, including undocumented ones! */
+			outl(0x0800f7ba, ioaddr + CSR5);
+			oi++;
+		}
+		if (csr5 & TimerInt) {
+
+			if (tulip_debug > 2)
+				/*RTnet*/rtdm_printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
+					   rtdev->name, csr5);
+			outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+			tp->ttimer = 0;
+			oi++;
+		}
+		if (tx > maxtx || rx > maxrx || oi > maxoi) {
+			if (tulip_debug > 1)
+				/*RTnet*/rtdm_printk(KERN_WARNING "%s: Too much work during an interrupt, "
+					   "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", rtdev->name, csr5, tp->nir, tx, rx, oi);
+
+		       /* Acknowledge all interrupt sources. */
+			outl(0x8001ffff, ioaddr + CSR5);
+			if (tp->flags & HAS_INTR_MITIGATION) {
+				/* Josip Loncaric at ICASE did extensive experimentation
+				   to develop a good interrupt mitigation setting. */
+				outl(0x8b240000, ioaddr + CSR11);
+			} else if (tp->chip_id == LC82C168) {
+				/* the LC82C168 doesn't have a hw timer.*/
+				outl(0x00, ioaddr + CSR7);
+			} else {
+			  /* Mask all interrupting sources, set timer to
+				re-enable. */
+			}
+			break;
+		}
+
+		work_count--;
+		if (work_count == 0)
+			break;
+
+		csr5 = inl(ioaddr + CSR5);
+	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
+
+	tulip_refill_rx(rtdev);
+
+	/* check if the card is in suspend mode */
+	entry = tp->dirty_rx % RX_RING_SIZE;
+	if (tp->rx_buffers[entry].skb == NULL) {
+		if (tulip_debug > 1)
+			/*RTnet*/rtdm_printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", rtdev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
+		if (tp->chip_id == LC82C168)
+			outl(0x00, ioaddr + CSR7);
+		else {
+			if (tp->ttimer == 0 || (inl(ioaddr + CSR11) & 0xffff) == 0) {
+				if (tulip_debug > 1)
+					/*RTnet*/rtdm_printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", rtdev->name, tp->nir);
+				outl(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
+					ioaddr + CSR7);
+				outl(TimerInt, ioaddr + CSR5);
+				outl(12, ioaddr + CSR11);
+				tp->ttimer = 1;
+			}
+		}
+	}
+
+	if ((missed = inl(ioaddr + CSR8) & 0x1ffff)) {
+		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
+	}
+
+	if (tulip_debug > 4)
+		/*RTnet*/rtdm_printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
+			   rtdev->name, inl(ioaddr + CSR5));
+	if (rx)
+		rt_mark_stack_mgr(rtdev);
+	return RTDM_IRQ_HANDLED;
+}
+++ linux-patched/drivers/xenomai/net/drivers/tulip/21142.c	2022-03-21 12:58:29.668886241 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/tulip/tulip_core.c	1970-01-01 01:00:00.000000000 +0100
+/*
+	drivers/net/tulip/21142.c
+
+	Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
+	Copyright 2000,2001  The Linux Kernel Team
+	Written/copyright 1994-2001 by Donald Becker.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+	for more information on this driver, or visit the project
+	Web page at http://sourceforge.net/projects/tulip/
+
+*/
+/* Ported to RTnet by Wittawat Yamwong <wittawat@web.de> */
+
+#include "tulip.h"
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+u16 t21142_csr14[] = { 0xFFFF, 0x0705, 0x0705, 0x0000, 0x7F3D, };
+
+
+void t21142_start_nway(/*RTnet*/struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	int csr14 = ((tp->sym_advertise & 0x0780) << 9)  |
+		((tp->sym_advertise & 0x0020) << 1) | 0xffbf;
+
+	rtdev->if_port = 0;
+	tp->nway = tp->mediasense = 1;
+	tp->nwayset = tp->lpar = 0;
+	if (tulip_debug > 1)
+		printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%8.8x.\n",
+			   rtdev->name, csr14);
+	outl(0x0001, ioaddr + CSR13);
+	udelay(100);
+	outl(csr14, ioaddr + CSR14);
+	tp->csr6 = 0x82420000 | (tp->sym_advertise & 0x0040 ? FullDuplex : 0);
+	outl(tp->csr6, ioaddr + CSR6);
+	if (tp->mtable  &&  tp->mtable->csr15dir) {
+		outl(tp->mtable->csr15dir, ioaddr + CSR15);
+		outl(tp->mtable->csr15val, ioaddr + CSR15);
+	} else
+		outw(0x0008, ioaddr + CSR15);
+	outl(0x1301, ioaddr + CSR12); 		/* Trigger NWAY. */
+}
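+
+/*
+ * Worked example (illustration only, not taken from the original driver
+ * comments): with the commonly used advertisement value 0x01E0 (10/100,
+ * half and full duplex), the CSR14 value computed above is
+ * ((0x01E0 & 0x0780) << 9) | ((0x01E0 & 0x0020) << 1) | 0xffbf
+ * = 0x30000 | 0x40 | 0xffbf = 0x0003ffff.
+ */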
+
+
+++ linux-patched/drivers/xenomai/net/drivers/tulip/tulip_core.c	2022-03-21 12:58:29.662886299 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/tulip/pnic.c	1970-01-01 01:00:00.000000000 +0100
+/* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux. */
+
+/*
+	Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
+	Copyright 2000-2002  The Linux Kernel Team
+	Written/copyright 1994-2001 by Donald Becker.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+	for more information on this driver, or visit the project
+	Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+/* Ported to RTnet by Wittawat Yamwong <wittawat@web.de> */
+
+#define DRV_NAME	"tulip-rt"
+#define DRV_VERSION	"0.9.15-pre11-rt"
+#define DRV_RELDATE	"May 11, 2002"
+
+#include <linux/module.h>
+#include "tulip.h"
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/uaccess.h>
+#include <asm/unaligned.h>
+
+#ifdef __sparc__
+#include <asm/pbm.h>
+#endif
+
+#include <rtnet_port.h>
+
+static char version[] =
+	"Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
+
+
+/* A few user-configurable values. */
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static unsigned int max_interrupt_work = 25;
+
+#define MAX_UNITS 8
+/* Used to pass the full-duplex flag, etc. */
+static int full_duplex[MAX_UNITS];
+static int options[MAX_UNITS];
+static int mtu[MAX_UNITS];			/* Jumbo MTU for interfaces. */
+
+/*  The possible media types that can be set in options[] are: */
+const char * const medianame[32] = {
+	"10baseT", "10base2", "AUI", "100baseTx",
+	"10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
+	"100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
+	"10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
+	"MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
+	"","","","", "","","","",  "","","","Transceiver reset",
+};
+
+/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
+#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
+	|| defined(__sparc__) || defined(__ia64__) \
+	|| defined(__sh__) || defined(__mips__)
+static int rx_copybreak = 1518;
+#else
+static int rx_copybreak = 100;
+#endif
+
+/*
+  Set the bus performance register.
+	Typical: Set 16 longword cache alignment, no burst limit.
+	Cache alignment bits 15:14           Burst length 13:8
+		0000	No alignment  0x00000000 unlimited		0800 8 longwords
+		4000	8  longwords		0100 1 longword		1000 16 longwords
+		8000	16 longwords		0200 2 longwords	2000 32 longwords
+		C000	32  longwords		0400 4 longwords
+	Warning: many older 486 systems are broken and require setting 0x00A04800
+	   8 longword cache alignment, 8 longword burst.
+	ToDo: Non-Intel setting could be better.
+*/
+
+#if defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
+static int csr0 = 0x01A00000 | 0xE000;
+#elif defined(__i386__) || defined(__powerpc__)
+static int csr0 = 0x01A00000 | 0x8000;
+#elif defined(__sparc__) || defined(__hppa__)
+/* The UltraSparc PCI controllers will disconnect at every 64-byte
+ * crossing anyways so it makes no sense to tell Tulip to burst
+ * any more than that.
+ */
+static int csr0 = 0x01A00000 | 0x9000;
+#elif defined(__arm__) || defined(__sh__)
+static int csr0 = 0x01A00000 | 0x4800;
+#elif defined(__mips__)
+static int csr0 = 0x00200000 | 0x4000;
+#else
+#warning Processor architecture undefined!
+static int csr0 = 0x00A00000 | 0x4800;
+#endif
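+
+/*
+ * Illustrative sketch only (not used by the driver): composing a CSR0
+ * bus-configuration value from the cache-alignment and burst-length fields
+ * described in the comment above.  The helper name and the encoding of the
+ * alignment codes (01 = 8, 10 = 16, 11 = 32 longwords) are assumptions for
+ * illustration, derived from the table above.
+ */
+#if 0
+static inline u32 tulip_example_csr0(unsigned int cal_code, unsigned int burst)
+{
+	/* cal_code occupies bits 15:14, the burst length (in longwords,
+	   0 = unlimited) occupies bits 13:8 */
+	return (cal_code << 14) | (burst << 8);
+}
+/* e.g. tulip_example_csr0(2, 0) == 0x8000: 16-longword alignment, unlimited
+   burst, matching the x86 default above; tulip_example_csr0(1, 8) == 0x4800
+   matches the old-486 fallback mentioned in the warning. */
+#endif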
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (4*HZ)
+
+
+MODULE_AUTHOR("The Linux Kernel Team");
+MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
+MODULE_LICENSE("GPL");
+module_param(tulip_debug, int, 0444);
+module_param(max_interrupt_work, int, 0444);
+/*MODULE_PARM(rx_copybreak, "i");*/
+module_param(csr0, int, 0444);
+module_param_array(options, int, NULL, 0444);
+module_param_array(full_duplex, int, NULL, 0444);
+
+#define PFX DRV_NAME ": "
+
+#ifdef TULIP_DEBUG
+int tulip_debug = TULIP_DEBUG;
+#else
+int tulip_debug = 1;
+#endif
+
+static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
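+
+/*
+ * Usage note (an illustration, not taken from the original driver docs): the
+ * per-board "options" values select an entry of medianame[] above, e.g.
+ * options=11 forces the MII transceiver and options=3 forces 100baseTx for
+ * the first board, while full_duplex=1 forces full duplex.  The module name
+ * used when loading (for instance "modprobe rt_tulip options=11") depends on
+ * the build and is only an assumption here.
+ */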
+
+
+
+/*
+ * This table is used during operation for capabilities and the media timer.
+ *
+ * It is indexed via the values in 'enum chips'.
+ */
+
+struct tulip_chip_table tulip_tbl[] = {
+  /* DC21040 */
+  { "Digital DC21040 Tulip", 128, 0x0001ebef, 0 },
+
+  /* DC21041 */
+  { "Digital DC21041 Tulip", 128, 0x0001ebef,
+	HAS_MEDIA_TABLE | HAS_NWAY },
+
+  /* DC21140 */
+  { "Digital DS21140 Tulip", 128, 0x0001ebef,
+	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI },
+
+  /* DC21142, DC21143 */
+  { "Digital DS21143 Tulip", 128, 0x0801fbff,
+	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
+	| HAS_INTR_MITIGATION | HAS_PCI_MWI },
+
+  /* LC82C168 */
+  { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
+	HAS_MII | HAS_PNICNWAY },
+
+  /* MX98713 */
+  { "Macronix 98713 PMAC", 128, 0x0001ebef,
+	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM },
+
+  /* MX98715 */
+  { "Macronix 98715 PMAC", 256, 0x0001ebef,
+	HAS_MEDIA_TABLE },
+
+  /* MX98725 */
+  { "Macronix 98725 PMAC", 256, 0x0001ebef,
+	HAS_MEDIA_TABLE },
+
+  /* AX88140 */
+  { "ASIX AX88140", 128, 0x0001fbff,
+	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY | IS_ASIX },
+
+  /* PNIC2 */
+  { "Lite-On PNIC-II", 256, 0x0801fbff,
+	HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI },
+
+  /* COMET */
+  { "ADMtek Comet", 256, 0x0001abef,
+	MC_HASH_ONLY | COMET_MAC_ADDR },
+
+  /* COMPEX9881 */
+  { "Compex 9881 PMAC", 128, 0x0001ebef,
+	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM },
+
+  /* I21145 */
+  { "Intel DS21145 Tulip", 128, 0x0801fbff,
+	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
+	| HAS_NWAY | HAS_PCI_MWI },
+
+  /* DM910X */
+  { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
+	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI },
+};
+
+
+static struct pci_device_id tulip_pci_tbl[] = {
+	{ 0x1011, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21040 },
+	{ 0x1011, 0x0014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21041 },
+	{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
+	{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
+	{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
+	{ 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
+	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
+/*	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
+	{ 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
+	{ 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
+	{ 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
+	{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
+	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
+	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
+	{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
+	{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ } /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
+
+
+/* A full-duplex map for media types. */
+const char tulip_media_cap[32] =
+{0,0,0,16,  3,19,16,24,  27,4,7,5, 0,20,23,20,  28,31,0,0, };
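+/*
+ * Reading sketch (assuming the usual MediaIs* flag values from tulip.h:
+ * FD=1, AlwaysFD=2, MII=4, Fx=8, 100=16): entry 5, 100baseTx-FDX, is
+ * 19 = 16|2|1, i.e. a 100 Mbit, always-full-duplex medium, while entry 11,
+ * MII, is 5 = 4|1.
+ */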
+u8 t21040_csr13[] = {2,0x0C,8,4,  4,0,0,0, 0,0,0,0, 4,0,0,0};
+
+/* 21041 transceiver register settings: 10-T, 10-2, AUI, 10-T, 10T-FD*/
+u16 t21041_csr13[] = {
+	csr13_mask_10bt,		/* 10-T */
+	csr13_mask_auibnc,		/* 10-2 */
+	csr13_mask_auibnc,		/* AUI */
+	csr13_mask_10bt,		/* 10-T */
+	csr13_mask_10bt,		/* 10T-FD */
+};
+u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
+u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
+
+
+static void tulip_init_ring(/*RTnet*/struct rtnet_device *rtdev);
+static int tulip_start_xmit(struct /*RTnet*/rtskb *skb, /*RTnet*/struct rtnet_device *rtdev);
+static int tulip_open(/*RTnet*/struct rtnet_device *rtdev);
+static int tulip_close(/*RTnet*/struct rtnet_device *rtdev);
+static void tulip_up(/*RTnet*/struct rtnet_device *rtdev);
+static void tulip_down(/*RTnet*/struct rtnet_device *rtdev);
+static struct net_device_stats *tulip_get_stats(struct rtnet_device *rtdev);
+//static void set_rx_mode(struct net_device *dev);
+
+
+static void tulip_set_power_state (struct tulip_private *tp,
+				   int sleep, int snooze)
+{
+	if (tp->flags & HAS_ACPI) {
+		u32 tmp, newtmp;
+		pci_read_config_dword (tp->pdev, CFDD, &tmp);
+		newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
+		if (sleep)
+			newtmp |= CFDD_Sleep;
+		else if (snooze)
+			newtmp |= CFDD_Snooze;
+		if (tmp != newtmp)
+			pci_write_config_dword (tp->pdev, CFDD, newtmp);
+	}
+
+}
+
+static void tulip_up(/*RTnet*/struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	int i;
+
+	/* Wake the chip from sleep/snooze mode. */
+	tulip_set_power_state (tp, 0, 0);
+
+	/* On some chip revs we must set the MII/SYM port before the reset!? */
+	if (tp->mii_cnt  ||  (tp->mtable  &&  tp->mtable->has_mii))
+		outl(0x00040000, ioaddr + CSR6);
+
+	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
+	outl(0x00000001, ioaddr + CSR0);
+	udelay(100);
+
+	/* Deassert reset.
+	   Wait the specified 50 PCI cycles after a reset by initializing
+	   Tx and Rx queues and the address filter list. */
+	outl(tp->csr0, ioaddr + CSR0);
+	udelay(100);
+
+	if (tulip_debug > 1)
+		printk(KERN_DEBUG "%s: tulip_up(), irq==%d.\n", rtdev->name, rtdev->irq);
+
+	outl(tp->rx_ring_dma, ioaddr + CSR3);
+	outl(tp->tx_ring_dma, ioaddr + CSR4);
+	tp->cur_rx = tp->cur_tx = 0;
+	tp->dirty_rx = tp->dirty_tx = 0;
+
+	if (tp->flags & MC_HASH_ONLY) {
+		u32 addr_low = cpu_to_le32(get_unaligned((u32 *)rtdev->dev_addr));
+		u32 addr_high = cpu_to_le32(get_unaligned((u16 *)(rtdev->dev_addr+4)));
+		if (tp->chip_id == AX88140) {
+			outl(0, ioaddr + CSR13);
+			outl(addr_low,  ioaddr + CSR14);
+			outl(1, ioaddr + CSR13);
+			outl(addr_high, ioaddr + CSR14);
+		} else if (tp->flags & COMET_MAC_ADDR) {
+			outl(addr_low,  ioaddr + 0xA4);
+			outl(addr_high, ioaddr + 0xA8);
+			outl(0, ioaddr + 0xAC);
+			outl(0, ioaddr + 0xB0);
+		}
+	} else {
+		/* This is set_rx_mode(), but without starting the transmitter. */
+		u16 *eaddrs = (u16 *)rtdev->dev_addr;
+		u16 *setup_frm = &tp->setup_frame[15*6];
+		dma_addr_t mapping;
+
+		/* 21140 bug: you must add the broadcast address. */
+		memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
+		/* Fill the final entry of the table with our physical address. */
+		*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+		*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+		*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+
+		mapping = pci_map_single(tp->pdev, tp->setup_frame,
+					 sizeof(tp->setup_frame),
+					 PCI_DMA_TODEVICE);
+		tp->tx_buffers[tp->cur_tx].skb = NULL;
+		tp->tx_buffers[tp->cur_tx].mapping = mapping;
+
+		/* Put the setup frame on the Tx list. */
+		tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
+		tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
+		tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
+
+		tp->cur_tx++;
+	}
+
+	tp->saved_if_port = rtdev->if_port;
+	if (rtdev->if_port == 0)
+		rtdev->if_port = tp->default_port;
+
+	/* Allow selecting a default media. */
+	i = 0;
+	if (tp->mtable == NULL)
+		goto media_picked;
+	if (rtdev->if_port) {
+		int looking_for = tulip_media_cap[rtdev->if_port] & MediaIsMII ? 11 :
+			(rtdev->if_port == 12 ? 0 : rtdev->if_port);
+		for (i = 0; i < tp->mtable->leafcount; i++)
+			if (tp->mtable->mleaf[i].media == looking_for) {
+				printk(KERN_INFO "%s: Using user-specified media %s.\n",
+					   rtdev->name, medianame[rtdev->if_port]);
+				goto media_picked;
+			}
+	}
+	if ((tp->mtable->defaultmedia & 0x0800) == 0) {
+		int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
+		for (i = 0; i < tp->mtable->leafcount; i++)
+			if (tp->mtable->mleaf[i].media == looking_for) {
+				printk(KERN_INFO "%s: Using EEPROM-set media %s.\n",
+					   rtdev->name, medianame[looking_for]);
+				goto media_picked;
+			}
+	}
+	/* Start sensing first non-full-duplex media. */
+	for (i = tp->mtable->leafcount - 1;
+		 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
+		;
+media_picked:
+
+	tp->csr6 = 0;
+	tp->cur_index = i;
+	tp->nwayset = 0;
+
+	if (rtdev->if_port) {
+		if (tp->chip_id == DC21143  &&
+		    (tulip_media_cap[rtdev->if_port] & MediaIsMII)) {
+			/* We must reset the media CSRs when we force-select MII mode. */
+			outl(0x0000, ioaddr + CSR13);
+			outl(0x0000, ioaddr + CSR14);
+			outl(0x0008, ioaddr + CSR15);
+		}
+		tulip_select_media(rtdev, 1);
+	} else if (tp->chip_id == DC21041) {
+		rtdev->if_port = 0;
+		tp->nway = tp->mediasense = 1;
+		tp->nwayset = tp->lpar = 0;
+		outl(0x00000000, ioaddr + CSR13);
+		outl(0xFFFFFFFF, ioaddr + CSR14);
+		outl(0x00000008, ioaddr + CSR15); /* Listen on AUI also. */
+		tp->csr6 = 0x80020000;
+		if (tp->sym_advertise & 0x0040)
+			tp->csr6 |= FullDuplex;
+		outl(tp->csr6, ioaddr + CSR6);
+		outl(0x0000EF01, ioaddr + CSR13);
+
+	} else if (tp->chip_id == DC21142) {
+		if (tp->mii_cnt) {
+			tulip_select_media(rtdev, 1);
+			if (tulip_debug > 1)
+				printk(KERN_INFO "%s: Using MII transceiver %d, status %4.4x.\n",
+					   rtdev->name, tp->phys[0], tulip_mdio_read(rtdev, tp->phys[0], 1));
+			outl(csr6_mask_defstate, ioaddr + CSR6);
+			tp->csr6 = csr6_mask_hdcap;
+			rtdev->if_port = 11;
+			outl(0x0000, ioaddr + CSR13);
+			outl(0x0000, ioaddr + CSR14);
+		} else
+			t21142_start_nway(rtdev);
+	} else if (tp->chip_id == PNIC2) {
+		/* for initial startup advertise 10/100 Full and Half */
+		tp->sym_advertise = 0x01E0;
+		/* enable autonegotiate end interrupt */
+		outl(inl(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
+		outl(inl(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
+		pnic2_start_nway(rtdev);
+	} else if (tp->chip_id == LC82C168  &&  ! tp->medialock) {
+		if (tp->mii_cnt) {
+			rtdev->if_port = 11;
+			tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
+			outl(0x0001, ioaddr + CSR15);
+		} else if (inl(ioaddr + CSR5) & TPLnkPass)
+			pnic_do_nway(rtdev);
+		else {
+			/* Start with 10mbps to do autonegotiation. */
+			outl(0x32, ioaddr + CSR12);
+			tp->csr6 = 0x00420000;
+			outl(0x0001B078, ioaddr + 0xB8);
+			outl(0x0201B078, ioaddr + 0xB8);
+		}
+	} else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881)
+			   && ! tp->medialock) {
+		rtdev->if_port = 0;
+		tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
+		outl(0x0f370000 | inw(ioaddr + 0x80), ioaddr + 0x80);
+	} else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
+		/* Provided by BOLO, Macronix - 12/10/1998. */
+		rtdev->if_port = 0;
+		tp->csr6 = 0x01a80200;
+		outl(0x0f370000 | inw(ioaddr + 0x80), ioaddr + 0x80);
+		outl(0x11000 | inw(ioaddr + 0xa0), ioaddr + 0xa0);
+	} else if (tp->chip_id == COMET) {
+		/* Enable automatic Tx underrun recovery. */
+		outl(inl(ioaddr + 0x88) | 1, ioaddr + 0x88);
+		rtdev->if_port = tp->mii_cnt ? 11 : 0;
+		tp->csr6 = 0x00040000;
+	} else if (tp->chip_id == AX88140) {
+		tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
+	} else
+		tulip_select_media(rtdev, 1);
+
+	/* Start the chip's Tx to process setup frame. */
+	tulip_stop_rxtx(tp);
+	barrier();
+	udelay(5);
+	outl(tp->csr6 | TxOn, ioaddr + CSR6);
+
+	/* Enable interrupts by setting the interrupt mask. */
+	outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
+	outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+	tulip_start_rxtx(tp);
+	outl(0, ioaddr + CSR2);		/* Rx poll demand */
+
+	if (tulip_debug > 2) {
+		printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
+			   rtdev->name, inl(ioaddr + CSR0), inl(ioaddr + CSR5),
+			   inl(ioaddr + CSR6));
+	}
+}
+
+
+static int
+tulip_open(/*RTnet*/struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	int retval;
+
+	if ((retval = /*RTnet*/rtdm_irq_request(&tp->irq_handle, rtdev->irq,
+						tulip_interrupt, 0, "rt_tulip",
+						rtdev))) {
+		printk("%s: Unable to install ISR for IRQ %d\n",
+			  rtdev->name,rtdev->irq);
+		return retval;
+	}
+
+	rt_stack_connect(rtdev, &STACK_manager);
+
+	tulip_init_ring (rtdev);
+
+	tulip_up (rtdev);
+
+	rtnetif_start_queue (rtdev);
+
+	return 0;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void tulip_init_ring(/*RTnet*/struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	int i;
+
+	tp->susp_rx = 0;
+	tp->ttimer = 0;
+	tp->nir = 0;
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		tp->rx_ring[i].status = 0x00000000;
+		tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
+		tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
+		tp->rx_buffers[i].skb = NULL;
+		tp->rx_buffers[i].mapping = 0;
+	}
+	/* Mark the last entry as wrapping the ring. */
+	tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
+	tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		dma_addr_t mapping;
+
+		/* Note the receive buffer must be longword aligned.
+		   dev_alloc_skb() provides 16 byte alignment.  But do *not*
+		   use skb_reserve() to align the IP header! */
+		struct /*RTnet*/rtskb *skb = /*RTnet*/rtnetdev_alloc_rtskb(rtdev, PKT_BUF_SZ);
+		tp->rx_buffers[i].skb = skb;
+		if (skb == NULL)
+			break;
+		mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+		tp->rx_buffers[i].mapping = mapping;
+		tp->rx_ring[i].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
+		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
+	}
+	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+	/* The Tx buffer descriptor is filled in as needed, but we
+	   do need to clear the ownership bit. */
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		tp->tx_buffers[i].skb = NULL;
+		tp->tx_buffers[i].mapping = 0;
+		tp->tx_ring[i].status = 0x00000000;
+		tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
+	}
+	tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
+}
+
+static int
+tulip_start_xmit(struct /*RTnet*/rtskb *skb, /*RTnet*/struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	int entry;
+	u32 flag;
+	dma_addr_t mapping;
+	/*RTnet*/
+	rtdm_lockctx_t context;
+
+
+	rtdm_lock_get_irqsave(&tp->lock, context);
+
+	/* TODO: move to rtdev_xmit, use queue */
+	if (rtnetif_queue_stopped(rtdev)) {
+		dev_kfree_rtskb(skb);
+		tp->stats.tx_dropped++;
+
+		rtdm_lock_put_irqrestore(&tp->lock, context);
+		return 0;
+	}
+	/*RTnet*/
+
+	/* Calculate the next Tx descriptor entry. */
+	entry = tp->cur_tx % TX_RING_SIZE;
+
+	tp->tx_buffers[entry].skb = skb;
+	mapping = pci_map_single(tp->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+	tp->tx_buffers[entry].mapping = mapping;
+	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
+
+	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
+		flag = 0x60000000; /* No interrupt */
+	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
+		flag = 0xe0000000; /* Tx-done intr. */
+	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
+		flag = 0x60000000; /* No Tx-done intr. */
+	} else {		/* Leave room for set_rx_mode() to fill entries. */
+		flag = 0xe0000000; /* Tx-done intr. */
+		rtnetif_stop_queue(rtdev);
+	}
+	if (entry == TX_RING_SIZE-1)
+		flag = 0xe0000000 | DESC_RING_WRAP;
+
+	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
+	/* if we were using Transmit Automatic Polling, we would need a
+	 * wmb() here. */
+	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+
+	/*RTnet*/
+	/* get and patch time stamp just before the transmission */
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+	/*RTnet*/
+
+	wmb();
+
+	tp->cur_tx++;
+
+	/* Trigger an immediate transmit demand. */
+	outl(0, rtdev->base_addr + CSR1);
+
+	/*RTnet*/
+	rtdm_lock_put_irqrestore(&tp->lock, context);
+	/*RTnet*/
+
+	return 0;
+}
+
+static void tulip_clean_tx_ring(struct tulip_private *tp)
+{
+	unsigned int dirty_tx;
+
+	for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
+		dirty_tx++) {
+		int entry = dirty_tx % TX_RING_SIZE;
+		int status = le32_to_cpu(tp->tx_ring[entry].status);
+
+		if (status < 0) {
+			tp->stats.tx_errors++;	/* It wasn't Txed */
+			tp->tx_ring[entry].status = 0;
+		}
+
+		/* Check for Tx filter setup frames. */
+		if (tp->tx_buffers[entry].skb == NULL) {
+			/* test because dummy frames not mapped */
+			if (tp->tx_buffers[entry].mapping)
+				pci_unmap_single(tp->pdev,
+					tp->tx_buffers[entry].mapping,
+					sizeof(tp->setup_frame),
+					PCI_DMA_TODEVICE);
+			continue;
+		}
+
+		pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
+				tp->tx_buffers[entry].skb->len,
+				PCI_DMA_TODEVICE);
+
+		/* Free the original skb. */
+		/*RTnet*/dev_kfree_rtskb(tp->tx_buffers[entry].skb);
+		tp->tx_buffers[entry].skb = NULL;
+		tp->tx_buffers[entry].mapping = 0;
+	}
+}
+
+static struct net_device_stats *tulip_get_stats(struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *) rtdev->priv;
+	return &tp->stats;
+}
+
+static void tulip_down (/*RTnet*/struct rtnet_device *rtdev)
+{
+	long ioaddr = rtdev->base_addr;
+	struct tulip_private *tp = (struct tulip_private *) rtdev->priv;
+
+	rtdm_irq_disable(&tp->irq_handle);
+	rtdm_lock_get(&tp->lock); /* sync with IRQ handler on other cpu -JK- */
+
+	/* Disable interrupts by clearing the interrupt mask. */
+	outl (0x00000000, ioaddr + CSR7);
+
+	/* Stop the Tx and Rx processes. */
+	tulip_stop_rxtx(tp);
+
+	/* prepare receive buffers */
+	tulip_refill_rx(rtdev);
+
+	/* release any unconsumed transmit buffers */
+	tulip_clean_tx_ring(tp);
+
+	/* 21040 -- Leave the card in 10baseT state. */
+	if (tp->chip_id == DC21040)
+		outl (0x00000004, ioaddr + CSR13);
+
+	if (inl (ioaddr + CSR6) != 0xffffffff)
+		tp->stats.rx_missed_errors += inl (ioaddr + CSR8) & 0xffff;
+
+	rtdm_lock_put(&tp->lock);
+	rtdm_irq_enable(&tp->irq_handle);
+
+	rtdev->if_port = tp->saved_if_port;
+
+	/* Leave the driver in snooze, not sleep, mode. */
+	tulip_set_power_state (tp, 0, 1);
+}
+
+
+static int tulip_close (/*RTnet*/struct rtnet_device *rtdev)
+{
+	long ioaddr = rtdev->base_addr;
+	struct tulip_private *tp = (struct tulip_private *) rtdev->priv;
+	int i;
+
+	rtnetif_stop_queue (rtdev);
+
+	tulip_down (rtdev);
+
+	if (tulip_debug > 1)
+		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
+			rtdev->name, inl (ioaddr + CSR5));
+
+	rtdm_irq_free(&tp->irq_handle);
+
+	/* Free all the skbuffs in the Rx queue. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct /*RTnet*/rtskb *skb = tp->rx_buffers[i].skb;
+		dma_addr_t mapping = tp->rx_buffers[i].mapping;
+
+		tp->rx_buffers[i].skb = NULL;
+		tp->rx_buffers[i].mapping = 0;
+
+		tp->rx_ring[i].status = 0;	/* Not owned by Tulip chip. */
+		tp->rx_ring[i].length = 0;
+		tp->rx_ring[i].buffer1 = 0xBADF00D0;	/* An invalid address. */
+		if (skb) {
+			pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
+					 PCI_DMA_FROMDEVICE);
+			/*RTnet*/dev_kfree_rtskb (skb);
+		}
+	}
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		struct /*RTnet*/rtskb *skb = tp->tx_buffers[i].skb;
+
+		if (skb != NULL) {
+			pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
+					 skb->len, PCI_DMA_TODEVICE);
+			/*RTnet*/dev_kfree_rtskb (skb);
+		}
+		tp->tx_buffers[i].skb = NULL;
+		tp->tx_buffers[i].mapping = 0;
+	}
+
+	rt_stack_disconnect(rtdev);
+
+	return 0;
+}
+
+#ifdef XXX_CONFIG_TULIP_MWI
+static void tulip_mwi_config (struct pci_dev *pdev,
+					struct net_device *dev)
+{
+	struct tulip_private *tp = rtdev->priv;
+	u8 cache;
+	u16 pci_command;
+	u32 csr0;
+
+	if (tulip_debug > 3)
+		printk(KERN_DEBUG "%s: tulip_mwi_config()\n", pci_name(pdev));
+
+	tp->csr0 = csr0 = 0;
+
+	/* if we have any cache line size at all, we can do MRM */
+	csr0 |= MRM;
+
+	/* ...and barring hardware bugs, MWI */
+	if (!(tp->chip_id == DC21143 && tp->revision == 65))
+		csr0 |= MWI;
+
+	/* Set or disable MWI in the standard PCI command bit.
+	 * Check for the case where MWI is desired but not available.
+	 */
+	if (csr0 & MWI)	pci_set_mwi(pdev);
+	else		pci_clear_mwi(pdev);
+
+	/* read result from hardware (in case bit refused to enable) */
+	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+	if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
+		csr0 &= ~MWI;
+
+	/* if cache line size hardwired to zero, no MWI */
+	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
+	if ((csr0 & MWI) && (cache == 0)) {
+		csr0 &= ~MWI;
+		pci_clear_mwi(pdev);
+	}
+
+	/* assign per-cacheline-size cache alignment and
+	 * burst length values
+	 */
+	switch (cache) {
+	case 8:
+		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
+		break;
+	case 16:
+		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
+		break;
+	case 32:
+		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
+		break;
+	default:
+		cache = 0;
+		break;
+	}
+
+	/* if we have a good cache line size, we by now have a good
+	 * csr0, so save it and exit
+	 */
+	if (cache)
+		goto out;
+
+	/* we don't have a good csr0 or cache line size, disable MWI */
+	if (csr0 & MWI) {
+		pci_clear_mwi(pdev);
+		csr0 &= ~MWI;
+	}
+
+	/* sane defaults for burst length and cache alignment
+	 * originally from de4x5 driver
+	 */
+	csr0 |= (8 << BurstLenShift) | (1 << CALShift);
+
+out:
+	tp->csr0 = csr0;
+	if (tulip_debug > 2)
+		printk(KERN_DEBUG "%s: MWI config cacheline=%d, csr0=%08x\n",
+		       pci_name(pdev), cache, csr0);
+}
+#endif
+
+
+static int tulip_init_one (struct pci_dev *pdev,
+				     const struct pci_device_id *ent)
+{
+	struct tulip_private *tp;
+	/* See note below on the multiport cards. */
+	static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
+	static struct pci_device_id early_486_chipsets[] = {
+		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
+		{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
+		{ },
+	};
+#if defined(__i386__)
+	static int last_irq;
+#endif
+	u8 chip_rev;
+	unsigned int i, irq;
+	unsigned short sum;
+	u8 ee_data[EEPROM_SIZE];
+	/*RTnet*/struct rtnet_device *rtdev;
+	long ioaddr;
+	static int board_idx = -1;
+	int chip_idx = ent->driver_data;
+	unsigned int t2104x_mode = 0;
+	unsigned int eeprom_missing = 0;
+
+#ifndef MODULE
+	static int did_version;		/* Already printed version info. */
+	if (tulip_debug > 0  &&  did_version++ == 0)
+		printk(KERN_INFO "%s", version);
+#endif
+
+	board_idx++;
+
+	if (cards[board_idx] == 0)
+		return -ENODEV;
+
+	/*
+	 *	LAN Media cards wire a Tulip chip to a WAN interface; they need a
+	 *	very different driver (the lmc driver).
+	 */
+
+	if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
+		printk(KERN_ERR PFX "skipping LMC card.\n");
+		return -ENODEV;
+	}
+
+	/*
+	 *	Early DM9100's need software CRC and the DMFE driver
+	 */
+
+	if (pdev->vendor == 0x1282 && pdev->device == 0x9100)
+	{
+		u32 dev_rev;
+		/* Read Chip revision */
+		pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
+		if(dev_rev < 0x02000030)
+		{
+			printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
+			return -ENODEV;
+		}
+	}
+
+	/*
+	 *	Looks for early PCI chipsets where people report hangs
+	 *	without the workarounds being on.
+	 */
+
+	/* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
+	      aligned.  Aries might need this too. The Saturn errata are not
+	      pretty reading but thankfully it's an old 486 chipset.
+
+	   2. The dreaded SiS496 486 chipset. Same workaround as Intel
+	      Saturn.
+	 */
+
+	if (pci_dev_present(early_486_chipsets))
+		csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
+
+	/* bugfix: the ASIX must have a burst limit or horrible things happen. */
+	if (chip_idx == AX88140) {
+		if ((csr0 & 0x3f00) == 0)
+			csr0 |= 0x2000;
+	}
+
+	/* PNIC doesn't have MWI/MRL/MRM... */
+	if (chip_idx == LC82C168)
+		csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
+
+	/* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
+	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
+		csr0 &= ~0x01f100ff;
+
+#if defined(__sparc__)
+	/* DM9102A needs 32-dword alignment/burst length on sparc - chip bug? */
+	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
+		csr0 = (csr0 & ~0xff00) | 0xe000;
+#endif
+
+	/*
+	 *	And back to business
+	 */
+
+	i = pci_enable_device(pdev);
+	if (i) {
+		printk(KERN_ERR PFX
+			"Cannot enable tulip board #%d, aborting\n",
+			board_idx);
+		return i;
+	}
+
+	ioaddr = pci_resource_start (pdev, 0);
+	irq = pdev->irq;
+
+	/* alloc_etherdev ensures aligned and zeroed private structures */
+	rtdev = /*RTnet*/rt_alloc_etherdev (sizeof (*tp),
+					RX_RING_SIZE * 2 + TX_RING_SIZE);
+	if (!rtdev) {
+		printk(KERN_ERR PFX "ether device alloc failed, aborting\n");
+		return -ENOMEM;
+	}
+	//rtdev_alloc_name(rtdev, "eth%d");//Done by register_rtdev()
+	rt_rtdev_connect(rtdev, &RTDEV_manager);
+	rtdev->vers = RTDEV_VERS_2_0;
+	rtdev->sysbind = &pdev->dev;
+
+	if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
+		printk(KERN_ERR PFX "%s: I/O region (0x%llx@0x%llx) too small, "
+			"aborting\n", pci_name(pdev),
+			(unsigned long long)pci_resource_len (pdev, 0),
+			(unsigned long long)pci_resource_start (pdev, 0));
+		goto err_out_free_netdev;
+	}
+
+	/* grab all resources from both PIO and MMIO regions, as we
+	 * don't want anyone else messing around with our hardware */
+	if (pci_request_regions (pdev, "tulip"))
+		goto err_out_free_netdev;
+
+#ifndef USE_IO_OPS
+	ioaddr = (unsigned long) ioremap (pci_resource_start (pdev, 1),
+					  tulip_tbl[chip_idx].io_size);
+	if (!ioaddr)
+		goto err_out_free_res;
+#endif
+
+	pci_read_config_byte (pdev, PCI_REVISION_ID, &chip_rev);
+
+	/*
+	 * initialize private data structure 'tp'
+	 * it is zeroed and aligned in alloc_etherdev
+	 */
+	tp = rtdev->priv;
+
+	tp->rx_ring = pci_alloc_consistent(pdev,
+					   sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
+					   sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
+					   &tp->rx_ring_dma);
+	if (!tp->rx_ring)
+		goto err_out_mtable;
+	tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
+	tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
+
+	tp->chip_id = chip_idx;
+	tp->flags = tulip_tbl[chip_idx].flags;
+	tp->pdev = pdev;
+	tp->base_addr = ioaddr;
+	tp->revision = chip_rev;
+	tp->csr0 = csr0;
+	rtdm_lock_init(&tp->lock);
+	spin_lock_init(&tp->mii_lock);
+
+	rtdev->base_addr = ioaddr;
+	rtdev->irq = irq;
+
+#ifdef XXX_CONFIG_TULIP_MWI
+	if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
+		tulip_mwi_config (pdev, rtdev);
+#else
+	/* MWI is broken for DC21143 rev 65... */
+	if (chip_idx == DC21143 && chip_rev == 65)
+		tp->csr0 &= ~MWI;
+#endif
+
+	/* Stop the chip's Tx and Rx processes. */
+	tulip_stop_rxtx(tp);
+
+	pci_set_master(pdev);
+
+	/* Clear the missed-packet counter. */
+	inl(ioaddr + CSR8);
+
+	if (chip_idx == DC21041) {
+		if (inl(ioaddr + CSR9) & 0x8000) {
+			chip_idx = DC21040;
+			t2104x_mode = 1;
+		} else {
+			t2104x_mode = 2;
+		}
+	}
+
+	/* The station address ROM is read byte serially.  The register must
+	   be polled, waiting for the value to be read bit serially from the
+	   EEPROM.
+	   */
+	sum = 0;
+	if (chip_idx == DC21040) {
+		outl(0, ioaddr + CSR9);		/* Reset the pointer with a dummy write. */
+		for (i = 0; i < 6; i++) {
+			int value, boguscnt = 100000;
+			do
+				value = inl(ioaddr + CSR9);
+			while (value < 0  && --boguscnt > 0);
+			rtdev->dev_addr[i] = value;
+			sum += value & 0xff;
+		}
+	} else if (chip_idx == LC82C168) {
+		for (i = 0; i < 3; i++) {
+			int value, boguscnt = 100000;
+			outl(0x600 | i, ioaddr + 0x98);
+			do
+				value = inl(ioaddr + CSR9);
+			while (value < 0  && --boguscnt > 0);
+			put_unaligned(le16_to_cpu(value), ((u16*)rtdev->dev_addr) + i);
+			sum += value & 0xffff;
+		}
+	} else if (chip_idx == COMET) {
+		/* No need to read the EEPROM. */
+		put_unaligned(inl(ioaddr + 0xA4), (u32 *)rtdev->dev_addr);
+		put_unaligned(inl(ioaddr + 0xA8), (u16 *)(rtdev->dev_addr + 4));
+		for (i = 0; i < 6; i ++)
+			sum += rtdev->dev_addr[i];
+	} else {
+		/* A serial EEPROM interface, we read now and sort it out later. */
+		int sa_offset = 0;
+		int ee_addr_size = tulip_read_eeprom(ioaddr, 0xff, 8) & 0x40000 ? 8 : 6;
+
+		for (i = 0; i < sizeof(ee_data)/2; i++)
+			((u16 *)ee_data)[i] =
+				le16_to_cpu(tulip_read_eeprom(ioaddr, i, ee_addr_size));
+
+		/* DEC now has a specification (see Notes) but early board makers
+		   just put the address in the first EEPROM locations. */
+		/* This does  memcmp(eedata, eedata+16, 8) */
+		for (i = 0; i < 8; i ++)
+			if (ee_data[i] != ee_data[16+i])
+				sa_offset = 20;
+		if (ee_data[0] == 0xff  &&  ee_data[1] == 0xff &&  ee_data[2] == 0)
+			sa_offset = 2;		/* Grrr, damn Matrox boards. */
+#ifdef CONFIG_DDB5476
+		if ((pdev->bus->number == 0) && (PCI_SLOT(pdev->devfn) == 6)) {
+			/* DDB5476 MAC address in first EEPROM locations. */
+		       sa_offset = 0;
+		       /* No media table either */
+		       tp->flags &= ~HAS_MEDIA_TABLE;
+	       }
+#endif
+#ifdef CONFIG_DDB5477
+	       if ((pdev->bus->number == 0) && (PCI_SLOT(pdev->devfn) == 4)) {
+		       /* DDB5477 MAC address in first EEPROM locations. */
+		       sa_offset = 0;
+		       /* No media table either */
+		       tp->flags &= ~HAS_MEDIA_TABLE;
+	       }
+#endif
+#ifdef CONFIG_MIPS_COBALT
+	       if ((pdev->bus->number == 0) &&
+		   ((PCI_SLOT(pdev->devfn) == 7) ||
+		    (PCI_SLOT(pdev->devfn) == 12))) {
+		       /* Cobalt MAC address in first EEPROM locations. */
+		       sa_offset = 0;
+		       /* No media table either */
+		       tp->flags &= ~HAS_MEDIA_TABLE;
+	       }
+#endif
+		for (i = 0; i < 6; i ++) {
+			rtdev->dev_addr[i] = ee_data[i + sa_offset];
+			sum += ee_data[i + sa_offset];
+		}
+	}
+	/* Lite-On boards have the address byte-swapped. */
+	if ((rtdev->dev_addr[0] == 0xA0  ||  rtdev->dev_addr[0] == 0xC0)
+		&&  rtdev->dev_addr[1] == 0x00)
+		for (i = 0; i < 6; i+=2) {
+			char tmp = rtdev->dev_addr[i];
+			rtdev->dev_addr[i] = rtdev->dev_addr[i+1];
+			rtdev->dev_addr[i+1] = tmp;
+		}
+	/* On the Zynx 315 Etherarray and other multiport boards only the
+	   first Tulip has an EEPROM.
+	   On Sparc systems the mac address is held in the OBP property
+	   "local-mac-address".
+	   The addresses of the subsequent ports are derived from the first.
+	   Many PCI BIOSes also incorrectly report the IRQ line, so we correct
+	   that here as well. */
+	if (sum == 0  || sum == 6*0xff) {
+#if defined(__sparc__)
+		struct pcidev_cookie *pcp = pdev->sysdata;
+#endif
+		eeprom_missing = 1;
+		for (i = 0; i < 5; i++)
+			rtdev->dev_addr[i] = last_phys_addr[i];
+		rtdev->dev_addr[i] = last_phys_addr[i] + 1;
+#if defined(__sparc__)
+		if ((pcp != NULL) && prom_getproplen(pcp->prom_node,
+			"local-mac-address") == 6) {
+			prom_getproperty(pcp->prom_node, "local-mac-address",
+			    rtdev->dev_addr, 6);
+		}
+#endif
+#if defined(__i386__)		/* Patch up x86 BIOS bug. */
+		if (last_irq)
+			irq = last_irq;
+#endif
+	}
+
+	for (i = 0; i < 6; i++)
+		last_phys_addr[i] = rtdev->dev_addr[i];
+#if defined(__i386__)
+	last_irq = irq;
+#endif
+
+	/* The lower four bits are the media type. */
+	if (board_idx >= 0  &&  board_idx < MAX_UNITS) {
+		/* Somehow required for this RTnet version, don't ask me why... */
+		if (!options[board_idx])
+			tp->default_port = 11; /*MII*/
+		/*RTnet*/
+
+		if (options[board_idx] & MEDIA_MASK)
+			tp->default_port = options[board_idx] & MEDIA_MASK;
+		if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
+			tp->full_duplex = 1;
+		if (mtu[board_idx] > 0)
+			rtdev->mtu = mtu[board_idx];
+	}
+	if (rtdev->mem_start & MEDIA_MASK)
+		tp->default_port = rtdev->mem_start & MEDIA_MASK;
+	if (tp->default_port) {
+		printk(KERN_INFO "tulip%d: Transceiver selection forced to %s.\n",
+		       board_idx, medianame[tp->default_port & MEDIA_MASK]);
+		tp->medialock = 1;
+		if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
+			tp->full_duplex = 1;
+	}
+	if (tp->full_duplex)
+		tp->full_duplex_lock = 1;
+
+	if (tulip_media_cap[tp->default_port] & MediaIsMII) {
+		u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
+		tp->mii_advertise = media2advert[tp->default_port - 9];
+		tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
+	}
+
+	if (tp->flags & HAS_MEDIA_TABLE) {
+		memcpy(tp->eeprom, ee_data, sizeof(tp->eeprom));
+
+		sprintf(rtdev->name, "tulip%d", board_idx);	/* hack */
+		tulip_parse_eeprom(rtdev);
+		strcpy(rtdev->name, "rteth%d");			/* un-hack */
+	}
+
+	if ((tp->flags & ALWAYS_CHECK_MII) ||
+		(tp->mtable  &&  tp->mtable->has_mii) ||
+		( ! tp->mtable  &&  (tp->flags & HAS_MII))) {
+		if (tp->mtable  &&  tp->mtable->has_mii) {
+			for (i = 0; i < tp->mtable->leafcount; i++)
+				if (tp->mtable->mleaf[i].media == 11) {
+					tp->cur_index = i;
+					tp->saved_if_port = rtdev->if_port;
+					tulip_select_media(rtdev, 2);
+					rtdev->if_port = tp->saved_if_port;
+					break;
+				}
+		}
+
+		/* Find the connected MII xcvrs.
+		   Doing this in open() would allow detecting external xcvrs
+		   later, but takes much time. */
+		tulip_find_mii (rtdev, board_idx);
+	}
+
+	rtdev->open = tulip_open;
+	rtdev->stop = tulip_close;
+	rtdev->hard_header = rt_eth_header;
+	rtdev->hard_start_xmit = tulip_start_xmit;
+	rtdev->get_stats = tulip_get_stats;
+
+	if (/*RTnet*/rt_register_rtnetdev(rtdev)) {
+		goto err_out_free_ring;
+	}
+
+	printk(KERN_INFO "%s: %s rev %d at %#3lx,",
+	       rtdev->name, tulip_tbl[chip_idx].chip_name, chip_rev, ioaddr);
+	pci_set_drvdata(pdev, rtdev);
+
+	if (t2104x_mode == 1)
+		printk(" 21040 compatible mode,");
+	else if (t2104x_mode == 2)
+		printk(" 21041 mode,");
+	if (eeprom_missing)
+		printk(" EEPROM not present,");
+	for (i = 0; i < 6; i++)
+		printk("%c%2.2X", i ? ':' : ' ', rtdev->dev_addr[i]);
+	printk(", IRQ %d.\n", irq);
+
+/*RTnet
+	if (tp->chip_id == PNIC2)
+		tp->link_change = pnic2_lnk_change;
+	else if ((tp->flags & HAS_NWAY)  || tp->chip_id == DC21041)
+		tp->link_change = t21142_lnk_change;
+	else if (tp->flags & HAS_PNICNWAY)
+		tp->link_change = pnic_lnk_change;
+ *RTnet*/
+	tp->link_change = NULL;
+
+	/* Reset the xcvr interface and turn on heartbeat. */
+	switch (chip_idx) {
+	case DC21041:
+		if (tp->sym_advertise == 0)
+			tp->sym_advertise = 0x0061;
+		outl(0x00000000, ioaddr + CSR13);
+		outl(0xFFFFFFFF, ioaddr + CSR14);
+		outl(0x00000008, ioaddr + CSR15); /* Listen on AUI also. */
+		outl(inl(ioaddr + CSR6) | csr6_fd, ioaddr + CSR6);
+		outl(0x0000EF01, ioaddr + CSR13);
+		break;
+	case DC21040:
+		outl(0x00000000, ioaddr + CSR13);
+		outl(0x00000004, ioaddr + CSR13);
+		break;
+	case DC21140:
+	case DM910X:
+	default:
+		if (tp->mtable)
+			outl(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
+		break;
+	case DC21142:
+		if (tp->mii_cnt  ||  tulip_media_cap[rtdev->if_port] & MediaIsMII) {
+			outl(csr6_mask_defstate, ioaddr + CSR6);
+			outl(0x0000, ioaddr + CSR13);
+			outl(0x0000, ioaddr + CSR14);
+			outl(csr6_mask_hdcap, ioaddr + CSR6);
+		} else
+			t21142_start_nway(rtdev);
+		break;
+	case PNIC2:
+		/* just do a reset for sanity sake */
+		outl(0x0000, ioaddr + CSR13);
+		outl(0x0000, ioaddr + CSR14);
+		break;
+	case LC82C168:
+		if ( ! tp->mii_cnt) {
+			tp->nway = 1;
+			tp->nwayset = 0;
+			outl(csr6_ttm | csr6_ca, ioaddr + CSR6);
+			outl(0x30, ioaddr + CSR12);
+			outl(0x0001F078, ioaddr + CSR6);
+			outl(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
+		}
+		break;
+	case MX98713:
+	case COMPEX9881:
+		outl(0x00000000, ioaddr + CSR6);
+		outl(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
+		outl(0x00000001, ioaddr + CSR13);
+		break;
+	case MX98715:
+	case MX98725:
+		outl(0x01a80000, ioaddr + CSR6);
+		outl(0xFFFFFFFF, ioaddr + CSR14);
+		outl(0x00001000, ioaddr + CSR12);
+		break;
+	case COMET:
+		/* No initialization necessary. */
+		break;
+	}
+
+	/* put the chip in snooze mode until opened */
+	tulip_set_power_state (tp, 0, 1);
+
+	return 0;
+
+err_out_free_ring:
+	pci_free_consistent (pdev,
+			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
+			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
+			     tp->rx_ring, tp->rx_ring_dma);
+
+err_out_mtable:
+	if (tp->mtable)
+		kfree (tp->mtable);
+#ifndef USE_IO_OPS
+	iounmap((void *)ioaddr);
+
+err_out_free_res:
+#endif
+	pci_release_regions (pdev);
+
+err_out_free_netdev:
+	/*RTnet*/rtdev_free (rtdev);
+	return -ENODEV;
+}
+
+
+static void tulip_remove_one (struct pci_dev *pdev)
+{
+	struct rtnet_device *rtdev = (struct rtnet_device *) pci_get_drvdata (pdev);
+	struct tulip_private *tp;
+
+	if (!rtdev || !rtdev->priv)
+		return;
+
+	tp = rtdev->priv;
+	pci_free_consistent (pdev,
+			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
+			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
+			     tp->rx_ring, tp->rx_ring_dma);
+	rt_unregister_rtnetdev (rtdev);
+	if (tp->mtable)
+		kfree (tp->mtable);
+#ifndef USE_IO_OPS
+	iounmap((void *)rtdev->base_addr);
+#endif
+	/*RTnet*/
+	rt_rtdev_disconnect(rtdev);
+	rtdev_free (rtdev);
+	/*RTnet*/
+	pci_release_regions (pdev);
+	pci_set_drvdata (pdev, NULL);
+
+	/* pci_power_off (pdev, -1); */
+}
+
+
+static struct pci_driver tulip_driver = {
+	.name		= DRV_NAME,
+	.id_table	= tulip_pci_tbl,
+	.probe		= tulip_init_one,
+	.remove		= tulip_remove_one,
+};
+
+
+static int __init tulip_init (void)
+{
+#ifdef MODULE
+	printk(KERN_INFO "%s", version);
+#endif
+
+	/* copy module parms into globals */
+	tulip_rx_copybreak = rx_copybreak;
+	tulip_max_interrupt_work = max_interrupt_work;
+
+	/* probe for and init boards */
+	return pci_register_driver (&tulip_driver);
+}
+
+
+static void __exit tulip_cleanup (void)
+{
+	pci_unregister_driver (&tulip_driver);
+}
+
+
+module_init(tulip_init);
+module_exit(tulip_cleanup);
+++ linux-patched/drivers/xenomai/net/drivers/tulip/pnic.c	2022-03-21 12:58:29.657886348 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/tulip/media.c	1970-01-01 01:00:00.000000000 +0100
+/*
+	drivers/net/tulip/pnic.c
+
+	Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
+	Copyright 2000,2001  The Linux Kernel Team
+	Written/copyright 1994-2001 by Donald Becker.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+	for more information on this driver, or visit the project
+	Web page at http://sourceforge.net/projects/tulip/
+
+*/
+/* Ported to RTnet by Wittawat Yamwong <wittawat@web.de> */
+
+#include <linux/kernel.h>
+#include "tulip.h"
+
+
+void pnic_do_nway(/*RTnet*/struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	u32 phy_reg = inl(ioaddr + 0xB8);
+	u32 new_csr6 = tp->csr6 & ~0x40C40200;
+
+	if (phy_reg & 0x78000000) { /* Ignore baseT4 */
+		if (phy_reg & 0x20000000)		rtdev->if_port = 5;
+		else if (phy_reg & 0x40000000)	rtdev->if_port = 3;
+		else if (phy_reg & 0x10000000)	rtdev->if_port = 4;
+		else if (phy_reg & 0x08000000)	rtdev->if_port = 0;
+		tp->nwayset = 1;
+		new_csr6 = (rtdev->if_port & 1) ? 0x01860000 : 0x00420000;
+		outl(0x32 | (rtdev->if_port & 1), ioaddr + CSR12);
+		if (rtdev->if_port & 1)
+			outl(0x1F868, ioaddr + 0xB8);
+		if (phy_reg & 0x30000000) {
+			tp->full_duplex = 1;
+			new_csr6 |= 0x00000200;
+		}
+		if (tulip_debug > 1)
+			/*RTnet*/printk(KERN_DEBUG "%s: PNIC autonegotiated status %8.8x, %s.\n",
+				   rtdev->name, phy_reg, medianame[rtdev->if_port]);
+		if (tp->csr6 != new_csr6) {
+			tp->csr6 = new_csr6;
+			/* Restart Tx */
+			tulip_restart_rxtx(tp);
+		}
+	}
+}
+
+++ linux-patched/drivers/xenomai/net/drivers/tulip/media.c	2022-03-21 12:58:29.651886407 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/tulip/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+	drivers/net/tulip/media.c
+
+	Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
+	Copyright 2000,2001  The Linux Kernel Team
+	Written/copyright 1994-2001 by Donald Becker.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+	for more information on this driver, or visit the project
+	Web page at http://sourceforge.net/projects/tulip/
+
+*/
+/* Ported to RTnet by Wittawat Yamwong <wittawat@web.de> */
+
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include "tulip.h"
+
+
+/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
+   to support a pre-NWay full-duplex signaling mechanism using short frames.
+   No one knows what it should be, but if left at its default value some
+   10base2(!) packets trigger a full-duplex-request interrupt. */
+#define FULL_DUPLEX_MAGIC	0x6969
+
+/* The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
+   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+   "overclocking" issues or future 66 MHz PCI. */
+#define mdio_delay() inl(mdio_addr)
+
+/* Read and write the MII registers using software-generated serial
+   MDIO protocol.  It is just different enough from the EEPROM protocol
+   to not share code.  The maximum data clock rate is 2.5 MHz. */
+#define MDIO_SHIFT_CLK		0x10000
+#define MDIO_DATA_WRITE0	0x00000
+#define MDIO_DATA_WRITE1	0x20000
+#define MDIO_ENB		0x00000 /* Ignore the 0x02000 databook setting. */
+#define MDIO_ENB_IN		0x40000
+#define MDIO_DATA_READ		0x80000
+
+static const unsigned char comet_miireg2offset[32] = {
+	0xB4, 0xB8, 0xBC, 0xC0,  0xC4, 0xC8, 0xCC, 0,  0,0,0,0,  0,0,0,0,
+	0,0xD0,0,0,  0,0,0,0,  0,0,0,0, 0, 0xD4, 0xD8, 0xDC, };
+
+
+/* MII transceiver control section.
+   Read and write the MII registers using software-generated serial
+   MDIO protocol.  See the MII specifications or DP83840A data sheet
+   for details. */
+
+int tulip_mdio_read(struct rtnet_device *rtdev, int phy_id, int location)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	int i;
+	int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location;
+	int retval = 0;
+	long ioaddr = rtdev->base_addr;
+	long mdio_addr = ioaddr + CSR9;
+	unsigned long flags;
+
+	if (location & ~0x1f)
+		return 0xffff;
+
+	if (tp->chip_id == COMET  &&  phy_id == 30) {
+		if (comet_miireg2offset[location])
+			return inl(ioaddr + comet_miireg2offset[location]);
+		return 0xffff;
+	}
+
+	spin_lock_irqsave(&tp->mii_lock, flags);
+	if (tp->chip_id == LC82C168) {
+		int i = 1000;
+		outl(0x60020000 + (phy_id<<23) + (location<<18), ioaddr + 0xA0);
+		inl(ioaddr + 0xA0);
+		inl(ioaddr + 0xA0);
+		while (--i > 0) {
+			barrier();
+			if ( ! ((retval = inl(ioaddr + 0xA0)) & 0x80000000))
+				break;
+		}
+		spin_unlock_irqrestore(&tp->mii_lock, flags);
+		return retval & 0xffff;
+	}
+
+	/* Establish sync by sending at least 32 logic ones. */
+	for (i = 32; i >= 0; i--) {
+		outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
+		mdio_delay();
+		outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	/* Shift the read command bits out. */
+	for (i = 15; i >= 0; i--) {
+		int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+
+		outl(MDIO_ENB | dataval, mdio_addr);
+		mdio_delay();
+		outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	/* Read the two transition, 16 data, and wire-idle bits. */
+	for (i = 19; i > 0; i--) {
+		outl(MDIO_ENB_IN, mdio_addr);
+		mdio_delay();
+		retval = (retval << 1) | ((inl(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+		outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+
+	spin_unlock_irqrestore(&tp->mii_lock, flags);
+	return (retval>>1) & 0xffff;
+}
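+
+/*
+ * Usage sketch (illustration only, not part of the driver): polling the MII
+ * BMSR (register 1) of the first transceiver found by tulip_find_mii() to
+ * test for link, much as tulip_up() does when it logs the MII status.  The
+ * helper name below is hypothetical.
+ */
+#if 0
+static int tulip_example_link_up(struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	int bmsr = tulip_mdio_read(rtdev, tp->phys[0], 1);
+
+	return bmsr != 0xffff && (bmsr & 0x0004);	/* BMSR link status bit */
+}
+#endif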
+
+void tulip_mdio_write(struct rtnet_device *rtdev, int phy_id, int location, int val)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	int i;
+	int cmd = (0x5002 << 16) | ((phy_id & 0x1f) << 23) | (location<<18) | (val & 0xffff);
+	long ioaddr = rtdev->base_addr;
+	long mdio_addr = ioaddr + CSR9;
+	unsigned long flags;
+
+	if (location & ~0x1f)
+		return;
+
+	if (tp->chip_id == COMET && phy_id == 30) {
+		if (comet_miireg2offset[location])
+			outl(val, ioaddr + comet_miireg2offset[location]);
+		return;
+	}
+
+	spin_lock_irqsave(&tp->mii_lock, flags);
+	if (tp->chip_id == LC82C168) {
+		int i = 1000;
+		outl(cmd, ioaddr + 0xA0);
+		do {
+			barrier();
+			if ( ! (inl(ioaddr + 0xA0) & 0x80000000))
+				break;
+		} while (--i > 0);
+		spin_unlock_irqrestore(&tp->mii_lock, flags);
+		return;
+	}
+
+	/* Establish sync by sending 32 logic ones. */
+	for (i = 32; i >= 0; i--) {
+		outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
+		mdio_delay();
+		outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	/* Shift the command bits out. */
+	for (i = 31; i >= 0; i--) {
+		int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+		outl(MDIO_ENB | dataval, mdio_addr);
+		mdio_delay();
+		outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	/* Clear out extra bits. */
+	for (i = 2; i > 0; i--) {
+		outl(MDIO_ENB_IN, mdio_addr);
+		mdio_delay();
+		outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+
+	spin_unlock_irqrestore(&tp->mii_lock, flags);
+}
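+
+/*
+ * Usage sketch (illustration only): restarting autonegotiation through the
+ * MII BMCR (register 0) with the standard enable/restart bits 0x1200, as a
+ * generic MII driver would.  Not taken from the original tulip code.
+ */
+#if 0
+	tulip_mdio_write(rtdev, tp->phys[0], 0, 0x1200);	/* BMCR: AN enable + restart */
+#endif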
+
+
+/* Set up the transceiver control registers for the selected media type. */
+void tulip_select_media(struct rtnet_device *rtdev, int startup)
+{
+	long ioaddr = rtdev->base_addr;
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	struct mediatable *mtable = tp->mtable;
+	u32 new_csr6;
+	int i;
+
+	if (mtable) {
+		struct medialeaf *mleaf = &mtable->mleaf[tp->cur_index];
+		unsigned char *p = mleaf->leafdata;
+		switch (mleaf->type) {
+		case 0:					/* 21140 non-MII xcvr. */
+			if (tulip_debug > 1)
+				/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver"
+					   " with control setting %2.2x.\n",
+					   rtdev->name, p[1]);
+			rtdev->if_port = p[0];
+			if (startup)
+				outl(mtable->csr12dir | 0x100, ioaddr + CSR12);
+			outl(p[1], ioaddr + CSR12);
+			new_csr6 = 0x02000000 | ((p[2] & 0x71) << 18);
+			break;
+		case 2: case 4: {
+			u16 setup[5];
+			u32 csr13val, csr14val, csr15dir, csr15val;
+			for (i = 0; i < 5; i++)
+				setup[i] = get_u16(&p[i*2 + 1]);
+
+			rtdev->if_port = p[0] & MEDIA_MASK;
+			if (tulip_media_cap[rtdev->if_port] & MediaAlwaysFD)
+				tp->full_duplex = 1;
+
+			if (startup && mtable->has_reset) {
+				struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
+				unsigned char *rst = rleaf->leafdata;
+				if (tulip_debug > 1)
+					/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
+						   rtdev->name);
+				for (i = 0; i < rst[0]; i++)
+					outl(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
+			}
+			if (tulip_debug > 1)
+				/*RTnet*/rtdm_printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control "
+					   "%4.4x/%4.4x.\n",
+					   rtdev->name, medianame[rtdev->if_port], setup[0], setup[1]);
+			if (p[0] & 0x40) {	/* SIA (CSR13-15) setup values are provided. */
+				csr13val = setup[0];
+				csr14val = setup[1];
+				csr15dir = (setup[3]<<16) | setup[2];
+				csr15val = (setup[4]<<16) | setup[2];
+				outl(0, ioaddr + CSR13);
+				outl(csr14val, ioaddr + CSR14);
+				outl(csr15dir, ioaddr + CSR15);	/* Direction */
+				outl(csr15val, ioaddr + CSR15);	/* Data */
+				outl(csr13val, ioaddr + CSR13);
+			} else {
+				csr13val = 1;
+				csr14val = 0;
+				csr15dir = (setup[0]<<16) | 0x0008;
+				csr15val = (setup[1]<<16) | 0x0008;
+				if (rtdev->if_port <= 4)
+					csr14val = t21142_csr14[rtdev->if_port];
+				if (startup) {
+					outl(0, ioaddr + CSR13);
+					outl(csr14val, ioaddr + CSR14);
+				}
+				outl(csr15dir, ioaddr + CSR15);	/* Direction */
+				outl(csr15val, ioaddr + CSR15);	/* Data */
+				if (startup) outl(csr13val, ioaddr + CSR13);
+			}
+			if (tulip_debug > 1)
+				/*RTnet*/rtdm_printk(KERN_DEBUG "%s:  Setting CSR15 to %8.8x/%8.8x.\n",
+					   rtdev->name, csr15dir, csr15val);
+			if (mleaf->type == 4)
+				new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18);
+			else
+				new_csr6 = 0x82420000;
+			break;
+		}
+		case 1: case 3: {
+			int phy_num = p[0];
+			int init_length = p[1];
+			u16 *misc_info, tmp_info;
+
+			rtdev->if_port = 11;
+			new_csr6 = 0x020E0000;
+			if (mleaf->type == 3) {	/* 21142 */
+				u16 *init_sequence = (u16*)(p+2);
+				u16 *reset_sequence = &((u16*)(p+3))[init_length];
+				int reset_length = p[2 + init_length*2];
+				misc_info = reset_sequence + reset_length;
+				if (startup)
+					for (i = 0; i < reset_length; i++)
+						outl(get_u16(&reset_sequence[i]) << 16, ioaddr + CSR15);
+				for (i = 0; i < init_length; i++)
+					outl(get_u16(&init_sequence[i]) << 16, ioaddr + CSR15);
+			} else {
+				u8 *init_sequence = p + 2;
+				u8 *reset_sequence = p + 3 + init_length;
+				int reset_length = p[2 + init_length];
+				misc_info = (u16*)(reset_sequence + reset_length);
+				if (startup) {
+					outl(mtable->csr12dir | 0x100, ioaddr + CSR12);
+					for (i = 0; i < reset_length; i++)
+						outl(reset_sequence[i], ioaddr + CSR12);
+				}
+				for (i = 0; i < init_length; i++)
+					outl(init_sequence[i], ioaddr + CSR12);
+			}
+			tmp_info = get_u16(&misc_info[1]);
+			if (tmp_info)
+				tp->advertising[phy_num] = tmp_info | 1;
+			if (tmp_info && startup < 2) {
+				if (tp->mii_advertise == 0)
+					tp->mii_advertise = tp->advertising[phy_num];
+				if (tulip_debug > 1)
+					/*RTnet*/rtdm_printk(KERN_DEBUG "%s:  Advertising %4.4x on MII %d.\n",
+					       rtdev->name, tp->mii_advertise, tp->phys[phy_num]);
+				tulip_mdio_write(rtdev, tp->phys[phy_num], 4, tp->mii_advertise);
+			}
+			break;
+		}
+		case 5: case 6: {
+			u16 setup[5];
+
+			new_csr6 = 0; /* FIXME */
+
+			for (i = 0; i < 5; i++)
+				setup[i] = get_u16(&p[i*2 + 1]);
+
+			if (startup && mtable->has_reset) {
+				struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
+				unsigned char *rst = rleaf->leafdata;
+				if (tulip_debug > 1)
+					/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
+						   rtdev->name);
+				for (i = 0; i < rst[0]; i++)
+					outl(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
+			}
+
+			break;
+		}
+		default:
+			/*RTnet*/rtdm_printk(KERN_DEBUG "%s:  Invalid media table selection %d.\n",
+					   rtdev->name, mleaf->type);
+			new_csr6 = 0x020E0000;
+		}
+		if (tulip_debug > 1)
+			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %2.2x.\n",
+				   rtdev->name, medianame[rtdev->if_port],
+				   inl(ioaddr + CSR12) & 0xff);
+	} else if (tp->chip_id == DC21041) {
+		int port = rtdev->if_port <= 4 ? rtdev->if_port : 0;
+		if (tulip_debug > 1)
+			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: 21041 using media %s, CSR12 is %4.4x.\n",
+				   rtdev->name, medianame[port == 3 ? 12: port],
+				   inl(ioaddr + CSR12));
+		outl(0x00000000, ioaddr + CSR13); /* Reset the serial interface */
+		outl(t21041_csr14[port], ioaddr + CSR14);
+		outl(t21041_csr15[port], ioaddr + CSR15);
+		outl(t21041_csr13[port], ioaddr + CSR13);
+		new_csr6 = 0x80020000;
+	} else if (tp->chip_id == LC82C168) {
+		if (startup && ! tp->medialock)
+			rtdev->if_port = tp->mii_cnt ? 11 : 0;
+		if (tulip_debug > 1)
+			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s.\n",
+				   rtdev->name, inl(ioaddr + 0xB8), medianame[rtdev->if_port]);
+		if (tp->mii_cnt) {
+			new_csr6 = 0x810C0000;
+			outl(0x0001, ioaddr + CSR15);
+			outl(0x0201B07A, ioaddr + 0xB8);
+		} else if (startup) {
+			/* Start with 10mbps to do autonegotiation. */
+			outl(0x32, ioaddr + CSR12);
+			new_csr6 = 0x00420000;
+			outl(0x0001B078, ioaddr + 0xB8);
+			outl(0x0201B078, ioaddr + 0xB8);
+		} else if (rtdev->if_port == 3  ||  rtdev->if_port == 5) {
+			outl(0x33, ioaddr + CSR12);
+			new_csr6 = 0x01860000;
+			/* Trigger autonegotiation. */
+			outl(startup ? 0x0201F868 : 0x0001F868, ioaddr + 0xB8);
+		} else {
+			outl(0x32, ioaddr + CSR12);
+			new_csr6 = 0x00420000;
+			outl(0x1F078, ioaddr + 0xB8);
+		}
+	} else if (tp->chip_id == DC21040) {					/* 21040 */
+		/* Turn on the xcvr interface. */
+		int csr12 = inl(ioaddr + CSR12);
+		if (tulip_debug > 1)
+			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: 21040 media type is %s, CSR12 is %2.2x.\n",
+				   rtdev->name, medianame[rtdev->if_port], csr12);
+		if (tulip_media_cap[rtdev->if_port] & MediaAlwaysFD)
+			tp->full_duplex = 1;
+		new_csr6 = 0x20000;
+		/* Set the full duplex match frame. */
+		outl(FULL_DUPLEX_MAGIC, ioaddr + CSR11);
+		outl(0x00000000, ioaddr + CSR13); /* Reset the serial interface */
+		if (t21040_csr13[rtdev->if_port] & 8) {
+			outl(0x0705, ioaddr + CSR14);
+			outl(0x0006, ioaddr + CSR15);
+		} else {
+			outl(0xffff, ioaddr + CSR14);
+			outl(0x0000, ioaddr + CSR15);
+		}
+		outl(0x8f01 | t21040_csr13[rtdev->if_port], ioaddr + CSR13);
+	} else {					/* Unknown chip type with no media table. */
+		if (tp->default_port == 0)
+			rtdev->if_port = tp->mii_cnt ? 11 : 3;
+		if (tulip_media_cap[rtdev->if_port] & MediaIsMII) {
+			new_csr6 = 0x020E0000;
+		} else if (tulip_media_cap[rtdev->if_port] & MediaIsFx) {
+			new_csr6 = 0x02860000;
+		} else
+			new_csr6 = 0x03860000;
+		if (tulip_debug > 1)
+			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: No media description table, assuming "
+				   "%s transceiver, CSR12 %2.2x.\n",
+				   rtdev->name, medianame[rtdev->if_port],
+				   inl(ioaddr + CSR12));
+	}
+
+	tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
+
+	mdelay(1);
+
+	return;
+}
+
+/*
+  Check the MII negotiated duplex and change the CSR6 setting if
+  required.
+  Return 0 if everything is OK.
+  Return < 0 if the transceiver is missing or has no link beat.
+  */
+int tulip_check_duplex(struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = rtdev->priv;
+	unsigned int bmsr, lpa, negotiated, new_csr6;
+
+	bmsr = tulip_mdio_read(rtdev, tp->phys[0], MII_BMSR);
+	lpa = tulip_mdio_read(rtdev, tp->phys[0], MII_LPA);
+	if (tulip_debug > 1)
+		/*RTnet*/rtdm_printk(KERN_INFO "%s: MII status %4.4x, Link partner report "
+			   "%4.4x.\n", rtdev->name, bmsr, lpa);
+	if (bmsr == 0xffff)
+		return -2;
+	if ((bmsr & BMSR_LSTATUS) == 0) {
+		int new_bmsr = tulip_mdio_read(rtdev, tp->phys[0], MII_BMSR);
+		if ((new_bmsr & BMSR_LSTATUS) == 0) {
+			if (tulip_debug  > 1)
+				/*RTnet*/rtdm_printk(KERN_INFO "%s: No link beat on the MII interface,"
+					   " status %4.4x.\n", rtdev->name, new_bmsr);
+			return -1;
+		}
+	}
+	negotiated = lpa & tp->advertising[0];
+	tp->full_duplex = mii_duplex(tp->full_duplex_lock, negotiated);
+
+	new_csr6 = tp->csr6;
+
+	if (negotiated & LPA_100) new_csr6 &= ~TxThreshold;
+	else			  new_csr6 |= TxThreshold;
+	if (tp->full_duplex) new_csr6 |= FullDuplex;
+	else		     new_csr6 &= ~FullDuplex;
+
+	if (new_csr6 != tp->csr6) {
+		tp->csr6 = new_csr6;
+		tulip_restart_rxtx(tp);
+
+		if (tulip_debug > 0)
+			/*RTnet*/rtdm_printk(KERN_INFO "%s: Setting %s-duplex based on MII"
+				   "#%d link partner capability of %4.4x.\n",
+				   rtdev->name, tp->full_duplex ? "full" : "half",
+				   tp->phys[0], lpa);
+		return 1;
+	}
+
+	return 0;
+}
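+
+/*
+ * Usage sketch (an assumption, not part of the original driver): a periodic
+ * media watchdog could act on the return codes documented above, e.g. fall
+ * back to a fresh media selection when the transceiver reports no link beat.
+ */
+static inline void tulip_duplex_watchdog_sketch(struct rtnet_device *rtdev)
+{
+	if (tulip_check_duplex(rtdev) < 0) {
+		/* transceiver missing or no link beat: redo media selection */
+		tulip_select_media(rtdev, 0);
+	}
+	/* >= 0: either nothing changed (0) or CSR6 was rewritten and the
+	   Rx/Tx engines restarted (1), so there is nothing left to do */
+}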
+
+void tulip_find_mii (struct rtnet_device *rtdev, int board_idx)
+{
+	struct tulip_private *tp = rtdev->priv;
+	int phyn, phy_idx = 0;
+	int mii_reg0;
+	int mii_advert;
+	unsigned int to_advert, new_bmcr, ane_switch;
+
+	/* Find the connected MII xcvrs.
+	   Doing this in open() would allow detecting external xcvrs later,
+	   but takes much time. */
+	for (phyn = 1; phyn <= 32 && phy_idx < sizeof (tp->phys); phyn++) {
+		int phy = phyn & 0x1f;
+		int mii_status = tulip_mdio_read (rtdev, phy, MII_BMSR);
+		if ((mii_status & 0x8301) == 0x8001 ||
+		    ((mii_status & BMSR_100BASE4) == 0
+		     && (mii_status & 0x7800) != 0)) {
+			/* preserve Becker logic, gain indentation level */
+		} else {
+			continue;
+		}
+
+		mii_reg0 = tulip_mdio_read (rtdev, phy, MII_BMCR);
+		mii_advert = tulip_mdio_read (rtdev, phy, MII_ADVERTISE);
+		ane_switch = 0;
+
+		/* if not advertising at all, gen an
+		 * advertising value from the capability
+		 * bits in BMSR
+		 */
+		if ((mii_advert & ADVERTISE_ALL) == 0) {
+			unsigned int tmpadv = tulip_mdio_read (rtdev, phy, MII_BMSR);
+			mii_advert = ((tmpadv >> 6) & 0x3e0) | 1;
+		}
+
+		if (tp->mii_advertise) {
+			tp->advertising[phy_idx] =
+			to_advert = tp->mii_advertise;
+		} else if (tp->advertising[phy_idx]) {
+			to_advert = tp->advertising[phy_idx];
+		} else {
+			tp->advertising[phy_idx] =
+			tp->mii_advertise =
+			to_advert = mii_advert;
+		}
+
+		tp->phys[phy_idx++] = phy;
+
+		/*RTnet*/rtdm_printk(KERN_INFO "tulip%d:  MII transceiver #%d "
+			"config %4.4x status %4.4x advertising %4.4x.\n",
+			board_idx, phy, mii_reg0, mii_status, mii_advert);
+
+		/* Fixup for DLink with miswired PHY. */
+		if (mii_advert != to_advert) {
+			/*RTnet*/rtdm_printk(KERN_DEBUG "tulip%d:  Advertising %4.4x on PHY %d,"
+				" previously advertising %4.4x.\n",
+				board_idx, to_advert, phy, mii_advert);
+			tulip_mdio_write (rtdev, phy, 4, to_advert);
+		}
+
+		/* Enable autonegotiation: some boards default to off. */
+		if (tp->default_port == 0) {
+			new_bmcr = mii_reg0 | BMCR_ANENABLE;
+			if (new_bmcr != mii_reg0) {
+				new_bmcr |= BMCR_ANRESTART;
+				ane_switch = 1;
+			}
+		}
+		/* ...or disable nway, if forcing media */
+		else {
+			new_bmcr = mii_reg0 & ~BMCR_ANENABLE;
+			if (new_bmcr != mii_reg0)
+				ane_switch = 1;
+		}
+
+		/* clear out bits we never want at this point */
+		new_bmcr &= ~(BMCR_CTST | BMCR_FULLDPLX | BMCR_ISOLATE |
+			      BMCR_PDOWN | BMCR_SPEED100 | BMCR_LOOPBACK |
+			      BMCR_RESET);
+
+		if (tp->full_duplex)
+			new_bmcr |= BMCR_FULLDPLX;
+		if (tulip_media_cap[tp->default_port] & MediaIs100)
+			new_bmcr |= BMCR_SPEED100;
+
+		if (new_bmcr != mii_reg0) {
+			/* some phys need the ANE switch to
+			 * happen before forced media settings
+			 * will "take."  However, we write the
+			 * same value twice in order not to
+			 * confuse the sane phys.
+			 */
+			if (ane_switch) {
+				tulip_mdio_write (rtdev, phy, MII_BMCR, new_bmcr);
+				udelay (10);
+			}
+			tulip_mdio_write (rtdev, phy, MII_BMCR, new_bmcr);
+		}
+	}
+	tp->mii_cnt = phy_idx;
+	if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) {
+		/*RTnet*/rtdm_printk(KERN_INFO "tulip%d: ***WARNING***: No MII transceiver found!\n",
+			board_idx);
+		tp->phys[0] = 1;
+	}
+}
+++ linux-patched/drivers/xenomai/net/drivers/tulip/Makefile	2022-03-21 12:58:29.645886465 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/tulip/pnic2.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_TULIP) += rt_tulip.o
+
+rt_tulip-y := \
+	tulip_core.o \
+	eeprom.o \
+	interrupt.o \
+	media.o \
+	21142.o \
+	pnic.o \
+	pnic2.o
+++ linux-patched/drivers/xenomai/net/drivers/tulip/pnic2.c	2022-03-21 12:58:29.640886514 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/tulip/tulip.h	1970-01-01 01:00:00.000000000 +0100
+/*
+	drivers/net/tulip/pnic2.c
+
+	Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
+	Copyright 2000,2001  The Linux Kernel Team
+	Written/copyright 1994-2001 by Donald Becker.
+        Modified to help support PNIC_II by Kevin B. Hendricks
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+	for more information on this driver, or visit the project
+	Web page at http://sourceforge.net/projects/tulip/
+
+*/
+/* Ported to RTnet by Wittawat Yamwong <wittawat@web.de> */
+
+
+/* Understanding the PNIC_II - everything in this file is based
+ * on the PNIC_II_PDF datasheet which is sorely lacking in detail
+ *
+ * As I understand things, here are the registers and bits that
+ * explain the masks and constants used in this file that are
+ * either different from the 21142/3 or important for basic operation.
+ *
+ *
+ * CSR 6  (mask = 0xfe3bd1fd of bits not to change)
+ * -----
+ * Bit 24    - SCR
+ * Bit 23    - PCS
+ * Bit 22    - TTM (Transmit Threshold Mode)
+ * Bit 18    - Port Select
+ * Bit 13    - Start - 1, Stop - 0 Transmissions
+ * Bit 11:10 - Loop Back Operation Mode
+ * Bit 9     - Full Duplex mode (Advertise 10BaseT-FD if CSR14<7> is set)
+ * Bit 1     - Start - 1, Stop - 0 Receive
+ *
+ *
+ * CSR 14  (mask = 0xfff0ee39 of bits not to change)
+ * ------
+ * Bit 19    - PAUSE-Pause
+ * Bit 18    - Advertise T4
+ * Bit 17    - Advertise 100baseTx-FD
+ * Bit 16    - Advertise 100baseTx-HD
+ * Bit 12    - LTE - Link Test Enable
+ * Bit 7     - ANE - Auto Negotiate Enable
+ * Bit 6     - HDE - Advertise 10baseT-HD
+ * Bit 2     - Reset to Power down - kept as 1 for normal operation
+ * Bit 1     -  Loop Back enable for 10baseT MCC
+ *
+ *
+ * CSR 12
+ * ------
+ * Bit 25    - Partner can do T4
+ * Bit 24    - Partner can do 100baseTx-FD
+ * Bit 23    - Partner can do 100baseTx-HD
+ * Bit 22    - Partner can do 10baseT-FD
+ * Bit 21    - Partner can do 10baseT-HD
+ * Bit 15    - LPN is 1 if all above bits are valid, otherwise 0
+ * Bit 14:12 - autonegotiation state (write 001 to start autonegotiate)
+ * Bit 3     - Autopolarity state
+ * Bit 2     - LS10B - link state of 10baseT 0 - good, 1 - failed
+ * Bit 1     - LS100B - link state of 100baseT 0 - good, 1 - failed
+ *
+ *
+ * Data Port Selection Info
+ *-------------------------
+ *
+ * CSR14<7>   CSR6<18>    CSR6<22>    CSR6<23>    CSR6<24>   MODE/PORT
+ *   1           0           0 (X)       0 (X)       1        NWAY
+ *   0           0           1           0 (X)       0        10baseT
+ *   0           1           0           1           1 (X)    100baseT
+ *
+ *
+ */
+
+
+
+#include "tulip.h"
+#include <linux/pci.h>
+#include <linux/delay.h>
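+
+/*
+ * Illustrative helper (an assumption, not code used by this driver): the
+ * Data Port Selection table above expressed as CSR6 bit settings.  The csr6
+ * argument is assumed to have been masked with 0xfe3bd1fd already, so that
+ * bits 18, 22, 23 and 24 start out cleared; CSR14<7> is handled separately.
+ */
+static inline int pnic2_csr6_for_port_sketch(int csr6, int want_nway,
+					     int want_100baseT)
+{
+	if (want_nway)
+		return csr6 | (1 << 24);	/* NWAY negotiation mode */
+	if (want_100baseT)			/* bit 24 is a don't-care here */
+		return csr6 | (1 << 18) | (1 << 23);
+	return csr6 | (1 << 22);		/* 10baseT */
+}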
+
+
+void pnic2_start_nway(/*RTnet*/struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+        int csr14;
+        int csr12;
+
+        /* set up what to advertise during the negotiation */
+
+        /* load in csr14  and mask off bits not to touch
+         * comment at top of file explains mask value
+         */
+	csr14 = (inl(ioaddr + CSR14) & 0xfff0ee39);
+
+        /* bit 17 - advertise 100baseTx-FD */
+        if (tp->sym_advertise & 0x0100) csr14 |= 0x00020000;
+
+        /* bit 16 - advertise 100baseTx-HD */
+        if (tp->sym_advertise & 0x0080) csr14 |= 0x00010000;
+
+        /* bit 6 - advertise 10baseT-HD */
+        if (tp->sym_advertise & 0x0020) csr14 |= 0x00000040;
+
+        /* Now set bit 12 Link Test Enable, Bit 7 Autonegotiation Enable
+         * and bit 0 Don't PowerDown 10baseT
+         */
+        csr14 |= 0x00001184;
+
+	if (tulip_debug > 1)
+		printk(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, "
+                      "csr14=%8.8x.\n", rtdev->name, csr14);
+
+        /* tell pnic2_lnk_change we are doing an nway negotiation */
+	rtdev->if_port = 0;
+	tp->nway = tp->mediasense = 1;
+	tp->nwayset = tp->lpar = 0;
+
+        /* now we have to set up csr6 for NWAY state */
+
+	tp->csr6 = inl(ioaddr + CSR6);
+	if (tulip_debug > 1)
+		printk(KERN_DEBUG "%s: On Entry to Nway, "
+                      "csr6=%8.8x.\n", rtdev->name, tp->csr6);
+
+        /* mask off any bits not to touch
+         * comment at top of file explains mask value
+         */
+	tp->csr6 = tp->csr6 & 0xfe3bd1fd;
+
+        /* don't forget that bit 9 is also used for advertising */
+        /* advertise 10baseT-FD for the negotiation (bit 9) */
+        if (tp->sym_advertise & 0x0040) tp->csr6 |= 0x00000200;
+
+        /* set bit 24 for nway negotiation mode ...
+         * see Data Port Selection comment at top of file
+         * and "Stop" - reset both Transmit (bit 13) and Receive (bit 1)
+         */
+        tp->csr6 |= 0x01000000;
+	outl(csr14, ioaddr + CSR14);
+	outl(tp->csr6, ioaddr + CSR6);
+        udelay(100);
+
+        /* all set up so now force the negotiation to begin */
+
+        /* read in current values and mask off all but the
+	 * Autonegotiation bits 14:12.  Writing a 001 to those bits
+         * should start the autonegotiation
+         */
+        csr12 = (inl(ioaddr + CSR12) & 0xffff8fff);
+        csr12 |= 0x1000;
+	outl(csr12, ioaddr + CSR12);
+}
+
+
+++ linux-patched/drivers/xenomai/net/drivers/tulip/tulip.h	2022-03-21 12:58:29.634886572 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/tulip/eeprom.c	1970-01-01 01:00:00.000000000 +0100
+/*
+        drivers/net/tulip/tulip.h
+
+        Copyright 2000,2001  The Linux Kernel Team
+        Written/copyright 1994-2001 by Donald Becker.
+
+        This software may be used and distributed according to the terms
+        of the GNU General Public License, incorporated herein by reference.
+
+        Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+        for more information on this driver, or visit the project
+        Web page at http://sourceforge.net/projects/tulip/
+
+*/
+/* Ported to RTnet by Wittawat Yamwong <wittawat@web.de> */
+
+#ifndef __NET_TULIP_H__
+#define __NET_TULIP_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include <rtnet_port.h>
+
+
+
+/* undefine, or define to various debugging levels (>4 == obscene levels) */
+#define TULIP_DEBUG 1
+
+/* undefine USE_IO_OPS for MMIO, define for PIO */
+#ifdef CONFIG_TULIP_MMIO
+# undef USE_IO_OPS
+#else
+# define USE_IO_OPS 1
+#endif
+
+
+
+struct tulip_chip_table {
+        char *chip_name;
+        unsigned int io_size;
+        int valid_intrs;	/* CSR7 interrupt enable settings */
+        int flags;
+};
+
+
+enum tbl_flag {
+        HAS_MII			= 0x0001,
+        HAS_MEDIA_TABLE		= 0x0002,
+        CSR12_IN_SROM		= 0x0004,
+        ALWAYS_CHECK_MII	= 0x0008,
+        HAS_ACPI		= 0x0010,
+        MC_HASH_ONLY		= 0x0020, /* Hash-only multicast filter. */
+        HAS_PNICNWAY		= 0x0080,
+        HAS_NWAY		= 0x0040, /* Uses internal NWay xcvr. */
+        HAS_INTR_MITIGATION	= 0x0100,
+        IS_ASIX			= 0x0200,
+        HAS_8023X		= 0x0400,
+        COMET_MAC_ADDR		= 0x0800,
+        HAS_PCI_MWI		= 0x1000,
+};
+
+
+/* chip types.  careful!  order is VERY IMPORTANT here, as these
+ * are used throughout the driver as indices into arrays */
+/* Note 21142 == 21143. */
+enum chips {
+        DC21040 = 0,
+        DC21041 = 1,
+        DC21140 = 2,
+        DC21142 = 3, DC21143 = 3,
+        LC82C168,
+        MX98713,
+        MX98715,
+        MX98725,
+        AX88140,
+        PNIC2,
+        COMET,
+        COMPEX9881,
+        I21145,
+        DM910X,
+};
+
+
+enum MediaIs {
+        MediaIsFD = 1,
+        MediaAlwaysFD = 2,
+        MediaIsMII = 4,
+        MediaIsFx = 8,
+        MediaIs100 = 16
+};
+
+
+/* Offsets to the Command and Status Registers, "CSRs".  All accesses
+   must be longword instructions and quadword aligned. */
+enum tulip_offsets {
+        CSR0 = 0,
+        CSR1 = 0x08,
+        CSR2 = 0x10,
+        CSR3 = 0x18,
+        CSR4 = 0x20,
+        CSR5 = 0x28,
+        CSR6 = 0x30,
+        CSR7 = 0x38,
+        CSR8 = 0x40,
+        CSR9 = 0x48,
+        CSR10 = 0x50,
+        CSR11 = 0x58,
+        CSR12 = 0x60,
+        CSR13 = 0x68,
+        CSR14 = 0x70,
+        CSR15 = 0x78,
+};
+
+/* register offset and bits for CFDD PCI config reg */
+enum pci_cfg_driver_reg {
+        CFDD = 0x40,
+        CFDD_Sleep = (1 << 31),
+        CFDD_Snooze = (1 << 30),
+};
+
+
+/* The bits in the CSR5 status registers, mostly interrupt sources. */
+enum status_bits {
+        TimerInt = 0x800,
+        SytemError = 0x2000,
+        TPLnkFail = 0x1000,
+        TPLnkPass = 0x10,
+        NormalIntr = 0x10000,
+        AbnormalIntr = 0x8000,
+        RxJabber = 0x200,
+        RxDied = 0x100,
+        RxNoBuf = 0x80,
+        RxIntr = 0x40,
+        TxFIFOUnderflow = 0x20,
+        TxJabber = 0x08,
+        TxNoBuf = 0x04,
+        TxDied = 0x02,
+        TxIntr = 0x01,
+};
+
+
+enum tulip_mode_bits {
+        TxThreshold		= (1 << 22),
+        FullDuplex		= (1 << 9),
+        TxOn			= 0x2000,
+        AcceptBroadcast		= 0x0100,
+        AcceptAllMulticast	= 0x0080,
+        AcceptAllPhys		= 0x0040,
+        AcceptRunt		= 0x0008,
+        RxOn			= 0x0002,
+        RxTx			= (TxOn | RxOn),
+};
+
+
+enum tulip_busconfig_bits {
+        MWI			= (1 << 24),
+        MRL			= (1 << 23),
+        MRM			= (1 << 21),
+        CALShift		= 14,
+        BurstLenShift		= 8,
+};
+
+
+/* The Tulip Rx and Tx buffer descriptors. */
+struct tulip_rx_desc {
+        s32 status;
+        s32 length;
+        u32 buffer1;
+        u32 buffer2;
+};
+
+
+struct tulip_tx_desc {
+        s32 status;
+        s32 length;
+        u32 buffer1;
+        u32 buffer2;		/* We use only buffer 1.  */
+};
+
+
+enum desc_status_bits {
+        DescOwned = 0x80000000,
+        RxDescFatalErr = 0x8000,
+        RxWholePkt = 0x0300,
+};
+
+
+enum t21041_csr13_bits {
+        csr13_eng = (0xEF0<<4), /* for eng. purposes only, hardcode at EF0h */
+        csr13_aui = (1<<3), /* clear to force 10bT, set to force AUI/BNC */
+        csr13_cac = (1<<2), /* CSR13/14/15 autoconfiguration */
+        csr13_srl = (1<<0), /* When reset, resets all SIA functions, machines */
+
+        csr13_mask_auibnc = (csr13_eng | csr13_aui | csr13_srl),
+        csr13_mask_10bt = (csr13_eng | csr13_srl),
+};
+
+enum t21143_csr6_bits {
+        csr6_sc = (1<<31),
+        csr6_ra = (1<<30),
+        csr6_ign_dest_msb = (1<<26),
+        csr6_mbo = (1<<25),
+        csr6_scr = (1<<24),  /* scramble mode flag: can't be set */
+        csr6_pcs = (1<<23),  /* Enables PCS functions (symbol mode requires csr6_ps be set) default is set */
+        csr6_ttm = (1<<22),  /* Transmit Threshold Mode, set for 10baseT, 0 for 100BaseTX */
+        csr6_sf = (1<<21),   /* Store and forward. If set ignores TR bits */
+        csr6_hbd = (1<<19),  /* Heart beat disable. Disables SQE function in 10baseT */
+        csr6_ps = (1<<18),   /* Port Select. 0 (default) = 10baseT, 1 = 100baseTX: can't be set */
+        csr6_ca = (1<<17),   /* Collision Offset Enable. If set uses special algorithm in low collision situations */
+        csr6_trh = (1<<15),  /* Transmit Threshold high bit */
+        csr6_trl = (1<<14),  /* Transmit Threshold low bit */
+
+        /***************************************************************
+         * This table shows transmit threshold values based on media   *
+         * and these two registers (from PNIC1 & 2 docs) Note: this is *
+         * all meaningless if sf is set.                               *
+         ***************************************************************/
+
+        /***********************************
+         * (trh,trl) * 100BaseTX * 10BaseT *
+         ***********************************
+         *   (0,0)   *     128   *    72   *
+         *   (0,1)   *     256   *    96   *
+         *   (1,0)   *     512   *   128   *
+         *   (1,1)   *    1024   *   160   *
+         ***********************************/
+
+        csr6_fc = (1<<12),   /* Forces a collision in next transmission (for testing in loopback mode) */
+        csr6_om_int_loop = (1<<10), /* internal (FIFO) loopback flag */
+        csr6_om_ext_loop = (1<<11), /* external (PMD) loopback flag */
+        /* set both and you get (PHY) loopback */
+        csr6_fd = (1<<9),    /* Full duplex mode, disables heartbeat, no loopback */
+        csr6_pm = (1<<7),    /* Pass All Multicast */
+        csr6_pr = (1<<6),    /* Promiscuous mode */
+        csr6_sb = (1<<5),    /* Start(1)/Stop(0) backoff counter */
+        csr6_if = (1<<4),    /* Inverse Filtering, rejects only addresses in address table: can't be set */
+        csr6_pb = (1<<3),    /* Pass Bad Frames, (1) causes even bad frames to be passed on */
+        csr6_ho = (1<<2),    /* Hash-only filtering mode: can't be set */
+        csr6_hp = (1<<0),    /* Hash/Perfect Receive Filtering Mode: can't be set */
+
+        csr6_mask_capture = (csr6_sc | csr6_ca),
+        csr6_mask_defstate = (csr6_mask_capture | csr6_mbo),
+        csr6_mask_hdcap = (csr6_mask_defstate | csr6_hbd | csr6_ps),
+        csr6_mask_hdcaptt = (csr6_mask_hdcap  | csr6_trh | csr6_trl),
+        csr6_mask_fullcap = (csr6_mask_hdcaptt | csr6_fd),
+        csr6_mask_fullpromisc = (csr6_pr | csr6_pm),
+        csr6_mask_filters = (csr6_hp | csr6_ho | csr6_if),
+        csr6_mask_100bt = (csr6_scr | csr6_pcs | csr6_hbd),
+};
+
+
+/* Keep the ring sizes a power of two for efficiency.
+   Making the Tx ring too large decreases the effectiveness of channel
+   bonding and packet priority.
+   There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE	16
+#define RX_RING_SIZE	8 /* RTnet: RX_RING_SIZE*2 rtskbs will be preallocated */
+
+#define MEDIA_MASK     31
+
+#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer. */
+
+#define TULIP_MIN_CACHE_LINE	8	/* in units of 32-bit words */
+
+#if defined(__sparc__) || defined(__hppa__)
+/* The UltraSparc PCI controllers will disconnect at every 64-byte
+ * crossing anyways so it makes no sense to tell Tulip to burst
+ * any more than that.
+ */
+#define TULIP_MAX_CACHE_LINE	16	/* in units of 32-bit words */
+#else
+#define TULIP_MAX_CACHE_LINE	32	/* in units of 32-bit words */
+#endif
+
+
+/* Ring-wrap flag in length field, use for last ring entry.
+        0x01000000 means chain on buffer2 address,
+        0x02000000 means use the ring start address in CSR2/3.
+   Note: Some work-alike chips do not function correctly in chained mode.
+   The ASIX chip works only in chained mode.
+   Thus we indicate ring mode, but always write the 'next' field for
+   chained mode as well.
+*/
+#define DESC_RING_WRAP 0x02000000
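+
+/*
+ * Illustrative sketch (an assumption, not code used by this driver): one way
+ * to prepare a Tx descriptor ring that works both for chips using ring mode
+ * and for work-alike chips that only follow the buffer2 chain, as described
+ * in the comment above.  Endianness conversion is omitted for brevity.
+ */
+static inline void tulip_chain_tx_ring_sketch(struct tulip_tx_desc *ring,
+					      dma_addr_t ring_dma)
+{
+	int i;
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		ring[i].status = 0;	/* owned by the host, not the chip */
+		ring[i].length = 0;
+		/* always chain buffer2 to the next descriptor ... */
+		ring[i].buffer2 = ring_dma +
+			sizeof(struct tulip_tx_desc) * (i + 1);
+	}
+	/* ... and on the last entry set the ring-wrap flag and point the
+	   chain back to the start of the ring */
+	ring[TX_RING_SIZE - 1].length = DESC_RING_WRAP;
+	ring[TX_RING_SIZE - 1].buffer2 = ring_dma;
+}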
+
+
+#define EEPROM_SIZE 128         /* 2 << EEPROM_ADDRLEN */
+
+
+#define RUN_AT(x) (jiffies + (x))
+
+#if defined(__i386__)			/* AKA get_unaligned() */
+#define get_u16(ptr) (*(u16 *)(ptr))
+#else
+#define get_u16(ptr) (((u8*)(ptr))[0] + (((u8*)(ptr))[1]<<8))
+#endif
+
+struct medialeaf {
+        u8 type;
+        u8 media;
+        unsigned char *leafdata;
+};
+
+
+struct mediatable {
+        u16 defaultmedia;
+        u8 leafcount;
+        u8 csr12dir;		/* General purpose pin directions. */
+        unsigned has_mii:1;
+        unsigned has_nonmii:1;
+        unsigned has_reset:6;
+        u32 csr15dir;
+        u32 csr15val;		/* 21143 NWay setting. */
+        struct medialeaf mleaf[0];
+};
+
+
+struct mediainfo {
+        struct mediainfo *next;
+        int info_type;
+        int index;
+        unsigned char *info;
+};
+
+struct ring_info {
+        struct /*RTnet*/rtskb	*skb;
+        dma_addr_t	mapping;
+};
+
+
+struct tulip_private {
+        const char *product_name;
+        /*RTnet*/struct rtnet_device *next_module;
+        struct tulip_rx_desc *rx_ring;
+        struct tulip_tx_desc *tx_ring;
+        dma_addr_t rx_ring_dma;
+        dma_addr_t tx_ring_dma;
+        /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+        struct ring_info tx_buffers[TX_RING_SIZE];
+        /* The addresses of receive-in-place skbuffs. */
+        struct ring_info rx_buffers[RX_RING_SIZE];
+        u16 setup_frame[96];	/* Pseudo-Tx frame to init address table. */
+        int chip_id;
+        int revision;
+        int flags;
+        struct net_device_stats stats;
+        u32 mc_filter[2];
+        /*RTnet*/rtdm_lock_t lock;
+        spinlock_t mii_lock;
+        unsigned int cur_rx, cur_tx;	/* The next free ring entry */
+        unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
+
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+#define RX_A_NBF_STOP 0xffffff3f /* To disable RX and RX-NOBUF ints. */
+        int fc_bit;
+        int mit_sel;
+        int mit_change; /* Signal for Interrupt Mitigation */
+#endif
+        unsigned int full_duplex:1;	/* Full-duplex operation requested. */
+        unsigned int full_duplex_lock:1;
+        unsigned int fake_addr:1;	/* Multiport board faked address. */
+        unsigned int default_port:4;	/* Last dev->if_port value. */
+        unsigned int media2:4;	/* Secondary monitored media port. */
+        unsigned int medialock:1;	/* Don't sense media type. */
+        unsigned int mediasense:1;	/* Media sensing in progress. */
+        unsigned int nway:1, nwayset:1;		/* 21143 internal NWay. */
+        unsigned int csr0;	/* CSR0 setting. */
+        unsigned int csr6;	/* Current CSR6 control settings. */
+        unsigned char eeprom[EEPROM_SIZE];	/* Serial EEPROM contents. */
+        void (*link_change) (/*RTnet*/struct rtnet_device *rtdev, int csr5);
+        u16 sym_advertise, mii_advertise; /* NWay capabilities advertised.  */
+        u16 lpar;		/* 21143 Link partner ability. */
+        u16 advertising[4];
+        signed char phys[4], mii_cnt;	/* MII device addresses. */
+        struct mediatable *mtable;
+        int cur_index;		/* Current media index. */
+        int saved_if_port;
+        struct pci_dev *pdev;
+        int ttimer;
+        int susp_rx;
+        unsigned long nir;
+        unsigned long base_addr;
+        int pad0, pad1;		/* Used for 8-byte alignment */
+        rtdm_irq_t irq_handle;
+};
+
+
+struct eeprom_fixup {
+        char *name;
+        unsigned char addr0;
+        unsigned char addr1;
+        unsigned char addr2;
+        u16 newtable[32];	/* Max length below. */
+};
+
+
+/* 21142.c */
+extern u16 t21142_csr14[];
+void t21142_start_nway(/*RTnet*/struct rtnet_device *rtdev);
+void t21142_lnk_change(/*RTnet*/struct rtnet_device *rtdev, int csr5);
+
+
+/* PNIC2.c */
+void pnic2_lnk_change(/*RTnet*/struct rtnet_device *rtdev, int csr5);
+void pnic2_start_nway(/*RTnet*/struct rtnet_device *rtdev);
+void pnic2_lnk_change(/*RTnet*/struct rtnet_device *rtdev, int csr5);
+
+/* eeprom.c */
+void tulip_parse_eeprom(struct rtnet_device *rtdev);
+int tulip_read_eeprom(long ioaddr, int location, int addr_len);
+
+/* interrupt.c */
+extern unsigned int tulip_max_interrupt_work;
+extern int tulip_rx_copybreak;
+int tulip_interrupt(rtdm_irq_t *irq_handle);
+int tulip_refill_rx(/*RTnet*/struct rtnet_device *rtdev);
+
+/* media.c */
+int tulip_mdio_read(struct rtnet_device *dev, int phy_id, int location);
+void tulip_mdio_write(struct rtnet_device *dev, int phy_id, int location, int value);
+void tulip_select_media(struct rtnet_device *dev, int startup);
+int tulip_check_duplex(struct rtnet_device *dev);
+void tulip_find_mii (struct rtnet_device *dev, int board_idx);
+
+/* pnic.c */
+void pnic_do_nway(/*RTnet*/struct rtnet_device *rtdev);
+void pnic_lnk_change(/*RTnet*/struct rtnet_device *rtdev, int csr5);
+
+/* tulip_core.c */
+extern int tulip_debug;
+extern const char * const medianame[];
+extern const char tulip_media_cap[];
+extern struct tulip_chip_table tulip_tbl[];
+extern u8 t21040_csr13[];
+extern u16 t21041_csr13[];
+extern u16 t21041_csr14[];
+extern u16 t21041_csr15[];
+
+#ifndef USE_IO_OPS
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb(addr) readb((void*)(addr))
+#define inw(addr) readw((void*)(addr))
+#define inl(addr) readl((void*)(addr))
+#define outb(val,addr) writeb((val), (void*)(addr))
+#define outw(val,addr) writew((val), (void*)(addr))
+#define outl(val,addr) writel((val), (void*)(addr))
+#endif /* !USE_IO_OPS */
+
+
+
+static inline void tulip_start_rxtx(struct tulip_private *tp)
+{
+        long ioaddr = tp->base_addr;
+        outl(tp->csr6 | RxTx, ioaddr + CSR6);
+        barrier();
+        (void) inl(ioaddr + CSR6); /* mmio sync */
+}
+
+static inline void tulip_stop_rxtx(struct tulip_private *tp)
+{
+        long ioaddr = tp->base_addr;
+        u32 csr6 = inl(ioaddr + CSR6);
+
+        if (csr6 & RxTx) {
+                outl(csr6 & ~RxTx, ioaddr + CSR6);
+                barrier();
+                (void) inl(ioaddr + CSR6); /* mmio sync */
+        }
+}
+
+static inline void tulip_restart_rxtx(struct tulip_private *tp)
+{
+        tulip_stop_rxtx(tp);
+        rtdm_task_busy_sleep(5);
+        tulip_start_rxtx(tp);
+}
+
+#endif /* __NET_TULIP_H__ */
+++ linux-patched/drivers/xenomai/net/drivers/tulip/eeprom.c	2022-03-21 12:58:29.629886621 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/eepro100.c	1970-01-01 01:00:00.000000000 +0100
+/*
+	drivers/net/tulip/eeprom.c
+
+	Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
+	Copyright 2000,2001  The Linux Kernel Team
+	Written/copyright 1994-2001 by Donald Becker.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+	for more information on this driver, or visit the project
+	Web page at http://sourceforge.net/projects/tulip/
+
+*/
+/* Ported to RTnet by Wittawat Yamwong <wittawat@web.de> */
+
+#include "tulip.h"
+#include <linux/init.h>
+#include <asm/unaligned.h>
+
+
+
+/* Serial EEPROM section. */
+/* The main routine to parse the very complicated SROM structure.
+   Search www.digital.com for "21X4 SROM" to get details.
+   This code is very complex, and will require changes to support
+   additional cards, so I'll be verbose about what is going on.
+   */
+
+/* Known cards that have old-style EEPROMs. */
+static struct eeprom_fixup eeprom_fixups[] = {
+  {"Asante", 0, 0, 0x94, {0x1e00, 0x0000, 0x0800, 0x0100, 0x018c,
+			  0x0000, 0x0000, 0xe078, 0x0001, 0x0050, 0x0018 }},
+  {"SMC9332DST", 0, 0, 0xC0, { 0x1e00, 0x0000, 0x0800, 0x041f,
+			   0x0000, 0x009E, /* 10baseT */
+			   0x0004, 0x009E, /* 10baseT-FD */
+			   0x0903, 0x006D, /* 100baseTx */
+			   0x0905, 0x006D, /* 100baseTx-FD */ }},
+  {"Cogent EM100", 0, 0, 0x92, { 0x1e00, 0x0000, 0x0800, 0x063f,
+				 0x0107, 0x8021, /* 100baseFx */
+				 0x0108, 0x8021, /* 100baseFx-FD */
+				 0x0100, 0x009E, /* 10baseT */
+				 0x0104, 0x009E, /* 10baseT-FD */
+				 0x0103, 0x006D, /* 100baseTx */
+				 0x0105, 0x006D, /* 100baseTx-FD */ }},
+  {"Maxtech NX-110", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x0513,
+				   0x1001, 0x009E, /* 10base2, CSR12 0x10*/
+				   0x0000, 0x009E, /* 10baseT */
+				   0x0004, 0x009E, /* 10baseT-FD */
+				   0x0303, 0x006D, /* 100baseTx, CSR12 0x03 */
+				   0x0305, 0x006D, /* 100baseTx-FD CSR12 0x03 */}},
+  {"Accton EN1207", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x051F,
+				  0x1B01, 0x0000, /* 10base2,   CSR12 0x1B */
+				  0x0B00, 0x009E, /* 10baseT,   CSR12 0x0B */
+				  0x0B04, 0x009E, /* 10baseT-FD,CSR12 0x0B */
+				  0x1B03, 0x006D, /* 100baseTx, CSR12 0x1B */
+				  0x1B05, 0x006D, /* 100baseTx-FD CSR12 0x1B */
+   }},
+  {"NetWinder", 0x00, 0x10, 0x57,
+	/* Default media = MII
+	 * MII block, reset sequence (3) = 0x0821 0x0000 0x0001, capabilities 0x01e1
+	 */
+	{ 0x1e00, 0x0000, 0x000b, 0x8f01, 0x0103, 0x0300, 0x0821, 0x000, 0x0001, 0x0000, 0x01e1 }
+  },
+  {0, 0, 0, 0, {}}};
+
+
+static const char *block_name[] = {
+	"21140 non-MII",
+	"21140 MII PHY",
+	"21142 Serial PHY",
+	"21142 MII PHY",
+	"21143 SYM PHY",
+	"21143 reset method"
+};
+
+
+void tulip_parse_eeprom(/*RTnet*/struct rtnet_device *rtdev)
+{
+	/* The last media info list parsed, for multiport boards.  */
+	static struct mediatable *last_mediatable;
+	static unsigned char *last_ee_data;
+	static int controller_index;
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	unsigned char *ee_data = tp->eeprom;
+	int i;
+
+	tp->mtable = 0;
+	/* Detect an old-style (SA only) EEPROM layout:
+	   memcmp(eedata, eedata+16, 8). */
+	for (i = 0; i < 8; i ++)
+		if (ee_data[i] != ee_data[16+i])
+			break;
+	if (i >= 8) {
+		if (ee_data[0] == 0xff) {
+			if (last_mediatable) {
+				controller_index++;
+				/*RTnet*/rtdm_printk(KERN_INFO "%s:  Controller %d of multiport board.\n",
+					   rtdev->name, controller_index);
+				tp->mtable = last_mediatable;
+				ee_data = last_ee_data;
+				goto subsequent_board;
+			} else
+				/*RTnet*/rtdm_printk(KERN_INFO "%s:  Missing EEPROM, this interface may "
+					   "not work correctly!\n",
+			   rtdev->name);
+			return;
+		}
+	  /* Do a fix-up based on the vendor half of the station address prefix. */
+	  for (i = 0; eeprom_fixups[i].name; i++) {
+		if (rtdev->dev_addr[0] == eeprom_fixups[i].addr0
+			&&  rtdev->dev_addr[1] == eeprom_fixups[i].addr1
+			&&  rtdev->dev_addr[2] == eeprom_fixups[i].addr2) {
+		  if (rtdev->dev_addr[2] == 0xE8  &&  ee_data[0x1a] == 0x55)
+			  i++;			/* An Accton EN1207, not an outlaw Maxtech. */
+		  memcpy(ee_data + 26, eeprom_fixups[i].newtable,
+				 sizeof(eeprom_fixups[i].newtable));
+		  /*RTnet*/rtdm_printk(KERN_INFO "%s: Old format EEPROM on '%s' board.  Using"
+				 " substitute media control info.\n",
+				 rtdev->name, eeprom_fixups[i].name);
+		  break;
+		}
+	  }
+	  if (eeprom_fixups[i].name == NULL) { /* No fixup found. */
+		  /*RTnet*/rtdm_printk(KERN_INFO "%s: Old style EEPROM with no media selection "
+				 "information.\n",
+			   rtdev->name);
+		return;
+	  }
+	}
+
+	controller_index = 0;
+	if (ee_data[19] > 1) {		/* Multiport board. */
+		last_ee_data = ee_data;
+	}
+subsequent_board:
+
+	if (ee_data[27] == 0) {		/* No valid media table. */
+	} else if (tp->chip_id == DC21041) {
+		unsigned char *p = (void *)ee_data + ee_data[27 + controller_index*3];
+		int media = get_u16(p);
+		int count = p[2];
+		p += 3;
+
+		/*RTnet*/rtdm_printk(KERN_INFO "%s: 21041 Media table, default media %4.4x (%s).\n",
+			   rtdev->name, media,
+			   media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+		for (i = 0; i < count; i++) {
+			unsigned char media_block = *p++;
+			int media_code = media_block & MEDIA_MASK;
+			if (media_block & 0x40)
+				p += 6;
+			/*RTnet*/rtdm_printk(KERN_INFO "%s:  21041 media #%d, %s.\n",
+				   rtdev->name, media_code, medianame[media_code]);
+		}
+	} else {
+		unsigned char *p = (void *)ee_data + ee_data[27];
+		unsigned char csr12dir = 0;
+		int count, new_advertise = 0;
+		struct mediatable *mtable;
+		u16 media = get_u16(p);
+
+		p += 2;
+		if (tp->flags & CSR12_IN_SROM)
+			csr12dir = *p++;
+		count = *p++;
+
+	        /* there is no phy information, don't even try to build mtable */
+	        if (count == 0) {
+			if (tulip_debug > 0)
+				/*RTnet*/rtdm_printk(KERN_WARNING "%s: no phy info, aborting mtable build\n", rtdev->name);
+		        return;
+		}
+
+		mtable = (struct mediatable *)
+		    kmalloc(sizeof(struct mediatable) + count*sizeof(struct medialeaf), GFP_KERNEL);
+
+		if (mtable == NULL)
+			return;				/* Horrible, impossible failure. */
+		last_mediatable = tp->mtable = mtable;
+		mtable->defaultmedia = media;
+		mtable->leafcount = count;
+		mtable->csr12dir = csr12dir;
+		mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0;
+		mtable->csr15dir = mtable->csr15val = 0;
+
+		/*RTnet*/rtdm_printk(KERN_INFO "%s:  EEPROM default media type %s.\n", rtdev->name,
+			   media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+		for (i = 0; i < count; i++) {
+			struct medialeaf *leaf = &mtable->mleaf[i];
+
+			if ((p[0] & 0x80) == 0) { /* 21140 Compact block. */
+				leaf->type = 0;
+				leaf->media = p[0] & 0x3f;
+				leaf->leafdata = p;
+				if ((p[2] & 0x61) == 0x01)	/* Bogus, but Znyx boards do it. */
+					mtable->has_mii = 1;
+				p += 4;
+			} else {
+				leaf->type = p[1];
+				if (p[1] == 0x05) {
+					mtable->has_reset = i;
+					leaf->media = p[2] & 0x0f;
+				} else if (tp->chip_id == DM910X && p[1] == 0x80) {
+					/* Hack to ignore Davicom delay period block */
+					mtable->leafcount--;
+					count--;
+					i--;
+					leaf->leafdata = p + 2;
+					p += (p[0] & 0x3f) + 1;
+					continue;
+				} else if (p[1] & 1) {
+					int gpr_len, reset_len;
+
+					mtable->has_mii = 1;
+					leaf->media = 11;
+					gpr_len=p[3]*2;
+					reset_len=p[4+gpr_len]*2;
+					new_advertise |= get_u16(&p[7+gpr_len+reset_len]);
+				} else {
+					mtable->has_nonmii = 1;
+					leaf->media = p[2] & MEDIA_MASK;
+					/* Davicom's media number for 100BaseTX is strange */
+					if (tp->chip_id == DM910X && leaf->media == 1)
+						leaf->media = 3;
+					switch (leaf->media) {
+					case 0: new_advertise |= 0x0020; break;
+					case 4: new_advertise |= 0x0040; break;
+					case 3: new_advertise |= 0x0080; break;
+					case 5: new_advertise |= 0x0100; break;
+					case 6: new_advertise |= 0x0200; break;
+					}
+					if (p[1] == 2  &&  leaf->media == 0) {
+						if (p[2] & 0x40) {
+							u32 base15 = get_unaligned((u16*)&p[7]);
+							mtable->csr15dir =
+								(get_unaligned((u16*)&p[9])<<16) + base15;
+							mtable->csr15val =
+								(get_unaligned((u16*)&p[11])<<16) + base15;
+						} else {
+							mtable->csr15dir = get_unaligned((u16*)&p[3])<<16;
+							mtable->csr15val = get_unaligned((u16*)&p[5])<<16;
+						}
+					}
+				}
+				leaf->leafdata = p + 2;
+				p += (p[0] & 0x3f) + 1;
+			}
+			if (tulip_debug > 1  &&  leaf->media == 11) {
+				unsigned char *bp = leaf->leafdata;
+				/*RTnet*/rtdm_printk(KERN_INFO "%s:  MII interface PHY %d, setup/reset "
+					   "sequences %d/%d long, capabilities %2.2x %2.2x.\n",
+					   rtdev->name, bp[0], bp[1], bp[2 + bp[1]*2],
+					   bp[5 + bp[2 + bp[1]*2]*2], bp[4 + bp[2 + bp[1]*2]*2]);
+			}
+			/*RTnet*/rtdm_printk(KERN_INFO "%s:  Index #%d - Media %s (#%d) described "
+				   "by a %s (%d) block.\n",
+				   rtdev->name, i, medianame[leaf->media & 15], leaf->media,
+				   leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>",
+				   leaf->type);
+		}
+		if (new_advertise)
+			tp->sym_advertise = new_advertise;
+	}
+}
+/* Reading a serial EEPROM is a "bit" grungy, but we work our way through:->.*/
+
+/*  EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK	0x02	/* EEPROM shift clock. */
+#define EE_CS			0x01	/* EEPROM chip select. */
+#define EE_DATA_WRITE	0x04	/* Data from the Tulip to EEPROM. */
+#define EE_WRITE_0		0x01
+#define EE_WRITE_1		0x05
+#define EE_DATA_READ	0x08	/* Data from the EEPROM chip. */
+#define EE_ENB			(0x4800 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+   Even at 33 MHz current PCI implementations don't overrun the EEPROM clock.
+   We add a bus turn-around to ensure that this remains true. */
+#define eeprom_delay()	inl(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_READ_CMD		(6)
+
+/* Note: this routine returns extra data bits for size detection. */
+int tulip_read_eeprom(long ioaddr, int location, int addr_len)
+{
+	int i;
+	unsigned retval = 0;
+	long ee_addr = ioaddr + CSR9;
+	int read_cmd = location | (EE_READ_CMD << addr_len);
+
+	outl(EE_ENB & ~EE_CS, ee_addr);
+	outl(EE_ENB, ee_addr);
+
+	/* Shift the read command bits out. */
+	for (i = 4 + addr_len; i >= 0; i--) {
+		short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+		outl(EE_ENB | dataval, ee_addr);
+		eeprom_delay();
+		outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+		eeprom_delay();
+		retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+	}
+	outl(EE_ENB, ee_addr);
+	eeprom_delay();
+
+	for (i = 16; i > 0; i--) {
+		outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
+		eeprom_delay();
+		retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+		outl(EE_ENB, ee_addr);
+		eeprom_delay();
+	}
+
+	/* Terminate the EEPROM access. */
+	outl(EE_ENB & ~EE_CS, ee_addr);
+	return retval;
+}
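+
+/*
+ * Usage sketch (an assumption, not code from this file): the extra data bits
+ * mentioned above let a caller probe whether the serial EEPROM uses 6- or
+ * 8-bit addressing before reading it in full.
+ */
+static inline int tulip_eeprom_addr_len_sketch(long ioaddr)
+{
+	/* probe with an 8-bit address; the high command-echo bits of the raw
+	   result distinguish the two layouts */
+	return (tulip_read_eeprom(ioaddr, 0xff, 8) & 0x40000) ? 8 : 6;
+}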
+
+++ linux-patched/drivers/xenomai/net/drivers/eepro100.c	2022-03-21 12:58:29.623886679 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/smc91111.c	1970-01-01 01:00:00.000000000 +0100
+/* rtnet/drivers/eepro100-rt.c: An Intel i82557-559 Real-Time-Ethernet driver for Linux. */
+/*
+	RTnet porting 2002 by Jan Kiszka <Jan.Kiszka@web.de>
+	Originally written 1996-1999 by Donald Becker.
+
+	The driver also contains updates by different kernel developers
+	(see incomplete list below).
+	Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
+	Please use this email address and linux-kernel mailing list for bug reports.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
+	It should work with all i82557/558/559 boards.
+
+	Version history:
+	1998 Apr - 2000 Feb  Andrey V. Savochkin <saw@saw.sw.com.sg>
+		Serious fixes for multicast filter list setting, TX timeout routine;
+		RX ring refilling logic;  other stuff
+	2000 Feb  Jeff Garzik <jgarzik@mandrakesoft.com>
+		Convert to new PCI driver interface
+	2000 Mar 24  Dragan Stancevic <visitor@valinux.com>
+		Disabled FC and ER, to avoid lockups when we get FCP interrupts.
+	2000 Jul 17 Goutham Rao <goutham.rao@intel.com>
+		PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
+
+	2002 May 16 Jan Kiszka <Jan.Kiszka@web.de>
+		Ported to RTnet (RTAI version)
+*/
+
+static const char *version =
+"eepro100-rt.c:1.36-RTnet-0.8 2002-2006 Jan Kiszka <Jan.Kiszka@web.de>\n"
+"eepro100-rt.c: based on eepro100.c 1.36 by D. Becker, A. V. Savochkin and others\n";
+
+/* A few user-configurable values that apply to all boards.
+   First set is undocumented and spelled per Intel recommendations. */
+
+static int txfifo = 8;		/* Tx FIFO threshold in 4 byte units, 0-15 */
+static int rxfifo = 8;		/* Rx FIFO threshold, default 32 bytes. */
+/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
+static int txdmacount = 128;
+static int rxdmacount /* = 0 */;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
+static int multicast_filter_limit = 64;
+
+/* 'options' is used to pass a transceiver override or full-duplex flag
+   e.g. "options=16" for FD, "options=32" for 100mbps-only. */
+static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int local_debug = -1;	/* The debug level */
+
+/* A few values that may be tweaked. */
+/* The ring sizes should be a power of two for efficiency. */
+#define TX_RING_SIZE	32
+#define RX_RING_SIZE	8 /* RX_RING_SIZE*2 rtskbs will be preallocated */
+/* How many slots multicast filter setup may take.
+   Do not decrease without changing the set_rx_mode() implementation. */
+#define TX_MULTICAST_SIZE   2
+#define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
+/* Actual number of TX packets queued, must be
+   <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
+#define TX_QUEUE_LIMIT  (TX_RING_SIZE-TX_MULTICAST_RESERV)
+/* Hysteresis marking queue as no longer full. */
+#define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
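+
+/*
+ * Illustrative sketch (an assumption, not code used by this driver): how the
+ * two thresholds above give queue-full hysteresis, based on the cur_tx and
+ * dirty_tx counters the driver keeps per adapter.
+ */
+static inline int speedo_tx_has_room_sketch(unsigned int cur_tx,
+					    unsigned int dirty_tx,
+					    int queue_was_full)
+{
+	unsigned int in_flight = cur_tx - dirty_tx;
+
+	if (!queue_was_full)
+		/* stop queuing once TX_QUEUE_LIMIT entries are outstanding */
+		return in_flight < TX_QUEUE_LIMIT;
+	/* only reopen the queue below the lower TX_QUEUE_UNFULL mark */
+	return in_flight < TX_QUEUE_UNFULL;
+}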
+
+/* Operational parameters that usually are not changed. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT		(2*HZ)
+/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack. */
+#define PKT_BUF_SZ		VLAN_ETH_FRAME_LEN
+
+#if !defined(__OPTIMIZE__)  ||  !defined(__KERNEL__)
+#warning  You must compile this file with the correct options!
+#warning  See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/version.h>
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+
+// *** RTnet ***
+#include <linux/if_vlan.h>
+#include <rtnet_port.h>
+
+#define MAX_UNITS               8
+
+static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+// *** RTnet ***
+
+MODULE_AUTHOR("Maintainer: Jan Kiszka <Jan.Kiszka@web.de>");
+MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
+MODULE_LICENSE("GPL");
+module_param_named(debug, local_debug, int, 0444);
+module_param_array(options, int, NULL, 0444);
+module_param_array(full_duplex, int, NULL, 0444);
+module_param(txfifo, int, 0444);
+module_param(rxfifo, int, 0444);
+module_param(txdmacount, int, 0444);
+module_param(rxdmacount, int, 0444);
+module_param(max_interrupt_work, int, 0444);
+module_param(multicast_filter_limit, int, 0444);
+MODULE_PARM_DESC(debug, "eepro100 debug level (0-6)");
+MODULE_PARM_DESC(options, "eepro100: Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
+MODULE_PARM_DESC(full_duplex, "eepro100 full duplex setting(s) (1)");
+MODULE_PARM_DESC(txfifo, "eepro100 Tx FIFO threshold in 4 byte units, (0-15)");
+MODULE_PARM_DESC(rxfifo, "eepro100 Rx FIFO threshold in 4 byte units, (0-15)");
+MODULE_PARM_DESC(txdmacount, "eepro100 Tx DMA burst length; 128 - disable (0-128)");
+MODULE_PARM_DESC(rxdmacount, "eepro100 Rx DMA burst length; 128 - disable (0-128)");
+MODULE_PARM_DESC(max_interrupt_work, "eepro100 maximum events handled per interrupt");
+MODULE_PARM_DESC(multicast_filter_limit, "eepro100 maximum number of filtered multicast addresses");
+
+#define RUN_AT(x) (jiffies + (x))
+
+// *** RTnet - no power management ***
+#undef pci_set_power_state
+#define pci_set_power_state null_set_power_state
+static inline int null_set_power_state(struct pci_dev *dev, int state)
+{
+	return 0;
+}
+// *** RTnet ***
+
+#define netdevice_start(dev)
+#define netdevice_stop(dev)
+#define netif_set_tx_timeout(dev, tf, tm) \
+								do { \
+									(dev)->tx_timeout = (tf); \
+									(dev)->watchdog_timeo = (tm); \
+								} while(0)
+
+
+#ifdef CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_DBG
+static int speedo_debug = 1;
+#else
+#define speedo_debug 0
+#endif
+
+/*
+				Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
+single-chip fast Ethernet controller for PCI, as used on the Intel
+EtherExpress Pro 100 adapter.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board.  The system BIOS should be set to assign the
+PCI INTA signal to an otherwise unused system IRQ line.  While it's
+possible to share PCI interrupt lines, it negatively impacts performance and
+only recent kernels support it.
+
+III. Driver operation
+
+IIIA. General
+The Speedo3 is very similar to other Intel network chips, that is to say
+"apparently designed on a different planet".  This chip retains the complex
+Rx and Tx descriptors and multiple buffer pointers of previous chips, but
+also has simplified Tx and Rx buffer modes.  This driver uses the "flexible"
+Tx mode, but in a simplified lower-overhead manner: it associates only a
+single buffer descriptor with each frame descriptor.
+
+Despite the extra space overhead in each receive skbuff, the driver must use
+the simplified Rx buffer mode to assure that only a single data buffer is
+associated with each RxFD. The driver implements this by reserving space
+for the Rx descriptor at the head of each Rx skbuff.
+
+The Speedo-3 has receive and command unit base addresses that are added to
+almost all descriptor pointers.  The driver sets these to zero, so that all
+pointer fields are absolute addresses.
+
+The System Control Block (SCB) of some previous Intel chips exists on the
+chip in both PCI I/O and memory space.  This driver uses the I/O space
+registers, but might switch to memory mapped mode to better support non-x86
+processors.
+
+IIIB. Transmit structure
+
+The driver must use the complex Tx command+descriptor mode in order to
+have an indirect pointer to the skbuff data section.  Each Tx command block
+(TxCB) is associated with two immediately appended Tx Buffer Descriptors
+(TxBD).  A fixed ring of these TxCB+TxBD pairs is kept as part of the
+speedo_private data structure for each adapter instance.
+
+The newer i82558 explicitly supports this structure, and can read the two
+TxBDs in the same PCI burst as the TxCB.
+
+This ring structure is used for all normal transmit packets, but the
+transmit packet descriptors aren't long enough for most non-Tx commands such
+as CmdConfigure.  This is complicated by the possibility that the chip has
+already loaded the link address in the previous descriptor.  So for these
+commands we convert the next free descriptor on the ring to a NoOp, and point
+that descriptor's link to the complex command.
+
+An additional complexity of these non-transmit commands is that they may be
+added asynchronously to the normal transmit queue, so we disable interrupts
+whenever the Tx descriptor ring is manipulated.
+
+A notable aspect of these special configure commands is that they do
+work with the normal Tx ring entry scavenge method.  The Tx ring scavenge
+is done at interrupt time using the 'dirty_tx' index, and checking for the
+command-complete bit.  The setup frames may have the NoOp command on the
+Tx ring marked as complete while the setup command itself has not completed;
+this is not a problem.  The tx_ring entry can still be safely reused, as the
+tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.
+
+Commands may have bits set e.g. CmdSuspend in the command word to either
+suspend or stop the transmit/command unit.  This driver always flags the last
+command with CmdSuspend, erases the CmdSuspend in the previous command, and
+then issues a CU_RESUME.
+Note: Watch out for the potential race condition here: imagine
+	erasing the previous suspend
+		the chip processes the previous command
+		the chip processes the final command, and suspends
+	doing the CU_RESUME
+		the chip processes the next-yet-valid post-final-command.
+So blindly sending a CU_RESUME is only safe if we do it immediately after
+erasing the previous CmdSuspend, without the possibility of an
+intervening delay.  Thus the resume command is always within the
+interrupts-disabled region.  This is a timing dependence, but handling this
+condition in a timing-independent way would considerably complicate the code.
+
+Note: In previous generation Intel chips, restarting the command unit was a
+notoriously slow process.  This is presumably no longer true.
+
+IIIC. Receive structure
+
+Because of the bus-master support on the Speedo3 this driver uses the new
+SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
+This scheme allocates full-sized skbuffs as receive buffers.  The value
+SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
+trade-off the memory wasted by passing the full-sized skbuff to the queue
+layer for all frames vs. the copying cost of copying a frame to a
+correctly-sized skbuff.
+
+For small frames the copying cost is negligible (esp. considering that we
+are pre-loading the cache with immediately useful header information), so we
+allocate a new, minimally-sized skbuff.  For large frames the copying cost
+is non-trivial, and the larger copy might flush the cache of useful data, so
+we pass up the skbuff the packet was received into.
+
+IV. Notes
+
+Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
+that stated that I could disclose the information.  But I still resent
+having to sign an Intel NDA when I'm helping Intel sell their own product!
+
+*/
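+
+/*
+ * Illustrative sketch (an assumption, not code used by this driver) of the
+ * copybreak decision described in section IIIC above: short frames are
+ * copied into a freshly allocated minimally-sized buffer, longer frames
+ * donate the receive buffer itself to the stack.  rx_copybreak is assumed
+ * to be the tunable threshold.
+ */
+static inline int speedo_rx_should_copy_sketch(unsigned int pkt_len,
+					       unsigned int rx_copybreak)
+{
+	/* copying a short frame is cheaper than refilling a full-sized
+	   receive buffer and avoids handing oversized buffers up the stack */
+	return pkt_len < rx_copybreak;
+}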
+
+static int speedo_found1(struct pci_dev *pdev, long ioaddr, int fnd_cnt, int acpi_idle_state);
+
+enum pci_flags_bit {
+	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+	PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+};
+
+static inline unsigned int io_inw(unsigned long port)
+{
+	return inw(port);
+}
+static inline void io_outw(unsigned int val, unsigned long port)
+{
+	outw(val, port);
+}
+
+#ifndef USE_IO
+/* Currently alpha headers define in/out macros.
+   Undefine them.  2000/03/30  SAW */
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb(addr) readb((void *)(addr))
+#define inw(addr) readw((void *)(addr))
+#define inl(addr) readl((void *)(addr))
+#define outb(val, addr) writeb(val, (void *)(addr))
+#define outw(val, addr) writew(val, (void *)(addr))
+#define outl(val, addr) writel(val, (void *)(addr))
+#endif
+
+/* How to wait for the command unit to accept a command.
+   Typically this takes 0 ticks. */
+static inline void wait_for_cmd_done(long cmd_ioaddr)
+{
+	int wait = 1000;
+	do
+		udelay(1);
+	while (inb(cmd_ioaddr) && --wait >= 0);
+#ifdef CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_DBG
+	if (wait < 0)
+		printk(KERN_ALERT "eepro100: wait_for_cmd_done timeout!\n");
+#endif
+}
+
+#ifdef CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_CMDSTATS
+static inline int rt_wait_for_cmd_done(long cmd_ioaddr, const char *cmd)
+{
+    int wait = CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_CMDTIMEOUT;
+    nanosecs_abs_t t0, t1;
+
+    t0 = rtdm_clock_read();
+    while (inb(cmd_ioaddr) != 0) {
+	if (wait-- == 0) {
+	    rtdm_printk(KERN_ALERT "eepro100: rt_wait_for_cmd_done(%s) "
+			"timeout!\n", cmd);
+	    return 1;
+	}
+	rtdm_task_busy_sleep(1000);
+    }
+    return 0;
+}
+#else
+static inline int rt_wait_for_cmd_done(long cmd_ioaddr, const char *cmd)
+{
+    int wait = CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_CMDTIMEOUT;
+
+    while (inb(cmd_ioaddr) != 0) {
+	if (wait-- == 0)
+	    return 1;
+	rtdm_task_busy_sleep(1000);
+    }
+    return 0;
+}
+#endif
+
+/* Offsets to the various registers.
+   Accesses need not be longword aligned. */
+enum speedo_offsets {
+	SCBStatus = 0, SCBCmd = 2,	/* Rx/Command Unit command and status. */
+	SCBPointer = 4,				/* General purpose pointer. */
+	SCBPort = 8,				/* Misc. commands and operands.  */
+	SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
+	SCBCtrlMDI = 16,			/* MDI interface control. */
+	SCBEarlyRx = 20,			/* Early receive byte count. */
+};
+/* Commands that can be put in a command list entry. */
+enum commands {
+	CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
+	CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
+	CmdDump = 0x60000, CmdDiagnose = 0x70000,
+	CmdSuspend = 0x40000000,	/* Suspend after completion. */
+	CmdIntr = 0x20000000,		/* Interrupt after completion. */
+	CmdTxFlex = 0x00080000,		/* Use "Flexible mode" for CmdTx command. */
+};
+/* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
+   status bits.  Previous driver versions used separate 16 bit fields for
+   commands and statuses.  --SAW
+ */
+#if defined(__alpha__)
+# define clear_suspend(cmd)  clear_bit(30, &(cmd)->cmd_status);
+#else
+# if defined(__LITTLE_ENDIAN)
+#  define clear_suspend(cmd)  ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
+# elif defined(__BIG_ENDIAN)
+#  define clear_suspend(cmd)  ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
+# else
+#  error Unsupported byteorder
+# endif
+#endif
+
+enum SCBCmdBits {
+	SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
+	SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
+	SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
+	/* The rest are Rx and Tx commands. */
+	CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
+	CUCmdBase=0x0060,	/* CU Base address (set to zero) . */
+	CUDumpStats=0x0070, /* Dump then reset stats counters. */
+	RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
+	RxResumeNoResources=0x0007,
+};
+
+enum SCBPort_cmds {
+	PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
+};
+
+/* The Speedo3 Rx and Tx frame/buffer descriptors. */
+struct descriptor {                         /* A generic descriptor. */
+	s32 cmd_status;				/* All command and status fields. */
+	u32 link;                                   /* struct descriptor *  */
+	unsigned char params[0];
+};
+
+/* The Speedo3 Rx and Tx buffer descriptors. */
+struct RxFD {					/* Receive frame descriptor. */
+	s32 status;
+	u32 link;					/* struct RxFD * */
+	u32 rx_buf_addr;			/* void * */
+	u32 count;
+};
+
+/* Selected elements of the Tx/RxFD.status word. */
+enum RxFD_bits {
+	RxComplete=0x8000, RxOK=0x2000,
+	RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
+	RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
+	TxUnderrun=0x1000,  StatusComplete=0x8000,
+};
+
+#define CONFIG_DATA_SIZE 22
+struct TxFD {					/* Transmit frame descriptor set. */
+	s32 status;
+	u32 link;					/* void * */
+	u32 tx_desc_addr;			/* Always points to the tx_buf_addr element. */
+	s32 count;					/* # of TBD (=1), Tx start thresh., etc. */
+	/* This constitutes two "TBD" entries -- we only use one. */
+#define TX_DESCR_BUF_OFFSET 16
+	u32 tx_buf_addr0;			/* void *, frame to be transmitted.  */
+	s32 tx_buf_size0;			/* Length of Tx frame. */
+	u32 tx_buf_addr1;			/* void *, frame to be transmitted.  */
+	s32 tx_buf_size1;			/* Length of Tx frame. */
+	/* the structure must have space for at least CONFIG_DATA_SIZE starting
+	 * from tx_desc_addr field */
+};
+
+/* Multicast filter setting block.  --SAW */
+struct speedo_mc_block {
+	struct speedo_mc_block *next;
+	unsigned int tx;
+	dma_addr_t frame_dma;
+	unsigned int len;
+	struct descriptor frame __attribute__ ((__aligned__(16)));
+};
+
+/* Elements of the dump_statistics block. This block must be lword aligned. */
+struct speedo_stats {
+	u32 tx_good_frames;
+	u32 tx_coll16_errs;
+	u32 tx_late_colls;
+	u32 tx_underruns;
+	u32 tx_lost_carrier;
+	u32 tx_deferred;
+	u32 tx_one_colls;
+	u32 tx_multi_colls;
+	u32 tx_total_colls;
+	u32 rx_good_frames;
+	u32 rx_crc_errs;
+	u32 rx_align_errs;
+	u32 rx_resource_errs;
+	u32 rx_overrun_errs;
+	u32 rx_colls_errs;
+	u32 rx_runt_errs;
+	u32 done_marker;
+};
+
+enum Rx_ring_state_bits {
+	RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
+};
+
+/* Do not change the position (alignment) of the first few elements!
+   The later elements are grouped for cache locality.
+
+   Unfortunately, all the positions have been shifted since then.
+   A new re-alignment is required.  2000/03/06  SAW */
+struct speedo_private {
+	struct TxFD *tx_ring;			/* Commands (usually CmdTxPacket). */
+	struct RxFD *rx_ringp[RX_RING_SIZE];	/* Rx descriptor, used as ring. */
+
+	// *** RTnet ***
+	/* The addresses of a Tx/Rx-in-place packets/buffers. */
+	struct rtskb *tx_skbuff[TX_RING_SIZE];
+	struct rtskb *rx_skbuff[RX_RING_SIZE];
+	// *** RTnet ***
+
+	/* Mapped addresses of the rings. */
+	dma_addr_t tx_ring_dma;
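+/* Bus address of the n-th Tx descriptor, derived from the Tx ring base. */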
+#define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
+	dma_addr_t rx_ring_dma[RX_RING_SIZE];
+	struct descriptor *last_cmd;		/* Last command sent. */
+	unsigned int cur_tx, dirty_tx;		/* The ring entries to be free()ed. */
+	rtdm_lock_t lock;					/* Group with Tx control cache line. */
+	u32 tx_threshold;					/* The value for txdesc.count. */
+	struct RxFD *last_rxf;				/* Last filled RX buffer. */
+	dma_addr_t last_rxf_dma;
+	unsigned int cur_rx, dirty_rx;		/* The next free ring entry */
+	long last_rx_time;			/* Last Rx, in jiffies, to handle Rx hang. */
+	struct net_device_stats stats;
+	struct speedo_stats *lstats;
+	dma_addr_t lstats_dma;
+	int chip_id;
+	struct pci_dev *pdev;
+	struct speedo_mc_block *mc_setup_head;/* Multicast setup frame list head. */
+	struct speedo_mc_block *mc_setup_tail;/* Multicast setup frame list tail. */
+	long in_interrupt;					/* Word-aligned rtdev->interrupt */
+	unsigned char acpi_pwr;
+	signed char rx_mode;					/* Current PROMISC/ALLMULTI setting. */
+	unsigned int tx_full:1;				/* The Tx queue is full. */
+	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
+	unsigned int flow_ctrl:1;			/* Use 802.3x flow control. */
+	unsigned int rx_bug:1;				/* Work around receiver hang errata. */
+	unsigned char default_port:8;		/* Last rtdev->if_port value. */
+	unsigned char rx_ring_state;		/* RX ring status flags. */
+	unsigned short phy[2];				/* PHY media interfaces available. */
+	unsigned short advertising;			/* Current PHY advertised caps. */
+	unsigned short partner;				/* Link partner caps. */
+	rtdm_irq_t irq_handle;
+};
+
+/* The parameters for a CmdConfigure operation.
+   There are so many options that it would be difficult to document each bit.
+   We mostly use the default or recommended settings. */
+static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
+	22, 0x08, 0, 1,  0, 0, 0x22, 0x03,  1, /* 1=Use MII  0=Use AUI */
+	0, 0x2E, 0,  0x60, 0x08, 0x88,
+	0x68, 0, 0x40, 0xf2, 0x84,		/* Disable FC */
+	0x31, 0x05, };
+
+/* PHY media interface chips. */
+enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
+					 S80C24, I82555, DP83840A=10, };
+#define EE_READ_CMD		(6)
+
+static int eepro100_init_one(struct pci_dev *pdev,
+		const struct pci_device_id *ent);
+static void eepro100_remove_one (struct pci_dev *pdev);
+
+static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len);
+static int mdio_read(long ioaddr, int phy_id, int location);
+static int speedo_open(struct rtnet_device *rtdev);
+static void speedo_resume(struct rtnet_device *rtdev);
+static void speedo_init_rx_ring(struct rtnet_device *rtdev);
+static int speedo_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev);
+static void speedo_refill_rx_buffers(struct rtnet_device *rtdev, int force);
+static int speedo_rx(struct rtnet_device *rtdev, int* packets, nanosecs_abs_t *time_stamp);
+static void speedo_tx_buffer_gc(struct rtnet_device *rtdev);
+static int speedo_interrupt(rtdm_irq_t *irq_handle);
+static int speedo_close(struct rtnet_device *rtdev);
+static void set_rx_mode(struct rtnet_device *rtdev);
+static void speedo_show_state(struct rtnet_device *rtdev);
+static struct net_device_stats *speedo_get_stats(struct rtnet_device *rtdev);
+
+
+static inline void speedo_write_flush(long ioaddr)
+{
+	/* Flush previous PCI writes through intermediate bridges
+	 * by doing a benign read */
+	(void)readb((void *)(ioaddr + SCBStatus));
+}
+
+static int eepro100_init_one (struct pci_dev *pdev,
+		const struct pci_device_id *ent)
+{
+	unsigned long ioaddr;
+	int irq;
+	int acpi_idle_state = 0, pm;
+	static int cards_found = -1;
+
+	static int did_version /* = 0 */;		/* Already printed version info. */
+	if (speedo_debug > 0  &&  did_version++ == 0)
+		printk(version);
+
+	// *** RTnet ***
+	cards_found++;
+	if (cards[cards_found] == 0)
+		goto err_out_none;
+	// *** RTnet ***
+
+	if (!request_region(pci_resource_start(pdev, 1),
+			pci_resource_len(pdev, 1), "eepro100")) {
+		printk (KERN_ERR "eepro100: cannot reserve I/O ports\n");
+		goto err_out_none;
+	}
+	if (!request_mem_region(pci_resource_start(pdev, 0),
+			pci_resource_len(pdev, 0), "eepro100")) {
+		printk (KERN_ERR "eepro100: cannot reserve MMIO region\n");
+		goto err_out_free_pio_region;
+	}
+
+	irq = pdev->irq;
+#ifdef USE_IO
+	ioaddr = pci_resource_start(pdev, 1);
+	if (speedo_debug > 2)
+		printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n",
+			   ioaddr, irq);
+#else
+	ioaddr = (unsigned long)ioremap(pci_resource_start(pdev, 0),
+									pci_resource_len(pdev, 0));
+	if (!ioaddr) {
+		printk(KERN_ERR "eepro100: cannot remap MMIO region %llx @ %llx\n",
+		       (unsigned long long)pci_resource_len(pdev, 0),
+		       (unsigned long long)pci_resource_start(pdev, 0));
+		goto err_out_free_mmio_region;
+	}
+	if (speedo_debug > 2)
+		printk("Found Intel i82557 PCI Speedo, MMIO at %#llx, IRQ %d.\n",
+		       (unsigned long long)pci_resource_start(pdev, 0), irq);
+#endif
+
+	/* Save the power state before pci_enable_device() overwrites it */
+	pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
+	if (pm) {
+		u16 pwr_command;
+		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
+		acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
+	}
+
+	if (pci_enable_device(pdev))
+		goto err_out_free_mmio_region;
+
+	pci_set_master(pdev);
+
+	if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) != 0)
+		goto err_out_iounmap;
+
+	return 0;
+
+err_out_iounmap: ;
+#ifndef USE_IO
+	iounmap ((void *)ioaddr);
+#endif
+err_out_free_mmio_region:
+	release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+err_out_free_pio_region:
+	release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
+err_out_none:
+	return -ENODEV;
+}
+
+static int speedo_found1(struct pci_dev *pdev,
+		long ioaddr, int card_idx, int acpi_idle_state)
+{
+	// *** RTnet ***
+	struct rtnet_device *rtdev = NULL;
+	// *** RTnet ***
+
+	struct speedo_private *sp;
+	const char *product;
+	int i, option;
+	u16 eeprom[0x100];
+	int size;
+	void *tx_ring_space;
+	dma_addr_t tx_ring_dma;
+
+	size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
+	tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
+	if (tx_ring_space == NULL)
+		return -1;
+
+	// *** RTnet ***
+	rtdev = rt_alloc_etherdev(sizeof(struct speedo_private),
+				RX_RING_SIZE * 2 + TX_RING_SIZE);
+	if (rtdev == NULL) {
+		printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
+		pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
+		return -1;
+	}
+	rtdev_alloc_name(rtdev, "rteth%d");
+	memset(rtdev->priv, 0, sizeof(struct speedo_private));
+	rt_rtdev_connect(rtdev, &RTDEV_manager);
+	rtdev->vers = RTDEV_VERS_2_0;
+	rtdev->sysbind = &pdev->dev;
+	// *** RTnet ***
+
+	if (rtdev->mem_start > 0)
+		option = rtdev->mem_start;
+	else if (card_idx >= 0  &&  options[card_idx] >= 0)
+		option = options[card_idx];
+	else
+		option = 0;
+
+	/* Read the station address EEPROM before doing the reset.
+	   Nominally this should even be done before accepting the device, but
+	   then we wouldn't have a device name with which to report the error.
+	   The size test is for 6 bit vs. 8 bit address serial EEPROMs.
+	*/
+	{
+		unsigned long iobase;
+		int read_cmd, ee_size;
+		u16 sum;
+		int j;
+
+		/* Use IO only to avoid postponed writes and satisfy EEPROM timing
+		   requirements. */
+		iobase = pci_resource_start(pdev, 1);
+		if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
+			== 0xffe0000) {
+			ee_size = 0x100;
+			read_cmd = EE_READ_CMD << 24;
+		} else {
+			ee_size = 0x40;
+			read_cmd = EE_READ_CMD << 22;
+		}
+
+		for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
+			u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
+			eeprom[i] = value;
+			sum += value;
+			if (i < 3) {
+				rtdev->dev_addr[j++] = value;
+				rtdev->dev_addr[j++] = value >> 8;
+			}
+		}
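+		/* The 16-bit words of a valid EEPROM image sum to 0xBABA. */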
+		if (sum != 0xBABA)
+			printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
+				   "check settings before activating this device!\n",
+				   rtdev->name, sum);
+		/* Don't  unregister_netdev(dev);  as the EEPro may actually be
+		   usable, especially if the MAC address is set later.
+		   On the other hand, it may be unusable if MDI data is corrupted. */
+	}
+
+	/* Reset the chip: stop Tx and Rx processes and clear counters.
+	   This takes less than 10usec and will easily finish before the next
+	   action. */
+	outl(PortReset, ioaddr + SCBPort);
+	inl(ioaddr + SCBPort);
+	udelay(10);
+
+	if (eeprom[3] & 0x0100)
+		product = "OEM i82557/i82558 10/100 Ethernet";
+	else
+		product = pci_name(pdev);
+
+	printk(KERN_INFO "%s: %s, ", rtdev->name, product);
+
+	for (i = 0; i < 5; i++)
+		printk("%2.2X:", rtdev->dev_addr[i]);
+	printk("%2.2X, ", rtdev->dev_addr[i]);
+#ifdef USE_IO
+	printk("I/O at %#3lx, ", ioaddr);
+#endif
+	printk("IRQ %d.\n", pdev->irq);
+
+	outl(PortReset, ioaddr + SCBPort);
+	inl(ioaddr + SCBPort);
+	udelay(10);
+
+	/* Return the chip to its original power state. */
+	pci_set_power_state(pdev, acpi_idle_state);
+
+	rtdev->base_addr = ioaddr;
+	rtdev->irq = pdev->irq;
+
+	sp = rtdev->priv;
+	sp->pdev = pdev;
+	sp->acpi_pwr = acpi_idle_state;
+	sp->tx_ring = tx_ring_space;
+	sp->tx_ring_dma = tx_ring_dma;
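+	/* The statistics block shares the Tx ring DMA allocation and sits
+	   right after the last Tx descriptor. */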
+	sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
+	sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
+
+	sp->full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
+	if (card_idx >= 0) {
+		if (full_duplex[card_idx] >= 0)
+			sp->full_duplex = full_duplex[card_idx];
+	}
+	sp->default_port = option >= 0 ? (option & 0x0f) : 0;
+
+	sp->phy[0] = eeprom[6];
+	sp->phy[1] = eeprom[7];
+	sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
+	if ((pdev->device > 0x1030 && pdev->device < 0x1039)
+	    || pdev->device == 0x2449) {
+		sp->chip_id = 1;
+	}
+
+	if (sp->rx_bug)
+		printk(KERN_ERR "  *** Receiver lock-up bug detected ***\n"
+		       KERN_ERR "  Your device may not work reliably!\n");
+
+	// *** RTnet ***
+	/* The Speedo-specific entries in the device structure. */
+	rtdev->open = &speedo_open;
+	rtdev->hard_start_xmit = &speedo_start_xmit;
+	rtdev->stop = &speedo_close;
+	rtdev->hard_header = &rt_eth_header;
+	rtdev->get_stats = &speedo_get_stats;
+	//rtdev->do_ioctl = NULL;
+
+	if ( (i=rt_register_rtnetdev(rtdev)) )
+	{
+		pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
+		rtdev_free(rtdev);
+		return i;
+	}
+
+	pci_set_drvdata (pdev, rtdev);
+	// *** RTnet ***
+
+	return 0;
+}
+
+/* Serial EEPROM section.
+   A "bit" grungy, but we work our way through bit-by-bit :->. */
+/*  EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK	0x01	/* EEPROM shift clock. */
+#define EE_CS			0x02	/* EEPROM chip select. */
+#define EE_DATA_WRITE	0x04	/* EEPROM chip data in. */
+#define EE_DATA_READ	0x08	/* EEPROM chip data out. */
+#define EE_ENB			(0x4800 | EE_CS)
+#define EE_WRITE_0		0x4802
+#define EE_WRITE_1		0x4806
+#define EE_OFFSET		SCBeeprom
+
+/* The fixes for the code were kindly provided by Dragan Stancevic
+   <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
+   access timing.
+   The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
+   interval for serial EEPROM.  However, it looks like there is an
+   additional requirement dictating larger udelays in the code below.
+   2000/05/24  SAW */
+static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len)
+{
+	unsigned retval = 0;
+	long ee_addr = ioaddr + SCBeeprom;
+
+	io_outw(EE_ENB, ee_addr); udelay(2);
+	io_outw(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);
+
+	/* Shift the command bits out. */
+	do {
+		short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
+		io_outw(dataval, ee_addr); udelay(2);
+		io_outw(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
+		retval = (retval << 1) | ((io_inw(ee_addr) & EE_DATA_READ) ? 1 : 0);
+	} while (--cmd_len >= 0);
+	io_outw(EE_ENB, ee_addr); udelay(2);
+
+	/* Terminate the EEPROM access. */
+	io_outw(EE_ENB & ~EE_CS, ee_addr);
+	return retval;
+}
+
+static int mdio_read(long ioaddr, int phy_id, int location)
+{
+	int val, boguscnt = 64*10;		/* <64 usec. to complete, typ 27 ticks */
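+	/* Opcode 0x08000000 requests an MDI read; bit 28 (0x10000000)
+	   signals completion of the transaction. */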
+	outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
+	do {
+		val = inl(ioaddr + SCBCtrlMDI);
+		if (--boguscnt < 0) {
+			printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
+			break;
+		}
+	} while (! (val & 0x10000000));
+	return val & 0xffff;
+}
+
+
+static int
+speedo_open(struct rtnet_device *rtdev)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	int retval;
+
+	if (speedo_debug > 1)
+		printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", rtdev->name, rtdev->irq);
+
+	pci_set_power_state(sp->pdev, 0);
+
+	/* Set up the Tx queue early. */
+	sp->cur_tx = 0;
+	sp->dirty_tx = 0;
+	sp->last_cmd = 0;
+	sp->tx_full = 0;
+	rtdm_lock_init(&sp->lock);
+	sp->in_interrupt = 0;
+
+	// *** RTnet ***
+	rt_stack_connect(rtdev, &STACK_manager);
+
+	retval = rtdm_irq_request(&sp->irq_handle, rtdev->irq,
+				  speedo_interrupt, RTDM_IRQTYPE_SHARED,
+				  "rt_eepro100", rtdev);
+	if (retval) {
+		return retval;
+	}
+	// *** RTnet ***
+
+	rtdev->if_port = sp->default_port;
+
+	speedo_init_rx_ring(rtdev);
+
+	/* Fire up the hardware. */
+	outw(SCBMaskAll, ioaddr + SCBCmd);
+	speedo_write_flush(ioaddr);
+	speedo_resume(rtdev);
+
+	netdevice_start(rtdev);
+	rtnetif_start_queue(rtdev);
+
+	/* Setup the chip and configure the multicast list. */
+	sp->mc_setup_head = NULL;
+	sp->mc_setup_tail = NULL;
+	sp->flow_ctrl = sp->partner = 0;
+	sp->rx_mode = -1;			/* Invalid -> always reset the mode. */
+	set_rx_mode(rtdev);
+	if ((sp->phy[0] & 0x8000) == 0)
+		sp->advertising = mdio_read(ioaddr, sp->phy[0] & 0x1f, 4);
+
+	if (mdio_read(ioaddr, sp->phy[0] & 0x1f, MII_BMSR) & BMSR_LSTATUS)
+		rtnetif_carrier_on(rtdev);
+	else
+		rtnetif_carrier_off(rtdev);
+
+	if (speedo_debug > 2) {
+		printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
+			   rtdev->name, inw(ioaddr + SCBStatus));
+	}
+
+	/* No need to wait for the command unit to accept here. */
+	if ((sp->phy[0] & 0x8000) == 0)
+		mdio_read(ioaddr, sp->phy[0] & 0x1f, 0);
+
+	return 0;
+}
+
+/* Start the chip hardware after a full reset. */
+static void speedo_resume(struct rtnet_device *rtdev)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+
+	/* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
+	sp->tx_threshold = 0x01208000;
+
+	/* Set the segment registers to '0'. */
+	wait_for_cmd_done(ioaddr + SCBCmd);
+	outl(0, ioaddr + SCBPointer);
+	/* impose a delay to avoid a bug */
+	inl(ioaddr + SCBPointer);
+	udelay(10);
+	outb(RxAddrLoad, ioaddr + SCBCmd);
+	wait_for_cmd_done(ioaddr + SCBCmd);
+	outb(CUCmdBase, ioaddr + SCBCmd);
+
+	/* Load the statistics block and rx ring addresses. */
+	wait_for_cmd_done(ioaddr + SCBCmd);
+	outl(sp->lstats_dma, ioaddr + SCBPointer);
+	outb(CUStatsAddr, ioaddr + SCBCmd);
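+	/* The chip overwrites this marker when a statistics dump completes. */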
+	sp->lstats->done_marker = 0;
+
+	if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
+		if (speedo_debug > 2)
+			printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
+					rtdev->name);
+	} else {
+		wait_for_cmd_done(ioaddr + SCBCmd);
+		outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
+			 ioaddr + SCBPointer);
+		outb(RxStart, ioaddr + SCBCmd);
+	}
+
+	wait_for_cmd_done(ioaddr + SCBCmd);
+	outb(CUDumpStats, ioaddr + SCBCmd);
+	udelay(30);
+
+	/* Fill the first command with our physical address. */
+	{
+		struct descriptor *ias_cmd;
+
+		ias_cmd =
+			(struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
+		/* Avoid a bug(?!) here by marking the command already completed. */
+		ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
+		ias_cmd->link =
+			cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
+		memcpy(ias_cmd->params, rtdev->dev_addr, 6);
+		sp->last_cmd = ias_cmd;
+	}
+
+	/* Start the chip's Tx process and unmask interrupts. */
+	wait_for_cmd_done(ioaddr + SCBCmd);
+	outl(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
+		 ioaddr + SCBPointer);
+	/* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
+	   remain masked --Dragan */
+	outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
+}
+
+static void speedo_show_state(struct rtnet_device *rtdev)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	unsigned int i;
+
+	/* Print a few items for debugging. */
+	if (speedo_debug > 0) {
+		printk(KERN_DEBUG "%s: Tx ring dump,  Tx queue %u / %u:\n", rtdev->name,
+			   sp->cur_tx, sp->dirty_tx);
+		for (i = 0; i < TX_RING_SIZE; i++)
+			printk(KERN_DEBUG "%s:  %c%c%2d %8.8x.\n", rtdev->name,
+				   i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
+				   i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
+				   i, sp->tx_ring[i].status);
+	}
+	printk(KERN_DEBUG "%s: Printing Rx ring"
+		   " (next to receive into %u, dirty index %u).\n",
+		   rtdev->name, sp->cur_rx, sp->dirty_rx);
+
+	for (i = 0; i < RX_RING_SIZE; i++)
+		printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", rtdev->name,
+			   sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
+			   i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
+			   i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
+			   i, (sp->rx_ringp[i] != NULL) ?
+					   (unsigned)sp->rx_ringp[i]->status : 0);
+
+	{
+		long ioaddr = rtdev->base_addr;
+		int phy_num = sp->phy[0] & 0x1f;
+		for (i = 0; i < 16; i++) {
+			/* FIXME: what does it mean?  --SAW */
+			if (i == 6) i = 21;
+			printk(KERN_DEBUG "%s:  PHY index %d register %d is %4.4x.\n",
+				   rtdev->name, phy_num, i, mdio_read(ioaddr, phy_num, i));
+		}
+	}
+}
+
+static struct net_device_stats *speedo_get_stats(struct rtnet_device *rtdev)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	return &sp->stats;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void
+speedo_init_rx_ring(struct rtnet_device *rtdev)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	struct RxFD *rxf, *last_rxf = NULL;
+	dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
+	int i;
+
+	sp->cur_rx = 0;
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct rtskb *skb;
+		skb = rtnetdev_alloc_rtskb(rtdev, PKT_BUF_SZ + 2 + sizeof(struct RxFD));
+		sp->rx_skbuff[i] = skb;
+		if (skb == NULL)
+			break;			/* OK.  Just initially short of Rx bufs. */
+		// *** RTnet ***
+		rtskb_reserve(skb, 2);  /* IP header alignment */
+		// *** RTnet ***
+		rxf = (struct RxFD *)skb->tail;
+		sp->rx_ringp[i] = rxf;
+		sp->rx_ring_dma[i] =
+			pci_map_single(sp->pdev, rxf,
+					PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
+		rtskb_reserve(skb, sizeof(struct RxFD));
+		if (last_rxf) {
+			last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
+			pci_dma_sync_single_for_device(sp->pdev, last_rxf_dma,
+					sizeof(struct RxFD), PCI_DMA_TODEVICE);
+		}
+		last_rxf = rxf;
+		last_rxf_dma = sp->rx_ring_dma[i];
+		rxf->status = cpu_to_le32(0x00000001);	/* '1' is flag value only. */
+		rxf->link = 0;						/* None yet. */
+		/* This field unused by i82557. */
+		rxf->rx_buf_addr = 0xffffffff;
+		rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
+		pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[i],
+				sizeof(struct RxFD), PCI_DMA_TODEVICE);
+	}
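+	/* If fewer than RX_RING_SIZE buffers could be allocated, dirty_rx lags
+	   cur_rx and the refill path will retry the missing entries later. */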
+	sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+	/* Mark the last entry as end-of-list. */
+	last_rxf->status = cpu_to_le32(0xC0000002);	/* '2' is flag value only. */
+	pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
+			sizeof(struct RxFD), PCI_DMA_TODEVICE);
+	sp->last_rxf = last_rxf;
+	sp->last_rxf_dma = last_rxf_dma;
+}
+
+static int
+speedo_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	int entry;
+	// *** RTnet ***
+	rtdm_lockctx_t context;
+
+	/* Prevent interrupts from changing the Tx ring from underneath us. */
+	rtdm_lock_get_irqsave(&sp->lock, context);
+	// *** RTnet ***
+
+	/* Check if there is enough space. */
+	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+		// *** RTnet ***
+		rtnetif_stop_queue(rtdev);
+		sp->tx_full = 1;
+
+		rtdm_lock_put_irqrestore(&sp->lock, context);
+
+		rtdm_printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", rtdev->name);
+		// *** RTnet ***
+
+		return 1;
+	}
+
+	/* Calculate the Tx descriptor entry. */
+	entry = sp->cur_tx++ % TX_RING_SIZE;
+
+	sp->tx_skbuff[entry] = skb;
+	sp->tx_ring[entry].status =
+		cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
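+	/* Request a Tx-complete interrupt only every TX_RING_SIZE/4 descriptors. */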
+	if (!(entry & ((TX_RING_SIZE>>2)-1)))
+		sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
+	sp->tx_ring[entry].link =
+		cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
+	sp->tx_ring[entry].tx_desc_addr =
+		cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
+	/* The data region is always in one buffer descriptor. */
+	sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
+	sp->tx_ring[entry].tx_buf_addr0 =
+		cpu_to_le32(pci_map_single(sp->pdev, skb->data,
+					   skb->len, PCI_DMA_TODEVICE));
+	sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
+
+// *** RTnet ***
+// Disabled to gain shorter worst-case execution times.
+// Hope this bug is not relevant for us
+
+	/* Trigger the command unit resume. */
+	if (rt_wait_for_cmd_done(ioaddr + SCBCmd, __FUNCTION__) != 0) {
+		rtdm_lock_put_irqrestore(&sp->lock, context);
+
+		return 1;
+	}
+
+	/* get and patch time stamp just before the transmission */
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+// *** RTnet ***
+
+	clear_suspend(sp->last_cmd);
+	/* We want the time window between clearing suspend flag on the previous
+	   command and resuming CU to be as small as possible.
+	   Interrupts in between are very undesired.  --SAW */
+	outb(CUResume, ioaddr + SCBCmd);
+	sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+	/* Leave room for set_rx_mode(): if no more space remains than is
+	   reserved for the multicast filter, mark the ring as full. */
+	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+		rtnetif_stop_queue(rtdev);
+		sp->tx_full = 1;
+	}
+
+	// *** RTnet ***
+	rtdm_lock_put_irqrestore(&sp->lock, context);
+	// *** RTnet ***
+
+	return 0;
+}
+
+static void speedo_tx_buffer_gc(struct rtnet_device *rtdev)
+{
+	unsigned int dirty_tx;
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+
+	dirty_tx = sp->dirty_tx;
+	while ((int)(sp->cur_tx - dirty_tx) > 0) {
+		int entry = dirty_tx % TX_RING_SIZE;
+		int status = le32_to_cpu(sp->tx_ring[entry].status);
+
+		if (speedo_debug > 5)
+			printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
+				   entry, status);
+		if ((status & StatusComplete) == 0)
+			break;			/* It still hasn't been processed. */
+		if (status & TxUnderrun)
+			if (sp->tx_threshold < 0x01e08000) {
+				if (speedo_debug > 2)
+					printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
+						   rtdev->name);
+				sp->tx_threshold += 0x00040000;
+			}
+		/* Free the original skb. */
+		if (sp->tx_skbuff[entry]) {
+			sp->stats.tx_packets++;	/* Count only user packets. */
+			sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
+			pci_unmap_single(sp->pdev,
+					le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
+					sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
+
+			// *** RTnet ***
+			dev_kfree_rtskb(sp->tx_skbuff[entry]);
+			// *** RTnet ***
+
+			sp->tx_skbuff[entry] = 0;
+		}
+		dirty_tx++;
+	}
+
+// *** RTnet ***
+// *** RTnet ***
+
+	sp->dirty_tx = dirty_tx;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread. */
+static int speedo_interrupt(rtdm_irq_t *irq_handle)
+{
+	// *** RTnet ***
+	nanosecs_abs_t      time_stamp = rtdm_clock_read();
+	struct rtnet_device *rtdev     =
+	rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	int packets = 0;
+	int ret = RTDM_IRQ_NONE;
+	// *** RTnet ***
+
+	struct speedo_private *sp;
+	long ioaddr, boguscnt = max_interrupt_work;
+	unsigned short status;
+
+
+	ioaddr = rtdev->base_addr;
+	sp = (struct speedo_private *)rtdev->priv;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_DBG
+	/* A lock to prevent simultaneous entry on SMP machines. */
+	if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
+		rtdm_printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+			   rtdev->name);
+		sp->in_interrupt = 0;	/* Avoid halting machine. */
+		return ret;
+	}
+#endif
+
+	do {
+		status = inw(ioaddr + SCBStatus);
+		/* Acknowledge all of the current interrupt sources ASAP. */
+		/* Will change from 0xfc00 to 0xff00 when we start handling
+		   FCP and ER interrupts --Dragan */
+		outw(status & 0xfc00, ioaddr + SCBStatus);
+		speedo_write_flush(ioaddr);
+
+		if (speedo_debug > 4)
+			rtdm_printk(KERN_DEBUG "%s: interrupt  status=%#4.4x.\n",
+				   rtdev->name, status);
+
+		if ((status & 0xfc00) == 0)
+			break;
+
+		ret = RTDM_IRQ_HANDLED;
+
+		/* Always check if all rx buffers are allocated.  --SAW */
+		speedo_refill_rx_buffers(rtdev, 0);
+
+		if ((status & 0x5000) ||	/* Packet received, or Rx error. */
+			(sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
+									/* Need to gather the postponed packet. */
+			speedo_rx(rtdev, &packets, &time_stamp);
+
+		if (status & 0x1000) {
+			rtdm_lock_get(&sp->lock);
+			if ((status & 0x003c) == 0x0028) {		/* No more Rx buffers. */
+				struct RxFD *rxf;
+				rtdm_printk(KERN_WARNING "%s: card reports no RX buffers.\n",
+						rtdev->name);
+				rxf = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
+				if (rxf == NULL) {
+					if (speedo_debug > 2)
+						rtdm_printk(KERN_DEBUG
+								"%s: NULL cur_rx in speedo_interrupt().\n",
+								rtdev->name);
+					sp->rx_ring_state |= RrNoMem|RrNoResources;
+				} else if (rxf == sp->last_rxf) {
+					if (speedo_debug > 2)
+						rtdm_printk(KERN_DEBUG
+								"%s: cur_rx is last in speedo_interrupt().\n",
+								rtdev->name);
+					sp->rx_ring_state |= RrNoMem|RrNoResources;
+				} else
+					outb(RxResumeNoResources, ioaddr + SCBCmd);
+			} else if ((status & 0x003c) == 0x0008) { /* No resources. */
+				struct RxFD *rxf;
+				rtdm_printk(KERN_WARNING "%s: card reports no resources.\n",
+						rtdev->name);
+				rxf = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
+				if (rxf == NULL) {
+					if (speedo_debug > 2)
+						rtdm_printk(KERN_DEBUG
+								"%s: NULL cur_rx in speedo_interrupt().\n",
+								rtdev->name);
+					sp->rx_ring_state |= RrNoMem|RrNoResources;
+				} else if (rxf == sp->last_rxf) {
+					if (speedo_debug > 2)
+						rtdm_printk(KERN_DEBUG
+								"%s: cur_rx is last in speedo_interrupt().\n",
+								rtdev->name);
+					sp->rx_ring_state |= RrNoMem|RrNoResources;
+				} else {
+					/* Restart the receiver. */
+					outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
+						 ioaddr + SCBPointer);
+					outb(RxStart, ioaddr + SCBCmd);
+				}
+			}
+			sp->stats.rx_errors++;
+			rtdm_lock_put(&sp->lock);
+		}
+
+		if ((sp->rx_ring_state&(RrNoMem|RrNoResources)) == RrNoResources) {
+			rtdm_printk(KERN_WARNING
+					"%s: restart the receiver after a possible hang.\n",
+					rtdev->name);
+			rtdm_lock_get(&sp->lock);
+			/* Restart the receiver.
+			   I'm not sure if it's always right to restart the receiver
+			   here but I don't know another way to prevent receiver hangs.
+			   1999/12/25 SAW */
+			outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
+				 ioaddr + SCBPointer);
+			outb(RxStart, ioaddr + SCBCmd);
+			sp->rx_ring_state &= ~RrNoResources;
+			rtdm_lock_put(&sp->lock);
+		}
+
+		/* User interrupt, Command/Tx unit interrupt or CU not active. */
+		if (status & 0xA400) {
+			rtdm_lock_get(&sp->lock);
+			speedo_tx_buffer_gc(rtdev);
+			if (sp->tx_full
+				&& (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
+				/* The ring is no longer full. */
+				sp->tx_full = 0;
+				rtnetif_wake_queue(rtdev); /* Attention: under a spinlock.  --SAW */
+			}
+			rtdm_lock_put(&sp->lock);
+		}
+
+		if (--boguscnt < 0) {
+			rtdm_printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
+				   rtdev->name, status);
+			/* Clear all interrupt sources. */
+			/* Will change from 0xfc00 to 0xff00 when we start handling
+			   FCP and ER interrupts --Dragan */
+			outw(0xfc00, ioaddr + SCBStatus);
+			break;
+		}
+	} while (1);
+
+	if (speedo_debug > 3)
+		rtdm_printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+			   rtdev->name, inw(ioaddr + SCBStatus));
+
+	clear_bit(0, (void*)&sp->in_interrupt);
+	if (packets > 0)
+		rt_mark_stack_mgr(rtdev);
+	return ret;
+}
+
+static inline struct RxFD *speedo_rx_alloc(struct rtnet_device *rtdev, int entry)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	struct RxFD *rxf;
+	struct rtskb *skb;
+	/* Get a fresh skbuff to replace the consumed one. */
+	skb = rtnetdev_alloc_rtskb(rtdev, PKT_BUF_SZ + 2 + sizeof(struct RxFD));
+	sp->rx_skbuff[entry] = skb;
+	if (skb == NULL) {
+		sp->rx_ringp[entry] = NULL;
+		return NULL;
+	}
+	rtskb_reserve(skb, 2);  /* IP header alignment */
+	rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
+	sp->rx_ring_dma[entry] =
+		pci_map_single(sp->pdev, rxf,
+					   PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
+	rtskb_reserve(skb, sizeof(struct RxFD));
+	rxf->rx_buf_addr = 0xffffffff;
+	pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
+			sizeof(struct RxFD), PCI_DMA_TODEVICE);
+	return rxf;
+}
+
+static inline void speedo_rx_link(struct rtnet_device *rtdev, int entry,
+								  struct RxFD *rxf, dma_addr_t rxf_dma)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
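+	/* Append this descriptor to the hardware chain: point the previous
+	   tail at it and clear the tail's end-of-list/suspend bits. */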
+	rxf->status = cpu_to_le32(0xC0000001);  /* '1' for driver use only. */
+	rxf->link = 0;			/* None yet. */
+	rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
+	sp->last_rxf->link = cpu_to_le32(rxf_dma);
+	sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
+	pci_dma_sync_single_for_device(sp->pdev, sp->last_rxf_dma,
+			sizeof(struct RxFD), PCI_DMA_TODEVICE);
+	sp->last_rxf = rxf;
+	sp->last_rxf_dma = rxf_dma;
+}
+
+static int speedo_refill_rx_buf(struct rtnet_device *rtdev, int force)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	int entry;
+	struct RxFD *rxf;
+
+	entry = sp->dirty_rx % RX_RING_SIZE;
+	if (sp->rx_skbuff[entry] == NULL) {
+		rxf = speedo_rx_alloc(rtdev, entry);
+		if (rxf == NULL) {
+			unsigned int forw;
+			int forw_entry;
+			if (speedo_debug > 2 || !(sp->rx_ring_state & RrOOMReported)) {
+				// *** RTnet ***
+				rtdm_printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
+						rtdev->name, force);
+				// *** RTnet ***
+				sp->rx_ring_state |= RrOOMReported;
+			}
+			if (!force)
+				return -1;	/* Better luck next time!  */
+			/* Borrow an skb from one of next entries. */
+			for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
+				if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
+					break;
+			if (forw == sp->cur_rx)
+				return -1;
+			forw_entry = forw % RX_RING_SIZE;
+			sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
+			sp->rx_skbuff[forw_entry] = NULL;
+			rxf = sp->rx_ringp[forw_entry];
+			sp->rx_ringp[forw_entry] = NULL;
+			sp->rx_ringp[entry] = rxf;
+		}
+	} else {
+		rxf = sp->rx_ringp[entry];
+	}
+	speedo_rx_link(rtdev, entry, rxf, sp->rx_ring_dma[entry]);
+	sp->dirty_rx++;
+	sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
+	return 0;
+}
+
+static void speedo_refill_rx_buffers(struct rtnet_device *rtdev, int force)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+
+	/* Refill the RX ring. */
+	while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
+			speedo_refill_rx_buf(rtdev, force) != -1);
+}
+
+static int
+speedo_rx(struct rtnet_device *rtdev, int* packets, nanosecs_abs_t *time_stamp)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	int entry = sp->cur_rx % RX_RING_SIZE;
+	int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
+	int alloc_ok = 1;
+
+	if (speedo_debug > 4)
+		rtdm_printk(KERN_DEBUG " In speedo_rx().\n");
+	/* If we own the next entry, it's a new packet. Send it up. */
+	while (sp->rx_ringp[entry] != NULL) {
+		int status;
+		int pkt_len;
+
+		pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
+			sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
+		status = le32_to_cpu(sp->rx_ringp[entry]->status);
+		pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
+
+		if (!(status & RxComplete))
+			break;
+
+		if (--rx_work_limit < 0)
+			break;
+
+		/* Check for a rare out-of-memory case: the current buffer is
+		   the last buffer allocated in the RX ring.  --SAW */
+		if (sp->last_rxf == sp->rx_ringp[entry]) {
+			/* Postpone the packet.  It'll be reaped at an interrupt when this
+			   packet is no longer the last packet in the ring. */
+			if (speedo_debug > 2)
+				rtdm_printk(KERN_DEBUG "%s: RX packet postponed!\n",
+					   rtdev->name);
+			sp->rx_ring_state |= RrPostponed;
+			break;
+		}
+
+		if (speedo_debug > 4)
+			rtdm_printk(KERN_DEBUG "  speedo_rx() status %8.8x len %d.\n", status,
+				   pkt_len);
+		if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
+			if (status & RxErrTooBig)
+				rtdm_printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
+					   "status %8.8x!\n", rtdev->name, status);
+			else if (! (status & RxOK)) {
+				/* There was a fatal error.  This *should* be impossible. */
+				sp->stats.rx_errors++;
+				rtdm_printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
+					   "status %8.8x.\n",
+					   rtdev->name, status);
+			}
+		} else {
+			struct rtskb *skb;
+
+// *** RTnet ***
+			{
+// *** RTnet ***
+				/* Pass up the already-filled skbuff. */
+				skb = sp->rx_skbuff[entry];
+				if (skb == NULL) {
+					rtdm_printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
+						   rtdev->name);
+					break;
+				}
+				sp->rx_skbuff[entry] = NULL;
+				rtskb_put(skb, pkt_len);
+				sp->rx_ringp[entry] = NULL;
+				pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
+						PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
+			}
+			skb->protocol = rt_eth_type_trans(skb, rtdev);
+			//rtmac
+			skb->time_stamp = *time_stamp;
+			//rtmac
+			rtnetif_rx(skb);
+			(*packets)++;
+			sp->stats.rx_packets++;
+			sp->stats.rx_bytes += pkt_len;
+		}
+		entry = (++sp->cur_rx) % RX_RING_SIZE;
+		sp->rx_ring_state &= ~RrPostponed;
+		/* Refill the recently taken buffers.
+		   Do it one-by-one to handle traffic bursts better. */
+		if (alloc_ok && speedo_refill_rx_buf(rtdev, 0) == -1)
+			alloc_ok = 0;
+	}
+
+	/* Try hard to refill the recently taken buffers. */
+	speedo_refill_rx_buffers(rtdev, 1);
+
+	sp->last_rx_time = jiffies;
+
+	return 0;
+}
+
+static int
+speedo_close(struct rtnet_device *rtdev)
+{
+	long ioaddr = rtdev->base_addr;
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	int i;
+
+	netdevice_stop(rtdev);
+	rtnetif_stop_queue(rtdev);
+
+	if (speedo_debug > 1)
+		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
+			   rtdev->name, inw(ioaddr + SCBStatus));
+
+	/* Shutdown procedure according to Intel's e100 */
+	outl(PortPartialReset, ioaddr + SCBPort);
+	speedo_write_flush(ioaddr); udelay(20);
+
+	outl(PortReset, ioaddr + SCBPort);
+	speedo_write_flush(ioaddr); udelay(20);
+
+	outw(SCBMaskAll, ioaddr + SCBCmd);
+	speedo_write_flush(ioaddr);
+
+	// *** RTnet ***
+	if ( (i=rtdm_irq_free(&sp->irq_handle))<0 )
+		return i;
+
+	rt_stack_disconnect(rtdev);
+
+	// *** RTnet ***
+
+	/* Print a few items for debugging. */
+	if (speedo_debug > 3)
+		speedo_show_state(rtdev);
+
+    /* Free all the skbuffs in the Rx and Tx queues. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct rtskb *skb = sp->rx_skbuff[i];
+		sp->rx_skbuff[i] = 0;
+		/* Clear the Rx descriptors. */
+		if (skb) {
+			pci_unmap_single(sp->pdev,
+					 sp->rx_ring_dma[i],
+					 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
+			dev_kfree_rtskb(skb);
+		}
+	}
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		struct rtskb *skb = sp->tx_skbuff[i];
+		sp->tx_skbuff[i] = 0;
+		/* Clear the Tx descriptors. */
+		if (skb) {
+			pci_unmap_single(sp->pdev,
+					 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
+					 skb->len, PCI_DMA_TODEVICE);
+
+			// *** RTnet ***
+			dev_kfree_rtskb(skb);
+			// *** RTnet ***
+		}
+	}
+
+// *** RTnet ***
+// *** RTnet ***
+
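+	/* Park the chip in the D2 low-power state until the next open. */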
+	pci_set_power_state(sp->pdev, 2);
+
+	return 0;
+}
+
+
+/* Set or clear the multicast filter for this adaptor.
+   This is very ugly with Intel chips -- we usually have to execute an
+   entire configuration command, plus process a multicast command.
+   This is complicated.  We must put a large configuration command and
+   an arbitrarily-sized multicast command in the transmit list.
+   To minimize the disruption -- the previous command might have already
+   loaded the link -- we convert the current command block, normally a Tx
+   command, into a no-op and link it to the new command.
+*/
+static void set_rx_mode(struct rtnet_device *rtdev)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	struct descriptor *last_cmd;
+	char new_rx_mode;
+	//unsigned long flags;
+	int entry/*, i*/;
+
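+	/* rx_mode encoding: bit 1 = promiscuous, bit 0 = accept all multicast;
+	   -1 means unknown and forces a reconfiguration. */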
+	if (rtdev->flags & IFF_PROMISC) {			/* Set promiscuous. */
+		new_rx_mode = 3;
+	} else if (rtdev->flags & IFF_ALLMULTI) {
+		new_rx_mode = 1;
+	} else
+		new_rx_mode = 0;
+
+	if (speedo_debug > 3)
+		printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", rtdev->name,
+				sp->rx_mode, new_rx_mode);
+
+	if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
+	    /* The Tx ring is full -- don't add anything!  Hope the mode will be
+		 * set again later. */
+		sp->rx_mode = -1;
+		return;
+	}
+
+	if (new_rx_mode != sp->rx_mode) {
+		u8 *config_cmd_data;
+
+		//spin_lock_irqsave(&sp->lock, flags); --- disabled for now as it runs before irq handler is active
+		entry = sp->cur_tx++ % TX_RING_SIZE;
+		last_cmd = sp->last_cmd;
+		sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+		sp->tx_skbuff[entry] = 0;			/* Redundant. */
+		sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
+		sp->tx_ring[entry].link =
+			cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
+		config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
+		/* Construct a full CmdConfig frame. */
+		memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
+		config_cmd_data[1] = (txfifo << 4) | rxfifo;
+		config_cmd_data[4] = rxdmacount;
+		config_cmd_data[5] = txdmacount + 0x80;
+		config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
+		/* 0x80 doesn't disable FC; 0x84 does.
+		   Disable flow control since we are not ACK-ing any FC interrupts
+		   for now. --Dragan */
+		config_cmd_data[19] = 0x84;
+		config_cmd_data[19] |= sp->full_duplex ? 0x40 : 0;
+		config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
+		if (sp->phy[0] & 0x8000) {			/* Use the AUI port instead. */
+			config_cmd_data[15] |= 0x80;
+			config_cmd_data[8] = 0;
+		}
+		/* Trigger the command unit resume. */
+		wait_for_cmd_done(ioaddr + SCBCmd);
+		clear_suspend(last_cmd);
+		outb(CUResume, ioaddr + SCBCmd);
+		if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+			rtnetif_stop_queue(rtdev);
+			sp->tx_full = 1;
+		}
+		//spin_unlock_irqrestore(&sp->lock, flags);
+	}
+
+	if (new_rx_mode == 0) {
+		/* The simple case of 0-3 multicast list entries occurs often, and
+		   fits within one tx_ring[] entry. */
+		/*struct dev_mc_list *mclist;*/
+		u16 *setup_params/*, *eaddrs*/;
+
+		//spin_lock_irqsave(&sp->lock, flags); --- disabled for now as it runs before irq handler is active
+		entry = sp->cur_tx++ % TX_RING_SIZE;
+		last_cmd = sp->last_cmd;
+		sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+		sp->tx_skbuff[entry] = 0;
+		sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
+		sp->tx_ring[entry].link =
+			cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
+		sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
+		setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
+		*setup_params++ = cpu_to_le16(0); /* mc_count */
+// *** RTnet ***
+// *** RTnet ***
+
+		wait_for_cmd_done(ioaddr + SCBCmd);
+		clear_suspend(last_cmd);
+		/* Immediately trigger the command unit resume. */
+		outb(CUResume, ioaddr + SCBCmd);
+
+		if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+			rtnetif_stop_queue(rtdev);
+			sp->tx_full = 1;
+		}
+		//spin_unlock_irqrestore(&sp->lock, flags);
+// *** RTnet ***
+// *** RTnet ***
+	}
+
+	sp->rx_mode = new_rx_mode;
+}
+
+
+static void eepro100_remove_one (struct pci_dev *pdev)
+{
+	// *** RTnet ***
+	struct rtnet_device *rtdev = pci_get_drvdata (pdev);
+
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+
+	rt_unregister_rtnetdev(rtdev);
+	rt_rtdev_disconnect(rtdev);
+	// *** RTnet ***
+
+	release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
+	release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+
+#ifndef USE_IO
+	iounmap((char *)rtdev->base_addr);
+#endif
+
+	pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
+								+ sizeof(struct speedo_stats),
+						sp->tx_ring, sp->tx_ring_dma);
+	pci_disable_device(pdev);
+
+	// *** RTnet ***
+	rtdev_free(rtdev);
+	// *** RTnet ***
+}
+
+static struct pci_device_id eepro100_pci_tbl[] = {
+	{ PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1092, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1228, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x27DC, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
+	{ 0,}
+};
+MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
+
+static struct pci_driver eepro100_driver = {
+	.name		= "eepro100_rt",
+	.id_table	= eepro100_pci_tbl,
+	.probe		= eepro100_init_one,
+	.remove		= eepro100_remove_one,
+	.suspend	= NULL,
+	.resume		= NULL,
+};
+
+static int __init eepro100_init_module(void)
+{
+#ifdef CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_DBG
+	if (local_debug >= 0 && speedo_debug != local_debug)
+		printk(KERN_INFO "eepro100.c: Debug level is %d.\n", local_debug);
+	if (local_debug >= 0)
+		speedo_debug = local_debug;
+#else  /* !CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_DBG */
+	local_debug = speedo_debug; /* touch debug variable */
+#endif /* CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_DBG */
+
+	return pci_register_driver(&eepro100_driver);
+}
+
+static void __exit eepro100_cleanup_module(void)
+{
+	pci_unregister_driver(&eepro100_driver);
+}
+
+module_init(eepro100_init_module);
+module_exit(eepro100_cleanup_module);
+++ linux-patched/drivers/xenomai/net/drivers/smc91111.c	2022-03-21 12:58:29.618886728 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*------------------------------------------------------------------------
+ . smc91111.c
+ . This is a driver for SMSC's 91C111 single-chip Ethernet device.
+ .
+ . Copyright (C) 2001 Standard Microsystems Corporation (SMSC)
+ .       Developed by Simple Network Magic Corporation (SNMC)
+ . Copyright (C) 1996 by Erik Stahlman (ES)
+ .
+ . This program is free software; you can redistribute it and/or modify
+ . it under the terms of the GNU General Public License as published by
+ . the Free Software Foundation; either version 2 of the License, or
+ . (at your option) any later version.
+ .
+ . This program is distributed in the hope that it will be useful,
+ . but WITHOUT ANY WARRANTY; without even the implied warranty of
+ . MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ . GNU General Public License for more details.
+ .
+ . You should have received a copy of the GNU General Public License
+ . along with this program; if not, write to the Free Software
+ . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ .
+ . Information contained in this file was obtained from the LAN91C111
+ . manual from SMC.  To get a copy, if you really want one, you can find
+ . information under www.smsc.com.
+ .
+ .
+ . "Features" of the SMC chip:
+ .   Integrated PHY/MAC for 10/100BaseT Operation
+ .   Supports internal and external MII
+ .   Integrated 8K packet memory
+ .   EEPROM interface for configuration
+ .
+ . Arguments:
+ .	io	= for the base address
+ .	irq	= for the IRQ
+ .	nowait	= 0 for normal wait states, 1 eliminates additional wait states
+ .
+ . author:
+ .	Erik Stahlman				( erik@vt.edu )
+ .	Daris A Nevil				( dnevil@snmc.com )
+ .	Pramod B Bhardwaj			(pramod.bhardwaj@smsc.com)
+ .
+ .
+ . Hardware multicast code from Peter Cammaert ( pc@denkart.be )
+ .
+ . Sources:
+ .    o   SMSC LAN91C111 databook (www.smsc.com)
+ .    o   smc9194.c by Erik Stahlman
+ .    o   skeleton.c by Donald Becker ( becker@cesdis.gsfc.nasa.gov )
+ .
+ . History:
+ .    09/24/01  Pramod B Bhardwaj, Added the changes for Kernel 2.4
+ .    08/21/01  Pramod B Bhardwaj Added support for RevB of LAN91C111
+ .	04/25/01  Daris A Nevil  Initial public release through SMSC
+ .	03/16/01  Daris A Nevil  Modified smc9194.c for use with LAN91C111
+
+	Ported to RTnet: March 2004, Jan Kiszka <Jan.Kiszka@web.de>
+ ----------------------------------------------------------------------------*/
+
+// Use power-down feature of the chip
+#define POWER_DOWN	1
+
+
+static const char version[] =
+	"SMSC LAN91C111 Driver (v2.0-rt), RTnet version - Jan Kiszka (jan.kiszka@web.de)\n\n";
+
+#ifdef MODULE
+#include <linux/module.h>
+#include <linux/version.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h> //#include <linux/malloc.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+//#include <linux/kcomp.h>
+
+#ifdef DISABLED____CONFIG_SYSCTL
+#include <linux/proc_fs.h>
+#include <linux/sysctl.h>
+#endif
+
+#include <rtnet_port.h>
+
+#include "rt_smc91111.h"
+/*------------------------------------------------------------------------
+ .
+ . Configuration options, for the experienced user to change.
+ .
+ -------------------------------------------------------------------------*/
+
+/*
+ . Do you want to use 32 bit xfers?  This should work on all chips, as
+ . the chipset is designed to accommodate them.
+*/
+#define USE_32_BIT 1
+
+
+/*
+ .the LAN91C111 can be at any of the following port addresses.  To change,
+ .for a slightly different card, you can add it to the array.  Keep in
+ .mind that the array must end in zero.
+*/
+static unsigned int smc_portlist[] __initdata =
+   { 0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0,
+	 0x300, 0x320, 0x340, 0x360, 0x380, 0x3A0, 0x3C0, 0x3E0, 0};
+
+
+/*
+ . Wait time for memory to be free.  This probably shouldn't be
+ . tuned that much, as waiting for this means nothing else happens
+ . in the system
+*/
+#define MEMORY_WAIT_TIME 16
+
+
+/*
+ . Timeout in us for waiting on the completion of a previous MMU command
+ . in smc_rcv().
+*/
+#define MMU_CMD_TIMEOUT 5
+
+
+/*
+ . DEBUGGING LEVELS
+ .
+ . 0 for normal operation
+ . 1 for slightly more details
+ . >2 for various levels of increasingly useless information
+ .    2 for interrupt tracking, status flags
+ .    3 for packet info
+ .    4 for complete packet dumps
+*/
+//#define SMC_DEBUG 3 // Must be defined in makefile
+
+#if defined(SMC_DEBUG) && (SMC_DEBUG > 2)
+#define PRINTK3(args...) rtdm_printk(args)
+#else
+#define PRINTK3(args...)
+#endif
+
+#if defined(SMC_DEBUG) && (SMC_DEBUG > 1)
+#define PRINTK2(args...) rtdm_printk(args)
+#else
+#define PRINTK2(args...)
+#endif
+
+#ifdef SMC_DEBUG
+#define PRINTK(args...) rtdm_printk(args)
+#else
+#define PRINTK(args...)
+#endif
+
+
+/*------------------------------------------------------------------------
+ .
+ . The internal workings of the driver.  If you are changing anything
+ . here with the SMC stuff, you should have the datasheet and know
+ . what you are doing.
+ .
+ -------------------------------------------------------------------------*/
+#define CARDNAME "LAN91C111"
+
+// Memory sizing constant
+#define LAN91C111_MEMORY_MULTIPLIER	(1024*2)
+
+/* store this information for the driver.. */
+struct smc_local {
+
+// these are things that the kernel wants me to keep, so users
+	// can find out semi-useless statistics of how well the card is
+	// performing
+	struct net_device_stats stats;
+
+	// If I have to wait until memory is available to send
+	// a packet, I will store the skbuff here, until I get the
+	// desired memory.  Then, I'll send it out and free it.
+	struct rtskb * saved_skb;
+
+	// This keeps track of how many packets that I have
+	// sent out.  When an TX_EMPTY interrupt comes, I know
+	// that all of these have been sent.
+	int	packets_waiting;
+
+	// Set to true during the auto-negotiation sequence
+	int	autoneg_active;
+
+	// Address of our PHY port
+	word	phyaddr;
+
+	// Type of PHY
+	word	phytype;
+
+	// Last contents of PHY Register 18
+	word	lastPhy18;
+
+	// Contains the current active transmission mode
+	word	tcr_cur_mode;
+
+	// Contains the current active receive mode
+	word	rcr_cur_mode;
+
+	// Contains the current active receive/phy mode
+	word	rpc_cur_mode;
+
+	/* => Pramod, Odd Byte issue */
+	// Contains the Current ChipID
+	unsigned short ChipID;
+
+	//Contains the Current ChipRevision
+	unsigned short ChipRev;
+	/* <= Pramod, Odd Byte issue */
+
+#ifdef DISABLED____CONFIG_SYSCTL
+
+	// Root directory /proc/sys/dev
+	// Second entry must be null to terminate the table
+	ctl_table root_table[2];
+
+	// Directory for this device /proc/sys/dev/ethX
+	// Again the second entry must be zero to terminate
+	ctl_table eth_table[2];
+
+	// This is the parameters (file) table
+	ctl_table param_table[CTL_SMC_LAST_ENTRY];
+
+	// Saves the sysctl header returned by register_sysctl_table()
+	// we send this to unregister_sysctl_table()
+	struct ctl_table_header *sysctl_header;
+
+	// Parameter variables (files) go here
+	char ctl_info[1024];
+	int ctl_swfdup;
+	int ctl_ephloop;
+	int ctl_miiop;
+	int ctl_autoneg;
+	int ctl_rfduplx;
+	int ctl_rspeed;
+	int ctl_afduplx;
+	int ctl_aspeed;
+	int ctl_lnkfail;
+	int ctl_forcol;
+	int ctl_filtcar;
+	int ctl_freemem;
+	int ctl_totmem;
+	int ctl_leda;
+	int ctl_ledb;
+	int ctl_chiprev;
+#ifdef SMC_DEBUG
+	int ctl_reg_bsr;
+	int ctl_reg_tcr;
+	int ctl_reg_esr;
+	int ctl_reg_rcr;
+	int ctl_reg_ctrr;
+	int ctl_reg_mir;
+	int ctl_reg_rpcr;
+	int ctl_reg_cfgr;
+	int ctl_reg_bar;
+	int ctl_reg_iar0;
+	int ctl_reg_iar1;
+	int ctl_reg_iar2;
+	int ctl_reg_gpr;
+	int ctl_reg_ctlr;
+	int ctl_reg_mcr;
+	int ctl_reg_pnr;
+	int ctl_reg_fpr;
+	int ctl_reg_ptr;
+	int ctl_reg_dr;
+	int ctl_reg_isr;
+	int ctl_reg_mtr1;
+	int ctl_reg_mtr2;
+	int ctl_reg_mtr3;
+	int ctl_reg_mtr4;
+	int ctl_reg_miir;
+	int ctl_reg_revr;
+	int ctl_reg_ercvr;
+	int ctl_reg_extr;
+	int ctl_phy_ctrl;
+	int ctl_phy_stat;
+	int ctl_phy_id1;
+	int ctl_phy_id2;
+	int ctl_phy_adc;
+	int ctl_phy_remc;
+	int ctl_phy_cfg1;
+	int ctl_phy_cfg2;
+	int ctl_phy_int;
+	int ctl_phy_mask;
+#endif // SMC_DEBUG
+
+#endif // CONFIG_SYSCTL
+
+	rtdm_irq_t irq_handle;
+};
+
+
+/*-----------------------------------------------------------------
+ .
+ .  The driver can be entered at any of the following entry points.
+ .
+ .------------------------------------------------------------------  */
+
+/*
+ . This is called by  register_netdev().  It is responsible for
+ . checking the portlist for the SMC9000 series chipset.  If it finds
+ . one, then it will initialize the device, find the hardware information,
+ . and set up the appropriate device parameters.
+ . NOTE: Interrupts are *OFF* when this procedure is called.
+ .
+ . NB: This shouldn't be static since it is referred to externally.
+*/
+int smc_init(struct rtnet_device *dev);
+
+/*
+ . This is called by  unregister_netdev().  It is responsible for
+ . cleaning up before the driver is finally unregistered and discarded.
+*/
+//void smc_destructor(struct net_device *dev);
+
+/*
+ . The kernel calls this function when someone wants to use the net_device,
+ . typically 'ifconfig ethX up'.
+*/
+static int smc_open(struct rtnet_device *dev);
+
+/*
+ . This is called by the kernel to send a packet out into the net.  It's
+ . responsible for doing a best-effort send, but if it's simply not possible
+ . to send it, the packet gets dropped.
+*/
+//static void smc_timeout (struct net_device *dev);
+/*
+ . This is called by the kernel in response to 'ifconfig ethX down'.  It
+ . is responsible for cleaning up everything that the open routine
+ . does, and maybe putting the card into a powerdown state.
+*/
+static int smc_close(struct rtnet_device *dev);
+
+/*
+ . This routine allows the proc file system to query the driver's
+ . statistics.
+*/
+static struct net_device_stats *smc_query_statistics(struct rtnet_device *rtdev);
+
+/*
+ . Finally, a call to set promiscuous mode ( for TCPDUMP and related
+ . programs ) and multicast modes.
+*/
+static void smc_set_multicast_list(struct rtnet_device *dev);
+
+/*
+ . Configures the PHY through the MII Management interface
+*/
+static void smc_phy_configure(struct rtnet_device* dev);
+
+/*---------------------------------------------------------------
+ .
+ . Interrupt level calls..
+ .
+ ----------------------------------------------------------------*/
+
+/*
+ . Handles the actual interrupt
+*/
+static int smc_interrupt(rtdm_irq_t *irq_handle);
+/*
+ . This is a separate procedure to handle the receipt of a packet, to
+ . leave the interrupt code looking slightly cleaner
+*/
+inline static void smc_rcv( struct rtnet_device *dev );
+/*
+ . This handles a TX interrupt, which is raised only when an error
+ . occurred while sending a packet.
+*/
+//inline static void smc_tx( struct net_device * dev );
+
+/*
+ . This handles interrupts generated from PHY register 18
+*/
+//static void smc_phy_interrupt(struct net_device* dev);
+
+/*
+ ------------------------------------------------------------
+ .
+ . Internal routines
+ .
+ ------------------------------------------------------------
+*/
+
+/*
+ . Test if a given location contains a chip, trying to cause as
+ . little damage as possible if it's not an SMC chip.
+*/
+static int smc_probe(struct rtnet_device *dev, int ioaddr);
+
+/*
+ . A rather simple routine to print out a packet for debugging purposes.
+*/
+#if defined(SMC_DEBUG) && (SMC_DEBUG > 2)
+static void print_packet( byte *, int );
+#endif
+
+#define tx_done(dev) 1
+
+/* this is called to actually send the packet to the chip */
+static void smc_hardware_send_packet( struct rtnet_device * dev );
+
+/* Since I am not sure if I will have enough room in the chip's ram
+ . to store the packet, I call this routine, which either sends it
+ . now, or generates an interrupt when the card is ready for the
+ . packet */
+static int  smc_wait_to_send_packet( struct rtskb * skb, struct rtnet_device *dev );
+
+/* this does a soft reset on the device */
+static void smc_reset( struct rtnet_device* dev );
+
+/* Enable Interrupts, Receive, and Transmit */
+static void smc_enable( struct rtnet_device *dev );
+
+/* this puts the device in an inactive state */
+static void smc_shutdown( int ioaddr );
+
+#ifndef NO_AUTOPROBE
+/* This routine will find the IRQ of the driver if one is not
+ . specified in the input to the device.  */
+static int smc_findirq( int ioaddr );
+#endif
+
+/* Routines to Read and Write the PHY Registers across the
+   MII Management Interface
+*/
+
+static word smc_read_phy_register(int ioaddr, byte phyaddr, byte phyreg);
+static void smc_write_phy_register(int ioaddr, byte phyaddr, byte phyreg, word phydata);
+
+/* Initializes our device's sysctl proc filesystem */
+
+#ifdef DISABLED____CONFIG_SYSCTL
+static void smc_sysctl_register(struct rtnet_device *);
+static void smc_sysctl_unregister(struct rtnet_device *);
+#endif /* CONFIG_SYSCTL */
+
+/*
+ . Function: smc_reset( struct device* dev )
+ . Purpose:
+ .	This sets the SMC91111 chip to its normal state, hopefully from whatever
+ .	mess that any other DOS driver has put it in.
+ .
+ . Maybe I should reset more registers to defaults in here?  SOFTRST  should
+ . do that for me.
+ .
+ . Method:
+ .	1.  send a SOFT RESET
+ .	2.  wait for it to finish
+ .	3.  enable autorelease mode
+ .	4.  reset the memory management unit
+ .	5.  clear all interrupts
+ .
+*/
+static void smc_reset( struct rtnet_device* dev )
+{
+	//struct smc_local *lp	= (struct smc_local *)dev->priv;
+	int	ioaddr = dev->base_addr;
+
+	PRINTK2("%s:smc_reset\n", dev->name);
+
+	/* This resets the registers mostly to defaults, but doesn't
+	   affect EEPROM.  That seems unnecessary */
+	SMC_SELECT_BANK( 0 );
+	outw( RCR_SOFTRST, ioaddr + RCR_REG );
+
+	/* Setup the Configuration Register */
+	/* This is necessary because the CONFIG_REG is not affected */
+	/* by a soft reset */
+
+	SMC_SELECT_BANK( 1 );
+	outw( CONFIG_DEFAULT, ioaddr + CONFIG_REG);
+
+	/* Setup for fast accesses if requested */
+	/* If the card/system can't handle it then there will */
+	/* be no recovery except for a hard reset or power cycle */
+
+	if (dev->dma)
+		outw( inw( ioaddr + CONFIG_REG ) | CONFIG_NO_WAIT,
+			ioaddr + CONFIG_REG );
+
+#ifdef POWER_DOWN
+	/* Release from possible power-down state */
+	/* Configuration register is not affected by Soft Reset */
+	SMC_SELECT_BANK( 1 );
+	outw( inw( ioaddr + CONFIG_REG ) | CONFIG_EPH_POWER_EN,
+		ioaddr + CONFIG_REG  );
+#endif
+
+	SMC_SELECT_BANK( 0 );
+
+	/* this should pause enough for the chip to be happy */
+	mdelay(10);
+
+	/* Disable transmit and receive functionality */
+	outw( RCR_CLEAR, ioaddr + RCR_REG );
+	outw( TCR_CLEAR, ioaddr + TCR_REG );
+
+	/* set the control register to automatically
+	   release successfully transmitted packets, to make the best
+	   use out of our limited memory */
+	SMC_SELECT_BANK( 1 );
+	outw( inw( ioaddr + CTL_REG ) | CTL_AUTO_RELEASE , ioaddr + CTL_REG );
+
+	/* Reset the MMU */
+	SMC_SELECT_BANK( 2 );
+	outw( MC_RESET, ioaddr + MMU_CMD_REG );
+
+	/* Note:  It doesn't seem that waiting for the MMU busy is needed here,
+	   but this is a place where future chipsets _COULD_ break.  Be wary
+	   of issuing another MMU command right after this */
+
+	/* Disable all interrupts */
+	outb( 0, ioaddr + IM_REG );
+}
+
+/*
+ . Function: smc_enable
+ . Purpose: let the chip talk to the outside world
+ . Method:
+ .	1.  Enable the transmitter
+ .	2.  Enable the receiver
+ .	3.  Enable interrupts
+*/
+static void smc_enable( struct rtnet_device *dev )
+{
+	unsigned short ioaddr	= dev->base_addr;
+	struct smc_local *lp	= (struct smc_local *)dev->priv;
+
+	PRINTK2("%s:smc_enable\n", dev->name);
+
+	SMC_SELECT_BANK( 0 );
+	/* see the header file for options in TCR/RCR DEFAULT*/
+	outw( lp->tcr_cur_mode, ioaddr + TCR_REG );
+	outw( lp->rcr_cur_mode, ioaddr + RCR_REG );
+
+	/* now, enable interrupts */
+	SMC_SELECT_BANK( 2 );
+	outb( SMC_INTERRUPT_MASK, ioaddr + IM_REG );
+}
+
+/*
+ . Function: smc_shutdown
+ . Purpose:  closes down the SMC91xxx chip.
+ . Method:
+ .	1. zero the interrupt mask
+ .	2. clear the enable receive flag
+ .	3. clear the enable xmit flags
+ .
+ . TODO:
+ .   (1) maybe utilize power down mode.
+ .	Why not yet?  Because while the chip will go into power down mode,
+ .	the manual says that it will wake up in response to any I/O requests
+ .	in the register space.   Empirical results do not show this working.
+*/
+static void smc_shutdown( int ioaddr )
+{
+	PRINTK2("CARDNAME:smc_shutdown\n");
+
+	/* no more interrupts for me */
+	SMC_SELECT_BANK( 2 );
+	outb( 0, ioaddr + IM_REG );
+
+	/* and tell the card to stay away from that nasty outside world */
+	SMC_SELECT_BANK( 0 );
+	outb( RCR_CLEAR, ioaddr + RCR_REG );
+	outb( TCR_CLEAR, ioaddr + TCR_REG );
+
+#ifdef POWER_DOWN
+	/* finally, shut the chip down */
+	SMC_SELECT_BANK( 1 );
+	outw( inw( ioaddr + CONFIG_REG ) & ~CONFIG_EPH_POWER_EN,
+		ioaddr + CONFIG_REG  );
+#endif
+}
+
+
+/*
+ . Function: smc_wait_to_send_packet( struct sk_buff * skb, struct device * )
+ . Purpose:
+ .    Attempt to allocate memory for a packet, if chip-memory is not
+ .    available, then tell the card to generate an interrupt when it
+ .    is available.
+ .
+ . Algorithm:
+ .
+ . o	if the saved_skb is not currently null, then drop this packet
+ .	on the floor.  This should never happen, because of TBUSY.
+ . o	if the saved_skb is null, then replace it with the current packet,
+ . o	See if I can send it now.
+ . o	(NO): Enable interrupts and let the interrupt handler deal with it.
+ . o	(YES): Send it now.
+*/
+static int smc_wait_to_send_packet( struct rtskb * skb, struct rtnet_device * dev )
+{
+	struct smc_local *lp	= (struct smc_local *)dev->priv;
+	unsigned short ioaddr	= dev->base_addr;
+	word			length;
+	unsigned short		numPages;
+	word			time_out;
+	word			status;
+
+	PRINTK3("%s:smc_wait_to_send_packet\n", dev->name);
+
+	rtnetif_stop_queue(dev);
+
+	if ( lp->saved_skb) {
+		/* THIS SHOULD NEVER HAPPEN. */
+		lp->stats.tx_aborted_errors++;
+		rtdm_printk("%s: Bad Craziness - sent packet while busy.\n",
+			dev->name);
+		return 1;
+	}
+	lp->saved_skb = skb;
+
+	length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+
+
+	/*
+	** The MMU wants the number of pages to be the number of 256 bytes
+	** 'pages', minus 1 ( since a packet can't ever have 0 pages :) )
+	**
+	** The 91C111 ignores the size bits, but the code is left intact
+	** for backwards and future compatibility.
+	**
+	** Pkt size for allocating is data length +6 (for additional status
+	** words, length and ctl!)
+	**
+	** If odd size then last byte is included in this header.
+	*/
+	numPages =   ((length & 0xfffe) + 6);
+	numPages >>= 8; // Divide by 256
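+	/*
+	 . Worked example (illustrative): a full-sized 1514 byte frame gives
+	 . (1514 & 0xfffe) + 6 = 1520, and 1520 >> 8 = 5 pages, well below
+	 . the limit of 7 checked just below.
+	*/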
+
+	if (numPages > 7 ) {
+		rtdm_printk("%s: Far too big packet error. \n", dev->name);
+		/* freeing the packet is a good thing here... but should
+		 . any packets of this size get down here?   */
+		kfree_rtskb(skb);
+		lp->saved_skb = NULL;
+		/* this IS an error, but, i don't want the skb saved */
+		rtnetif_wake_queue(dev);
+		return 0;
+	}
+	/* either way, a packet is waiting now */
+	lp->packets_waiting++;
+
+	/* now, try to allocate the memory */
+	SMC_SELECT_BANK( 2 );
+	outw( MC_ALLOC | numPages, ioaddr + MMU_CMD_REG );
+	/*
+	. Performance Hack
+	.
+	. wait a short amount of time.. if I can send a packet now, I send
+	. it now.  Otherwise, I enable an interrupt and wait for one to be
+	. available.
+	.
+	. I could have handled this a slightly different way, by checking to
+	. see if any memory was available in the FREE MEMORY register.  However,
+	. either way, I need to generate an allocation, and the allocation works
+	. no matter what, so I saw no point in checking free memory.
+	*/
+	time_out = MEMORY_WAIT_TIME;
+	do {
+		status = inb( ioaddr + INT_REG );
+		if ( status & IM_ALLOC_INT ) {
+			/* acknowledge the interrupt */
+			outb( IM_ALLOC_INT, ioaddr + INT_REG );
+			break;
+		}
+	} while ( -- time_out );
+
+	if ( !time_out ) {
+		kfree_rtskb(skb);
+		lp->saved_skb = NULL;
+		rtnetif_wake_queue(dev);
+
+		rtdm_printk("%s: ERROR: unable to allocate card memory for "
+			"packet transmission.\n", dev->name);
+		return 0;
+	}
+	/* or YES! I can send the packet now.. */
+	smc_hardware_send_packet(dev);
+	rtnetif_wake_queue(dev);
+	return 0;
+}
+
+/*
+ . Function:  smc_hardware_send_packet(struct device * )
+ . Purpose:
+ .	This sends the actual packet to the SMC9xxx chip.
+ .
+ . Algorithm:
+ .	First, see if a saved_skb is available.
+ .		( this should NOT be called if there is no 'saved_skb' )
+ .	Now, find the packet number that the chip allocated
+ .	Point the data pointers at it in memory
+ .	Set the length word in the chip's memory
+ .	Dump the packet to chip memory
+ .	Check if a last byte is needed ( odd length packet )
+ .		if so, set the control flag right
+ .	Tell the card to send it
+ .	Enable the transmit interrupt, so I know if it failed
+ .	Free the kernel data if I actually sent it.
+*/
+static void smc_hardware_send_packet( struct rtnet_device * dev )
+{
+	struct smc_local *lp = (struct smc_local *)dev->priv;
+	byte			packet_no;
+	struct rtskb *	skb = lp->saved_skb;
+	word			length;
+	unsigned short		ioaddr;
+	void			* buf;
+	rtdm_lockctx_t		context;
+
+	PRINTK3("%s:smc_hardware_send_packet\n", dev->name);
+
+	ioaddr = dev->base_addr;
+
+	if ( !skb ) {
+		PRINTK("%s: In XMIT with no packet to send \n", dev->name);
+		return;
+	}
+	length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+	buf = skb->data;
+
+	/* If I get here, I _know_ there is a packet slot waiting for me */
+	packet_no = inb( ioaddr + AR_REG );
+	if ( packet_no & AR_FAILED ) {
+		/* or isn't there?  BAD CHIP! */
+		rtdm_printk(KERN_DEBUG "%s: Memory allocation failed. \n",
+			dev->name);
+		kfree_rtskb(skb);
+		lp->saved_skb = NULL;
+		rtnetif_wake_queue(dev);
+		return;
+	}
+
+	/* we have a packet address, so tell the card to use it */
+	outb( packet_no, ioaddr + PN_REG );
+
+	/* point to the beginning of the packet */
+	outw( PTR_AUTOINC , ioaddr + PTR_REG );
+
+	PRINTK3("%s: Trying to xmit packet of length %x\n",
+		dev->name, length);
+
+#if defined(SMC_DEBUG) && (SMC_DEBUG > 2)
+	rtdm_printk("Transmitting Packet\n");
+	print_packet( buf, length );
+#endif
+
+	/* send the packet length ( +6 for status, length and ctl byte )
+	   and the status word ( set to zeros ) */
+#ifdef USE_32_BIT
+	outl(  (length +6 ) << 16 , ioaddr + DATA_REG );
+#else
+	outw( 0, ioaddr + DATA_REG );
+	/* send the packet length ( +6 for status words, length, and ctl*/
+	outb( (length+6) & 0xFF,ioaddr + DATA_REG );
+	outb( (length+6) >> 8 , ioaddr + DATA_REG );
+#endif
+
+	/* send the actual data
+	 . I _think_ it's faster to send the longs first, and then
+	 . mop up by sending the last word.  It depends heavily
+	 . on alignment, at least on the 486.  Maybe it would be
+	 . a good idea to check which is optimal?  But that could take
+	 . almost as much time as is saved?
+	*/
+#ifdef USE_32_BIT
+	outsl(ioaddr + DATA_REG, buf,  length >> 2 );
+	if ( length & 0x2  )
+		outw(*((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_REG);
+#else
+	outsw(ioaddr + DATA_REG , buf, (length ) >> 1);
+#endif // USE_32_BIT
+
+	/* Send the last byte, if there is one.   */
+	if ( (length & 1) == 0 ) {
+		outw( 0, ioaddr + DATA_REG );
+	} else {
+		outb( ((char *)buf)[length -1 ], ioaddr + DATA_REG );
+		outb( 0x20, ioaddr + DATA_REG); // Set odd bit in CONTROL BYTE
+	}
+
+	rtdm_lock_irqsave(context);
+
+	/* get and patch time stamp just before the transmission */
+	if (skb->xmit_stamp) {
+		nanosecs_abs_t xmit_stamp =
+			cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+
+		/* point to the patch address */
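+		/*
+		 * The first four bytes of the TX buffer hold the status and
+		 * byte-count words written above, so the payload starts at
+		 * offset 4; hence the "4 +" in the patch offset below.
+		 */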
+		outw(PTR_AUTOINC |
+			(4 + (char *)skb->xmit_stamp - (char *)skb->data),
+			ioaddr + PTR_REG);
+		/* we don't check alignments, we just write bytes */
+		outsb(ioaddr + DATA_REG, (char *)&xmit_stamp,
+			sizeof(xmit_stamp));
+	}
+
+	/* enable the interrupts */
+	SMC_ENABLE_INT( (IM_TX_INT | IM_TX_EMPTY_INT) );
+
+	/* and let the chipset deal with it */
+	outw( MC_ENQUEUE , ioaddr + MMU_CMD_REG );
+
+	rtdm_lock_irqrestore(context);
+
+	PRINTK2("%s: Sent packet of length %d \n", dev->name, length);
+
+	lp->saved_skb = NULL;
+	kfree_rtskb(skb);
+
+//	dev->trans_start = jiffies;
+
+	/* we can send another packet */
+	rtnetif_wake_queue(dev);
+
+
+	return;
+}
+
+/*-------------------------------------------------------------------------
+ |
+ | smc_init( struct device * dev )
+ |   Input parameters:
+ |	dev->base_addr == 0, try to find all possible locations
+ |	dev->base_addr == 1..0x1ff, return failure code
+ |	dev->base_addr == <anything else>   this is the address to check
+ |
+ |   Output:
+ |	0 --> there is a device
+ |	anything else, error
+ |
+ ---------------------------------------------------------------------------
+*/
+int __init smc_init(struct rtnet_device *dev)
+{
+	int i;
+	int base_addr = dev ? dev->base_addr : 0;
+
+	PRINTK2("CARDNAME:smc_init\n");
+
+	/*  try a specific location */
+	if (base_addr > 0x1ff)
+		return smc_probe(dev, base_addr);
+	else if ( 0 != base_addr )
+			return -ENXIO;
+
+	/* check every ethernet address */
+	for (i = 0; smc_portlist[i]; i++)
+		if ( smc_probe(dev,smc_portlist[i]) ==0)
+			return 0;
+
+	/* couldn't find anything */
+	return -ENODEV;
+}
+
+
+#ifndef NO_AUTOPROBE
+/*----------------------------------------------------------------------
+ . smc_findirq
+ .
+ . This routine has a simple purpose -- make the SMC chip generate an
+ . interrupt, so an auto-detect routine can detect it, and find the IRQ.
+ ------------------------------------------------------------------------
+*/
+int __init smc_findirq( int ioaddr )
+{
+	int	timeout = 20;
+	unsigned long cookie;
+
+	PRINTK2("CARDNAME:smc_findirq\n");
+
+	/* I have to do a STI() here, because this is called from
+	   a routine that does an CLI during this process, making it
+	   rather difficult to get interrupts for auto detection */
+	local_irq_enable();
+
+	cookie = probe_irq_on();
+
+	/*
+	 * What I try to do here is trigger an ALLOC_INT. This is done
+	 * by allocating a small chunk of memory, which will give an interrupt
+	 * when done.
+	 */
+
+
+	SMC_SELECT_BANK(2);
+	/* enable ALLOCation interrupts ONLY */
+	outb( IM_ALLOC_INT, ioaddr + IM_REG );
+
+	/*
+	 . Allocate 512 bytes of memory.  Note that the chip was just
+	 . reset so all the memory is available
+	*/
+	outw( MC_ALLOC | 1, ioaddr + MMU_CMD_REG );
+
+	/*
+	 . Wait until positive that the interrupt has been generated
+	*/
+	while ( timeout ) {
+		byte	int_status;
+
+		int_status = inb( ioaddr + INT_REG );
+
+		if ( int_status & IM_ALLOC_INT )
+			break;		/* got the interrupt */
+		timeout--;
+	}
+
+	/* there is really nothing that I can do here if timeout fails,
+	   as autoirq_report will return a 0 anyway, which is what I
+	   want in this case.   Plus, the clean up is needed in both
+	   cases.  */
+
+	/* DELAY HERE!
+	   On a fast machine, the status might change before the interrupt
+	   is given to the processor.  This means that the interrupt was
+	   never detected, and autoirq_report fails to report anything.
+	   This should fix autoirq_* problems.
+	*/
+	mdelay(10);
+
+	/* and disable all interrupts again */
+	outb( 0, ioaddr + IM_REG );
+
+	/* clear hardware interrupts again, because that's how it
+	   was when I was called... */
+	local_irq_disable();
+
+	/* and return what I found */
+	return probe_irq_off(cookie);
+}
+#endif
+
+/*----------------------------------------------------------------------
+ . Function: smc_probe( int ioaddr )
+ .
+ . Purpose:
+ .	Tests to see if a given ioaddr points to an SMC91111 chip.
+ .	Returns a 0 on success
+ .
+ . Algorithm:
+ .	(1) see if the high byte of BANK_SELECT is 0x33
+ .	(2) compare the ioaddr with the base register's address
+ .	(3) see if I recognize the chip ID in the appropriate register
+ .
+ .---------------------------------------------------------------------
+ */
+/*---------------------------------------------------------------
+ . Here I do typical initialization tasks.
+ .
+ . o  Initialize the structure if needed
+ . o  print out my vanity message if not done so already
+ . o  print out what type of hardware is detected
+ . o  print out the ethernet address
+ . o  find the IRQ
+ . o  set up my private data
+ . o  configure the dev structure with my subroutines
+ . o  actually GRAB the irq.
+ . o  GRAB the region
+ .-----------------------------------------------------------------*/
+
+static int __init smc_probe(struct rtnet_device *dev, int ioaddr )
+{
+	int i, memory, retval;
+	static unsigned version_printed = 0;
+	unsigned int	bank;
+
+	const char *version_string;
+
+	/*registers */
+	word	revision_register;
+	word	base_address_register;
+	word	memory_info_register;
+	/*=> Pramod */
+	struct smc_local *lp;
+	/*<= Pramod */
+
+	PRINTK2("CARDNAME:smc_probe\n");
+
+	/* Grab the region so that no one else tries to probe our ioports. */
+	if (!request_region(ioaddr, SMC_IO_EXTENT, dev->name)) return -EBUSY;
+
+	/* First, see if the high byte is 0x33 */
+	bank = inw( ioaddr + BANK_SELECT );
+	if ( (bank & 0xFF00) != 0x3300 ) { retval = -ENODEV; goto err_out; }
+
+	/* The above MIGHT indicate a device, but I need to write to further test this.  */
+	outw( 0x0, ioaddr + BANK_SELECT );
+	bank = inw( ioaddr + BANK_SELECT );
+	if ( (bank & 0xFF00 ) != 0x3300 )
+	{
+		retval = -ENODEV;
+		goto err_out;
+	}
+
+	/* well, we've already written once, so hopefully another time won't
+	   hurt.  This time, I need to switch the bank register to bank 1,
+	   so I can access the base address register */
+	SMC_SELECT_BANK(1);
+	base_address_register = inw( ioaddr + BASE_REG );
+	if ( ioaddr != ( base_address_register >> 3 & 0x3E0 ) )
+	{
+		printk("CARDNAME: IOADDR %x doesn't match configuration (%x)."
+			"Probably not a SMC chip\n",
+			ioaddr, base_address_register >> 3 & 0x3E0 );
+		/* well, the base address register didn't match.  Must not have
+		   been a SMC chip after all. */
+		retval = -ENODEV;
+		goto err_out;
+	}
+
+	/*  check if the revision register is something that I recognize.
+	    These might need to be added to later, as future revisions
+	    could be added.  */
+	SMC_SELECT_BANK(3);
+	revision_register  = inw( ioaddr + REV_REG );
+	if ( !chip_ids[ ( revision_register  >> 4 ) & 0xF  ] )
+	{
+		/* I don't recognize this chip, so... */
+		printk("CARDNAME: IO %x: Unrecognized revision register:"
+			" %x, Contact author. \n",
+			ioaddr, revision_register );
+		retval =  -ENODEV;
+		goto err_out;
+	}
+
+	/* at this point I'll assume that the chip is an SMC9xxx.
+	   It might be prudent to check a listing of MAC addresses
+	   against the hardware address, or do some other tests. */
+
+	if (version_printed++ == 0)
+		printk("%s", version);
+
+	/* fill in some of the fields */
+	dev->base_addr = ioaddr;
+
+	/*
+	 . Get the MAC address ( bank 1, regs 4 - 9 )
+	*/
+	SMC_SELECT_BANK( 1 );
+	for ( i = 0; i < 6; i += 2 )
+	{
+		word	address;
+
+		address = inw( ioaddr + ADDR0_REG + i  );
+		dev->dev_addr[ i + 1] = address >> 8;
+		dev->dev_addr[ i ] = address & 0xFF;
+	}
+
+	/* get the memory information */
+
+	SMC_SELECT_BANK( 0 );
+	memory_info_register = inw( ioaddr + MIR_REG );
+	memory = memory_info_register & (word)0x00ff;
+	memory *= LAN91C111_MEMORY_MULTIPLIER;
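+	/*
+	 . Illustrative example: a MIR low byte of 0x04 would report
+	 . 4 * LAN91C111_MEMORY_MULTIPLIER = 8 KiB of on-chip packet memory.
+	*/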
+
+	/*
+	 Now, I want to find out more about the chip.  This is sort of
+	 redundant, but it's cleaner to have it in both, rather than having
+	 one VERY long probe procedure.
+	*/
+	SMC_SELECT_BANK(3);
+	revision_register  = inw( ioaddr + REV_REG );
+	version_string = chip_ids[ ( revision_register  >> 4 ) & 0xF  ];
+	if ( !version_string )
+	{
+		/* I shouldn't get here because this call was done before.... */
+		retval =  -ENODEV;
+		goto err_out;
+	}
+
+	/* now, reset the chip, and put it into a known state */
+	smc_reset( dev );
+
+	/*
+	 . If dev->irq is 0, then the device has to be banged on to see
+	 . what the IRQ is.
+	 .
+	 . This banging doesn't always detect the IRQ, for unknown reasons.
+	 . a workaround is to reset the chip and try again.
+	 .
+	 . Interestingly, the DOS packet driver *SETS* the IRQ on the card to
+	 . be what is requested on the command line.   I don't do that, mostly
+	 . because the card that I have uses a non-standard method of accessing
+	 . the IRQs, and because this _should_ work in most configurations.
+	 .
+	 . Specifying an IRQ is done with the assumption that the user knows
+	 . what (s)he is doing.  No checking is done!!!!
+	 .
+	*/
+	if ( dev->irq < 2 ) {
+		int	trials;
+
+		trials = 3;
+		while ( trials-- ) {
+			dev->irq = smc_findirq( ioaddr );
+			if ( dev->irq )
+				break;
+			/* kick the card and try again */
+			smc_reset( dev );
+		}
+	}
+	if (dev->irq == 0 ) {
+		printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n",
+			dev->name);
+		retval =  -ENODEV;
+		goto err_out;
+	}
+
+	if (dev->irq == 2) {
+		/* Fixup for users that don't know that IRQ 2 is really IRQ 9,
+		 * or don't know which one to set.
+		 */
+		dev->irq = 9;
+	}
+
+	/* now, print out the card info, in a short format.. */
+
+	printk("%s: %s(rev:%d) at %#3x IRQ:%d MEMSIZE:%db NOWAIT:%d ",
+		dev->name,
+		version_string, revision_register & 0xF, ioaddr, dev->irq,
+		memory, dev->dma);
+	/*
+	 . Print the Ethernet address
+	*/
+	printk("ADDR: ");
+	for (i = 0; i < 5; i++)
+		printk("%2.2x:", dev->dev_addr[i] );
+	printk("%2.2x \n", dev->dev_addr[5] );
+
+
+	/* Initialize the private structure. */
+	/*if (dev->priv == NULL) {
+		dev->priv = kmalloc(sizeof(struct smc_local), GFP_KERNEL);
+		if (dev->priv == NULL) {
+			retval = -ENOMEM;
+			goto err_out;
+		}
+	}*/
+	/* set the private data to zero by default */
+	memset(dev->priv, 0, sizeof(struct smc_local));
+
+	/* Fill in the fields of the device structure with ethernet values. */
+//	ether_setup(dev);
+
+	rt_stack_connect(dev, &STACK_manager);
+
+	/* Grab the IRQ */
+	retval = rtdm_irq_request(&((struct smc_local *)dev->priv)->irq_handle,
+				  dev->irq, &smc_interrupt, 0,
+				  "rt_smc91111", dev);
+	if (retval) {
+		printk("%s: unable to get IRQ %d (irqval=%d).\n",
+			dev->name, dev->irq, retval);
+		//kfree (dev->priv);
+		//dev->priv = NULL;
+		goto err_out;
+	}
+
+	dev->open			= smc_open;
+	dev->stop			= smc_close;
+	dev->hard_start_xmit	= smc_wait_to_send_packet;
+	dev->get_stats			= smc_query_statistics;
+//	dev->tx_timeout			= smc_timeout;
+#ifdef	HAVE_MULTICAST
+//	dev->set_multicast_list		= &smc_set_multicast_list;
+#endif
+
+	/* => Store the ChipRevision and ChipID, to be used in resolving the Odd-Byte issue in RevB of LAN91C111; Pramod */
+	SMC_SELECT_BANK(3);
+	revision_register  = inw( ioaddr + REV_REG );
+	lp = (struct smc_local *)dev->priv;
+	lp->ChipID = (revision_register >> 4) & 0xF;
+	lp->ChipRev = revision_register & 0xF;
+
+	return 0;
+
+err_out:
+	release_region (ioaddr, SMC_IO_EXTENT);
+	return retval;
+}
+
+#if defined(SMC_DEBUG) && (SMC_DEBUG > 2)
+static void print_packet( byte * buf, int length )
+{
+	int i;
+	int remainder;
+	int lines;
+
+	rtdm_printk("Packet of length %d \n", length );
+
+#if SMC_DEBUG > 3
+	lines = length / 16;
+	remainder = length % 16;
+
+	for ( i = 0; i < lines ; i ++ ) {
+		int cur;
+
+		for ( cur = 0; cur < 8; cur ++ ) {
+			byte a, b;
+
+			a = *(buf ++ );
+			b = *(buf ++ );
+			rtdm_printk("%02x%02x ", a, b );
+		}
+		rtdm_printk("\n");
+	}
+	for ( i = 0; i < remainder/2 ; i++ ) {
+		byte a, b;
+
+		a = *(buf ++ );
+		b = *(buf ++ );
+		rtdm_printk("%02x%02x ", a, b );
+	}
+	rtdm_printk("\n");
+#endif
+}
+#endif
+
+
+/*
+ * Open and Initialize the board
+ *
+ * Set up everything, reset the card, etc ..
+ *
+ */
+static int smc_open(struct rtnet_device *dev)
+{
+	struct smc_local *lp	= (struct smc_local *)dev->priv;
+	int	ioaddr = dev->base_addr;
+	int	i;	/* used to set hw ethernet address */
+
+	PRINTK2("%s:smc_open\n", dev->name);
+
+	/* clear out all the junk that was put here before... */
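+	/*
+	 . Note: the size below evaluates to offsetof(struct smc_local,
+	 . irq_handle), so everything up to, but not including, the RTDM IRQ
+	 . handle registered in smc_probe() is cleared.
+	*/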
+	memset(dev->priv, 0, (size_t)&((struct smc_local *)0)->irq_handle);
+
+	rtnetif_start_queue(dev);
+
+	// Setup the default Register Modes
+	lp->tcr_cur_mode = TCR_DEFAULT;
+	lp->rcr_cur_mode = RCR_DEFAULT;
+	lp->rpc_cur_mode = RPC_DEFAULT;
+
+#ifdef DISABLED____CONFIG_SYSCTL
+	// Set default parameters (files)
+	lp->ctl_swfdup = 0;
+	lp->ctl_ephloop = 0;
+	lp->ctl_miiop = 0;
+	lp->ctl_autoneg = 1;
+	lp->ctl_rfduplx = 1;
+	lp->ctl_rspeed = 100;
+	lp->ctl_afduplx = 1;
+	lp->ctl_aspeed = 100;
+	lp->ctl_lnkfail = 1;
+	lp->ctl_forcol = 0;
+	lp->ctl_filtcar = 0;
+#endif /* CONFIG_SYSCTL */
+
+	/* reset the hardware */
+
+	smc_reset( dev );
+	smc_enable( dev );
+
+	/* Configure the PHY */
+	smc_phy_configure(dev);
+
+	smc_set_multicast_list(dev);
+
+	/*
+		According to Becker, I have to set the hardware address
+		at this point, because the (l)user can set it with an
+		ioctl.  Easily done...
+	*/
+	SMC_SELECT_BANK( 1 );
+	for ( i = 0; i < 6; i += 2 ) {
+		word	address;
+
+		address = dev->dev_addr[ i + 1 ] << 8 ;
+		address  |= dev->dev_addr[ i ];
+		outw( address, ioaddr + ADDR0_REG + i );
+	}
+
+#ifdef DISABLED____CONFIG_SYSCTL
+	smc_sysctl_register(dev);
+#endif /* CONFIG_SYSCTL */
+
+	rtnetif_start_queue(dev);
+	return 0;
+}
+
+/*-------------------------------------------------------------
+ .
+ . smc_rcv -  receive a packet from the card
+ .
+ . There is ( at least ) a packet waiting to be read from
+ . chip-memory.
+ .
+ . o Read the status
+ . o If an error, record it
+ . o otherwise, read in the packet
+ --------------------------------------------------------------
+*/
+static inline void smc_rcv(struct rtnet_device *dev)
+{
+	struct smc_local *lp = (struct smc_local *)dev->priv;
+	int	ioaddr = dev->base_addr;
+	int	packet_number;
+	word	status;
+	word	packet_length;
+	nanosecs_abs_t	time_stamp = rtdm_clock_read();
+	int		timeout;
+
+	PRINTK3("%s:smc_rcv\n", dev->name);
+
+	/* assume bank 2 */
+
+	packet_number = inw( ioaddr + RXFIFO_REG );
+
+	if ( packet_number & RXFIFO_REMPTY ) {
+
+		/* we got called, but nothing was on the FIFO */
+		PRINTK("%s: WARNING: smc_rcv with nothing on FIFO. \n",
+			dev->name);
+		/* don't need to restore anything */
+		return;
+	}
+
+	/*  start reading from the start of the packet */
+	outw( PTR_READ | PTR_RCV | PTR_AUTOINC, ioaddr + PTR_REG );
+	inw( ioaddr + MMU_CMD_REG ); /* min delay to avoid errors... */
+
+	/* First two words are status and packet_length */
+	status		= inw( ioaddr + DATA_REG );
+	packet_length	= inw( ioaddr + DATA_REG );
+
+	packet_length &= 0x07ff;  /* mask off top bits */
+
+	PRINTK2("RCV: STATUS %4x LENGTH %4x\n", status, packet_length );
+
+	if ( !(status & RS_ERRORS ) ){
+		/* do stuff to make a new packet */
+		struct rtskb  * skb;
+		void		* data;
+
+		/* set multicast stats */
+		if ( status & RS_MULTICAST )
+			lp->stats.multicast++;
+
+		// Allocate enough memory for entire receive frame, to be safe
+		skb = rtnetdev_alloc_rtskb(dev, packet_length);
+
+		/* Adjust for having already read the first two words */
+		packet_length -= 4;
+
+		if ( skb == NULL ) {
+			rtdm_printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
+				dev->name);
+			lp->stats.rx_dropped++;
+			goto done;
+		}
+
+		/*
+		 ! This should work without alignment, but it could be
+		 ! slower in the worst case
+		*/
+		/* TODO: Should I use 32bit alignment here ? */
+		rtskb_reserve( skb, 2 );   /* 16 bit alignment */
+
+		/* =>
+    ODD-BYTE ISSUE : The odd byte problem has been fixed in the LAN91C111 Rev B.
+		So we check if the Chip Revision, stored in smc_local->ChipRev, is 1.
+		If so then we increment the packet length only if RS_ODDFRAME is set.
+		If the Chip's revision is equal to 0, then we blindly increment the packet length
+		by 1, thus always assuming that the packet is odd length, leaving the higher layer
+		to decide the actual length.
+			-- Pramod
+		<= */
+		if ((9 == lp->ChipID) && (1 == lp->ChipRev))
+		{
+			if (status & RS_ODDFRAME)
+				data = rtskb_put( skb, packet_length + 1 );
+			else
+				data = rtskb_put( skb, packet_length);
+
+		}
+		else
+		{
+			// set odd length for bug in LAN91C111, REV A
+			// which never sets RS_ODDFRAME
+			data = rtskb_put( skb, packet_length + 1 );
+		}
+
+#ifdef USE_32_BIT
+		PRINTK3(" Reading %d dwords (and %d bytes) \n",
+			packet_length >> 2, packet_length & 3 );
+		/* QUESTION:  Like in the TX routine, do I want
+		   to read the DWORDs or the bytes first, or some
+		   mixture.  A mixture might improve already slow PIO
+		   performance  */
+		insl(ioaddr + DATA_REG , data, packet_length >> 2 );
+		/* read the left over bytes */
+		insb( ioaddr + DATA_REG, data + (packet_length & 0xFFFFFC),
+			packet_length & 0x3  );
+#else
+		PRINTK3(" Reading %d words and %d byte(s) \n",
+			(packet_length >> 1 ), packet_length & 1 );
+		insw(ioaddr + DATA_REG , data, packet_length >> 1);
+
+#endif // USE_32_BIT
+
+#if defined(SMC_DEBUG) && (SMC_DEBUG > 2)
+		rtdm_printk("Receiving Packet\n");
+		print_packet( data, packet_length );
+#endif
+
+		skb->protocol = rt_eth_type_trans(skb, dev );
+		skb->time_stamp = time_stamp;
+		rtnetif_rx(skb);
+		lp->stats.rx_packets++;
+	} else {
+		/* error ... */
+		lp->stats.rx_errors++;
+
+		if ( status & RS_ALGNERR )  lp->stats.rx_frame_errors++;
+		if ( status & (RS_TOOSHORT | RS_TOOLONG ) )
+			lp->stats.rx_length_errors++;
+		if ( status & RS_BADCRC)	lp->stats.rx_crc_errors++;
+	}
+
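+	/*
+	 . The MMU should be idle before MC_RELEASE is issued below; see the
+	 . caution after MC_RESET in smc_reset() about issuing back-to-back
+	 . MMU commands.
+	*/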
+	timeout = MMU_CMD_TIMEOUT;
+	while ( inw( ioaddr + MMU_CMD_REG ) & MC_BUSY ) {
+		rtdm_task_busy_sleep(1000); // Wait until not busy
+		if (--timeout == 0) {
+			rtdm_printk("%s: ERROR: timeout while waiting on MMU.\n",
+				dev->name);
+			break;
+		}
+	}
+done:
+	/*  error or good, tell the card to get rid of this packet */
+	outw( MC_RELEASE, ioaddr + MMU_CMD_REG );
+
+	return;
+}
+
+/*--------------------------------------------------------------------
+ .
+ . This is the main routine of the driver, to handle the net_device when
+ . it needs some attention.
+ .
+ . So:
+ .   first, save state of the chipset
+ .   branch off into routines to handle each case, and acknowledge
+ .	    each to the interrupt register
+ .   and finally restore state.
+ .
+ ---------------------------------------------------------------------*/
+static int smc_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *dev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	int ioaddr		= dev->base_addr;
+	struct smc_local *lp	= (struct smc_local *)dev->priv;
+
+	byte	status;
+	word	card_stats;
+	byte	mask;
+	int	timeout;
+	/* state registers */
+	word	saved_bank;
+	word	saved_pointer;
+
+	unsigned int old_packet_cnt = lp->stats.rx_packets;
+
+
+
+	PRINTK3("%s: SMC interrupt started \n", dev->name);
+
+/*	if (dev == NULL) {
+		rtdm_printk(KERN_WARNING "%s: irq %d for unknown device.\n",
+			dev->name, irq);
+		return;
+	}*/
+
+/* will Linux let this happen ??  If not, this costs some speed
+	if ( dev->interrupt ) {
+		printk(KERN_WARNING "%s: interrupt inside interrupt.\n",
+			dev->name);
+		return;
+	}
+
+	dev->interrupt = 1; */
+
+	saved_bank = inw( ioaddr + BANK_SELECT );
+
+	SMC_SELECT_BANK(2);
+	saved_pointer = inw( ioaddr + PTR_REG );
+
+	/* read the interrupt status register */
+	mask = inb( ioaddr + IM_REG );
+
+	/* disable all interrupts */
+	outb( 0, ioaddr + IM_REG );
+
+	/*
+	 * The packet reception will take some time (up to several hundred us).
+	 * Re-enable other irqs now so that no critical deadline will be missed.
+	 */
+	hard_local_irq_enable();
+
+	/* set a timeout value, so I don't stay here forever */
+	timeout = 4;
+
+	PRINTK2(KERN_WARNING "%s: MASK IS %x \n", dev->name, mask);
+	do {
+		/* read the status flag, and mask it */
+		status = inb( ioaddr + INT_REG ) & mask;
+		if (!status )
+			break;
+
+		PRINTK3(KERN_WARNING "%s: Handling interrupt status %x \n",
+			dev->name, status);
+
+		if (status & IM_RCV_INT) {
+			/* Got a packet(s). */
+			PRINTK2(KERN_WARNING
+				"%s: Receive Interrupt\n", dev->name);
+			smc_rcv(dev);
+		} else if (status & IM_TX_INT ) {
+			rtdm_printk(KERN_ERR "%s: TX ERROR!\n", dev->name);
+			//smc_tx(dev);
+			// Acknowledge the interrupt
+			outb(IM_TX_INT, ioaddr + INT_REG );
+		} else if (status & IM_TX_EMPTY_INT ) {
+			/* update stats */
+			SMC_SELECT_BANK( 0 );
+			card_stats = inw( ioaddr + COUNTER_REG );
+			/* single collisions */
+			lp->stats.collisions += card_stats & 0xF;
+			card_stats >>= 4;
+			/* multiple collisions */
+			lp->stats.collisions += card_stats & 0xF;
+
+			/* these are for when linux supports these statistics */
+			SMC_SELECT_BANK( 2 );
+			PRINTK2(KERN_WARNING "%s: TX_BUFFER_EMPTY handled\n",
+				dev->name);
+			// Acknowledge the interrupt
+			outb( IM_TX_EMPTY_INT, ioaddr + INT_REG );
+			mask &= ~IM_TX_EMPTY_INT;
+			lp->stats.tx_packets += lp->packets_waiting;
+			lp->packets_waiting = 0;
+
+		} else if (status & IM_ALLOC_INT ) {
+			PRINTK2(KERN_DEBUG "%s: Allocation interrupt \n",
+				dev->name);
+			/* clear this interrupt so it doesn't happen again */
+			mask &= ~IM_ALLOC_INT;
+
+		} else if (status & IM_RX_OVRN_INT ) {
+			lp->stats.rx_errors++;
+			lp->stats.rx_fifo_errors++;
+			// Acknowledge the interrupt
+			outb( IM_RX_OVRN_INT, ioaddr + INT_REG );
+		} else if (status & IM_EPH_INT ) {
+			PRINTK("%s: UNSUPPORTED: EPH INTERRUPT \n",
+				dev->name);
+		} else if (status & IM_MDINT ) {
+			//smc_phy_interrupt(dev);
+			PRINTK("%s: UNSUPPORTED: MD INTERRUPT \n",
+				dev->name);
+			// Acknowledge the interrupt
+			outb(IM_MDINT, ioaddr + INT_REG );
+		} else if (status & IM_ERCV_INT ) {
+			PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT \n",
+				dev->name);
+			// Acknowledge the interrupt
+			outb( IM_ERCV_INT, ioaddr + INT_REG );
+		}
+	} while ( timeout -- );
+
+
+	/* restore register states */
+
+	SMC_SELECT_BANK( 2 );
+
+	outb( mask, ioaddr + IM_REG );
+
+	PRINTK3( KERN_WARNING "%s: MASK is now %x \n", dev->name, mask);
+	outw( saved_pointer, ioaddr + PTR_REG );
+
+	SMC_SELECT_BANK( saved_bank );
+
+	if (old_packet_cnt != lp->stats.rx_packets)
+		rt_mark_stack_mgr(dev);
+
+	hard_local_irq_disable();
+
+	//dev->interrupt = 0;
+	PRINTK3("%s: Interrupt done\n", dev->name);
+	return RTDM_IRQ_HANDLED;
+}
+
+
+/*----------------------------------------------------
+ . smc_close
+ .
+ . this makes the board clean up everything that it can
+ . and not talk to the outside world.   Caused by
+ . an 'ifconfig ethX down'
+ .
+ -----------------------------------------------------*/
+static int smc_close(struct rtnet_device *dev)
+{
+	rtnetif_stop_queue(dev);
+	//dev->start = 0;
+
+	PRINTK2("%s:smc_close\n", dev->name);
+
+#ifdef DISABLED____CONFIG_SYSCTL
+	smc_sysctl_unregister(dev);
+#endif /* CONFIG_SYSCTL */
+
+	/* clear everything */
+	smc_shutdown( dev->base_addr );
+
+	/* Update the statistics here. */
+
+	return 0;
+}
+
+/*------------------------------------------------------------
+ . Get the current statistics.
+ . This may be called with the card open or closed.
+ .-------------------------------------------------------------*/
+static struct net_device_stats* smc_query_statistics(struct rtnet_device *rtdev)
+{
+	struct smc_local *lp = (struct smc_local *)rtdev->priv;
+
+	PRINTK2("%s:smc_query_statistics\n", rtdev->name);
+
+	return &lp->stats;
+}
+
+/*-----------------------------------------------------------
+ . smc_set_multicast_list
+ .
+ . This routine will, depending on the values passed to it,
+ . either make it accept multicast packets, go into
+ . promiscuous mode ( for TCPDUMP and cousins ) or accept
+ . a select set of multicast packets
+*/
+static void smc_set_multicast_list(struct rtnet_device *dev)
+{
+	short ioaddr = dev->base_addr;
+
+	PRINTK2("%s:smc_set_multicast_list\n", dev->name);
+
+	SMC_SELECT_BANK(0);
+	if ( dev->flags & IFF_PROMISC )
+		{
+		PRINTK2("%s:smc_set_multicast_list:RCR_PRMS\n", dev->name);
+		outw( inw(ioaddr + RCR_REG ) | RCR_PRMS, ioaddr + RCR_REG );
+		}
+
+/* BUG?  I never disable promiscuous mode if multicasting was turned on.
+   Now, I turn off promiscuous mode, but I don't do anything to multicasting
+   when promiscuous mode is turned on.
+*/
+
+	/* Here, I am setting this to accept all multicast packets.
+	   I don't need to zero the multicast table, because the flag is
+	   checked before the table is
+	*/
+	else if (dev->flags & IFF_ALLMULTI)
+		{
+		outw( inw(ioaddr + RCR_REG ) | RCR_ALMUL, ioaddr + RCR_REG );
+		PRINTK2("%s:smc_set_multicast_list:RCR_ALMUL\n", dev->name);
+		}
+
+	else  {
+		PRINTK2("%s:smc_set_multicast_list:~(RCR_PRMS|RCR_ALMUL)\n",
+			dev->name);
+		outw( inw( ioaddr + RCR_REG ) & ~(RCR_PRMS | RCR_ALMUL),
+			ioaddr + RCR_REG );
+
+		/*
+		  since I'm disabling all multicast entirely, I need to
+		  clear the multicast list
+		*/
+		SMC_SELECT_BANK( 3 );
+		outw( 0, ioaddr + MCAST_REG1 );
+		outw( 0, ioaddr + MCAST_REG2 );
+		outw( 0, ioaddr + MCAST_REG3 );
+		outw( 0, ioaddr + MCAST_REG4 );
+	}
+}
+
+#ifdef MODULE
+
+static struct rtnet_device *devSMC91111;
+int io = 0;
+int irq = 0;
+int nowait = 0;
+
+module_param(io, int, 0444);
+module_param(irq, int, 0444);
+module_param(nowait, int, 0444);
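+
+/*
+ . Typical manual load (illustrative values only; adjust the module file
+ . name and parameters to the actual build and board):
+ .
+ .	insmod rt_smc91111.o io=0x300 irq=9 nowait=1
+*/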
+
+/*------------------------------------------------------------
+ . Module initialization function
+ .-------------------------------------------------------------*/
+int __init init_module(void)
+{
+	int result;
+
+	PRINTK2("CARDNAME:init_module\n");
+	if (io == 0)
+		printk(KERN_WARNING
+		CARDNAME": You shouldn't use auto-probing with insmod!\n" );
+
+	devSMC91111 = rt_alloc_etherdev(sizeof(struct smc_local), 4 * 2 + 1);
+	if (devSMC91111 == NULL) {
+		printk (KERN_ERR "init_ethernet failed\n");
+		return -ENODEV;
+	}
+	rtdev_alloc_name(devSMC91111, "rteth%d");
+	rt_rtdev_connect(devSMC91111, &RTDEV_manager);
+	devSMC91111->vers = RTDEV_VERS_2_0;
+
+	/* copy the parameters from insmod into the device structure */
+	devSMC91111->base_addr	= io;
+	devSMC91111->irq		= irq;
+	devSMC91111->dma		= nowait; // Use DMA field for nowait
+	if ((result = smc_init(devSMC91111)) != 0)
+		return result;
+
+	if ((result = rt_register_rtnetdev(devSMC91111)) != 0) {
+		rt_rtdev_disconnect(devSMC91111);
+		release_region(devSMC91111->base_addr, SMC_IO_EXTENT);
+
+		rtdm_irq_free(&((struct smc_local *)devSMC91111->priv)->irq_handle);
+
+		rtdev_free(devSMC91111);
+
+		return result;
+	}
+
+	return 0;
+}
+
+/*------------------------------------------------------------
+ . Cleanup when module is removed with rmmod
+ .-------------------------------------------------------------*/
+void __exit cleanup_module(void)
+{
+	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+	rt_unregister_rtnetdev(devSMC91111);
+	rt_rtdev_disconnect(devSMC91111);
+
+	release_region(devSMC91111->base_addr, SMC_IO_EXTENT);
+
+	if (devSMC91111->priv) {
+		rtdm_irq_free(&((struct smc_local *)devSMC91111->priv)->irq_handle);
+	}
+
+	rtdev_free(devSMC91111);
+}
+
+#endif /* MODULE */
+
+
+#ifdef DISABLED____CONFIG_SYSCTL
+
+
+/*------------------------------------------------------------
+ . Modify a bit in the LAN91C111 register set
+ .-------------------------------------------------------------*/
+static word smc_modify_regbit(int bank, int ioaddr, int reg,
+	unsigned int bit, int val)
+{
+	word regval;
+
+	SMC_SELECT_BANK( bank );
+
+	regval = inw( ioaddr+reg );
+	if (val)
+		regval |= bit;
+	else
+		regval &= ~bit;
+
+	outw( regval, ioaddr+reg );
+	return(regval);
+}
+
+
+/*------------------------------------------------------------
+ . Retrieve a bit in the LAN91C111 register set
+ .-------------------------------------------------------------*/
+static int smc_get_regbit(int bank, int ioaddr, int reg, unsigned int bit)
+{
+	SMC_SELECT_BANK( bank );
+	if ( inw( ioaddr+reg ) & bit)
+		return(1);
+	else
+		return(0);
+}
+
+
+/*------------------------------------------------------------
+ . Modify a LAN91C111 register (word access only)
+ .-------------------------------------------------------------*/
+static void smc_modify_reg(int bank, int ioaddr, int reg, word val)
+{
+	SMC_SELECT_BANK( bank );
+	outw( val, ioaddr+reg );
+}
+
+
+/*------------------------------------------------------------
+ . Retrieve a LAN91C111 register (word access only)
+ .-------------------------------------------------------------*/
+static int smc_get_reg(int bank, int ioaddr, int reg)
+{
+	SMC_SELECT_BANK( bank );
+	return(inw( ioaddr+reg ));
+}
+
+
+static const char smc_info_string[] =
+"\n"
+"info           Provides this information blurb\n"
+"swver          Prints the software version information of this driver\n"
+"autoneg        Auto-negotiate Mode = 1\n"
+"rspeed         Requested Speed, 100=100Mbps, 10=10Mpbs\n"
+"rfduplx        Requested Full Duplex Operation\n"
+"aspeed         Actual Speed, 100=100Mbps, 10=10Mpbs\n"
+"afduplx        Actual Full Duplex Operation\n"
+"lnkfail        PHY Link Failure when 1\n"
+"miiop          External MII when 1, Internal PHY when 0\n"
+"swfdup         Switched Full Duplex Mode (allowed only in MII operation)\n"
+"ephloop        EPH Block Loopback\n"
+"forcol         Force a collision\n"
+"filtcar        Filter leading edge of carrier sense for 12 bit times\n"
+"freemem        Free buffer memory in bytes\n"
+"totmem         Total buffer memory in bytes\n"
+"leda           Output of LED-A (green)\n"
+"ledb           Output of LED-B (yellow)\n"
+"chiprev        Revision ID of the LAN91C111 chip\n"
+"";
+
+/*------------------------------------------------------------
+ . Sysctl handler for all integer parameters
+ .-------------------------------------------------------------*/
+static int smc_sysctl_handler(ctl_table *ctl, int write, struct file * filp,
+				void *buffer, size_t *lenp, loff_t *ppos)
+{
+	struct rtnet_device *dev = (struct rtnet_device*)ctl->extra1;
+	struct smc_local *lp = (struct smc_local *)ctl->extra2;
+	int ioaddr = dev->base_addr;
+	int *valp = ctl->data;
+	int val;
+	int ret;
+
+	// Update parameters from the real registers
+	switch (ctl->ctl_name)
+	{
+	case CTL_SMC_FORCOL:
+		*valp = smc_get_regbit(0, ioaddr, TCR_REG, TCR_FORCOL);
+		break;
+
+	case CTL_SMC_FREEMEM:
+		*valp = ( (word)smc_get_reg(0, ioaddr, MIR_REG) >> 8 )
+			* LAN91C111_MEMORY_MULTIPLIER;
+		break;
+
+
+	case CTL_SMC_TOTMEM:
+		*valp = ( smc_get_reg(0, ioaddr, MIR_REG) & (word)0x00ff )
+			* LAN91C111_MEMORY_MULTIPLIER;
+		break;
+
+	case CTL_SMC_CHIPREV:
+		*valp = smc_get_reg(3, ioaddr, REV_REG);
+		break;
+
+	case CTL_SMC_AFDUPLX:
+		*valp = (lp->lastPhy18 & PHY_INT_DPLXDET) ? 1 : 0;
+		break;
+
+	case CTL_SMC_ASPEED:
+		*valp = (lp->lastPhy18 & PHY_INT_SPDDET) ? 100 : 10;
+		break;
+
+	case CTL_SMC_LNKFAIL:
+		*valp = (lp->lastPhy18 & PHY_INT_LNKFAIL) ? 1 : 0;
+		break;
+
+	case CTL_SMC_LEDA:
+		*valp = (lp->rpc_cur_mode >> RPC_LSXA_SHFT) & (word)0x0007;
+		break;
+
+	case CTL_SMC_LEDB:
+		*valp = (lp->rpc_cur_mode >> RPC_LSXB_SHFT) & (word)0x0007;
+		break;
+
+	case CTL_SMC_MIIOP:
+		*valp = smc_get_regbit(1, ioaddr, CONFIG_REG, CONFIG_EXT_PHY);
+		break;
+
+#ifdef SMC_DEBUG
+	case CTL_SMC_REG_BSR:	// Bank Select
+		*valp = smc_get_reg(0, ioaddr, BSR_REG);
+		break;
+
+	case CTL_SMC_REG_TCR:	// Transmit Control
+		*valp = smc_get_reg(0, ioaddr, TCR_REG);
+		break;
+
+	case CTL_SMC_REG_ESR:	// EPH Status
+		*valp = smc_get_reg(0, ioaddr, EPH_STATUS_REG);
+		break;
+
+	case CTL_SMC_REG_RCR:	// Receive Control
+		*valp = smc_get_reg(0, ioaddr, RCR_REG);
+		break;
+
+	case CTL_SMC_REG_CTRR:	// Counter
+		*valp = smc_get_reg(0, ioaddr, COUNTER_REG);
+		break;
+
+	case CTL_SMC_REG_MIR:	// Memory Information
+		*valp = smc_get_reg(0, ioaddr, MIR_REG);
+		break;
+
+	case CTL_SMC_REG_RPCR:	// Receive/Phy Control
+		*valp = smc_get_reg(0, ioaddr, RPC_REG);
+		break;
+
+	case CTL_SMC_REG_CFGR:	// Configuration
+		*valp = smc_get_reg(1, ioaddr, CONFIG_REG);
+		break;
+
+	case CTL_SMC_REG_BAR:	// Base Address
+		*valp = smc_get_reg(1, ioaddr, BASE_REG);
+		break;
+
+	case CTL_SMC_REG_IAR0:	// Individual Address
+		*valp = smc_get_reg(1, ioaddr, ADDR0_REG);
+		break;
+
+	case CTL_SMC_REG_IAR1:	// Individual Address
+		*valp = smc_get_reg(1, ioaddr, ADDR1_REG);
+		break;
+
+	case CTL_SMC_REG_IAR2:	// Individual Address
+		*valp = smc_get_reg(1, ioaddr, ADDR2_REG);
+		break;
+
+	case CTL_SMC_REG_GPR:	// General Purpose
+		*valp = smc_get_reg(1, ioaddr, GP_REG);
+		break;
+
+	case CTL_SMC_REG_CTLR:	// Control
+		*valp = smc_get_reg(1, ioaddr, CTL_REG);
+		break;
+
+	case CTL_SMC_REG_MCR:	// MMU Command
+		*valp = smc_get_reg(2, ioaddr, MMU_CMD_REG);
+		break;
+
+	case CTL_SMC_REG_PNR:	// Packet Number
+		*valp = smc_get_reg(2, ioaddr, PN_REG);
+		break;
+
+	case CTL_SMC_REG_FPR:	// Allocation Result/FIFO Ports
+		*valp = smc_get_reg(2, ioaddr, RXFIFO_REG);
+		break;
+
+	case CTL_SMC_REG_PTR:	// Pointer
+		*valp = smc_get_reg(2, ioaddr, PTR_REG);
+		break;
+
+	case CTL_SMC_REG_DR:	// Data
+		*valp = smc_get_reg(2, ioaddr, DATA_REG);
+		break;
+
+	case CTL_SMC_REG_ISR:	// Interrupt Status/Mask
+		*valp = smc_get_reg(2, ioaddr, INT_REG);
+		break;
+
+	case CTL_SMC_REG_MTR1:	// Multicast Table Entry 1
+		*valp = smc_get_reg(3, ioaddr, MCAST_REG1);
+		break;
+
+	case CTL_SMC_REG_MTR2:	// Multicast Table Entry 2
+		*valp = smc_get_reg(3, ioaddr, MCAST_REG2);
+		break;
+
+	case CTL_SMC_REG_MTR3:	// Multicast Table Entry 3
+		*valp = smc_get_reg(3, ioaddr, MCAST_REG3);
+		break;
+
+	case CTL_SMC_REG_MTR4:	// Multicast Table Entry 4
+		*valp = smc_get_reg(3, ioaddr, MCAST_REG4);
+		break;
+
+	case CTL_SMC_REG_MIIR:	// Management Interface
+		*valp = smc_get_reg(3, ioaddr, MII_REG);
+		break;
+
+	case CTL_SMC_REG_REVR:	// Revision
+		*valp = smc_get_reg(3, ioaddr, REV_REG);
+		break;
+
+	case CTL_SMC_REG_ERCVR:	// Early RCV
+		*valp = smc_get_reg(3, ioaddr, ERCV_REG);
+		break;
+
+	case CTL_SMC_REG_EXTR:	// External
+		*valp = smc_get_reg(7, ioaddr, EXT_REG);
+		break;
+
+	case CTL_SMC_PHY_CTRL:
+		*valp = smc_read_phy_register(ioaddr, lp->phyaddr,
+			PHY_CNTL_REG);
+		break;
+
+	case CTL_SMC_PHY_STAT:
+		*valp = smc_read_phy_register(ioaddr, lp->phyaddr,
+			PHY_STAT_REG);
+		break;
+
+	case CTL_SMC_PHY_ID1:
+		*valp = smc_read_phy_register(ioaddr, lp->phyaddr,
+			PHY_ID1_REG);
+		break;
+
+	case CTL_SMC_PHY_ID2:
+		*valp = smc_read_phy_register(ioaddr, lp->phyaddr,
+			PHY_ID2_REG);
+		break;
+
+	case CTL_SMC_PHY_ADC:
+		*valp = smc_read_phy_register(ioaddr, lp->phyaddr,
+			PHY_AD_REG);
+		break;
+
+	case CTL_SMC_PHY_REMC:
+		*valp = smc_read_phy_register(ioaddr, lp->phyaddr,
+			PHY_RMT_REG);
+		break;
+
+	case CTL_SMC_PHY_CFG1:
+		*valp = smc_read_phy_register(ioaddr, lp->phyaddr,
+			PHY_CFG1_REG);
+		break;
+
+	case CTL_SMC_PHY_CFG2:
+		*valp = smc_read_phy_register(ioaddr, lp->phyaddr,
+			PHY_CFG2_REG);
+		break;
+
+	case CTL_SMC_PHY_INT:
+		*valp = smc_read_phy_register(ioaddr, lp->phyaddr,
+			PHY_INT_REG);
+		break;
+
+	case CTL_SMC_PHY_MASK:
+		*valp = smc_read_phy_register(ioaddr, lp->phyaddr,
+			PHY_MASK_REG);
+		break;
+
+#endif // SMC_DEBUG
+
+	default:
+		// Just ignore unsupported parameters
+		break;
+	}
+
+	// Save old state
+	val = *valp;
+
+	// Perform the generic integer operation
+	if ((ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos)) != 0)
+		return(ret);
+
+	// Write changes out to the registers
+	if (write && *valp != val) {
+
+		val = *valp;
+		switch (ctl->ctl_name) {
+
+		case CTL_SMC_SWFDUP:
+			if (val)
+				lp->tcr_cur_mode |= TCR_SWFDUP;
+			else
+				lp->tcr_cur_mode &= ~TCR_SWFDUP;
+
+			smc_modify_regbit(0, ioaddr, TCR_REG, TCR_SWFDUP, val);
+			break;
+
+		case CTL_SMC_EPHLOOP:
+			if (val)
+				lp->tcr_cur_mode |= TCR_EPH_LOOP;
+			else
+				lp->tcr_cur_mode &= ~TCR_EPH_LOOP;
+
+			smc_modify_regbit(0, ioaddr, TCR_REG, TCR_EPH_LOOP, val);
+			break;
+
+		case CTL_SMC_FORCOL:
+			if (val)
+				lp->tcr_cur_mode |= TCR_FORCOL;
+			else
+				lp->tcr_cur_mode &= ~TCR_FORCOL;
+
+			// Update the EPH block
+			smc_modify_regbit(0, ioaddr, TCR_REG, TCR_FORCOL, val);
+			break;
+
+		case CTL_SMC_FILTCAR:
+			if (val)
+				lp->rcr_cur_mode |= RCR_FILT_CAR;
+			else
+				lp->rcr_cur_mode &= ~RCR_FILT_CAR;
+
+			// Update the EPH block
+			smc_modify_regbit(0, ioaddr, RCR_REG, RCR_FILT_CAR, val);
+			break;
+
+		case CTL_SMC_RFDUPLX:
+			// Disallow changes if in auto-negotiation mode
+			if (lp->ctl_autoneg)
+				break;
+
+			if (val)
+				{
+				lp->rpc_cur_mode |= RPC_DPLX;
+				}
+			else
+				{
+				lp->rpc_cur_mode &= ~RPC_DPLX;
+				}
+
+			// Reconfigure the PHY
+			smc_phy_configure(dev);
+
+			break;
+
+		case CTL_SMC_RSPEED:
+			// Disallow changes if in auto-negotiation mode
+			if (lp->ctl_autoneg)
+				break;
+
+			if (val > 10)
+				lp->rpc_cur_mode |= RPC_SPEED;
+			else
+				lp->rpc_cur_mode &= ~RPC_SPEED;
+
+			// Reconfigure the PHY
+			smc_phy_configure(dev);
+
+			break;
+
+		case CTL_SMC_AUTONEG:
+			if (val)
+				lp->rpc_cur_mode |= RPC_ANEG;
+			else
+				lp->rpc_cur_mode &= ~RPC_ANEG;
+
+			// Reconfigure the PHY
+			smc_phy_configure(dev);
+
+			break;
+
+		case CTL_SMC_LEDA:
+			val &= 0x07; // Restrict to 3 ls bits
+			lp->rpc_cur_mode &= ~(word)(0x07<<RPC_LSXA_SHFT);
+			lp->rpc_cur_mode |= (word)(val<<RPC_LSXA_SHFT);
+
+			// Update the Internal PHY block
+			smc_modify_reg(0, ioaddr, RPC_REG, lp->rpc_cur_mode);
+			break;
+
+		case CTL_SMC_LEDB:
+			val &= 0x07; // Restrict to 3 ls bits
+			lp->rpc_cur_mode &= ~(word)(0x07<<RPC_LSXB_SHFT);
+			lp->rpc_cur_mode |= (word)(val<<RPC_LSXB_SHFT);
+
+			// Update the Internal PHY block
+			smc_modify_reg(0, ioaddr, RPC_REG, lp->rpc_cur_mode);
+			break;
+
+		case CTL_SMC_MIIOP:
+			// Update the Internal PHY block
+			smc_modify_regbit(1, ioaddr, CONFIG_REG,
+				CONFIG_EXT_PHY, val);
+			break;
+
+#ifdef SMC_DEBUG
+		case CTL_SMC_REG_BSR:	// Bank Select
+			smc_modify_reg(0, ioaddr, BSR_REG, val);
+			break;
+
+		case CTL_SMC_REG_TCR:	// Transmit Control
+			smc_modify_reg(0, ioaddr, TCR_REG, val);
+			break;
+
+		case CTL_SMC_REG_ESR:	// EPH Status
+			smc_modify_reg(0, ioaddr, EPH_STATUS_REG, val);
+			break;
+
+		case CTL_SMC_REG_RCR:	// Receive Control
+			smc_modify_reg(0, ioaddr, RCR_REG, val);
+			break;
+
+		case CTL_SMC_REG_CTRR:	// Counter
+			smc_modify_reg(0, ioaddr, COUNTER_REG, val);
+			break;
+
+		case CTL_SMC_REG_MIR:	// Memory Information
+			smc_modify_reg(0, ioaddr, MIR_REG, val);
+			break;
+
+		case CTL_SMC_REG_RPCR:	// Receive/Phy Control
+			smc_modify_reg(0, ioaddr, RPC_REG, val);
+			break;
+
+		case CTL_SMC_REG_CFGR:	// Configuration
+			smc_modify_reg(1, ioaddr, CONFIG_REG, val);
+			break;
+
+		case CTL_SMC_REG_BAR:	// Base Address
+			smc_modify_reg(1, ioaddr, BASE_REG, val);
+			break;
+
+		case CTL_SMC_REG_IAR0:	// Individual Address
+			smc_modify_reg(1, ioaddr, ADDR0_REG, val);
+			break;
+
+		case CTL_SMC_REG_IAR1:	// Individual Address
+			smc_modify_reg(1, ioaddr, ADDR1_REG, val);
+			break;
+
+		case CTL_SMC_REG_IAR2:	// Individual Address
+			smc_modify_reg(1, ioaddr, ADDR2_REG, val);
+			break;
+
+		case CTL_SMC_REG_GPR:	// General Purpose
+			smc_modify_reg(1, ioaddr, GP_REG, val);
+			break;
+
+		case CTL_SMC_REG_CTLR:	// Control
+			smc_modify_reg(1, ioaddr, CTL_REG, val);
+			break;
+
+		case CTL_SMC_REG_MCR:	// MMU Command
+			smc_modify_reg(2, ioaddr, MMU_CMD_REG, val);
+			break;
+
+		case CTL_SMC_REG_PNR:	// Packet Number
+			smc_modify_reg(2, ioaddr, PN_REG, val);
+			break;
+
+		case CTL_SMC_REG_FPR:	// Allocation Result/FIFO Ports
+			smc_modify_reg(2, ioaddr, RXFIFO_REG, val);
+			break;
+
+		case CTL_SMC_REG_PTR:	// Pointer
+			smc_modify_reg(2, ioaddr, PTR_REG, val);
+			break;
+
+		case CTL_SMC_REG_DR:	// Data
+			smc_modify_reg(2, ioaddr, DATA_REG, val);
+			break;
+
+		case CTL_SMC_REG_ISR:	// Interrupt Status/Mask
+			smc_modify_reg(2, ioaddr, INT_REG, val);
+			break;
+
+		case CTL_SMC_REG_MTR1:	// Multicast Table Entry 1
+			smc_modify_reg(3, ioaddr, MCAST_REG1, val);
+			break;
+
+		case CTL_SMC_REG_MTR2:	// Multicast Table Entry 2
+			smc_modify_reg(3, ioaddr, MCAST_REG2, val);
+			break;
+
+		case CTL_SMC_REG_MTR3:	// Multicast Table Entry 3
+			smc_modify_reg(3, ioaddr, MCAST_REG3, val);
+			break;
+
+		case CTL_SMC_REG_MTR4:	// Multicast Table Entry 4
+			smc_modify_reg(3, ioaddr, MCAST_REG4, val);
+			break;
+
+		case CTL_SMC_REG_MIIR:	// Management Interface
+			smc_modify_reg(3, ioaddr, MII_REG, val);
+			break;
+
+		case CTL_SMC_REG_REVR:	// Revision
+			smc_modify_reg(3, ioaddr, REV_REG, val);
+			break;
+
+		case CTL_SMC_REG_ERCVR:	// Early RCV
+			smc_modify_reg(3, ioaddr, ERCV_REG, val);
+			break;
+
+		case CTL_SMC_REG_EXTR:	// External
+			smc_modify_reg(7, ioaddr, EXT_REG, val);
+			break;
+
+		case CTL_SMC_PHY_CTRL:
+			smc_write_phy_register(ioaddr, lp->phyaddr,
+				PHY_CNTL_REG, val);
+			break;
+
+		case CTL_SMC_PHY_STAT:
+			smc_write_phy_register(ioaddr, lp->phyaddr,
+				PHY_STAT_REG, val);
+			break;
+
+		case CTL_SMC_PHY_ID1:
+			smc_write_phy_register(ioaddr, lp->phyaddr,
+				PHY_ID1_REG, val);
+			break;
+
+		case CTL_SMC_PHY_ID2:
+			smc_write_phy_register(ioaddr, lp->phyaddr,
+				PHY_ID2_REG, val);
+			break;
+
+		case CTL_SMC_PHY_ADC:
+			smc_write_phy_register(ioaddr, lp->phyaddr,
+				PHY_AD_REG, val);
+			break;
+
+		case CTL_SMC_PHY_REMC:
+			smc_write_phy_register(ioaddr, lp->phyaddr,
+				PHY_RMT_REG, val);
+			break;
+
+		case CTL_SMC_PHY_CFG1:
+			smc_write_phy_register(ioaddr, lp->phyaddr,
+				PHY_CFG1_REG, val);
+			break;
+
+		case CTL_SMC_PHY_CFG2:
+			smc_write_phy_register(ioaddr, lp->phyaddr,
+				PHY_CFG2_REG, val);
+			break;
+
+		case CTL_SMC_PHY_INT:
+			smc_write_phy_register(ioaddr, lp->phyaddr,
+				PHY_INT_REG, val);
+			break;
+
+		case CTL_SMC_PHY_MASK:
+			smc_write_phy_register(ioaddr, lp->phyaddr,
+				PHY_MASK_REG, val);
+			break;
+
+#endif // SMC_DEBUG
+
+		default:
+			// Just ignore unsupported parameters
+			break;
+		} // end switch
+
+	} // end if
+
+	return ret;
+}
+
+/*------------------------------------------------------------
+ . Sysctl registration function for all parameters (files)
+ .-------------------------------------------------------------*/
+static void smc_sysctl_register(struct rtnet_device *dev)
+{
+	struct smc_local *lp = (struct smc_local *)dev->priv;
+	static int ctl_name = CTL_SMC;
+	ctl_table* ct;
+	int i;
+
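+	// Three chained ctl_table levels are built here: root_table ("dev")
+	// -> eth_table (named after this interface) -> param_table (one entry
+	// per tunable).  Registering the root table at the end exposes the
+	// parameters as /proc/sys/dev/<ifname>/<param>.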
+	// Make sure the ctl_tables start out as all zeros
+	memset(lp->root_table, 0, sizeof lp->root_table);
+	memset(lp->eth_table, 0, sizeof lp->eth_table);
+	memset(lp->param_table, 0, sizeof lp->param_table);
+
+	// Initialize the root table
+	ct = lp->root_table;
+	ct->ctl_name = CTL_DEV;
+	ct->procname = "dev";
+	ct->maxlen = 0;
+	ct->mode = 0555;
+	ct->child = lp->eth_table;
+	// remaining fields are zero
+
+	// Initialize the ethX table (this device's table)
+	ct = lp->eth_table;
+	ct->ctl_name = ctl_name++; // Must be unique
+	ct->procname = dev->name;
+	ct->maxlen = 0;
+	ct->mode = 0555;
+	ct->child = lp->param_table;
+	// remaining fields are zero
+
+	// Initialize the parameter (files) table
+	// Make sure the last entry remains null
+	ct = lp->param_table;
+	for (i = 0; i < (CTL_SMC_LAST_ENTRY-1); ++i)
+		{
+		// Initialize fields common to all table entries
+		ct[i].proc_handler = smc_sysctl_handler;
+		ct[i].extra1 = (void*)dev; // Save our device pointer
+		ct[i].extra2 = (void*)lp;  // Save our smc_local data pointer
+		}
+
+	// INFO - read-only info string (handled by proc_dostring, as is SWVER below)
+	i = 0;
+	ct[i].proc_handler = proc_dostring; // use default handler
+	ct[i].ctl_name = CTL_SMC_INFO;
+	ct[i].procname = "info";
+	ct[i].data = (void*)smc_info_string;
+	ct[i].maxlen = sizeof smc_info_string;
+	ct[i].mode = 0444; // Read only
+
+	// SWVER
+	++i;
+	ct[i].proc_handler = proc_dostring; // use default handler
+	ct[i].ctl_name = CTL_SMC_SWVER;
+	ct[i].procname = "swver";
+	ct[i].data = (void*)version;
+	ct[i].maxlen = sizeof version;
+	ct[i].mode = 0444; // Read only
+
+	// SWFDUP
+	++i;
+	ct[i].ctl_name = CTL_SMC_SWFDUP;
+	ct[i].procname = "swfdup";
+	ct[i].data = (void*)&(lp->ctl_swfdup);
+	ct[i].maxlen = sizeof lp->ctl_swfdup;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// EPHLOOP
+	++i;
+	ct[i].ctl_name = CTL_SMC_EPHLOOP;
+	ct[i].procname = "ephloop";
+	ct[i].data = (void*)&(lp->ctl_ephloop);
+	ct[i].maxlen = sizeof lp->ctl_ephloop;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// MIIOP
+	++i;
+	ct[i].ctl_name = CTL_SMC_MIIOP;
+	ct[i].procname = "miiop";
+	ct[i].data = (void*)&(lp->ctl_miiop);
+	ct[i].maxlen = sizeof lp->ctl_miiop;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// AUTONEG
+	++i;
+	ct[i].ctl_name = CTL_SMC_AUTONEG;
+	ct[i].procname = "autoneg";
+	ct[i].data = (void*)&(lp->ctl_autoneg);
+	ct[i].maxlen = sizeof lp->ctl_autoneg;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// RFDUPLX
+	++i;
+	ct[i].ctl_name = CTL_SMC_RFDUPLX;
+	ct[i].procname = "rfduplx";
+	ct[i].data = (void*)&(lp->ctl_rfduplx);
+	ct[i].maxlen = sizeof lp->ctl_rfduplx;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// RSPEED
+	++i;
+	ct[i].ctl_name = CTL_SMC_RSPEED;
+	ct[i].procname = "rspeed";
+	ct[i].data = (void*)&(lp->ctl_rspeed);
+	ct[i].maxlen = sizeof lp->ctl_rspeed;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// AFDUPLX
+	++i;
+	ct[i].ctl_name = CTL_SMC_AFDUPLX;
+	ct[i].procname = "afduplx";
+	ct[i].data = (void*)&(lp->ctl_afduplx);
+	ct[i].maxlen = sizeof lp->ctl_afduplx;
+	ct[i].mode = 0444; // Read only
+
+	// ASPEED
+	++i;
+	ct[i].ctl_name = CTL_SMC_ASPEED;
+	ct[i].procname = "aspeed";
+	ct[i].data = (void*)&(lp->ctl_aspeed);
+	ct[i].maxlen = sizeof lp->ctl_aspeed;
+	ct[i].mode = 0444; // Read only
+
+	// LNKFAIL
+	++i;
+	ct[i].ctl_name = CTL_SMC_LNKFAIL;
+	ct[i].procname = "lnkfail";
+	ct[i].data = (void*)&(lp->ctl_lnkfail);
+	ct[i].maxlen = sizeof lp->ctl_lnkfail;
+	ct[i].mode = 0444; // Read only
+
+	// FORCOL
+	++i;
+	ct[i].ctl_name = CTL_SMC_FORCOL;
+	ct[i].procname = "forcol";
+	ct[i].data = (void*)&(lp->ctl_forcol);
+	ct[i].maxlen = sizeof lp->ctl_forcol;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// FILTCAR
+	++i;
+	ct[i].ctl_name = CTL_SMC_FILTCAR;
+	ct[i].procname = "filtcar";
+	ct[i].data = (void*)&(lp->ctl_filtcar);
+	ct[i].maxlen = sizeof lp->ctl_filtcar;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// FREEMEM
+	++i;
+	ct[i].ctl_name = CTL_SMC_FREEMEM;
+	ct[i].procname = "freemem";
+	ct[i].data = (void*)&(lp->ctl_freemem);
+	ct[i].maxlen = sizeof lp->ctl_freemem;
+	ct[i].mode = 0444; // Read only
+
+	// TOTMEM
+	++i;
+	ct[i].ctl_name = CTL_SMC_TOTMEM;
+	ct[i].procname = "totmem";
+	ct[i].data = (void*)&(lp->ctl_totmem);
+	ct[i].maxlen = sizeof lp->ctl_totmem;
+	ct[i].mode = 0444; // Read only
+
+	// LEDA
+	++i;
+	ct[i].ctl_name = CTL_SMC_LEDA;
+	ct[i].procname = "leda";
+	ct[i].data = (void*)&(lp->ctl_leda);
+	ct[i].maxlen = sizeof lp->ctl_leda;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// LEDB
+	++i;
+	ct[i].ctl_name = CTL_SMC_LEDB;
+	ct[i].procname = "ledb";
+	ct[i].data = (void*)&(lp->ctl_ledb);
+	ct[i].maxlen = sizeof lp->ctl_ledb;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// CHIPREV
+	++i;
+	ct[i].ctl_name = CTL_SMC_CHIPREV;
+	ct[i].procname = "chiprev";
+	ct[i].data = (void*)&(lp->ctl_chiprev);
+	ct[i].maxlen = sizeof lp->ctl_chiprev;
+	ct[i].mode = 0444; // Read only
+
+#ifdef SMC_DEBUG
+	// REG_BSR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_BSR;
+	ct[i].procname = "reg_bsr";
+	ct[i].data = (void*)&(lp->ctl_reg_bsr);
+	ct[i].maxlen = sizeof lp->ctl_reg_bsr;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_TCR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_TCR;
+	ct[i].procname = "reg_tcr";
+	ct[i].data = (void*)&(lp->ctl_reg_tcr);
+	ct[i].maxlen = sizeof lp->ctl_reg_tcr;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_ESR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_ESR;
+	ct[i].procname = "reg_esr";
+	ct[i].data = (void*)&(lp->ctl_reg_esr);
+	ct[i].maxlen = sizeof lp->ctl_reg_esr;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_RCR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_RCR;
+	ct[i].procname = "reg_rcr";
+	ct[i].data = (void*)&(lp->ctl_reg_rcr);
+	ct[i].maxlen = sizeof lp->ctl_reg_rcr;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_CTRR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_CTRR;
+	ct[i].procname = "reg_ctrr";
+	ct[i].data = (void*)&(lp->ctl_reg_ctrr);
+	ct[i].maxlen = sizeof lp->ctl_reg_ctrr;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_MIR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_MIR;
+	ct[i].procname = "reg_mir";
+	ct[i].data = (void*)&(lp->ctl_reg_mir);
+	ct[i].maxlen = sizeof lp->ctl_reg_mir;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_RPCR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_RPCR;
+	ct[i].procname = "reg_rpcr";
+	ct[i].data = (void*)&(lp->ctl_reg_rpcr);
+	ct[i].maxlen = sizeof lp->ctl_reg_rpcr;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_CFGR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_CFGR;
+	ct[i].procname = "reg_cfgr";
+	ct[i].data = (void*)&(lp->ctl_reg_cfgr);
+	ct[i].maxlen = sizeof lp->ctl_reg_cfgr;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_BAR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_BAR;
+	ct[i].procname = "reg_bar";
+	ct[i].data = (void*)&(lp->ctl_reg_bar);
+	ct[i].maxlen = sizeof lp->ctl_reg_bar;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_IAR0
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_IAR0;
+	ct[i].procname = "reg_iar0";
+	ct[i].data = (void*)&(lp->ctl_reg_iar0);
+	ct[i].maxlen = sizeof lp->ctl_reg_iar0;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_IAR1
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_IAR1;
+	ct[i].procname = "reg_iar1";
+	ct[i].data = (void*)&(lp->ctl_reg_iar1);
+	ct[i].maxlen = sizeof lp->ctl_reg_iar1;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_IAR2
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_IAR2;
+	ct[i].procname = "reg_iar2";
+	ct[i].data = (void*)&(lp->ctl_reg_iar2);
+	ct[i].maxlen = sizeof lp->ctl_reg_iar2;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_GPR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_GPR;
+	ct[i].procname = "reg_gpr";
+	ct[i].data = (void*)&(lp->ctl_reg_gpr);
+	ct[i].maxlen = sizeof lp->ctl_reg_gpr;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_CTLR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_CTLR;
+	ct[i].procname = "reg_ctlr";
+	ct[i].data = (void*)&(lp->ctl_reg_ctlr);
+	ct[i].maxlen = sizeof lp->ctl_reg_ctlr;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_MCR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_MCR;
+	ct[i].procname = "reg_mcr";
+	ct[i].data = (void*)&(lp->ctl_reg_mcr);
+	ct[i].maxlen = sizeof lp->ctl_reg_mcr;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_PNR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_PNR;
+	ct[i].procname = "reg_pnr";
+	ct[i].data = (void*)&(lp->ctl_reg_pnr);
+	ct[i].maxlen = sizeof lp->ctl_reg_pnr;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_FPR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_FPR;
+	ct[i].procname = "reg_fpr";
+	ct[i].data = (void*)&(lp->ctl_reg_fpr);
+	ct[i].maxlen = sizeof lp->ctl_reg_fpr;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_PTR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_PTR;
+	ct[i].procname = "reg_ptr";
+	ct[i].data = (void*)&(lp->ctl_reg_ptr);
+	ct[i].maxlen = sizeof lp->ctl_reg_ptr;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_DR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_DR;
+	ct[i].procname = "reg_dr";
+	ct[i].data = (void*)&(lp->ctl_reg_dr);
+	ct[i].maxlen = sizeof lp->ctl_reg_dr;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_ISR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_ISR;
+	ct[i].procname = "reg_isr";
+	ct[i].data = (void*)&(lp->ctl_reg_isr);
+	ct[i].maxlen = sizeof lp->ctl_reg_isr;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_MTR1
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_MTR1;
+	ct[i].procname = "reg_mtr1";
+	ct[i].data = (void*)&(lp->ctl_reg_mtr1);
+	ct[i].maxlen = sizeof lp->ctl_reg_mtr1;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_MTR2
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_MTR2;
+	ct[i].procname = "reg_mtr2";
+	ct[i].data = (void*)&(lp->ctl_reg_mtr2);
+	ct[i].maxlen = sizeof lp->ctl_reg_mtr2;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_MTR3
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_MTR3;
+	ct[i].procname = "reg_mtr3";
+	ct[i].data = (void*)&(lp->ctl_reg_mtr3);
+	ct[i].maxlen = sizeof lp->ctl_reg_mtr3;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_MTR4
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_MTR4;
+	ct[i].procname = "reg_mtr4";
+	ct[i].data = (void*)&(lp->ctl_reg_mtr4);
+	ct[i].maxlen = sizeof lp->ctl_reg_mtr4;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_MIIR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_MIIR;
+	ct[i].procname = "reg_miir";
+	ct[i].data = (void*)&(lp->ctl_reg_miir);
+	ct[i].maxlen = sizeof lp->ctl_reg_miir;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_REVR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_REVR;
+	ct[i].procname = "reg_revr";
+	ct[i].data = (void*)&(lp->ctl_reg_revr);
+	ct[i].maxlen = sizeof lp->ctl_reg_revr;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_ERCVR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_ERCVR;
+	ct[i].procname = "reg_ercvr";
+	ct[i].data = (void*)&(lp->ctl_reg_ercvr);
+	ct[i].maxlen = sizeof lp->ctl_reg_ercvr;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// REG_EXTR
+	++i;
+	ct[i].ctl_name = CTL_SMC_REG_EXTR;
+	ct[i].procname = "reg_extr";
+	ct[i].data = (void*)&(lp->ctl_reg_extr);
+	ct[i].maxlen = sizeof lp->ctl_reg_extr;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// PHY Control
+	++i;
+	ct[i].ctl_name = CTL_SMC_PHY_CTRL;
+	ct[i].procname = "phy_ctrl";
+	ct[i].data = (void*)&(lp->ctl_phy_ctrl);
+	ct[i].maxlen = sizeof lp->ctl_phy_ctrl;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// PHY Status
+	++i;
+	ct[i].ctl_name = CTL_SMC_PHY_STAT;
+	ct[i].procname = "phy_stat";
+	ct[i].data = (void*)&(lp->ctl_phy_stat);
+	ct[i].maxlen = sizeof lp->ctl_phy_stat;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// PHY ID1
+	++i;
+	ct[i].ctl_name = CTL_SMC_PHY_ID1;
+	ct[i].procname = "phy_id1";
+	ct[i].data = (void*)&(lp->ctl_phy_id1);
+	ct[i].maxlen = sizeof lp->ctl_phy_id1;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// PHY ID2
+	++i;
+	ct[i].ctl_name = CTL_SMC_PHY_ID2;
+	ct[i].procname = "phy_id2";
+	ct[i].data = (void*)&(lp->ctl_phy_id2);
+	ct[i].maxlen = sizeof lp->ctl_phy_id2;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// PHY Advertise Capabilities
+	++i;
+	ct[i].ctl_name = CTL_SMC_PHY_ADC;
+	ct[i].procname = "phy_adc";
+	ct[i].data = (void*)&(lp->ctl_phy_adc);
+	ct[i].maxlen = sizeof lp->ctl_phy_adc;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// PHY Remote Capabilities
+	++i;
+	ct[i].ctl_name = CTL_SMC_PHY_REMC;
+	ct[i].procname = "phy_remc";
+	ct[i].data = (void*)&(lp->ctl_phy_remc);
+	ct[i].maxlen = sizeof lp->ctl_phy_remc;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// PHY Configuration 1
+	++i;
+	ct[i].ctl_name = CTL_SMC_PHY_CFG1;
+	ct[i].procname = "phy_cfg1";
+	ct[i].data = (void*)&(lp->ctl_phy_cfg1);
+	ct[i].maxlen = sizeof lp->ctl_phy_cfg1;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// PHY Configuration 2
+	++i;
+	ct[i].ctl_name = CTL_SMC_PHY_CFG2;
+	ct[i].procname = "phy_cfg2";
+	ct[i].data = (void*)&(lp->ctl_phy_cfg2);
+	ct[i].maxlen = sizeof lp->ctl_phy_cfg2;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// PHY Interrupt/Status Output
+	++i;
+	ct[i].ctl_name = CTL_SMC_PHY_INT;
+	ct[i].procname = "phy_int";
+	ct[i].data = (void*)&(lp->ctl_phy_int);
+	ct[i].maxlen = sizeof lp->ctl_phy_int;
+	ct[i].mode = 0644; // Read by all, write by root
+
+	// PHY Interrupt/Status Mask
+	++i;
+	ct[i].ctl_name = CTL_SMC_PHY_MASK;
+	ct[i].procname = "phy_mask";
+	ct[i].data = (void*)&(lp->ctl_phy_mask);
+	ct[i].maxlen = sizeof lp->ctl_phy_mask;
+	ct[i].mode = 0644; // Read by all, write by root
+
+#endif // SMC_DEBUG
+
+	// Register /proc/sys/dev/ethX
+	lp->sysctl_header = register_sysctl_table(lp->root_table, 1);
+}
+
+
+/*------------------------------------------------------------
+ . Sysctl unregistration when driver is closed
+ .-------------------------------------------------------------*/
+static void smc_sysctl_unregister(struct rtnet_device *dev)
+{
+	struct smc_local *lp = (struct smc_local *)dev->priv;
+
+	unregister_sysctl_table(lp->sysctl_header);
+}
+
+#endif /* CONFIG_SYSCTL */
+
+
+//---PHY CONTROL AND CONFIGURATION-----------------------------------------
+
+#if defined(SMC_DEBUG) && (SMC_DEBUG > 2 )
+
+/*------------------------------------------------------------
+ . Debugging function for viewing MII Management serial bitstream
+ .-------------------------------------------------------------*/
+static void smc_dump_mii_stream(byte* bits, int size)
+{
+	int i;
+
+	printk("BIT#:");
+	for (i = 0; i < size; ++i)
+		{
+		printk("%d", i%10);
+		}
+
+	printk("\nMDOE:");
+	for (i = 0; i < size; ++i)
+		{
+		if (bits[i] & MII_MDOE)
+			printk("1");
+		else
+			printk("0");
+		}
+
+	printk("\nMDO :");
+	for (i = 0; i < size; ++i)
+		{
+		if (bits[i] & MII_MDO)
+			printk("1");
+		else
+			printk("0");
+		}
+
+	printk("\nMDI :");
+	for (i = 0; i < size; ++i)
+		{
+		if (bits[i] & MII_MDI)
+			printk("1");
+		else
+			printk("0");
+		}
+
+	printk("\n");
+}
+#endif
+
+/*------------------------------------------------------------
+ . Reads a register from the MII Management serial interface
+ .-------------------------------------------------------------*/
+static word smc_read_phy_register(int ioaddr, byte phyaddr, byte phyreg)
+{
+	int oldBank;
+	int i;
+	byte mask;
+	word mii_reg;
+	byte bits[64];
+	int clk_idx = 0;
+	int input_idx;
+	word phydata;
+
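+	// The 64 entries of bits[] encode one MII management read frame, one
+	// element per MDC cycle: 32-cycle preamble, start code <01>, read
+	// opcode <10>, 5-bit PHY address, 5-bit register address, a turnaround
+	// cycle, 16 data cycles sampled from MDI and a final idle cycle.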
+	// 32 consecutive ones on MDO to establish sync
+	for (i = 0; i < 32; ++i)
+		bits[clk_idx++] = MII_MDOE | MII_MDO;
+
+	// Start code <01>
+	bits[clk_idx++] = MII_MDOE;
+	bits[clk_idx++] = MII_MDOE | MII_MDO;
+
+	// Read command <10>
+	bits[clk_idx++] = MII_MDOE | MII_MDO;
+	bits[clk_idx++] = MII_MDOE;
+
+	// Output the PHY address, msb first
+	mask = (byte)0x10;
+	for (i = 0; i < 5; ++i)
+		{
+		if (phyaddr & mask)
+			bits[clk_idx++] = MII_MDOE | MII_MDO;
+		else
+			bits[clk_idx++] = MII_MDOE;
+
+		// Shift to next lowest bit
+		mask >>= 1;
+		}
+
+	// Output the phy register number, msb first
+	mask = (byte)0x10;
+	for (i = 0; i < 5; ++i)
+		{
+		if (phyreg & mask)
+			bits[clk_idx++] = MII_MDOE | MII_MDO;
+		else
+			bits[clk_idx++] = MII_MDOE;
+
+		// Shift to next lowest bit
+		mask >>= 1;
+		}
+
+	// Tristate and turnaround (2 bit times)
+	bits[clk_idx++] = 0;
+	//bits[clk_idx++] = 0;
+
+	// Input starts at this bit time
+	input_idx = clk_idx;
+
+	// Will input 16 bits
+	for (i = 0; i < 16; ++i)
+		bits[clk_idx++] = 0;
+
+	// Final clock bit
+	bits[clk_idx++] = 0;
+
+	// Save the current bank
+	oldBank = inw( ioaddr+BANK_SELECT );
+
+	// Select bank 3
+	SMC_SELECT_BANK( 3 );
+
+	// Get the current MII register value
+	mii_reg = inw( ioaddr+MII_REG );
+
+	// Turn off all MII Interface bits
+	mii_reg &= ~(MII_MDOE|MII_MCLK|MII_MDI|MII_MDO);
+
+	// Clock all 64 cycles
+	for (i = 0; i < sizeof bits; ++i)
+		{
+		// Clock Low - output data
+		outw( mii_reg | bits[i], ioaddr+MII_REG );
+		udelay(50);
+
+
+		// Clock Hi - input data
+		outw( mii_reg | bits[i] | MII_MCLK, ioaddr+MII_REG );
+		udelay(50);
+		bits[i] |= inw( ioaddr+MII_REG ) & MII_MDI;
+		}
+
+	// Return to idle state
+	// Set clock to low, data to low, and output tristated
+	outw( mii_reg, ioaddr+MII_REG );
+	udelay(50);
+
+	// Restore original bank select
+	SMC_SELECT_BANK( oldBank );
+
+	// Recover input data
+	phydata = 0;
+	for (i = 0; i < 16; ++i)
+		{
+		phydata <<= 1;
+
+		if (bits[input_idx++] & MII_MDI)
+			phydata |= 0x0001;
+		}
+
+#if defined(SMC_DEBUG) && (SMC_DEBUG > 2 )
+	printk("smc_read_phy_register(): phyaddr=%x,phyreg=%x,phydata=%x\n",
+		phyaddr, phyreg, phydata);
+	smc_dump_mii_stream(bits, sizeof bits);
+#endif
+
+	return(phydata);
+}
+
+
+/*------------------------------------------------------------
+ . Writes a register to the MII Management serial interface
+ .-------------------------------------------------------------*/
+static void smc_write_phy_register(int ioaddr,
+	byte phyaddr, byte phyreg, word phydata)
+{
+	int oldBank;
+	int i;
+	word mask;
+	word mii_reg;
+	byte bits[65];
+	int clk_idx = 0;
+
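+	// bits[] holds one MII management write frame, one element per MDC
+	// cycle: 32-cycle preamble, start code <01>, write opcode <01>, 5-bit
+	// PHY address, 5-bit register address, two turnaround cycles, 16 data
+	// cycles driven on MDO and a final idle cycle (65 cycles in total).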
+	// 32 consecutive ones on MDO to establish sync
+	for (i = 0; i < 32; ++i)
+		bits[clk_idx++] = MII_MDOE | MII_MDO;
+
+	// Start code <01>
+	bits[clk_idx++] = MII_MDOE;
+	bits[clk_idx++] = MII_MDOE | MII_MDO;
+
+	// Write command <01>
+	bits[clk_idx++] = MII_MDOE;
+	bits[clk_idx++] = MII_MDOE | MII_MDO;
+
+	// Output the PHY address, msb first
+	mask = (byte)0x10;
+	for (i = 0; i < 5; ++i)
+		{
+		if (phyaddr & mask)
+			bits[clk_idx++] = MII_MDOE | MII_MDO;
+		else
+			bits[clk_idx++] = MII_MDOE;
+
+		// Shift to next lowest bit
+		mask >>= 1;
+		}
+
+	// Output the phy register number, msb first
+	mask = (byte)0x10;
+	for (i = 0; i < 5; ++i)
+		{
+		if (phyreg & mask)
+			bits[clk_idx++] = MII_MDOE | MII_MDO;
+		else
+			bits[clk_idx++] = MII_MDOE;
+
+		// Shift to next lowest bit
+		mask >>= 1;
+		}
+
+	// Tristate and turnaround (2 bit times)
+	bits[clk_idx++] = 0;
+	bits[clk_idx++] = 0;
+
+	// Write out 16 bits of data, msb first
+	mask = 0x8000;
+	for (i = 0; i < 16; ++i)
+		{
+		if (phydata & mask)
+			bits[clk_idx++] = MII_MDOE | MII_MDO;
+		else
+			bits[clk_idx++] = MII_MDOE;
+
+		// Shift to next lowest bit
+		mask >>= 1;
+		}
+
+	// Final clock bit (tristate)
+	bits[clk_idx++] = 0;
+
+	// Save the current bank
+	oldBank = inw( ioaddr+BANK_SELECT );
+
+	// Select bank 3
+	SMC_SELECT_BANK( 3 );
+
+	// Get the current MII register value
+	mii_reg = inw( ioaddr+MII_REG );
+
+	// Turn off all MII Interface bits
+	mii_reg &= ~(MII_MDOE|MII_MCLK|MII_MDI|MII_MDO);
+
+	// Clock all cycles
+	for (i = 0; i < sizeof bits; ++i)
+		{
+		// Clock Low - output data
+		outw( mii_reg | bits[i], ioaddr+MII_REG );
+		udelay(50);
+
+
+		// Clock Hi - input data
+		outw( mii_reg | bits[i] | MII_MCLK, ioaddr+MII_REG );
+		udelay(50);
+		bits[i] |= inw( ioaddr+MII_REG ) & MII_MDI;
+		}
+
+	// Return to idle state
+	// Set clock to low, data to low, and output tristated
+	outw( mii_reg, ioaddr+MII_REG );
+	udelay(50);
+
+	// Restore original bank select
+	SMC_SELECT_BANK( oldBank );
+
+#if defined(SMC_DEBUG) && (SMC_DEBUG > 2 )
+	printk("smc_write_phy_register(): phyaddr=%x,phyreg=%x,phydata=%x\n",
+		phyaddr, phyreg, phydata);
+	smc_dump_mii_stream(bits, sizeof bits);
+#endif
+}
+
+
+/*------------------------------------------------------------
+ . Finds and reports the PHY address
+ .-------------------------------------------------------------*/
+static int smc_detect_phy(struct rtnet_device* dev)
+{
+	struct smc_local *lp = (struct smc_local *)dev->priv;
+	int ioaddr = dev->base_addr;
+	word phy_id1;
+	word phy_id2;
+	int phyaddr;
+	int found = 0;
+
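+	// A candidate address is accepted once both ID registers read back
+	// something other than all-zeroes, all-ones or 0x8000; the ID pair is
+	// then used further below to classify the PHY type.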
+	PRINTK3("%s:smc_detect_phy()\n", dev->name);
+
+	// Scan all 32 PHY addresses if necessary
+	for (phyaddr = 0; phyaddr < 32; ++phyaddr)
+		{
+		// Read the PHY identifiers
+		phy_id1  = smc_read_phy_register(ioaddr, phyaddr, PHY_ID1_REG);
+		phy_id2  = smc_read_phy_register(ioaddr, phyaddr, PHY_ID2_REG);
+
+		PRINTK3("%s: phy_id1=%x, phy_id2=%x\n",
+			dev->name, phy_id1, phy_id2);
+
+		// Make sure it is a valid identifier
+		if ((phy_id2 > 0x0000) && (phy_id2 < 0xffff) &&
+		    (phy_id1 > 0x0000) && (phy_id1 < 0xffff))
+			{
+			if ((phy_id1 != 0x8000) && (phy_id2 != 0x8000))
+				{
+				// Save the PHY's address
+				lp->phyaddr = phyaddr;
+				found = 1;
+				break;
+				}
+			}
+		}
+
+	if (!found)
+		{
+		PRINTK("%s: No PHY found\n", dev->name);
+		return(0);
+		}
+
+	// Set the PHY type
+	if ( (phy_id1 == 0x0016) && ((phy_id2 & 0xFFF0) == 0xF840 ) )
+		{
+		lp->phytype = PHY_LAN83C183;
+		PRINTK("%s: PHY=LAN83C183 (LAN91C111 Internal)\n", dev->name);
+		}
+
+	if ( (phy_id1 == 0x0282) && ((phy_id2 & 0xFFF0) == 0x1C50) )
+		{
+		lp->phytype = PHY_LAN83C180;
+		PRINTK("%s: PHY=LAN83C180\n", dev->name);
+		}
+
+	return(1);
+}
+
+/*------------------------------------------------------------
+ . Waits the specified number of milliseconds - kernel friendly
+ .-------------------------------------------------------------*/
+static void smc_wait_ms(unsigned int ms)
+{
+
+	if (!in_interrupt())
+		{
+		current->state = TASK_UNINTERRUPTIBLE;
+		schedule_timeout(1 + ms * HZ / 1000);
+		}
+	else
+		{
+		current->state = TASK_INTERRUPTIBLE;
+		schedule_timeout(1 + ms * HZ / 1000);
+		current->state = TASK_RUNNING;
+		}
+}
+
+/*------------------------------------------------------------
+ . Sets the PHY to a configuration as determined by the user
+ .-------------------------------------------------------------*/
+#ifdef DISABLED____CONFIG_SYSCTL
+static int smc_phy_fixed(struct rtnet_device* dev)
+{
+	int ioaddr = dev->base_addr;
+	struct smc_local *lp = (struct smc_local *)dev->priv;
+	byte phyaddr = lp->phyaddr;
+	word my_fixed_caps;
+	word cfg1;
+
+	PRINTK3("%s:smc_phy_fixed()\n", dev->name);
+
+	// Enter Link Disable state
+	cfg1 = smc_read_phy_register(ioaddr, phyaddr, PHY_CFG1_REG);
+	cfg1 |= PHY_CFG1_LNKDIS;
+	smc_write_phy_register(ioaddr, phyaddr, PHY_CFG1_REG, cfg1);
+
+	// Set our fixed capabilities
+	// Disable auto-negotiation
+	my_fixed_caps = 0;
+
+	if (lp->ctl_rfduplx)
+		my_fixed_caps |= PHY_CNTL_DPLX;
+
+	if (lp->ctl_rspeed == 100)
+		my_fixed_caps |= PHY_CNTL_SPEED;
+
+	// Write our capabilities to the phy control register
+	smc_write_phy_register(ioaddr, phyaddr, PHY_CNTL_REG, my_fixed_caps);
+
+	// Re-Configure the Receive/Phy Control register
+	outw( lp->rpc_cur_mode, ioaddr + RPC_REG );
+
+	// Success
+	return(1);
+}
+#endif // DISABLED____CONFIG_SYSCTL
+
+
+/*------------------------------------------------------------
+ . Configures the specified PHY using Autonegotiation. Calls
+ . smc_phy_fixed() if the user has requested a certain config.
+ .-------------------------------------------------------------*/
+static void smc_phy_configure(struct rtnet_device* dev)
+{
+	int ioaddr = dev->base_addr;
+	struct smc_local *lp = (struct smc_local *)dev->priv;
+	int timeout;
+	byte phyaddr;
+	word my_phy_caps; // My PHY capabilities
+	word my_ad_caps; // My Advertised capabilities
+	word status;
+	int failed = 0;
+
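+	// Sequence: detect the PHY, reset it, program its interrupt mask,
+	// advertise our capabilities, then restart auto-negotiation and poll
+	// PHY_STAT_REG until it acknowledges completion (or we time out / hit
+	// a remote fault), finally updating RPC_REG with the negotiated
+	// speed/duplex.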
+	PRINTK3("%s:smc_phy_configure()\n", dev->name);
+
+	// Set the blocking flag
+	lp->autoneg_active = 1;
+
+	// Find the address and type of our phy
+	if (!smc_detect_phy(dev))
+		{
+		goto smc_phy_configure_exit;
+		}
+
+	// Get the detected phy address
+	phyaddr = lp->phyaddr;
+
+	// Reset the PHY, setting all other bits to zero
+	smc_write_phy_register(ioaddr, phyaddr, PHY_CNTL_REG, PHY_CNTL_RST);
+
+	// Wait for the reset to complete, or time out
+	timeout = 6; // Wait up to 3 seconds
+	while (timeout--)
+		{
+		if (!(smc_read_phy_register(ioaddr, phyaddr, PHY_CNTL_REG)
+		    & PHY_CNTL_RST))
+			{
+			// reset complete
+			break;
+			}
+
+		smc_wait_ms(500); // wait 500 millisecs
+		if (signal_pending(current)) // Exit anyway if signaled
+			{
+			PRINTK2("%s:PHY reset interrupted by signal\n",
+				dev->name);
+			timeout = 0;
+			break;
+			}
+		}
+
+	if (timeout < 1)
+		{
+		PRINTK2("%s:PHY reset timed out\n", dev->name);
+		goto smc_phy_configure_exit;
+		}
+
+	// Read PHY Register 18, Status Output
+	lp->lastPhy18 = smc_read_phy_register(ioaddr, phyaddr, PHY_INT_REG);
+
+	// Enable PHY Interrupts (for register 18)
+	// Interrupts listed here are disabled
+	smc_write_phy_register(ioaddr, phyaddr, PHY_MASK_REG,
+		PHY_INT_LOSSSYNC | PHY_INT_CWRD | PHY_INT_SSD |
+		PHY_INT_ESD | PHY_INT_RPOL | PHY_INT_JAB |
+		PHY_INT_SPDDET | PHY_INT_DPLXDET);
+
+	/* Configure the Receive/Phy Control register */
+	SMC_SELECT_BANK( 0 );
+	outw( lp->rpc_cur_mode, ioaddr + RPC_REG );
+
+	// Copy our capabilities from PHY_STAT_REG to PHY_AD_REG
+	my_phy_caps = smc_read_phy_register(ioaddr, phyaddr, PHY_STAT_REG);
+	my_ad_caps  = PHY_AD_CSMA; // I am CSMA capable
+
+	if (my_phy_caps & PHY_STAT_CAP_T4)
+		my_ad_caps |= PHY_AD_T4;
+
+	if (my_phy_caps & PHY_STAT_CAP_TXF)
+		my_ad_caps |= PHY_AD_TX_FDX;
+
+	if (my_phy_caps & PHY_STAT_CAP_TXH)
+		my_ad_caps |= PHY_AD_TX_HDX;
+
+	if (my_phy_caps & PHY_STAT_CAP_TF)
+		my_ad_caps |= PHY_AD_10_FDX;
+
+	if (my_phy_caps & PHY_STAT_CAP_TH)
+		my_ad_caps |= PHY_AD_10_HDX;
+
+#ifdef DISABLED____CONFIG_SYSCTL
+	// Disable capabilities not selected by our user
+	if (lp->ctl_rspeed != 100)
+		{
+		my_ad_caps &= ~(PHY_AD_T4|PHY_AD_TX_FDX|PHY_AD_TX_HDX);
+		}
+
+	if (!lp->ctl_rfduplx)
+		{
+		my_ad_caps &= ~(PHY_AD_TX_FDX|PHY_AD_10_FDX);
+		}
+#endif // DISABLED____CONFIG_SYSCTL
+
+	// Update our Auto-Neg Advertisement Register
+	smc_write_phy_register(ioaddr, phyaddr, PHY_AD_REG, my_ad_caps);
+
+	PRINTK2("%s:phy caps=%x\n", dev->name, my_phy_caps);
+	PRINTK2("%s:phy advertised caps=%x\n", dev->name, my_ad_caps);
+
+#ifdef DISABLED____CONFIG_SYSCTL
+	// If the user requested no auto-negotiation, apply the fixed settings instead
+	if (!(lp->ctl_autoneg))
+		{
+		smc_phy_fixed(dev);
+		goto smc_phy_configure_exit;
+		}
+#endif // DISABLED____CONFIG_SYSCTL
+
+	// Restart auto-negotiation process in order to advertise my caps
+	smc_write_phy_register( ioaddr, phyaddr, PHY_CNTL_REG,
+		PHY_CNTL_ANEG_EN | PHY_CNTL_ANEG_RST );
+
+	// Wait for the auto-negotiation to complete, or time out.  This
+	// normally takes 2 to 3 seconds.
+	timeout = 20-1; // Wait up to 10 seconds
+	do
+		{
+		status = smc_read_phy_register(ioaddr, phyaddr, PHY_STAT_REG);
+		if (status & PHY_STAT_ANEG_ACK)
+			{
+			// auto-negotiate complete
+			break;
+			}
+
+		smc_wait_ms(500); // wait 500 millisecs
+		if (signal_pending(current)) // Exit anyway if signaled
+			{
+			printk(KERN_DEBUG
+				"%s:PHY auto-negotiate interrupted by signal\n",
+				dev->name);
+			timeout = 0;
+			break;
+			}
+
+		// Restart auto-negotiation if remote fault
+		if (status & PHY_STAT_REM_FLT)
+			{
+			PRINTK2("%s:PHY remote fault detected\n", dev->name);
+
+			// Restart auto-negotiation
+			PRINTK2("%s:PHY restarting auto-negotiation\n",
+				dev->name);
+			smc_write_phy_register( ioaddr, phyaddr, PHY_CNTL_REG,
+				PHY_CNTL_ANEG_EN | PHY_CNTL_ANEG_RST |
+				PHY_CNTL_SPEED | PHY_CNTL_DPLX);
+			}
+		}
+	while (timeout--);
+
+	if (timeout < 1)
+		{
+		printk(KERN_DEBUG "%s:PHY auto-negotiate timed out\n",
+			dev->name);
+		PRINTK2("%s:PHY auto-negotiate timed out\n", dev->name);
+		failed = 1;
+		}
+
+	// Fail if we detected an auto-negotiate remote fault
+	if (status & PHY_STAT_REM_FLT)
+		{
+		printk(KERN_DEBUG "%s:PHY remote fault detected\n", dev->name);
+		PRINTK2("%s:PHY remote fault detected\n", dev->name);
+		failed = 1;
+		}
+
+	// The smc_phy_interrupt() routine will be called to update lastPhy18
+
+	// Set our sysctl parameters to match auto-negotiation results
+	if ( lp->lastPhy18 & PHY_INT_SPDDET )
+		{
+		PRINTK2("%s:PHY 100BaseT\n", dev->name);
+		lp->rpc_cur_mode |= RPC_SPEED;
+		}
+	else
+		{
+		PRINTK2("%s:PHY 10BaseT\n", dev->name);
+		lp->rpc_cur_mode &= ~RPC_SPEED;
+		}
+
+	if ( lp->lastPhy18 & PHY_INT_DPLXDET )
+		{
+		PRINTK2("%s:PHY Full Duplex\n", dev->name);
+		lp->rpc_cur_mode |= RPC_DPLX;
+		}
+	else
+		{
+		PRINTK2("%s:PHY Half Duplex\n", dev->name);
+		lp->rpc_cur_mode &= ~RPC_DPLX;
+		}
+
+	// Re-Configure the Receive/Phy Control register
+	outw( lp->rpc_cur_mode, ioaddr + RPC_REG );
+
+  smc_phy_configure_exit:
+
+	// Exit auto-negotiation
+	lp->autoneg_active = 0;
+}
+
+
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/net/drivers/Makefile	2022-03-21 12:58:29.612886787 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/r8169.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_EXP_DRIVERS) += experimental/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_E1000) += e1000/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_E1000E) += e1000e/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_MPC52XX_FEC) += mpc52xx_fec/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_TULIP) += tulip/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_IGB) += igb/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_8139) += rt_8139too.o
+
+rt_8139too-y := 8139too.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_AT91_ETHER) += rt_at91_ether.o
+
+rt_at91_ether-y := at91_ether.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100) += rt_eepro100.o
+
+rt_eepro100-y := eepro100.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_LOOPBACK) += rt_loopback.o
+
+rt_loopback-y := loopback.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_FCC_ENET) += rt_mpc8260_fcc_enet.o
+
+rt_mpc8260_fcc_enet-y := mpc8260_fcc_enet.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_SCC_ENET) += rt_mpc8xx_enet.o
+
+rt_mpc8xx_enet-y := mpc8xx_enet.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_FEC_ENET) += rt_mpc8xx_fec.o
+
+rt_mpc8xx_fec-y := mpc8xx_fec.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_FEC) += rt_fec.o
+
+rt_fec-y := fec.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_NATSEMI) += rt_natsemi.o
+
+rt_natsemi-y := natsemi.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_PCNET32) += rt_pcnet32.o
+
+rt_pcnet32-y := pcnet32.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_SMC91111) += rt_smc91111.o
+
+rt_smc91111-y := smc91111.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_MACB) += rt_macb.o
+
+rt_macb-y := macb.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_VIA_RHINE) += rt_via-rhine.o
+
+rt_via-rhine-y := via-rhine.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_R8169) += rt_r8169.o
+
+rt_r8169-y := r8169.o
+++ linux-patched/drivers/xenomai/net/drivers/r8169.c	2022-03-21 12:58:29.606886845 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/mpc8260_fcc_enet.c	1970-01-01 01:00:00.000000000 +0100
+/*
+=========================================================================
+ r8169.c: A RealTek RTL8169s/8110s Gigabit Ethernet driver for Linux kernel 2.4.x.
+ --------------------------------------------------------------------
+
+ History:
+ Feb  4 2002	- created initially by ShuChen <shuchen@realtek.com.tw>.
+ May 20 2002	- Add link status force-mode and TBI mode support.
+=========================================================================
+
+RTL8169_VERSION "1.1"	<2002/10/4>
+
+	Bits 4:0 of MII register 4 are called the "selector field" and have to
+	be 00001b to indicate support of IEEE std 802.3 during the NWay process
+	of exchanging Link Code Words (FLP).
+
+RTL8169_VERSION "1.2"	<2003/6/17>
+	Update driver module name.
+	Modify ISR.
+	Add chip mcfg.
+
+RTL8169_VERSION "1.3"	<2003/6/20>
+	Add chip pcfg.
+	Add priv->phy_timer_t, rtl8169_phy_timer_t_handler()
+	Add rtl8169_hw_PHY_config()
+	Add rtl8169_hw_PHY_reset()
+
+RTL8169_VERSION "1.4"	<2003/7/14>
+	Add tx_bytes, rx_bytes.
+
+RTL8169_VERSION "1.5"	<2003/7/18>
+	Set 0x0000 to PHY at offset 0x0b.
+	Modify chip mcfg, pcfg
+	Force media for multiple card.
+RTL8169_VERSION "1.6"	<2003/8/25>
+	Modify receive data buffer.
+
+RTL8169_VERSION "1.7"	<2003/9/18>
+	Add Jumbo Frame support.
+
+RTL8169_VERSION "1.8"	<2003/10/21>
+	Performance and CPU Utilization Enhancement.
+
+RTL8169_VERSION "1.9"	<2003/12/29>
+	Enable Tx/Rx flow control.
+
+RTL8169_VERSION "2.0"	<2004/03/26>
+	Beta version.
+	Support for linux 2.6.x
+
+RTL8169_VERSION "2.1"	<2004/07/05>
+	Modify parameters.
+
+RTL8169_VERSION "2.2"	<2004/08/09>
+	Add pci_dma_sync_single.
+	Add pci_alloc_consistent()/pci_free_consistent().
+	Revise parameters.
+	Recognize our interrupt for linux 2.6.x.
+*/
+
+/*
+ * Ported to RTnet by Klaus Keppler <klaus.keppler@gmx.de>
+ * All RTnet porting stuff may be used and distributed according to the
+ * terms of the GNU General Public License (GPL).
+ *
+ * Version 2.2-04 <2005/08/22>
+ *    Initial release of this driver, based on RTL8169 driver v2.2
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+
+#include <linux/timer.h>
+#include <linux/init.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+#include <linux/pci-aspm.h>
+#endif
+
+#include <rtnet_port.h>	/*** RTnet ***/
+
+#define RTL8169_VERSION "2.2-04"
+#define MODULENAME "rt_r8169"
+#define RTL8169_DRIVER_NAME   MODULENAME " RTnet Gigabit Ethernet driver " RTL8169_VERSION
+#define PFX MODULENAME ": "
+
+//#define RTL8169_DEBUG
+#undef RTL8169_JUMBO_FRAME_SUPPORT	/*** RTnet: do not enable! ***/
+#undef	RTL8169_HW_FLOW_CONTROL_SUPPORT
+
+
+#undef RTL8169_IOCTL_SUPPORT	/*** RTnet: do not enable! ***/
+#undef RTL8169_DYNAMIC_CONTROL
+#undef RTL8169_USE_IO
+
+
+#ifdef RTL8169_DEBUG
+	#define assert(expr) \
+		if(!(expr)) { printk( "Assertion failed! %s,%s,%s,line=%d\n", #expr,__FILE__,__FUNCTION__,__LINE__); }
+	/*** RTnet / <kk>: rt_assert must be used instead of assert() within interrupt context! ***/
+	#define rt_assert(expr) \
+		if(!(expr)) { rtdm_printk( "Assertion failed! %s,%s,%s,line=%d\n", #expr,__FILE__,__FUNCTION__,__LINE__); }
+	/*** RTnet / <kk>: RT_DBG_PRINT must be used instead of DBG_PRINT() within interrupt context! ***/
+	#define DBG_PRINT( fmt, args...)   printk("r8169: " fmt, ## args);
+	#define RT_DBG_PRINT( fmt, args...)   rtdm_printk("r8169: " fmt, ## args);
+#else
+	#define assert(expr) do {} while (0)
+	#define rt_assert(expr) do {} while (0)
+	#define DBG_PRINT( fmt, args...)   ;
+	#define RT_DBG_PRINT( fmt, args...)   ;
+#endif	// end of #ifdef RTL8169_DEBUG
+
+/* media options */
+#define MAX_UNITS 8
+static int media[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/*** RTnet ***/
+static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+/*** /RTnet ***/
+
+/* <kk> Enable debugging output */
+#define DEBUG_RX_SYNC 1
+#define DEBUG_RX_OTHER 2
+#define DEBUG_TX_SYNC 4
+#define DEBUG_TX_OTHER 8
+#define DEBUG_RUN 16
+static int local_debug = -1;
+static int r8169_debug = -1;
+module_param_named(debug, local_debug, int, 0444);
+MODULE_PARM_DESC(debug, MODULENAME " debug level (bit mask, see docs!)");
+
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* MAC address length*/
+#define MAC_ADDR_LEN        6
+
+#define RX_FIFO_THRESH      7       /* 7 means NO threshold, Rx buffer level before first PCI xfer.  */
+#define RX_DMA_BURST        7       /* Maximum PCI burst, '6' is 1024 */
+#define TX_DMA_BURST        7       /* Maximum PCI burst, '6' is 1024 */
+#define ETTh                0x3F    /* 0x3F means NO threshold */
+
+#define ETH_HDR_LEN         14
+#define DEFAULT_MTU         1500
+#define DEFAULT_RX_BUF_LEN  1536
+
+
+#ifdef RTL8169_JUMBO_FRAME_SUPPORT
+#define MAX_JUMBO_FRAME_MTU	( 10000 )
+#define MAX_RX_SKBDATA_SIZE	( MAX_JUMBO_FRAME_MTU + ETH_HDR_LEN )
+#else
+#define MAX_RX_SKBDATA_SIZE 1600
+#endif //end #ifdef RTL8169_JUMBO_FRAME_SUPPORT
+
+
+#define InterFrameGap       0x03    /* 3 means InterFrameGap = the shortest one */
+
+//#define NUM_TX_DESC         64	/* Number of Tx descriptor registers*/
+//#define NUM_RX_DESC         64	/* Number of Rx descriptor registers*/
+
+#define TX_RING_SIZE          16	/*** RTnet ***/
+#define NUM_TX_DESC TX_RING_SIZE	/* Number of Tx descriptor registers*/	/*** RTnet ***/
+#define RX_RING_SIZE           8	/*** RTnet ***/
+#define NUM_RX_DESC RX_RING_SIZE	/* Number of Rx descriptor registers*/	/*** RTnet ***/
+
+#define RTL_MIN_IO_SIZE     0x80
+#define TX_TIMEOUT          (6*HZ)
+//#define RTL8169_TIMER_EXPIRE_TIME 100 //100	/*** RTnet ***/
+
+
+#ifdef RTL8169_USE_IO
+#define RTL_W8(reg, val8)   outb ((val8), ioaddr + (reg))
+#define RTL_W16(reg, val16) outw ((val16), ioaddr + (reg))
+#define RTL_W32(reg, val32) outl ((val32), ioaddr + (reg))
+#define RTL_R8(reg)         inb (ioaddr + (reg))
+#define RTL_R16(reg)        inw (ioaddr + (reg))
+#define RTL_R32(reg)        ((unsigned long) inl (ioaddr + (reg)))
+#else
+/* write/read MMIO register */
+#define RTL_W8(reg, val8)   writeb ((val8), (void *)ioaddr + (reg))
+#define RTL_W16(reg, val16) writew ((val16), (void *)ioaddr + (reg))
+#define RTL_W32(reg, val32) writel ((val32), (void *)ioaddr + (reg))
+#define RTL_R8(reg)         readb ((void *)ioaddr + (reg))
+#define RTL_R16(reg)        readw ((void *)ioaddr + (reg))
+#define RTL_R32(reg)        ((unsigned long) readl ((void *)ioaddr + (reg)))
+#endif
+
+#define MCFG_METHOD_1		0x01
+#define MCFG_METHOD_2		0x02
+#define MCFG_METHOD_3		0x03
+#define MCFG_METHOD_4		0x04
+
+#define PCFG_METHOD_1		0x01	//PHY Reg 0x03 bit0-3 == 0x0000
+#define PCFG_METHOD_2		0x02	//PHY Reg 0x03 bit0-3 == 0x0001
+#define PCFG_METHOD_3		0x03	//PHY Reg 0x03 bit0-3 == 0x0002
+
+
+#ifdef RTL8169_DYNAMIC_CONTROL
+#include "r8169_callback.h"
+#endif  //end #ifdef RTL8169_DYNAMIC_CONTROL
+
+
+static const struct {
+	const char *name;
+	u8 mcfg;                 /* depend on RTL8169 docs */
+	u32 RxConfigMask;       /* should clear the bits supported by this chip */
+} rtl_chip_info[] = {
+	{ "RTL8169",  MCFG_METHOD_1,  0xff7e1880 },
+	{ "RTL8169s/8110s",  MCFG_METHOD_2,  0xff7e1880 },
+	{ "RTL8169s/8110s",  MCFG_METHOD_3,  0xff7e1880 },
+};
+
+
+static struct pci_device_id rtl8169_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8136), 0, 0, 2 },
+	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8167), 0, 0, 1 },
+	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), 0, 0, 1 },
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4300), 0, 0, 1 },	/* <kk> D-Link DGE-528T */
+	{0,},
+};
+
+
+MODULE_DEVICE_TABLE (pci, rtl8169_pci_tbl);
+
+
+enum RTL8169_registers {
+	MAC0 = 0x0,
+	MAR0 = 0x8,
+	TxDescStartAddr	= 0x20,
+	TxHDescStartAddr= 0x28,
+	FLASH	= 0x30,
+	ERSR	= 0x36,
+	ChipCmd	= 0x37,
+	TxPoll	= 0x38,
+	IntrMask = 0x3C,
+	IntrStatus = 0x3E,
+	TxConfig = 0x40,
+	RxConfig = 0x44,
+	RxMissed = 0x4C,
+	Cfg9346 = 0x50,
+	Config0	= 0x51,
+	Config1	= 0x52,
+	Config2	= 0x53,
+	Config3	= 0x54,
+	Config4	= 0x55,
+	Config5	= 0x56,
+	MultiIntr = 0x5C,
+	PHYAR	= 0x60,
+	TBICSR	= 0x64,
+	TBI_ANAR = 0x68,
+	TBI_LPAR = 0x6A,
+	PHYstatus = 0x6C,
+	RxMaxSize = 0xDA,
+	CPlusCmd = 0xE0,
+	RxDescStartAddr	= 0xE4,
+	ETThReg	= 0xEC,
+	FuncEvent	= 0xF0,
+	FuncEventMask	= 0xF4,
+	FuncPresetState	= 0xF8,
+	FuncForceEvent	= 0xFC,
+};
+
+enum RTL8169_register_content {
+	/*InterruptStatusBits*/
+	SYSErr          = 0x8000,
+	PCSTimeout	= 0x4000,
+	SWInt		= 0x0100,
+	TxDescUnavail	= 0x80,
+	RxFIFOOver      = 0x40,
+	LinkChg         = 0x20,
+	RxOverflow      = 0x10,
+	TxErr   = 0x08,
+	TxOK    = 0x04,
+	RxErr   = 0x02,
+	RxOK    = 0x01,
+
+	/*RxStatusDesc*/
+	RxRES = 0x00200000,
+	RxCRC = 0x00080000,
+	RxRUNT= 0x00100000,
+	RxRWT = 0x00400000,
+
+	/*ChipCmdBits*/
+	CmdReset = 0x10,
+	CmdRxEnb = 0x08,
+	CmdTxEnb = 0x04,
+	RxBufEmpty = 0x01,
+
+	/*Cfg9346Bits*/
+	Cfg9346_Lock = 0x00,
+	Cfg9346_Unlock = 0xC0,
+
+	/*rx_mode_bits*/
+	AcceptErr = 0x20,
+	AcceptRunt = 0x10,
+	AcceptBroadcast = 0x08,
+	AcceptMulticast = 0x04,
+	AcceptMyPhys = 0x02,
+	AcceptAllPhys = 0x01,
+
+	/*RxConfigBits*/
+	RxCfgFIFOShift = 13,
+	RxCfgDMAShift = 8,
+
+	/*TxConfigBits*/
+	TxInterFrameGapShift = 24,
+	TxDMAShift = 8,
+
+	/* Config2 register */
+	MSIEnable	= (1 << 5),
+
+	/*rtl8169_PHYstatus*/
+	TBI_Enable	= 0x80,
+	TxFlowCtrl	= 0x40,
+	RxFlowCtrl	= 0x20,
+	_1000bpsF	= 0x10,
+	_100bps		= 0x08,
+	_10bps		= 0x04,
+	LinkStatus	= 0x02,
+	FullDup		= 0x01,
+
+	/*GIGABIT_PHY_registers*/
+	PHY_CTRL_REG = 0,
+	PHY_STAT_REG = 1,
+	PHY_AUTO_NEGO_REG = 4,
+	PHY_1000_CTRL_REG = 9,
+
+	/*GIGABIT_PHY_REG_BIT*/
+	PHY_Restart_Auto_Nego	= 0x0200,
+	PHY_Enable_Auto_Nego	= 0x1000,
+
+	//PHY_STAT_REG = 1;
+	PHY_Auto_Neco_Comp	= 0x0020,
+
+	//PHY_AUTO_NEGO_REG = 4;
+	PHY_Cap_10_Half		= 0x0020,
+	PHY_Cap_10_Full		= 0x0040,
+	PHY_Cap_100_Half	= 0x0080,
+	PHY_Cap_100_Full	= 0x0100,
+
+	//PHY_1000_CTRL_REG = 9;
+	PHY_Cap_1000_Full	= 0x0200,
+	PHY_Cap_1000_Half	= 0x0100,
+
+	PHY_Cap_PAUSE		= 0x0400,
+	PHY_Cap_ASYM_PAUSE	= 0x0800,
+
+	PHY_Cap_Null		= 0x0,
+
+	/*_MediaType*/
+	_10_Half	= 0x01,
+	_10_Full	= 0x02,
+	_100_Half	= 0x04,
+	_100_Full	= 0x08,
+	_1000_Full	= 0x10,
+
+	/*_TBICSRBit*/
+	TBILinkOK       = 0x02000000,
+};
+
+
+
+enum _DescStatusBit {
+	OWNbit	= 0x80000000,
+	EORbit	= 0x40000000,
+	FSbit	= 0x20000000,
+	LSbit	= 0x10000000,
+};
+
+
+struct TxDesc {
+	u32		status;
+	u32		vlan_tag;
+	u32		buf_addr;
+	u32		buf_Haddr;
+};
+
+struct RxDesc {
+	u32		status;
+	u32		vlan_tag;
+	u32		buf_addr;
+	u32		buf_Haddr;
+};
+
+
+typedef struct timer_list rt_timer_t;
+
+enum rtl8169_features {
+	RTL_FEATURE_WOL		= (1 << 0),
+	RTL_FEATURE_MSI		= (1 << 1),
+	RTL_FEATURE_GMII	= (1 << 2),
+};
+
+
+struct rtl8169_private {
+	unsigned long ioaddr;                /* memory map physical address*/
+	struct pci_dev *pci_dev;                /* Index of PCI device  */
+	struct net_device_stats stats;          /* statistics of net device */
+	rtdm_lock_t lock;                       /* spin lock flag */	/*** RTnet ***/
+	int chipset;
+	int mcfg;
+	int pcfg;
+/*	rt_timer_t r8169_timer; */	/*** RTnet ***/
+/*	unsigned long expire_time;	*/	/*** RTnet ***/
+
+	unsigned long phy_link_down_cnt;
+	unsigned long cur_rx;                   /* Index into the Rx descriptor buffer of next Rx pkt. */
+	unsigned long cur_tx;                   /* Index into the Tx descriptor buffer of next Tx pkt. */
+	unsigned long dirty_tx;
+	struct	TxDesc	*TxDescArray;           /* Index of 256-alignment Tx Descriptor buffer */
+	struct	RxDesc	*RxDescArray;           /* Index of 256-alignment Rx Descriptor buffer */
+	struct	rtskb	*Tx_skbuff[NUM_TX_DESC];/* Index of Transmit data buffer */	/*** RTnet ***/
+	struct	rtskb	*Rx_skbuff[NUM_RX_DESC];/* Receive data buffer */			/*** RTnet ***/
+	unsigned char   drvinit_fail;
+
+	dma_addr_t txdesc_array_dma_addr[NUM_TX_DESC];
+	dma_addr_t rxdesc_array_dma_addr[NUM_RX_DESC];
+	dma_addr_t rx_skbuff_dma_addr[NUM_RX_DESC];
+
+	void *txdesc_space;
+	dma_addr_t txdesc_phy_dma_addr;
+	int sizeof_txdesc_space;
+
+	void *rxdesc_space;
+	dma_addr_t rxdesc_phy_dma_addr;
+	int sizeof_rxdesc_space;
+
+	int curr_mtu_size;
+	int tx_pkt_len;
+	int rx_pkt_len;
+
+	int hw_rx_pkt_len;
+
+	int rx_buf_size;	/*** RTnet / <kk> ***/
+
+#ifdef RTL8169_DYNAMIC_CONTROL
+	struct r8169_cb_t rt;
+#endif //end #ifdef RTL8169_DYNAMIC_CONTROL
+
+	unsigned char   linkstatus;
+	rtdm_irq_t irq_handle;			/*** RTnet ***/
+
+	unsigned features;
+};
+
+
+MODULE_AUTHOR ("Realtek, modified for RTnet by Klaus.Keppler@gmx.de");
+MODULE_DESCRIPTION ("RealTek RTL-8169 Gigabit Ethernet driver");
+module_param_array(media, int, NULL, 0444);
+MODULE_LICENSE("GPL");
+
+
+static int rtl8169_open (struct rtnet_device *rtdev);
+static int rtl8169_start_xmit (struct rtskb *skb, struct rtnet_device *rtdev);
+
+static int rtl8169_interrupt(rtdm_irq_t *irq_handle);
+
+static void rtl8169_init_ring (struct rtnet_device *rtdev);
+static void rtl8169_hw_start (struct rtnet_device *rtdev);
+static int rtl8169_close (struct rtnet_device *rtdev);
+static void rtl8169_set_rx_mode (struct rtnet_device *rtdev);
+/* static void rtl8169_tx_timeout (struct net_device *dev); */	/*** RTnet ***/
+static struct net_device_stats *rtl8169_get_stats(struct rtnet_device *netdev);
+
+#ifdef RTL8169_JUMBO_FRAME_SUPPORT
+static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
+#endif //end #ifdef RTL8169_JUMBO_FRAME_SUPPORT
+
+static void rtl8169_hw_PHY_config (struct rtnet_device *rtdev);
+/* static void rtl8169_hw_PHY_reset(struct net_device *dev); */	/*** RTnet ***/
+static const u16 rtl8169_intr_mask = LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK | SYSErr;	/*** <kk> added SYSErr ***/
+static const unsigned int rtl8169_rx_config = (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift) | 0x0000000E;
+
+/*** <kk> these functions are backported from Linux-2.6.12's r8169.c driver ***/
+static void rtl8169_irq_mask_and_ack(unsigned long ioaddr);
+/* static void rtl8169_asic_down(unsigned long ioaddr); */ /*** RTnet ***/
+static void rtl8169_pcierr_interrupt(struct rtnet_device *rtdev);
+
+#define RTL8169_WRITE_GMII_REG_BIT( ioaddr, reg, bitnum, bitval )\
+{ \
+	int val; \
+	if( bitval == 1 ){ val = ( RTL8169_READ_GMII_REG( ioaddr, reg ) | (bitval<<bitnum) ) & 0xffff ; } \
+	else{ val = ( RTL8169_READ_GMII_REG( ioaddr, reg ) & (~(0x0001<<bitnum)) ) & 0xffff ; } \
+	RTL8169_WRITE_GMII_REG( ioaddr, reg, val ); \
+}
+
+
+
+#ifdef RTL8169_DEBUG
+unsigned alloc_rxskb_cnt = 0;
+#define RTL8169_ALLOC_RXSKB(bufsize)    dev_alloc_skb(bufsize); alloc_rxskb_cnt ++ ;
+#define RTL8169_FREE_RXSKB(skb)         kfree_skb(skb); alloc_rxskb_cnt -- ;
+#define RTL8169_NETIF_RX(skb)           netif_rx(skb); alloc_rxskb_cnt -- ;
+#else
+#define RTL8169_ALLOC_RXSKB(bufsize)    dev_alloc_skb(bufsize);
+#define RTL8169_FREE_RXSKB(skb)         kfree_skb(skb);
+#define RTL8169_NETIF_RX(skb)           netif_rx(skb);
+#endif //end #ifdef RTL8169_DEBUG
+
+
+//=================================================================
+//	PHYAR
+//	bit		Symbol
+//	31		Flag
+//	30-21	reserved
+//	20-16	5-bit GMII/MII register address
+//	15-0	16-bit GMII/MII register data
+//=================================================================
+void RTL8169_WRITE_GMII_REG( unsigned long ioaddr, int RegAddr, int value )
+{
+	int	i;
+
+	RTL_W32 ( PHYAR, 0x80000000 | (RegAddr&0xFF)<<16 | value);
+	udelay(1000);
+
+	for( i = 2000; i > 0 ; i -- ){
+		// Check if the RTL8169 has completed writing to the specified MII register
+		if( ! (RTL_R32(PHYAR)&0x80000000) ){
+			break;
+		}
+		else{
+			udelay(100);
+		}// end of if( ! (RTL_R32(PHYAR)&0x80000000) )
+	}// end of for() loop
+}
+//=================================================================
+int RTL8169_READ_GMII_REG( unsigned long ioaddr, int RegAddr )
+{
+	int i, value = -1;
+
+	RTL_W32 ( PHYAR, 0x0 | (RegAddr&0xFF)<<16 );
+	udelay(1000);
+
+	for( i = 2000; i > 0 ; i -- ){
+		// Check if the RTL8169 has completed retrieving data from the specified MII register
+		if( RTL_R32(PHYAR) & 0x80000000 ){
+			value = (int)( RTL_R32(PHYAR)&0xFFFF );
+			break;
+		}
+		else{
+			udelay(100);
+		}// end of if( RTL_R32(PHYAR) & 0x80000000 )
+	}// end of for() loop
+	return value;
+}
+
+
+#ifdef RTL8169_IOCTL_SUPPORT
+#include "r8169_ioctl.c"
+#endif //end #ifdef RTL8169_IOCTL_SUPPORT
+
+
+#ifdef RTL8169_DYNAMIC_CONTROL
+#include "r8169_callback.c"
+#endif
+
+
+
+//======================================================================================================
+//======================================================================================================
+static int rtl8169_init_board ( struct pci_dev *pdev, struct rtnet_device **dev_out, unsigned long *ioaddr_out, int region)
+{
+	unsigned long ioaddr = 0;
+	struct rtnet_device *rtdev;
+	struct rtl8169_private *priv;
+	int rc, i;
+	unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
+
+
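+	/*
+	 * Board bring-up: allocate the RTnet device, disable ASPM, enable the
+	 * PCI device, map its MMIO (or I/O) region, soft-reset the chip,
+	 * optionally enable MSI, then identify the MAC/PHY config method.
+	 */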
+	assert (pdev != NULL);
+	assert (ioaddr_out != NULL);
+
+	*ioaddr_out = 0;
+	*dev_out = NULL;
+
+	/*** RTnet ***/
+	rtdev = rt_alloc_etherdev(sizeof(struct rtl8169_private),
+				RX_RING_SIZE * 2 + TX_RING_SIZE);
+	if (rtdev == NULL) {
+		printk (KERN_ERR PFX "unable to alloc new ethernet\n");
+		return -ENOMEM;
+	}
+	rtdev_alloc_name(rtdev, "rteth%d");
+	rt_rtdev_connect(rtdev, &RTDEV_manager);
+	rtdev->vers = RTDEV_VERS_2_0;
+	rtdev->sysbind = &pdev->dev;
+	/*** /RTnet ***/
+
+	priv = rtdev->priv;
+
+	/* Disable ASPM completely, as it causes devices to randomly stop
+	 * working as well as full system hangs for some PCIe device users. */
+	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+				     PCIE_LINK_STATE_CLKPM);
+
+	// enable device (incl. PCI PM wakeup and hotplug setup)
+	rc = pci_enable_device (pdev);
+	if (rc)
+		goto err_out;
+
+	if (pci_set_mwi(pdev) < 0)
+		printk("R8169: Mem-Wr-Inval unavailable\n");
+
+	mmio_start = pci_resource_start (pdev, region);
+	mmio_end = pci_resource_end (pdev, region);
+	mmio_flags = pci_resource_flags (pdev, region);
+	mmio_len = pci_resource_len (pdev, region);
+
+	// make sure PCI base addr 1 is MMIO
+	if (!(mmio_flags & IORESOURCE_MEM)) {
+		printk (KERN_ERR PFX "region #%d not an MMIO resource, aborting\n", region);
+		rc = -ENODEV;
+		goto err_out;
+	}
+
+	// check for weird/broken PCI region reporting
+	if ( mmio_len < RTL_MIN_IO_SIZE ) {
+		printk (KERN_ERR PFX "Invalid PCI region size(s), aborting\n");
+		rc = -ENODEV;
+		goto err_out;
+	}
+
+
+	rc = pci_request_regions (pdev, rtdev->name);
+	if (rc)
+		goto err_out;
+
+	// enable PCI bus-mastering
+	pci_set_master (pdev);
+
+#ifdef RTL8169_USE_IO
+	ioaddr = pci_resource_start(pdev, 0);
+#else
+	// ioremap MMIO region
+	ioaddr = (unsigned long)ioremap (mmio_start, mmio_len);
+	if (ioaddr == 0) {
+		printk (KERN_ERR PFX "cannot remap MMIO, aborting\n");
+		rc = -EIO;
+		goto err_out_free_res;
+	}
+#endif
+
+	// Soft reset the chip.
+	RTL_W8 ( ChipCmd, CmdReset);
+
+	// Check that the chip has finished the reset.
+	for (i = 1000; i > 0; i--){
+		if ( (RTL_R8(ChipCmd) & CmdReset) == 0){
+			break;
+		}
+		else{
+			udelay (10);
+		}
+	}
+
+	{
+		u8 cfg2 = RTL_R8(Config2) & ~MSIEnable;
+		if (region) {
+			if (pci_enable_msi(pdev))
+				printk("R8169: no MSI, Back to INTx.\n");
+			else {
+				cfg2 |= MSIEnable;
+				priv->features |= RTL_FEATURE_MSI;
+			}
+		}
+		RTL_W8(Config2, cfg2);
+	}
+
+	// identify config method
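+	// The MAC revision is encoded in the TxConfig bits masked by
+	// 0x7c800000; map it onto one of the known MCFG_METHOD_x values,
+	// falling back to MCFG_METHOD_1 for unrecognized silicon.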
+	{
+		unsigned long val32 = (RTL_R32(TxConfig)&0x7c800000);
+
+		if( val32 == (0x1<<28) ){
+			priv->mcfg = MCFG_METHOD_4;
+		}
+		else if( val32 == (0x1<<26) ){
+			priv->mcfg = MCFG_METHOD_3;
+		}
+		else if( val32 == (0x1<<23) ){
+			priv->mcfg = MCFG_METHOD_2;
+		}
+		else if( val32 == 0x00000000 ){
+			priv->mcfg = MCFG_METHOD_1;
+		}
+		else{
+			priv->mcfg = MCFG_METHOD_1;
+		}
+	}
+
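+	// The PHY config level comes from the low nibble of GMII register 3
+	// (PHY ID2 revision field); map it onto PCFG_METHOD_1..3.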
+	{
+		unsigned char val8 = (unsigned char)(RTL8169_READ_GMII_REG(ioaddr,3)&0x000f);
+		if( val8 == 0x00 ){
+			priv->pcfg = PCFG_METHOD_1;
+		}
+		else if( val8 == 0x01 ){
+			priv->pcfg = PCFG_METHOD_2;
+		}
+		else if( val8 == 0x02 ){
+			priv->pcfg = PCFG_METHOD_3;
+		}
+		else{
+			priv->pcfg = PCFG_METHOD_3;
+		}
+	}
+
+
+	for (i = ARRAY_SIZE (rtl_chip_info) - 1; i >= 0; i--){
+		if (priv->mcfg == rtl_chip_info[i].mcfg) {
+			priv->chipset = i;
+			goto match;
+		}
+	}
+
+	//if unknown chip, assume array element #0, original RTL-8169 in this case
+	printk (KERN_DEBUG PFX "PCI device %s: unknown chip version, assuming RTL-8169\n", pci_name(pdev));
+	priv->chipset = 0;
+
+match:
+	*ioaddr_out = ioaddr;
+	*dev_out = rtdev;
+	return 0;
+
+#ifndef RTL8169_USE_IO
+err_out_free_res:
+#endif
+	pci_release_regions (pdev);	/*** <kk> moved outside of #ifdev ***/
+
+err_out:
+	/*** RTnet ***/
+	rt_rtdev_disconnect(rtdev);
+	rtdev_free(rtdev);
+	/*** /RTnet ***/
+	return rc;
+}
+
+
+
+
+
+
+
+//======================================================================================================
+static int rtl8169_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct rtnet_device *rtdev = NULL;	/*** RTnet ***/
+	struct rtl8169_private *priv = NULL;
+	unsigned long ioaddr = 0;
+	static int board_idx = -1;
+	int region = ent->driver_data;
+	int i;
+	int option = -1, Cap10_100 = 0, Cap1000 = 0;
+
+
+	assert (pdev != NULL);
+	assert (ent != NULL);
+
+	board_idx++;
+
+	/*** RTnet ***/
+	if (board_idx >= MAX_UNITS) {
+		return -ENODEV;
+	}
+	if (cards[board_idx] == 0)
+		return -ENODEV;
+	/*** RTnet ***/
+
+	i = rtl8169_init_board (pdev, &rtdev, &ioaddr, region);
+	if (i < 0) {
+		return i;
+	}
+
+	priv = rtdev->priv;
+
+	assert (ioaddr != 0);
+	assert (rtdev != NULL);
+	assert (priv != NULL);
+
+	// Get MAC address //
+	for (i = 0; i < MAC_ADDR_LEN ; i++){
+		rtdev->dev_addr[i] = RTL_R8( MAC0 + i );
+	}
+
+	rtdev->open		= rtl8169_open;
+	rtdev->hard_start_xmit  = rtl8169_start_xmit;
+	rtdev->get_stats        = rtl8169_get_stats;
+	rtdev->stop             = rtl8169_close;
+	/* dev->tx_timeout      = rtl8169_tx_timeout; */			/*** RTnet ***/
+	/* dev->set_multicast_list = rtl8169_set_rx_mode; */	/*** RTnet ***/
+	/* dev->watchdog_timeo  = TX_TIMEOUT; */				/*** RTnet ***/
+	rtdev->irq              = pdev->irq;
+	rtdev->base_addr                = (unsigned long) ioaddr;
+
+#ifdef RTL8169_JUMBO_FRAME_SUPPORT
+	rtdev->change_mtu		= rtl8169_change_mtu;
+#endif //end #ifdef RTL8169_JUMBO_FRAME_SUPPORT
+
+#ifdef RTL8169_IOCTL_SUPPORT
+	rtdev->do_ioctl                 = rtl8169_ioctl;
+#endif //end #ifdef RTL8169_IOCTL_SUPPORT
+
+#ifdef RTL8169_DYNAMIC_CONTROL
+	priv->rt.dev = rtdev;
+#endif //end #ifdef RTL8169_DYNAMIC_CONTROL
+
+	priv = rtdev->priv;				// private data //
+	priv->pci_dev   = pdev;
+	priv->ioaddr    = ioaddr;
+
+//#ifdef RTL8169_JUMBO_FRAME_SUPPORT
+	priv->curr_mtu_size = rtdev->mtu;
+	priv->tx_pkt_len = rtdev->mtu + ETH_HDR_LEN;
+	priv->rx_pkt_len = rtdev->mtu + ETH_HDR_LEN;
+	priv->hw_rx_pkt_len = priv->rx_pkt_len + 8;
+//#endif //end #ifdef RTL8169_JUMBO_FRAME_SUPPORT
+
+	DBG_PRINT("-------------------------- \n");
+	DBG_PRINT("dev->mtu = %d \n", rtdev->mtu);
+	DBG_PRINT("priv->curr_mtu_size = %d \n", priv->curr_mtu_size);
+	DBG_PRINT("priv->tx_pkt_len = %d \n", priv->tx_pkt_len);
+	DBG_PRINT("priv->rx_pkt_len = %d \n", priv->rx_pkt_len);
+	DBG_PRINT("priv->hw_rx_pkt_len = %d \n", priv->hw_rx_pkt_len);
+	DBG_PRINT("-------------------------- \n");
+
+	rtdm_lock_init(&priv->lock);	/*** RTnet ***/
+
+	/*** RTnet ***/
+	if (rt_register_rtnetdev(rtdev) < 0) {
+		/* clean up... */
+		pci_release_regions (pdev);
+		rt_rtdev_disconnect(rtdev);
+		rtdev_free(rtdev);
+		return -ENODEV;
+	}
+	/*** /RTnet ***/
+
+	pci_set_drvdata(pdev, rtdev);     //      pdev->driver_data = data;
+
+	printk (KERN_DEBUG "%s: Identified chip type is '%s'.\n", rtdev->name, rtl_chip_info[priv->chipset].name);
+	printk (KERN_INFO "%s: %s at 0x%lx, "
+				"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
+				"IRQ %d\n",
+				rtdev->name,
+				RTL8169_DRIVER_NAME,
+				rtdev->base_addr,
+				rtdev->dev_addr[0], rtdev->dev_addr[1],
+				rtdev->dev_addr[2], rtdev->dev_addr[3],
+				rtdev->dev_addr[4], rtdev->dev_addr[5],
+				rtdev->irq);
+
+	// Config PHY
+	rtl8169_hw_PHY_config(rtdev);
+
+	DBG_PRINT("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+	RTL_W8( 0x82, 0x01 );
+
+	if( priv->mcfg < MCFG_METHOD_3 ){
+		DBG_PRINT("Set PCI Latency=0x40\n");
+		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
+	}
+
+	if( priv->mcfg == MCFG_METHOD_2 ){
+		DBG_PRINT("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+		RTL_W8( 0x82, 0x01 );
+		DBG_PRINT("Set PHY Reg 0x0bh = 0x00h\n");
+		RTL8169_WRITE_GMII_REG( ioaddr, 0x0b, 0x0000 );	//w 0x0b 15 0 0
+	}
+
+	// if TBI is not enabled
+	if( !(RTL_R8(PHYstatus) & TBI_Enable) ){
+		int	val = RTL8169_READ_GMII_REG( ioaddr, PHY_AUTO_NEGO_REG );
+
+#ifdef RTL8169_HW_FLOW_CONTROL_SUPPORT
+		val |= PHY_Cap_PAUSE | PHY_Cap_ASYM_PAUSE ;
+#endif //end #define RTL8169_HW_FLOW_CONTROL_SUPPORT
+
+		option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx];
+		// Force RTL8169 in 10/100/1000 Full/Half mode.
+		if( option > 0 ){
+			printk(KERN_INFO "%s: Force-mode Enabled. \n", rtdev->name);
+			Cap10_100 = 0;
+			Cap1000 = 0;
+			switch( option ){
+				case _10_Half:
+						Cap10_100 = PHY_Cap_10_Half;
+						Cap1000 = PHY_Cap_Null;
+						break;
+				case _10_Full:
+						Cap10_100 = PHY_Cap_10_Full | PHY_Cap_10_Half;
+						Cap1000 = PHY_Cap_Null;
+						break;
+				case _100_Half:
+						Cap10_100 = PHY_Cap_100_Half | PHY_Cap_10_Full | PHY_Cap_10_Half;
+						Cap1000 = PHY_Cap_Null;
+						break;
+				case _100_Full:
+						Cap10_100 = PHY_Cap_100_Full | PHY_Cap_100_Half | PHY_Cap_10_Full | PHY_Cap_10_Half;
+						Cap1000 = PHY_Cap_Null;
+						break;
+				case _1000_Full:
+						Cap10_100 = PHY_Cap_100_Full | PHY_Cap_100_Half | PHY_Cap_10_Full | PHY_Cap_10_Half;
+						Cap1000 = PHY_Cap_1000_Full;
+						break;
+				default:
+						break;
+			}
+			RTL8169_WRITE_GMII_REG( ioaddr, PHY_AUTO_NEGO_REG, Cap10_100 | ( val&0xC1F ) );	//leave PHY_AUTO_NEGO_REG bit4:0 unchanged
+			RTL8169_WRITE_GMII_REG( ioaddr, PHY_1000_CTRL_REG, Cap1000 );
+		}
+		else{
+			printk(KERN_INFO "%s: Auto-negotiation Enabled.\n", rtdev->name);
+
+			// enable 10/100 Full/Half Mode, leave PHY_AUTO_NEGO_REG bit4:0 unchanged
+			RTL8169_WRITE_GMII_REG( ioaddr, PHY_AUTO_NEGO_REG,
+				PHY_Cap_10_Half | PHY_Cap_10_Full | PHY_Cap_100_Half | PHY_Cap_100_Full | ( val&0xC1F ) );
+
+			// enable 1000 Full Mode
+//			RTL8169_WRITE_GMII_REG( ioaddr, PHY_1000_CTRL_REG, PHY_Cap_1000_Full );
+			RTL8169_WRITE_GMII_REG( ioaddr, PHY_1000_CTRL_REG, PHY_Cap_1000_Full | PHY_Cap_1000_Half);	//rtl8168
+
+		}// end of if( option > 0 )
+
+		// Enable auto-negotiation and restart auto-negotiation
+		RTL8169_WRITE_GMII_REG( ioaddr, PHY_CTRL_REG, PHY_Enable_Auto_Nego | PHY_Restart_Auto_Nego );
+		udelay(100);
+
+		// wait for auto-negotiation process
+		for( i = 10000; i > 0; i-- ){
+			//check if auto-negotiation complete
+			if( RTL8169_READ_GMII_REG(ioaddr, PHY_STAT_REG) & PHY_Auto_Neco_Comp ){
+				udelay(100);
+				option = RTL_R8(PHYstatus);
+				if( option & _1000bpsF ){
+					printk(KERN_INFO "%s: 1000Mbps Full-duplex operation.\n", rtdev->name);
+				}
+				else{
+					printk(KERN_INFO "%s: %sMbps %s-duplex operation.\n", rtdev->name,
+							(option & _100bps) ? "100" : "10", (option & FullDup) ? "Full" : "Half" );
+				}
+				break;
+			}
+			else{
+				udelay(100);
+			}// end of if( RTL8169_READ_GMII_REG(ioaddr, 1) & 0x20 )
+		}// end for-loop to wait for auto-negotiation process
+
+		option = RTL_R8(PHYstatus);
+		if( option & _1000bpsF ){
+			priv->linkstatus = _1000_Full;
+		}
+		else{
+			if(option & _100bps){
+				priv->linkstatus = (option & FullDup) ? _100_Full : _100_Half;
+			}
+			else{
+				priv->linkstatus = (option & FullDup) ? _10_Full : _10_Half;
+			}
+		}
+		DBG_PRINT("priv->linkstatus = 0x%02x\n", priv->linkstatus);
+
+	}// end of TBI is not enabled
+	else{
+		udelay(100);
+		DBG_PRINT("1000Mbps Full-duplex operation, TBI Link %s!\n",(RTL_R32(TBICSR) & TBILinkOK) ? "OK" : "Failed" );
+
+	}// end of TBI is enabled
+
+	return 0;
+}
+
+
+
+
+
+
+
+//======================================================================================================
+static void rtl8169_remove_one (struct pci_dev *pdev)
+{
+	struct rtnet_device *rtdev = pci_get_drvdata(pdev);
+	struct rtl8169_private *priv = rtdev->priv;
+
+	assert (rtdev != NULL);
+
+	/*** RTnet ***/
+	rt_unregister_rtnetdev(rtdev);
+	rt_rtdev_disconnect(rtdev);
+	/*** /RTnet ***/
+
+	if (priv->features & RTL_FEATURE_MSI)
+		pci_disable_msi(pdev);
+
+#ifndef RTL8169_USE_IO
+	iounmap ((void *)(rtdev->base_addr));
+#endif
+	pci_release_regions(pdev);
+
+	rtdev_free(rtdev);	/*** RTnet ***/
+
+	pci_disable_device(pdev);	/*** <kk> Disable device now :-) ***/
+
+	pci_set_drvdata(pdev, NULL);
+}
+
+
+
+
+
+
+
+//======================================================================================================
+static int rtl8169_open (struct rtnet_device *rtdev)
+{
+	struct rtl8169_private *priv = rtdev->priv;
+	struct pci_dev *pdev = priv->pci_dev;
+	int retval;
+//	u8 diff;
+//	u32 TxPhyAddr, RxPhyAddr;
+
+	if( priv->drvinit_fail == 1 ){
+		printk("%s: Gigabit driver open failed.\n", rtdev->name );
+		return -ENOMEM;
+	}
+
+	/*** RTnet ***/
+	rt_stack_connect(rtdev, &STACK_manager);
+
+	retval = rtdm_irq_request(&priv->irq_handle, rtdev->irq, rtl8169_interrupt, 0, "rt_r8169", rtdev);
+	/*** /RTnet ***/
+
+	// retval = request_irq (dev->irq, rtl8169_interrupt, SA_SHIRQ, dev->name, dev);
+	if (retval) {
+		return retval;
+	}
+
+
+	//2004-05-11
+	// Allocate tx/rx descriptor space
+	priv->sizeof_txdesc_space = NUM_TX_DESC * sizeof(struct TxDesc)+256;
+	priv->txdesc_space = pci_alloc_consistent( pdev, priv->sizeof_txdesc_space, &priv->txdesc_phy_dma_addr );
+	if( priv->txdesc_space == NULL ){
+		printk("%s: Gigabit driver alloc txdesc_space failed.\n", rtdev->name );
+		return -ENOMEM;
+	}
+	priv->sizeof_rxdesc_space = NUM_RX_DESC * sizeof(struct RxDesc)+256;
+	priv->rxdesc_space = pci_alloc_consistent( pdev, priv->sizeof_rxdesc_space, &priv->rxdesc_phy_dma_addr );
+	if( priv->rxdesc_space == NULL ){
+		printk("%s: Gigabit driver alloc rxdesc_space failed.\n", rtdev->name );
+		return -ENOMEM;
+	}
+
+	if(priv->txdesc_phy_dma_addr & 0xff){
+		printk("%s: Gigabit driver txdesc_phy_dma_addr is not 256-byte aligned.\n", rtdev->name );
+	}
+	if(priv->rxdesc_phy_dma_addr & 0xff){
+		printk("%s: Gigabit driver rxdesc_phy_dma_addr is not 256-byte aligned.\n", rtdev->name );
+	}
+	// Set tx/rx descriptor space
+	priv->TxDescArray = (struct TxDesc *)priv->txdesc_space;
+	priv->RxDescArray = (struct RxDesc *)priv->rxdesc_space;
+
+	{
+		int i;
+		struct rtskb *skb = NULL;	/*** RTnet ***/
+		priv->rx_buf_size = (rtdev->mtu <= 1500 ? DEFAULT_RX_BUF_LEN : rtdev->mtu + 32);	/*** RTnet / <kk> ***/
+
+		for(i=0;i<NUM_RX_DESC;i++){
+			//skb = RTL8169_ALLOC_RXSKB(MAX_RX_SKBDATA_SIZE);	/*** <kk> ***/
+			skb = rtnetdev_alloc_rtskb(rtdev, priv->rx_buf_size);	/*** RTnet ***/
+			if( skb != NULL ) {
+				rtskb_reserve (skb, 2);	// 16 byte align the IP fields. //
+				priv->Rx_skbuff[i] = skb;
+			}
+			else{
+				printk("%s: Gigabit driver failed to allocate skbuff.\n", rtdev->name);
+				priv->drvinit_fail = 1;
+			}
+		}
+	}
+
+
+	//////////////////////////////////////////////////////////////////////////////
+	rtl8169_init_ring(rtdev);
+	rtl8169_hw_start(rtdev);
+
+	// ------------------------------------------------------
+
+	//DBG_PRINT("%s: %s() alloc_rxskb_cnt = %d\n", dev->name, __FUNCTION__, alloc_rxskb_cnt );	/*** <kk> won't work anymore... ***/
+
+	return 0;
+
+}//end of rtl8169_open (struct rtnet_device *rtdev)
+
+
+
+
+
+
+
+
+//======================================================================================================
+
+
+
+//======================================================================================================
+static void rtl8169_hw_PHY_config (struct rtnet_device *rtdev)
+{
+	struct rtl8169_private *priv = rtdev->priv;
+	void *ioaddr = (void*)priv->ioaddr;
+
+	DBG_PRINT("priv->mcfg=%d, priv->pcfg=%d\n",priv->mcfg,priv->pcfg);
+
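+	/* The write sequences below are chip-specific PHY parameter setups; GMII register 0x1F selects the PHY register page. */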
+	if( priv->mcfg == MCFG_METHOD_4 ){
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x1F, 0x0001 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x1b, 0x841e );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x0e, 0x7bfb );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x09, 0x273a );
+
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x1F, 0x0002 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0x90D0 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x1F, 0x0000 );
+	}else if((priv->mcfg == MCFG_METHOD_2)||(priv->mcfg == MCFG_METHOD_3)){
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x1F, 0x0001 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x15, 0x1000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x18, 0x65C7 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x0000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0x00A1 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0x0008 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0x1020 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0x1000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x0800 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x0000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x7000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0xFF41 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0xDE60 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0x0140 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0x0077 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x7800 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x7000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xA000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0xDF01 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0xDF20 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0xFF95 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0xFA00 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xA800 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xA000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xB000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0xFF41 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0xDE20 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0x0140 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0x00BB );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xB800 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xB000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xF000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0xDF01 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0xDF20 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0xFF95 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0xBF00 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xF800 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xF000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x0000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x1F, 0x0000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x0B, 0x0000 );
+	}
+	else{
+		DBG_PRINT("priv->mcfg=%d. Discard hw PHY config.\n",priv->mcfg);
+	}
+}
+
+
+
+
+
+
+
+
+
+
+//======================================================================================================
+static void rtl8169_hw_start (struct rtnet_device *rtdev)
+{
+	struct rtl8169_private *priv = rtdev->priv;
+	unsigned long ioaddr = priv->ioaddr;
+	u32 i;
+
+
+	/* Soft reset the chip. */
+	RTL_W8 ( ChipCmd, CmdReset);
+
+	/* Check that the chip has finished the reset. */
+	for (i = 1000; i > 0; i--){
+		if ((RTL_R8( ChipCmd ) & CmdReset) == 0) break;
+		else udelay (10);
+	}
+
+	RTL_W8 ( Cfg9346, Cfg9346_Unlock);
+	RTL_W8 ( ChipCmd, CmdTxEnb | CmdRxEnb);
+	RTL_W8 ( ETThReg, ETTh);
+
+	// For gigabit rtl8169
+	RTL_W16	( RxMaxSize, (unsigned short)priv->hw_rx_pkt_len );
+
+	// Set Rx Config register
+	i = rtl8169_rx_config | ( RTL_R32( RxConfig ) & rtl_chip_info[priv->chipset].RxConfigMask);
+	RTL_W32 ( RxConfig, i);
+
+
+	/* Set DMA burst size and Interframe Gap Time */
+	RTL_W32 ( TxConfig, (TX_DMA_BURST << TxDMAShift) | (InterFrameGap << TxInterFrameGapShift) );
+
+
+
+	RTL_W16( CPlusCmd, RTL_R16(CPlusCmd) );
+
+	if(	priv->mcfg == MCFG_METHOD_2 ||
+		priv->mcfg == MCFG_METHOD_3)
+	{
+		RTL_W16( CPlusCmd, (RTL_R16(CPlusCmd)|(1<<14)|(1<<3)) );
+		DBG_PRINT("Set MAC Reg C+CR Offset 0xE0: bit-3 and bit-14\n");
+	}
+	else
+	{
+		RTL_W16( CPlusCmd, (RTL_R16(CPlusCmd)|(1<<3)) );
+		DBG_PRINT("Set MAC Reg C+CR Offset 0xE0: bit-3.\n");
+	}
+
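+	/* Interrupt mitigation (register 0xE2) is programmed to 0, i.e. no coalescing delay; presumably chosen to keep interrupt latency low for RTnet. */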
+	{
+		//RTL_W16(0xE2, 0x1517);
+		//RTL_W16(0xE2, 0x152a);
+		//RTL_W16(0xE2, 0x282a);
+		RTL_W16(0xE2, 0x0000);		/* 0xE2 = IntrMitigate */
+	}
+
+	priv->cur_rx = 0;
+
+	RTL_W32 ( TxDescStartAddr, priv->txdesc_phy_dma_addr);
+	RTL_W32 ( TxDescStartAddr + 4, 0x00);
+	RTL_W32 ( RxDescStartAddr, priv->rxdesc_phy_dma_addr);
+	RTL_W32 ( RxDescStartAddr + 4, 0x00);
+
+	RTL_W8 ( Cfg9346, Cfg9346_Lock );
+	udelay (10);
+
+	RTL_W32 ( RxMissed, 0 );
+
+	rtl8169_set_rx_mode (rtdev);
+
+	/* no early-rx interrupts */
+	RTL_W16 ( MultiIntr, RTL_R16(MultiIntr) & 0xF000);
+
+	/* enable all known interrupts by setting the interrupt mask */
+	RTL_W16 ( IntrMask, rtl8169_intr_mask);
+
+	rtnetif_start_queue (rtdev);	/*** RTnet ***/
+
+}//end of rtl8169_hw_start (struct rtnet_device *rtdev)
+
+
+
+
+
+
+
+//======================================================================================================
+static void rtl8169_init_ring (struct rtnet_device *rtdev)
+{
+	struct rtl8169_private *priv = rtdev->priv;
+	struct pci_dev *pdev = priv->pci_dev;
+	int i;
+	struct rtskb	*skb;
+
+
+	priv->cur_rx = 0;
+	priv->cur_tx = 0;
+	priv->dirty_tx = 0;
+	memset(priv->TxDescArray, 0x0, NUM_TX_DESC*sizeof(struct TxDesc));
+	memset(priv->RxDescArray, 0x0, NUM_RX_DESC*sizeof(struct RxDesc));
+
+
+	for (i=0 ; i<NUM_TX_DESC ; i++){
+		priv->Tx_skbuff[i]=NULL;
+		priv->txdesc_array_dma_addr[i] = pci_map_single(pdev, &priv->TxDescArray[i], sizeof(struct TxDesc), PCI_DMA_TODEVICE);
+	}
+
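+	/* Hand every Rx descriptor to the NIC (OWN bit set); the last one also gets the End-Of-Ring (EOR) bit. */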
+	for (i=0; i <NUM_RX_DESC; i++) {
+		if(i==(NUM_RX_DESC-1)){
+			priv->RxDescArray[i].status = cpu_to_le32((OWNbit | EORbit) | (unsigned long)priv->hw_rx_pkt_len);
+		}
+		else{
+			priv->RxDescArray[i].status = cpu_to_le32(OWNbit | (unsigned long)priv->hw_rx_pkt_len);
+		}
+
+		{//-----------------------------------------------------------------------
+			skb = priv->Rx_skbuff[i];
+			priv->rx_skbuff_dma_addr[i] = pci_map_single(pdev, skb->data, priv->rx_buf_size /* MAX_RX_SKBDATA_SIZE */, PCI_DMA_FROMDEVICE);	/*** <kk> ***/
+
+			if( skb != NULL ){
+				priv->RxDescArray[i].buf_addr = cpu_to_le32(priv->rx_skbuff_dma_addr[i]);
+				priv->RxDescArray[i].buf_Haddr = 0;
+			}
+			else{
+				DBG_PRINT("%s: %s() Rx_skbuff == NULL\n", rtdev->name, __FUNCTION__);
+				priv->drvinit_fail = 1;
+			}
+		}//-----------------------------------------------------------------------
+		priv->rxdesc_array_dma_addr[i] = pci_map_single(pdev, &priv->RxDescArray[i], sizeof(struct RxDesc), PCI_DMA_TODEVICE);
+		pci_dma_sync_single_for_device(pdev, priv->rxdesc_array_dma_addr[i], sizeof(struct RxDesc), PCI_DMA_TODEVICE);
+	}
+}
+
+
+
+
+
+
+
+//======================================================================================================
+static void rtl8169_tx_clear (struct rtl8169_private *priv)
+{
+	int i;
+
+	priv->cur_tx = 0;
+	for ( i = 0 ; i < NUM_TX_DESC ; i++ ){
+		if ( priv->Tx_skbuff[i] != NULL ) {
+			dev_kfree_rtskb ( priv->Tx_skbuff[i] );
+			priv->Tx_skbuff[i] = NULL;
+			priv->stats.tx_dropped++;
+		}
+	}
+}
+
+
+
+
+
+
+
+//======================================================================================================
+
+
+
+
+
+
+//======================================================================================================
+static int rtl8169_start_xmit (struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct rtl8169_private *priv = rtdev->priv;
+	unsigned long ioaddr = priv->ioaddr;
+	struct pci_dev *pdev = priv->pci_dev;
+	int entry = priv->cur_tx % NUM_TX_DESC;
+	// int buf_len = 60;
+	dma_addr_t txbuf_dma_addr;
+	rtdm_lockctx_t context;	/*** RTnet ***/
+	u32 status, len;		/* <kk> */
+
+	rtdm_lock_get_irqsave(&priv->lock, context);	/*** RTnet ***/
+
+	status = le32_to_cpu(priv->TxDescArray[entry].status);
+
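+	/* Only fill this descriptor if the NIC has released it (OWN bit clear). */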
+	if( (status & OWNbit)==0 ){
+
+		priv->Tx_skbuff[entry] = skb;
+
+		len = skb->len;
+		if (len < ETH_ZLEN) {
+			skb = rtskb_padto(skb, ETH_ZLEN);
+			if (skb == NULL) {
+				/* Error... */
+				rtdm_printk("%s: Error -- rtskb_padto returned NULL; out of memory?\n", rtdev->name);
+			}
+			len = ETH_ZLEN;
+		}
+
+		txbuf_dma_addr = pci_map_single(pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+		priv->TxDescArray[entry].buf_addr = cpu_to_le32(txbuf_dma_addr);
+
+		/* <kk> print TX frame debug information? */
+		while (r8169_debug & (DEBUG_TX_SYNC | DEBUG_TX_OTHER)) {
+			unsigned short proto = 0;
+
+			/* get ethernet protocol id */
+			if (skb->len < 14) break;	/* packet too small! */
+			if (skb->len > 12) proto = be16_to_cpu(*((unsigned short *)(skb->data + 12)));
+
+			if (proto == 0x9021 && !(r8169_debug & DEBUG_TX_SYNC)) {
+				/* don't show TDMA Sync frames for better debugging, so look at RTmac frame type... */
+				unsigned short type;
+
+				if (skb->len < 16) break;	/* packet too small! */
+				type = be16_to_cpu(*((unsigned short *)(skb->data + 14)));
+
+				if (type == 0x0001) {
+					/* TDMA-Frame; get Message ID */
+					unsigned short tdma_version;
+
+					if (skb->len < 20) break;	/* packet too small! */
+					tdma_version = be16_to_cpu(*((unsigned short *)(skb->data + 18)));
+
+					if (tdma_version == 0x0201) {
+						unsigned short tdma_id;
+
+						if (skb->len < 22) break;	/* packet too small! */
+						tdma_id = be16_to_cpu(*((unsigned short *)(skb->data + 20)));
+
+						if (tdma_id == 0x0000 && !(r8169_debug & DEBUG_TX_SYNC)) {
+							/* TDMA sync frame found, but not allowed to print it */
+							break;
+						}
+					}
+				}
+
+			}
+
+			/* print frame information */
+			RT_DBG_PRINT("%s: TX len = %d, skb->len = %d, eth_proto=%04x\n", __FUNCTION__, len, skb->len, proto);
+
+			break;	/* leave loop */
+		}
+
+		if( len > priv->tx_pkt_len ){
+			rtdm_printk("%s: Error -- Tx packet size(%d) > mtu(%d)+14\n", rtdev->name, len, rtdev->mtu);
+			len = priv->tx_pkt_len;
+		}
+
+		/*** RTnet ***/
+		/* get and patch time stamp just before the transmission */
+		if (skb->xmit_stamp)
+			*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+		/*** /RTnet ***/
+
+		if( entry != (NUM_TX_DESC-1) ){
+			status = (OWNbit | FSbit | LSbit) | len;
+		}
+		else{
+			status = (OWNbit | EORbit | FSbit | LSbit) | len;
+		}
+		priv->TxDescArray[entry].status = cpu_to_le32(status);
+
+		pci_dma_sync_single_for_device(pdev, priv->txdesc_array_dma_addr[entry], sizeof(struct TxDesc), PCI_DMA_TODEVICE);
+
+		RTL_W8 ( TxPoll, 0x40);		//set polling bit
+
+		//rtdev->trans_start = jiffies;
+
+		priv->stats.tx_bytes += len;
+		priv->cur_tx++;
+	}//end of if( (priv->TxDescArray[entry].status & 0x80000000)==0 )
+
+	rtdm_lock_put_irqrestore(&priv->lock, context);	/*** RTnet ***/
+
+	if ( (priv->cur_tx - NUM_TX_DESC) == priv->dirty_tx ){
+		if (r8169_debug & DEBUG_RUN) rtdm_printk(KERN_DEBUG "%s: stopping rtnetif queue\n", __FUNCTION__);
+		rtnetif_stop_queue (rtdev);
+	}
+	else{
+		if (rtnetif_queue_stopped (rtdev)){
+			if (r8169_debug & DEBUG_RUN) rtdm_printk(KERN_DEBUG "%s: waking rtnetif queue\n", __FUNCTION__);
+			rtnetif_wake_queue (rtdev);
+		}
+	}
+
+	return 0;
+}
+
+
+
+
+
+
+
+//======================================================================================================
+/* This routine is logically part of the interrupt handler, but isolated
+   for clarity. */
+static void rtl8169_tx_interrupt (struct rtnet_device *rtdev, struct rtl8169_private *priv, unsigned long ioaddr)
+{
+	unsigned long dirty_tx, tx_left=0;
+	//int entry = priv->cur_tx % NUM_TX_DESC;	/* <kk> */
+	int txloop_cnt = 0;
+
+	rt_assert (rtdev != NULL);
+	rt_assert (priv != NULL);
+	rt_assert (ioaddr != 0);
+
+	rtdm_lock_get(&priv->lock); /*** RTnet ***/
+
+	dirty_tx = priv->dirty_tx;
+	smp_rmb();	/*** <kk> ***/
+	tx_left = priv->cur_tx - dirty_tx;
+
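+	/* Reclaim descriptors from dirty_tx onwards that the NIC has handed back (OWN bit clear), freeing their rtskbs. */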
+	while( (tx_left > 0) && (txloop_cnt < max_interrupt_work) ){
+		unsigned int entry = dirty_tx % NUM_TX_DESC;	/* <kk> */
+		if( (le32_to_cpu(priv->TxDescArray[entry].status) & OWNbit) == 0 ){
+
+#ifdef RTL8169_DYNAMIC_CONTROL
+			r8169_callback_tx(&(priv->rt), 1, priv->Tx_skbuff[dirty_tx % NUM_TX_DESC]->len);
+#endif //end #ifdef RTL8169_DYNAMIC_CONTROL
+
+			if (priv->txdesc_array_dma_addr[entry])
+				pci_unmap_single(priv->pci_dev, priv->txdesc_array_dma_addr[entry], priv->Tx_skbuff[entry]->len, PCI_DMA_TODEVICE);	/*** ##KK## ***/
+			dev_kfree_rtskb( priv->Tx_skbuff[entry] );	/*** RTnet; previously: dev_kfree_skb_irq() - luckily we're within an IRQ ***/
+			priv->Tx_skbuff[entry] = NULL;
+			priv->stats.tx_packets++;
+			dirty_tx++;
+			tx_left--;
+			entry++;
+		}
+		txloop_cnt ++;
+	}
+
+	if (priv->dirty_tx != dirty_tx) {
+		priv->dirty_tx = dirty_tx;
+		smp_wmb();	/*** <kk> ***/
+		if (rtnetif_queue_stopped (rtdev))
+			rtnetif_wake_queue (rtdev);
+	}
+
+	rtdm_lock_put(&priv->lock); /*** RTnet ***/
+
+}
+
+
+
+
+
+
+//======================================================================================================
+/* This routine is logically part of the interrupt handler, but isolated
+   for clarity. */
+static void rtl8169_rx_interrupt (struct rtnet_device *rtdev, struct rtl8169_private *priv, unsigned long ioaddr, nanosecs_abs_t *time_stamp)
+{
+	struct pci_dev *pdev = priv->pci_dev;
+	int cur_rx;
+	int pkt_size = 0 ;
+	int rxdesc_cnt = 0;
+	/* int ret; */	/*** RTnet ***/
+	struct rtskb *n_skb = NULL;
+	struct rtskb *cur_skb;
+	struct rtskb *rx_skb;
+	struct	RxDesc	*rxdesc;
+
+	rt_assert (rtdev != NULL);
+	rt_assert (priv != NULL);
+	rt_assert (ioaddr != 0);
+
+
+	cur_rx = priv->cur_rx;
+
+	rxdesc = &priv->RxDescArray[cur_rx];
+	pci_dma_sync_single_for_cpu(pdev, priv->rxdesc_array_dma_addr[cur_rx], sizeof(struct RxDesc), PCI_DMA_FROMDEVICE);
+
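+	/* Process completed Rx descriptors until the NIC still owns the next one or the work budget is used up. */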
+	while ( ((le32_to_cpu(rxdesc->status) & OWNbit)== 0) && (rxdesc_cnt < max_interrupt_work) ){
+
+	    rxdesc_cnt++;
+
+	    if( le32_to_cpu(rxdesc->status) & RxRES ){
+			rtdm_printk(KERN_INFO "%s: Rx ERROR!!!\n", rtdev->name);
+			priv->stats.rx_errors++;
+			if ( le32_to_cpu(rxdesc->status) & (RxRWT|RxRUNT) )
+				priv->stats.rx_length_errors++;
+			if ( le32_to_cpu(rxdesc->status) & RxCRC) {
+				/* in the rt_via-rhine.c there's a lock around the incrementation... we'll do that also here <kk> */
+				rtdm_lock_get(&priv->lock); /*** RTnet ***/
+				priv->stats.rx_crc_errors++;
+				rtdm_lock_put(&priv->lock); /*** RTnet ***/
+			}
+	    }
+	    else{
+			pkt_size=(int)(le32_to_cpu(rxdesc->status) & 0x00001FFF)-4;
+
+			if( pkt_size > priv->rx_pkt_len ){
+				rtdm_printk("%s: Error -- Rx packet size(%d) > mtu(%d)+14\n", rtdev->name, pkt_size, rtdev->mtu);
+				pkt_size = priv->rx_pkt_len;
+			}
+
+			{// -----------------------------------------------------
+				rx_skb = priv->Rx_skbuff[cur_rx];
+				// n_skb = RTL8169_ALLOC_RXSKB(MAX_RX_SKBDATA_SIZE);	/*** <kk> ***/
+				n_skb = rtnetdev_alloc_rtskb(rtdev, priv->rx_buf_size);	/*** RTnet ***/
+				if( n_skb != NULL ) {
+					rtskb_reserve (n_skb, 2);	// 16 byte align the IP fields. //
+
+					// Indicate rx_skb
+					if( rx_skb != NULL ){
+						pci_dma_sync_single_for_cpu(pdev, priv->rx_skbuff_dma_addr[cur_rx], sizeof(struct RxDesc), PCI_DMA_FROMDEVICE);
+
+						rtskb_put ( rx_skb, pkt_size );
+						rx_skb->protocol = rt_eth_type_trans ( rx_skb, rtdev );
+						rx_skb->time_stamp = *time_stamp;	/*** RTnet ***/
+						//ret = RTL8169_NETIF_RX (rx_skb);
+						rtnetif_rx(rx_skb);	/*** RTnet ***/
+
+//						dev->last_rx = jiffies;
+						priv->stats.rx_bytes += pkt_size;
+						priv->stats.rx_packets++;
+
+#ifdef RTL8169_DYNAMIC_CONTROL
+						r8169_callback_rx( &(priv->rt), 1, pkt_size);
+#endif //end #ifdef RTL8169_DYNAMIC_CONTROL
+
+					}//end if( rx_skb != NULL )
+
+					priv->Rx_skbuff[cur_rx] = n_skb;
+				}
+				else{
+					RT_DBG_PRINT("%s: Allocate n_skb failed! (priv->rx_buf_size = %d)\n",__FUNCTION__, priv->rx_buf_size );
+					priv->Rx_skbuff[cur_rx] = rx_skb;
+				}
+
+
+				// Update rx descriptor
+				if( cur_rx == (NUM_RX_DESC-1) ){
+					priv->RxDescArray[cur_rx].status  = cpu_to_le32((OWNbit | EORbit) | (unsigned long)priv->hw_rx_pkt_len);
+				}
+				else{
+					priv->RxDescArray[cur_rx].status  = cpu_to_le32(OWNbit | (unsigned long)priv->hw_rx_pkt_len);
+				}
+
+				cur_skb = priv->Rx_skbuff[cur_rx];
+
+				if( cur_skb != NULL ){
+					priv->rx_skbuff_dma_addr[cur_rx] = pci_map_single(pdev, cur_skb->data, priv->rx_buf_size /* <kk> MAX_RX_SKBDATA_SIZE */, PCI_DMA_FROMDEVICE);
+					rxdesc->buf_addr = cpu_to_le32(priv->rx_skbuff_dma_addr[cur_rx]);
+				}
+				else{
+					RT_DBG_PRINT("%s: %s() cur_skb == NULL\n", rtdev->name, __FUNCTION__);
+				}
+
+			}//------------------------------------------------------------
+
+	    }// end of if( priv->RxDescArray[cur_rx].status & RxRES )
+
+	    cur_rx = (cur_rx +1) % NUM_RX_DESC;
+	    rxdesc = &priv->RxDescArray[cur_rx];
+	    pci_dma_sync_single_for_cpu(pdev, priv->rxdesc_array_dma_addr[cur_rx], sizeof(struct RxDesc), PCI_DMA_FROMDEVICE);
+
+	}// end of while ( (priv->RxDescArray[cur_rx].status & 0x80000000)== 0)
+
+	if( rxdesc_cnt >= max_interrupt_work ){
+		RT_DBG_PRINT("%s: Too much work at Rx interrupt.\n", rtdev->name);
+	}
+
+	priv->cur_rx = cur_rx;
+}
+
+
+
+
+
+
+
+
+//======================================================================================================
+/* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */
+static int rtl8169_interrupt(rtdm_irq_t *irq_handle)
+{
+	/* struct net_device *dev = (struct net_device *) dev_instance; */	/*** RTnet ***/
+	struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device); /*** RTnet ***/
+	struct rtl8169_private *priv = rtdev->priv;
+	int boguscnt = max_interrupt_work;
+	unsigned long ioaddr = priv->ioaddr;
+	int status = 0;
+	unsigned int old_packet_cnt = priv->stats.rx_packets; /*** RTnet ***/
+	nanosecs_abs_t time_stamp = rtdm_clock_read(); /*** RTnet ***/
+
+	int interrupt_handled = RTDM_IRQ_NONE; /*** <kk> ***/
+
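+	/* Service pending events until the status register is clear, bounded by max_interrupt_work iterations. */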
+	do {
+		status = RTL_R16(IntrStatus);	/* read interrupt status */
+
+		if ((status == 0xFFFF) || (!status)) {
+			break;						/* hotplug/major error/no more work/shared irq */
+		}
+
+
+		interrupt_handled = RTDM_IRQ_HANDLED;
+
+/*		if (unlikely(!rtnetif_running(rtdev))) {
+			rtl8169_asic_down(ioaddr);
+			goto out;
+		}
+*/
+
+		/* Acknowledge interrupts */
+		RTL_W16(IntrStatus, 0xffff);
+
+		if (!(status & rtl8169_intr_mask)) {
+			break;
+		}
+
+		if (unlikely(status & SYSErr)) {
+			RT_DBG_PRINT("PCI error...!? %i\n", __LINE__);
+			rtl8169_pcierr_interrupt(rtdev);
+			break;
+		}
+
+		/*** RTnet / <kk> (Linux-2.6.12-Backport) ***/
+		if (unlikely(status & LinkChg)) {
+			rtdm_lock_get(&priv->lock);
+			if (RTL_R8(PHYstatus) & LinkStatus)	/*** <kk> only supporting XMII, not yet TBI ***/
+				rtnetif_carrier_on(rtdev);
+			else
+				rtnetif_carrier_off(rtdev);
+			rtdm_lock_put(&priv->lock);
+		}
+
+		// Rx interrupt
+		if (status & (RxOK | RxOverflow | RxFIFOOver)) {
+			rtl8169_rx_interrupt (rtdev, priv, ioaddr, &time_stamp);
+		}
+
+		// Tx interrupt
+		if (status & (TxOK | TxErr)) {
+			rtl8169_tx_interrupt (rtdev, priv, ioaddr);
+		}
+
+		boguscnt--;
+	} while (boguscnt > 0);
+
+	if (boguscnt <= 0) {
+		rtdm_printk(KERN_WARNING "%s: Too much work at interrupt!\n", rtdev->name);
+		RTL_W16( IntrStatus, 0xffff);	/* Clear all interrupt sources */
+	}
+
+//out:
+
+	if (old_packet_cnt != priv->stats.rx_packets)
+		rt_mark_stack_mgr(rtdev);
+	return interrupt_handled;
+}
+
+
+
+
+
+
+
+//======================================================================================================
+static int rtl8169_close (struct rtnet_device *rtdev)
+{
+	struct rtl8169_private *priv = rtdev->priv;
+	unsigned long ioaddr = priv->ioaddr;
+	int i;
+	rtdm_lockctx_t context;	/*** RTnet, for rtdm_lock_get_irqsave ***/
+
+	// -----------------------------------------
+	/* rtl8169_delete_timer( &(priv->r8169_timer) ); */	/*** RTnet ***/
+
+
+	rtdm_lock_get_irqsave (&priv->lock, context);	/*** RTnet ***/
+
+	rtnetif_stop_queue (rtdev);		/*** RTnet / <kk>: moved behind spin_lock! ***/
+
+	/* Stop the chip's Tx and Rx processes. */
+	RTL_W8 ( ChipCmd, 0x00);
+
+	/* Disable interrupts by clearing the interrupt mask. */
+	RTL_W16 ( IntrMask, 0x0000);
+
+	/* Update the error counts. */
+	priv->stats.rx_missed_errors += RTL_R32(RxMissed);
+	RTL_W32( RxMissed, 0);
+
+	rtdm_lock_put_irqrestore(&priv->lock, context);	/*** RTnet ***/
+
+	/*** RTnet ***/
+	if ( (i=rtdm_irq_free(&priv->irq_handle))<0 )
+		return i;
+
+	rt_stack_disconnect(rtdev);
+	/*** /RTnet ***/
+
+	rtl8169_tx_clear (priv);
+
+	//2004-05-11
+	if(priv->txdesc_space != NULL){
+		pci_free_consistent(
+				priv->pci_dev,
+				priv->sizeof_txdesc_space,
+				priv->txdesc_space,
+				priv->txdesc_phy_dma_addr
+		);
+		priv->txdesc_space = NULL;
+	}
+
+	if(priv->rxdesc_space != NULL){
+		pci_free_consistent(
+				priv->pci_dev,
+				priv->sizeof_rxdesc_space,
+				priv->rxdesc_space,
+				priv->rxdesc_phy_dma_addr
+		);
+		priv->rxdesc_space = NULL;
+	}
+
+	priv->TxDescArray = NULL;
+	priv->RxDescArray = NULL;
+
+	{//-----------------------------------------------------------------------------
+		for(i=0;i<NUM_RX_DESC;i++){
+			if( priv->Rx_skbuff[i] != NULL ) {
+				//RTL8169_FREE_RXSKB ( priv->Rx_skbuff[i] );	/*** <kk> ***/
+				dev_kfree_rtskb(priv->Rx_skbuff[i]);	/*** RTnet ***/
+			}
+		}
+	}//-----------------------------------------------------------------------------
+
+	//DBG_PRINT("%s: %s() alloc_rxskb_cnt = %d\n", dev->name, __FUNCTION__, alloc_rxskb_cnt );	/*** <kk> won't work anymore ***/
+
+	return 0;
+}
+
+
+
+
+
+
+
+//======================================================================================================
+static void rtl8169_set_rx_mode (struct rtnet_device *rtdev)
+{
+	struct rtl8169_private *priv = rtdev->priv;
+	unsigned long ioaddr = priv->ioaddr;
+	rtdm_lockctx_t context;
+	u32 mc_filter[2];	/* Multicast hash filter */
+	int rx_mode;
+	u32 tmp=0;
+
+
+	if (rtdev->flags & IFF_PROMISC) {
+		/* Unconditionally log net taps. */
+		printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n", rtdev->name);
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys | AcceptAllPhys;
+		mc_filter[1] = mc_filter[0] = 0xffffffff;
+	} else if (rtdev->flags & IFF_ALLMULTI) {
+		/* Too many to filter perfectly -- accept all multicasts. */
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+		mc_filter[1] = mc_filter[0] = 0xffffffff;
+	} else {
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+		mc_filter[1] = mc_filter[0] = 0;
+	}
+
+	rtdm_lock_get_irqsave(&priv->lock, context);			/*** RTnet ***/
+
+	tmp = rtl8169_rx_config | rx_mode | (RTL_R32(RxConfig) & rtl_chip_info[priv->chipset].RxConfigMask);
+
+	RTL_W32 ( RxConfig, tmp);
+	RTL_W32 ( MAR0 + 0, mc_filter[0]);
+	RTL_W32 ( MAR0 + 4, mc_filter[1]);
+
+	rtdm_lock_put_irqrestore(&priv->lock, context);	/*** RTnet ***/
+
+}//end of rtl8169_set_rx_mode (struct rtnet_device *rtdev)
+
+
+
+
+
+
+
+//================================================================================
+static struct net_device_stats *rtl8169_get_stats(struct rtnet_device *rtdev)
+
+{
+    struct rtl8169_private *priv = rtdev->priv;
+
+    return &priv->stats;
+}
+
+
+
+
+
+
+
+//================================================================================
+static struct pci_driver rtl8169_pci_driver = {
+	name:		MODULENAME,
+	id_table:	rtl8169_pci_tbl,
+	probe:		rtl8169_init_one,
+	remove:		rtl8169_remove_one,
+	suspend:	NULL,
+	resume:		NULL,
+};
+
+
+
+
+
+//======================================================================================================
+static int __init rtl8169_init_module (void)
+{
+	/* <kk> Enable debugging output... */
+	if (local_debug > 0) {
+		r8169_debug = local_debug;
+	}
+	if (r8169_debug & DEBUG_RUN) printk("Initializing " MODULENAME " driver");
+	return pci_register_driver (&rtl8169_pci_driver);
+}
+
+
+
+
+//======================================================================================================
+static void __exit rtl8169_cleanup_module (void)
+{
+	pci_unregister_driver (&rtl8169_pci_driver);
+}
+
+
+#ifdef RTL8169_JUMBO_FRAME_SUPPORT
+static int rtl8169_change_mtu(struct rtnet_device *dev, int new_mtu)
+{
+	struct rtl8169_private *priv = dev->priv;
+	unsigned long ioaddr = priv->ioaddr;
+
+	if( new_mtu > MAX_JUMBO_FRAME_MTU ){
+		printk("%s: Error -- new_mtu(%d) > MAX_JUMBO_FRAME_MTU(%d).\n", dev->name, new_mtu, MAX_JUMBO_FRAME_MTU);
+		return -1;
+	}
+
+	dev->mtu = new_mtu;
+
+	priv->curr_mtu_size = new_mtu;
+	priv->tx_pkt_len = new_mtu + ETH_HDR_LEN;
+	priv->rx_pkt_len = new_mtu + ETH_HDR_LEN;
+	priv->hw_rx_pkt_len = priv->rx_pkt_len + 8;
+
+	RTL_W8 ( Cfg9346, Cfg9346_Unlock);
+	RTL_W16	( RxMaxSize, (unsigned short)priv->hw_rx_pkt_len );
+	RTL_W8 ( Cfg9346, Cfg9346_Lock);
+
+	DBG_PRINT("-------------------------- \n");
+	DBG_PRINT("dev->mtu = %d \n", dev->mtu);
+	DBG_PRINT("priv->curr_mtu_size = %d \n", priv->curr_mtu_size);
+	DBG_PRINT("priv->rx_pkt_len = %d \n", priv->rx_pkt_len);
+	DBG_PRINT("priv->tx_pkt_len = %d \n", priv->tx_pkt_len);
+	DBG_PRINT("RTL_W16( RxMaxSize, %d )\n", priv->hw_rx_pkt_len);
+	DBG_PRINT("-------------------------- \n");
+
+	rtl8169_close (dev);
+	rtl8169_open (dev);
+
+	return 0;
+}
+#endif //end #ifdef RTL8169_JUMBO_FRAME_SUPPORT
+
+
+
+/*** <kk> these functions are backported from Linux-2.6.12's r8169.c driver ***/
+static void rtl8169_irq_mask_and_ack(unsigned long ioaddr)
+{
+	RTL_W16(IntrMask, 0x0000);
+
+	RTL_W16(IntrStatus, 0xffff);
+}
+
+static void rtl8169_pcierr_interrupt(struct rtnet_device *rtdev)
+{
+	struct rtl8169_private *priv = rtdev->priv;
+	struct pci_dev *pdev = priv->pci_dev;
+	unsigned long ioaddr = priv->ioaddr;
+	u16 pci_status, pci_cmd;
+
+	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+	pci_read_config_word(pdev, PCI_STATUS, &pci_status);
+
+	rtdm_printk(KERN_ERR PFX "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
+	       rtdev->name, pci_cmd, pci_status);
+
+	/*
+	 * The recovery sequence below admits a very elaborated explanation:
+	 * - it seems to work;
+	 * - I did not see what else could be done.
+	 *
+	 * Feel free to adjust to your needs.
+	 */
+	pci_write_config_word(pdev, PCI_COMMAND,
+			      pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+
+	pci_write_config_word(pdev, PCI_STATUS,
+		pci_status & (PCI_STATUS_DETECTED_PARITY |
+		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
+		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
+
+	/* The infamous DAC f*ckup only happens at boot time */
+	/*** <kk> ***
+	if ((priv->cp_cmd & PCIDAC) && !priv->dirty_rx && !priv->cur_rx) {
+		rtdm_printk(KERN_INFO PFX "%s: disabling PCI DAC.\n", rtdev->name);
+		priv->cp_cmd &= ~PCIDAC;
+		RTL_W16(CPlusCmd, priv->cp_cmd);
+		rtdev->features &= ~NETIF_F_HIGHDMA;
+		rtl8169_schedule_work(rtdev, rtl8169_reinit_task);
+	}
+	 *** /RTnet ***/
+
+	/* Disable interrupts */
+	rtl8169_irq_mask_and_ack(ioaddr);
+
+	/* Reset the chipset */
+	RTL_W8(ChipCmd, CmdReset);
+
+	/* PCI commit */
+	RTL_R8(ChipCmd);
+
+}
+
+
+
+
+
+
+//======================================================================================================
+module_init(rtl8169_init_module);
+module_exit(rtl8169_cleanup_module);
+++ linux-patched/drivers/xenomai/net/drivers/mpc8260_fcc_enet.c	2022-03-21 12:58:29.601886894 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/rt_macb.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Fast Ethernet Controller (FCC) driver for Motorola MPC8260.
+ * Copyright (c) 2000 MontaVista Software, Inc.   Dan Malek (dmalek@jlc.net)
+ *
+ * This version of the driver is a combination of the 8xx fec and
+ * 8260 SCC Ethernet drivers.  This version has some additional
+ * configuration options, which should probably be moved out of
+ * here.  This driver currently works for the EST SBC8260,
+ * SBS Diablo/BCM, Embedded Planet RPX6, TQM8260, and others.
+ *
+ * Right now, I am very wasteful with the buffers.  I allocate memory
+ * pages and then divide them into 2K frame buffers.  This way I know I
+ * have buffers large enough to hold one frame within one buffer descriptor.
+ * Once I get this working, I will use 64 or 128 byte CPM buffers, which
+ * will be much more memory efficient and will easily handle lots of
+ * small packets.  Since this is a cache coherent processor and CPM,
+ * I could also preallocate SKB's and use them directly on the interface.
+ *
+ * Ported to RTnet from "linuxppc_2_4_devel/arch/ppc/8260_io/fcc_enet.c".
+ * Copyright (c) 2003 Wolfgang Grandegger (wg@denx.de)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include <asm/immap_8260.h>
+#include <asm/pgtable.h>
+#include <asm/mpc8260.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <asm/cpm_8260.h>
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+#error "MDIO for PHY configuration is not yet supported!"
+#endif
+
+#include <rtnet_port.h>
+
+MODULE_AUTHOR("Maintainer: Wolfgang Grandegger <wg@denx.de>");
+MODULE_DESCRIPTION("RTnet driver for the MPC8260 FCC Ethernet");
+MODULE_LICENSE("GPL");
+
+static unsigned int rx_pool_size =  0;
+MODULE_PARM(rx_pool_size, "i");
+MODULE_PARM_DESC(rx_pool_size, "Receive buffer pool size");
+
+static unsigned int rtnet_fcc = 1;
+MODULE_PARM(rtnet_fcc, "i");
+MODULE_PARM_DESC(rtnet_fcc, "FCCx port for RTnet (default=1)");
+
+#define RT_DEBUG(fmt,args...)
+
+/* The transmitter timeout
+ */
+#define TX_TIMEOUT	(2*HZ)
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+/* Forward declarations of some structures to support different PHYs */
+
+typedef struct {
+	uint mii_data;
+	void (*funct)(uint mii_reg, struct net_device *dev);
+} phy_cmd_t;
+
+typedef struct {
+	uint id;
+	char *name;
+
+	const phy_cmd_t *config;
+	const phy_cmd_t *startup;
+	const phy_cmd_t *ack_int;
+	const phy_cmd_t *shutdown;
+} phy_info_t;
+
+/* Register definitions for the PHY. */
+
+#define MII_REG_CR          0  /* Control Register                         */
+#define MII_REG_SR          1  /* Status Register                          */
+#define MII_REG_PHYIR1      2  /* PHY Identification Register 1            */
+#define MII_REG_PHYIR2      3  /* PHY Identification Register 2            */
+#define MII_REG_ANAR        4  /* A-N Advertisement Register               */
+#define MII_REG_ANLPAR      5  /* A-N Link Partner Ability Register        */
+#define MII_REG_ANER        6  /* A-N Expansion Register                   */
+#define MII_REG_ANNPTR      7  /* A-N Next Page Transmit Register          */
+#define MII_REG_ANLPRNPR    8  /* A-N Link Partner Received Next Page Reg. */
+
+/* values for phy_status */
+
+#define PHY_CONF_ANE	0x0001  /* 1 auto-negotiation enabled */
+#define PHY_CONF_LOOP	0x0002  /* 1 loopback mode enabled */
+#define PHY_CONF_SPMASK	0x00f0  /* mask for speed */
+#define PHY_CONF_10HDX	0x0010  /* 10 Mbit half duplex supported */
+#define PHY_CONF_10FDX	0x0020  /* 10 Mbit full duplex supported */
+#define PHY_CONF_100HDX	0x0040  /* 100 Mbit half duplex supported */
+#define PHY_CONF_100FDX	0x0080  /* 100 Mbit full duplex supported */
+
+#define PHY_STAT_LINK	0x0100  /* 1 up - 0 down */
+#define PHY_STAT_FAULT	0x0200  /* 1 remote fault */
+#define PHY_STAT_ANC	0x0400  /* 1 auto-negotiation complete	*/
+#define PHY_STAT_SPMASK	0xf000  /* mask for speed */
+#define PHY_STAT_10HDX	0x1000  /* 10 Mbit half duplex selected	*/
+#define PHY_STAT_10FDX	0x2000  /* 10 Mbit full duplex selected	*/
+#define PHY_STAT_100HDX	0x4000  /* 100 Mbit half duplex selected */
+#define PHY_STAT_100FDX	0x8000  /* 100 Mbit full duplex selected */
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+/* The number of Tx and Rx buffers.  These are allocated from the page
+ * pool.  The code may assume these are power of two, so it is best
+ * to keep them that size.
+ * We don't need to allocate pages for the transmitter.  We just use
+ * the skbuffer directly.
+ */
+#define FCC_ENET_RX_PAGES	16
+#define FCC_ENET_RX_FRSIZE	2048
+#define FCC_ENET_RX_FRPPG	(PAGE_SIZE / FCC_ENET_RX_FRSIZE)
+#define RX_RING_SIZE		(FCC_ENET_RX_FRPPG * FCC_ENET_RX_PAGES)
+#define TX_RING_SIZE		16	/* Must be power of two */
+#define TX_RING_MOD_MASK	15	/*   for this to work */
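+/* With TX_RING_SIZE a power of two, "(index + 1) & TX_RING_MOD_MASK" wraps a ring index without a costly modulo. */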
+
+/* The FCC stores dest/src/type, data, and checksum for receive packets.
+ */
+#define PKT_MAXBUF_SIZE		1518
+#define PKT_MINBUF_SIZE		64
+
+/* Maximum input DMA size.  Must (should?) be a multiple of 4.
+*/
+#define PKT_MAXDMA_SIZE		1520
+
+/* Maximum input buffer size.  Must be a multiple of 32.
+*/
+#define PKT_MAXBLR_SIZE		1536
+
+static int  fcc_enet_open(struct rtnet_device *rtdev);
+static int  fcc_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev);
+static int  fcc_enet_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp);
+static int fcc_enet_interrupt(rtdm_irq_t *irq_handle);
+static int  fcc_enet_close(struct rtnet_device *dev);
+
+static struct net_device_stats *fcc_enet_get_stats(struct rtnet_device *rtdev);
+#ifdef ORIGINAL_VERSION
+static void set_multicast_list(struct net_device *dev);
+static int fcc_enet_set_mac_address(struct net_device *dev, void *addr);
+#endif /* ORIGINAL_VERSION */
+
+static void fcc_restart(struct rtnet_device *rtdev, int duplex);
+
+/* These will be configurable for the FCC choice.
+ * Multiple ports can be configured.  There is little choice among the
+ * I/O pins to the PHY, except the clocks.  We will need some board
+ * dependent clock selection.
+ * Why in the hell did I put these inside #ifdef's?  I dunno, maybe to
+ * help show what pins are used for each device.
+ */
+
+/* I/O Pin assignment for FCC1.  I don't yet know the best way to do this,
+ * but there is little variation among the choices.
+ */
+#define PA1_COL		((uint)0x00000001)
+#define PA1_CRS		((uint)0x00000002)
+#define PA1_TXER	((uint)0x00000004)
+#define PA1_TXEN	((uint)0x00000008)
+#define PA1_RXDV	((uint)0x00000010)
+#define PA1_RXER	((uint)0x00000020)
+#define PA1_TXDAT	((uint)0x00003c00)
+#define PA1_RXDAT	((uint)0x0003c000)
+#define PA1_PSORA0	(PA1_RXDAT | PA1_TXDAT)
+#define PA1_PSORA1	(PA1_COL | PA1_CRS | PA1_TXER | PA1_TXEN | \
+				PA1_RXDV | PA1_RXER)
+#define PA1_DIRA0	(PA1_RXDAT | PA1_CRS | PA1_COL | PA1_RXER | PA1_RXDV)
+#define PA1_DIRA1	(PA1_TXDAT | PA1_TXEN | PA1_TXER)
+
+/* CLK12 is receive, CLK11 is transmit.  These are board specific.
+*/
+#define PC_F1RXCLK	((uint)0x00000800)
+#define PC_F1TXCLK	((uint)0x00000400)
+#if defined(CONFIG_PM826)
+#ifndef CONFIG_RTAI_RTNET_DB_CR826_J30x_ON
+#define CMX1_CLK_ROUTE  ((uint)0x35000000)
+#define CMX1_CLK_MASK   ((uint)0x7f000000)
+#else
+#define CMX1_CLK_ROUTE	((uint)0x37000000)
+#define CMX1_CLK_MASK	((uint)0x7f000000)
+#endif
+#elif defined(CONFIG_CPU86)
+#define CMX1_CLK_ROUTE  ((uint)0x37000000)
+#define CMX1_CLK_MASK   ((uint)0x7f000000)
+#else
+#define CMX1_CLK_ROUTE	((uint)0x3e000000)
+#define CMX1_CLK_MASK	((uint)0xff000000)
+#endif	/* CONFIG_PM826 */
+
+/* I/O Pin assignment for FCC2.  I don't yet know the best way to do this,
+ * but there is little variation among the choices.
+ */
+#define PB2_TXER	((uint)0x00000001)
+#define PB2_RXDV	((uint)0x00000002)
+#define PB2_TXEN	((uint)0x00000004)
+#define PB2_RXER	((uint)0x00000008)
+#define PB2_COL		((uint)0x00000010)
+#define PB2_CRS		((uint)0x00000020)
+#define PB2_TXDAT	((uint)0x000003c0)
+#define PB2_RXDAT	((uint)0x00003c00)
+#define PB2_PSORB0	(PB2_RXDAT | PB2_TXDAT | PB2_CRS | PB2_COL | \
+				PB2_RXER | PB2_RXDV | PB2_TXER)
+#define PB2_PSORB1	(PB2_TXEN)
+#define PB2_DIRB0	(PB2_RXDAT | PB2_CRS | PB2_COL | PB2_RXER | PB2_RXDV)
+#define PB2_DIRB1	(PB2_TXDAT | PB2_TXEN | PB2_TXER)
+
+/* CLK13 is receive, CLK14 is transmit.  These are board dependent.
+*/
+#define PC_F2RXCLK	((uint)0x00001000)
+#define PC_F2TXCLK	((uint)0x00002000)
+#define CMX2_CLK_ROUTE	((uint)0x00250000)
+#define CMX2_CLK_MASK	((uint)0x00ff0000)
+
+/* I/O Pin assignment for FCC3.  I don't yet know the best way to do this,
+ * but there is little variation among the choices.
+ */
+#define PB3_RXDV	((uint)0x00004000)
+#define PB3_RXER	((uint)0x00008000)
+#define PB3_TXER	((uint)0x00010000)
+#define PB3_TXEN	((uint)0x00020000)
+#define PB3_COL		((uint)0x00040000)
+#define PB3_CRS		((uint)0x00080000)
+#define PB3_TXDAT	((uint)0x0f000000)
+#define PB3_RXDAT	((uint)0x00f00000)
+#define PB3_PSORB0	(PB3_RXDAT | PB3_TXDAT | PB3_CRS | PB3_COL | \
+				PB3_RXER | PB3_RXDV | PB3_TXER | PB3_TXEN)
+#define PB3_PSORB1	(0)
+#define PB3_DIRB0	(PB3_RXDAT | PB3_CRS | PB3_COL | PB3_RXER | PB3_RXDV)
+#define PB3_DIRB1	(PB3_TXDAT | PB3_TXEN | PB3_TXER)
+
+/* CLK15 is receive, CLK16 is transmit.  These are board dependent.
+*/
+#ifdef CONFIG_IPHASE4539
+#define PC_F3RXCLK	((uint)0x00002000) /* CLK 14 is receive  */
+#define PC_F3TXCLK	((uint)0x00008000) /* CLK 16 is transmit */
+#define CMX3_CLK_ROUTE	((uint)0x00002f00)
+#define CMX3_CLK_MASK	((uint)0x00007f00)
+#else
+#define PC_F3RXCLK	((uint)0x00004000)
+#define PC_F3TXCLK	((uint)0x00008000)
+#define CMX3_CLK_ROUTE	((uint)0x00003700)
+#define CMX3_CLK_MASK	((uint)0x0000ff00)
+#endif
+
+/* MII status/control serial interface.
+*/
+#define IOP_PORT_OFF(f)	((uint)(&((iop8260_t *)0)->iop_p##f))
+#define IOP_PORT(x)	IOP_PORT_OFF(dir##x)
+
+#define IOP_DIR(b,p)	*((uint*)((void*)(b)+(p)+(IOP_PORT_OFF(dira)-IOP_PORT_OFF(dira))))
+#define IOP_PAR(b,p)	*((uint*)((void*)(b)+(p)+(IOP_PORT_OFF(para)-IOP_PORT_OFF(dira))))
+#define IOP_SOR(b,p)	*((uint*)((void*)(b)+(p)+(IOP_PORT_OFF(sora)-IOP_PORT_OFF(dira))))
+#define IOP_ODR(b,p)	*((uint*)((void*)(b)+(p)+(IOP_PORT_OFF(odra)-IOP_PORT_OFF(dira))))
+#define IOP_DAT(b,p)	*((uint*)((void*)(b)+(p)+(IOP_PORT_OFF(data)-IOP_PORT_OFF(dira))))
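+/* These accessors add the distance between port A's register fields to the selected port's pdir offset, assuming every I/O port shares the same register layout within iop8260_t. */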
+
+#if defined(CONFIG_TQM8260)
+/* TQM8260 has MDIO and MDCK on PC30 and PC31 respectively */
+#define MII_MDIO		((uint)0x00000002)
+#define MII_MDCK		((uint)0x00000001)
+#elif defined (CONFIG_PM826)
+#ifndef CONFIG_RTAI_RTNET_DB_CR826_J30x_ON
+#define MII_MDIO		((uint)0x00000080) /* MDIO on PC24 */
+#define MII_MDCK		((uint)0x00000100) /* MDCK on PC23 */
+#else
+#define MII_MDIO		((uint)0x00000100) /* MDIO on PA23 */
+#define MII_MDCK		((uint)0x00000200) /* MDCK on PA22 */
+#define MII_PORT		IOP_PORT(a)
+#endif	/* CONFIG_RTAI_RTNET_DB_CR826_J30x_ON */
+#elif defined (CONFIG_IPHASE4539)
+#define MII_MDIO		((uint)0x00000080) /* MDIO on PC24 */
+#define MII_MDCK		((uint)0x00000100) /* MDCK on PC23 */
+#else
+#define MII_MDIO		((uint)0x00000004)
+#define MII_MDCK		((uint)0x00000100)
+#endif
+
+# if defined(CONFIG_TQM8260)
+#define MII_MDIO2		MII_MDIO
+#define MII_MDCK2		MII_MDCK
+#elif defined(CONFIG_EST8260) || defined(CONFIG_ADS8260)
+#define MII_MDIO2		((uint)0x00400000)
+#define MII_MDCK2		((uint)0x00200000)
+#elif defined(CONFIG_PM826)
+#define MII_MDIO2		((uint)0x00000040) /* MDIO on PA25 */
+#define MII_MDCK2		((uint)0x00000080) /* MDCK on PA24 */
+#define MII_PORT2		IOP_PORT(a)
+#else
+#define MII_MDIO2		((uint)0x00000002)
+#define MII_MDCK2		((uint)0x00000080)
+#endif
+
+# if defined(CONFIG_TQM8260)
+#define MII_MDIO3		MII_MDIO
+#define MII_MDCK3		MII_MDCK
+#else
+#define MII_MDIO3		((uint)0x00000001)
+#define MII_MDCK3		((uint)0x00000040)
+#endif
+
+#ifndef MII_PORT
+#define MII_PORT		IOP_PORT(c)
+#endif
+
+#ifndef MII_PORT2
+#define MII_PORT2		IOP_PORT(c)
+#endif
+
+#ifndef MII_PORT3
+#define MII_PORT3		IOP_PORT(c)
+#endif
+
+/* A table of information for supporting FCCs.  This does two things.
+ * First, we know how many FCCs we have and they are always externally
+ * numbered from zero.  Second, it holds control register and I/O
+ * information that could be different among board designs.
+ */
+typedef struct fcc_info {
+	uint	fc_fccnum;
+	uint	fc_cpmblock;
+	uint	fc_cpmpage;
+	uint	fc_proff;
+	uint	fc_interrupt;
+	uint	fc_trxclocks;
+	uint	fc_clockroute;
+	uint	fc_clockmask;
+	uint	fc_mdio;
+	uint	fc_mdck;
+	uint	fc_port;
+	struct rtnet_device *rtdev;
+} fcc_info_t;
+
+static fcc_info_t fcc_ports[] = {
+	{ 0, CPM_CR_FCC1_SBLOCK, CPM_CR_FCC1_PAGE, PROFF_FCC1, SIU_INT_FCC1,
+		(PC_F1RXCLK | PC_F1TXCLK), CMX1_CLK_ROUTE, CMX1_CLK_MASK,
+		MII_MDIO, MII_MDCK, MII_PORT },
+	{ 1, CPM_CR_FCC2_SBLOCK, CPM_CR_FCC2_PAGE, PROFF_FCC2, SIU_INT_FCC2,
+		(PC_F2RXCLK | PC_F2TXCLK), CMX2_CLK_ROUTE, CMX2_CLK_MASK,
+		MII_MDIO2, MII_MDCK2, MII_PORT2 },
+	{ 2, CPM_CR_FCC3_SBLOCK, CPM_CR_FCC3_PAGE, PROFF_FCC3, SIU_INT_FCC3,
+		(PC_F3RXCLK | PC_F3TXCLK), CMX3_CLK_ROUTE, CMX3_CLK_MASK,
+		MII_MDIO3, MII_MDCK3, MII_PORT3 },
+};
+
+/* The FCC buffer descriptors track the ring buffers.  The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors.  The
+ * cur_rx and cur_tx point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller.  The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions.  The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
+struct fcc_enet_private {
+	/* The addresses of a Tx/Rx-in-place packets/buffers. */
+	struct	rtskb *tx_skbuff[TX_RING_SIZE];
+	ushort	skb_cur;
+	ushort	skb_dirty;
+
+	/* CPM dual port RAM relative addresses.
+	*/
+	cbd_t	*rx_bd_base;		/* Address of Rx and Tx buffers. */
+	cbd_t	*tx_bd_base;
+	cbd_t	*cur_rx, *cur_tx;		/* The next free ring entry */
+	cbd_t	*dirty_tx;	/* The ring entries to be free()ed. */
+	volatile fcc_t	*fccp;
+	volatile fcc_enet_t	*ep;
+	struct	net_device_stats stats;
+	uint	tx_full;
+	rtdm_lock_t lock;
+	rtdm_irq_t irq_handle;
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	uint	phy_id;
+	uint	phy_id_done;
+	uint	phy_status;
+	phy_info_t	*phy;
+	struct tq_struct phy_task;
+
+	uint	sequence_done;
+
+	uint	phy_addr;
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+	int	link;
+	int	old_link;
+	int	full_duplex;
+
+	fcc_info_t	*fip;
+};
+
+static void init_fcc_shutdown(fcc_info_t *fip, struct fcc_enet_private *cep,
+	volatile immap_t *immap);
+static void init_fcc_startup(fcc_info_t *fip, struct rtnet_device *rtdev);
+static void init_fcc_ioports(fcc_info_t *fip, volatile iop8260_t *io,
+	volatile immap_t *immap);
+static void init_fcc_param(fcc_info_t *fip, struct rtnet_device *rtdev,
+	volatile immap_t *immap);
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+static int	mii_queue(struct net_device *dev, int request, void (*func)(uint, struct net_device *));
+static uint	mii_send_receive(fcc_info_t *fip, uint cmd);
+
+static void	fcc_stop(struct net_device *dev);
+
+/* Make MII read/write commands for the FCC.
+*/
+#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | \
+						(VAL & 0xffff))
+#define mk_mii_end	0
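+/* Example: mk_mii_read(MII_REG_SR) yields 0x60020000 | (1 << 18) = 0x60060000, i.e. a read command for PHY register 1. */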
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+
+static int
+fcc_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct fcc_enet_private *cep = (struct fcc_enet_private *)rtdev->priv;
+	volatile cbd_t	*bdp;
+	rtdm_lockctx_t	context;
+
+	RT_DEBUG(__FUNCTION__": ...\n");
+
+	if (!cep->link) {
+		/* Link is down or autonegotiation is in progress. */
+		return 1;
+	}
+
+	/* Fill in a Tx ring entry */
+	bdp = cep->cur_tx;
+
+#ifndef final_version
+	if (bdp->cbd_sc & BD_ENET_TX_READY) {
+		/* Ooops.  All transmit buffers are full.  Bail out.
+		 * This should not happen, since cep->tx_full should be set.
+		 */
+		rtdm_printk("%s: tx queue full!\n", rtdev->name);
+		return 1;
+	}
+#endif
+
+	/* Clear all of the status flags. */
+	bdp->cbd_sc &= ~BD_ENET_TX_STATS;
+
+	/* If the frame is short, tell CPM to pad it. */
+	if (skb->len <= ETH_ZLEN)
+		bdp->cbd_sc |= BD_ENET_TX_PAD;
+	else
+		bdp->cbd_sc &= ~BD_ENET_TX_PAD;
+
+	/* Set buffer length and buffer pointer. */
+	bdp->cbd_datlen = skb->len;
+	bdp->cbd_bufaddr = __pa(skb->data);
+
+	/* Save skb pointer. */
+	cep->tx_skbuff[cep->skb_cur] = skb;
+
+	cep->stats.tx_bytes += skb->len;
+	cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK;
+
+	rtdm_lock_get_irqsave(&cep->lock, context);
+
+	/* Get and patch time stamp just before the transmission */
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+
+	/* Send it on its way.  Tell CPM it's ready, interrupt when done,
+	 * it's the last BD of the frame, and to put the CRC on the end.
+	 */
+	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+
+#ifdef ORIGINAL_VERSION
+	dev->trans_start = jiffies;
+#endif
+
+	/* If this was the last BD in the ring, start at the beginning again. */
+	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+		bdp = cep->tx_bd_base;
+	else
+		bdp++;
+
+	if (bdp->cbd_sc & BD_ENET_TX_READY) {
+		rtnetif_stop_queue(rtdev);
+		cep->tx_full = 1;
+	}
+
+	cep->cur_tx = (cbd_t *)bdp;
+
+	rtdm_lock_put_irqrestore(&cep->lock, context);
+
+	return 0;
+}
+
+
+#ifdef ORIGINAL_VERSION
+static void
+fcc_enet_timeout(struct net_device *dev)
+{
+	struct fcc_enet_private *cep = (struct fcc_enet_private *)dev->priv;
+
+	printk("%s: transmit timed out.\n", dev->name);
+	cep->stats.tx_errors++;
+#ifndef final_version
+	{
+		int	i;
+		cbd_t	*bdp;
+		printk(" Ring data dump: cur_tx %p%s cur_rx %p.\n",
+		       cep->cur_tx, cep->tx_full ? " (full)" : "",
+		       cep->cur_rx);
+		bdp = cep->tx_bd_base;
+		printk(" Tx @base %p :\n", bdp);
+		for (i = 0 ; i < TX_RING_SIZE; i++, bdp++)
+			printk("%04x %04x %08x\n",
+			       bdp->cbd_sc,
+			       bdp->cbd_datlen,
+			       bdp->cbd_bufaddr);
+		bdp = cep->rx_bd_base;
+		printk(" Rx @base %p :\n", bdp);
+		for (i = 0 ; i < RX_RING_SIZE; i++, bdp++)
+			printk("%04x %04x %08x\n",
+			       bdp->cbd_sc,
+			       bdp->cbd_datlen,
+			       bdp->cbd_bufaddr);
+	}
+#endif
+	if (!cep->tx_full)
+		netif_wake_queue(dev);
+}
+#endif /* ORIGINAL_VERSION */
+
+/* The interrupt handler. */
+static int fcc_enet_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	int packets = 0;
+	struct	fcc_enet_private *cep;
+	volatile cbd_t	*bdp;
+	ushort	int_events;
+	int	must_restart;
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+
+	cep = (struct fcc_enet_private *)rtdev->priv;
+
+	/* Get the interrupt events that caused us to be here.
+	*/
+	int_events = cep->fccp->fcc_fcce;
+	cep->fccp->fcc_fcce = int_events;
+	must_restart = 0;
+
+	/* Handle receive event in its own function.
+	*/
+	if (int_events & FCC_ENET_RXF) {
+		fcc_enet_rx(rtdev, &packets, &time_stamp);
+	}
+
+	/* Check for a transmit error.  The manual is a little unclear
+	 * about this, so the debug code stays until I get it figured out.
+	 * It appears that if TXE is set, then TXB is not set.  However,
+	 * if carrier sense is lost during frame transmission, the TXE
+	 * bit is set, "and continues the buffer transmission normally."
+	 * I don't know if "normally" implies TXB is set when the buffer
+	 * descriptor is closed... trial and error :-).
+	 */
+
+	/* Transmit OK, or non-fatal error.  Update the buffer descriptors.
+	*/
+	if (int_events & (FCC_ENET_TXE | FCC_ENET_TXB)) {
+	    rtdm_lock_get(&cep->lock);
+	    bdp = cep->dirty_tx;
+	    while ((bdp->cbd_sc&BD_ENET_TX_READY)==0) {
+		if ((bdp==cep->cur_tx) && (cep->tx_full == 0))
+		    break;
+
+		if (bdp->cbd_sc & BD_ENET_TX_HB)	/* No heartbeat */
+			cep->stats.tx_heartbeat_errors++;
+		if (bdp->cbd_sc & BD_ENET_TX_LC)	/* Late collision */
+			cep->stats.tx_window_errors++;
+		if (bdp->cbd_sc & BD_ENET_TX_RL)	/* Retrans limit */
+			cep->stats.tx_aborted_errors++;
+		if (bdp->cbd_sc & BD_ENET_TX_UN)	/* Underrun */
+			cep->stats.tx_fifo_errors++;
+		if (bdp->cbd_sc & BD_ENET_TX_CSL)	/* Carrier lost */
+			cep->stats.tx_carrier_errors++;
+
+
+		/* Missing heartbeat and lost carrier are not really bad errors;
+		 * the others require a restart transmit command.
+		 */
+		if (bdp->cbd_sc &
+		    (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
+			must_restart = 1;
+			cep->stats.tx_errors++;
+		}
+
+		cep->stats.tx_packets++;
+
+		/* Deferred means some collisions occurred during transmit,
+		 * but we eventually sent the packet OK.
+		 */
+		if (bdp->cbd_sc & BD_ENET_TX_DEF)
+			cep->stats.collisions++;
+
+		/* Free the sk buffer associated with this last transmit. */
+		dev_kfree_rtskb(cep->tx_skbuff[cep->skb_dirty]);
+		cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK;
+
+		/* Update pointer to next buffer descriptor to be transmitted. */
+		if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+			bdp = cep->tx_bd_base;
+		else
+			bdp++;
+
+		/* I don't know if we can be held off from processing these
+		 * interrupts for more than one frame time.  I really hope
+		 * not.  In such a case, we would now want to check the
+		 * currently available BD (cur_tx) and determine if any
+		 * buffers between the dirty_tx and cur_tx have also been
+		 * sent.  We would want to process anything in between that
+		 * does not have BD_ENET_TX_READY set.
+		 */
+
+		/* Since we have freed up a buffer, the ring is no longer
+		 * full.
+		 */
+		if (cep->tx_full) {
+			cep->tx_full = 0;
+			if (rtnetif_queue_stopped(rtdev))
+				rtnetif_wake_queue(rtdev);
+		}
+
+		cep->dirty_tx = (cbd_t *)bdp;
+	    }
+
+	    if (must_restart) {
+		volatile cpm8260_t *cp;
+
+		/* Some transmit errors cause the transmitter to shut
+		 * down.  We now issue a restart transmit.  Since the
+		 * errors close the BD and update the pointers, the restart
+		 * _should_ pick up without having to reset any of our
+		 * pointers either.  Also, to work around 8260 device erratum
+		 * CPM37, we must disable and then re-enable the transmitter
+		 * following a Late Collision, Underrun, or Retry Limit error.
+		 */
+		cep->fccp->fcc_gfmr &= ~FCC_GFMR_ENT;
+#ifdef ORIGINAL_VERSION
+		udelay(10); /* wait a few microseconds just on principle */
+#endif
+		cep->fccp->fcc_gfmr |=  FCC_GFMR_ENT;
+
+		cp = cpmp;
+		cp->cp_cpcr =
+		    mk_cr_cmd(cep->fip->fc_cpmpage, cep->fip->fc_cpmblock,
+				0x0c, CPM_CR_RESTART_TX) | CPM_CR_FLG;
+		while (cp->cp_cpcr & CPM_CR_FLG); // looks suspicious - how long may it take?
+	    }
+	    rtdm_lock_put(&cep->lock);
+	}
+
+	/* Check for receive busy, i.e. packets coming but no place to
+	 * put them.
+	 */
+	if (int_events & FCC_ENET_BSY) {
+		cep->stats.rx_dropped++;
+	}
+
+	if (packets > 0)
+		rt_mark_stack_mgr(rtdev);
+	return RTDM_IRQ_HANDLED;
+}
+
+/* During a receive, the cur_rx points to the current incoming buffer.
+ * When we update through the ring, if the next incoming buffer has
+ * not been given to the system, we just set the empty indicator,
+ * effectively tossing the packet.
+ */
+static int
+fcc_enet_rx(struct rtnet_device *rtdev, int* packets, nanosecs_abs_t *time_stamp)
+{
+	struct	fcc_enet_private *cep;
+	volatile cbd_t	*bdp;
+	struct	rtskb *skb;
+	ushort	pkt_len;
+
+	RT_DEBUG("%s: ...\n", __FUNCTION__);
+
+	cep = (struct fcc_enet_private *)rtdev->priv;
+
+	/* First, grab all of the stats for the incoming packet.
+	 * These get messed up if we get called due to a busy condition.
+	 */
+	bdp = cep->cur_rx;
+
+for (;;) {
+	if (bdp->cbd_sc & BD_ENET_RX_EMPTY)
+		break;
+
+#ifndef final_version
+	/* Since we have allocated space to hold a complete frame, both
+	 * the first and last indicators should be set.
+	 */
+	if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) !=
+		(BD_ENET_RX_FIRST | BD_ENET_RX_LAST))
+			rtdm_printk("CPM ENET: rcv is not first+last\n");
+#endif
+
+	/* Frame too long or too short. */
+	if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
+		cep->stats.rx_length_errors++;
+	if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
+		cep->stats.rx_frame_errors++;
+	if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
+		cep->stats.rx_crc_errors++;
+	if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
+		cep->stats.rx_crc_errors++;
+	if (bdp->cbd_sc & BD_ENET_RX_CL)	/* Late Collision */
+		cep->stats.rx_frame_errors++;
+
+	if (!(bdp->cbd_sc &
+	      (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | BD_ENET_RX_CR
+	       | BD_ENET_RX_OV | BD_ENET_RX_CL)))
+	{
+		/* Process the incoming frame. */
+		cep->stats.rx_packets++;
+
+		/* Remove the FCS from the packet length. */
+		pkt_len = bdp->cbd_datlen - 4;
+		cep->stats.rx_bytes += pkt_len;
+
+		/* This does 16 byte alignment, much more than we need. */
+		skb = rtnetdev_alloc_rtskb(rtdev, pkt_len);
+
+		if (skb == NULL) {
+			rtdm_printk("%s: Memory squeeze, dropping packet.\n", rtdev->name);
+			cep->stats.rx_dropped++;
+		}
+		else {
+			rtskb_put(skb,pkt_len); /* Make room */
+			memcpy(skb->data,
+			       (unsigned char *)__va(bdp->cbd_bufaddr),
+			       pkt_len);
+			skb->protocol=rt_eth_type_trans(skb,rtdev);
+			skb->time_stamp = *time_stamp;
+			rtnetif_rx(skb);
+			(*packets)++;
+		}
+	}
+
+	/* Clear the status flags for this buffer. */
+	bdp->cbd_sc &= ~BD_ENET_RX_STATS;
+
+	/* Mark the buffer empty. */
+	bdp->cbd_sc |= BD_ENET_RX_EMPTY;
+
+	/* Update BD pointer to next entry. */
+	if (bdp->cbd_sc & BD_ENET_RX_WRAP)
+		bdp = cep->rx_bd_base;
+	else
+		bdp++;
+
+}
+	cep->cur_rx = (cbd_t *)bdp;
+
+	return 0;
+}
+
+static int
+fcc_enet_close(struct rtnet_device *rtdev)
+{
+	/* Don't know what to do yet. */
+	rtnetif_stop_queue(rtdev);
+
+	return 0;
+}
+
+static struct net_device_stats *fcc_enet_get_stats(struct rtnet_device *rtdev)
+{
+	struct fcc_enet_private *cep = (struct fcc_enet_private *)rtdev->priv;
+
+	return &cep->stats;
+}
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+
+/* NOTE: Most of the following comes from the FEC driver for 860. The
+ * overall structure of MII code has been retained (as it's proved stable
+ * and well-tested), but actual transfer requests are processed "at once"
+ * instead of being queued (there's no interrupt-driven MII transfer
+ * mechanism, one has to toggle the data/clock bits manually).
+ */
+static int
+mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *))
+{
+	struct fcc_enet_private *fep;
+	int		retval, tmp;
+
+	/* Add PHY address to register command. */
+	fep = dev->priv;
+	regval |= fep->phy_addr << 23;
+
+	retval = 0;
+
+	tmp = mii_send_receive(fep->fip, regval);
+	if (func)
+		func(tmp, dev);
+
+	return retval;
+}
+
+static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
+{
+	int k;
+
+	if(!c)
+		return;
+
+	for(k = 0; (c+k)->mii_data != mk_mii_end; k++)
+		mii_queue(dev, (c+k)->mii_data, (c+k)->funct);
+}
+
+static void mii_parse_sr(uint mii_reg, struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);
+
+	if (mii_reg & 0x0004)
+		s |= PHY_STAT_LINK;
+	if (mii_reg & 0x0010)
+		s |= PHY_STAT_FAULT;
+	if (mii_reg & 0x0020)
+		s |= PHY_STAT_ANC;
+
+	fep->phy_status = s;
+	fep->link = (s & PHY_STAT_LINK) ? 1 : 0;
+}
+
+static void mii_parse_cr(uint mii_reg, struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_CONF_ANE | PHY_CONF_LOOP);
+
+	if (mii_reg & 0x1000)
+		s |= PHY_CONF_ANE;
+	if (mii_reg & 0x4000)
+		s |= PHY_CONF_LOOP;
+
+	fep->phy_status = s;
+}
+
+static void mii_parse_anar(uint mii_reg, struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_CONF_SPMASK);
+
+	if (mii_reg & 0x0020)
+		s |= PHY_CONF_10HDX;
+	if (mii_reg & 0x0040)
+		s |= PHY_CONF_10FDX;
+	if (mii_reg & 0x0080)
+		s |= PHY_CONF_100HDX;
+	if (mii_reg & 0x00100)
+		s |= PHY_CONF_100FDX;
+
+	fep->phy_status = s;
+}
+
+/* Some boards don't have the MDIRQ line connected (PM826 is such a board) */
+
+static void mii_waitfor_anc(uint mii_reg, struct net_device *dev)
+{
+	struct fcc_enet_private *fep;
+	int regval;
+	int i;
+
+	fep = dev->priv;
+	regval = mk_mii_read(MII_REG_SR) | (fep->phy_addr << 23);
+
+	for (i = 0; i < 1000; i++)
+	{
+		if (mii_send_receive(fep->fip, regval) & 0x20)
+			return;
+		udelay(10000);
+	}
+
+	printk("%s: autonegotiation timeout\n", dev->name);
+}
+
+/* ------------------------------------------------------------------------- */
+/* The Level One LXT970 is used by many boards				     */
+
+#ifdef CONFIG_FCC_LXT970
+
+#define MII_LXT970_MIRROR    16  /* Mirror register           */
+#define MII_LXT970_IER       17  /* Interrupt Enable Register */
+#define MII_LXT970_ISR       18  /* Interrupt Status Register */
+#define MII_LXT970_CONFIG    19  /* Configuration Register    */
+#define MII_LXT970_CSR       20  /* Chip Status Register      */
+
+static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	if (mii_reg & 0x0800) {
+		if (mii_reg & 0x1000)
+			s |= PHY_STAT_100FDX;
+		else
+			s |= PHY_STAT_100HDX;
+	} else {
+		if (mii_reg & 0x1000)
+			s |= PHY_STAT_10FDX;
+		else
+			s |= PHY_STAT_10HDX;
+	}
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_lxt970 = {
+	0x07810000,
+	"LXT970",
+
+	(const phy_cmd_t []) {  /* config */
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup - enable interrupts */
+		{ mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		/* read SR and ISR to acknowledge */
+
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_LXT970_ISR), NULL },
+
+		/* find out the current status */
+
+		{ mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+#endif /* CONFIG_FCC_LXT970 */
+
+/* ------------------------------------------------------------------------- */
+/* The Level One LXT971 is used on some of my custom boards                  */
+
+#ifdef CONFIG_FCC_LXT971
+
+/* register definitions for the 971 */
+
+#define MII_LXT971_PCR       16  /* Port Control Register     */
+#define MII_LXT971_SR2       17  /* Status Register 2         */
+#define MII_LXT971_IER       18  /* Interrupt Enable Register */
+#define MII_LXT971_ISR       19  /* Interrupt Status Register */
+#define MII_LXT971_LCR       20  /* LED Control Register      */
+#define MII_LXT971_TCR       30  /* Transmit Control Register */
+
+/*
+ * I had some nice ideas of running the MDIO faster...
+ * The 971 should support 8MHz and I tried it, but things acted really
+ * weird, so 2.5 MHz ought to be enough for anyone...
+ */
+
+static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	if (mii_reg & 0x4000) {
+		if (mii_reg & 0x0200)
+			s |= PHY_STAT_100FDX;
+		else
+			s |= PHY_STAT_100HDX;
+	} else {
+		if (mii_reg & 0x0200)
+			s |= PHY_STAT_10FDX;
+		else
+			s |= PHY_STAT_10HDX;
+	}
+	if (mii_reg & 0x0008)
+		s |= PHY_STAT_FAULT;
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_lxt971 = {
+	0x0001378e,
+	"LXT971",
+
+	(const phy_cmd_t []) {  /* config */
+//		{ mk_mii_write(MII_REG_ANAR, 0x021), NULL }, /* 10  Mbps, HD */
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup - enable interrupts */
+		{ mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+
+		/* Somehow the 971 tells me that the link is down on
+		 * the first read after power-up.
+		 * Read here to get a valid value in ack_int. */
+
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+#ifdef	CONFIG_PM826
+		{ mk_mii_read(MII_REG_SR), mii_waitfor_anc },
+#endif
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		/* find out the current status */
+
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
+
+		/* we only need to read ISR to acknowledge */
+
+		{ mk_mii_read(MII_LXT971_ISR), NULL },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+#endif /* CONFIG_FCC_LXT971 */
+
+
+/* ------------------------------------------------------------------------- */
+/* The Quality Semiconductor QS6612 is used on the RPX CLLF                  */
+
+#ifdef CONFIG_FCC_QS6612
+
+/* register definitions */
+
+#define MII_QS6612_MCR       17  /* Mode Control Register      */
+#define MII_QS6612_FTR       27  /* Factory Test Register      */
+#define MII_QS6612_MCO       28  /* Misc. Control Register     */
+#define MII_QS6612_ISR       29  /* Interrupt Source Register  */
+#define MII_QS6612_IMR       30  /* Interrupt Mask Register    */
+#define MII_QS6612_PCR       31  /* 100BaseTx PHY Control Reg. */
+
+static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	switch((mii_reg >> 2) & 7) {
+	case 1: s |= PHY_STAT_10HDX;  break;
+	case 2: s |= PHY_STAT_100HDX; break;
+	case 5: s |= PHY_STAT_10FDX;  break;
+	case 6: s |= PHY_STAT_100FDX; break;
+	}
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_qs6612 = {
+	0x00181440,
+	"QS6612",
+
+	(const phy_cmd_t []) {  /* config */
+//	{ mk_mii_write(MII_REG_ANAR, 0x061), NULL }, /* 10  Mbps */
+
+		/* The PHY powers up isolated on the RPX,
+		 * so send a command to allow operation.
+		 */
+
+		{ mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },
+
+		/* parse cr and anar to get some info */
+
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup - enable interrupts */
+		{ mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+
+		/* we need to read ISR, SR and ANER to acknowledge */
+
+		{ mk_mii_read(MII_QS6612_ISR), NULL },
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_REG_ANER), NULL },
+
+		/* read pcr to get info */
+
+		{ mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+
+#endif /* CONFIG_FCC_QS6612 */
+
+/* ------------------------------------------------------------------------- */
+/* The AMD Am79C873 PHY is on PM826				*/
+
+#ifdef CONFIG_FCC_AMD79C873
+
+#define MII_79C873_IER       17  /* Interrupt Enable Register */
+#define MII_79C873_DR        18  /* Diagnostic Register       */
+
+static void mii_parse_79c873_cr(uint mii_reg, struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	if (mii_reg & 0x2000) {
+		if (mii_reg & 0x0100)
+			s |= PHY_STAT_100FDX;
+		else
+			s |= PHY_STAT_100HDX;
+	} else {
+		if (mii_reg & 0x0100)
+			s |= PHY_STAT_10FDX;
+		else
+			s |= PHY_STAT_10HDX;
+	}
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_79c873 = {
+	0x00181b80,
+	"AMD79C873",
+
+	(const phy_cmd_t []) {  /* config */
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup */
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+#ifdef	CONFIG_PM826
+		{ mk_mii_read(MII_REG_SR), mii_waitfor_anc },
+#endif
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		/* read SR twice: to acknowledge and to get link status */
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+
+		/* find out the current link parameters */
+
+		{ mk_mii_read(MII_REG_CR), mii_parse_79c873_cr },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_write(MII_79C873_IER, 0x0000), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+#endif /* CONFIG_FCC_AMD79C873 */
+
+
+/* ------------------------------------------------------------------------- */
+/* The Davicom DM9131 is used on the HYMOD board			     */
+
+#ifdef CONFIG_FCC_DM9131
+
+/* register definitions */
+
+#define MII_DM9131_ACR		16	/* Aux. Config Register		*/
+#define MII_DM9131_ACSR		17	/* Aux. Config/Status Register	*/
+#define MII_DM9131_10TCSR	18	/* 10BaseT Config/Status Reg.	*/
+#define MII_DM9131_INTR		21	/* Interrupt Register		*/
+#define MII_DM9131_RECR		22	/* Receive Error Counter Reg.	*/
+#define MII_DM9131_DISCR	23	/* Disconnect Counter Register	*/
+
+static void mii_parse_dm9131_acsr(uint mii_reg, struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	switch ((mii_reg >> 12) & 0xf) {
+	case 1: s |= PHY_STAT_10HDX;  break;
+	case 2: s |= PHY_STAT_10FDX;  break;
+	case 4: s |= PHY_STAT_100HDX; break;
+	case 8: s |= PHY_STAT_100FDX; break;
+	}
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_dm9131 = {
+	0x00181b80,
+	"DM9131",
+
+	(const phy_cmd_t []) {  /* config */
+		/* parse cr and anar to get some info */
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup - enable interrupts */
+		{ mk_mii_write(MII_DM9131_INTR, 0x0002), NULL },
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+
+		/* we need to read INTR, SR and ANER to acknowledge */
+
+		{ mk_mii_read(MII_DM9131_INTR), NULL },
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_REG_ANER), NULL },
+
+		/* read acsr to get info */
+
+		{ mk_mii_read(MII_DM9131_ACSR), mii_parse_dm9131_acsr },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_write(MII_DM9131_INTR, 0x0f00), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+
+#endif /* CONFIG_FCC_DM9131 */
+
+
+static phy_info_t *phy_info[] = {
+
+#ifdef CONFIG_FCC_LXT970
+	&phy_info_lxt970,
+#endif /* CONFIG_FCC_LXT970 */
+
+#ifdef CONFIG_FCC_LXT971
+	&phy_info_lxt971,
+#endif /* CONFIG_FCC_LXT971 */
+
+#ifdef CONFIG_FCC_QS6612
+	&phy_info_qs6612,
+#endif /* CONFIG_FCC_QS6612 */
+
+#ifdef CONFIG_FCC_DM9131
+	&phy_info_dm9131,
+#endif /* CONFIG_FCC_DM9131 */
+
+#ifdef CONFIG_FCC_AMD79C873
+	&phy_info_79c873,
+#endif /* CONFIG_FCC_AMD79C873 */
+
+	NULL
+};
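+
+/* mii_discover_phy() and mii_discover_phy3() below walk this NULL-terminated
+ * table: the 32-bit ID assembled from PHYIR1/PHYIR2 is shifted right by four
+ * bits (dropping the revision nibble) and compared against each entry's id;
+ * an ID that matches no entry ends in panic().
+ */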
+
+static void mii_display_status(struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	if (!fep->link && !fep->old_link) {
+		/* Link is still down - don't print anything */
+		return;
+	}
+
+	printk("%s: status: ", dev->name);
+
+	if (!fep->link) {
+		printk("link down");
+	} else {
+		printk("link up");
+
+		switch(s & PHY_STAT_SPMASK) {
+		case PHY_STAT_100FDX: printk(", 100 Mbps Full Duplex"); break;
+		case PHY_STAT_100HDX: printk(", 100 Mbps Half Duplex"); break;
+		case PHY_STAT_10FDX:  printk(", 10 Mbps Full Duplex");  break;
+		case PHY_STAT_10HDX:  printk(", 10 Mbps Half Duplex");  break;
+		default:
+			printk(", Unknown speed/duplex");
+		}
+
+		if (s & PHY_STAT_ANC)
+			printk(", auto-negotiation complete");
+	}
+
+	if (s & PHY_STAT_FAULT)
+		printk(", remote fault");
+
+	printk(".\n");
+}
+
+static void mii_display_config(struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	printk("%s: config: auto-negotiation ", dev->name);
+
+	if (s & PHY_CONF_ANE)
+		printk("on");
+	else
+		printk("off");
+
+	if (s & PHY_CONF_100FDX)
+		printk(", 100FDX");
+	if (s & PHY_CONF_100HDX)
+		printk(", 100HDX");
+	if (s & PHY_CONF_10FDX)
+		printk(", 10FDX");
+	if (s & PHY_CONF_10HDX)
+		printk(", 10HDX");
+	if (!(s & PHY_CONF_SPMASK))
+		printk(", No speed/duplex selected?");
+
+	if (s & PHY_CONF_LOOP)
+		printk(", loopback enabled");
+
+	printk(".\n");
+
+	fep->sequence_done = 1;
+}
+
+static void mii_relink(struct net_device *dev)
+{
+	struct fcc_enet_private *fep = dev->priv;
+	int duplex;
+
+	fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
+	mii_display_status(dev);
+	fep->old_link = fep->link;
+
+	if (fep->link) {
+		duplex = 0;
+		if (fep->phy_status
+		    & (PHY_STAT_100FDX | PHY_STAT_10FDX))
+			duplex = 1;
+		fcc_restart(dev, duplex);
+	} else {
+		fcc_stop(dev);
+	}
+}
+
+static void mii_queue_relink(uint mii_reg, struct net_device *dev)
+{
+	struct fcc_enet_private *fep = dev->priv;
+
+	fep->phy_task.routine = (void *)mii_relink;
+	fep->phy_task.data = dev;
+	schedule_task(&fep->phy_task);
+}
+
+static void mii_queue_config(uint mii_reg, struct net_device *dev)
+{
+	struct fcc_enet_private *fep = dev->priv;
+
+	fep->phy_task.routine = (void *)mii_display_config;
+	fep->phy_task.data = dev;
+	schedule_task(&fep->phy_task);
+}
+
+
+
+phy_cmd_t phy_cmd_relink[] = { { mk_mii_read(MII_REG_CR), mii_queue_relink },
+			       { mk_mii_end, } };
+phy_cmd_t phy_cmd_config[] = { { mk_mii_read(MII_REG_CR), mii_queue_config },
+			       { mk_mii_end, } };
+
+
+/* Read remainder of PHY ID.
+*/
+static void
+mii_discover_phy3(uint mii_reg, struct net_device *dev)
+{
+	struct fcc_enet_private *fep;
+	int	i;
+
+	fep = dev->priv;
+	fep->phy_id |= (mii_reg & 0xffff);
+
+	for(i = 0; phy_info[i]; i++)
+		if(phy_info[i]->id == (fep->phy_id >> 4))
+			break;
+
+	if(!phy_info[i])
+		panic("%s: PHY id 0x%08x is not supported!\n",
+		      dev->name, fep->phy_id);
+
+	fep->phy = phy_info[i];
+
+	printk("%s: Phy @ 0x%x, type %s (0x%08x)\n",
+		dev->name, fep->phy_addr, fep->phy->name, fep->phy_id);
+}
+
+/* Scan all of the MII PHY addresses looking for someone to respond
+ * with a valid ID.  This usually happens quickly.
+ */
+static void
+mii_discover_phy(uint mii_reg, struct net_device *dev)
+{
+	struct fcc_enet_private *fep;
+	uint	phytype;
+
+	fep = dev->priv;
+
+	if ((phytype = (mii_reg & 0xfff)) != 0xfff && phytype != 0) {
+
+		/* Got first part of ID, now get remainder. */
+		fep->phy_id = phytype << 16;
+		mii_queue(dev, mk_mii_read(MII_REG_PHYIR2), mii_discover_phy3);
+	} else {
+		fep->phy_addr++;
+		if (fep->phy_addr < 32) {
+			mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
+							mii_discover_phy);
+		} else {
+			printk("FCC: No PHY device found.\n");
+		}
+	}
+}
+
+/* This interrupt occurs when the PHY detects a link change. */
+#if !defined (CONFIG_PM826)
+static void
+mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs)
+{
+	struct	net_device *dev = dev_id;
+	struct fcc_enet_private *fep = dev->priv;
+
+	mii_do_cmd(dev, fep->phy->ack_int);
+	mii_do_cmd(dev, phy_cmd_relink);  /* restart and display status */
+}
+#endif	/* !CONFIG_PM826 */
+
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+#ifdef ORIGINAL_VERSION
+/* Set or clear the multicast filter for this adaptor.
+ * Skeleton taken from sunlance driver.
+ * The CPM Ethernet implementation allows Multicast as well as individual
+ * MAC address filtering.  Some of the drivers check to make sure it is
+ * a group multicast address, and discard those that are not.  I guess I
+ * will do the same for now, but just remove the test if you want
+ * individual filtering as well (do the upper net layers want or support
+ * this kind of feature?).
+ */
+static void
+set_multicast_list(struct net_device *dev)
+{
+	struct	fcc_enet_private *cep;
+	struct	dev_mc_list *dmi;
+	u_char	*mcptr, *tdptr;
+	volatile fcc_enet_t *ep;
+	int	i, j;
+
+	cep = (struct fcc_enet_private *)dev->priv;
+
+return;
+	/* Get pointer to FCC area in parameter RAM.
+	*/
+	ep = (fcc_enet_t *)dev->base_addr;
+
+	if (dev->flags&IFF_PROMISC) {
+
+		/* Log any net taps. */
+		printk("%s: Promiscuous mode enabled.\n", dev->name);
+		cep->fccp->fcc_fpsmr |= FCC_PSMR_PRO;
+	} else {
+
+		cep->fccp->fcc_fpsmr &= ~FCC_PSMR_PRO;
+
+		if (dev->flags & IFF_ALLMULTI) {
+			/* Catch all multicast addresses, so set the
+			 * filter to all 1's.
+			 */
+			ep->fen_gaddrh = 0xffffffff;
+			ep->fen_gaddrl = 0xffffffff;
+		}
+		else {
+			/* Clear filter and add the addresses in the list.
+			*/
+			ep->fen_gaddrh = 0;
+			ep->fen_gaddrl = 0;
+
+			dmi = dev->mc_list;
+
+			for (i=0; i<dev->mc_count; i++) {
+
+				/* Only support group multicast for now.
+				*/
+				if (!(dmi->dmi_addr[0] & 1))
+					continue;
+
+				/* The address in dmi_addr is LSB first,
+				 * and taddr is MSB first.  We have to
+				 * copy bytes MSB first from dmi_addr.
+				 */
+				mcptr = (u_char *)dmi->dmi_addr + 5;
+				tdptr = (u_char *)&ep->fen_taddrh;
+				for (j=0; j<6; j++)
+					*tdptr++ = *mcptr--;
+
+				/* Ask CPM to run CRC and set bit in
+				 * filter mask.
+				 */
+				cpmp->cp_cpcr = mk_cr_cmd(cep->fip->fc_cpmpage,
+						cep->fip->fc_cpmblock, 0x0c,
+						CPM_CR_SET_GADDR) | CPM_CR_FLG;
+				udelay(10);
+				while (cpmp->cp_cpcr & CPM_CR_FLG);
+			}
+		}
+	}
+}
+
+
+/* Set the individual MAC address.
+ */
+int fcc_enet_set_mac_address(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr= (struct sockaddr *) p;
+	struct fcc_enet_private *cep;
+	volatile fcc_enet_t *ep;
+	unsigned char *eap;
+	int i;
+
+	cep = (struct fcc_enet_private *)(dev->priv);
+	ep = cep->ep;
+
+	if (netif_running(dev))
+		return -EBUSY;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+	eap = (unsigned char *) &(ep->fen_paddrh);
+	for (i=5; i>=0; i--)
+		*eap++ = addr->sa_data[i];
+
+	return 0;
+}
+#endif /* ORIGINAL_VERSION */
+
+
+/* Initialize the CPM Ethernet on FCC.
+ */
+int __init fec_enet_init(void)
+{
+	struct rtnet_device *rtdev = NULL;
+	struct fcc_enet_private *cep;
+	fcc_info_t	*fip;
+	int		i, np;
+	volatile	immap_t		*immap;
+	volatile	iop8260_t	*io;
+
+	immap = (immap_t *)IMAP_ADDR;	/* and to internal registers */
+	io = &immap->im_ioport;
+
+	for (np = 0, fip = fcc_ports;
+	     np < sizeof(fcc_ports) / sizeof(fcc_info_t);
+	     np++, fip++) {
+
+		/* Skip FCC ports not used for RTnet.
+		 */
+		if (np != rtnet_fcc - 1) continue;
+
+		/* Allocate some private information and create an Ethernet device instance.
+		*/
+		if (!rx_pool_size)
+			rx_pool_size = RX_RING_SIZE * 2;
+
+		rtdev = rt_alloc_etherdev(sizeof(struct fcc_enet_private),
+					rx_pool_size + TX_RING_SIZE);
+		if (rtdev == NULL) {
+			printk(KERN_ERR "fcc_enet: Could not allocate ethernet device.\n");
+			return -1;
+		}
+		rtdev_alloc_name(rtdev, "rteth%d");
+		rt_rtdev_connect(rtdev, &RTDEV_manager);
+		rtdev->vers = RTDEV_VERS_2_0;
+
+		cep = (struct fcc_enet_private *)rtdev->priv;
+		rtdm_lock_init(&cep->lock);
+		cep->fip = fip;
+		fip->rtdev = rtdev; /* need for cleanup */
+
+		init_fcc_shutdown(fip, cep, immap);
+		init_fcc_ioports(fip, io, immap);
+		init_fcc_param(fip, rtdev, immap);
+
+		rtdev->base_addr = (unsigned long)(cep->ep);
+
+		/* The CPM Ethernet specific entries in the device
+		 * structure.
+		 */
+		rtdev->open = fcc_enet_open;
+		rtdev->hard_start_xmit = fcc_enet_start_xmit;
+		rtdev->stop = fcc_enet_close;
+		rtdev->hard_header = &rt_eth_header;
+		rtdev->get_stats = fcc_enet_get_stats;
+
+		if ((i = rt_register_rtnetdev(rtdev))) {
+			rtdm_irq_disable(&cep->irq_handle);
+			rtdm_irq_free(&cep->irq_handle);
+			rtdev_free(rtdev);
+			return i;
+		}
+		init_fcc_startup(fip, rtdev);
+
+		printk("%s: FCC%d ENET Version 0.4, %02x:%02x:%02x:%02x:%02x:%02x\n",
+		       rtdev->name, fip->fc_fccnum + 1,
+		       rtdev->dev_addr[0], rtdev->dev_addr[1], rtdev->dev_addr[2],
+		       rtdev->dev_addr[3], rtdev->dev_addr[4], rtdev->dev_addr[5]);
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+		/* Queue up command to detect the PHY and initialize the
+		 * remainder of the interface.
+		 */
+		cep->phy_addr = 0;
+		mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+	}
+
+	return 0;
+}
+
+/* Make sure the device is shut down during initialization.
+*/
+static void __init
+init_fcc_shutdown(fcc_info_t *fip, struct fcc_enet_private *cep,
+						volatile immap_t *immap)
+{
+	volatile	fcc_enet_t	*ep;
+	volatile	fcc_t		*fccp;
+
+	/* Get pointer to FCC area in parameter RAM.
+	*/
+	ep = (fcc_enet_t *)(&immap->im_dprambase[fip->fc_proff]);
+
+	/* And another to the FCC register area.
+	*/
+	fccp = (volatile fcc_t *)(&immap->im_fcc[fip->fc_fccnum]);
+	cep->fccp = fccp;		/* Keep the pointers handy */
+	cep->ep = ep;
+
+	/* Disable receive and transmit in case someone left it running.
+	*/
+	fccp->fcc_gfmr &= ~(FCC_GFMR_ENR | FCC_GFMR_ENT);
+}
+
+/* Initialize the I/O pins for the FCC Ethernet.
+*/
+static void __init
+init_fcc_ioports(fcc_info_t *fip, volatile iop8260_t *io,
+						volatile immap_t *immap)
+{
+
+	/* FCC1 pins are on port A/C.  FCC2/3 are port B/C.
+	*/
+	if (fip->fc_proff == PROFF_FCC1) {
+		/* Configure port A and C pins for FCC1 Ethernet.
+		 */
+		io->iop_pdira &= ~PA1_DIRA0;
+		io->iop_pdira |= PA1_DIRA1;
+		io->iop_psora &= ~PA1_PSORA0;
+		io->iop_psora |= PA1_PSORA1;
+		io->iop_ppara |= (PA1_DIRA0 | PA1_DIRA1);
+	}
+	if (fip->fc_proff == PROFF_FCC2) {
+		/* Configure port B and C pins for FCC Ethernet.
+		 */
+		io->iop_pdirb &= ~PB2_DIRB0;
+		io->iop_pdirb |= PB2_DIRB1;
+		io->iop_psorb &= ~PB2_PSORB0;
+		io->iop_psorb |= PB2_PSORB1;
+		io->iop_pparb |= (PB2_DIRB0 | PB2_DIRB1);
+	}
+	if (fip->fc_proff == PROFF_FCC3) {
+		/* Configure port B and C pins for FCC Ethernet.
+		 */
+		io->iop_pdirb &= ~PB3_DIRB0;
+		io->iop_pdirb |= PB3_DIRB1;
+		io->iop_psorb &= ~PB3_PSORB0;
+		io->iop_psorb |= PB3_PSORB1;
+		io->iop_pparb |= (PB3_DIRB0 | PB3_DIRB1);
+	}
+
+	/* Port C has clocks......
+	*/
+	io->iop_psorc &= ~(fip->fc_trxclocks);
+	io->iop_pdirc &= ~(fip->fc_trxclocks);
+	io->iop_pparc |= fip->fc_trxclocks;
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	/* ....and the MII serial clock/data.
+	*/
+#ifndef	CONFIG_PM826
+	IOP_DAT(io,fip->fc_port) |= (fip->fc_mdio | fip->fc_mdck);
+	IOP_ODR(io,fip->fc_port) &= ~(fip->fc_mdio | fip->fc_mdck);
+#endif	/* CONFIG_PM826 */
+	IOP_DIR(io,fip->fc_port) |= (fip->fc_mdio | fip->fc_mdck);
+	IOP_PAR(io,fip->fc_port) &= ~(fip->fc_mdio | fip->fc_mdck);
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+	/* Configure Serial Interface clock routing.
+	 * First, clear all FCC bits to zero,
+	 * then set the ones we want.
+	 */
+	immap->im_cpmux.cmx_fcr &= ~(fip->fc_clockmask);
+	immap->im_cpmux.cmx_fcr |= fip->fc_clockroute;
+}
+
+static void __init
+init_fcc_param(fcc_info_t *fip, struct rtnet_device *rtdev,
+						volatile immap_t *immap)
+{
+	unsigned char	*eap;
+	unsigned long	mem_addr;
+	bd_t		*bd;
+	int		i, j;
+	struct		fcc_enet_private *cep;
+	volatile	fcc_enet_t	*ep;
+	volatile	cbd_t		*bdp;
+	volatile	cpm8260_t	*cp;
+
+	cep = (struct fcc_enet_private *)rtdev->priv;
+	ep = cep->ep;
+	cp = cpmp;
+
+	bd = (bd_t *)__res;
+
+	/* Zero the whole thing... I must have missed some fields when
+	 * clearing them individually; it works when I do this.
+	 */
+	memset((char *)ep, 0, sizeof(fcc_enet_t));
+
+	/* Allocate space for the buffer descriptors in the DP ram.
+	 * These are relative offsets in the DP ram address space.
+	 * Initialize base addresses for the buffer descriptors.
+	 */
+	cep->rx_bd_base = (cbd_t *)m8260_cpm_hostalloc(sizeof(cbd_t) * RX_RING_SIZE, 8);
+	ep->fen_genfcc.fcc_rbase = __pa(cep->rx_bd_base);
+	cep->tx_bd_base = (cbd_t *)m8260_cpm_hostalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
+	ep->fen_genfcc.fcc_tbase = __pa(cep->tx_bd_base);
+
+	cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
+	cep->cur_rx = cep->rx_bd_base;
+
+	ep->fen_genfcc.fcc_rstate = (CPMFCR_GBL | CPMFCR_EB) << 24;
+	ep->fen_genfcc.fcc_tstate = (CPMFCR_GBL | CPMFCR_EB) << 24;
+
+	/* Set maximum bytes per receive buffer.
+	 * It must be a multiple of 32.
+	 */
+	ep->fen_genfcc.fcc_mrblr = PKT_MAXBLR_SIZE;
+
+	/* Allocate space in the reserved FCC area of DPRAM for the
+	 * internal buffers.  No one uses this space (yet), so we
+	 * can do this.  Later, we will add resource management for
+	 * this area.
+	 */
+	mem_addr = CPM_FCC_SPECIAL_BASE + (fip->fc_fccnum * 128);
+	ep->fen_genfcc.fcc_riptr = mem_addr;
+	ep->fen_genfcc.fcc_tiptr = mem_addr+32;
+	ep->fen_padptr = mem_addr+64;
+	memset((char *)(&(immap->im_dprambase[(mem_addr+64)])), 0x88, 32);
+
+	ep->fen_genfcc.fcc_rbptr = 0;
+	ep->fen_genfcc.fcc_tbptr = 0;
+	ep->fen_genfcc.fcc_rcrc = 0;
+	ep->fen_genfcc.fcc_tcrc = 0;
+	ep->fen_genfcc.fcc_res1 = 0;
+	ep->fen_genfcc.fcc_res2 = 0;
+
+	ep->fen_camptr = 0;	/* CAM isn't used in this driver */
+
+	/* Set CRC preset and mask.
+	*/
+	ep->fen_cmask = 0xdebb20e3;
+	ep->fen_cpres = 0xffffffff;
+
+	ep->fen_crcec = 0;	/* CRC Error counter */
+	ep->fen_alec = 0;	/* alignment error counter */
+	ep->fen_disfc = 0;	/* discard frame counter */
+	ep->fen_retlim = 15;	/* Retry limit threshold */
+	ep->fen_pper = 0;	/* Normal persistence */
+
+	/* Clear hash filter tables.
+	*/
+	ep->fen_gaddrh = 0;
+	ep->fen_gaddrl = 0;
+	ep->fen_iaddrh = 0;
+	ep->fen_iaddrl = 0;
+
+	/* Clear the Out-of-sequence TxBD.
+	*/
+	ep->fen_tfcstat = 0;
+	ep->fen_tfclen = 0;
+	ep->fen_tfcptr = 0;
+
+	ep->fen_mflr = PKT_MAXBUF_SIZE;   /* maximum frame length register */
+	ep->fen_minflr = PKT_MINBUF_SIZE;  /* minimum frame length register */
+
+	/* Set Ethernet station address.
+	 *
+	 * This is supplied in the board information structure, so we
+	 * copy that into the controller.
+	 */
+	eap = (unsigned char *)&(ep->fen_paddrh);
+#if defined(CONFIG_CPU86) || defined(CONFIG_TQM8260)
+	/*
+	 * TQM8260 and CPU86 use sequential MAC addresses
+	 */
+	*eap++ = rtdev->dev_addr[5] = bd->bi_enetaddr[5] + fip->fc_fccnum;
+	for (i=4; i>=0; i--) {
+		*eap++ = rtdev->dev_addr[i] = bd->bi_enetaddr[i];
+	}
+#elif defined(CONFIG_PM826)
+	*eap++ = rtdev->dev_addr[5] = bd->bi_enetaddr[5] + fip->fc_fccnum + 1;
+	for (i=4; i>=0; i--) {
+		*eap++ = rtdev->dev_addr[i] = bd->bi_enetaddr[i];
+	}
+#else
+	/*
+	 * So far we have only been given one Ethernet address. We make
+	 * it unique by toggling selected bits in the upper byte of the
+	 * non-static part of the address for the second and third ports
+	 * (the first port uses the address supplied as is), as illustrated
+	 * in the example below.
+	 */
+	for (i=5; i>=0; i--) {
+		if (i == 3 && fip->fc_fccnum != 0) {
+			rtdev->dev_addr[i] = bd->bi_enetaddr[i];
+			rtdev->dev_addr[i] ^= (1 << (7 - fip->fc_fccnum));
+			*eap++ = rtdev->dev_addr[i];
+		}
+		else {
+			*eap++ = rtdev->dev_addr[i] = bd->bi_enetaddr[i];
+		}
+	}
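+
+	/*
+	 * Worked example (addresses purely illustrative): with a base address
+	 * of 00:11:22:33:44:55, FCC1 (fc_fccnum == 0) keeps it unchanged,
+	 * FCC2 (fc_fccnum == 1) flips bit 6 of byte 3 -> 00:11:22:73:44:55,
+	 * and FCC3 (fc_fccnum == 2) flips bit 5 -> 00:11:22:13:44:55.
+	 */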
+#endif
+
+	ep->fen_taddrh = 0;
+	ep->fen_taddrm = 0;
+	ep->fen_taddrl = 0;
+
+	ep->fen_maxd1 = PKT_MAXDMA_SIZE;	/* maximum DMA1 length */
+	ep->fen_maxd2 = PKT_MAXDMA_SIZE;	/* maximum DMA2 length */
+
+	/* Clear stat counters, in case we ever enable RMON.
+	*/
+	ep->fen_octc = 0;
+	ep->fen_colc = 0;
+	ep->fen_broc = 0;
+	ep->fen_mulc = 0;
+	ep->fen_uspc = 0;
+	ep->fen_frgc = 0;
+	ep->fen_ospc = 0;
+	ep->fen_jbrc = 0;
+	ep->fen_p64c = 0;
+	ep->fen_p65c = 0;
+	ep->fen_p128c = 0;
+	ep->fen_p256c = 0;
+	ep->fen_p512c = 0;
+	ep->fen_p1024c = 0;
+
+	ep->fen_rfthr = 0;	/* Suggested by manual */
+	ep->fen_rfcnt = 0;
+	ep->fen_cftype = 0;
+
+	/* Now allocate the host memory pages and initialize the
+	 * buffer descriptors.
+	 */
+	bdp = cep->tx_bd_base;
+	for (i=0; i<TX_RING_SIZE; i++) {
+
+		/* Initialize the BD for every fragment in the page.
+		*/
+		bdp->cbd_sc = 0;
+		bdp->cbd_datlen = 0;
+		bdp->cbd_bufaddr = 0;
+		bdp++;
+	}
+
+	/* Set the last buffer to wrap.
+	*/
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	bdp = cep->rx_bd_base;
+	for (i=0; i<FCC_ENET_RX_PAGES; i++) {
+
+		/* Allocate a page.
+		*/
+		mem_addr = __get_free_page(GFP_KERNEL);
+
+		/* Initialize the BD for every fragment in the page.
+		*/
+		for (j=0; j<FCC_ENET_RX_FRPPG; j++) {
+			bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR;
+			bdp->cbd_datlen = 0;
+			bdp->cbd_bufaddr = __pa(mem_addr);
+			mem_addr += FCC_ENET_RX_FRSIZE;
+			bdp++;
+		}
+	}
+
+	/* Set the last buffer to wrap.
+	*/
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	/* Let's re-initialize the channel now.  We have to do it later
+	 * than the manual describes because we have just now finished
+	 * the BD initialization.
+	 */
+	cp->cp_cpcr = mk_cr_cmd(fip->fc_cpmpage, fip->fc_cpmblock, 0x0c,
+			CPM_CR_INIT_TRX) | CPM_CR_FLG;
+	while (cp->cp_cpcr & CPM_CR_FLG);
+
+	cep->skb_cur = cep->skb_dirty = 0;
+}
+
+/* Let 'er rip.
+*/
+static void __init
+init_fcc_startup(fcc_info_t *fip, struct rtnet_device *rtdev)
+{
+	volatile fcc_t	*fccp;
+	struct fcc_enet_private *cep;
+
+	cep = (struct fcc_enet_private *)rtdev->priv;
+	fccp = cep->fccp;
+
+	fccp->fcc_fcce = 0xffff;	/* Clear any pending events */
+
+	/* Enable interrupts for transmit error, complete frame
+	 * received, and any transmit buffer for which we have also set
+	 * the interrupt flag.
+	 */
+	fccp->fcc_fccm = (FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB);
+
+	rt_stack_connect(rtdev, &STACK_manager);
+
+	/* Install our interrupt handler.
+	*/
+	if (rtdm_irq_request(&cep->irq_handle, fip->fc_interrupt,
+			     fcc_enet_interrupt, 0, "rt_mpc8260_fcc_enet", rtdev))  {
+		printk(KERN_ERR "Couldn't request IRQ %d\n", rtdev->irq);
+		rtdev_free(rtdev);
+		return;
+	}
+
+
+#if defined (CONFIG_XENO_DRIVERS_NET_USE_MDIO) && !defined (CONFIG_PM826)
+# ifndef PHY_INTERRUPT
+#  error Want to use MDIO, but PHY_INTERRUPT not defined!
+# endif
+	if (request_8xxirq(PHY_INTERRUPT, mii_link_interrupt, 0,
+							"mii", dev) < 0)
+		printk("Can't get MII IRQ %d\n", PHY_INTERRUPT);
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO, CONFIG_PM826 */
+
+	/* Set GFMR to enable Ethernet operating mode.
+	 */
+#ifndef CONFIG_EST8260
+	fccp->fcc_gfmr = (FCC_GFMR_TCI | FCC_GFMR_MODE_ENET);
+#else
+	fccp->fcc_gfmr = FCC_GFMR_MODE_ENET;
+#endif
+
+	/* Set sync/delimiters.
+	*/
+	fccp->fcc_fdsr = 0xd555;
+
+	/* Set protocol specific processing mode for Ethernet.
+	 * This has to be adjusted for Full Duplex operation after we can
+	 * determine how to detect that.
+	 */
+	fccp->fcc_fpsmr = FCC_PSMR_ENCRC;
+
+#ifdef CONFIG_ADS8260
+	/* Enable the PHY.
+	*/
+	ads_csr_addr[1] |= BCSR1_FETH_RST;	/* Remove reset */
+	ads_csr_addr[1] &= ~BCSR1_FETHIEN;	/* Enable */
+#endif
+
+#if defined(CONFIG_XENO_DRIVERS_NET_USE_MDIO) || defined(CONFIG_TQM8260)
+	/* start in full duplex mode, and negotiate speed */
+	fcc_restart (rtdev, 1);
+#else
+	/* start in half duplex mode */
+	fcc_restart (rtdev, 0);
+#endif
+}
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+/* MII command/status interface.
+ * I'm not going to describe all of the details.  You can find the
+ * protocol definition in many other places, including the data sheet
+ * of most PHY parts.
+ * I wonder what "they" were thinking (maybe they weren't) when they left
+ * the I2C in the CPM but leave me to toggle these bits by hand...
+ *
+ * Timing is critical, especially on faster CPUs ...
+ */
+#define MDIO_DELAY	5
+
+#define FCC_MDIO(bit) do {					\
+	udelay(MDIO_DELAY);					\
+	if (bit)						\
+		IOP_DAT(io,fip->fc_port) |= fip->fc_mdio;	\
+	else							\
+		IOP_DAT(io,fip->fc_port) &= ~fip->fc_mdio;	\
+} while(0)
+
+#define FCC_MDC(bit) do {					\
+	udelay(MDIO_DELAY);					\
+	if (bit)						\
+		IOP_DAT(io,fip->fc_port) |= fip->fc_mdck;	\
+	else							\
+		IOP_DAT(io,fip->fc_port) &= ~fip->fc_mdck;	\
+} while(0)
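+
+/* What mii_send_receive() below actually clocks out: 32 preamble cycles with
+ * MDIO held high, then the command MSB first - all 32 bits for a write, only
+ * the upper 14 bits for a read; on reads the MDIO line is then turned around
+ * to input and 16 data bits are clocked in, MSB first.  A final 32 cycles
+ * with MDIO driven high release the bus again.
+ */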
+
+static uint
+mii_send_receive(fcc_info_t *fip, uint cmd)
+{
+	uint		retval;
+	int		read_op, i, off;
+	volatile	immap_t		*immap;
+	volatile	iop8260_t	*io;
+
+	immap = (immap_t *)IMAP_ADDR;
+	io = &immap->im_ioport;
+
+	IOP_DIR(io,fip->fc_port) |= (fip->fc_mdio | fip->fc_mdck);
+
+	read_op = ((cmd & 0xf0000000) == 0x60000000);
+
+	/* Write preamble
+	 */
+	for (i = 0; i < 32; i++)
+	{
+		FCC_MDC(0);
+		FCC_MDIO(1);
+		FCC_MDC(1);
+	}
+
+	/* Write data
+	 */
+	for (i = 0, off = 31; i < (read_op ? 14 : 32); i++, --off)
+	{
+		FCC_MDC(0);
+		FCC_MDIO((cmd >> off) & 0x00000001);
+		FCC_MDC(1);
+	}
+
+	retval = cmd;
+
+	if (read_op)
+	{
+		retval >>= 16;
+
+		FCC_MDC(0);
+		IOP_DIR(io,fip->fc_port) &= ~fip->fc_mdio;
+		FCC_MDC(1);
+		FCC_MDC(0);
+
+		for (i = 0, off = 15; i < 16; i++, off--)
+		{
+			FCC_MDC(1);
+			udelay(MDIO_DELAY);
+			retval <<= 1;
+			if (IOP_DAT(io,fip->fc_port) & fip->fc_mdio)
+				retval++;
+			FCC_MDC(0);
+		}
+	}
+
+	IOP_DIR(io,fip->fc_port) |= (fip->fc_mdio | fip->fc_mdck);
+
+	for (i = 0; i < 32; i++)
+	{
+		FCC_MDC(0);
+		FCC_MDIO(1);
+		FCC_MDC(1);
+	}
+
+	return retval;
+}
+
+static void
+fcc_stop(struct net_device *dev)
+{
+	volatile fcc_t	*fccp;
+	struct fcc_enet_private	*fcp;
+
+	fcp = (struct fcc_enet_private *)(dev->priv);
+	fccp = fcp->fccp;
+
+	/* Disable transmit/receive */
+	fccp->fcc_gfmr &= ~(FCC_GFMR_ENR | FCC_GFMR_ENT);
+}
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+static void
+fcc_restart(struct rtnet_device *rtdev, int duplex)
+{
+	volatile fcc_t	*fccp;
+	struct fcc_enet_private	*fcp;
+
+	fcp = (struct fcc_enet_private *)rtdev->priv;
+	fccp = fcp->fccp;
+
+	if (duplex)
+		fccp->fcc_fpsmr |= (FCC_PSMR_FDE | FCC_PSMR_LPB);
+	else
+		fccp->fcc_fpsmr &= ~(FCC_PSMR_FDE | FCC_PSMR_LPB);
+
+	/* Enable transmit/receive */
+	fccp->fcc_gfmr |= FCC_GFMR_ENR | FCC_GFMR_ENT;
+}
+
+static int
+fcc_enet_open(struct rtnet_device *rtdev)
+{
+	struct fcc_enet_private *fep = rtdev->priv;
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	fep->sequence_done = 0;
+	fep->link = 0;
+
+	if (fep->phy) {
+		mii_do_cmd(dev, fep->phy->ack_int);
+		mii_do_cmd(dev, fep->phy->config);
+		mii_do_cmd(dev, phy_cmd_config);  /* display configuration */
+		while(!fep->sequence_done)
+			schedule();
+
+		mii_do_cmd(dev, fep->phy->startup);
+#ifdef	CONFIG_PM826
+		/* Read the autonegotiation results */
+		mii_do_cmd(dev, fep->phy->ack_int);
+		mii_do_cmd(dev, phy_cmd_relink);
+#endif	/* CONFIG_PM826 */
+		rtnetif_start_queue(rtdev);
+		return 0;		/* Success */
+	}
+	return -ENODEV;		/* No PHY we understand */
+#else
+	fep->link = 1;
+	rtnetif_start_queue(rtdev);
+	return 0;					/* Always succeed */
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+}
+
+static void __exit fcc_enet_cleanup(void)
+{
+	struct rtnet_device *rtdev;
+	volatile immap_t *immap = (immap_t *)IMAP_ADDR;
+	struct fcc_enet_private *cep;
+	fcc_info_t *fip;
+	int np;
+
+	for (np = 0, fip = fcc_ports;
+	     np < sizeof(fcc_ports) / sizeof(fcc_info_t);
+	     np++, fip++) {
+
+		/* Skip FCC ports not used for RTnet. */
+		if (np != rtnet_fcc - 1) continue;
+
+		rtdev = fip->rtdev;
+		cep = (struct fcc_enet_private *)rtdev->priv;
+
+		rtdm_irq_disable(&cep->irq_handle);
+		rtdm_irq_free(&cep->irq_handle);
+
+		init_fcc_shutdown(fip, cep, immap);
+		printk("%s: cleanup incomplete (m8260_cpm_dpfree does not exist)!\n",
+		       rtdev->name);
+		rt_stack_disconnect(rtdev);
+		rt_unregister_rtnetdev(rtdev);
+		rt_rtdev_disconnect(rtdev);
+
+		printk("%s: unloaded\n", rtdev->name);
+		rtdev_free(rtdev);
+	}
+}
+
+module_init(fec_enet_init);
+module_exit(fcc_enet_cleanup);
+++ linux-patched/drivers/xenomai/net/drivers/rt_macb.h	2022-03-21 12:58:29.595886953 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/8139too.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Atmel MACB Ethernet Controller driver
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _MACB_H
+#define _MACB_H
+
+#define MACB_GREGS_NBR 16
+#define MACB_GREGS_VERSION 1
+
+/* MACB register offsets */
+#define MACB_NCR				0x0000
+#define MACB_NCFGR				0x0004
+#define MACB_NSR				0x0008
+#define MACB_TAR				0x000c /* AT91RM9200 only */
+#define MACB_TCR				0x0010 /* AT91RM9200 only */
+#define MACB_TSR				0x0014
+#define MACB_RBQP				0x0018
+#define MACB_TBQP				0x001c
+#define MACB_RSR				0x0020
+#define MACB_ISR				0x0024
+#define MACB_IER				0x0028
+#define MACB_IDR				0x002c
+#define MACB_IMR				0x0030
+#define MACB_MAN				0x0034
+#define MACB_PTR				0x0038
+#define MACB_PFR				0x003c
+#define MACB_FTO				0x0040
+#define MACB_SCF				0x0044
+#define MACB_MCF				0x0048
+#define MACB_FRO				0x004c
+#define MACB_FCSE				0x0050
+#define MACB_ALE				0x0054
+#define MACB_DTF				0x0058
+#define MACB_LCOL				0x005c
+#define MACB_EXCOL				0x0060
+#define MACB_TUND				0x0064
+#define MACB_CSE				0x0068
+#define MACB_RRE				0x006c
+#define MACB_ROVR				0x0070
+#define MACB_RSE				0x0074
+#define MACB_ELE				0x0078
+#define MACB_RJA				0x007c
+#define MACB_USF				0x0080
+#define MACB_STE				0x0084
+#define MACB_RLE				0x0088
+#define MACB_TPF				0x008c
+#define MACB_HRB				0x0090
+#define MACB_HRT				0x0094
+#define MACB_SA1B				0x0098
+#define MACB_SA1T				0x009c
+#define MACB_SA2B				0x00a0
+#define MACB_SA2T				0x00a4
+#define MACB_SA3B				0x00a8
+#define MACB_SA3T				0x00ac
+#define MACB_SA4B				0x00b0
+#define MACB_SA4T				0x00b4
+#define MACB_TID				0x00b8
+#define MACB_TPQ				0x00bc
+#define MACB_USRIO				0x00c0
+#define MACB_WOL				0x00c4
+#define MACB_MID				0x00fc
+
+/* GEM register offsets. */
+#define GEM_NCFGR				0x0004
+#define GEM_USRIO				0x000c
+#define GEM_DMACFG				0x0010
+#define GEM_HRB					0x0080
+#define GEM_HRT					0x0084
+#define GEM_SA1B				0x0088
+#define GEM_SA1T				0x008C
+#define GEM_SA2B				0x0090
+#define GEM_SA2T				0x0094
+#define GEM_SA3B				0x0098
+#define GEM_SA3T				0x009C
+#define GEM_SA4B				0x00A0
+#define GEM_SA4T				0x00A4
+#define GEM_OTX					0x0100
+#define GEM_DCFG1				0x0280
+#define GEM_DCFG2				0x0284
+#define GEM_DCFG3				0x0288
+#define GEM_DCFG4				0x028c
+#define GEM_DCFG5				0x0290
+#define GEM_DCFG6				0x0294
+#define GEM_DCFG7				0x0298
+
+/* Bitfields in NCR */
+#define MACB_LB_OFFSET				0
+#define MACB_LB_SIZE				1
+#define MACB_LLB_OFFSET				1
+#define MACB_LLB_SIZE				1
+#define MACB_RE_OFFSET				2
+#define MACB_RE_SIZE				1
+#define MACB_TE_OFFSET				3
+#define MACB_TE_SIZE				1
+#define MACB_MPE_OFFSET				4
+#define MACB_MPE_SIZE				1
+#define MACB_CLRSTAT_OFFSET			5
+#define MACB_CLRSTAT_SIZE			1
+#define MACB_INCSTAT_OFFSET			6
+#define MACB_INCSTAT_SIZE			1
+#define MACB_WESTAT_OFFSET			7
+#define MACB_WESTAT_SIZE			1
+#define MACB_BP_OFFSET				8
+#define MACB_BP_SIZE				1
+#define MACB_TSTART_OFFSET			9
+#define MACB_TSTART_SIZE			1
+#define MACB_THALT_OFFSET			10
+#define MACB_THALT_SIZE				1
+#define MACB_NCR_TPF_OFFSET			11
+#define MACB_NCR_TPF_SIZE			1
+#define MACB_TZQ_OFFSET				12
+#define MACB_TZQ_SIZE				1
+
+/* Bitfields in NCFGR */
+#define MACB_SPD_OFFSET				0
+#define MACB_SPD_SIZE				1
+#define MACB_FD_OFFSET				1
+#define MACB_FD_SIZE				1
+#define MACB_BIT_RATE_OFFSET			2
+#define MACB_BIT_RATE_SIZE			1
+#define MACB_JFRAME_OFFSET			3
+#define MACB_JFRAME_SIZE			1
+#define MACB_CAF_OFFSET				4
+#define MACB_CAF_SIZE				1
+#define MACB_NBC_OFFSET				5
+#define MACB_NBC_SIZE				1
+#define MACB_NCFGR_MTI_OFFSET			6
+#define MACB_NCFGR_MTI_SIZE			1
+#define MACB_UNI_OFFSET				7
+#define MACB_UNI_SIZE				1
+#define MACB_BIG_OFFSET				8
+#define MACB_BIG_SIZE				1
+#define MACB_EAE_OFFSET				9
+#define MACB_EAE_SIZE				1
+#define MACB_CLK_OFFSET				10
+#define MACB_CLK_SIZE				2
+#define MACB_RTY_OFFSET				12
+#define MACB_RTY_SIZE				1
+#define MACB_PAE_OFFSET				13
+#define MACB_PAE_SIZE				1
+#define MACB_RM9200_RMII_OFFSET			13 /* AT91RM9200 only */
+#define MACB_RM9200_RMII_SIZE			1  /* AT91RM9200 only */
+#define MACB_RBOF_OFFSET			14
+#define MACB_RBOF_SIZE				2
+#define MACB_RLCE_OFFSET			16
+#define MACB_RLCE_SIZE				1
+#define MACB_DRFCS_OFFSET			17
+#define MACB_DRFCS_SIZE				1
+#define MACB_EFRHD_OFFSET			18
+#define MACB_EFRHD_SIZE				1
+#define MACB_IRXFCS_OFFSET			19
+#define MACB_IRXFCS_SIZE			1
+
+/* GEM specific NCFGR bitfields. */
+#define GEM_GBE_OFFSET				10
+#define GEM_GBE_SIZE				1
+#define GEM_CLK_OFFSET				18
+#define GEM_CLK_SIZE				3
+#define GEM_DBW_OFFSET				21
+#define GEM_DBW_SIZE				2
+
+/* Constants for data bus width. */
+#define GEM_DBW32				0
+#define GEM_DBW64				1
+#define GEM_DBW128				2
+
+/* Bitfields in DMACFG. */
+#define GEM_FBLDO_OFFSET			0
+#define GEM_FBLDO_SIZE				5
+#define GEM_ENDIA_OFFSET			7
+#define GEM_ENDIA_SIZE				1
+#define GEM_RXBMS_OFFSET			8
+#define GEM_RXBMS_SIZE				2
+#define GEM_TXPBMS_OFFSET			10
+#define GEM_TXPBMS_SIZE				1
+#define GEM_TXCOEN_OFFSET			11
+#define GEM_TXCOEN_SIZE				1
+#define GEM_RXBS_OFFSET				16
+#define GEM_RXBS_SIZE				8
+#define GEM_DDRP_OFFSET				24
+#define GEM_DDRP_SIZE				1
+
+
+/* Bitfields in NSR */
+#define MACB_NSR_LINK_OFFSET			0
+#define MACB_NSR_LINK_SIZE			1
+#define MACB_MDIO_OFFSET			1
+#define MACB_MDIO_SIZE				1
+#define MACB_IDLE_OFFSET			2
+#define MACB_IDLE_SIZE				1
+
+/* Bitfields in TSR */
+#define MACB_UBR_OFFSET				0
+#define MACB_UBR_SIZE				1
+#define MACB_COL_OFFSET				1
+#define MACB_COL_SIZE				1
+#define MACB_TSR_RLE_OFFSET			2
+#define MACB_TSR_RLE_SIZE			1
+#define MACB_TGO_OFFSET				3
+#define MACB_TGO_SIZE				1
+#define MACB_BEX_OFFSET				4
+#define MACB_BEX_SIZE				1
+#define MACB_RM9200_BNQ_OFFSET			4 /* AT91RM9200 only */
+#define MACB_RM9200_BNQ_SIZE			1 /* AT91RM9200 only */
+#define MACB_COMP_OFFSET			5
+#define MACB_COMP_SIZE				1
+#define MACB_UND_OFFSET				6
+#define MACB_UND_SIZE				1
+
+/* Bitfields in RSR */
+#define MACB_BNA_OFFSET				0
+#define MACB_BNA_SIZE				1
+#define MACB_REC_OFFSET				1
+#define MACB_REC_SIZE				1
+#define MACB_OVR_OFFSET				2
+#define MACB_OVR_SIZE				1
+
+/* Bitfields in ISR/IER/IDR/IMR */
+#define MACB_MFD_OFFSET				0
+#define MACB_MFD_SIZE				1
+#define MACB_RCOMP_OFFSET			1
+#define MACB_RCOMP_SIZE				1
+#define MACB_RXUBR_OFFSET			2
+#define MACB_RXUBR_SIZE				1
+#define MACB_TXUBR_OFFSET			3
+#define MACB_TXUBR_SIZE				1
+#define MACB_ISR_TUND_OFFSET			4
+#define MACB_ISR_TUND_SIZE			1
+#define MACB_ISR_RLE_OFFSET			5
+#define MACB_ISR_RLE_SIZE			1
+#define MACB_TXERR_OFFSET			6
+#define MACB_TXERR_SIZE				1
+#define MACB_TCOMP_OFFSET			7
+#define MACB_TCOMP_SIZE				1
+#define MACB_ISR_LINK_OFFSET			9
+#define MACB_ISR_LINK_SIZE			1
+#define MACB_ISR_ROVR_OFFSET			10
+#define MACB_ISR_ROVR_SIZE			1
+#define MACB_HRESP_OFFSET			11
+#define MACB_HRESP_SIZE				1
+#define MACB_PFR_OFFSET				12
+#define MACB_PFR_SIZE				1
+#define MACB_PTZ_OFFSET				13
+#define MACB_PTZ_SIZE				1
+
+/* Bitfields in MAN */
+#define MACB_DATA_OFFSET			0
+#define MACB_DATA_SIZE				16
+#define MACB_CODE_OFFSET			16
+#define MACB_CODE_SIZE				2
+#define MACB_REGA_OFFSET			18
+#define MACB_REGA_SIZE				5
+#define MACB_PHYA_OFFSET			23
+#define MACB_PHYA_SIZE				5
+#define MACB_RW_OFFSET				28
+#define MACB_RW_SIZE				2
+#define MACB_SOF_OFFSET				30
+#define MACB_SOF_SIZE				2
+
+/* Bitfields in USRIO (AVR32) */
+#define MACB_MII_OFFSET				0
+#define MACB_MII_SIZE				1
+#define MACB_EAM_OFFSET				1
+#define MACB_EAM_SIZE				1
+#define MACB_TX_PAUSE_OFFSET			2
+#define MACB_TX_PAUSE_SIZE			1
+#define MACB_TX_PAUSE_ZERO_OFFSET		3
+#define MACB_TX_PAUSE_ZERO_SIZE			1
+
+/* Bitfields in USRIO (AT91) */
+#define MACB_RMII_OFFSET			0
+#define MACB_RMII_SIZE				1
+#define GEM_RGMII_OFFSET			0	/* GEM gigabit mode */
+#define GEM_RGMII_SIZE				1
+#define MACB_CLKEN_OFFSET			1
+#define MACB_CLKEN_SIZE				1
+
+/* Bitfields in WOL */
+#define MACB_IP_OFFSET				0
+#define MACB_IP_SIZE				16
+#define MACB_MAG_OFFSET				16
+#define MACB_MAG_SIZE				1
+#define MACB_ARP_OFFSET				17
+#define MACB_ARP_SIZE				1
+#define MACB_SA1_OFFSET				18
+#define MACB_SA1_SIZE				1
+#define MACB_WOL_MTI_OFFSET			19
+#define MACB_WOL_MTI_SIZE			1
+
+/* Bitfields in MID */
+#define MACB_IDNUM_OFFSET			16
+#define MACB_IDNUM_SIZE				16
+#define MACB_REV_OFFSET				0
+#define MACB_REV_SIZE				16
+
+/* Bitfields in DCFG1. */
+#define GEM_IRQCOR_OFFSET			23
+#define GEM_IRQCOR_SIZE				1
+#define GEM_DBWDEF_OFFSET			25
+#define GEM_DBWDEF_SIZE				3
+
+/* Constants for CLK */
+#define MACB_CLK_DIV8				0
+#define MACB_CLK_DIV16				1
+#define MACB_CLK_DIV32				2
+#define MACB_CLK_DIV64				3
+
+/* GEM specific constants for CLK. */
+#define GEM_CLK_DIV8				0
+#define GEM_CLK_DIV16				1
+#define GEM_CLK_DIV32				2
+#define GEM_CLK_DIV48				3
+#define GEM_CLK_DIV64				4
+#define GEM_CLK_DIV96				5
+
+/* Constants for MAN register */
+#define MACB_MAN_SOF				1
+#define MACB_MAN_WRITE				1
+#define MACB_MAN_READ				2
+#define MACB_MAN_CODE				2
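+
+/* Illustrative sketch, not code from this driver: using the MAN bitfields
+ * above, a clause-22 MDIO read of register 'reg' on PHY address 'phy' would
+ * typically be started by writing
+ *   MACB_BF(SOF, MACB_MAN_SOF) | MACB_BF(RW, MACB_MAN_READ) |
+ *   MACB_BF(PHYA, phy) | MACB_BF(REGA, reg) | MACB_BF(CODE, MACB_MAN_CODE)
+ * to MAN and then polling the IDLE bit in NSR before reading the DATA field
+ * back out of MAN.
+ */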
+
+/* Capability mask bits */
+#define MACB_CAPS_ISR_CLEAR_ON_WRITE		0x1
+
+/* Bit manipulation macros */
+#define MACB_BIT(name)					\
+	(1 << MACB_##name##_OFFSET)
+#define MACB_BF(name,value)				\
+	(((value) & ((1 << MACB_##name##_SIZE) - 1))	\
+	 << MACB_##name##_OFFSET)
+#define MACB_BFEXT(name,value)\
+	(((value) >> MACB_##name##_OFFSET)		\
+	 & ((1 << MACB_##name##_SIZE) - 1))
+#define MACB_BFINS(name,value,old)			\
+	(((old) & ~(((1 << MACB_##name##_SIZE) - 1)	\
+		    << MACB_##name##_OFFSET))		\
+	 | MACB_BF(name,value))
+
+#define GEM_BIT(name)					\
+	(1 << GEM_##name##_OFFSET)
+#define GEM_BF(name, value)				\
+	(((value) & ((1 << GEM_##name##_SIZE) - 1))	\
+	 << GEM_##name##_OFFSET)
+#define GEM_BFEXT(name, value)\
+	(((value) >> GEM_##name##_OFFSET)		\
+	 & ((1 << GEM_##name##_SIZE) - 1))
+#define GEM_BFINS(name, value, old)			\
+	(((old) & ~(((1 << GEM_##name##_SIZE) - 1)	\
+		    << GEM_##name##_OFFSET))		\
+	 | GEM_BF(name, value))
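+
+/* Example use of the helpers above (values illustrative): MACB_BF(CLK,
+ * MACB_CLK_DIV32) builds the MDC clock-divider field of NCFGR, and
+ * MACB_BFEXT(IDNUM, <value read from MID>) extracts the module ID number
+ * from a MID register value.
+ */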
+
+/* Register access macros */
+#define macb_readl(port,reg)				\
+	__raw_readl((port)->regs + MACB_##reg)
+#define macb_writel(port,reg,value)			\
+	__raw_writel((value), (port)->regs + MACB_##reg)
+#define gem_readl(port, reg)				\
+	__raw_readl((port)->regs + GEM_##reg)
+#define gem_writel(port, reg, value)			\
+	__raw_writel((value), (port)->regs + GEM_##reg)
+
+/*
+ * Conditional GEM/MACB macros.  These perform the operation to the correct
+ * register dependent on whether the device is a GEM or a MACB.  For registers
+ * and bitfields that are common across both devices, use macb_{read,write}l
+ * to avoid the cost of the conditional.
+ */
+#define macb_or_gem_writel(__bp, __reg, __value) \
+	({ \
+		if (macb_is_gem((__bp))) \
+			gem_writel((__bp), __reg, __value); \
+		else \
+			macb_writel((__bp), __reg, __value); \
+	})
+
+#define macb_or_gem_readl(__bp, __reg) \
+	({ \
+		u32 __v; \
+		if (macb_is_gem((__bp))) \
+			__v = gem_readl((__bp), __reg); \
+		else \
+			__v = macb_readl((__bp), __reg); \
+		__v; \
+	})
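+
+/* Concrete case for the conditional accessors: the first specific-address
+ * register sits at MACB_SA1B (0x0098) on MACB but at GEM_SA1B (0x0088) on
+ * GEM, so accesses like that belong behind macb_or_gem_writel()/readl().
+ */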
+
+/**
+ * @brief Hardware DMA descriptor
+ * @anchor macb_dma_desc
+ */
+struct macb_dma_desc {
+	/** DMA address of data buffer */
+	u32	addr;
+	/** Control and status bits */
+	u32	ctrl;
+};
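+
+/* Interpretation sketch based on the bitfields below: for RX descriptors the
+ * USED and WRAP bits share 'addr' with the word-aligned buffer address, while
+ * frame length, SOF/EOF and the match flags are reported in 'ctrl'; for TX
+ * descriptors all control/status bits, including TX_USED and TX_WRAP, live in
+ * 'ctrl'.  E.g. MACB_BFEXT(RX_FRMLEN, desc->ctrl) yields the received length.
+ */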
+
+/* DMA descriptor bitfields */
+#define MACB_RX_USED_OFFSET			0
+#define MACB_RX_USED_SIZE			1
+#define MACB_RX_WRAP_OFFSET			1
+#define MACB_RX_WRAP_SIZE			1
+#define MACB_RX_WADDR_OFFSET			2
+#define MACB_RX_WADDR_SIZE			30
+
+#define MACB_RX_FRMLEN_OFFSET			0
+#define MACB_RX_FRMLEN_SIZE			12
+#define MACB_RX_OFFSET_OFFSET			12
+#define MACB_RX_OFFSET_SIZE			2
+#define MACB_RX_SOF_OFFSET			14
+#define MACB_RX_SOF_SIZE			1
+#define MACB_RX_EOF_OFFSET			15
+#define MACB_RX_EOF_SIZE			1
+#define MACB_RX_CFI_OFFSET			16
+#define MACB_RX_CFI_SIZE			1
+#define MACB_RX_VLAN_PRI_OFFSET			17
+#define MACB_RX_VLAN_PRI_SIZE			3
+#define MACB_RX_PRI_TAG_OFFSET			20
+#define MACB_RX_PRI_TAG_SIZE			1
+#define MACB_RX_VLAN_TAG_OFFSET			21
+#define MACB_RX_VLAN_TAG_SIZE			1
+#define MACB_RX_TYPEID_MATCH_OFFSET		22
+#define MACB_RX_TYPEID_MATCH_SIZE		1
+#define MACB_RX_SA4_MATCH_OFFSET		23
+#define MACB_RX_SA4_MATCH_SIZE			1
+#define MACB_RX_SA3_MATCH_OFFSET		24
+#define MACB_RX_SA3_MATCH_SIZE			1
+#define MACB_RX_SA2_MATCH_OFFSET		25
+#define MACB_RX_SA2_MATCH_SIZE			1
+#define MACB_RX_SA1_MATCH_OFFSET		26
+#define MACB_RX_SA1_MATCH_SIZE			1
+#define MACB_RX_EXT_MATCH_OFFSET		28
+#define MACB_RX_EXT_MATCH_SIZE			1
+#define MACB_RX_UHASH_MATCH_OFFSET		29
+#define MACB_RX_UHASH_MATCH_SIZE		1
+#define MACB_RX_MHASH_MATCH_OFFSET		30
+#define MACB_RX_MHASH_MATCH_SIZE		1
+#define MACB_RX_BROADCAST_OFFSET		31
+#define MACB_RX_BROADCAST_SIZE			1
+
+#define MACB_TX_FRMLEN_OFFSET			0
+#define MACB_TX_FRMLEN_SIZE			11
+#define MACB_TX_LAST_OFFSET			15
+#define MACB_TX_LAST_SIZE			1
+#define MACB_TX_NOCRC_OFFSET			16
+#define MACB_TX_NOCRC_SIZE			1
+#define MACB_TX_BUF_EXHAUSTED_OFFSET		27
+#define MACB_TX_BUF_EXHAUSTED_SIZE		1
+#define MACB_TX_UNDERRUN_OFFSET			28
+#define MACB_TX_UNDERRUN_SIZE			1
+#define MACB_TX_ERROR_OFFSET			29
+#define MACB_TX_ERROR_SIZE			1
+#define MACB_TX_WRAP_OFFSET			30
+#define MACB_TX_WRAP_SIZE			1
+#define MACB_TX_USED_OFFSET			31
+#define MACB_TX_USED_SIZE			1
+
+/**
+ * @brief Data about an skb which is being transmitted
+ * @anchor macb_tx_skb
+ */
+struct macb_tx_skb {
+	/** skb currently being transmitted */
+	struct rtskb		*skb;
+	/** DMA address of the skb's data buffer */
+	dma_addr_t		mapping;
+};
+
+/*
+ * Hardware-collected statistics. Used when updating the network
+ * device stats by a periodic timer.
+ */
+struct macb_stats {
+	u32	rx_pause_frames;
+	u32	tx_ok;
+	u32	tx_single_cols;
+	u32	tx_multiple_cols;
+	u32	rx_ok;
+	u32	rx_fcs_errors;
+	u32	rx_align_errors;
+	u32	tx_deferred;
+	u32	tx_late_cols;
+	u32	tx_excessive_cols;
+	u32	tx_underruns;
+	u32	tx_carrier_errors;
+	u32	rx_resource_errors;
+	u32	rx_overruns;
+	u32	rx_symbol_errors;
+	u32	rx_oversize_pkts;
+	u32	rx_jabbers;
+	u32	rx_undersize_pkts;
+	u32	sqe_test_errors;
+	u32	rx_length_mismatch;
+	u32	tx_pause_frames;
+};
+
+struct gem_stats {
+	u32	tx_octets_31_0;
+	u32	tx_octets_47_32;
+	u32	tx_frames;
+	u32	tx_broadcast_frames;
+	u32	tx_multicast_frames;
+	u32	tx_pause_frames;
+	u32	tx_64_byte_frames;
+	u32	tx_65_127_byte_frames;
+	u32	tx_128_255_byte_frames;
+	u32	tx_256_511_byte_frames;
+	u32	tx_512_1023_byte_frames;
+	u32	tx_1024_1518_byte_frames;
+	u32	tx_greater_than_1518_byte_frames;
+	u32	tx_underrun;
+	u32	tx_single_collision_frames;
+	u32	tx_multiple_collision_frames;
+	u32	tx_excessive_collisions;
+	u32	tx_late_collisions;
+	u32	tx_deferred_frames;
+	u32	tx_carrier_sense_errors;
+	u32	rx_octets_31_0;
+	u32	rx_octets_47_32;
+	u32	rx_frames;
+	u32	rx_broadcast_frames;
+	u32	rx_multicast_frames;
+	u32	rx_pause_frames;
+	u32	rx_64_byte_frames;
+	u32	rx_65_127_byte_frames;
+	u32	rx_128_255_byte_frames;
+	u32	rx_256_511_byte_frames;
+	u32	rx_512_1023_byte_frames;
+	u32	rx_1024_1518_byte_frames;
+	u32	rx_greater_than_1518_byte_frames;
+	u32	rx_undersized_frames;
+	u32	rx_oversize_frames;
+	u32	rx_jabbers;
+	u32	rx_frame_check_sequence_errors;
+	u32	rx_length_field_frame_errors;
+	u32	rx_symbol_errors;
+	u32	rx_alignment_errors;
+	u32	rx_resource_errors;
+	u32	rx_overruns;
+	u32	rx_ip_header_checksum_errors;
+	u32	rx_tcp_checksum_errors;
+	u32	rx_udp_checksum_errors;
+};
+
+struct macb;
+
+struct macb_or_gem_ops {
+	int	(*mog_alloc_rx_buffers)(struct macb *bp);
+	void	(*mog_free_rx_buffers)(struct macb *bp);
+	void	(*mog_init_rings)(struct macb *bp);
+	int	(*mog_rx)(struct macb *bp, int budget, nanosecs_abs_t *ts);
+};
+
+struct macb {
+	void __iomem		*regs;
+
+	unsigned int		rx_tail;
+	unsigned int		rx_prepared_head;
+	struct macb_dma_desc	*rx_ring;
+	struct rtskb		**rx_skbuff;
+	void			*rx_buffers;
+	size_t			rx_buffer_size;
+
+	unsigned int		tx_head, tx_tail;
+	struct macb_dma_desc	*tx_ring;
+	struct macb_tx_skb	*tx_skb;
+
+	rtdm_lock_t		lock;
+	struct platform_device	*pdev;
+	struct clk		*pclk;
+	struct clk		*hclk;
+	struct clk		*tx_clk;
+	struct rtnet_device	*dev;
+	struct work_struct	tx_error_task;
+	struct net_device_stats	stats;
+	union {
+		struct macb_stats	macb;
+		struct gem_stats	gem;
+	}			hw_stats;
+
+	dma_addr_t		rx_ring_dma;
+	dma_addr_t		tx_ring_dma;
+	dma_addr_t		rx_buffers_dma;
+
+	struct macb_or_gem_ops	macbgem_ops;
+
+	struct mii_bus		*mii_bus;
+	struct phy_device	*phy_dev;
+	unsigned int		link;
+	unsigned int		speed;
+	unsigned int		duplex;
+
+	u32			caps;
+
+	phy_interface_t		phy_interface;
+
+	struct net_device	*phy_phony_net_device;
+	rtdm_irq_t		irq_handle;
+
+	/* AT91RM9200 transmit */
+	struct rtskb *skb;			/* holds skb until xmit interrupt completes */
+	dma_addr_t skb_physaddr;		/* phys addr from pci_map_single */
+	int skb_length;				/* saved skb length for pci_unmap_single */
+};
+
+extern const struct ethtool_ops macb_ethtool_ops;
+
+int rtmacb_mii_init(struct macb *bp);
+int rtmacb_ioctl(struct rtnet_device *dev, unsigned cmd, void *arg);
+struct net_device_stats *rtmacb_get_stats(struct rtnet_device *dev);
+void rtmacb_set_hwaddr(struct macb *bp);
+void rtmacb_get_hwaddr(struct macb *bp);
+
+static inline bool macb_is_gem(struct macb *bp)
+{
+	return MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2;
+}
+
+#endif /* _MACB_H */
+++ linux-patched/drivers/xenomai/net/drivers/8139too.c	2022-03-21 12:58:29.590887001 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000e/hw.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ * rt_8139too.c - Real-time RTnet driver for RealTek RTL-8139 adapters;
+ * for more information, see the end of this file or '8139too.c'.
+ *
+ * Copyright (C) 2002      Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+ /*
+  * This version was modified by Fabian Koch.
+  * It includes a different implementation of the 'cards' module parameter:
+  * an array of integers determines which cards to use
+  * for RTnet (e.g. cards=0,1,0).
+  *
+  * Thanks to Jan Kiszka for this idea
+  */
+
+#define DRV_NAME            "rt_8139too"
+#define DRV_VERSION         "0.9.24-rt0.7"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/completion.h>
+#include <linux/crc32.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+
+/* *** RTnet *** */
+#include <rtnet_port.h>
+
+#define MAX_UNITS               8
+#define DEFAULT_RX_POOL_SIZE    16
+
+static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+static int media[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = -1 };
+static unsigned int rx_pool_size = DEFAULT_RX_POOL_SIZE;
+module_param_array(cards, int, NULL, 0444);
+module_param_array(media, int, NULL, 0444);
+module_param(rx_pool_size, uint, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+MODULE_PARM_DESC(media, "8139too: Bits 4+9: force full duplex, bit 5: 100Mbps");
+MODULE_PARM_DESC(rx_pool_size, "number of receive buffers");
+
+/* *** RTnet *** */
+
+
+#define RTL8139_DRIVER_NAME   DRV_NAME " Fast Ethernet driver " DRV_VERSION
+#define PFX DRV_NAME ": "
+
+/* enable PIO instead of MMIO, if CONFIG_8139TOO_PIO is selected */
+/* *** RTnet ***
+#ifdef CONFIG_8139TOO_PIO
+#define USE_IO_OPS 1
+#endif
+ *** RTnet *** */
+
+/* Size of the in-memory receive ring. */
+#define RX_BUF_LEN_IDX        2        /* 0==8K, 1==16K, 2==32K, 3==64K */
+#define RX_BUF_LEN        (8192 << RX_BUF_LEN_IDX)
+#define RX_BUF_PAD        16
+#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */
+#define RX_BUF_TOT_LEN        (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD)
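+/* With RX_BUF_LEN_IDX == 2: RX_BUF_LEN = 8192 << 2 = 32768 bytes, so
+   RX_BUF_TOT_LEN = 32768 + 16 + 2048 = 34832 bytes are allocated per device. */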
+
+/* Number of Tx descriptor registers. */
+#define NUM_TX_DESC        4
+
+/* max supported ethernet frame size -- must be at least (rtdev->mtu+14+4).*/
+#define MAX_ETH_FRAME_SIZE        1536
+
+/* Size of the Tx bounce buffers -- must be at least (rtdev->mtu+14+4). */
+#define TX_BUF_SIZE        MAX_ETH_FRAME_SIZE
+#define TX_BUF_TOT_LEN        (TX_BUF_SIZE * NUM_TX_DESC)
+
+/* PCI Tuning Parameters
+   Threshold is bytes transferred to chip before transmission starts. */
+#define TX_FIFO_THRESH 256        /* In bytes, rounded down to 32 byte units. */
+
+/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
+#define RX_FIFO_THRESH        7        /* Rx buffer level before first PCI xfer.  */
+#define RX_DMA_BURST        7        /* Maximum PCI burst, '6' is 1024 */
+#define TX_DMA_BURST        6        /* Maximum PCI burst, '6' is 1024 */
+#define TX_RETRY        8        /* 0-15.  retries = 16 + (TX_RETRY * 16) */
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (6*HZ)
+
+
+enum {
+	HAS_MII_XCVR = 0x010000,
+	HAS_CHIP_XCVR = 0x020000,
+	HAS_LNK_CHNG = 0x040000,
+};
+
+#define RTL_MIN_IO_SIZE 0x80
+#define RTL8139B_IO_SIZE 256
+
+#define RTL8129_CAPS        HAS_MII_XCVR
+#define RTL8139_CAPS        HAS_CHIP_XCVR|HAS_LNK_CHNG
+
+typedef enum {
+	RTL8139 = 0,
+	RTL8139_CB,
+	SMC1211TX,
+	/*MPX5030,*/
+	DELTA8139,
+	ADDTRON8139,
+	DFE538TX,
+	DFE690TXD,
+	FE2000VX,
+	ALLIED8139,
+	RTL8129,
+} board_t;
+
+
+/* indexed by board_t, above */
+static struct {
+	const char *name;
+	u32 hw_flags;
+} board_info[] = {
+	{ "RealTek RTL8139", RTL8139_CAPS },
+	{ "RealTek RTL8129", RTL8129_CAPS },
+};
+
+
+static struct pci_device_id rtl8139_pci_tbl[] = {
+	{0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x1500, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x1186, 0x1300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x1186, 0x1340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x13d1, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x1259, 0xa117, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x1259, 0xa11e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x14ea, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x14ea, 0xab07, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x11db, 0x1234, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x1432, 0x9130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x02ac, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x018a, 0x0106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+
+#ifdef CONFIG_SH_SECUREEDGE5410
+	/* Bogus 8139 silicon reports 8129 without external PROM :-( */
+	{0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+#endif
+#ifdef CONFIG_8139TOO_8129
+	{0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8129 },
+#endif
+
+	/* some crazy cards report invalid vendor ids like
+	 * 0x0001 here.  The other ids are valid and constant,
+	 * so we simply don't match on the main vendor id.
+	 */
+	{PCI_ANY_ID, 0x8139, 0x10ec, 0x8139, 0, 0, RTL8139 },
+	{PCI_ANY_ID, 0x8139, 0x1186, 0x1300, 0, 0, RTL8139 },
+	{PCI_ANY_ID, 0x8139, 0x13d1, 0xab06, 0, 0, RTL8139 },
+
+	{0,}
+};
+MODULE_DEVICE_TABLE (pci, rtl8139_pci_tbl);
+
+/* The rest of these values should never change. */
+
+/* Symbolic offsets to registers. */
+enum RTL8139_registers {
+	MAC0 = 0,                /* Ethernet hardware address. */
+	MAR0 = 8,                /* Multicast filter. */
+	TxStatus0 = 0x10,        /* Transmit status (Four 32bit registers). */
+	TxAddr0 = 0x20,                /* Tx descriptors (also four 32bit). */
+	RxBuf = 0x30,
+	ChipCmd = 0x37,
+	RxBufPtr = 0x38,
+	RxBufAddr = 0x3A,
+	IntrMask = 0x3C,
+	IntrStatus = 0x3E,
+	TxConfig = 0x40,
+	ChipVersion = 0x43,
+	RxConfig = 0x44,
+	Timer = 0x48,                /* A general-purpose counter. */
+	RxMissed = 0x4C,        /* 24 bits valid, write clears. */
+	Cfg9346 = 0x50,
+	Config0 = 0x51,
+	Config1 = 0x52,
+	FlashReg = 0x54,
+	MediaStatus = 0x58,
+	Config3 = 0x59,
+	Config4 = 0x5A,                /* absent on RTL-8139A */
+	HltClk = 0x5B,
+	MultiIntr = 0x5C,
+	TxSummary = 0x60,
+	BasicModeCtrl = 0x62,
+	BasicModeStatus = 0x64,
+	NWayAdvert = 0x66,
+	NWayLPAR = 0x68,
+	NWayExpansion = 0x6A,
+	/* Undocumented registers, but required for proper operation. */
+	FIFOTMS = 0x70,                /* FIFO Control and test. */
+	CSCR = 0x74,                /* Chip Status and Configuration Register. */
+	PARA78 = 0x78,
+	PARA7c = 0x7c,                /* Magic transceiver parameter register. */
+	Config5 = 0xD8,                /* absent on RTL-8139A */
+};
+
+enum ClearBitMasks {
+	MultiIntrClear = 0xF000,
+	ChipCmdClear = 0xE2,
+	Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1),
+};
+
+enum ChipCmdBits {
+	CmdReset = 0x10,
+	CmdRxEnb = 0x08,
+	CmdTxEnb = 0x04,
+	RxBufEmpty = 0x01,
+};
+
+/* Interrupt register bits, using my own meaningful names. */
+enum IntrStatusBits {
+	PCIErr = 0x8000,
+	PCSTimeout = 0x4000,
+	RxFIFOOver = 0x40,
+	RxUnderrun = 0x20,
+	RxOverflow = 0x10,
+	TxErr = 0x08,
+	TxOK = 0x04,
+	RxErr = 0x02,
+	RxOK = 0x01,
+
+	RxAckBits = RxFIFOOver | RxOverflow | RxOK,
+};
+
+enum TxStatusBits {
+	TxHostOwns = 0x2000,
+	TxUnderrun = 0x4000,
+	TxStatOK = 0x8000,
+	TxOutOfWindow = 0x20000000,
+	TxAborted = 0x40000000,
+	TxCarrierLost = 0x80000000,
+};
+enum RxStatusBits {
+	RxMulticast = 0x8000,
+	RxPhysical = 0x4000,
+	RxBroadcast = 0x2000,
+	RxBadSymbol = 0x0020,
+	RxRunt = 0x0010,
+	RxTooLong = 0x0008,
+	RxCRCErr = 0x0004,
+	RxBadAlign = 0x0002,
+	RxStatusOK = 0x0001,
+};
+
+/* Bits in RxConfig. */
+enum rx_mode_bits {
+	AcceptErr = 0x20,
+	AcceptRunt = 0x10,
+	AcceptBroadcast = 0x08,
+	AcceptMulticast = 0x04,
+	AcceptMyPhys = 0x02,
+	AcceptAllPhys = 0x01,
+};
+
+/* Bits in TxConfig. */
+enum tx_config_bits {
+
+	/* Interframe Gap Time. Only TxIFG96 doesn't violate IEEE 802.3 */
+	TxIFGShift = 24,
+	TxIFG84 = (0 << TxIFGShift),    /* 8.4us / 840ns (10 / 100Mbps) */
+	TxIFG88 = (1 << TxIFGShift),    /* 8.8us / 880ns (10 / 100Mbps) */
+	TxIFG92 = (2 << TxIFGShift),    /* 9.2us / 920ns (10 / 100Mbps) */
+	TxIFG96 = (3 << TxIFGShift),    /* 9.6us / 960ns (10 / 100Mbps) */
+
+	TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */
+	TxCRC = (1 << 16),        /* DISABLE appending CRC to end of Tx packets */
+	TxClearAbt = (1 << 0),        /* Clear abort (WO) */
+	TxDMAShift = 8,                /* DMA burst value (0-7) is shifted this many bits */
+	TxRetryShift = 4,        /* TXRR value (0-15) is shifted this many bits */
+
+	TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */
+};
+
+/* Bits in Config1 */
+enum Config1Bits {
+	Cfg1_PM_Enable = 0x01,
+	Cfg1_VPD_Enable = 0x02,
+	Cfg1_PIO = 0x04,
+	Cfg1_MMIO = 0x08,
+	LWAKE = 0x10,                /* not on 8139, 8139A */
+	Cfg1_Driver_Load = 0x20,
+	Cfg1_LED0 = 0x40,
+	Cfg1_LED1 = 0x80,
+	SLEEP = (1 << 1),        /* only on 8139, 8139A */
+	PWRDN = (1 << 0),        /* only on 8139, 8139A */
+};
+
+/* Bits in Config3 */
+enum Config3Bits {
+	Cfg3_FBtBEn    = (1 << 0), /* 1 = Fast Back to Back */
+	Cfg3_FuncRegEn = (1 << 1), /* 1 = enable CardBus Function registers */
+	Cfg3_CLKRUN_En = (1 << 2), /* 1 = enable CLKRUN */
+	Cfg3_CardB_En  = (1 << 3), /* 1 = enable CardBus registers */
+	Cfg3_LinkUp    = (1 << 4), /* 1 = wake up on link up */
+	Cfg3_Magic     = (1 << 5), /* 1 = wake up on Magic Packet (tm) */
+	Cfg3_PARM_En   = (1 << 6), /* 0 = software can set twister parameters */
+	Cfg3_GNTSel    = (1 << 7), /* 1 = delay 1 clock from PCI GNT signal */
+};
+
+/* Bits in Config4 */
+enum Config4Bits {
+	LWPTN = (1 << 2),        /* not on 8139, 8139A */
+};
+
+/* Bits in Config5 */
+enum Config5Bits {
+	Cfg5_PME_STS     = (1 << 0), /* 1 = PCI reset resets PME_Status */
+	Cfg5_LANWake     = (1 << 1), /* 1 = enable LANWake signal */
+	Cfg5_LDPS        = (1 << 2), /* 0 = save power when link is down */
+	Cfg5_FIFOAddrPtr = (1 << 3), /* Realtek internal SRAM testing */
+	Cfg5_UWF         = (1 << 4), /* 1 = accept unicast wakeup frame */
+	Cfg5_MWF         = (1 << 5), /* 1 = accept multicast wakeup frame */
+	Cfg5_BWF         = (1 << 6), /* 1 = accept broadcast wakeup frame */
+};
+
+enum RxConfigBits {
+	/* rx fifo threshold */
+	RxCfgFIFOShift = 13,
+	RxCfgFIFONone = (7 << RxCfgFIFOShift),
+
+	/* Max DMA burst */
+	RxCfgDMAShift = 8,
+	RxCfgDMAUnlimited = (7 << RxCfgDMAShift),
+
+	/* rx ring buffer length */
+	RxCfgRcv8K = 0,
+	RxCfgRcv16K = (1 << 11),
+	RxCfgRcv32K = (1 << 12),
+	RxCfgRcv64K = (1 << 11) | (1 << 12),
+
+	/* Disable packet wrap at end of Rx buffer */
+	RxNoWrap = (1 << 7),
+};
+
+
+/* Twister tuning parameters from RealTek.
+   Completely undocumented, but required to tune bad links. */
+enum CSCRBits {
+	CSCR_LinkOKBit = 0x0400,
+	CSCR_LinkChangeBit = 0x0800,
+	CSCR_LinkStatusBits = 0x0f000,
+	CSCR_LinkDownOffCmd = 0x003c0,
+	CSCR_LinkDownCmd = 0x0f3c0,
+};
+
+
+enum Cfg9346Bits {
+	Cfg9346_Lock = 0x00,
+	Cfg9346_Unlock = 0xC0,
+};
+
+
+#define PARA78_default        0x78fa8388
+#define PARA7c_default        0xcb38de43        /* param[0][3] */
+#define PARA7c_xxx                0xcb38de43
+/*static const unsigned long param[4][4] = {
+	{0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
+	{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+	{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+	{0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
+};*/
+
+typedef enum {
+	CH_8139 = 0,
+	CH_8139_K,
+	CH_8139A,
+	CH_8139B,
+	CH_8130,
+	CH_8139C,
+} chip_t;
+
+enum chip_flags {
+	HasHltClk = (1 << 0),
+	HasLWake = (1 << 1),
+};
+
+
+/* directly indexed by chip_t, above */
+static const struct {
+	const char *name;
+	u8 version; /* from RTL8139C docs */
+	u32 flags;
+} rtl_chip_info[] = {
+	{ "RTL-8139",
+	  0x40,
+	  HasHltClk,
+	},
+
+	{ "RTL-8139 rev K",
+	  0x60,
+	  HasHltClk,
+	},
+
+	{ "RTL-8139A",
+	  0x70,
+	  HasHltClk, /* XXX undocumented? */
+	},
+
+	{ "RTL-8139A rev G",
+	  0x72,
+	  HasHltClk, /* XXX undocumented? */
+	},
+
+	{ "RTL-8139B",
+	  0x78,
+	  HasLWake,
+	},
+
+	{ "RTL-8130",
+	  0x7C,
+	  HasLWake,
+	},
+
+	{ "RTL-8139C",
+	  0x74,
+	  HasLWake,
+	},
+
+	{ "RTL-8100",
+	  0x7A,
+	  HasLWake,
+	 },
+
+	{ "RTL-8100B/8139D",
+	  0x75,
+	  HasHltClk /* XXX undocumented? */
+	  | HasLWake,
+	},
+
+	{ "RTL-8101",
+	  0x77,
+	  HasLWake,
+	},
+};
+
+struct rtl_extra_stats {
+	unsigned long early_rx;
+	unsigned long tx_buf_mapped;
+	unsigned long tx_timeouts;
+	unsigned long rx_lost_in_ring;
+};
+
+struct rtl8139_private {
+	void *mmio_addr;
+	int drv_flags;
+	struct pci_dev *pci_dev;
+	struct net_device_stats stats;
+	unsigned char *rx_ring;
+	unsigned int cur_rx;        /* Index into the Rx buffer of next Rx pkt. */
+	unsigned int tx_flag;
+	unsigned long cur_tx;
+	unsigned long dirty_tx;
+	unsigned char *tx_buf[NUM_TX_DESC];        /* Tx bounce buffers */
+	unsigned char *tx_bufs;        /* Tx bounce buffer region. */
+	dma_addr_t rx_ring_dma;
+	dma_addr_t tx_bufs_dma;
+	signed char phys[4];                /* MII device addresses. */
+	char twistie, twist_row, twist_col;        /* Twister tune state. */
+	unsigned int default_port:4;        /* Last rtdev->if_port value. */
+	unsigned int medialock:1;        /* Don't sense media type. */
+	rtdm_lock_t lock;
+	chip_t chipset;
+	pid_t thr_pid;
+	u32 rx_config;
+	struct rtl_extra_stats xstats;
+	int time_to_die;
+	struct mii_if_info mii;
+	rtdm_irq_t irq_handle;
+};
+
+MODULE_AUTHOR ("Jeff Garzik <jgarzik@mandrakesoft.com>");
+MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+
+static int read_eeprom (void *ioaddr, int location, int addr_len);
+static int mdio_read (struct rtnet_device *rtdev, int phy_id, int location);
+static void mdio_write (struct rtnet_device *rtdev, int phy_id, int location, int val);
+
+
+static int rtl8139_open (struct rtnet_device *rtdev);
+static int rtl8139_close (struct rtnet_device *rtdev);
+static int rtl8139_interrupt (rtdm_irq_t *irq_handle);
+static int rtl8139_start_xmit (struct rtskb *skb, struct rtnet_device *rtdev);
+
+static int rtl8139_ioctl(struct rtnet_device *, struct ifreq *rq, int cmd);
+static struct net_device_stats *rtl8139_get_stats(struct rtnet_device*rtdev);
+
+static void rtl8139_init_ring (struct rtnet_device *rtdev);
+static void rtl8139_set_rx_mode (struct rtnet_device *rtdev);
+static void __set_rx_mode (struct rtnet_device *rtdev);
+static void rtl8139_hw_start (struct rtnet_device *rtdev);
+
+#ifdef USE_IO_OPS
+
+#define RTL_R8(reg)                inb (((unsigned long)ioaddr) + (reg))
+#define RTL_R16(reg)                inw (((unsigned long)ioaddr) + (reg))
+#define RTL_R32(reg)                inl (((unsigned long)ioaddr) + (reg))
+#define RTL_W8(reg, val8)        outb ((val8), ((unsigned long)ioaddr) + (reg))
+#define RTL_W16(reg, val16)        outw ((val16), ((unsigned long)ioaddr) + (reg))
+#define RTL_W32(reg, val32)        outl ((val32), ((unsigned long)ioaddr) + (reg))
+#define RTL_W8_F                RTL_W8
+#define RTL_W16_F                RTL_W16
+#define RTL_W32_F                RTL_W32
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb(addr) inb((unsigned long)(addr))
+#define readw(addr) inw((unsigned long)(addr))
+#define readl(addr) inl((unsigned long)(addr))
+#define writeb(val,addr) outb((val),(unsigned long)(addr))
+#define writew(val,addr) outw((val),(unsigned long)(addr))
+#define writel(val,addr) outl((val),(unsigned long)(addr))
+
+#else
+
+/* write MMIO register, with flush */
+/* Flush avoids rtl8139 bug w/ posted MMIO writes */
+#define RTL_W8_F(reg, val8)        do { writeb ((val8), ioaddr + (reg)); readb (ioaddr + (reg)); } while (0)
+#define RTL_W16_F(reg, val16)        do { writew ((val16), ioaddr + (reg)); readw (ioaddr + (reg)); } while (0)
+#define RTL_W32_F(reg, val32)        do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0)
+
+
+#define MMIO_FLUSH_AUDIT_COMPLETE 1
+#if MMIO_FLUSH_AUDIT_COMPLETE
+
+/* write MMIO register */
+#define RTL_W8(reg, val8)        writeb ((val8), ioaddr + (reg))
+#define RTL_W16(reg, val16)        writew ((val16), ioaddr + (reg))
+#define RTL_W32(reg, val32)        writel ((val32), ioaddr + (reg))
+
+#else
+
+/* write MMIO register, then flush */
+#define RTL_W8                RTL_W8_F
+#define RTL_W16                RTL_W16_F
+#define RTL_W32                RTL_W32_F
+
+#endif /* MMIO_FLUSH_AUDIT_COMPLETE */
+
+/* read MMIO register */
+#define RTL_R8(reg)                readb (ioaddr + (reg))
+#define RTL_R16(reg)                readw (ioaddr + (reg))
+#define RTL_R32(reg)                readl (ioaddr + (reg))
+
+#endif /* USE_IO_OPS */
+
+
+static const u16 rtl8139_intr_mask =
+	PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver |
+	TxErr | TxOK | RxErr | RxOK;
+
+static const unsigned int rtl8139_rx_config =
+	RxCfgRcv32K | RxNoWrap |
+	(RX_FIFO_THRESH << RxCfgFIFOShift) |
+	(RX_DMA_BURST << RxCfgDMAShift);
+
+static const unsigned int rtl8139_tx_config =
+	TxIFG96 | (TX_DMA_BURST << TxDMAShift) | (TX_RETRY << TxRetryShift);
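+
+/* For reference, with the constants above these evaluate to:
+ *   rtl8139_rx_config = 0x1000 | 0x80 | (7 << 13) | (7 << 8)  = 0x0000f780
+ *   rtl8139_tx_config = (3 << 24) | (6 << 8) | (8 << 4)       = 0x03000680
+ */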
+
+
+
+
+static void rtl8139_chip_reset (void *ioaddr)
+{
+	int i;
+
+	/* Soft reset the chip. */
+	RTL_W8 (ChipCmd, CmdReset);
+
+	/* Check that the chip has finished the reset. */
+	for (i = 1000; i > 0; i--) {
+		barrier();
+		if ((RTL_R8 (ChipCmd) & CmdReset) == 0)
+			break;
+		udelay (10);
+	}
+}
+
+
+static int rtl8139_init_board (struct pci_dev *pdev,
+					 struct rtnet_device **dev_out)
+{
+	void *ioaddr;
+	struct rtnet_device *rtdev;
+	struct rtl8139_private *tp;
+	u8 tmp8;
+	int rc;
+	unsigned int i;
+#ifdef USE_IO_OPS
+	u32 pio_start, pio_end, pio_flags, pio_len;
+#endif
+	unsigned long mmio_start, mmio_flags, mmio_len;
+	u32 tmp;
+
+
+	*dev_out = NULL;
+
+	/* dev and rtdev->priv zeroed in alloc_etherdev */
+	rtdev=rt_alloc_etherdev(sizeof (struct rtl8139_private),
+				rx_pool_size + NUM_TX_DESC);
+	if (rtdev==NULL) {
+		rtdm_printk (KERN_ERR PFX "%s: Unable to alloc new net device\n", pci_name(pdev));
+		return -ENOMEM;
+	}
+	rtdev_alloc_name(rtdev, "rteth%d");
+
+	rt_rtdev_connect(rtdev, &RTDEV_manager);
+
+	rtdev->vers = RTDEV_VERS_2_0;
+	rtdev->sysbind = &pdev->dev;
+	tp = rtdev->priv;
+	tp->pci_dev = pdev;
+
+	/* enable device (incl. PCI PM wakeup and hotplug setup) */
+	rc = pci_enable_device (pdev);
+	if (rc)
+		goto err_out;
+
+	rc = pci_request_regions (pdev, "rtnet8139too");
+	if (rc)
+		goto err_out;
+
+	/* enable PCI bus-mastering */
+	pci_set_master (pdev);
+
+	mmio_start = pci_resource_start (pdev, 1);
+	mmio_flags = pci_resource_flags (pdev, 1);
+	mmio_len = pci_resource_len (pdev, 1);
+
+	/* set this immediately, we need to know before
+	 * we talk to the chip directly */
+#ifdef USE_IO_OPS
+	pio_start = pci_resource_start (pdev, 0);
+	pio_end = pci_resource_end (pdev, 0);
+	pio_flags = pci_resource_flags (pdev, 0);
+	pio_len = pci_resource_len (pdev, 0);
+
+	/* make sure PCI base addr 0 is PIO */
+	if (!(pio_flags & IORESOURCE_IO)) {
+		rtdm_printk (KERN_ERR PFX "%s: region #0 not a PIO resource, aborting\n", pci_name(pdev));
+		rc = -ENODEV;
+		goto err_out;
+	}
+	/* check for weird/broken PCI region reporting */
+	if (pio_len < RTL_MIN_IO_SIZE) {
+		rtdm_printk (KERN_ERR PFX "%s: Invalid PCI I/O region size(s), aborting\n", pci_name(pdev));
+		rc = -ENODEV;
+		goto err_out;
+	}
+#else
+	/* make sure PCI base addr 1 is MMIO */
+	if (!(mmio_flags & IORESOURCE_MEM)) {
+		rtdm_printk(KERN_ERR PFX "%s: region #1 not an MMIO resource, aborting\n", pci_name(pdev));
+		rc = -ENODEV;
+		goto err_out;
+	}
+	if (mmio_len < RTL_MIN_IO_SIZE) {
+		rtdm_printk(KERN_ERR PFX "%s: Invalid PCI mem region size(s), aborting\n", pci_name(pdev));
+		rc = -ENODEV;
+		goto err_out;
+	}
+#endif
+
+#ifdef USE_IO_OPS
+	ioaddr = (void *) pio_start;
+	rtdev->base_addr = pio_start;
+	tp->mmio_addr = ioaddr;
+#else
+	/* ioremap MMIO region */
+	ioaddr = ioremap (mmio_start, mmio_len);
+	if (ioaddr == NULL) {
+		rtdm_printk(KERN_ERR PFX "%s: cannot remap MMIO, aborting\n", pci_name(pdev));
+		rc = -EIO;
+		goto err_out;
+	}
+	rtdev->base_addr = (long) ioaddr;
+	tp->mmio_addr = ioaddr;
+#endif /* USE_IO_OPS */
+
+	/* Bring old chips out of low-power mode. */
+	RTL_W8 (HltClk, 'R');
+
+	/* check for missing/broken hardware */
+	if (RTL_R32 (TxConfig) == 0xFFFFFFFF) {
+		rtdm_printk(KERN_ERR PFX "%s: Chip not responding, ignoring board\n", pci_name(pdev));
+		rc = -EIO;
+		goto err_out;
+	}
+
+	/* identify chip attached to board */
+	tmp = RTL_R8 (ChipVersion);
+	for (i = 0; i < ARRAY_SIZE (rtl_chip_info); i++)
+		if (tmp == rtl_chip_info[i].version) {
+			tp->chipset = i;
+			goto match;
+		}
+
+	rtdm_printk("rt8139too: unknown chip version, assuming RTL-8139\n");
+	rtdm_printk("rt8139too: TxConfig = 0x%08x\n", RTL_R32 (TxConfig));
+
+	tp->chipset = 0;
+
+match:
+	if (tp->chipset >= CH_8139B) {
+		u8 new_tmp8 = tmp8 = RTL_R8 (Config1);
+		if ((rtl_chip_info[tp->chipset].flags & HasLWake) &&
+		    (tmp8 & LWAKE))
+			new_tmp8 &= ~LWAKE;
+		new_tmp8 |= Cfg1_PM_Enable;
+		if (new_tmp8 != tmp8) {
+			RTL_W8 (Cfg9346, Cfg9346_Unlock);
+			RTL_W8 (Config1, tmp8);
+			RTL_W8 (Cfg9346, Cfg9346_Lock);
+		}
+		if (rtl_chip_info[tp->chipset].flags & HasLWake) {
+			tmp8 = RTL_R8 (Config4);
+			if (tmp8 & LWPTN) {
+				RTL_W8 (Cfg9346, Cfg9346_Unlock);
+				RTL_W8 (Config4, tmp8 & ~LWPTN);
+				RTL_W8 (Cfg9346, Cfg9346_Lock);
+			}
+		}
+	} else {
+		tmp8 = RTL_R8 (Config1);
+		tmp8 &= ~(SLEEP | PWRDN);
+		RTL_W8 (Config1, tmp8);
+	}
+
+	rtl8139_chip_reset (ioaddr);
+
+	*dev_out = rtdev;
+	return 0;
+
+err_out:
+#ifndef USE_IO_OPS
+	if (tp->mmio_addr) iounmap (tp->mmio_addr);
+#endif /* !USE_IO_OPS */
+	/* it's ok to call this even if we have no regions to free */
+	pci_release_regions (pdev);
+	rtdev_free(rtdev);
+	pci_set_drvdata (pdev, NULL);
+
+	return rc;
+}
+
+
+
+
+static int rtl8139_init_one (struct pci_dev *pdev,
+				       const struct pci_device_id *ent)
+{
+	struct rtnet_device *rtdev = NULL;
+	struct rtl8139_private *tp;
+	int i, addr_len;
+	int option;
+	void *ioaddr;
+	static int board_idx = -1;
+
+	board_idx++;
+
+	if( cards[board_idx] == 0)
+		return -ENODEV;
+
+	/* when we're built into the kernel, the driver version message
+	 * is only printed if at least one 8139 board has been found
+	 */
+#ifndef MODULE
+	{
+		static int printed_version;
+		if (!printed_version++)
+			rtdm_printk (KERN_INFO RTL8139_DRIVER_NAME "\n");
+	}
+#endif
+
+	if ((i=rtl8139_init_board (pdev, &rtdev)) < 0)
+		return i;
+
+
+	tp = rtdev->priv;
+	ioaddr = tp->mmio_addr;
+
+	addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
+	for (i = 0; i < 3; i++)
+		((u16 *) (rtdev->dev_addr))[i] =
+		    le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len));
+
+	/* The RTL8139-specific entries in the device structure. */
+	rtdev->open = rtl8139_open;
+	rtdev->stop = rtl8139_close;
+	rtdev->hard_header = &rt_eth_header;
+	rtdev->hard_start_xmit = rtl8139_start_xmit;
+	rtdev->do_ioctl = rtl8139_ioctl;
+	rtdev->get_stats = rtl8139_get_stats;
+
+	/*rtdev->set_multicast_list = rtl8139_set_rx_mode; */
+	rtdev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
+
+	rtdev->irq = pdev->irq;
+
+	/* rtdev->priv/tp zeroed and aligned in init_etherdev */
+	tp = rtdev->priv;
+
+	/* note: tp->chipset set in rtl8139_init_board */
+	tp->drv_flags = board_info[ent->driver_data].hw_flags;
+	tp->mmio_addr = ioaddr;
+	rtdm_lock_init (&tp->lock);
+
+	if ( (i=rt_register_rtnetdev(rtdev)) )
+		goto err_out;
+
+	pci_set_drvdata (pdev, rtdev);
+
+	tp->phys[0] = 32;
+
+	/* The lower four bits are the media type. */
+	option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx];
+	if (option > 0) {
+		tp->mii.full_duplex = (option & 0x210) ? 1 : 0;
+		tp->default_port = option & 0xFF;
+		if (tp->default_port)
+			tp->medialock = 1;
+	}
+	if (tp->default_port) {
+		rtdm_printk(KERN_INFO "  Forcing %dMbps %s-duplex operation.\n",
+			    (option & 0x20 ? 100 : 10),
+			    (option & 0x10 ? "full" : "half"));
+		mdio_write(rtdev, tp->phys[0], 0,
+				   ((option & 0x20) ? 0x2000 : 0) |         /* 100Mbps? */
+				   ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
+	}
+
+
+	/* Put the chip into low-power mode. */
+	if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+		RTL_W8 (HltClk, 'H');        /* 'R' would leave the clock running. */
+
+	return 0;
+
+
+err_out:
+#ifndef USE_IO_OPS
+	if (tp->mmio_addr) iounmap (tp->mmio_addr);
+#endif /* !USE_IO_OPS */
+	/* it's ok to call this even if we have no regions to free */
+	pci_release_regions (pdev);
+	rtdev_free(rtdev);
+	pci_set_drvdata (pdev, NULL);
+
+	return i;
+}
+
+
+static void rtl8139_remove_one (struct pci_dev *pdev)
+{
+	struct rtnet_device *rtdev = pci_get_drvdata(pdev);
+
+#ifndef USE_IO_OPS
+	struct rtl8139_private *tp = rtdev->priv;
+
+	if (tp->mmio_addr)
+		iounmap (tp->mmio_addr);
+#endif /* !USE_IO_OPS */
+
+	/* it's ok to call this even if we have no regions to free */
+	rt_unregister_rtnetdev(rtdev);
+	rt_rtdev_disconnect(rtdev);
+
+	pci_release_regions(pdev);
+	pci_set_drvdata(pdev, NULL);
+
+	rtdev_free(rtdev);
+}
+
+
+/* Serial EEPROM section. */
+
+/*  EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK        0x04        /* EEPROM shift clock. */
+#define EE_CS                        0x08        /* EEPROM chip select. */
+#define EE_DATA_WRITE        0x02        /* EEPROM chip data in. */
+#define EE_WRITE_0                0x00
+#define EE_WRITE_1                0x02
+#define EE_DATA_READ        0x01        /* EEPROM chip data out. */
+#define EE_ENB                        (0x80 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+   No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
+ */
+
+#define eeprom_delay()        readl(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD        (5)
+#define EE_READ_CMD                (6)
+#define EE_ERASE_CMD        (7)
+
+static int read_eeprom (void *ioaddr, int location, int addr_len)
+{
+	int i;
+	unsigned retval = 0;
+	void *ee_addr = ioaddr + Cfg9346;
+	int read_cmd = location | (EE_READ_CMD << addr_len);
+
+	writeb (EE_ENB & ~EE_CS, ee_addr);
+	writeb (EE_ENB, ee_addr);
+	eeprom_delay ();
+
+	/* Shift the read command bits out. */
+	for (i = 4 + addr_len; i >= 0; i--) {
+		int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+		writeb (EE_ENB | dataval, ee_addr);
+		eeprom_delay ();
+		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+		eeprom_delay ();
+	}
+	writeb (EE_ENB, ee_addr);
+	eeprom_delay ();
+
+	for (i = 16; i > 0; i--) {
+		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
+		eeprom_delay ();
+		retval =
+		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
+				     0);
+		writeb (EE_ENB, ee_addr);
+		eeprom_delay ();
+	}
+
+	/* Terminate the EEPROM access. */
+	writeb (~EE_CS, ee_addr);
+	eeprom_delay ();
+
+	return retval;
+}
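+
+/*
+ * Command framing note: with addr_len == 6 (93C46-style parts), read_cmd is
+ * location | (EE_READ_CMD << 6), so the first loop above shifts out the start
+ * bit plus read opcode ("110") followed by the 6-bit word address, MSB first
+ * (a few leading zero bits are clocked first and ignored by the part); the
+ * second loop then shifts the 16-bit data word back in, MSB first.
+ */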
+
+/* MII serial management: mostly bogus for now. */
+/* Read and write the MII management registers using software-generated
+   serial MDIO protocol.
+   The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
+   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+   "overclocking" issues. */
+#define MDIO_DIR                0x80
+#define MDIO_DATA_OUT        0x04
+#define MDIO_DATA_IN        0x02
+#define MDIO_CLK                0x01
+#define MDIO_WRITE0 (MDIO_DIR)
+#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
+
+#define mdio_delay(mdio_addr)        readb(mdio_addr)
+
+
+
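+/*
+ * Maps the generic MII register index (0..7) onto the equivalent RTL8139
+ * register offset; a zero entry means the register has no 8139 counterpart,
+ * and mdio_read() returns 0 for it.
+ */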
+static char mii_2_8139_map[8] = {
+	BasicModeCtrl,
+	BasicModeStatus,
+	0,
+	0,
+	NWayAdvert,
+	NWayLPAR,
+	NWayExpansion,
+	0
+};
+
+#ifdef CONFIG_8139TOO_8129
+/* Synchronize the MII management interface by shifting out 32 one bits. */
+static void mdio_sync (void *mdio_addr)
+{
+	int i;
+
+	for (i = 32; i >= 0; i--) {
+		writeb (MDIO_WRITE1, mdio_addr);
+		mdio_delay (mdio_addr);
+		writeb (MDIO_WRITE1 | MDIO_CLK, mdio_addr);
+		mdio_delay (mdio_addr);
+	}
+}
+#endif
+
+
+static int mdio_read (struct rtnet_device *rtdev, int phy_id, int location)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+	int retval = 0;
+#ifdef CONFIG_8139TOO_8129
+	void *mdio_addr = tp->mmio_addr + Config4;
+	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+	int i;
+#endif
+
+	if (phy_id > 31) {        /* Really an 8139; use internal registers. */
+		return location < 8 && mii_2_8139_map[location] ?
+		    readw (tp->mmio_addr + mii_2_8139_map[location]) : 0;
+	}
+
+#ifdef CONFIG_8139TOO_8129
+	mdio_sync (mdio_addr);
+	/* Shift the read command bits out. */
+	for (i = 15; i >= 0; i--) {
+		int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
+
+		writeb (MDIO_DIR | dataval, mdio_addr);
+		mdio_delay (mdio_addr);
+		writeb (MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
+		mdio_delay (mdio_addr);
+	}
+
+	/* Read the two transition, 16 data, and wire-idle bits. */
+	for (i = 19; i > 0; i--) {
+		writeb (0, mdio_addr);
+		mdio_delay (mdio_addr);
+		retval = (retval << 1) | ((readb (mdio_addr) & MDIO_DATA_IN) ? 1 : 0);
+		writeb (MDIO_CLK, mdio_addr);
+		mdio_delay (mdio_addr);
+	}
+#endif
+
+	return (retval >> 1) & 0xffff;
+}
+
+
+static void mdio_write (struct rtnet_device *rtdev, int phy_id, int location,
+			int value)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+#ifdef CONFIG_8139TOO_8129
+	void *mdio_addr = tp->mmio_addr + Config4;
+	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
+	int i;
+#endif
+
+	if (phy_id > 31) {        /* Really an 8139; use internal registers. */
+		void *ioaddr = tp->mmio_addr;
+		if (location == 0) {
+			RTL_W8 (Cfg9346, Cfg9346_Unlock);
+			RTL_W16 (BasicModeCtrl, value);
+			RTL_W8 (Cfg9346, Cfg9346_Lock);
+		} else if (location < 8 && mii_2_8139_map[location])
+			RTL_W16 (mii_2_8139_map[location], value);
+		return;
+	}
+
+#ifdef CONFIG_8139TOO_8129
+	mdio_sync (mdio_addr);
+
+	/* Shift the command bits out. */
+	for (i = 31; i >= 0; i--) {
+		int dataval =
+		    (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+		writeb (dataval, mdio_addr);
+		mdio_delay (mdio_addr);
+		writeb (dataval | MDIO_CLK, mdio_addr);
+		mdio_delay (mdio_addr);
+	}
+	/* Clear out extra bits. */
+	for (i = 2; i > 0; i--) {
+		writeb (0, mdio_addr);
+		mdio_delay (mdio_addr);
+		writeb (MDIO_CLK, mdio_addr);
+		mdio_delay (mdio_addr);
+	}
+#endif
+}
+
+static int rtl8139_open (struct rtnet_device *rtdev)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+	int retval;
+
+	rt_stack_connect(rtdev, &STACK_manager);
+
+	retval = rtdm_irq_request(&tp->irq_handle, rtdev->irq,
+				  rtl8139_interrupt, RTDM_IRQTYPE_SHARED,
+				  rtdev->name, rtdev);
+	if (retval)
+		return retval;
+
+	tp->tx_bufs = pci_alloc_consistent(tp->pci_dev, TX_BUF_TOT_LEN, &tp->tx_bufs_dma);
+	tp->rx_ring = pci_alloc_consistent(tp->pci_dev, RX_BUF_TOT_LEN, &tp->rx_ring_dma);
+
+	if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
+		rtdm_irq_free(&tp->irq_handle);
+		if (tp->tx_bufs)
+			pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN, tp->tx_bufs, tp->tx_bufs_dma);
+		if (tp->rx_ring)
+			pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN, tp->rx_ring, tp->rx_ring_dma);
+
+		return -ENOMEM;
+	}
+	/* FIXME: create wrapper for duplex_lock vs. force_media
+	   tp->mii.full_duplex = tp->mii.duplex_lock; */
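+	/* The early-Tx threshold lives in TxStatus bits 21:16 in units of
+	 * 32 bytes: (256 << 11) & 0x003f0000 == (256 / 32) << 16 == 0x00080000. */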
+	tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000;
+	tp->twistie = 1;
+	tp->time_to_die = 0;
+
+	rtl8139_init_ring (rtdev);
+	rtl8139_hw_start (rtdev);
+
+	return 0;
+}
+
+
+static void rtl_check_media (struct rtnet_device *rtdev)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+	u16 mii_lpa;
+
+	if (tp->phys[0] < 0)
+		return;
+
+	mii_lpa = mdio_read(rtdev, tp->phys[0], MII_LPA);
+	if (mii_lpa == 0xffff)
+		return;
+
+	tp->mii.full_duplex = (mii_lpa & LPA_100FULL) == LPA_100FULL ||
+		(mii_lpa & 0x00C0) == LPA_10FULL;
+}
+
+
+/* Start the hardware at open or resume. */
+static void rtl8139_hw_start (struct rtnet_device *rtdev)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+	void *ioaddr = tp->mmio_addr;
+	u32 i;
+	u8 tmp;
+
+	/* Bring old chips out of low-power mode. */
+	if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+		RTL_W8 (HltClk, 'R');
+
+	rtl8139_chip_reset(ioaddr);
+
+	/* unlock Config[01234] and BMCR register writes */
+	RTL_W8_F (Cfg9346, Cfg9346_Unlock);
+	/* Restore our idea of the MAC address. */
+	RTL_W32_F (MAC0 + 0, cpu_to_le32 (*(u32 *) (rtdev->dev_addr + 0)));
+	RTL_W32_F (MAC0 + 4, cpu_to_le32 (*(u32 *) (rtdev->dev_addr + 4)));
+
+	tp->cur_rx = 0;
+
+	/* init Rx ring buffer DMA address */
+	RTL_W32_F (RxBuf, tp->rx_ring_dma);
+
+	/* Must enable Tx/Rx before setting transfer thresholds! */
+	RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
+
+	tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys;
+	RTL_W32 (RxConfig, tp->rx_config);
+
+	/* Check this value: the documentation for IFG contradicts itself. */
+	RTL_W32 (TxConfig, rtl8139_tx_config);
+
+	rtl_check_media (rtdev);
+
+	if (tp->chipset >= CH_8139B) {
+		/* Disable magic packet scanning, which is enabled
+		 * when PM is enabled in Config1.  It can be reenabled
+		 * via ETHTOOL_SWOL if desired.  */
+		RTL_W8 (Config3, RTL_R8 (Config3) & ~Cfg3_Magic);
+	}
+
+	/* Lock Config[01234] and BMCR register writes */
+	RTL_W8 (Cfg9346, Cfg9346_Lock);
+
+	/* init Tx buffer DMA addresses */
+	for (i = 0; i < NUM_TX_DESC; i++)
+		RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
+
+	RTL_W32 (RxMissed, 0);
+
+	rtl8139_set_rx_mode (rtdev);
+
+	/* no early-rx interrupts */
+	RTL_W16 (MultiIntr, RTL_R16 (MultiIntr) & MultiIntrClear);
+
+	/* make sure RxTx has started */
+	tmp = RTL_R8 (ChipCmd);
+	if ((!(tmp & CmdRxEnb)) || (!(tmp & CmdTxEnb)))
+		RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
+
+	/* Enable all known interrupts by setting the interrupt mask. */
+	RTL_W16 (IntrMask, rtl8139_intr_mask);
+
+	rtnetif_start_queue (rtdev);
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void rtl8139_init_ring (struct rtnet_device *rtdev)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+	int i;
+
+	tp->cur_rx = 0;
+	tp->cur_tx = 0;
+	tp->dirty_tx = 0;
+
+	for (i = 0; i < NUM_TX_DESC; i++)
+		tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE];
+}
+
+
+static void rtl8139_tx_clear (struct rtl8139_private *tp)
+{
+	tp->cur_tx = 0;
+	tp->dirty_tx = 0;
+
+	/* XXX account for unsent Tx packets in tp->stats.tx_dropped */
+}
+
+
+
+static int rtl8139_start_xmit (struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+
+	void *ioaddr = tp->mmio_addr;
+	unsigned int entry;
+	unsigned int len = skb->len;
+	rtdm_lockctx_t context;
+
+	/* Calculate the next Tx descriptor entry. */
+	entry = tp->cur_tx % NUM_TX_DESC;
+
+	if (likely(len < TX_BUF_SIZE)) {
+		if (unlikely(skb->xmit_stamp != NULL)) {
+			rtdm_lock_irqsave(context);
+			*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() +
+						       *skb->xmit_stamp);
+			/* typically, we are only copying a few bytes here */
+			rtskb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
+		} else {
+			/* copy larger packets outside the lock */
+			rtskb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
+			rtdm_lock_irqsave(context);
+		}
+	} else {
+		dev_kfree_rtskb(skb);
+		tp->stats.tx_dropped++;
+		return 0;
+	}
+
+
+	/* Note: the chip doesn't have auto-pad! */
+	rtdm_lock_get(&tp->lock);
+	RTL_W32_F (TxStatus0 + (entry * sizeof (u32)), tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));
+	tp->cur_tx++;
+	wmb();
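+	/* Stop the queue once all NUM_TX_DESC slots are in flight; it is
+	 * woken again from rtl8139_tx_interrupt() once a slot completes. */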
+	if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
+		rtnetif_stop_queue (rtdev);
+	rtdm_lock_put_irqrestore(&tp->lock, context);
+
+	dev_kfree_rtskb(skb);
+
+#ifdef DEBUG
+	rtdm_printk ("%s: Queued Tx packet size %u to slot %d.\n", rtdev->name, len, entry);
+#endif
+	return 0;
+}
+
+static int rtl8139_ioctl(struct rtnet_device *rtdev, struct ifreq *ifr, int cmd)
+{
+    struct rtl8139_private *tp = rtdev->priv;
+    void *ioaddr = tp->mmio_addr;
+    int nReturn = 0;
+    struct ethtool_value *value;
+
+    switch (cmd) {
+	case SIOCETHTOOL:
+	    /* TODO: user-safe parameter access, most probably one layer higher */
+	    value = (struct ethtool_value *)ifr->ifr_data;
+	    if (value->cmd == ETHTOOL_GLINK)
+	    {
+		if (RTL_R16(CSCR) & CSCR_LinkOKBit)
+		    value->data = 1;
+		else
+		    value->data = 0;
+	    }
+	    break;
+
+	default:
+	    nReturn = -EOPNOTSUPP;
+	    break;
+    }
+    return nReturn;
+}
+
+static struct net_device_stats *rtl8139_get_stats(struct rtnet_device*rtdev)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+	return &tp->stats;
+}
+
+static void rtl8139_tx_interrupt (struct rtnet_device *rtdev,
+				  struct rtl8139_private *tp,
+				  void *ioaddr)
+{
+	unsigned long dirty_tx, tx_left;
+
+	dirty_tx = tp->dirty_tx;
+	tx_left = tp->cur_tx - dirty_tx;
+
+	while (tx_left > 0) {
+		int entry = dirty_tx % NUM_TX_DESC;
+		int txstatus;
+
+		txstatus = RTL_R32 (TxStatus0 + (entry * sizeof (u32)));
+
+		if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted)))
+			break;        /* It still hasn't been Txed */
+
+		/* Note: TxCarrierLost is always asserted at 100 Mbps. */
+		if (txstatus & (TxOutOfWindow | TxAborted)) {
+			/* There was a major error; log it. */
+			rtdm_printk("%s: Transmit error, Tx status %8.8x.\n",
+				    rtdev->name, txstatus);
+			tp->stats.tx_errors++;
+			if (txstatus & TxAborted) {
+				tp->stats.tx_aborted_errors++;
+				RTL_W32 (TxConfig, TxClearAbt);
+				RTL_W16 (IntrStatus, TxErr);
+				wmb();
+			}
+			if (txstatus & TxCarrierLost)
+				tp->stats.tx_carrier_errors++;
+			if (txstatus & TxOutOfWindow)
+				tp->stats.tx_window_errors++;
+#ifdef ETHER_STATS
+			if ((txstatus & 0x0f000000) == 0x0f000000)
+				tp->stats.collisions16++;
+#endif
+		} else {
+			if (txstatus & TxUnderrun) {
+				/* Add 64 to the Tx FIFO threshold. */
+				if (tp->tx_flag < 0x00300000)
+					tp->tx_flag += 0x00020000;
+				tp->stats.tx_fifo_errors++;
+			}
+			tp->stats.collisions += (txstatus >> 24) & 15;
+			tp->stats.tx_bytes += txstatus & 0x7ff;
+			tp->stats.tx_packets++;
+		}
+
+		dirty_tx++;
+		tx_left--;
+	}
+
+	/* only wake the queue if we did work, and the queue is stopped */
+	if (tp->dirty_tx != dirty_tx) {
+		tp->dirty_tx = dirty_tx;
+		mb();
+		if (rtnetif_queue_stopped (rtdev))
+			rtnetif_wake_queue (rtdev);
+	}
+}
+
+
+/* TODO: clean this up!  Rx reset need not be this intensive */
+static void rtl8139_rx_err
+(u32 rx_status, struct rtnet_device *rtdev, struct rtl8139_private *tp, void *ioaddr)
+{
+/*        u8 tmp8;
+#ifndef CONFIG_8139_NEW_RX_RESET
+	int tmp_work;
+#endif */
+
+	/* RTnet-TODO: We really need an error manager to handle such issues... */
+	rtdm_printk("%s: FATAL - Ethernet frame had errors, status %8.8x.\n",
+		    rtdev->name, rx_status);
+}
+
+
+static void rtl8139_rx_interrupt (struct rtnet_device *rtdev,
+				  struct rtl8139_private *tp, void *ioaddr,
+				  nanosecs_abs_t *time_stamp)
+{
+	unsigned char *rx_ring;
+	u16 cur_rx;
+
+	rx_ring = tp->rx_ring;
+	cur_rx = tp->cur_rx;
+
+	while ((RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
+		int ring_offset = cur_rx % RX_BUF_LEN;
+		u32 rx_status;
+		unsigned int rx_size;
+		unsigned int pkt_size;
+		struct rtskb *skb;
+
+		rmb();
+
+		/* read size+status of next frame from DMA ring buffer */
+		rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset));
+		rx_size = rx_status >> 16;
+		pkt_size = rx_size - 4;
+
+		/* Packet copy from FIFO still in progress.
+		 * Theoretically, this should never happen
+		 * since EarlyRx is disabled.
+		 */
+		if (rx_size == 0xfff0) {
+			tp->xstats.early_rx++;
+			break;
+		}
+
+		/* If Rx err or invalid rx_size/rx_status received
+		 * (which happens if we get lost in the ring),
+		 * Rx process gets reset, so we abort any further
+		 * Rx processing.
+		 */
+		if ((rx_size > (MAX_ETH_FRAME_SIZE+4)) ||
+		    (rx_size < 8) ||
+		    (!(rx_status & RxStatusOK))) {
+			rtl8139_rx_err (rx_status, rtdev, tp, ioaddr);
+			return;
+		}
+
+		/* Malloc up new buffer, compatible with net-2e. */
+		/* Omit the four octet CRC from the length. */
+
+		/* TODO: consider allocating skb's outside of
+		 * interrupt context, both to speed interrupt processing,
+		 * and also to reduce the chances of having to
+		 * drop packets here under memory pressure.
+		 */
+
+		skb = rtnetdev_alloc_rtskb(rtdev, pkt_size + 2);
+		if (skb) {
+			skb->time_stamp = *time_stamp;
+			rtskb_reserve (skb, 2);        /* 16 byte align the IP fields. */
+
+
+			/* eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0); */
+			memcpy (skb->data, &rx_ring[ring_offset + 4], pkt_size);
+			rtskb_put (skb, pkt_size);
+			skb->protocol = rt_eth_type_trans (skb, rtdev);
+			rtnetif_rx (skb);
+			tp->stats.rx_bytes += pkt_size;
+			tp->stats.rx_packets++;
+		} else {
+			rtdm_printk (KERN_WARNING"%s: Memory squeeze, dropping packet.\n", rtdev->name);
+			tp->stats.rx_dropped++;
+		}
+
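+		/* Advance past the 4-byte status/size header plus the frame
+		 * (rx_size includes the CRC), rounded up to a dword boundary;
+		 * CAPR (RxBufPtr) is kept 16 bytes behind the real read
+		 * pointer, as the chip expects. */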
+		cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
+		RTL_W16 (RxBufPtr, cur_rx - 16);
+
+		if (RTL_R16 (IntrStatus) & RxAckBits)
+			RTL_W16_F (IntrStatus, RxAckBits);
+	}
+
+	tp->cur_rx = cur_rx;
+}
+
+
+static void rtl8139_weird_interrupt (struct rtnet_device *rtdev,
+				     struct rtl8139_private *tp,
+				     void *ioaddr,
+				     int status, int link_changed)
+{
+	rtdm_printk ("%s: Abnormal interrupt, status %8.8x.\n",
+		      rtdev->name, status);
+
+	/* Update the error count. */
+	tp->stats.rx_missed_errors += RTL_R32 (RxMissed);
+	RTL_W32 (RxMissed, 0);
+
+	if ((status & RxUnderrun) && link_changed && (tp->drv_flags & HAS_LNK_CHNG)) {
+		/* Really link-change on new chips. */
+		status &= ~RxUnderrun;
+	}
+
+	/* XXX along with rtl8139_rx_err, are we double-counting errors? */
+	if (status &
+	    (RxUnderrun | RxOverflow | RxErr | RxFIFOOver))
+		tp->stats.rx_errors++;
+
+	if (status & PCSTimeout)
+		tp->stats.rx_length_errors++;
+
+	if (status & (RxUnderrun | RxFIFOOver))
+		tp->stats.rx_fifo_errors++;
+
+	if (status & PCIErr) {
+		u16 pci_cmd_status;
+		pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status);
+		pci_write_config_word (tp->pci_dev, PCI_STATUS, pci_cmd_status);
+
+		rtdm_printk (KERN_ERR "%s: PCI Bus error %4.4x.\n", rtdev->name, pci_cmd_status);
+	}
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread. */
+static int rtl8139_interrupt(rtdm_irq_t *irq_handle)
+{
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+	struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct rtl8139_private *tp = rtdev->priv;
+	void *ioaddr = tp->mmio_addr;
+	int ackstat;
+	int status;
+	int link_changed = 0; /* avoid bogus "uninit" warning */
+	int saved_status = 0;
+	int ret = RTDM_IRQ_NONE;
+
+	rtdm_lock_get(&tp->lock);
+
+	status = RTL_R16(IntrStatus);
+
+	/* h/w no longer present (hotplug?) or major error, bail */
+	if (unlikely(status == 0xFFFF) || unlikely(!(status & rtl8139_intr_mask)))
+		goto out;
+
+	ret = RTDM_IRQ_HANDLED;
+
+	/* close possible race with dev_close */
+	if (unlikely(!rtnetif_running(rtdev))) {
+		RTL_W16(IntrMask, 0);
+		goto out;
+	}
+
+	/* Acknowledge all of the current interrupt sources ASAP, but
+	   first get an additional status bit from CSCR. */
+	if (unlikely(status & RxUnderrun))
+		link_changed = RTL_R16(CSCR) & CSCR_LinkChangeBit;
+
+	/* The chip takes special action when we clear RxAckBits,
+	 * so we clear them later in rtl8139_rx_interrupt
+	 */
+	ackstat = status & ~(RxAckBits | TxErr);
+	if (ackstat)
+		RTL_W16(IntrStatus, ackstat);
+
+	if (status & RxAckBits) {
+		saved_status |= RxAckBits;
+		rtl8139_rx_interrupt(rtdev, tp, ioaddr, &time_stamp);
+	}
+
+	/* Check uncommon events with one test. */
+	if (unlikely(status & (PCIErr | PCSTimeout | RxUnderrun | RxErr)))
+		rtl8139_weird_interrupt(rtdev, tp, ioaddr, status, link_changed);
+
+	if (status & (TxOK |TxErr)) {
+		rtl8139_tx_interrupt(rtdev, tp, ioaddr);
+		if (status & TxErr) {
+			RTL_W16(IntrStatus, TxErr);
+			saved_status |= TxErr;
+		}
+	}
+ out:
+	rtdm_lock_put(&tp->lock);
+
+	if (saved_status & RxAckBits)
+		rt_mark_stack_mgr(rtdev);
+
+	if (saved_status & TxErr)
+		rtnetif_err_tx(rtdev);
+
+	return ret;
+}
+
+
+static int rtl8139_close (struct rtnet_device *rtdev)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+	void *ioaddr = tp->mmio_addr;
+	rtdm_lockctx_t context;
+
+	printk ("%s: Shutting down ethercard, status was 0x%4.4x.\n", rtdev->name, RTL_R16 (IntrStatus));
+
+	rtnetif_stop_queue (rtdev);
+
+	rtdm_lock_get_irqsave (&tp->lock, context);
+	/* Stop the chip's Tx and Rx DMA processes. */
+	RTL_W8 (ChipCmd, 0);
+	/* Disable interrupts by clearing the interrupt mask. */
+	RTL_W16 (IntrMask, 0);
+	/* Update the error counts. */
+	tp->stats.rx_missed_errors += RTL_R32 (RxMissed);
+	RTL_W32 (RxMissed, 0);
+	rtdm_lock_put_irqrestore (&tp->lock, context);
+
+	rtdm_irq_free(&tp->irq_handle);
+
+	rt_stack_disconnect(rtdev);
+
+	rtl8139_tx_clear (tp);
+
+	pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN, tp->rx_ring, tp->rx_ring_dma);
+	pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN, tp->tx_bufs, tp->tx_bufs_dma);
+	tp->rx_ring = NULL;
+	tp->tx_bufs = NULL;
+
+	/* Green! Put the chip in low-power mode. */
+	RTL_W8 (Cfg9346, Cfg9346_Unlock);
+
+	if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+		RTL_W8 (HltClk, 'H');        /* 'R' would leave the clock running. */
+
+	return 0;
+}
+
+
+
+/* Set or clear the multicast filter for this adaptor.
+   This routine is not state sensitive and need not be SMP locked. */
+static void __set_rx_mode (struct rtnet_device *rtdev)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+	void *ioaddr = tp->mmio_addr;
+	u32 mc_filter[2];        /* Multicast hash filter */
+	int rx_mode;
+	u32 tmp;
+
+#ifdef DEBUG
+	rtdm_printk ("%s:   rtl8139_set_rx_mode(%4.4x) done -- Rx config %8.8lx.\n",
+			rtdev->name, rtdev->flags, RTL_R32 (RxConfig));
+#endif
+
+	/* Note: do not reorder, GCC is clever about common statements. */
+	if (rtdev->flags & IFF_PROMISC) {
+		/* Unconditionally log net taps. */
+		/*printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n", rtdev->name);*/
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys | AcceptAllPhys;
+		mc_filter[1] = mc_filter[0] = 0xffffffff;
+	} else if (rtdev->flags & IFF_ALLMULTI) {
+		/* Too many to filter perfectly -- accept all multicasts. */
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+		mc_filter[1] = mc_filter[0] = 0xffffffff;
+	} else {
+		rx_mode = AcceptBroadcast | AcceptMyPhys;
+		mc_filter[1] = mc_filter[0] = 0;
+	}
+
+	/* We can safely update without stopping the chip. */
+	tmp = rtl8139_rx_config | rx_mode;
+	if (tp->rx_config != tmp) {
+		RTL_W32_F (RxConfig, tmp);
+		tp->rx_config = tmp;
+	}
+	RTL_W32_F (MAR0 + 0, mc_filter[0]);
+	RTL_W32_F (MAR0 + 4, mc_filter[1]);
+}
+
+static void rtl8139_set_rx_mode (struct rtnet_device *rtdev)
+{
+	rtdm_lockctx_t context;
+	struct rtl8139_private *tp = rtdev->priv;
+
+	rtdm_lock_get_irqsave (&tp->lock, context);
+	__set_rx_mode(rtdev);
+	rtdm_lock_put_irqrestore (&tp->lock, context);
+}
+
+static struct pci_driver rtl8139_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= rtl8139_pci_tbl,
+	.probe		= rtl8139_init_one,
+	.remove		= rtl8139_remove_one,
+	.suspend	= NULL,
+	.resume		= NULL,
+};
+
+
+static int __init rtl8139_init_module (void)
+{
+	/* when we're a module, we always print a version message,
+	 * even if no 8139 board is found.
+	 */
+
+#ifdef MODULE
+	printk (KERN_INFO RTL8139_DRIVER_NAME "\n");
+#endif
+
+	return pci_register_driver (&rtl8139_pci_driver);
+}
+
+
+static void __exit rtl8139_cleanup_module (void)
+{
+	pci_unregister_driver (&rtl8139_pci_driver);
+}
+
+
+module_init(rtl8139_init_module);
+module_exit(rtl8139_cleanup_module);
+++ linux-patched/drivers/xenomai/net/drivers/e1000e/hw.h	2022-03-21 12:58:29.585887050 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000e/netdev.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include <linux/types.h>
+
+struct e1000_hw;
+struct e1000_adapter;
+
+#include "defines.h"
+
+#define er32(reg)	__er32(hw, E1000_##reg)
+#define ew32(reg,val)	__ew32(hw, E1000_##reg, (val))
+#define e1e_flush()	er32(STATUS)
+
+#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \
+	(writel((value), ((a)->hw_addr + reg + ((offset) << 2))))
+
+#define E1000_READ_REG_ARRAY(a, reg, offset) \
+	(readl((a)->hw_addr + reg + ((offset) << 2)))
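+
+/*
+ * Usage sketch (illustrative only): with a local 'struct e1000_hw *hw' in
+ * scope, er32(ICR) expands to __er32(hw, E1000_ICR), ew32(IMC, ~0) expands to
+ * __ew32(hw, E1000_IMC, (~0)), and e1e_flush() posts outstanding writes by
+ * reading the STATUS register back.
+ */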
+
+enum e1e_registers {
+	E1000_CTRL     = 0x00000, /* Device Control - RW */
+	E1000_STATUS   = 0x00008, /* Device Status - RO */
+	E1000_EECD     = 0x00010, /* EEPROM/Flash Control - RW */
+	E1000_EERD     = 0x00014, /* EEPROM Read - RW */
+	E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */
+	E1000_FLA      = 0x0001C, /* Flash Access - RW */
+	E1000_MDIC     = 0x00020, /* MDI Control - RW */
+	E1000_SCTL     = 0x00024, /* SerDes Control - RW */
+	E1000_FCAL     = 0x00028, /* Flow Control Address Low - RW */
+	E1000_FCAH     = 0x0002C, /* Flow Control Address High -RW */
+	E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
+	E1000_FEXTNVM  = 0x00028, /* Future Extended NVM - RW */
+	E1000_FCT      = 0x00030, /* Flow Control Type - RW */
+	E1000_VET      = 0x00038, /* VLAN Ether Type - RW */
+	E1000_ICR      = 0x000C0, /* Interrupt Cause Read - R/clr */
+	E1000_ITR      = 0x000C4, /* Interrupt Throttling Rate - RW */
+	E1000_ICS      = 0x000C8, /* Interrupt Cause Set - WO */
+	E1000_IMS      = 0x000D0, /* Interrupt Mask Set - RW */
+	E1000_IMC      = 0x000D8, /* Interrupt Mask Clear - WO */
+	E1000_EIAC_82574 = 0x000DC, /* Ext. Interrupt Auto Clear - RW */
+	E1000_IAM      = 0x000E0, /* Interrupt Acknowledge Auto Mask */
+	E1000_IVAR     = 0x000E4, /* Interrupt Vector Allocation - RW */
+	E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */
+#define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + (_n << 2))
+	E1000_RCTL     = 0x00100, /* Rx Control - RW */
+	E1000_FCTTV    = 0x00170, /* Flow Control Transmit Timer Value - RW */
+	E1000_TXCW     = 0x00178, /* Tx Configuration Word - RW */
+	E1000_RXCW     = 0x00180, /* Rx Configuration Word - RO */
+	E1000_TCTL     = 0x00400, /* Tx Control - RW */
+	E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */
+	E1000_TIPG     = 0x00410, /* Tx Inter-packet gap -RW */
+	E1000_AIT      = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */
+	E1000_LEDCTL   = 0x00E00, /* LED Control - RW */
+	E1000_EXTCNF_CTRL  = 0x00F00, /* Extended Configuration Control */
+	E1000_EXTCNF_SIZE  = 0x00F08, /* Extended Configuration Size */
+	E1000_PHY_CTRL     = 0x00F10, /* PHY Control Register in CSR */
+#define E1000_POEMB	E1000_PHY_CTRL	/* PHY OEM Bits */
+	E1000_PBA      = 0x01000, /* Packet Buffer Allocation - RW */
+	E1000_PBS      = 0x01008, /* Packet Buffer Size */
+	E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
+	E1000_EEWR     = 0x0102C, /* EEPROM Write Register - RW */
+	E1000_FLOP     = 0x0103C, /* FLASH Opcode Register */
+	E1000_PBA_ECC  = 0x01100, /* PBA ECC Register */
+	E1000_ERT      = 0x02008, /* Early Rx Threshold - RW */
+	E1000_FCRTL    = 0x02160, /* Flow Control Receive Threshold Low - RW */
+	E1000_FCRTH    = 0x02168, /* Flow Control Receive Threshold High - RW */
+	E1000_PSRCTL   = 0x02170, /* Packet Split Receive Control - RW */
+	E1000_RDBAL    = 0x02800, /* Rx Descriptor Base Address Low - RW */
+	E1000_RDBAH    = 0x02804, /* Rx Descriptor Base Address High - RW */
+	E1000_RDLEN    = 0x02808, /* Rx Descriptor Length - RW */
+	E1000_RDH      = 0x02810, /* Rx Descriptor Head - RW */
+	E1000_RDT      = 0x02818, /* Rx Descriptor Tail - RW */
+	E1000_RDTR     = 0x02820, /* Rx Delay Timer - RW */
+	E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
+#define E1000_RXDCTL(_n)   (E1000_RXDCTL_BASE + (_n << 8))
+	E1000_RADV     = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
+
+/* Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL_REG(current_rx_queue)
+ *
+ */
+#define E1000_RDBAL_REG(_n)   (E1000_RDBAL + (_n << 8))
+	E1000_KABGTXD  = 0x03004, /* AFE Band Gap Transmit Ref Data */
+	E1000_TDBAL    = 0x03800, /* Tx Descriptor Base Address Low - RW */
+	E1000_TDBAH    = 0x03804, /* Tx Descriptor Base Address High - RW */
+	E1000_TDLEN    = 0x03808, /* Tx Descriptor Length - RW */
+	E1000_TDH      = 0x03810, /* Tx Descriptor Head - RW */
+	E1000_TDT      = 0x03818, /* Tx Descriptor Tail - RW */
+	E1000_TIDV     = 0x03820, /* Tx Interrupt Delay Value - RW */
+	E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
+#define E1000_TXDCTL(_n)   (E1000_TXDCTL_BASE + (_n << 8))
+	E1000_TADV     = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */
+	E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */
+#define E1000_TARC(_n)   (E1000_TARC_BASE + (_n << 8))
+	E1000_CRCERRS  = 0x04000, /* CRC Error Count - R/clr */
+	E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */
+	E1000_SYMERRS  = 0x04008, /* Symbol Error Count - R/clr */
+	E1000_RXERRC   = 0x0400C, /* Receive Error Count - R/clr */
+	E1000_MPC      = 0x04010, /* Missed Packet Count - R/clr */
+	E1000_SCC      = 0x04014, /* Single Collision Count - R/clr */
+	E1000_ECOL     = 0x04018, /* Excessive Collision Count - R/clr */
+	E1000_MCC      = 0x0401C, /* Multiple Collision Count - R/clr */
+	E1000_LATECOL  = 0x04020, /* Late Collision Count - R/clr */
+	E1000_COLC     = 0x04028, /* Collision Count - R/clr */
+	E1000_DC       = 0x04030, /* Defer Count - R/clr */
+	E1000_TNCRS    = 0x04034, /* Tx-No CRS - R/clr */
+	E1000_SEC      = 0x04038, /* Sequence Error Count - R/clr */
+	E1000_CEXTERR  = 0x0403C, /* Carrier Extension Error Count - R/clr */
+	E1000_RLEC     = 0x04040, /* Receive Length Error Count - R/clr */
+	E1000_XONRXC   = 0x04048, /* XON Rx Count - R/clr */
+	E1000_XONTXC   = 0x0404C, /* XON Tx Count - R/clr */
+	E1000_XOFFRXC  = 0x04050, /* XOFF Rx Count - R/clr */
+	E1000_XOFFTXC  = 0x04054, /* XOFF Tx Count - R/clr */
+	E1000_FCRUC    = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */
+	E1000_PRC64    = 0x0405C, /* Packets Rx (64 bytes) - R/clr */
+	E1000_PRC127   = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */
+	E1000_PRC255   = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */
+	E1000_PRC511   = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */
+	E1000_PRC1023  = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */
+	E1000_PRC1522  = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */
+	E1000_GPRC     = 0x04074, /* Good Packets Rx Count - R/clr */
+	E1000_BPRC     = 0x04078, /* Broadcast Packets Rx Count - R/clr */
+	E1000_MPRC     = 0x0407C, /* Multicast Packets Rx Count - R/clr */
+	E1000_GPTC     = 0x04080, /* Good Packets Tx Count - R/clr */
+	E1000_GORCL    = 0x04088, /* Good Octets Rx Count Low - R/clr */
+	E1000_GORCH    = 0x0408C, /* Good Octets Rx Count High - R/clr */
+	E1000_GOTCL    = 0x04090, /* Good Octets Tx Count Low - R/clr */
+	E1000_GOTCH    = 0x04094, /* Good Octets Tx Count High - R/clr */
+	E1000_RNBC     = 0x040A0, /* Rx No Buffers Count - R/clr */
+	E1000_RUC      = 0x040A4, /* Rx Undersize Count - R/clr */
+	E1000_RFC      = 0x040A8, /* Rx Fragment Count - R/clr */
+	E1000_ROC      = 0x040AC, /* Rx Oversize Count - R/clr */
+	E1000_RJC      = 0x040B0, /* Rx Jabber Count - R/clr */
+	E1000_MGTPRC   = 0x040B4, /* Management Packets Rx Count - R/clr */
+	E1000_MGTPDC   = 0x040B8, /* Management Packets Dropped Count - R/clr */
+	E1000_MGTPTC   = 0x040BC, /* Management Packets Tx Count - R/clr */
+	E1000_TORL     = 0x040C0, /* Total Octets Rx Low - R/clr */
+	E1000_TORH     = 0x040C4, /* Total Octets Rx High - R/clr */
+	E1000_TOTL     = 0x040C8, /* Total Octets Tx Low - R/clr */
+	E1000_TOTH     = 0x040CC, /* Total Octets Tx High - R/clr */
+	E1000_TPR      = 0x040D0, /* Total Packets Rx - R/clr */
+	E1000_TPT      = 0x040D4, /* Total Packets Tx - R/clr */
+	E1000_PTC64    = 0x040D8, /* Packets Tx (64 bytes) - R/clr */
+	E1000_PTC127   = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */
+	E1000_PTC255   = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */
+	E1000_PTC511   = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */
+	E1000_PTC1023  = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */
+	E1000_PTC1522  = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */
+	E1000_MPTC     = 0x040F0, /* Multicast Packets Tx Count - R/clr */
+	E1000_BPTC     = 0x040F4, /* Broadcast Packets Tx Count - R/clr */
+	E1000_TSCTC    = 0x040F8, /* TCP Segmentation Context Tx - R/clr */
+	E1000_TSCTFC   = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */
+	E1000_IAC      = 0x04100, /* Interrupt Assertion Count */
+	E1000_ICRXPTC  = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */
+	E1000_ICRXATC  = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */
+	E1000_ICTXPTC  = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */
+	E1000_ICTXATC  = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */
+	E1000_ICTXQEC  = 0x04118, /* Irq Cause Tx Queue Empty Count */
+	E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */
+	E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */
+	E1000_ICRXOC   = 0x04124, /* Irq Cause Receiver Overrun Count */
+	E1000_RXCSUM   = 0x05000, /* Rx Checksum Control - RW */
+	E1000_RFCTL    = 0x05008, /* Receive Filter Control */
+	E1000_MTA      = 0x05200, /* Multicast Table Array - RW Array */
+	E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */
+#define E1000_RAL(_n)   (E1000_RAL_BASE + ((_n) * 8))
+#define E1000_RA        (E1000_RAL(0))
+	E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */
+#define E1000_RAH(_n)   (E1000_RAH_BASE + ((_n) * 8))
+	E1000_SHRAL_PCH_LPT_BASE = 0x05408,
+#define E1000_SHRAL_PCH_LPT(_n)   (E1000_SHRAL_PCH_LPT_BASE + ((_n) * 8))
+	E1000_SHRAH_PCH_LTP_BASE = 0x0540C,
+#define E1000_SHRAH_PCH_LPT(_n)   (E1000_SHRAH_PCH_LTP_BASE + ((_n) * 8))
+	E1000_VFTA     = 0x05600, /* VLAN Filter Table Array - RW Array */
+	E1000_WUC      = 0x05800, /* Wakeup Control - RW */
+	E1000_WUFC     = 0x05808, /* Wakeup Filter Control - RW */
+	E1000_WUS      = 0x05810, /* Wakeup Status - RO */
+	E1000_MANC     = 0x05820, /* Management Control - RW */
+	E1000_FFLT     = 0x05F00, /* Flexible Filter Length Table - RW Array */
+	E1000_HOST_IF  = 0x08800, /* Host Interface */
+
+	E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */
+	E1000_MANC2H    = 0x05860, /* Management Control To Host - RW */
+	E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */
+#define E1000_MDEF(_n)   (E1000_MDEF_BASE + ((_n) * 4))
+	E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
+	E1000_GCR	= 0x05B00, /* PCI-Ex Control */
+	E1000_GCR2      = 0x05B64, /* PCI-Ex Control #2 */
+	E1000_FACTPS    = 0x05B30, /* Function Active and Power State to MNG */
+	E1000_SWSM      = 0x05B50, /* SW Semaphore */
+	E1000_FWSM      = 0x05B54, /* FW Semaphore */
+	E1000_SWSM2     = 0x05B58, /* Driver-only SW semaphore */
+	E1000_FFLT_DBG  = 0x05F04, /* Debug Register */
+	E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */
+#define E1000_PCH_RAICC(_n)	(E1000_PCH_RAICC_BASE + ((_n) * 4))
+#define E1000_CRC_OFFSET	E1000_PCH_RAICC_BASE
+	E1000_HICR      = 0x08F00, /* Host Interface Control */
+};
+
+#define E1000_MAX_PHY_ADDR		4
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG	0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS	0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL	0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH	0x13 /* PHY Link Health */
+#define IGP02E1000_PHY_POWER_MGMT	0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT	0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT		22   /* Page Select for BM */
+#define IGP_PAGE_SHIFT			5
+#define PHY_REG_MASK			0x1F
+
+#define BM_WUC_PAGE			800
+#define BM_WUC_ADDRESS_OPCODE		0x11
+#define BM_WUC_DATA_OPCODE		0x12
+#define BM_WUC_ENABLE_PAGE		769
+#define BM_WUC_ENABLE_REG		17
+#define BM_WUC_ENABLE_BIT		(1 << 2)
+#define BM_WUC_HOST_WU_BIT		(1 << 4)
+#define BM_WUC_ME_WU_BIT		(1 << 5)
+
+#define BM_WUC	PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS	PHY_REG(BM_WUC_PAGE, 3)
+
+#define IGP01E1000_PHY_PCS_INIT_REG	0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK	0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX	0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX	0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED	0x0080
+
+#define IGP02E1000_PM_SPD		0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU		0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU		0x0004 /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE	0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED	0x0002
+#define IGP01E1000_PSSR_MDIX			0x0800
+#define IGP01E1000_PSSR_SPEED_MASK		0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS		0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM		4
+#define IGP02E1000_PHY_AGC_A			0x11B1
+#define IGP02E1000_PHY_AGC_B			0x12B1
+#define IGP02E1000_PHY_AGC_C			0x14B1
+#define IGP02E1000_PHY_AGC_D			0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT	9 /* Coarse - 15:13, Fine - 12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK	0x7F
+#define IGP02E1000_AGC_RANGE		15
+
+/* manage.c */
+#define E1000_VFTA_ENTRY_SHIFT		5
+#define E1000_VFTA_ENTRY_MASK		0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK	0x1F
+
+#define E1000_HICR_EN			0x01  /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C			0x02
+#define E1000_HICR_FW_RESET_ENABLE	0x40
+#define E1000_HICR_FW_RESET		0x80
+
+#define E1000_FWSM_MODE_MASK		0xE
+#define E1000_FWSM_MODE_SHIFT		1
+
+#define E1000_MNG_IAMT_MODE		0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH	0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET	0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT	10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD	64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING	0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN	0x2
+
+/* nvm.c */
+#define E1000_STM_OPCODE  0xDB00
+
+#define E1000_KMRNCTRLSTA_OFFSET	0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT	16
+#define E1000_KMRNCTRLSTA_REN		0x00200000
+#define E1000_KMRNCTRLSTA_CTRL_OFFSET	0x1    /* Kumeran Control */
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET	0x3    /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS	0x4    /* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM	0x9    /* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE	0x0200 /* Kumeran IBIST Disable */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK	0x1000 /* Nearend Loopback mode */
+#define E1000_KMRNCTRLSTA_K1_CONFIG	0x7
+#define E1000_KMRNCTRLSTA_K1_ENABLE	0x0002
+#define E1000_KMRNCTRLSTA_HD_CTRL	0x10   /* Kumeran HD Control */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL	0x10
+#define IFE_PHY_SPECIAL_CONTROL		0x11 /* 100BaseTx PHY Special Control */
+#define IFE_PHY_SPECIAL_CONTROL_LED	0x1B /* PHY Special and LED Control */
+#define IFE_PHY_MDIX_CONTROL		0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED	0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE		0x0010
+#define IFE_PSC_FORCE_POLARITY			0x0020
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE		0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF		0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON		0x0007 /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS	0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX	0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX	0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
+
+#define E1000_CABLE_LENGTH_UNDEFINED	0xFF
+
+#define E1000_DEV_ID_82571EB_COPPER		0x105E
+#define E1000_DEV_ID_82571EB_FIBER		0x105F
+#define E1000_DEV_ID_82571EB_SERDES		0x1060
+#define E1000_DEV_ID_82571EB_QUAD_COPPER	0x10A4
+#define E1000_DEV_ID_82571PT_QUAD_COPPER	0x10D5
+#define E1000_DEV_ID_82571EB_QUAD_FIBER		0x10A5
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP	0x10BC
+#define E1000_DEV_ID_82571EB_SERDES_DUAL	0x10D9
+#define E1000_DEV_ID_82571EB_SERDES_QUAD	0x10DA
+#define E1000_DEV_ID_82572EI_COPPER		0x107D
+#define E1000_DEV_ID_82572EI_FIBER		0x107E
+#define E1000_DEV_ID_82572EI_SERDES		0x107F
+#define E1000_DEV_ID_82572EI			0x10B9
+#define E1000_DEV_ID_82573E			0x108B
+#define E1000_DEV_ID_82573E_IAMT		0x108C
+#define E1000_DEV_ID_82573L			0x109A
+#define E1000_DEV_ID_82574L			0x10D3
+#define E1000_DEV_ID_82574LA			0x10F6
+#define E1000_DEV_ID_82583V                     0x150C
+
+#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT	0x1096
+#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT	0x1098
+#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT	0x10BA
+#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT	0x10BB
+
+#define E1000_DEV_ID_ICH8_82567V_3		0x1501
+#define E1000_DEV_ID_ICH8_IGP_M_AMT		0x1049
+#define E1000_DEV_ID_ICH8_IGP_AMT		0x104A
+#define E1000_DEV_ID_ICH8_IGP_C			0x104B
+#define E1000_DEV_ID_ICH8_IFE			0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT		0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G			0x10C5
+#define E1000_DEV_ID_ICH8_IGP_M			0x104D
+#define E1000_DEV_ID_ICH9_IGP_AMT		0x10BD
+#define E1000_DEV_ID_ICH9_BM			0x10E5
+#define E1000_DEV_ID_ICH9_IGP_M_AMT		0x10F5
+#define E1000_DEV_ID_ICH9_IGP_M			0x10BF
+#define E1000_DEV_ID_ICH9_IGP_M_V		0x10CB
+#define E1000_DEV_ID_ICH9_IGP_C			0x294C
+#define E1000_DEV_ID_ICH9_IFE			0x10C0
+#define E1000_DEV_ID_ICH9_IFE_GT		0x10C3
+#define E1000_DEV_ID_ICH9_IFE_G			0x10C2
+#define E1000_DEV_ID_ICH10_R_BM_LM		0x10CC
+#define E1000_DEV_ID_ICH10_R_BM_LF		0x10CD
+#define E1000_DEV_ID_ICH10_R_BM_V		0x10CE
+#define E1000_DEV_ID_ICH10_D_BM_LM		0x10DE
+#define E1000_DEV_ID_ICH10_D_BM_LF		0x10DF
+#define E1000_DEV_ID_ICH10_D_BM_V		0x1525
+#define E1000_DEV_ID_PCH_M_HV_LM		0x10EA
+#define E1000_DEV_ID_PCH_M_HV_LC		0x10EB
+#define E1000_DEV_ID_PCH_D_HV_DM		0x10EF
+#define E1000_DEV_ID_PCH_D_HV_DC		0x10F0
+#define E1000_DEV_ID_PCH2_LV_LM			0x1502
+#define E1000_DEV_ID_PCH2_LV_V			0x1503
+#define E1000_DEV_ID_PCH_LPT_I217_LM		0x153A
+#define E1000_DEV_ID_PCH_LPT_I217_V		0x153B
+#define E1000_DEV_ID_PCH_LPTLP_I218_LM		0x155A
+#define E1000_DEV_ID_PCH_LPTLP_I218_V		0x1559
+
+#define E1000_REVISION_4 4
+
+#define E1000_FUNC_1 1
+
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0   0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1   3
+
+enum e1000_mac_type {
+	e1000_82571,
+	e1000_82572,
+	e1000_82573,
+	e1000_82574,
+	e1000_82583,
+	e1000_80003es2lan,
+	e1000_ich8lan,
+	e1000_ich9lan,
+	e1000_ich10lan,
+	e1000_pchlan,
+	e1000_pch2lan,
+	e1000_pch_lpt,
+};
+
+enum e1000_media_type {
+	e1000_media_type_unknown = 0,
+	e1000_media_type_copper = 1,
+	e1000_media_type_fiber = 2,
+	e1000_media_type_internal_serdes = 3,
+	e1000_num_media_types
+};
+
+enum e1000_nvm_type {
+	e1000_nvm_unknown = 0,
+	e1000_nvm_none,
+	e1000_nvm_eeprom_spi,
+	e1000_nvm_flash_hw,
+	e1000_nvm_flash_sw
+};
+
+enum e1000_nvm_override {
+	e1000_nvm_override_none = 0,
+	e1000_nvm_override_spi_small,
+	e1000_nvm_override_spi_large
+};
+
+enum e1000_phy_type {
+	e1000_phy_unknown = 0,
+	e1000_phy_none,
+	e1000_phy_m88,
+	e1000_phy_igp,
+	e1000_phy_igp_2,
+	e1000_phy_gg82563,
+	e1000_phy_igp_3,
+	e1000_phy_ife,
+	e1000_phy_bm,
+	e1000_phy_82578,
+	e1000_phy_82577,
+	e1000_phy_82579,
+	e1000_phy_i217,
+};
+
+enum e1000_bus_width {
+	e1000_bus_width_unknown = 0,
+	e1000_bus_width_pcie_x1,
+	e1000_bus_width_pcie_x2,
+	e1000_bus_width_pcie_x4 = 4,
+	e1000_bus_width_32,
+	e1000_bus_width_64,
+	e1000_bus_width_reserved
+};
+
+enum e1000_1000t_rx_status {
+	e1000_1000t_rx_status_not_ok = 0,
+	e1000_1000t_rx_status_ok,
+	e1000_1000t_rx_status_undefined = 0xFF
+};
+
+enum e1000_rev_polarity{
+	e1000_rev_polarity_normal = 0,
+	e1000_rev_polarity_reversed,
+	e1000_rev_polarity_undefined = 0xFF
+};
+
+enum e1000_fc_mode {
+	e1000_fc_none = 0,
+	e1000_fc_rx_pause,
+	e1000_fc_tx_pause,
+	e1000_fc_full,
+	e1000_fc_default = 0xFF
+};
+
+enum e1000_ms_type {
+	e1000_ms_hw_default = 0,
+	e1000_ms_force_master,
+	e1000_ms_force_slave,
+	e1000_ms_auto
+};
+
+enum e1000_smart_speed {
+	e1000_smart_speed_default = 0,
+	e1000_smart_speed_on,
+	e1000_smart_speed_off
+};
+
+enum e1000_serdes_link_state {
+	e1000_serdes_link_down = 0,
+	e1000_serdes_link_autoneg_progress,
+	e1000_serdes_link_autoneg_complete,
+	e1000_serdes_link_forced_up
+};
+
+/* Receive Descriptor */
+struct e1000_rx_desc {
+	__le64 buffer_addr; /* Address of the descriptor's data buffer */
+	__le16 length;      /* Length of data DMAed into data buffer */
+	__le16 csum;	/* Packet checksum */
+	u8  status;      /* Descriptor status */
+	u8  errors;      /* Descriptor Errors */
+	__le16 special;
+};
+
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+	struct {
+		__le64 buffer_addr;
+		__le64 reserved;
+	} read;
+	struct {
+		struct {
+			__le32 mrq;	      /* Multiple Rx Queues */
+			union {
+				__le32 rss;	    /* RSS Hash */
+				struct {
+					__le16 ip_id;  /* IP id */
+					__le16 csum;   /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			__le32 status_error;     /* ext status/error */
+			__le16 length;
+			__le16 vlan;	     /* VLAN tag */
+		} upper;
+	} wb;  /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+	struct {
+		/* one buffer for protocol header(s), three data buffers */
+		__le64 buffer_addr[MAX_PS_BUFFERS];
+	} read;
+	struct {
+		struct {
+			__le32 mrq;	      /* Multiple Rx Queues */
+			union {
+				__le32 rss;	      /* RSS Hash */
+				struct {
+					__le16 ip_id;    /* IP id */
+					__le16 csum;     /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			__le32 status_error;     /* ext status/error */
+			__le16 length0;	  /* length of buffer 0 */
+			__le16 vlan;	     /* VLAN tag */
+		} middle;
+		struct {
+			__le16 header_status;
+			__le16 length[3];	/* length of buffers 1-3 */
+		} upper;
+		__le64 reserved;
+	} wb; /* writeback */
+};
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+	__le64 buffer_addr;      /* Address of the descriptor's data buffer */
+	union {
+		__le32 data;
+		struct {
+			__le16 length;    /* Data buffer length */
+			u8 cso;	/* Checksum offset */
+			u8 cmd;	/* Descriptor control */
+		} flags;
+	} lower;
+	union {
+		__le32 data;
+		struct {
+			u8 status;     /* Descriptor status */
+			u8 css;	/* Checksum start */
+			__le16 special;
+		} fields;
+	} upper;
+};
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+	union {
+		__le32 ip_config;
+		struct {
+			u8 ipcss;      /* IP checksum start */
+			u8 ipcso;      /* IP checksum offset */
+			__le16 ipcse;     /* IP checksum end */
+		} ip_fields;
+	} lower_setup;
+	union {
+		__le32 tcp_config;
+		struct {
+			u8 tucss;      /* TCP checksum start */
+			u8 tucso;      /* TCP checksum offset */
+			__le16 tucse;     /* TCP checksum end */
+		} tcp_fields;
+	} upper_setup;
+	__le32 cmd_and_length;
+	union {
+		__le32 data;
+		struct {
+			u8 status;     /* Descriptor status */
+			u8 hdr_len;    /* Header length */
+			__le16 mss;       /* Maximum segment size */
+		} fields;
+	} tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+	__le64 buffer_addr;   /* Address of the descriptor's buffer address */
+	union {
+		__le32 data;
+		struct {
+			__le16 length;    /* Data buffer length */
+			u8 typ_len_ext;
+			u8 cmd;
+		} flags;
+	} lower;
+	union {
+		__le32 data;
+		struct {
+			u8 status;     /* Descriptor status */
+			u8 popts;      /* Packet Options */
+			__le16 special;
+		} fields;
+	} upper;
+};
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+	u64 crcerrs;
+	u64 algnerrc;
+	u64 symerrs;
+	u64 rxerrc;
+	u64 mpc;
+	u64 scc;
+	u64 ecol;
+	u64 mcc;
+	u64 latecol;
+	u64 colc;
+	u64 dc;
+	u64 tncrs;
+	u64 sec;
+	u64 cexterr;
+	u64 rlec;
+	u64 xonrxc;
+	u64 xontxc;
+	u64 xoffrxc;
+	u64 xofftxc;
+	u64 fcruc;
+	u64 prc64;
+	u64 prc127;
+	u64 prc255;
+	u64 prc511;
+	u64 prc1023;
+	u64 prc1522;
+	u64 gprc;
+	u64 bprc;
+	u64 mprc;
+	u64 gptc;
+	u64 gorc;
+	u64 gotc;
+	u64 rnbc;
+	u64 ruc;
+	u64 rfc;
+	u64 roc;
+	u64 rjc;
+	u64 mgprc;
+	u64 mgpdc;
+	u64 mgptc;
+	u64 tor;
+	u64 tot;
+	u64 tpr;
+	u64 tpt;
+	u64 ptc64;
+	u64 ptc127;
+	u64 ptc255;
+	u64 ptc511;
+	u64 ptc1023;
+	u64 ptc1522;
+	u64 mptc;
+	u64 bptc;
+	u64 tsctc;
+	u64 tsctfc;
+	u64 iac;
+	u64 icrxptc;
+	u64 icrxatc;
+	u64 ictxptc;
+	u64 ictxatc;
+	u64 ictxqec;
+	u64 ictxqmtc;
+	u64 icrxdmtc;
+	u64 icrxoc;
+};
+
+struct e1000_phy_stats {
+	u32 idle_errors;
+	u32 receive_errors;
+};
+
+struct e1000_host_mng_dhcp_cookie {
+	u32 signature;
+	u8  status;
+	u8  reserved0;
+	u16 vlan_id;
+	u32 reserved1;
+	u16 reserved2;
+	u8  reserved3;
+	u8  checksum;
+};
+
+/* Host Interface "Rev 1" */
+struct e1000_host_command_header {
+	u8 command_id;
+	u8 command_length;
+	u8 command_options;
+	u8 checksum;
+};
+
+#define E1000_HI_MAX_DATA_LENGTH     252
+struct e1000_host_command_info {
+	struct e1000_host_command_header command_header;
+	u8 command_data[E1000_HI_MAX_DATA_LENGTH];
+};
+
+/* Host Interface "Rev 2" */
+struct e1000_host_mng_command_header {
+	u8  command_id;
+	u8  checksum;
+	u16 reserved1;
+	u16 reserved2;
+	u16 command_length;
+};
+
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+struct e1000_host_mng_command_info {
+	struct e1000_host_mng_command_header command_header;
+	u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
+};
+
+/* Function pointers and static data for the MAC. */
+struct e1000_mac_operations {
+	s32  (*id_led_init)(struct e1000_hw *);
+	s32  (*blink_led)(struct e1000_hw *);
+	bool (*check_mng_mode)(struct e1000_hw *);
+	s32  (*check_for_link)(struct e1000_hw *);
+	s32  (*cleanup_led)(struct e1000_hw *);
+	void (*clear_hw_cntrs)(struct e1000_hw *);
+	void (*clear_vfta)(struct e1000_hw *);
+	s32  (*get_bus_info)(struct e1000_hw *);
+	void (*set_lan_id)(struct e1000_hw *);
+	s32  (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
+	s32  (*led_on)(struct e1000_hw *);
+	s32  (*led_off)(struct e1000_hw *);
+	void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
+	s32  (*reset_hw)(struct e1000_hw *);
+	s32  (*init_hw)(struct e1000_hw *);
+	s32  (*setup_link)(struct e1000_hw *);
+	s32  (*setup_physical_interface)(struct e1000_hw *);
+	s32  (*setup_led)(struct e1000_hw *);
+	void (*write_vfta)(struct e1000_hw *, u32, u32);
+	void (*config_collision_dist)(struct e1000_hw *);
+	void (*rar_set)(struct e1000_hw *, u8 *, u32);
+	s32  (*read_mac_addr)(struct e1000_hw *);
+};
+
+/*
+ * When to use various PHY register access functions:
+ *
+ *                 Func   Caller
+ *   Function      Does   Does    When to use
+ *   ~~~~~~~~~~~~  ~~~~~  ~~~~~~  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *   X_reg         L,P,A  n/a     for simple PHY reg accesses
+ *   X_reg_locked  P,A    L       for multiple accesses of different regs
+ *                                on different pages
+ *   X_reg_page    A      L,P     for multiple accesses of different regs
+ *                                on the same page
+ *
+ * Where X=[read|write], L=locking, P=sets page, A=register access
+ *
+ */
+struct e1000_phy_operations {
+	s32  (*acquire)(struct e1000_hw *);
+	s32  (*cfg_on_link_up)(struct e1000_hw *);
+	s32  (*check_polarity)(struct e1000_hw *);
+	s32  (*check_reset_block)(struct e1000_hw *);
+	s32  (*commit)(struct e1000_hw *);
+	s32  (*force_speed_duplex)(struct e1000_hw *);
+	s32  (*get_cfg_done)(struct e1000_hw *hw);
+	s32  (*get_cable_length)(struct e1000_hw *);
+	s32  (*get_info)(struct e1000_hw *);
+	s32  (*set_page)(struct e1000_hw *, u16);
+	s32  (*read_reg)(struct e1000_hw *, u32, u16 *);
+	s32  (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
+	s32  (*read_reg_page)(struct e1000_hw *, u32, u16 *);
+	void (*release)(struct e1000_hw *);
+	s32  (*reset)(struct e1000_hw *);
+	s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
+	s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
+	s32  (*write_reg)(struct e1000_hw *, u32, u16);
+	s32  (*write_reg_locked)(struct e1000_hw *, u32, u16);
+	s32  (*write_reg_page)(struct e1000_hw *, u32, u16);
+	void (*power_up)(struct e1000_hw *);
+	void (*power_down)(struct e1000_hw *);
+};
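As a concrete reading of the table above, here is a hedged sketch of the
"X_reg_locked" case, where the caller does the locking (L) and the accessor
sets the page and performs the access (P,A). The ops members used (acquire,
read_reg_locked, release) are the hooks declared in struct e1000_phy_operations
just above; the caller shape and error handling are illustrative assumptions:

	u16 pcfg, pm;
	s32 ret;

	ret = hw->phy.ops.acquire(hw);		/* L: take the PHY lock once */
	if (ret)
		return ret;
	ret = hw->phy.ops.read_reg_locked(hw, IGP01E1000_PHY_PORT_CONFIG, &pcfg);
	if (!ret)
		ret = hw->phy.ops.read_reg_locked(hw, IGP02E1000_PHY_POWER_MGMT, &pm);
	hw->phy.ops.release(hw);		/* release after all accesses are done */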
+
+/* Function pointers for the NVM. */
+struct e1000_nvm_operations {
+	s32  (*acquire)(struct e1000_hw *);
+	s32  (*read)(struct e1000_hw *, u16, u16, u16 *);
+	void (*release)(struct e1000_hw *);
+	s32  (*update)(struct e1000_hw *);
+	s32  (*valid_led_default)(struct e1000_hw *, u16 *);
+	s32  (*validate)(struct e1000_hw *);
+	s32  (*write)(struct e1000_hw *, u16, u16, u16 *);
+};
+
+struct e1000_mac_info {
+	struct e1000_mac_operations ops;
+	u8 addr[ETH_ALEN];
+	u8 perm_addr[ETH_ALEN];
+
+	enum e1000_mac_type type;
+
+	u32 collision_delta;
+	u32 ledctl_default;
+	u32 ledctl_mode1;
+	u32 ledctl_mode2;
+	u32 mc_filter_type;
+	u32 tx_packet_delta;
+	u32 txcw;
+
+	u16 current_ifs_val;
+	u16 ifs_max_val;
+	u16 ifs_min_val;
+	u16 ifs_ratio;
+	u16 ifs_step_size;
+	u16 mta_reg_count;
+
+	/* Maximum size of the MTA register table in all supported adapters */
+	#define MAX_MTA_REG 128
+	u32 mta_shadow[MAX_MTA_REG];
+	u16 rar_entry_count;
+
+	u8  forced_speed_duplex;
+
+	bool adaptive_ifs;
+	bool has_fwsm;
+	bool arc_subsystem_valid;
+	bool autoneg;
+	bool autoneg_failed;
+	bool get_link_status;
+	bool in_ifs_mode;
+	bool serdes_has_link;
+	bool tx_pkt_filtering;
+	enum e1000_serdes_link_state serdes_link_state;
+};
+
+struct e1000_phy_info {
+	struct e1000_phy_operations ops;
+
+	enum e1000_phy_type type;
+
+	enum e1000_1000t_rx_status local_rx;
+	enum e1000_1000t_rx_status remote_rx;
+	enum e1000_ms_type ms_type;
+	enum e1000_ms_type original_ms_type;
+	enum e1000_rev_polarity cable_polarity;
+	enum e1000_smart_speed smart_speed;
+
+	u32 addr;
+	u32 id;
+	u32 reset_delay_us; /* in usec */
+	u32 revision;
+
+	enum e1000_media_type media_type;
+
+	u16 autoneg_advertised;
+	u16 autoneg_mask;
+	u16 cable_length;
+	u16 max_cable_length;
+	u16 min_cable_length;
+
+	u8 mdix;
+
+	bool disable_polarity_correction;
+	bool is_mdix;
+	bool polarity_correction;
+	bool speed_downgraded;
+	bool autoneg_wait_to_complete;
+};
+
+struct e1000_nvm_info {
+	struct e1000_nvm_operations ops;
+
+	enum e1000_nvm_type type;
+	enum e1000_nvm_override override;
+
+	u32 flash_bank_size;
+	u32 flash_base_addr;
+
+	u16 word_size;
+	u16 delay_usec;
+	u16 address_bits;
+	u16 opcode_bits;
+	u16 page_size;
+};
+
+struct e1000_bus_info {
+	enum e1000_bus_width width;
+
+	u16 func;
+};
+
+struct e1000_fc_info {
+	u32 high_water;          /* Flow control high-water mark */
+	u32 low_water;           /* Flow control low-water mark */
+	u16 pause_time;          /* Flow control pause timer */
+	u16 refresh_time;        /* Flow control refresh timer */
+	bool send_xon;           /* Flow control send XON */
+	bool strict_ieee;        /* Strict IEEE mode */
+	enum e1000_fc_mode current_mode; /* FC mode in effect */
+	enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+struct e1000_dev_spec_82571 {
+	bool laa_is_present;
+	u32 smb_counter;
+};
+
+struct e1000_dev_spec_80003es2lan {
+	bool  mdic_wa_enable;
+};
+
+struct e1000_shadow_ram {
+	u16  value;
+	bool modified;
+};
+
+#define E1000_ICH8_SHADOW_RAM_WORDS		2048
+
+struct e1000_dev_spec_ich8lan {
+	bool kmrn_lock_loss_workaround_enabled;
+	struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
+	bool nvm_k1_enabled;
+	bool eee_disable;
+	u16 eee_lp_ability;
+};
+
+struct e1000_hw {
+	struct e1000_adapter *adapter;
+
+	u8 __iomem *hw_addr;
+	u8 __iomem *flash_address;
+
+	struct e1000_mac_info  mac;
+	struct e1000_fc_info   fc;
+	struct e1000_phy_info  phy;
+	struct e1000_nvm_info  nvm;
+	struct e1000_bus_info  bus;
+	struct e1000_host_mng_dhcp_cookie mng_cookie;
+
+	union {
+		struct e1000_dev_spec_82571	e82571;
+		struct e1000_dev_spec_80003es2lan e80003es2lan;
+		struct e1000_dev_spec_ich8lan	ich8lan;
+	} dev_spec;
+};
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/e1000e/netdev.c	2022-03-21 12:58:29.580887099 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000e/phy.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/version.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/aer.h>
+#include <linux/prefetch.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+#include <linux/pci-aspm.h>
+#endif
+
+#include "e1000.h"
+
+#define RT_E1000E_NUM_RXD	64
+
+#define DRV_EXTRAVERSION "-k-rt"
+
+#define DRV_VERSION "1.5.1" DRV_EXTRAVERSION
+char e1000e_driver_name[] = "rt_e1000e";
+const char e1000e_driver_version[] = DRV_VERSION;
+
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
+
+static const struct e1000_info *e1000_info_tbl[] = {
+	[board_82571]		= &e1000_82571_info,
+	[board_82572]		= &e1000_82572_info,
+	[board_82573]		= &e1000_82573_info,
+	[board_82574]		= &e1000_82574_info,
+	[board_82583]		= &e1000_82583_info,
+	[board_80003es2lan]	= &e1000_es2_info,
+	[board_ich8lan]		= &e1000_ich8_info,
+	[board_ich9lan]		= &e1000_ich9_info,
+	[board_ich10lan]	= &e1000_ich10_info,
+	[board_pchlan]		= &e1000_pch_info,
+	[board_pch2lan]		= &e1000_pch2_info,
+	[board_pch_lpt]		= &e1000_pch_lpt_info,
+};
+
+struct e1000_reg_info {
+	u32 ofs;
+	char *name;
+};
+
+#define E1000_RDFH	0x02410	/* Rx Data FIFO Head - RW */
+#define E1000_RDFT	0x02418	/* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS	0x02420	/* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS	0x02428	/* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC	0x02430	/* Rx Data FIFO Packet Count - RW */
+
+#define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
+#define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS	0x03428	/* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC	0x03430	/* Tx Data FIFO Packet Count - RW */
+
+static const struct e1000_reg_info e1000_reg_info_tbl[] = {
+
+	/* General Registers */
+	{E1000_CTRL, "CTRL"},
+	{E1000_STATUS, "STATUS"},
+	{E1000_CTRL_EXT, "CTRL_EXT"},
+
+	/* Interrupt Registers */
+	{E1000_ICR, "ICR"},
+
+	/* Rx Registers */
+	{E1000_RCTL, "RCTL"},
+	{E1000_RDLEN, "RDLEN"},
+	{E1000_RDH, "RDH"},
+	{E1000_RDT, "RDT"},
+	{E1000_RDTR, "RDTR"},
+	{E1000_RXDCTL(0), "RXDCTL"},
+	{E1000_ERT, "ERT"},
+	{E1000_RDBAL, "RDBAL"},
+	{E1000_RDBAH, "RDBAH"},
+	{E1000_RDFH, "RDFH"},
+	{E1000_RDFT, "RDFT"},
+	{E1000_RDFHS, "RDFHS"},
+	{E1000_RDFTS, "RDFTS"},
+	{E1000_RDFPC, "RDFPC"},
+
+	/* Tx Registers */
+	{E1000_TCTL, "TCTL"},
+	{E1000_TDBAL, "TDBAL"},
+	{E1000_TDBAH, "TDBAH"},
+	{E1000_TDLEN, "TDLEN"},
+	{E1000_TDH, "TDH"},
+	{E1000_TDT, "TDT"},
+	{E1000_TIDV, "TIDV"},
+	{E1000_TXDCTL(0), "TXDCTL"},
+	{E1000_TADV, "TADV"},
+	{E1000_TARC(0), "TARC"},
+	{E1000_TDFH, "TDFH"},
+	{E1000_TDFT, "TDFT"},
+	{E1000_TDFHS, "TDFHS"},
+	{E1000_TDFTS, "TDFTS"},
+	{E1000_TDFPC, "TDFPC"},
+
+	/* List Terminator */
+	{}
+};
+
+/*
+ * e1000_regdump - register printout routine
+ */
+static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
+{
+	int n = 0;
+	char rname[16];
+	u32 regs[8];
+
+	switch (reginfo->ofs) {
+	case E1000_RXDCTL(0):
+		for (n = 0; n < 2; n++)
+			regs[n] = __er32(hw, E1000_RXDCTL(n));
+		break;
+	case E1000_TXDCTL(0):
+		for (n = 0; n < 2; n++)
+			regs[n] = __er32(hw, E1000_TXDCTL(n));
+		break;
+	case E1000_TARC(0):
+		for (n = 0; n < 2; n++)
+			regs[n] = __er32(hw, E1000_TARC(n));
+		break;
+	default:
+		printk(KERN_INFO "%-15s %08x\n",
+		       reginfo->name, __er32(hw, reginfo->ofs));
+		return;
+	}
+
+	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
+	printk(KERN_INFO "%-15s ", rname);
+	for (n = 0; n < 2; n++)
+		printk(KERN_CONT "%08x ", regs[n]);
+	printk(KERN_CONT "\n");
+}
+
+/*
+ * e1000e_dump - Print registers, Tx-ring and Rx-ring
+ */
+static void e1000e_dump(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_reg_info *reginfo;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct e1000_tx_desc *tx_desc;
+	struct my_u0 {
+		u64 a;
+		u64 b;
+	} *u0;
+	struct e1000_buffer *buffer_info;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	union e1000_rx_desc_packet_split *rx_desc_ps;
+	union e1000_rx_desc_extended *rx_desc;
+	struct my_u1 {
+		u64 a;
+		u64 b;
+		u64 c;
+		u64 d;
+	} *u1;
+	u32 staterr;
+	int i = 0;
+
+	if (!netif_msg_hw(adapter))
+		return;
+
+	/* Print netdevice Info */
+	if (netdev) {
+		dev_info(&adapter->pdev->dev, "Net device Info\n");
+		printk(KERN_INFO "Device Name     state            "
+		       "trans_start      last_rx\n");
+		printk(KERN_INFO "%-15s\n", netdev->name);
+	}
+
+	/* Print Registers */
+	dev_info(&adapter->pdev->dev, "Register Dump\n");
+	printk(KERN_INFO " Register Name   Value\n");
+	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
+	     reginfo->name; reginfo++) {
+		e1000_regdump(hw, reginfo);
+	}
+
+	/* Print Tx Ring Summary */
+	if (!netdev || !rtnetif_running(netdev))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
+	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
+	       " leng ntw timestamp\n");
+	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+	       0, tx_ring->next_to_use, tx_ring->next_to_clean,
+	       (unsigned long long)buffer_info->dma,
+	       buffer_info->length,
+	       buffer_info->next_to_watch,
+	       (unsigned long long)buffer_info->time_stamp);
+
+	/* Print Tx Ring */
+	if (!netif_msg_tx_done(adapter))
+		goto rx_ring_summary;
+
+	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
+
+	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
+	 *
+	 * Legacy Transmit Descriptor
+	 *   +--------------------------------------------------------------+
+	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
+	 *   +--------------------------------------------------------------+
+	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
+	 *   +--------------------------------------------------------------+
+	 *   63       48 47        36 35    32 31     24 23    16 15        0
+	 *
+	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
+	 *   63      48 47    40 39       32 31             16 15    8 7      0
+	 *   +----------------------------------------------------------------+
+	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
+	 *   +----------------------------------------------------------------+
+	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
+	 *   +----------------------------------------------------------------+
+	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
+	 *
+	 * Extended Data Descriptor (DTYP=0x1)
+	 *   +----------------------------------------------------------------+
+	 * 0 |                     Buffer Address [63:0]                      |
+	 *   +----------------------------------------------------------------+
+	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
+	 *   +----------------------------------------------------------------+
+	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
+	 */
+	printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
+	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
+	       "<-- Legacy format\n");
+	printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
+	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
+	       "<-- Ext Context format\n");
+	printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
+	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
+	       "<-- Ext Data format\n");
+	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+		tx_desc = E1000_TX_DESC(*tx_ring, i);
+		buffer_info = &tx_ring->buffer_info[i];
+		u0 = (struct my_u0 *)tx_desc;
+		printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
+		       "%04X  %3X %016llX %p",
+		       (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+			((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
+		       (unsigned long long)le64_to_cpu(u0->a),
+		       (unsigned long long)le64_to_cpu(u0->b),
+		       (unsigned long long)buffer_info->dma,
+		       buffer_info->length, buffer_info->next_to_watch,
+		       (unsigned long long)buffer_info->time_stamp,
+		       buffer_info->skb);
+		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
+			printk(KERN_CONT " NTC/U\n");
+		else if (i == tx_ring->next_to_use)
+			printk(KERN_CONT " NTU\n");
+		else if (i == tx_ring->next_to_clean)
+			printk(KERN_CONT " NTC\n");
+		else
+			printk(KERN_CONT "\n");
+
+		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+				       16, 1, phys_to_virt(buffer_info->dma),
+				       buffer_info->length, true);
+	}
+
+	/* Print Rx Ring Summary */
+rx_ring_summary:
+	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
+	printk(KERN_INFO "Queue [NTU] [NTC]\n");
+	printk(KERN_INFO " %5d %5X %5X\n", 0,
+	       rx_ring->next_to_use, rx_ring->next_to_clean);
+
+	/* Print Rx Ring */
+	if (!netif_msg_rx_status(adapter))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
+	switch (adapter->rx_ps_pages) {
+	case 1:
+	case 2:
+	case 3:
+		/* [Extended] Packet Split Receive Descriptor Format
+		 *
+		 *    +-----------------------------------------------------+
+		 *  0 |                Buffer Address 0 [63:0]              |
+		 *    +-----------------------------------------------------+
+		 *  8 |                Buffer Address 1 [63:0]              |
+		 *    +-----------------------------------------------------+
+		 * 16 |                Buffer Address 2 [63:0]              |
+		 *    +-----------------------------------------------------+
+		 * 24 |                Buffer Address 3 [63:0]              |
+		 *    +-----------------------------------------------------+
+		 */
+		printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
+		       "[buffer 1 63:0 ] "
+		       "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
+		       "[bi->skb] <-- Ext Pkt Split format\n");
+		/* [Extended] Receive Descriptor (Write-Back) Format
+		 *
+		 *   63       48 47    32 31     13 12    8 7    4 3        0
+		 *   +------------------------------------------------------+
+		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
+		 *   | Checksum | Ident  |         | Queue |      |  Type   |
+		 *   +------------------------------------------------------+
+		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+		 *   +------------------------------------------------------+
+		 *   63       48 47    32 31            20 19               0
+		 */
+		printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
+		       "[vl   l0 ee  es] "
+		       "[ l3  l2  l1 hs] [reserved      ] ---------------- "
+		       "[bi->skb] <-- Ext Rx Write-Back format\n");
+		for (i = 0; i < rx_ring->count; i++) {
+			buffer_info = &rx_ring->buffer_info[i];
+			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
+			u1 = (struct my_u1 *)rx_desc_ps;
+			staterr =
+			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+			if (staterr & E1000_RXD_STAT_DD) {
+				/* Descriptor Done */
+				printk(KERN_INFO "RWB[0x%03X]     %016llX "
+				       "%016llX %016llX %016llX "
+				       "---------------- %p", i,
+				       (unsigned long long)le64_to_cpu(u1->a),
+				       (unsigned long long)le64_to_cpu(u1->b),
+				       (unsigned long long)le64_to_cpu(u1->c),
+				       (unsigned long long)le64_to_cpu(u1->d),
+				       buffer_info->skb);
+			} else {
+				printk(KERN_INFO "R  [0x%03X]     %016llX "
+				       "%016llX %016llX %016llX %016llX %p", i,
+				       (unsigned long long)le64_to_cpu(u1->a),
+				       (unsigned long long)le64_to_cpu(u1->b),
+				       (unsigned long long)le64_to_cpu(u1->c),
+				       (unsigned long long)le64_to_cpu(u1->d),
+				       (unsigned long long)buffer_info->dma,
+				       buffer_info->skb);
+
+				if (netif_msg_pktdata(adapter))
+					print_hex_dump(KERN_INFO, "",
+						DUMP_PREFIX_ADDRESS, 16, 1,
+						phys_to_virt(buffer_info->dma),
+						adapter->rx_ps_bsize0, true);
+			}
+
+			if (i == rx_ring->next_to_use)
+				printk(KERN_CONT " NTU\n");
+			else if (i == rx_ring->next_to_clean)
+				printk(KERN_CONT " NTC\n");
+			else
+				printk(KERN_CONT "\n");
+		}
+		break;
+	default:
+	case 0:
+		/* Extended Receive Descriptor (Read) Format
+		 *
+		 *   +-----------------------------------------------------+
+		 * 0 |                Buffer Address [63:0]                |
+		 *   +-----------------------------------------------------+
+		 * 8 |                      Reserved                       |
+		 *   +-----------------------------------------------------+
+		 */
+		printk(KERN_INFO "R  [desc]      [buf addr 63:0 ] "
+		       "[reserved 63:0 ] [bi->dma       ] "
+		       "[bi->skb] <-- Ext (Read) format\n");
+		/* Extended Receive Descriptor (Write-Back) Format
+		 *
+		 *   63       48 47    32 31    24 23            4 3        0
+		 *   +------------------------------------------------------+
+		 *   |     RSS Hash      |        |               |         |
+		 * 0 +-------------------+  Rsvd  |   Reserved    | MRQ RSS |
+		 *   | Packet   | IP     |        |               |  Type   |
+		 *   | Checksum | Ident  |        |               |         |
+		 *   +------------------------------------------------------+
+		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+		 *   +------------------------------------------------------+
+		 *   63       48 47    32 31            20 19               0
+		 */
+		printk(KERN_INFO "RWB[desc]      [cs ipid    mrq] "
+		       "[vt   ln xe  xs] "
+		       "[bi->skb] <-- Ext (Write-Back) format\n");
+
+		for (i = 0; i < rx_ring->count; i++) {
+			buffer_info = &rx_ring->buffer_info[i];
+			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+			u1 = (struct my_u1 *)rx_desc;
+			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+			if (staterr & E1000_RXD_STAT_DD) {
+				/* Descriptor Done */
+				printk(KERN_INFO "RWB[0x%03X]     %016llX "
+				       "%016llX ---------------- %p", i,
+				       (unsigned long long)le64_to_cpu(u1->a),
+				       (unsigned long long)le64_to_cpu(u1->b),
+				       buffer_info->skb);
+			} else {
+				printk(KERN_INFO "R  [0x%03X]     %016llX "
+				       "%016llX %016llX %p", i,
+				       (unsigned long long)le64_to_cpu(u1->a),
+				       (unsigned long long)le64_to_cpu(u1->b),
+				       (unsigned long long)buffer_info->dma,
+				       buffer_info->skb);
+
+				if (netif_msg_pktdata(adapter))
+					print_hex_dump(KERN_INFO, "",
+						       DUMP_PREFIX_ADDRESS, 16,
+						       1,
+						       phys_to_virt
+						       (buffer_info->dma),
+						       adapter->rx_buffer_len,
+						       true);
+			}
+
+			if (i == rx_ring->next_to_use)
+				printk(KERN_CONT " NTU\n");
+			else if (i == rx_ring->next_to_clean)
+				printk(KERN_CONT " NTC\n");
+			else
+				printk(KERN_CONT "\n");
+		}
+	}
+
+exit:
+	return;
+}
+
+void e1000e_mod_watchdog_timer(rtdm_nrtsig_t *nrt_sig, void *data)
+{
+	struct timer_list *timer = data;
+
+	mod_timer(timer, jiffies + 1);
+}
+
+void e1000e_trigger_downshift(rtdm_nrtsig_t *nrt_sig, void *data)
+{
+	struct work_struct *downshift_task = data;
+
+	schedule_work(downshift_task);
+}
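Both helpers above are non-real-time signal handlers: mod_timer() and
schedule_work() are Linux-domain services that must not be called from the
Cobalt real-time interrupt path, so the RT side pends a signal and the kernel
later runs these callbacks in-band. A hedged sketch of the wiring (the field
names watchdog_nrtsig and watchdog_timer are hypothetical placeholders; only
rtdm_nrtsig_init()/rtdm_nrtsig_pend() are assumed from the RTDM API):

	/* Once, in non-RT setup code (e.g. probe/open): */
	rtdm_nrtsig_init(&adapter->watchdog_nrtsig,
			 e1000e_mod_watchdog_timer, &adapter->watchdog_timer);

	/* From the real-time interrupt handler, defer the timer update: */
	rtdm_nrtsig_pend(&adapter->watchdog_nrtsig);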
+
+/**
+ * e1000_desc_unused - calculate the number of unused descriptors
+ **/
+static int e1000_desc_unused(struct e1000_ring *ring)
+{
+	if (ring->next_to_clean > ring->next_to_use)
+		return ring->next_to_clean - ring->next_to_use - 1;
+
+	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+}
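A quick numeric check of the ring arithmetic above (values chosen purely for
illustration): with count = 64, next_to_clean = 10 and next_to_use = 50, the
second branch returns 64 + 10 - 50 - 1 = 23 unused slots; with next_to_clean = 50
and next_to_use = 10, the first branch returns 50 - 10 - 1 = 39. The trailing
"- 1" keeps one descriptor permanently unused so that a completely full ring is
never mistaken for an empty one.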
+
+/**
+ * e1000_rx_checksum - Receive Checksum Offload
+ * @adapter:     board private structure
+ * @status_err:  receive descriptor status and error fields
+ * @csum:	receive descriptor csum field
+ * @skb:         socket buffer with received data
+ **/
+static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
+			      u32 csum, struct rtskb *skb)
+{
+	u16 status = (u16)status_err;
+	u8 errors = (u8)(status_err >> 24);
+
+	/* Ignore Checksum bit is set */
+	if (status & E1000_RXD_STAT_IXSM)
+		return;
+	/* TCP/UDP checksum error bit is set */
+	if (errors & E1000_RXD_ERR_TCPE) {
+		/* let the stack verify checksum errors */
+		adapter->hw_csum_err++;
+		return;
+	}
+
+	/* TCP/UDP Checksum has not been calculated */
+	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+		return;
+
+	/* It must be a TCP or UDP packet with a valid checksum */
+	if (status & E1000_RXD_STAT_TCPCS) {
+		/* TCP checksum is good */
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else {
+		/*
+		 * IP fragment with UDP payload
+		 * Hardware complements the payload checksum, so we undo it
+		 * and then put the value in host order for further stack use.
+		 */
+		__sum16 sum = (__force __sum16)htons(csum);
+		skb->csum = csum_unfold(~sum);
+		skb->ip_summed = CHECKSUM_COMPLETE;
+	}
+	adapter->hw_csum_good++;
+}
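The CHECKSUM_COMPLETE branch relies on one's-complement arithmetic: the
hardware reports the bitwise complement of the payload checksum, and since
x + ~x = 0xFFFF in that arithmetic, complementing the reported value recovers
the original sum (for example, a reported 0xE3B9 complements back to 0x1C46);
csum_unfold() then converts it into the unfolded __wsum the stack expects.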
+
+/**
+ * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
+ * @hw: pointer to the HW structure
+ * @tail: address of tail descriptor register
+ * @i: value to write to tail descriptor register
+ *
+ * When updating the tail register, the ME could be accessing Host CSR
+ * registers at the same time.  Normally, this is handled in h/w by an
+ * arbiter but on some parts there is a bug that acknowledges Host accesses
+ * later than it should, which could result in the descriptor register
+ * holding an incorrect value.  Work around this by checking the FWSM
+ * register, which has bit 24 set while the ME is accessing Host CSR
+ * registers; if it is set, wait and then try again a number of times.
+ **/
+static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail,
+					unsigned int i)
+{
+	unsigned int j = 0;
+
+	while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
+	       (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
+		udelay(50);
+
+	writel(i, tail);
+
+	if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
+		return E1000_ERR_SWFW_SYNC;
+
+	return 0;
+}
+
+static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i)
+{
+	u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail);
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (e1000e_update_tail_wa(hw, tail, i)) {
+		u32 rctl = er32(RCTL);
+		ew32(RCTL, rctl & ~E1000_RCTL_EN);
+		e_err("ME firmware caused invalid RDT - resetting\n");
+		rtdm_schedule_nrt_work(&adapter->reset_task);
+	}
+}
+
+static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
+{
+	u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail);
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (e1000e_update_tail_wa(hw, tail, i)) {
+		u32 tctl = er32(TCTL);
+		ew32(TCTL, tctl & ~E1000_TCTL_EN);
+		e_err("ME firmware caused invalid TDT - resetting\n");
+		rtdm_schedule_nrt_work(&adapter->reset_task);
+	}
+}
+
+/**
+ * e1000_alloc_rx_buffers - Replace used receive buffers
+ * @adapter: address of board private structure
+ **/
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+				   int cleaned_count, gfp_t gfp)
+{
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	union e1000_rx_desc_extended *rx_desc;
+	struct e1000_buffer *buffer_info;
+	struct rtskb *skb;
+	unsigned int i;
+	unsigned int bufsz = adapter->rx_buffer_len;
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (cleaned_count--) {
+		skb = buffer_info->skb;
+		if (skb) {
+			rtskb_trim(skb, 0);
+			goto map_skb;
+		}
+
+		skb = rtnetdev_alloc_rtskb(adapter->netdev, bufsz);
+		if (!skb) {
+			/* Better luck next round */
+			adapter->alloc_rx_buff_failed++;
+			break;
+		}
+		rtskb_reserve(skb, NET_IP_ALIGN);
+
+		buffer_info->skb = skb;
+map_skb:
+		buffer_info->dma = rtskb_data_dma_addr(skb, 0);
+
+		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
+
+		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+			/*
+			 * Force memory writes to complete before letting h/w
+			 * know there are new descriptors to fetch.  (Only
+			 * applicable for weak-ordered memory model archs,
+			 * such as IA-64).
+			 */
+			wmb();
+			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+				e1000e_update_rdt_wa(adapter, i);
+			else
+				writel(i, adapter->hw.hw_addr + rx_ring->tail);
+		}
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+		buffer_info = &rx_ring->buffer_info[i];
+	}
+
+	rx_ring->next_to_use = i;
+}
+
+/**
+ * e1000_clean_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ *
+ * The return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned.
+ **/
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+			       nanosecs_abs_t *time_stamp)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	union e1000_rx_desc_extended *rx_desc, *next_rxd;
+	struct e1000_buffer *buffer_info, *next_buffer;
+	u32 length, staterr;
+	unsigned int i;
+	int cleaned_count = 0;
+	bool data_received = false;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (staterr & E1000_RXD_STAT_DD) {
+		struct rtskb *skb;
+
+		rmb();	/* read descriptor and rx_buffer_info after status DD */
+
+		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
+
+		prefetch(skb->data - NET_IP_ALIGN);
+
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
+		prefetch(next_rxd);
+
+		next_buffer = &rx_ring->buffer_info[i];
+
+		cleaned_count++;
+		buffer_info->dma = 0;
+
+		length = le16_to_cpu(rx_desc->wb.upper.length);
+
+		/*
+		 * !EOP means multiple descriptors were used to store a single
+		 * packet; if that's the case we need to toss it.  In fact, we
+		 * need to toss every packet with the EOP bit clear and the
+		 * next frame that _does_ have the EOP bit set, as the latter
+		 * is by definition only a frame fragment.
+		 */
+		if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
+			adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
+			/* All receives must fit into a single buffer */
+			e_dbg("Receive packet consumed multiple buffers\n");
+			/* recycle */
+			buffer_info->skb = skb;
+			if (staterr & E1000_RXD_STAT_EOP)
+				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
+			goto next_desc;
+		}
+
+		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
+			/* recycle */
+			buffer_info->skb = skb;
+			goto next_desc;
+		}
+
+		/* adjust length to remove Ethernet CRC */
+		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
+			length -= 4;
+
+		total_rx_bytes += length;
+		total_rx_packets++;
+
+		rtskb_put(skb, length);
+
+		/* Receive Checksum Offload */
+		e1000_rx_checksum(adapter, staterr,
+				  le16_to_cpu(rx_desc->wb.lower.hi_dword.
+					      csum_ip.csum), skb);
+
+		skb->protocol = rt_eth_type_trans(skb, netdev);
+		skb->time_stamp = *time_stamp;
+		rtnetif_rx(skb);
+		data_received = true;
+
+next_desc:
+		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
+			adapter->alloc_rx_buf(adapter, cleaned_count,
+					      GFP_ATOMIC);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+
+		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	}
+	rx_ring->next_to_clean = i;
+
+	cleaned_count = e1000_desc_unused(rx_ring);
+	if (cleaned_count)
+		adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
+
+	adapter->total_rx_bytes += total_rx_bytes;
+	adapter->total_rx_packets += total_rx_packets;
+	return data_received;
+}
+
+static void e1000_put_txbuf(struct e1000_adapter *adapter,
+			     struct e1000_buffer *buffer_info)
+{
+	buffer_info->dma = 0;
+	if (buffer_info->skb) {
+		kfree_rtskb(buffer_info->skb);
+		buffer_info->skb = NULL;
+	}
+	buffer_info->time_stamp = 0;
+}
+
+/**
+ * e1000_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ *
+ * The return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned.
+ **/
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct e1000_tx_desc *tx_desc, *eop_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i, eop;
+	unsigned int count = 0;
+	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+
+	i = tx_ring->next_to_clean;
+	eop = tx_ring->buffer_info[i].next_to_watch;
+	eop_desc = E1000_TX_DESC(*tx_ring, eop);
+
+	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+	       (count < tx_ring->count)) {
+		bool cleaned = false;
+		rmb(); /* read buffer_info after eop_desc */
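+		/*
+		 * Walk every descriptor of the completed packet, up to and
+		 * including the one flagged as end-of-packet (eop), releasing
+		 * the buffer attached to each descriptor along the way.
+		 */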
+		for (; !cleaned; count++) {
+			tx_desc = E1000_TX_DESC(*tx_ring, i);
+			buffer_info = &tx_ring->buffer_info[i];
+			cleaned = (i == eop);
+
+			if (cleaned) {
+				total_tx_packets += buffer_info->segs;
+				total_tx_bytes += buffer_info->bytecount;
+			}
+
+			e1000_put_txbuf(adapter, buffer_info);
+			tx_desc->upper.data = 0;
+
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+		}
+
+		if (i == tx_ring->next_to_use)
+			break;
+		eop = tx_ring->buffer_info[i].next_to_watch;
+		eop_desc = E1000_TX_DESC(*tx_ring, eop);
+	}
+
+	tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD 32
+	if (count && rtnetif_carrier_ok(netdev) &&
+	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+
+		if (rtnetif_queue_stopped(netdev) &&
+		    !(test_bit(__E1000_DOWN, &adapter->state))) {
+			rtnetif_wake_queue(netdev);
+			++adapter->restart_queue;
+		}
+	}
+
+	if (adapter->detect_tx_hung) {
+		/*
+		 * Detect a transmit hang in hardware, this serializes the
+		 * check with the clearing of time_stamp and movement of i
+		 */
+		adapter->detect_tx_hung = 0;
+		if (tx_ring->buffer_info[i].time_stamp &&
+		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
+			       + (adapter->tx_timeout_factor * HZ)) &&
+		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+			rtnetif_stop_queue(netdev);
+		}
+	}
+	adapter->total_tx_bytes += total_tx_bytes;
+	adapter->total_tx_packets += total_tx_packets;
+	return count < tx_ring->count;
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ **/
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
+{
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	struct e1000_buffer *buffer_info;
+	unsigned int i;
+
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rx_ring->count; i++) {
+		buffer_info = &rx_ring->buffer_info[i];
+		buffer_info->dma = 0;
+
+		if (buffer_info->skb) {
+			kfree_rtskb(buffer_info->skb);
+			buffer_info->skb = NULL;
+		}
+	}
+
+	/* there also may be some cached data from a chained receive */
+	if (rx_ring->rx_skb_top) {
+		kfree_rtskb(rx_ring->rx_skb_top);
+		rx_ring->rx_skb_top = NULL;
+	}
+
+	/* Zero out the descriptor ring */
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+	adapter->flags2 &= ~FLAG2_IS_DISCARDING;
+
+	writel(0, adapter->hw.hw_addr + rx_ring->head);
+	writel(0, adapter->hw.hw_addr + rx_ring->tail);
+}
+
+static void e1000e_downshift_workaround(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+					struct e1000_adapter, downshift_task);
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
+	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
+}
+
+/**
+ * e1000_intr_msi - Interrupt Handler
+ * @irq_handle: RTDM interrupt handle registered for this adapter
+ **/
+static int e1000_intr_msi(rtdm_irq_t *irq_handle)
+{
+	struct e1000_adapter *adapter =
+		rtdm_irq_get_arg(irq_handle, struct e1000_adapter);
+	struct e1000_hw *hw = &adapter->hw;
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+	u32 icr = er32(ICR);
+
+	/*
+	 * read ICR disables interrupts using IAM
+	 */
+
+	if (icr & E1000_ICR_LSC) {
+		hw->mac.get_link_status = 1;
+		/*
+		 * ICH8 workaround-- Call gig speed drop workaround on cable
+		 * disconnect (LSC) before accessing any PHY registers
+		 */
+		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
+		    (!(er32(STATUS) & E1000_STATUS_LU)))
+			rtdm_schedule_nrt_work(&adapter->downshift_task);
+
+		/*
+		 * 80003ES2LAN workaround-- For packet buffer work-around on
+		 * link down event; disable receives here in the ISR and reset
+		 * adapter in watchdog
+		 */
+		if (rtnetif_carrier_ok(adapter->netdev) &&
+		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
+			/* disable receives */
+			u32 rctl = er32(RCTL);
+			ew32(RCTL, rctl & ~E1000_RCTL_EN);
+			adapter->flags |= FLAG_RX_RESTART_NOW;
+		}
+		/* guard against interrupt when we're going down */
+		if (!test_bit(__E1000_DOWN, &adapter->state))
+			rtdm_nrtsig_pend(&adapter->mod_timer_sig);
+	}
+
+	if (!e1000_clean_tx_irq(adapter))
+		/* Ring was not completely cleaned, so fire another interrupt */
+		ew32(ICS, adapter->tx_ring->ims_val);
+
+	if (e1000_clean_rx_irq(adapter, &time_stamp))
+		rt_mark_stack_mgr(adapter->netdev);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+/**
+ * e1000_intr - Interrupt Handler
+ * @irq_handle: RTDM interrupt handle registered for this adapter
+ **/
+static int e1000_intr(rtdm_irq_t *irq_handle)
+{
+	struct e1000_adapter *adapter =
+		rtdm_irq_get_arg(irq_handle, struct e1000_adapter);
+	struct e1000_hw *hw = &adapter->hw;
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+	u32 rctl, icr = er32(ICR);
+
+	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
+		return RTDM_IRQ_NONE;  /* Not our interrupt */
+
+	/*
+	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+	 * not set, then the adapter didn't send an interrupt
+	 */
+	if (!(icr & E1000_ICR_INT_ASSERTED))
+		return RTDM_IRQ_NONE;
+
+	/*
+	 * Interrupt Auto-Mask...upon reading ICR,
+	 * interrupts are masked.  No need for the
+	 * IMC write
+	 */
+
+	if (icr & E1000_ICR_LSC) {
+		hw->mac.get_link_status = 1;
+		/*
+		 * ICH8 workaround-- Call gig speed drop workaround on cable
+		 * disconnect (LSC) before accessing any PHY registers
+		 */
+		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
+		    (!(er32(STATUS) & E1000_STATUS_LU)))
+			rtdm_nrtsig_pend(&adapter->downshift_sig);
+
+		/*
+		 * 80003ES2LAN workaround--
+		 * For packet buffer work-around on link down event;
+		 * disable receives here in the ISR and
+		 * reset adapter in watchdog
+		 */
+		if (rtnetif_carrier_ok(adapter->netdev) &&
+		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
+			/* disable receives */
+			rctl = er32(RCTL);
+			ew32(RCTL, rctl & ~E1000_RCTL_EN);
+			adapter->flags |= FLAG_RX_RESTART_NOW;
+		}
+		/* guard against interrupt when we're going down */
+		if (!test_bit(__E1000_DOWN, &adapter->state))
+			rtdm_nrtsig_pend(&adapter->mod_timer_sig);
+	}
+
+	if (!e1000_clean_tx_irq(adapter))
+		/* Ring was not completely cleaned, so fire another interrupt */
+		ew32(ICS, adapter->tx_ring->ims_val);
+
+	if (e1000_clean_rx_irq(adapter, &time_stamp))
+		rt_mark_stack_mgr(adapter->netdev);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static irqreturn_t e1000_msix_other(int irq, void *data)
+{
+	struct rtnet_device *netdev = data;
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 icr = er32(ICR);
+
+	if (!(icr & E1000_ICR_INT_ASSERTED)) {
+		if (!test_bit(__E1000_DOWN, &adapter->state))
+			ew32(IMS, E1000_IMS_OTHER);
+		return IRQ_NONE;
+	}
+
+	if (icr & adapter->eiac_mask)
+		ew32(ICS, (icr & adapter->eiac_mask));
+
+	if (icr & E1000_ICR_OTHER) {
+		if (!(icr & E1000_ICR_LSC))
+			goto no_link_interrupt;
+		hw->mac.get_link_status = 1;
+		/* guard against interrupt when we're going down */
+		if (!test_bit(__E1000_DOWN, &adapter->state))
+			mod_timer(&adapter->watchdog_timer, jiffies + 1);
+	}
+
+no_link_interrupt:
+	if (!test_bit(__E1000_DOWN, &adapter->state))
+		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
+
+	return IRQ_HANDLED;
+}
+
+static int e1000_intr_msix_tx(rtdm_irq_t *irq_handle)
+{
+	struct e1000_adapter *adapter =
+		rtdm_irq_get_arg(irq_handle, struct e1000_adapter);
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+
+	adapter->total_tx_bytes = 0;
+	adapter->total_tx_packets = 0;
+
+	if (!e1000_clean_tx_irq(adapter))
+		/* Ring was not completely cleaned, so fire another interrupt */
+		ew32(ICS, tx_ring->ims_val);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int e1000_intr_msix_rx(rtdm_irq_t *irq_handle)
+{
+	struct e1000_adapter *adapter =
+		rtdm_irq_get_arg(irq_handle, struct e1000_adapter);
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+	/* Write the ITR value calculated at the end of the
+	 * previous interrupt.
+	 */
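+	/*
+	 * The ring's itr_val is expressed in interrupts per second, while
+	 * the ITR/EITR registers count the inter-interrupt gap in 256 ns
+	 * units, hence the 1000000000 / (itr_val * 256) conversion (e.g.
+	 * 20000 ints/s maps to a register value of ~195, i.e. ~50 us).
+	 */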
+	if (adapter->rx_ring->set_itr) {
+		writel(1000000000 / (adapter->rx_ring->itr_val * 256),
+		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
+		adapter->rx_ring->set_itr = 0;
+	}
+
+	if (e1000_clean_rx_irq(adapter, &time_stamp))
+		rt_mark_stack_mgr(adapter->netdev);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+/**
+ * e1000_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * e1000_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
+ **/
+static void e1000_configure_msix(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	int vector = 0;
+	u32 ctrl_ext, ivar = 0;
+
+	adapter->eiac_mask = 0;
+
+	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
+	if (hw->mac.type == e1000_82574) {
+		u32 rfctl = er32(RFCTL);
+		rfctl |= E1000_RFCTL_ACK_DIS;
+		ew32(RFCTL, rfctl);
+	}
+
+#define E1000_IVAR_INT_ALLOC_VALID	0x8
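+	/*
+	 * IVAR maps each interrupt cause (Rx queue 0, Tx queue 0 and the
+	 * "other" causes such as link changes) to an MSI-X vector number;
+	 * E1000_IVAR_INT_ALLOC_VALID marks an entry as in use.
+	 */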
+	/* Configure Rx vector */
+	rx_ring->ims_val = E1000_IMS_RXQ0;
+	adapter->eiac_mask |= rx_ring->ims_val;
+	if (rx_ring->itr_val)
+		writel(1000000000 / (rx_ring->itr_val * 256),
+		       hw->hw_addr + rx_ring->itr_register);
+	else
+		writel(1, hw->hw_addr + rx_ring->itr_register);
+	ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
+
+	/* Configure Tx vector */
+	tx_ring->ims_val = E1000_IMS_TXQ0;
+	vector++;
+	if (tx_ring->itr_val)
+		writel(1000000000 / (tx_ring->itr_val * 256),
+		       hw->hw_addr + tx_ring->itr_register);
+	else
+		writel(1, hw->hw_addr + tx_ring->itr_register);
+	adapter->eiac_mask |= tx_ring->ims_val;
+	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
+
+	/* set vector for Other Causes, e.g. link changes */
+	vector++;
+	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
+	if (rx_ring->itr_val)
+		writel(1000000000 / (rx_ring->itr_val * 256),
+		       hw->hw_addr + E1000_EITR_82574(vector));
+	else
+		writel(1, hw->hw_addr + E1000_EITR_82574(vector));
+
+	/* Cause Tx interrupts on every write back */
+	ivar |= (1 << 31);
+
+	ew32(IVAR, ivar);
+
+	/* enable MSI-X PBA support */
+	ctrl_ext = er32(CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
+
+	/* Auto-Mask Other interrupts upon ICR read */
+#define E1000_EIAC_MASK_82574   0x01F00000
+	ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
+	ctrl_ext |= E1000_CTRL_EXT_EIAME;
+	ew32(CTRL_EXT, ctrl_ext);
+	e1e_flush();
+}
+
+void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
+{
+	if (adapter->msix_entries) {
+		pci_disable_msix(adapter->pdev);
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+	} else if (adapter->flags & FLAG_MSI_ENABLED) {
+		pci_disable_msi(adapter->pdev);
+		adapter->flags &= ~FLAG_MSI_ENABLED;
+	}
+}
+
+/**
+ * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
+ * @adapter: board private structure
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
+{
+	int err;
+	int i;
+
+	switch (adapter->int_mode) {
+	case E1000E_INT_MODE_MSIX:
+		if (adapter->flags & FLAG_HAS_MSIX) {
+			adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
+			adapter->msix_entries = kcalloc(adapter->num_vectors,
+						      sizeof(struct msix_entry),
+						      GFP_KERNEL);
+			if (adapter->msix_entries) {
+				for (i = 0; i < adapter->num_vectors; i++)
+					adapter->msix_entries[i].entry = i;
+
+				err = pci_enable_msix_range(adapter->pdev,
+							adapter->msix_entries,
+							adapter->num_vectors,
+							adapter->num_vectors);
+				if (err == 0)
+					return;
+			}
+			/* MSI-X failed, so fall through and try MSI */
+			e_err("Failed to initialize MSI-X interrupts.  "
+			      "Falling back to MSI interrupts.\n");
+			e1000e_reset_interrupt_capability(adapter);
+		}
+		adapter->int_mode = E1000E_INT_MODE_MSI;
+		fallthrough;
+	case E1000E_INT_MODE_MSI:
+		if (!pci_enable_msi(adapter->pdev)) {
+			adapter->flags |= FLAG_MSI_ENABLED;
+		} else {
+			adapter->int_mode = E1000E_INT_MODE_LEGACY;
+			e_err("Failed to initialize MSI interrupts.  Falling "
+			      "back to legacy interrupts.\n");
+		}
+		fallthrough;
+	case E1000E_INT_MODE_LEGACY:
+		/* Don't do anything; this is the system default */
+		break;
+	}
+
+	/* store the number of vectors being used */
+	adapter->num_vectors = 1;
+}
+
+/**
+ * e1000_request_msix - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
+ * kernel.
+ **/
+static int e1000_request_msix(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	int err = 0, vector = 0;
+
+	if (strlen(netdev->name) < (IFNAMSIZ - 5))
+		snprintf(adapter->rx_ring->name,
+			 sizeof(adapter->rx_ring->name) - 1,
+			 "%s-rx-0", netdev->name);
+	else
+		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
+	err = rtdm_irq_request(&adapter->rx_irq_handle,
+			       adapter->msix_entries[vector].vector,
+			       e1000_intr_msix_rx, 0, adapter->rx_ring->name,
+			       adapter);
+	if (err)
+		goto out;
+	adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
+	adapter->rx_ring->itr_val = adapter->itr;
+	vector++;
+
+	if (strlen(netdev->name) < (IFNAMSIZ - 5))
+		snprintf(adapter->tx_ring->name,
+			 sizeof(adapter->tx_ring->name) - 1,
+			 "%s-tx-0", netdev->name);
+	else
+		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
+	err = rtdm_irq_request(&adapter->tx_irq_handle,
+			       adapter->msix_entries[vector].vector,
+			       e1000_intr_msix_tx, 0, adapter->tx_ring->name,
+			       adapter);
+	if (err)
+		goto out;
+	adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
+	adapter->tx_ring->itr_val = adapter->itr;
+	vector++;
+
+	err = request_irq(adapter->msix_entries[vector].vector,
+			  e1000_msix_other, 0, netdev->name, netdev);
+	if (err)
+		goto out;
+
+	e1000_configure_msix(adapter);
+	return 0;
+out:
+	return err;
+}
+
+/**
+ * e1000_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int e1000_request_irq(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	int err;
+
+	if (adapter->msix_entries) {
+		err = e1000_request_msix(adapter);
+		if (!err)
+			return err;
+		/* fall back to MSI */
+		e1000e_reset_interrupt_capability(adapter);
+		adapter->int_mode = E1000E_INT_MODE_MSI;
+		e1000e_set_interrupt_capability(adapter);
+	}
+	if (adapter->flags & FLAG_MSI_ENABLED) {
+		err = rtdm_irq_request(&adapter->irq_handle,
+				       adapter->pdev->irq, e1000_intr_msi,
+				       0, netdev->name, adapter);
+		if (!err)
+			return err;
+
+		/* fall back to legacy interrupt */
+		e1000e_reset_interrupt_capability(adapter);
+		adapter->int_mode = E1000E_INT_MODE_LEGACY;
+	}
+
+	err = rtdm_irq_request(&adapter->irq_handle, adapter->pdev->irq,
+			       e1000_intr, 0, netdev->name, adapter);
+	if (err)
+		e_err("Unable to allocate interrupt, Error: %d\n", err);
+
+	return err;
+}
+
+static void e1000_free_irq(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+
+	if (adapter->msix_entries) {
+		int vector = 0;
+
+		rtdm_irq_disable(&adapter->rx_irq_handle);
+		rtdm_irq_free(&adapter->rx_irq_handle);
+		vector++;
+
+		rtdm_irq_disable(&adapter->tx_irq_handle);
+		rtdm_irq_free(&adapter->tx_irq_handle);
+		vector++;
+
+		/* Other Causes interrupt vector */
+		free_irq(adapter->msix_entries[vector].vector, netdev);
+		return;
+	}
+
+	if (adapter->flags & FLAG_MSI_ENABLED)
+		rtdm_irq_disable(&adapter->irq_handle);
+	rtdm_irq_free(&adapter->irq_handle);
+}
+
+/**
+ * e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void e1000_irq_disable(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	ew32(IMC, ~0);
+	if (adapter->msix_entries)
+		ew32(EIAC_82574, 0);
+	e1e_flush();
+
+	if (adapter->msix_entries) {
+		int i;
+		for (i = 0; i < adapter->num_vectors; i++)
+			synchronize_irq(adapter->msix_entries[i].vector);
+	} else {
+		synchronize_irq(adapter->pdev->irq);
+	}
+}
+
+/**
+ * e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static void e1000_irq_enable(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (adapter->msix_entries) {
+		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
+		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
+	} else {
+		ew32(IMS, IMS_ENABLE_MASK);
+	}
+	e1e_flush();
+}
+
+/**
+ * e1000e_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ **/
+void e1000e_get_hw_control(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl_ext;
+	u32 swsm;
+
+	/* Let firmware know the driver has taken over */
+	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
+		swsm = er32(SWSM);
+		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
+	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
+		ctrl_ext = er32(CTRL_EXT);
+		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+	}
+}
+
+/**
+ * e1000e_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is closed.
+ *
+ **/
+void e1000e_release_hw_control(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl_ext;
+	u32 swsm;
+
+	/* Let firmware taken over control of h/w */
+	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
+		swsm = er32(SWSM);
+		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
+	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
+		ctrl_ext = er32(CTRL_EXT);
+		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+	}
+}
+
+/**
+ * e1000_alloc_ring_dma - allocate DMA-coherent memory for a descriptor ring
+ * @adapter: board private structure
+ * @ring: ring structure for which to allocate the descriptor memory
+ **/
+static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
+				struct e1000_ring *ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
+					GFP_KERNEL);
+	if (!ring->desc)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
+{
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	int err = -ENOMEM, size;
+
+	size = sizeof(struct e1000_buffer) * tx_ring->count;
+	tx_ring->buffer_info = vzalloc(size);
+	if (!tx_ring->buffer_info)
+		goto err;
+
+	/* round up to nearest 4K */
+	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+	err = e1000_alloc_ring_dma(adapter, tx_ring);
+	if (err)
+		goto err;
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+
+	return 0;
+err:
+	vfree(tx_ring->buffer_info);
+	e_err("Unable to allocate memory for the transmit descriptor ring\n");
+	return err;
+}
+
+/**
+ * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
+{
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	int size, desc_len, err = -ENOMEM;
+
+	size = sizeof(struct e1000_buffer) * rx_ring->count;
+	rx_ring->buffer_info = vzalloc(size);
+	if (!rx_ring->buffer_info)
+		goto err;
+
+	desc_len = sizeof(union e1000_rx_desc_packet_split);
+
+	/* Round up to nearest 4K */
+	rx_ring->size = rx_ring->count * desc_len;
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+	err = e1000_alloc_ring_dma(adapter, rx_ring);
+	if (err)
+		goto err;
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+	rx_ring->rx_skb_top = NULL;
+
+	return 0;
+
+err:
+	vfree(rx_ring->buffer_info);
+	e_err("Unable to allocate memory for the receive descriptor ring\n");
+	return err;
+}
+
+/**
+ * e1000_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ **/
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
+{
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct e1000_buffer *buffer_info;
+	unsigned long size;
+	unsigned int i;
+
+	for (i = 0; i < tx_ring->count; i++) {
+		buffer_info = &tx_ring->buffer_info[i];
+		e1000_put_txbuf(adapter, buffer_info);
+	}
+
+	size = sizeof(struct e1000_buffer) * tx_ring->count;
+	memset(tx_ring->buffer_info, 0, size);
+
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+
+	writel(0, adapter->hw.hw_addr + tx_ring->head);
+	writel(0, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+/**
+ * e1000e_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+void e1000e_free_tx_resources(struct e1000_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+
+	e1000_clean_tx_ring(adapter);
+
+	vfree(tx_ring->buffer_info);
+	tx_ring->buffer_info = NULL;
+
+	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+			  tx_ring->dma);
+	tx_ring->desc = NULL;
+}
+
+/**
+ * e1000e_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+
+void e1000e_free_rx_resources(struct e1000_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	int i;
+
+	e1000_clean_rx_ring(adapter);
+
+	for (i = 0; i < rx_ring->count; i++)
+		kfree(rx_ring->buffer_info[i].ps_pages);
+
+	vfree(rx_ring->buffer_info);
+	rx_ring->buffer_info = NULL;
+
+	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+			  rx_ring->dma);
+	rx_ring->desc = NULL;
+}
+
+/**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ **/
+static int e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+	adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+	if (!adapter->tx_ring)
+		goto err;
+
+	rtdm_lock_init(&adapter->tx_ring->lock);
+
+	adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+	if (!adapter->rx_ring)
+		goto err;
+
+	return 0;
+err:
+	e_err("Unable to allocate memory for queues\n");
+	kfree(adapter->rx_ring);
+	kfree(adapter->tx_ring);
+	return -ENOMEM;
+}
+
+static void e1000_vlan_rx_add_vid(struct rtnet_device *netdev, u16 vid)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vfta, index;
+
+	/* don't update vlan cookie if already programmed */
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+	    (vid == adapter->mng_vlan_id))
+		return;
+
+	/* add VID to filter table */
+	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
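+		/*
+		 * The VLAN filter table array (VFTA) is a 4096-bit bitmap held
+		 * in 128 32-bit registers: bits 11:5 of the VID select the
+		 * register, bits 4:0 select the bit within it.
+		 */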
+		index = (vid >> 5) & 0x7F;
+		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
+		vfta |= (1 << (vid & 0x1F));
+		hw->mac.ops.write_vfta(hw, index, vfta);
+	}
+
+	set_bit(vid, adapter->active_vlans);
+}
+
+static void e1000_vlan_rx_kill_vid(struct rtnet_device *netdev, u16 vid)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vfta, index;
+
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+	    (vid == adapter->mng_vlan_id)) {
+		/* release control to f/w */
+		e1000e_release_hw_control(adapter);
+		return;
+	}
+
+	/* remove VID from filter table */
+	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+		index = (vid >> 5) & 0x7F;
+		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
+		vfta &= ~(1 << (vid & 0x1F));
+		hw->mac.ops.write_vfta(hw, index, vfta);
+	}
+
+	clear_bit(vid, adapter->active_vlans);
+}
+
+/**
+ * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+
+	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+		/* disable VLAN receive filtering */
+		rctl = er32(RCTL);
+		rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
+		ew32(RCTL, rctl);
+
+		if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
+			e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+		}
+	}
+}
+
+/**
+ * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+
+	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+		/* enable VLAN receive filtering */
+		rctl = er32(RCTL);
+		rctl |= E1000_RCTL_VFE;
+		rctl &= ~E1000_RCTL_CFIEN;
+		ew32(RCTL, rctl);
+	}
+}
+
+/**
+ * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl;
+
+	/* disable VLAN tag insert/strip */
+	ctrl = er32(CTRL);
+	ctrl &= ~E1000_CTRL_VME;
+	ew32(CTRL, ctrl);
+}
+
+/**
+ * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl;
+
+	/* enable VLAN tag insert/strip */
+	ctrl = er32(CTRL);
+	ctrl |= E1000_CTRL_VME;
+	ew32(CTRL, ctrl);
+}
+
+static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	u16 vid = adapter->hw.mng_cookie.vlan_id;
+	u16 old_vid = adapter->mng_vlan_id;
+
+	if (adapter->hw.mng_cookie.status &
+	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+		e1000_vlan_rx_add_vid(netdev, vid);
+		adapter->mng_vlan_id = vid;
+	}
+
+	if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
+		e1000_vlan_rx_kill_vid(netdev, old_vid);
+}
+
+static void e1000_restore_vlan(struct e1000_adapter *adapter)
+{
+	u16 vid;
+
+	e1000_vlan_rx_add_vid(adapter->netdev, 0);
+
+	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+		e1000_vlan_rx_add_vid(adapter->netdev, vid);
+}
+
+static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 manc, manc2h, mdef, i, j;
+
+	if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
+		return;
+
+	manc = er32(MANC);
+
+	/*
+	 * enable receiving management packets to the host. this will probably
+	 * generate destination unreachable messages from the host OS, but
+	 * the packets will be handled on SMBUS
+	 */
+	manc |= E1000_MANC_EN_MNG2HOST;
+	manc2h = er32(MANC2H);
+
+	switch (hw->mac.type) {
+	default:
+		manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
+		break;
+	case e1000_82574:
+	case e1000_82583:
+		/*
+		 * Check if IPMI pass-through decision filter already exists;
+		 * if so, enable it.
+		 */
+		for (i = 0, j = 0; i < 8; i++) {
+			mdef = er32(MDEF(i));
+
+			/* Ignore filters with anything other than IPMI ports */
+			if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+				continue;
+
+			/* Enable this decision filter in MANC2H */
+			if (mdef)
+				manc2h |= (1 << i);
+
+			j |= mdef;
+		}
+
+		if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+			break;
+
+		/* Create new decision filter in an empty filter */
+		for (i = 0, j = 0; i < 8; i++)
+			if (er32(MDEF(i)) == 0) {
+				ew32(MDEF(i), (E1000_MDEF_PORT_623 |
+					       E1000_MDEF_PORT_664));
+				manc2h |= (1 << 1);
+				j++;
+				break;
+			}
+
+		if (!j)
+			e_warn("Unable to create IPMI pass-through filter\n");
+		break;
+	}
+
+	ew32(MANC2H, manc2h);
+	ew32(MANC, manc);
+}
+
+/**
+ * e1000_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void e1000_configure_tx(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	u64 tdba;
+	u32 tdlen, tctl, tipg, tarc;
+	u32 ipgr1, ipgr2;
+
+	/* Setup the HW Tx Head and Tail descriptor pointers */
+	tdba = tx_ring->dma;
+	tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
+	ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
+	ew32(TDBAH, (tdba >> 32));
+	ew32(TDLEN, tdlen);
+	ew32(TDH, 0);
+	ew32(TDT, 0);
+	tx_ring->head = E1000_TDH;
+	tx_ring->tail = E1000_TDT;
+
+	/* Set the default values for the Tx Inter Packet Gap timer */
+	tipg = DEFAULT_82543_TIPG_IPGT_COPPER;          /*  8  */
+	ipgr1 = DEFAULT_82543_TIPG_IPGR1;               /*  8  */
+	ipgr2 = DEFAULT_82543_TIPG_IPGR2;               /*  6  */
+
+	if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
+		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /*  7  */
+
+	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
+	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
+	ew32(TIPG, tipg);
+
+	/* Set the Tx Interrupt Delay register */
+	ew32(TIDV, adapter->tx_int_delay);
+	/* Tx irq moderation */
+	ew32(TADV, adapter->tx_abs_int_delay);
+
+	if (adapter->flags2 & FLAG2_DMA_BURST) {
+		u32 txdctl = er32(TXDCTL(0));
+		txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
+			    E1000_TXDCTL_WTHRESH);
+		/*
+		 * set up some performance related parameters to encourage the
+		 * hardware to use the bus more efficiently in bursts, depends
+		 * on the tx_int_delay to be enabled,
+		 * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
+		 * hthresh = 1 ==> prefetch when one or more available
+		 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
+		 * BEWARE: this seems to work but should be considered first if
+		 * there are Tx hangs or other Tx related bugs
+		 */
+		txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
+		ew32(TXDCTL(0), txdctl);
+		/* erratum work around: set txdctl the same for both queues */
+		ew32(TXDCTL(1), txdctl);
+	}
+
+	/* Program the Transmit Control Register */
+	tctl = er32(TCTL);
+	tctl &= ~E1000_TCTL_CT;
+	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+	if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
+		tarc = er32(TARC(0));
+		/*
+		 * set the speed mode bit, we'll clear it if we're not at
+		 * gigabit link later
+		 */
+#define SPEED_MODE_BIT (1 << 21)
+		tarc |= SPEED_MODE_BIT;
+		ew32(TARC(0), tarc);
+	}
+
+	/* errata: program both queues to unweighted RR */
+	if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
+		tarc = er32(TARC(0));
+		tarc |= 1;
+		ew32(TARC(0), tarc);
+		tarc = er32(TARC(1));
+		tarc |= 1;
+		ew32(TARC(1), tarc);
+	}
+
+	/* Setup Transmit Descriptor Settings for eop descriptor */
+	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+	/* only set IDE if we are delaying interrupts using the timers */
+	if (adapter->tx_int_delay)
+		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
+
+	/* enable Report Status bit */
+	adapter->txd_cmd |= E1000_TXD_CMD_RS;
+
+	ew32(TCTL, tctl);
+
+	e1000e_config_collision_dist(hw);
+}
+
+/**
+ * e1000_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
+static void e1000_setup_rctl(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl, rfctl;
+
+	/* Workaround Si errata on PCHx - configure jumbo frame flow */
+	if (hw->mac.type >= e1000_pch2lan) {
+		s32 ret_val;
+
+		if (adapter->netdev->mtu > ETH_DATA_LEN)
+			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+		else
+			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+		if (ret_val)
+			e_dbg("failed to enable jumbo frame workaround mode\n");
+	}
+
+	/* Program MC offset vector base */
+	rctl = er32(RCTL);
+	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
+		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+	/* Do not Store bad packets */
+	rctl &= ~E1000_RCTL_SBP;
+
+	/* Enable Long Packet receive */
+	if (adapter->netdev->mtu <= ETH_DATA_LEN)
+		rctl &= ~E1000_RCTL_LPE;
+	else
+		rctl |= E1000_RCTL_LPE;
+
+	/* Some systems expect that the CRC is included in SMBUS traffic. The
+	 * hardware strips the CRC before sending to both SMBUS (BMC) and to
+	 * host memory when this is enabled
+	 */
+	if (adapter->flags2 & FLAG2_CRC_STRIPPING)
+		rctl |= E1000_RCTL_SECRC;
+
+	/* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
+	if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
+		u16 phy_data;
+
+		e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
+		phy_data &= 0xfff8;
+		phy_data |= (1 << 2);
+		e1e_wphy(hw, PHY_REG(770, 26), phy_data);
+
+		e1e_rphy(hw, 22, &phy_data);
+		phy_data &= 0x0fff;
+		phy_data |= (1 << 14);
+		e1e_wphy(hw, 0x10, 0x2823);
+		e1e_wphy(hw, 0x11, 0x0003);
+		e1e_wphy(hw, 22, phy_data);
+	}
+
+	/* Setup buffer sizes */
+	rctl &= ~E1000_RCTL_SZ_4096;
+	rctl |= E1000_RCTL_BSEX;
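+	/*
+	 * With the buffer size extension (BSEX) bit set, the RCTL size
+	 * encodings select the large 4 KB/8 KB/16 KB buffers; the default
+	 * 2048-byte case below clears BSEX again and uses the standard
+	 * encoding.
+	 */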
+	switch (adapter->rx_buffer_len) {
+	case 2048:
+	default:
+		rctl |= E1000_RCTL_SZ_2048;
+		rctl &= ~E1000_RCTL_BSEX;
+		break;
+	case 4096:
+		rctl |= E1000_RCTL_SZ_4096;
+		break;
+	case 8192:
+		rctl |= E1000_RCTL_SZ_8192;
+		break;
+	case 16384:
+		rctl |= E1000_RCTL_SZ_16384;
+		break;
+	}
+
+	/* Enable Extended Status in all Receive Descriptors */
+	rfctl = er32(RFCTL);
+	rfctl |= E1000_RFCTL_EXTEN;
+
+	adapter->rx_ps_pages = 0;
+
+	ew32(RFCTL, rfctl);
+	ew32(RCTL, rctl);
+	/* just started the receive unit, no need to restart */
+	adapter->flags &= ~FLAG_RX_RESTART_NOW;
+}
+
+/**
+ * e1000_configure_rx - Configure Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void e1000_configure_rx(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	u64 rdba;
+	u32 rdlen, rctl, rxcsum, ctrl_ext;
+
+	rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
+	adapter->clean_rx = e1000_clean_rx_irq;
+	adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+
+	/* disable receives while setting up the descriptors */
+	rctl = er32(RCTL);
+	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+		ew32(RCTL, rctl & ~E1000_RCTL_EN);
+	e1e_flush();
+	usleep_range(10000, 20000);
+
+	if (adapter->flags2 & FLAG2_DMA_BURST) {
+		/*
+		 * set the writeback threshold (only takes effect if the RDTR
+		 * is set). set GRAN=1 and write back up to 0x4 worth, and
+		 * enable prefetching of 0x20 Rx descriptors
+		 * granularity = 01
+		 * wthresh = 04,
+		 * hthresh = 04,
+		 * pthresh = 0x20
+		 */
+		ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
+		ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
+
+		/*
+		 * override the delay timers for enabling bursting, only if
+		 * the value was not set by the user via module options
+		 */
+		if (adapter->rx_int_delay == DEFAULT_RDTR)
+			adapter->rx_int_delay = BURST_RDTR;
+		if (adapter->rx_abs_int_delay == DEFAULT_RADV)
+			adapter->rx_abs_int_delay = BURST_RADV;
+	}
+
+	/* set the Receive Delay Timer Register */
+	ew32(RDTR, adapter->rx_int_delay);
+
+	/* irq moderation */
+	ew32(RADV, adapter->rx_abs_int_delay);
+	if ((adapter->itr_setting != 0) && (adapter->itr != 0))
+		ew32(ITR, 1000000000 / (adapter->itr * 256));
+
+	ctrl_ext = er32(CTRL_EXT);
+	ew32(CTRL_EXT, ctrl_ext);
+	e1e_flush();
+
+	/*
+	 * Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring
+	 */
+	rdba = rx_ring->dma;
+	ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
+	ew32(RDBAH, (rdba >> 32));
+	ew32(RDLEN, rdlen);
+	ew32(RDH, 0);
+	ew32(RDT, 0);
+	rx_ring->head = E1000_RDH;
+	rx_ring->tail = E1000_RDT;
+
+	/* Enable Receive Checksum Offload for TCP and UDP */
+	rxcsum = er32(RXCSUM);
+	if (adapter->netdev->features & NETIF_F_RXCSUM) {
+		rxcsum |= E1000_RXCSUM_TUOFL;
+	} else {
+		rxcsum &= ~E1000_RXCSUM_TUOFL;
+		/* no need to clear IPPCSE as it defaults to 0 */
+	}
+	ew32(RXCSUM, rxcsum);
+
+	/* Enable Receives */
+	ew32(RCTL, rctl);
+}
+
+/**
+ *  e1000_update_mc_addr_list - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *
+ *  Updates the Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
+				      u32 mc_addr_count)
+{
+	hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
+}
+
+/**
+ * e1000_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void e1000_set_multi(struct rtnet_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+
+	/* Check for Promiscuous and All Multicast modes */
+
+	rctl = er32(RCTL);
+
+	if (netdev->flags & IFF_PROMISC) {
+		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+		rctl &= ~E1000_RCTL_VFE;
+		/* Do not hardware filter VLANs in promisc mode */
+		e1000e_vlan_filter_disable(adapter);
+	} else {
+		if (netdev->flags & IFF_ALLMULTI) {
+			rctl |= E1000_RCTL_MPE;
+			rctl &= ~E1000_RCTL_UPE;
+		} else {
+			rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+		}
+		e1000e_vlan_filter_enable(adapter);
+	}
+
+	ew32(RCTL, rctl);
+
+	e1000_update_mc_addr_list(hw, NULL, 0);
+
+	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+		e1000e_vlan_strip_enable(adapter);
+	else
+		e1000e_vlan_strip_disable(adapter);
+}
+
+/**
+ * e1000_configure - configure the hardware for Rx and Tx
+ * @adapter: private board structure
+ **/
+static void e1000_configure(struct e1000_adapter *adapter)
+{
+	e1000_set_multi(adapter->netdev);
+
+	e1000_restore_vlan(adapter);
+	e1000_init_manageability_pt(adapter);
+
+	e1000_configure_tx(adapter);
+	e1000_setup_rctl(adapter);
+	e1000_configure_rx(adapter);
+	adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring),
+			      GFP_KERNEL);
+}
+
+/**
+ * e1000e_power_up_phy - restore link in case the phy was powered down
+ * @adapter: address of board private structure
+ *
+ * The phy may be powered down to save power and turn off link when the
+ * driver is unloaded and wake on lan is not enabled (among others)
+ * *** this routine MUST be followed by a call to e1000e_reset ***
+ **/
+void e1000e_power_up_phy(struct e1000_adapter *adapter)
+{
+	if (adapter->hw.phy.ops.power_up)
+		adapter->hw.phy.ops.power_up(&adapter->hw);
+
+	adapter->hw.mac.ops.setup_link(&adapter->hw);
+}
+
+/**
+ * e1000_power_down_phy - Power down the PHY
+ * @adapter: board private structure
+ *
+ * Power down the PHY so no link is implied when interface is down.
+ * The PHY cannot be powered down if management or WoL is active.
+ */
+static void e1000_power_down_phy(struct e1000_adapter *adapter)
+{
+	/* WoL is enabled */
+	if (adapter->wol)
+		return;
+
+	if (adapter->hw.phy.ops.power_down)
+		adapter->hw.phy.ops.power_down(&adapter->hw);
+}
+
+/**
+ * e1000e_reset - bring the hardware into a known good state
+ * @adapter: board private structure
+ *
+ * This function boots the hardware and enables some settings that
+ * require a configuration cycle of the hardware - those cannot be
+ * set/changed during runtime. After reset the device needs to be
+ * properly configured for Rx, Tx etc.
+ */
+void e1000e_reset(struct e1000_adapter *adapter)
+{
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+	struct e1000_fc_info *fc = &adapter->hw.fc;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 tx_space, min_tx_space, min_rx_space;
+	u32 pba = adapter->pba;
+	u16 hwm;
+
+	/* reset Packet Buffer Allocation to default */
+	ew32(PBA, pba);
+
+	if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
+		/*
+		 * To maintain wire speed transmits, the Tx FIFO should be
+		 * large enough to accommodate two full transmit packets,
+		 * rounded up to the next 1KB and expressed in KB.  Likewise,
+		 * the Rx FIFO should be large enough to accommodate at least
+		 * one full receive packet and is similarly rounded up and
+		 * expressed in KB.
+		 */
+		pba = er32(PBA);
+		/* upper 16 bits has Tx packet buffer allocation size in KB */
+		tx_space = pba >> 16;
+		/* lower 16 bits has Rx packet buffer allocation size in KB */
+		pba &= 0xffff;
+		/*
+		 * the Tx fifo also stores 16 bytes of information about the Tx
+		 * packet, but don't include the ethernet FCS because hardware
+		 * appends it
+		 */
+		min_tx_space = (adapter->max_frame_size +
+				sizeof(struct e1000_tx_desc) -
+				ETH_FCS_LEN) * 2;
+		min_tx_space = ALIGN(min_tx_space, 1024);
+		min_tx_space >>= 10;
+		/* software strips receive CRC, so leave room for it */
+		min_rx_space = adapter->max_frame_size;
+		min_rx_space = ALIGN(min_rx_space, 1024);
+		min_rx_space >>= 10;
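+		/*
+		 * Worked example, assuming a 9018-byte jumbo frame and the
+		 * 16-byte legacy Tx descriptor: min_tx_space =
+		 * ALIGN((9018 + 16 - 4) * 2, 1024) >> 10 = 18 (KB) and
+		 * min_rx_space = ALIGN(9018, 1024) >> 10 = 9 (KB).
+		 */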
+
+		/*
+		 * If current Tx allocation is less than the min Tx FIFO size,
+		 * and the min Tx FIFO size is less than the current Rx FIFO
+		 * allocation, take space away from current Rx allocation
+		 */
+		if ((tx_space < min_tx_space) &&
+		    ((min_tx_space - tx_space) < pba)) {
+			pba -= min_tx_space - tx_space;
+
+			/*
+			 * if short on Rx space, Rx wins and must trump Tx
+			 * adjustment or use Early Receive if available
+			 */
+			if ((pba < min_rx_space) &&
+			    (!(adapter->flags & FLAG_HAS_ERT)))
+				/* ERT enabled in e1000_configure_rx */
+				pba = min_rx_space;
+		}
+
+		ew32(PBA, pba);
+	}
+
+	/*
+	 * flow control settings
+	 *
+	 * The high water mark must be low enough to fit one full frame
+	 * (or the size used for early receive) above it in the Rx FIFO.
+	 * Set it to the lower of:
+	 * - 90% of the Rx FIFO size, and
+	 * - the full Rx FIFO size minus the early receive size (for parts
+	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
+	 * - the full Rx FIFO size minus one full frame
+	 */
+	if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
+		fc->pause_time = 0xFFFF;
+	else
+		fc->pause_time = E1000_FC_PAUSE_TIME;
+	fc->send_xon = 1;
+	fc->current_mode = fc->requested_mode;
+
+	switch (hw->mac.type) {
+	default:
+		if ((adapter->flags & FLAG_HAS_ERT) &&
+		    (adapter->netdev->mtu > ETH_DATA_LEN))
+			hwm = min(((pba << 10) * 9 / 10),
+				  ((pba << 10) - (E1000_ERT_2048 << 3)));
+		else
+			hwm = min(((pba << 10) * 9 / 10),
+				  ((pba << 10) - adapter->max_frame_size));
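+		/*
+		 * e.g. with pba = 20 KB and a 1522-byte frame this yields
+		 * hwm = min(18432, 18958) = 18432 bytes, which is then
+		 * masked down to the 8-byte FCRTH granularity.
+		 */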
+
+		fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
+		fc->low_water = fc->high_water - 8;
+		break;
+	case e1000_pchlan:
+		/*
+		 * Workaround PCH LOM adapter hangs with certain network
+		 * loads.  If hangs persist, try disabling Tx flow control.
+		 */
+		if (adapter->netdev->mtu > ETH_DATA_LEN) {
+			fc->high_water = 0x3500;
+			fc->low_water  = 0x1500;
+		} else {
+			fc->high_water = 0x5000;
+			fc->low_water  = 0x3000;
+		}
+		fc->refresh_time = 0x1000;
+		break;
+	case e1000_pch2lan:
+	case e1000_pch_lpt:
+		fc->high_water = 0x05C20;
+		fc->low_water = 0x05048;
+		fc->pause_time = 0x0650;
+		fc->refresh_time = 0x0400;
+		if (adapter->netdev->mtu > ETH_DATA_LEN) {
+			pba = 14;
+			ew32(PBA, pba);
+		}
+		break;
+	}
+
+	/*
+	 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
+	 * fit in the receive buffer and early-receive is not supported.
+	 */
+	if (adapter->itr_setting & 0x3) {
+		if (((adapter->max_frame_size * 2) > (pba << 10)) &&
+		    !(adapter->flags & FLAG_HAS_ERT)) {
+			if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
+				dev_info(&adapter->pdev->dev,
+					"Interrupt Throttle Rate turned off\n");
+				adapter->flags2 |= FLAG2_DISABLE_AIM;
+				ew32(ITR, 0);
+			}
+		} else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
+			dev_info(&adapter->pdev->dev,
+				 "Interrupt Throttle Rate turned on\n");
+			adapter->flags2 &= ~FLAG2_DISABLE_AIM;
+			adapter->itr = 20000;
+			ew32(ITR, 1000000000 / (adapter->itr * 256));
+		}
+	}
+
+	/* Allow time for pending master requests to run */
+	mac->ops.reset_hw(hw);
+
+	/*
+	 * For parts with AMT enabled, let the firmware know
+	 * that the network interface is in control
+	 */
+	if (adapter->flags & FLAG_HAS_AMT)
+		e1000e_get_hw_control(adapter);
+
+	ew32(WUC, 0);
+
+	if (mac->ops.init_hw(hw))
+		e_err("Hardware Error\n");
+
+	e1000_update_mng_vlan(adapter);
+
+	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+	ew32(VET, ETH_P_8021Q);
+
+	e1000e_reset_adaptive(hw);
+
+	if (!rtnetif_running(adapter->netdev) &&
+	    !test_bit(__E1000_TESTING, &adapter->state)) {
+		e1000_power_down_phy(adapter);
+		return;
+	}
+
+	e1000_get_phy_info(hw);
+
+	if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
+	    !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
+		u16 phy_data = 0;
+		/*
+		 * speed up time to link by disabling smart power down; ignore
+		 * the return value of this function because there is nothing
+		 * different we would do if it failed
+		 */
+		e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
+		phy_data &= ~IGP02E1000_PM_SPD;
+		e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
+	}
+}
+
+int e1000e_up(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* hardware has been reset, we need to reload some things */
+	e1000_configure(adapter);
+
+	clear_bit(__E1000_DOWN, &adapter->state);
+
+	if (adapter->msix_entries)
+		e1000_configure_msix(adapter);
+	e1000_irq_enable(adapter);
+
+	rtnetif_start_queue(adapter->netdev);
+
+	/* fire a link change interrupt to start the watchdog */
+	if (adapter->msix_entries)
+		ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+	else
+		ew32(ICS, E1000_ICS_LSC);
+
+	return 0;
+}
+
+static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (!(adapter->flags2 & FLAG2_DMA_BURST))
+		return;
+
+	/* flush pending descriptor writebacks to memory */
+	ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+	ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+	/* execute the writes immediately */
+	e1e_flush();
+}
+
+void e1000e_down(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 tctl, rctl;
+
+	/*
+	 * signal that we're down so the interrupt handler does not
+	 * reschedule our watchdog timer
+	 */
+	set_bit(__E1000_DOWN, &adapter->state);
+
+	/* disable receives in the hardware */
+	rctl = er32(RCTL);
+	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+		ew32(RCTL, rctl & ~E1000_RCTL_EN);
+	/* flush and sleep below */
+
+	rtnetif_stop_queue(netdev);
+
+	/* disable transmits in the hardware */
+	tctl = er32(TCTL);
+	tctl &= ~E1000_TCTL_EN;
+	ew32(TCTL, tctl);
+
+	/* flush both disables and wait for them to finish */
+	e1e_flush();
+	usleep_range(10000, 20000);
+
+	e1000_irq_disable(adapter);
+
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_info_timer);
+
+	rtnetif_carrier_off(netdev);
+
+	e1000e_flush_descriptors(adapter);
+	e1000_clean_tx_ring(adapter);
+	e1000_clean_rx_ring(adapter);
+
+	adapter->link_speed = 0;
+	adapter->link_duplex = 0;
+
+	if (!pci_channel_offline(adapter->pdev))
+		e1000e_reset(adapter);
+
+	/*
+	 * TODO: for power management, we could drop the link and
+	 * pci_disable_device here.
+	 */
+}
+
+void e1000e_reinit_locked(struct e1000_adapter *adapter)
+{
+	might_sleep();
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+		usleep_range(1000, 2000);
+	e1000e_down(adapter);
+	e1000e_up(adapter);
+	clear_bit(__E1000_RESETTING, &adapter->state);
+}
+
+/**
+ * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * e1000_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int e1000_sw_init(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+
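+	/*
+	 * 1522 bytes: a full 1514-byte Ethernet frame plus a single VLAN
+	 * tag (4) and the FCS (4)
+	 */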
+	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
+	adapter->rx_ps_bsize0 = 128;
+	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+	spin_lock_init(&adapter->stats64_lock);
+
+	e1000e_set_interrupt_capability(adapter);
+
+	if (e1000_alloc_queues(adapter))
+		return -ENOMEM;
+
+	/* Explicitly disable IRQ since the NIC can be in any state. */
+	e1000_irq_disable(adapter);
+
+	set_bit(__E1000_DOWN, &adapter->state);
+	return 0;
+}
+
+/**
+ * e1000_intr_msi_test - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t e1000_intr_msi_test(int irq, void *data)
+{
+	struct rtnet_device *netdev = data;
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 icr = er32(ICR);
+
+	e_dbg("icr is %08X\n", icr);
+	if (icr & E1000_ICR_RXSEQ) {
+		adapter->flags &= ~FLAG_MSI_TEST_FAILED;
+		wmb();
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * e1000_test_msi_interrupt - Returns 0 for successful test
+ * @adapter: board private struct
+ *
+ * code flow taken from tg3.c
+ **/
+static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	int err;
+
+	/* poll_enable hasn't been called yet, so don't need disable */
+	/* clear any pending events */
+	er32(ICR);
+
+	/* free the real vector and request a test handler */
+	e1000_free_irq(adapter);
+	e1000e_reset_interrupt_capability(adapter);
+
+	/* Assume that the test fails; if it succeeds, the test
+	 * MSI irq handler will unset this flag */
+	adapter->flags |= FLAG_MSI_TEST_FAILED;
+
+	err = pci_enable_msi(adapter->pdev);
+	if (err)
+		goto msi_test_failed;
+
+	err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
+			  netdev->name, netdev);
+	if (err) {
+		pci_disable_msi(adapter->pdev);
+		goto msi_test_failed;
+	}
+
+	wmb();
+
+	e1000_irq_enable(adapter);
+
+	/* fire an unusual interrupt on the test handler */
+	ew32(ICS, E1000_ICS_RXSEQ);
+	e1e_flush();
+	msleep(50);
+
+	e1000_irq_disable(adapter);
+
+	rmb();
+
+	if (adapter->flags & FLAG_MSI_TEST_FAILED) {
+		adapter->int_mode = E1000E_INT_MODE_LEGACY;
+		e_info("MSI interrupt test failed, using legacy interrupt.\n");
+	} else
+		e_dbg("MSI interrupt test succeeded!\n");
+
+	free_irq(adapter->pdev->irq, netdev);
+	pci_disable_msi(adapter->pdev);
+
+msi_test_failed:
+	e1000e_set_interrupt_capability(adapter);
+	return e1000_request_irq(adapter);
+}
+
+/**
+ * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
+ * @adapter: board private struct
+ *
+ * code flow taken from tg3.c, called with e1000 interrupts disabled.
+ **/
+static int e1000_test_msi(struct e1000_adapter *adapter)
+{
+	int err;
+	u16 pci_cmd;
+
+	if (!(adapter->flags & FLAG_MSI_ENABLED))
+		return 0;
+
+	/* disable SERR in case the MSI write causes a master abort */
+	pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+	if (pci_cmd & PCI_COMMAND_SERR)
+		pci_write_config_word(adapter->pdev, PCI_COMMAND,
+				      pci_cmd & ~PCI_COMMAND_SERR);
+
+	err = e1000_test_msi_interrupt(adapter);
+
+	/* re-enable SERR */
+	if (pci_cmd & PCI_COMMAND_SERR) {
+		pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+		pci_cmd |= PCI_COMMAND_SERR;
+		pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
+	}
+
+	return err;
+}
+
+/**
+ * e1000_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int e1000_open(struct rtnet_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	int err;
+
+	/* disallow open during test */
+	if (test_bit(__E1000_TESTING, &adapter->state))
+		return -EBUSY;
+
+	pm_runtime_get_sync(&pdev->dev);
+
+	rtnetif_carrier_off(netdev);
+
+	/* allocate transmit descriptors */
+	err = e1000e_setup_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = e1000e_setup_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	/*
+	 * If AMT is enabled, let the firmware know that the network
+	 * interface is now open and reset the part to a known state.
+	 */
+	if (adapter->flags & FLAG_HAS_AMT) {
+		e1000e_get_hw_control(adapter);
+		e1000e_reset(adapter);
+	}
+
+	e1000e_power_up_phy(adapter);
+
+	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
+		e1000_update_mng_vlan(adapter);
+
+	/*
+	 * before we allocate an interrupt, we must be ready to handle it.
+	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+	 * as soon as we call pci_request_irq, so we have to setup our
+	 * clean_rx handler before we do so.
+	 */
+	e1000_configure(adapter);
+
+	rt_stack_connect(netdev, &STACK_manager);
+
+	err = e1000_request_irq(adapter);
+	if (err)
+		goto err_req_irq;
+
+	/*
+	 * Work around PCIe errata with MSI interrupts causing some chipsets to
+	 * ignore e1000e MSI messages, which means we need to test our MSI
+	 * interrupt now
+	 */
+	if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
+		err = e1000_test_msi(adapter);
+		if (err) {
+			e_err("Interrupt allocation failed\n");
+			goto err_req_irq;
+		}
+	}
+
+	/* From here on the code is the same as e1000e_up() */
+	clear_bit(__E1000_DOWN, &adapter->state);
+
+	e1000_irq_enable(adapter);
+
+	rtnetif_start_queue(netdev);
+
+	adapter->idle_check = true;
+	pm_runtime_put(&pdev->dev);
+
+	/* fire a link status change interrupt to start the watchdog */
+	if (adapter->msix_entries)
+		ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+	else
+		ew32(ICS, E1000_ICS_LSC);
+
+	return 0;
+
+err_req_irq:
+	e1000e_release_hw_control(adapter);
+	e1000_power_down_phy(adapter);
+	e1000e_free_rx_resources(adapter);
+err_setup_rx:
+	e1000e_free_tx_resources(adapter);
+err_setup_tx:
+	e1000e_reset(adapter);
+	pm_runtime_put_sync(&pdev->dev);
+
+	return err;
+}
+
+/**
+ * e1000_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the driver's control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int e1000_close(struct rtnet_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct pci_dev *pdev = adapter->pdev;
+
+	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
+
+	pm_runtime_get_sync(&pdev->dev);
+
+	if (!test_bit(__E1000_DOWN, &adapter->state)) {
+		e1000e_down(adapter);
+		e1000_free_irq(adapter);
+	}
+	e1000_power_down_phy(adapter);
+
+	rt_stack_disconnect(netdev);
+
+	e1000e_free_tx_resources(adapter);
+	e1000e_free_rx_resources(adapter);
+
+	/*
+	 * kill manageability vlan ID if supported, but not if a vlan with
+	 * the same ID is registered on the host OS (let 8021q kill it)
+	 */
+	if (adapter->hw.mng_cookie.status &
+	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
+		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+
+	/*
+	 * If AMT is enabled, let the firmware know that the network
+	 * interface is now closed
+	 */
+	if ((adapter->flags & FLAG_HAS_AMT) &&
+	    !test_bit(__E1000_TESTING, &adapter->state))
+		e1000e_release_hw_control(adapter);
+
+	pm_runtime_put_sync(&pdev->dev);
+
+	return 0;
+}
+
+/**
+ * e1000e_update_phy_task - work thread to update phy
+ * @work: pointer to our work struct
+ *
+ * this worker thread exists because reading the phy requires
+ * acquiring a semaphore, which may sleep (msleep) while waiting,
+ * and we can't sleep in a timer.
+ **/
+static void e1000e_update_phy_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+					struct e1000_adapter, update_phy_task);
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
+	e1000_get_phy_info(&adapter->hw);
+}
+
+/*
+ * Need to wait a few seconds after link up to get diagnostic information from
+ * the phy
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+static void e1000_update_phy_info(struct timer_list *t)
+{
+	struct e1000_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+#else /* < 4.14 */
+static void e1000_update_phy_info(unsigned long data)
+{
+	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+#endif /* < 4.14 */
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
+	rtdm_schedule_nrt_work(&adapter->update_phy_task);
+}
+
+/**
+ * e1000_phy_read_status - Update the PHY register status snapshot
+ * @adapter: board private structure
+ **/
+static void e1000_phy_read_status(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_phy_regs *phy = &adapter->phy_regs;
+
+	if ((er32(STATUS) & E1000_STATUS_LU) &&
+	    (adapter->hw.phy.media_type == e1000_media_type_copper)) {
+		int ret_val;
+
+		ret_val  = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
+		ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
+		ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
+		ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
+		ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
+		ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
+		ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
+		ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
+		if (ret_val)
+			e_warn("Error reading PHY register\n");
+	} else {
+		/*
+		 * Do not read PHY registers if link is not up
+		 * Set values to typical power-on defaults
+		 */
+		phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
+		phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
+			     BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
+			     BMSR_ERCAP);
+		phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
+				  ADVERTISE_ALL | ADVERTISE_CSMA);
+		phy->lpa = 0;
+		phy->expansion = EXPANSION_ENABLENPAGE;
+		phy->ctrl1000 = ADVERTISE_1000FULL;
+		phy->stat1000 = 0;
+		phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
+	}
+}
+
+static void e1000_print_link_info(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl = er32(CTRL);
+
+	/* Link status message must follow this format for user tools */
+	printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
+	       "Flow Control: %s\n",
+	       adapter->netdev->name,
+	       adapter->link_speed,
+	       (adapter->link_duplex == FULL_DUPLEX) ?
+	       "Full Duplex" : "Half Duplex",
+	       ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
+	       "Rx/Tx" :
+	       ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
+		((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
+}
+
+static bool e1000e_has_link(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	bool link_active = 0;
+	s32 ret_val = 0;
+
+	/*
+	 * get_link_status is set on LSC (link status) interrupt or
+	 * Rx sequence error interrupt.  get_link_status will stay
+	 * true until the check_for_link establishes link
+	 * for copper adapters ONLY
+	 */
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		if (hw->mac.get_link_status) {
+			ret_val = hw->mac.ops.check_for_link(hw);
+			link_active = !hw->mac.get_link_status;
+		} else {
+			link_active = 1;
+		}
+		break;
+	case e1000_media_type_fiber:
+		ret_val = hw->mac.ops.check_for_link(hw);
+		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
+		break;
+	case e1000_media_type_internal_serdes:
+		ret_val = hw->mac.ops.check_for_link(hw);
+		link_active = adapter->hw.mac.serdes_has_link;
+		break;
+	default:
+	case e1000_media_type_unknown:
+		break;
+	}
+
+	if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
+	    (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+		/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
+		e_info("Gigabit has been disabled, downgrading speed\n");
+	}
+
+	return link_active;
+}
+
+static void e1000e_enable_receives(struct e1000_adapter *adapter)
+{
+	/* make sure the receive unit is started */
+	if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
+	    (adapter->flags & FLAG_RX_RESTART_NOW)) {
+		struct e1000_hw *hw = &adapter->hw;
+		u32 rctl = er32(RCTL);
+		ew32(RCTL, rctl | E1000_RCTL_EN);
+		adapter->flags &= ~FLAG_RX_RESTART_NOW;
+	}
+}
+
+static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/*
+	 * With 82574 controllers, the PHY needs to be checked periodically
+	 * for a hung state and reset if two consecutive checks report a hang.
+	 */
+	if (e1000_check_phy_82574(hw))
+		adapter->phy_hang_count++;
+	else
+		adapter->phy_hang_count = 0;
+
+	if (adapter->phy_hang_count > 1) {
+		adapter->phy_hang_count = 0;
+		rtdm_schedule_nrt_work(&adapter->reset_task);
+	}
+}
+
+/**
+ * e1000_watchdog - Timer Call-back
+ * @data: timer context: an unsigned long cast of the adapter (< 4.14) or the timer_list pointer (>= 4.14)
+ **/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+static void e1000_watchdog(struct timer_list *t)
+{
+	struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+#else /* < 4.14 */
+static void e1000_watchdog(unsigned long data)
+{
+	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+#endif /* < 4.14 */
+
+	/* Do the rest outside of interrupt context */
+	rtdm_schedule_nrt_work(&adapter->watchdog_task);
+
+	/* TODO: make this use queue_delayed_work() */
+}
+
+static void e1000_watchdog_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+					struct e1000_adapter, watchdog_task);
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+	struct e1000_phy_info *phy = &adapter->hw.phy;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 link, tctl;
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
+	link = e1000e_has_link(adapter);
+	if ((rtnetif_carrier_ok(netdev)) && link) {
+		e1000e_enable_receives(adapter);
+		goto link_up;
+	}
+
+	if ((e1000e_enable_tx_pkt_filtering(hw)) &&
+	    (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
+		e1000_update_mng_vlan(adapter);
+
+	if (link) {
+		if (!rtnetif_carrier_ok(netdev)) {
+			bool txb2b = 1;
+
+			/* update snapshot of PHY registers on LSC */
+			e1000_phy_read_status(adapter);
+			mac->ops.get_link_up_info(&adapter->hw,
+						   &adapter->link_speed,
+						   &adapter->link_duplex);
+			e1000_print_link_info(adapter);
+			/*
+			 * On supported PHYs, check for duplex mismatch only
+			 * if link has autonegotiated at 10/100 half
+			 */
+			if ((hw->phy.type == e1000_phy_igp_3 ||
+			     hw->phy.type == e1000_phy_bm) &&
+			    (hw->mac.autoneg == true) &&
+			    (adapter->link_speed == SPEED_10 ||
+			     adapter->link_speed == SPEED_100) &&
+			    (adapter->link_duplex == HALF_DUPLEX)) {
+				u16 autoneg_exp;
+
+				e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
+
+				if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
+					e_info("Autonegotiated half duplex but"
+					       " link partner cannot autoneg. "
+					       " Try forcing full duplex if "
+					       "link gets many collisions.\n");
+			}
+
+			/* adjust timeout factor according to speed/duplex */
+			adapter->tx_timeout_factor = 1;
+			switch (adapter->link_speed) {
+			case SPEED_10:
+				txb2b = 0;
+				adapter->tx_timeout_factor = 16;
+				break;
+			case SPEED_100:
+				txb2b = 0;
+				adapter->tx_timeout_factor = 10;
+				break;
+			}
+
+			/*
+			 * workaround: re-program speed mode bit after
+			 * link-up event
+			 */
+			if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
+			    !txb2b) {
+				u32 tarc0;
+				tarc0 = er32(TARC(0));
+				tarc0 &= ~SPEED_MODE_BIT;
+				ew32(TARC(0), tarc0);
+			}
+
+			/*
+			 * disable TSO for pcie and 10/100 speeds, to avoid
+			 * some hardware issues
+			 */
+			if (!(adapter->flags & FLAG_TSO_FORCE)) {
+				switch (adapter->link_speed) {
+				case SPEED_10:
+				case SPEED_100:
+					e_info("10/100 speed: disabling TSO\n");
+					netdev->features &= ~NETIF_F_TSO;
+					netdev->features &= ~NETIF_F_TSO6;
+					break;
+				case SPEED_1000:
+					netdev->features |= NETIF_F_TSO;
+					netdev->features |= NETIF_F_TSO6;
+					break;
+				default:
+					/* oops */
+					break;
+				}
+			}
+
+			/*
+			 * enable transmits in the hardware, need to do this
+			 * after setting TARC(0)
+			 */
+			tctl = er32(TCTL);
+			tctl |= E1000_TCTL_EN;
+			ew32(TCTL, tctl);
+
+			/*
+			 * Perform any post-link-up configuration before
+			 * reporting link up.
+			 */
+			if (phy->ops.cfg_on_link_up)
+				phy->ops.cfg_on_link_up(hw);
+
+			rtnetif_carrier_on(netdev);
+
+			if (!test_bit(__E1000_DOWN, &adapter->state))
+				mod_timer(&adapter->phy_info_timer,
+					  round_jiffies(jiffies + 2 * HZ));
+		}
+	} else {
+		if (rtnetif_carrier_ok(netdev)) {
+			adapter->link_speed = 0;
+			adapter->link_duplex = 0;
+			/* Link status message must follow this format */
+			printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
+			       adapter->netdev->name);
+			rtnetif_carrier_off(netdev);
+			if (!test_bit(__E1000_DOWN, &adapter->state))
+				mod_timer(&adapter->phy_info_timer,
+					  round_jiffies(jiffies + 2 * HZ));
+
+			if (adapter->flags & FLAG_RX_NEEDS_RESTART)
+				rtdm_schedule_nrt_work(&adapter->reset_task);
+		}
+	}
+
+link_up:
+	spin_lock(&adapter->stats64_lock);
+
+	mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+	adapter->tpt_old = adapter->stats.tpt;
+	mac->collision_delta = adapter->stats.colc - adapter->colc_old;
+	adapter->colc_old = adapter->stats.colc;
+
+	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
+	adapter->gorc_old = adapter->stats.gorc;
+	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
+	adapter->gotc_old = adapter->stats.gotc;
+	spin_unlock(&adapter->stats64_lock);
+
+	e1000e_update_adaptive(&adapter->hw);
+
+	if (!rtnetif_carrier_ok(netdev) &&
+	    (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
+		/*
+		 * We've lost link, so the controller stops DMA,
+		 * but we've got queued Tx work that's never going
+		 * to get done, so reset controller to flush Tx.
+		 * (Do the reset outside of interrupt context).
+		 */
+		rtdm_schedule_nrt_work(&adapter->reset_task);
+		/* return immediately since reset is imminent */
+		return;
+	}
+
+	/* Simple mode for Interrupt Throttle Rate (ITR) */
+	if (adapter->itr_setting == 4) {
+		/*
+		 * Symmetric Tx/Rx gets a reduced ITR=2000;
+		 * Total asymmetrical Tx or Rx gets ITR=8000;
+		 * everyone else is between 2000-8000.
+		 */
+		u32 goc = (adapter->gotc + adapter->gorc) / 10000;
+		u32 dif = (adapter->gotc > adapter->gorc ?
+			    adapter->gotc - adapter->gorc :
+			    adapter->gorc - adapter->gotc) / 10000;
+		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
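+		/*
+		 * The ITR register counts in 256 ns increments, so convert
+		 * the target rate (interrupts/sec) into that unit here.
+		 */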
+		ew32(ITR, 1000000000 / (itr * 256));
+	}
+
+	/* Cause software interrupt to ensure Rx ring is cleaned */
+	if (adapter->msix_entries)
+		ew32(ICS, adapter->rx_ring->ims_val);
+	else
+		ew32(ICS, E1000_ICS_RXDMT0);
+
+	/* flush pending descriptors to memory before detecting Tx hang */
+	e1000e_flush_descriptors(adapter);
+
+	/* Force detection of hung controller every watchdog period */
+	adapter->detect_tx_hung = 1;
+
+	/*
+	 * With 82571 controllers, LAA may be overwritten due to controller
+	 * reset from the other port. Set the appropriate LAA in RAR[0]
+	 */
+	if (e1000e_get_laa_state_82571(hw))
+		e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
+
+	if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
+		e1000e_check_82574_phy_workaround(adapter);
+
+	/* Reset the timer */
+	if (!test_bit(__E1000_DOWN, &adapter->state))
+		mod_timer(&adapter->watchdog_timer,
+			  round_jiffies(jiffies + 2 * HZ));
+}
+
+#define E1000_TX_FLAGS_CSUM		0x00000001
+#define E1000_TX_FLAGS_VLAN		0x00000002
+#define E1000_TX_FLAGS_TSO		0x00000004
+#define E1000_TX_FLAGS_IPV4		0x00000008
+#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
+#define E1000_TX_FLAGS_VLAN_SHIFT	16
+
+#define E1000_MAX_PER_TXD	8192
+#define E1000_MAX_TXD_PWR	12
+
+static int e1000_tx_map(struct e1000_adapter *adapter,
+			struct rtskb *skb, unsigned int first)
+{
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct e1000_buffer *buffer_info;
+	unsigned int offset = 0, size, i;
+
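+	/*
+	 * RTnet rtskbs are linear and already DMA-mapped via the
+	 * ->map_rtskb handler, so a single descriptor covers the
+	 * whole frame here.
+	 */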
+	i = tx_ring->next_to_use;
+
+	buffer_info = &tx_ring->buffer_info[i];
+	size = skb->len;
+
+	buffer_info->length = size;
+	buffer_info->time_stamp = jiffies;
+	buffer_info->next_to_watch = i;
+	buffer_info->dma = rtskb_data_dma_addr(skb, offset);
+	buffer_info->mapped_as_page = false;
+
+	tx_ring->buffer_info[i].skb = skb;
+	tx_ring->buffer_info[i].segs = 1;
+	tx_ring->buffer_info[i].bytecount = size;
+	tx_ring->buffer_info[first].next_to_watch = i;
+
+	return 1;
+}
+
+static void e1000_tx_queue(struct e1000_adapter *adapter,
+			   int tx_flags, int count)
+{
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct e1000_tx_desc *tx_desc = NULL;
+	struct e1000_buffer *buffer_info;
+	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
+	unsigned int i;
+
+	if (tx_flags & E1000_TX_FLAGS_CSUM) {
+		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+	}
+
+	if (tx_flags & E1000_TX_FLAGS_VLAN) {
+		txd_lower |= E1000_TXD_CMD_VLE;
+		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
+	}
+
+	i = tx_ring->next_to_use;
+
+	do {
+		buffer_info = &tx_ring->buffer_info[i];
+		tx_desc = E1000_TX_DESC(*tx_ring, i);
+		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+		tx_desc->lower.data =
+			cpu_to_le32(txd_lower | buffer_info->length);
+		tx_desc->upper.data = cpu_to_le32(txd_upper);
+
+		i++;
+		if (i == tx_ring->count)
+			i = 0;
+	} while (--count > 0);
+
+	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
+
+	/*
+	 * Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+
+	tx_ring->next_to_use = i;
+
+	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+		e1000e_update_tdt_wa(adapter, i);
+	else
+		writel(i, adapter->hw.hw_addr + tx_ring->tail);
+
+	/*
+	 * we need this if more than one processor can write to our tail
+	 * at a time, it synchronizes IO on IA64/Altix systems
+	 */
+	mmiowb();
+}
+
+#define MINIMUM_DHCP_PACKET_SIZE 282
+static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
+				    struct rtskb *skb)
+{
+	struct e1000_hw *hw =  &adapter->hw;
+	u16 length, offset;
+
+	if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
+		return 0;
+
+	if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
+		return 0;
+
+	{
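+		/* Skip the 14-byte Ethernet header to reach the IP header. */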
+		const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
+		struct udphdr *udp;
+
+		if (ip->protocol != IPPROTO_UDP)
+			return 0;
+
+		udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
+		if (ntohs(udp->dest) != 67)
+			return 0;
+
+		offset = (u8 *)udp + 8 - skb->data;
+		length = skb->len - offset;
+		return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
+	}
+
+	return 0;
+}
+
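+/* Conservative number of descriptors needed for S bytes, 2^X bytes each. */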
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
+static int e1000_xmit_frame(struct rtskb *skb, struct rtnet_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	rtdm_lockctx_t context;
+	unsigned int first;
+	unsigned int tx_flags = 0;
+	int count = 0;
+
+	if (test_bit(__E1000_DOWN, &adapter->state)) {
+		kfree_rtskb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (skb->len <= 0) {
+		kfree_rtskb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	count++;
+
+	count += skb->len;
+
+	if (adapter->hw.mac.tx_pkt_filtering)
+		e1000_transfer_dhcp_info(adapter, skb);
+
+	rtdm_lock_get_irqsave(&tx_ring->lock, context);
+
+	first = tx_ring->next_to_use;
+
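+	/*
+	 * RTnet TX time-stamping: write the transmission time (now plus
+	 * the pre-stored offset) back through skb->xmit_stamp before the
+	 * frame is queued to hardware.
+	 */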
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp =
+			cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+
+	/* if count is 0 then mapping error has occurred */
+	count = e1000_tx_map(adapter, skb, first);
+	if (count) {
+		e1000_tx_queue(adapter, tx_flags, count);
+		rtdm_lock_put_irqrestore(&tx_ring->lock, context);
+	} else {
+		tx_ring->buffer_info[first].time_stamp = 0;
+		tx_ring->next_to_use = first;
+		rtdm_lock_put_irqrestore(&tx_ring->lock, context);
+		kfree_rtskb(skb);
+	}
+
+	return NETDEV_TX_OK;
+}
+
+static void e1000_reset_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter;
+	adapter = container_of(work, struct e1000_adapter, reset_task);
+
+	/* don't run the task if already down */
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
+	if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
+	      (adapter->flags & FLAG_RX_RESTART_NOW))) {
+		e1000e_dump(adapter);
+		e_err("Reset adapter\n");
+	}
+	e1000e_reinit_locked(adapter);
+}
+
+static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 i, mac_reg;
+	u16 phy_reg, wuc_enable;
+	int retval = 0;
+
+	/* copy MAC RARs to PHY RARs */
+	e1000_copy_rx_addrs_to_phy_ich8lan(hw);
+
+	retval = hw->phy.ops.acquire(hw);
+	if (retval) {
+		e_err("Could not acquire PHY\n");
+		return retval;
+	}
+
+	/* Enable access to the wakeup registers and set the page to BM_WUC_PAGE */
+	retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
+	if (retval)
+		goto out;
+
+	/* copy MAC MTA to PHY MTA - only needed for pchlan */
+	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
+		mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
+		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
+					   (u16)(mac_reg & 0xFFFF));
+		hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
+					   (u16)((mac_reg >> 16) & 0xFFFF));
+	}
+
+	/* configure PHY Rx Control register */
+	hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
+	mac_reg = er32(RCTL);
+	if (mac_reg & E1000_RCTL_UPE)
+		phy_reg |= BM_RCTL_UPE;
+	if (mac_reg & E1000_RCTL_MPE)
+		phy_reg |= BM_RCTL_MPE;
+	phy_reg &= ~(BM_RCTL_MO_MASK);
+	if (mac_reg & E1000_RCTL_MO_3)
+		phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
+				<< BM_RCTL_MO_SHIFT);
+	if (mac_reg & E1000_RCTL_BAM)
+		phy_reg |= BM_RCTL_BAM;
+	if (mac_reg & E1000_RCTL_PMCF)
+		phy_reg |= BM_RCTL_PMCF;
+	mac_reg = er32(CTRL);
+	if (mac_reg & E1000_CTRL_RFCE)
+		phy_reg |= BM_RCTL_RFCE;
+	hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
+
+	/* enable PHY wakeup in MAC register */
+	ew32(WUFC, wufc);
+	ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
+
+	/* configure and enable PHY wakeup in PHY registers */
+	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
+	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
+
+	/* activate PHY wakeup */
+	wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
+	retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
+	if (retval)
+		e_err("Could not set PHY Host Wakeup bit\n");
+out:
+	hw->phy.ops.release(hw);
+
+	return retval;
+}
+
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
+			    bool runtime)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl, ctrl_ext, rctl, status;
+	/* Runtime suspend should only enable wakeup for link changes */
+	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+	int retval = 0;
+
+	rtnetif_device_detach(netdev);
+
+	if (rtnetif_running(netdev)) {
+		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
+		e1000e_down(adapter);
+		e1000_free_irq(adapter);
+	}
+	e1000e_reset_interrupt_capability(adapter);
+
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+
+	status = er32(STATUS);
+	if (status & E1000_STATUS_LU)
+		wufc &= ~E1000_WUFC_LNKC;
+
+	if (wufc) {
+		e1000_setup_rctl(adapter);
+		e1000_set_multi(netdev);
+
+		/* turn on all-multi mode if wake on multicast is enabled */
+		if (wufc & E1000_WUFC_MC) {
+			rctl = er32(RCTL);
+			rctl |= E1000_RCTL_MPE;
+			ew32(RCTL, rctl);
+		}
+
+		ctrl = er32(CTRL);
+		/* advertise wake from D3Cold */
+		#define E1000_CTRL_ADVD3WUC 0x00100000
+		/* phy power management enable */
+		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
+		ctrl |= E1000_CTRL_ADVD3WUC;
+		if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
+			ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
+		ew32(CTRL, ctrl);
+
+		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
+		    adapter->hw.phy.media_type ==
+		    e1000_media_type_internal_serdes) {
+			/* keep the laser running in D3 */
+			ctrl_ext = er32(CTRL_EXT);
+			ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
+			ew32(CTRL_EXT, ctrl_ext);
+		}
+
+		if (adapter->flags & FLAG_IS_ICH)
+			e1000_suspend_workarounds_ich8lan(&adapter->hw);
+
+		/* Allow time for pending master requests to run */
+		e1000e_disable_pcie_master(&adapter->hw);
+
+		if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
+			/* enable wakeup by the PHY */
+			retval = e1000_init_phy_wakeup(adapter, wufc);
+			if (retval)
+				return retval;
+		} else {
+			/* enable wakeup by the MAC */
+			ew32(WUFC, wufc);
+			ew32(WUC, E1000_WUC_PME_EN);
+		}
+	} else {
+		ew32(WUC, 0);
+		ew32(WUFC, 0);
+	}
+
+	*enable_wake = !!wufc;
+
+	/* make sure adapter isn't asleep if manageability is enabled */
+	if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
+	    (hw->mac.ops.check_mng_mode(hw)))
+		*enable_wake = true;
+
+	if (adapter->hw.phy.type == e1000_phy_igp_3)
+		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
+
+	/*
+	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant.
+	 */
+	e1000e_release_hw_control(adapter);
+
+	pci_disable_device(pdev);
+
+	return 0;
+}
+
+static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
+{
+	if (sleep && wake) {
+		pci_prepare_to_sleep(pdev);
+		return;
+	}
+
+	pci_wake_from_d3(pdev, wake);
+	pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
+				    bool wake)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+
+	/*
+	 * The pci-e switch on some quad port adapters will report a
+	 * correctable error when the MAC transitions from D0 to D3.  To
+	 * prevent this we need to mask off the correctable errors on the
+	 * downstream port of the pci-e switch.
+	 */
+	if (adapter->flags & FLAG_IS_QUAD_PORT) {
+		struct pci_dev *us_dev = pdev->bus->self;
+		int pos = pci_pcie_cap(us_dev);
+		u16 devctl;
+
+		pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
+		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
+				      (devctl & ~PCI_EXP_DEVCTL_CERE));
+
+		e1000_power_off(pdev, sleep, wake);
+
+		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
+	} else {
+		e1000_power_off(pdev, sleep, wake);
+	}
+}
+
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+	int pos;
+	u16 reg16;
+
+	/*
+	 * Both device and parent should have the same ASPM setting.
+	 * Disable ASPM in downstream component first and then upstream.
+	 */
+	pos = pci_pcie_cap(pdev);
+	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+	reg16 &= ~state;
+	pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+
+	if (!pdev->bus->self)
+		return;
+
+	pos = pci_pcie_cap(pdev->bus->self);
+	pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
+	reg16 &= ~state;
+	pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
+}
+
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+	dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
+		 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
+		 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
+
+	__e1000e_disable_aspm(pdev, state);
+}
+
+static void e1000_shutdown(struct pci_dev *pdev)
+{
+	bool wake = false;
+
+	__e1000_shutdown(pdev, &wake, false);
+
+	if (system_state == SYSTEM_POWER_OFF)
+		e1000_complete_shutdown(pdev, false, wake);
+}
+
+/**
+ * e1000_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+
+	rtnetif_device_detach(netdev);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	if (rtnetif_running(netdev))
+		e1000e_down(adapter);
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the e1000_resume routine.
+ */
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	u16 aspm_disable_flag = 0;
+	int err;
+	pci_ers_result_t result;
+
+	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
+		aspm_disable_flag = PCIE_LINK_STATE_L0S;
+	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+		aspm_disable_flag |= PCIE_LINK_STATE_L1;
+	if (aspm_disable_flag)
+		e1000e_disable_aspm(pdev, aspm_disable_flag);
+
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Cannot re-enable PCI device after reset.\n");
+		result = PCI_ERS_RESULT_DISCONNECT;
+	} else {
+		pci_set_master(pdev);
+		pdev->state_saved = true;
+		pci_restore_state(pdev);
+
+		pci_enable_wake(pdev, PCI_D3hot, 0);
+		pci_enable_wake(pdev, PCI_D3cold, 0);
+
+		e1000e_reset(adapter);
+		ew32(WUS, ~0);
+		result = PCI_ERS_RESULT_RECOVERED;
+	}
+
+	pci_aer_clear_nonfatal_status(pdev);
+
+	return result;
+}
+
+/**
+ * e1000_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the e1000_resume routine.
+ */
+static void e1000_io_resume(struct pci_dev *pdev)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+
+	e1000_init_manageability_pt(adapter);
+
+	if (rtnetif_running(netdev)) {
+		if (e1000e_up(adapter)) {
+			dev_err(&pdev->dev,
+				"can't bring device back up after reset\n");
+			return;
+		}
+	}
+
+	rtnetif_device_attach(netdev);
+
+	/*
+	 * If the controller has AMT, do not set DRV_LOAD until the interface
+	 * is up.  For all other cases, let the f/w know that the h/w is now
+	 * under the control of the driver.
+	 */
+	if (!(adapter->flags & FLAG_HAS_AMT))
+		e1000e_get_hw_control(adapter);
+
+}
+
+static void e1000_print_device_info(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct rtnet_device *netdev = adapter->netdev;
+	u32 ret_val;
+	u8 pba_str[E1000_PBANUM_LENGTH];
+
+	/* print bus type/speed/width info */
+	e_info("(PCI Express:2.5GT/s:%s) %pM\n",
+	       /* bus width */
+	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+		"Width x1"),
+	       /* MAC address */
+	       netdev->dev_addr);
+	e_info("Intel(R) PRO/%s Network Connection\n",
+	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
+	ret_val = e1000_read_pba_string_generic(hw, pba_str,
+						E1000_PBANUM_LENGTH);
+	if (ret_val)
+		strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
+	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
+	       hw->mac.type, hw->phy.type, pba_str);
+}
+
+static void e1000_eeprom_checks(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	int ret_val;
+	u16 buf = 0;
+
+	if (hw->mac.type != e1000_82573)
+		return;
+
+	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
+	if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
+		/* Deep Smart Power Down (DSPD) */
+		dev_warn(&adapter->pdev->dev,
+			 "Warning: detected DSPD enabled in EEPROM\n");
+	}
+}
+
+static dma_addr_t e1000_map_rtskb(struct rtnet_device *netdev,
+				  struct rtskb *skb)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct device *dev = &adapter->pdev->dev;
+	dma_addr_t addr;
+
+	addr = dma_map_single(dev, skb->buf_start, RTSKB_SIZE,
+			      DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, addr)) {
+		dev_err(dev, "DMA map failed\n");
+		return RTSKB_UNMAPPED;
+	}
+	return addr;
+}
+
+static void e1000_unmap_rtskb(struct rtnet_device *netdev,
+			      struct rtskb *skb)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct device *dev = &adapter->pdev->dev;
+
+	dma_unmap_single(dev, skb->buf_dma_addr, RTSKB_SIZE,
+			 DMA_BIDIRECTIONAL);
+}
+
+/**
+ * e1000_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in e1000_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * e1000_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int e1000_probe(struct pci_dev *pdev,
+				 const struct pci_device_id *ent)
+{
+	struct rtnet_device *netdev;
+	struct e1000_adapter *adapter;
+	struct e1000_hw *hw;
+	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
+	resource_size_t mmio_start, mmio_len;
+	resource_size_t flash_start, flash_len;
+
+	static int cards_found;
+	u16 aspm_disable_flag = 0;
+	int i, err, pci_using_dac;
+	u16 eeprom_data = 0;
+	u16 eeprom_apme_mask = E1000_EEPROM_APME;
+
+	if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
+		aspm_disable_flag = PCIE_LINK_STATE_L0S;
+	if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
+		aspm_disable_flag |= PCIE_LINK_STATE_L1;
+	if (aspm_disable_flag)
+		e1000e_disable_aspm(pdev, aspm_disable_flag);
+
+	err = pci_enable_device_mem(pdev);
+	if (err)
+		return err;
+
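+	/* Prefer 64-bit DMA addressing; fall back to a 32-bit mask if needed. */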
+	pci_using_dac = 0;
+	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+	if (!err) {
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+		if (!err)
+			pci_using_dac = 1;
+	} else {
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
+			if (err) {
+				dev_err(&pdev->dev, "No usable DMA "
+					"configuration, aborting\n");
+				goto err_dma;
+			}
+		}
+	}
+
+	err = pci_request_selected_regions_exclusive(pdev,
+					  pci_select_bars(pdev, IORESOURCE_MEM),
+					  e1000e_driver_name);
+	if (err)
+		goto err_pci_reg;
+
+	/* AER (Advanced Error Reporting) hooks */
+	pci_enable_pcie_error_reporting(pdev);
+
+	pci_set_master(pdev);
+	/* PCI config space info */
+	err = pci_save_state(pdev);
+	if (err)
+		goto err_alloc_etherdev;
+
+	err = -ENOMEM;
+	netdev = rt_alloc_etherdev(sizeof(*adapter),
+				2 * RT_E1000E_NUM_RXD + 256);
+	if (!netdev)
+		goto err_alloc_etherdev;
+
+	rtdev_alloc_name(netdev, "rteth%d");
+	rt_rtdev_connect(netdev, &RTDEV_manager);
+	netdev->vers = RTDEV_VERS_2_0;
+	netdev->sysbind = &pdev->dev;
+
+	netdev->irq = pdev->irq;
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = netdev->priv;
+	hw = &adapter->hw;
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	adapter->ei = ei;
+	adapter->pba = ei->pba;
+	adapter->flags = ei->flags;
+	adapter->flags2 = ei->flags2;
+	adapter->hw.adapter = adapter;
+	adapter->hw.mac.type = ei->mac;
+	adapter->max_hw_frame_size = ei->max_hw_frame_size;
+	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
+
+	mmio_start = pci_resource_start(pdev, 0);
+	mmio_len = pci_resource_len(pdev, 0);
+
+	err = -EIO;
+	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
+	if (!adapter->hw.hw_addr)
+		goto err_ioremap;
+
+	if ((adapter->flags & FLAG_HAS_FLASH) &&
+	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+		flash_start = pci_resource_start(pdev, 1);
+		flash_len = pci_resource_len(pdev, 1);
+		adapter->hw.flash_address = ioremap(flash_start, flash_len);
+		if (!adapter->hw.flash_address)
+			goto err_flashmap;
+	}
+
+	/* construct the net_device struct */
+	netdev->open = e1000_open;
+	netdev->stop = e1000_close;
+	netdev->hard_start_xmit = e1000_xmit_frame;
+	//netdev->get_stats = e1000_get_stats;
+	netdev->map_rtskb = e1000_map_rtskb;
+	netdev->unmap_rtskb = e1000_unmap_rtskb;
+	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+	netdev->mem_start = mmio_start;
+	netdev->mem_end = mmio_start + mmio_len;
+
+	adapter->bd_number = cards_found++;
+
+	e1000e_check_options(adapter);
+
+	/* setup adapter struct */
+	err = e1000_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
+	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
+	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
+
+	err = ei->get_variants(adapter);
+	if (err)
+		goto err_hw_init;
+
+	if ((adapter->flags & FLAG_IS_ICH) &&
+	    (adapter->flags & FLAG_READ_ONLY_NVM))
+		e1000e_write_protect_nvm_ich8lan(&adapter->hw);
+
+	hw->mac.ops.get_bus_info(&adapter->hw);
+
+	adapter->hw.phy.autoneg_wait_to_complete = 0;
+
+	/* Copper options */
+	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+		adapter->hw.phy.mdix = AUTO_ALL_MODES;
+		adapter->hw.phy.disable_polarity_correction = 0;
+		adapter->hw.phy.ms_type = e1000_ms_hw_default;
+	}
+
+	if (e1000_check_reset_block(&adapter->hw))
+		e_info("PHY reset is blocked due to SOL/IDER session.\n");
+
+	/* Set initial default active device features */
+	netdev->features = (NETIF_F_SG |
+			    NETIF_F_HW_VLAN_CTAG_RX |
+			    NETIF_F_HW_VLAN_CTAG_TX |
+			    NETIF_F_TSO |
+			    NETIF_F_TSO6 |
+			    NETIF_F_RXCSUM |
+			    NETIF_F_HW_CSUM);
+
+	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
+		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+	if (pci_using_dac) {
+		netdev->features |= NETIF_F_HIGHDMA;
+	}
+
+	if (e1000e_enable_mng_pass_thru(&adapter->hw))
+		adapter->flags |= FLAG_MNG_PT_ENABLED;
+
+	/*
+	 * before reading the NVM, reset the controller to
+	 * put the device in a known good starting state
+	 */
+	adapter->hw.mac.ops.reset_hw(&adapter->hw);
+
+	/*
+	 * systems with ASPM and others may see the checksum fail on the first
+	 * attempt. Let's give it a few tries
+	 */
+	for (i = 0;; i++) {
+		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
+			break;
+		if (i == 2) {
+			e_err("The NVM Checksum Is Not Valid\n");
+			err = -EIO;
+			goto err_eeprom;
+		}
+	}
+
+	e1000_eeprom_checks(adapter);
+
+	/* copy the MAC address */
+	if (e1000e_read_mac_addr(&adapter->hw))
+		e_err("NVM Read Error while reading MAC address\n");
+
+	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+	timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
+	timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);
+#else /* < 4.14 */
+	init_timer(&adapter->watchdog_timer);
+	adapter->watchdog_timer.function = e1000_watchdog;
+	adapter->watchdog_timer.data = (unsigned long) adapter;
+
+	init_timer(&adapter->phy_info_timer);
+	adapter->phy_info_timer.function = e1000_update_phy_info;
+	adapter->phy_info_timer.data = (unsigned long) adapter;
+#endif /* < 4.14 */
+
+	INIT_WORK(&adapter->reset_task, e1000_reset_task);
+	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
+	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
+	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
+
+	rtdm_nrtsig_init(&adapter->mod_timer_sig, e1000e_mod_watchdog_timer,
+			(void*)&adapter->watchdog_timer);
+	rtdm_nrtsig_init(&adapter->downshift_sig, e1000e_trigger_downshift,
+			&adapter->downshift_task);
+
+	/* Initialize link parameters. User can change them with ethtool */
+	adapter->hw.mac.autoneg = 1;
+	adapter->fc_autoneg = 1;
+	adapter->hw.fc.requested_mode = e1000_fc_default;
+	adapter->hw.fc.current_mode = e1000_fc_default;
+	adapter->hw.phy.autoneg_advertised = 0x2f;
+
+	/* ring size defaults */
+	adapter->rx_ring->count = RT_E1000E_NUM_RXD;
+	adapter->tx_ring->count = 256;
+
+	/*
+	 * Initial Wake on LAN setting - If APM wake is enabled in
+	 * the EEPROM, enable the ACPI Magic Packet filter
+	 */
+	if (adapter->flags & FLAG_APME_IN_WUC) {
+		/* APME bit in EEPROM is mapped to WUC.APME */
+		eeprom_data = er32(WUC);
+		eeprom_apme_mask = E1000_WUC_APME;
+		if ((hw->mac.type > e1000_ich10lan) &&
+		    (eeprom_data & E1000_WUC_PHY_WAKE))
+			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
+	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
+		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
+		    (adapter->hw.bus.func == 1))
+			e1000_read_nvm(&adapter->hw,
+				NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+		else
+			e1000_read_nvm(&adapter->hw,
+				NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+	}
+
+	/* fetch WoL from EEPROM */
+	if (eeprom_data & eeprom_apme_mask)
+		adapter->eeprom_wol |= E1000_WUFC_MAG;
+
+	/*
+	 * now that we have the eeprom settings, apply the special cases
+	 * where the eeprom may be wrong or the board simply won't support
+	 * wake on lan on a particular port
+	 */
+	if (!(adapter->flags & FLAG_HAS_WOL))
+		adapter->eeprom_wol = 0;
+
+	/* initialize the wol settings based on the eeprom settings */
+	adapter->wol = adapter->eeprom_wol;
+	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+	/* save off EEPROM version number */
+	e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
+
+	/* reset the hardware with the new settings */
+	e1000e_reset(adapter);
+
+	/*
+	 * If the controller has AMT, do not set DRV_LOAD until the interface
+	 * is up.  For all other cases, let the f/w know that the h/w is now
+	 * under the control of the driver.
+	 */
+	if (!(adapter->flags & FLAG_HAS_AMT))
+		e1000e_get_hw_control(adapter);
+
+	strncpy(netdev->name, "rteth%d", sizeof(netdev->name) - 1);
+	err = rt_register_rtnetdev(netdev);
+	if (err)
+		goto err_register;
+
+	/* carrier off reporting is important to ethtool even BEFORE open */
+	rtnetif_carrier_off(netdev);
+
+	e1000_print_device_info(adapter);
+
+	if (pci_dev_run_wake(pdev))
+		pm_runtime_put_noidle(&pdev->dev);
+
+	return 0;
+
+err_register:
+	rtdm_nrtsig_destroy(&adapter->downshift_sig);
+	rtdm_nrtsig_destroy(&adapter->mod_timer_sig);
+	if (!(adapter->flags & FLAG_HAS_AMT))
+		e1000e_release_hw_control(adapter);
+err_eeprom:
+	if (!e1000_check_reset_block(&adapter->hw))
+		e1000_phy_hw_reset(&adapter->hw);
+err_hw_init:
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+err_sw_init:
+	if (adapter->hw.flash_address)
+		iounmap(adapter->hw.flash_address);
+	e1000e_reset_interrupt_capability(adapter);
+err_flashmap:
+	iounmap(adapter->hw.hw_addr);
+err_ioremap:
+	rtdev_free(netdev);
+err_alloc_etherdev:
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ * e1000_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * e1000_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void e1000_remove(struct pci_dev *pdev)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+	bool down = test_bit(__E1000_DOWN, &adapter->state);
+
+	/*
+	 * The timers may be rescheduled, so explicitly disable them
+	 * from being rescheduled.
+	 */
+	if (!down)
+		set_bit(__E1000_DOWN, &adapter->state);
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_info_timer);
+
+	rtdm_nrtsig_destroy(&adapter->downshift_sig);
+	rtdm_nrtsig_destroy(&adapter->mod_timer_sig);
+
+	cancel_work_sync(&adapter->reset_task);
+	cancel_work_sync(&adapter->watchdog_task);
+	cancel_work_sync(&adapter->downshift_task);
+	cancel_work_sync(&adapter->update_phy_task);
+
+	if (!(netdev->flags & IFF_UP))
+		e1000_power_down_phy(adapter);
+
+	/* Don't lie to e1000_close() down the road. */
+	if (!down)
+		clear_bit(__E1000_DOWN, &adapter->state);
+	rt_unregister_rtnetdev(netdev);
+
+	if (pci_dev_run_wake(pdev))
+		pm_runtime_get_noresume(&pdev->dev);
+
+	/*
+	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant.
+	 */
+	e1000e_release_hw_control(adapter);
+
+	e1000e_reset_interrupt_capability(adapter);
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+
+	iounmap(adapter->hw.hw_addr);
+	if (adapter->hw.flash_address)
+		iounmap(adapter->hw.flash_address);
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+
+	rtdev_free(netdev);
+
+	/* AER disable */
+	pci_disable_pcie_error_reporting(pdev);
+
+	pci_disable_device(pdev);
+}
+
+/* PCI Error Recovery (ERS) */
+static struct pci_error_handlers e1000_err_handler = {
+	.error_detected = e1000_io_error_detected,
+	.slot_reset = e1000_io_slot_reset,
+	.resume = e1000_io_resume,
+};
+
+static const struct pci_device_id e1000_pci_tbl[] = {
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
+	  board_80003es2lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
+	  board_80003es2lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
+	  board_80003es2lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
+	  board_80003es2lan },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
+
+	{ }	/* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
+
+/* PCI Device API Driver */
+static struct pci_driver e1000_driver = {
+	.name     = e1000e_driver_name,
+	.id_table = e1000_pci_tbl,
+	.probe    = e1000_probe,
+	.remove   = e1000_remove,
+	.shutdown = e1000_shutdown,
+	.err_handler = &e1000_err_handler
+};
+
+/**
+ * e1000_init_module - Driver Registration Routine
+ *
+ * e1000_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init e1000_init_module(void)
+{
+	int ret;
+	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
+		e1000e_driver_version);
+	pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
+	ret = pci_register_driver(&e1000_driver);
+
+	return ret;
+}
+module_init(e1000_init_module);
+
+/**
+ * e1000_exit_module - Driver Exit Cleanup Routine
+ *
+ * e1000_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit e1000_exit_module(void)
+{
+	pci_unregister_driver(&e1000_driver);
+}
+module_exit(e1000_exit_module);
+
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/* e1000_main.c */
+++ linux-patched/drivers/xenomai/net/drivers/e1000e/phy.c	2022-03-21 12:58:29.575887148 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000e/defines.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/delay.h>
+
+#include "e1000.h"
+
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_wait_autoneg(struct e1000_hw *hw);
+static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg);
+static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
+					  u16 *data, bool read, bool page_set);
+static u32 e1000_get_phy_addr_for_hv_page(u32 page);
+static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+                                          u16 *data, bool read);
+
+/* Cable length tables */
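+/* Entries are estimated cable lengths in meters, indexed by the PHY-reported code. */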
+static const u16 e1000_m88_cable_length_table[] = {
+	0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+		ARRAY_SIZE(e1000_m88_cable_length_table)
+
+static const u16 e1000_igp_2_cable_length_table[] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
+	6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
+	26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
+	44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
+	66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
+	87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
+	100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
+	124};
+#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
+		ARRAY_SIZE(e1000_igp_2_cable_length_table)
+
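+/* Split a combined BM PHY register offset into its page and register number. */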
+#define BM_PHY_REG_PAGE(offset) \
+	((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
+#define BM_PHY_REG_NUM(offset) \
+	((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
+	 (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
+		~MAX_PHY_REG_ADDRESS)))
+
+#define HV_INTC_FC_PAGE_START             768
+#define I82578_ADDR_REG                   29
+#define I82577_ADDR_REG                   16
+#define I82577_CFG_REG                    22
+#define I82577_CFG_ASSERT_CRS_ON_TX       (1 << 15)
+#define I82577_CFG_ENABLE_DOWNSHIFT       (3 << 10) /* auto downshift 100/10 */
+#define I82577_CTRL_REG                   23
+
+/* 82577 specific PHY registers */
+#define I82577_PHY_CTRL_2            18
+#define I82577_PHY_STATUS_2          26
+#define I82577_PHY_DIAG_STATUS       31
+
+/* I82577 PHY Status 2 */
+#define I82577_PHY_STATUS2_REV_POLARITY   0x0400
+#define I82577_PHY_STATUS2_MDIX           0x0800
+#define I82577_PHY_STATUS2_SPEED_MASK     0x0300
+#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
+
+/* I82577 PHY Control 2 */
+#define I82577_PHY_CTRL2_AUTO_MDIX        0x0400
+#define I82577_PHY_CTRL2_FORCE_MDI_MDIX   0x0200
+
+/* I82577 PHY Diagnostics Status */
+#define I82577_DSTATUS_CABLE_LENGTH       0x03FC
+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
+
+/* BM PHY Copper Specific Control 1 */
+#define BM_CS_CTRL1                       16
+
+#define HV_MUX_DATA_CTRL               PHY_REG(776, 16)
+#define HV_MUX_DATA_CTRL_GEN_TO_MAC    0x0400
+#define HV_MUX_DATA_CTRL_FORCE_SPEED   0x0004
+
+/**
+ *  e1000e_check_reset_block_generic - Check if PHY reset is blocked
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the PHY management control register and check whether a PHY reset
+ *  is blocked.  If a reset is not blocked return 0, otherwise
+ *  return E1000_BLK_PHY_RESET (12).
+ **/
+s32 e1000e_check_reset_block_generic(struct e1000_hw *hw)
+{
+	u32 manc;
+
+	manc = er32(MANC);
+
+	return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+	       E1000_BLK_PHY_RESET : 0;
+}
+
+/**
+ *  e1000e_get_phy_id - Retrieve the PHY ID and revision
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY registers and stores the PHY ID and possibly the PHY
+ *  revision in the hardware structure.
+ **/
+s32 e1000e_get_phy_id(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 phy_id;
+	u16 retry_count = 0;
+
+	if (!(phy->ops.read_reg))
+		goto out;
+
+	while (retry_count < 2) {
+		ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
+		if (ret_val)
+			goto out;
+
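+		/*
+		 * PHY_ID1 supplies the upper 16 bits of the identifier;
+		 * PHY_ID2 adds the lower bits and the revision.
+		 */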
+		phy->id = (u32)(phy_id << 16);
+		udelay(20);
+		ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
+		if (ret_val)
+			goto out;
+
+		phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+		phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+		if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
+			goto out;
+
+		retry_count++;
+	}
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_phy_reset_dsp - Reset PHY DSP
+ *  @hw: pointer to the HW structure
+ *
+ *  Reset the digital signal processor.
+ **/
+s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
+	if (ret_val)
+		return ret_val;
+
+	return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0);
+}
+
+/**
+ *  e1000e_read_phy_reg_mdic - Read MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the MDI control register in the PHY at offset and stores the
+ *  information read to data.
+ **/
+s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, mdic = 0;
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		e_dbg("PHY Address %d is out of range\n", offset);
+		return -E1000_ERR_PARAM;
+	}
+
+	/*
+	 * Set up Op-code, Phy Address, and register offset in the MDI
+	 * Control register.  The MAC will take care of interfacing with the
+	 * PHY to retrieve the desired data.
+	 */
+	mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+		(phy->addr << E1000_MDIC_PHY_SHIFT) |
+		(E1000_MDIC_OP_READ));
+
+	ew32(MDIC, mdic);
+
+	/*
+	 * Poll the ready bit to see if the MDI read completed
+	 * Increasing the time out as testing showed failures with
+	 * the lower time out
+	 */
+	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+		udelay(50);
+		mdic = er32(MDIC);
+		if (mdic & E1000_MDIC_READY)
+			break;
+	}
+	if (!(mdic & E1000_MDIC_READY)) {
+		e_dbg("MDI Read did not complete\n");
+		return -E1000_ERR_PHY;
+	}
+	if (mdic & E1000_MDIC_ERROR) {
+		e_dbg("MDI Error\n");
+		return -E1000_ERR_PHY;
+	}
+	*data = (u16) mdic;
+
+	/*
+	 * Allow some time after each MDIC transaction to avoid
+	 * reading duplicate data in the next MDIC transaction.
+	 */
+	if (hw->mac.type == e1000_pch2lan)
+		udelay(100);
+
+	return 0;
+}
+
+/**
+ *  e1000e_write_phy_reg_mdic - Write MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write to register at offset
+ *
+ *  Writes data to MDI control register in the PHY at offset.
+ **/
+s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, mdic = 0;
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		e_dbg("PHY Address %d is out of range\n", offset);
+		return -E1000_ERR_PARAM;
+	}
+
+	/*
+	 * Set up Op-code, Phy Address, and register offset in the MDI
+	 * Control register.  The MAC will take care of interfacing with the
+	 * PHY to write the desired data.
+	 */
+	mdic = (((u32)data) |
+		(offset << E1000_MDIC_REG_SHIFT) |
+		(phy->addr << E1000_MDIC_PHY_SHIFT) |
+		(E1000_MDIC_OP_WRITE));
+
+	ew32(MDIC, mdic);
+
+	/*
+	 * Poll the ready bit to see if the MDI write completed.  The timeout
+	 * was increased because testing showed failures with the lower
+	 * timeout.
+	 */
+	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+		udelay(50);
+		mdic = er32(MDIC);
+		if (mdic & E1000_MDIC_READY)
+			break;
+	}
+	if (!(mdic & E1000_MDIC_READY)) {
+		e_dbg("MDI Write did not complete\n");
+		return -E1000_ERR_PHY;
+	}
+	if (mdic & E1000_MDIC_ERROR) {
+		e_dbg("MDI Error\n");
+		return -E1000_ERR_PHY;
+	}
+
+	/*
+	 * Allow some time after each MDIC transaction to avoid
+	 * reading duplicate data in the next MDIC transaction.
+	 */
+	if (hw->mac.type == e1000_pch2lan)
+		udelay(100);
+
+	return 0;
+}
+
+/**
+ *  e1000e_read_phy_reg_m88 - Read m88 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore, if necessary, then reads the PHY register at offset
+ *  and stores the retrieved information in data.  Release any acquired
+ *  semaphores before exiting.
+ **/
+s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					   data);
+
+	hw->phy.ops.release(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_write_phy_reg_m88 - Write m88 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					    data);
+
+	hw->phy.ops.release(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_set_page_igp - Set page as on IGP-like PHY(s)
+ *  @hw: pointer to the HW structure
+ *  @page: page to set (shifted left when necessary)
+ *
+ *  Sets PHY page required for PHY register access.  Assumes semaphore is
+ *  already acquired.  Note, this function sets phy.addr to 1 so the caller
+ *  must set it appropriately (if necessary) after this function returns.
+ **/
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
+{
+	e_dbg("Setting page 0x%x\n", page);
+
+	hw->phy.addr = 1;
+
+	return e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page);
+}
+
+/**
+ *  __e1000e_read_phy_reg_igp - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *  @locked: semaphore has already been acquired or not
+ *
+ *  Acquires semaphore, if necessary, then reads the PHY register at offset
+ *  and stores the retrieved information in data.  Release any acquired
+ *  semaphores before exiting.
+ **/
+static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
+                                    bool locked)
+{
+	s32 ret_val = 0;
+
+	if (!locked) {
+		if (!(hw->phy.ops.acquire))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			goto out;
+	}
+
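+	/* Offsets beyond MAX_PHY_MULTI_PAGE_REG need a page select first. */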
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		ret_val = e1000e_write_phy_reg_mdic(hw,
+						    IGP01E1000_PHY_PAGE_SELECT,
+						    (u16)offset);
+		if (ret_val)
+			goto release;
+	}
+
+	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+	                                  data);
+
+release:
+	if (!locked)
+		hw->phy.ops.release(hw);
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_read_phy_reg_igp - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore then reads the PHY register at offset and stores the
+ *  retrieved information in data.
+ *  Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000e_read_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ *  e1000e_read_phy_reg_igp_locked - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset and stores the retrieved information
+ *  in data.  Assumes semaphore already acquired.
+ **/
+s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000e_read_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ *  __e1000e_read_phy_reg_igp is the read counterpart of this function;
+ *  __e1000e_write_phy_reg_igp - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *  @locked: semaphore has already been acquired or not
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
+                                     bool locked)
+{
+	s32 ret_val = 0;
+
+	if (!locked) {
+		if (!(hw->phy.ops.acquire))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		ret_val = e1000e_write_phy_reg_mdic(hw,
+						    IGP01E1000_PHY_PAGE_SELECT,
+						    (u16)offset);
+		if (ret_val)
+			goto release;
+	}
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					    data);
+
+release:
+	if (!locked)
+		hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_write_phy_reg_igp - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000e_write_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ *  e1000e_write_phy_reg_igp_locked - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset.
+ *  Assumes semaphore already acquired.
+ **/
+s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000e_write_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ *  __e1000_read_kmrn_reg - Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *  @locked: semaphore has already been acquired or not
+ *
+ *  Acquires semaphore, if necessary.  Then reads the PHY register at offset
+ *  using the kumeran interface.  The information retrieved is stored in data.
+ *  Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
+                                 bool locked)
+{
+	u32 kmrnctrlsta;
+	s32 ret_val = 0;
+
+	if (!locked) {
+		if (!(hw->phy.ops.acquire))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			goto out;
+	}
+
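+	/* Encode the offset and set the read-enable (REN) bit. */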
+	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+		       E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+	ew32(KMRNCTRLSTA, kmrnctrlsta);
+	e1e_flush();
+
+	udelay(2);
+
+	kmrnctrlsta = er32(KMRNCTRLSTA);
+	*data = (u16)kmrnctrlsta;
+
+	if (!locked)
+		hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_read_kmrn_reg -  Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore then reads the PHY register at offset using the
+ *  kumeran interface.  The information retrieved is stored in data.
+ *  Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ *  e1000e_read_kmrn_reg_locked -  Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset using the kumeran interface.  The
+ *  information retrieved is stored in data.
+ *  Assumes semaphore already acquired.
+ **/
+s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ *  __e1000_write_kmrn_reg - Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *  @locked: semaphore has already been acquired or not
+ *
+ *  Acquires semaphore, if necessary.  Then writes the data to PHY register
+ *  at the offset using the kumeran interface.  Release any acquired semaphores
+ *  before exiting.
+ **/
+static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
+                                  bool locked)
+{
+	u32 kmrnctrlsta;
+	s32 ret_val = 0;
+
+	if (!locked) {
+		if (!(hw->phy.ops.acquire))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+		       E1000_KMRNCTRLSTA_OFFSET) | data;
+	ew32(KMRNCTRLSTA, kmrnctrlsta);
+	e1e_flush();
+
+	udelay(2);
+
+	if (!locked)
+		hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_write_kmrn_reg -  Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore then writes the data to the PHY register at the offset
+ *  using the kumeran interface.  Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ *  e1000e_write_kmrn_reg_locked -  Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Write the data to PHY register at the offset using the kumeran interface.
+ *  Assumes semaphore already acquired.
+ **/
+s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ *  e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up Carrier-sense on Transmit and downshift values.
+ **/
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_data;
+
+	/* Enable CRS on Tx. This must be set for half-duplex operation. */
+	ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
+
+	/* Enable downshift */
+	phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
+
+	ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_copper_link_setup_m88 - Setup m88 PHYs for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up MDI/MDI-X and polarity for m88 PHYs.  If necessary, transmit clock
+ *  and downshift values are set also.
+ **/
+s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+
+	/* Enable CRS on Tx. This must be set for half-duplex operation. */
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/* For BM PHY this bit is downshift enable */
+	if (phy->type != e1000_phy_bm)
+		phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+	/*
+	 * Options:
+	 *   MDI/MDI-X = 0 (default)
+	 *   0 - Auto for all speeds
+	 *   1 - MDI mode
+	 *   2 - MDI-X mode
+	 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+	 */
+	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+	switch (phy->mdix) {
+	case 1:
+		phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+		break;
+	case 2:
+		phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+		break;
+	case 3:
+		phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+		break;
+	case 0:
+	default:
+		phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+		break;
+	}
+
+	/*
+	 * Options:
+	 *   disable_polarity_correction = 0 (default)
+	 *       Automatic Correction for Reversed Cable Polarity
+	 *   0 - Disabled
+	 *   1 - Enabled
+	 */
+	phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+	if (phy->disable_polarity_correction == 1)
+		phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+	/* Enable downshift on BM (disabled by default) */
+	if (phy->type == e1000_phy_bm)
+		phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT;
+
+	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	if ((phy->type == e1000_phy_m88) &&
+	    (phy->revision < E1000_REVISION_4) &&
+	    (phy->id != BME1000_E_PHY_ID_R2)) {
+		/*
+		 * Force TX_CLK in the Extended PHY Specific Control Register
+		 * to 25MHz clock.
+		 */
+		ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+		if ((phy->revision == 2) &&
+		    (phy->id == M88E1111_I_PHY_ID)) {
+			/* 82573L PHY - set the downshift counter to 5x. */
+			phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
+			phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+		} else {
+			/* Configure Master and Slave downshift values */
+			phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+				      M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+			phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+				     M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+		}
+		ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+		if (ret_val)
+			return ret_val;
+	}
+
+	if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) {
+		/* Set PHY page 0, register 29 to 0x0003 */
+		ret_val = e1e_wphy(hw, 29, 0x0003);
+		if (ret_val)
+			return ret_val;
+
+		/* Set PHY page 0, register 30 to 0x0000 */
+		ret_val = e1e_wphy(hw, 30, 0x0000);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/* Commit the changes. */
+	ret_val = e1000e_commit_phy(hw);
+	if (ret_val) {
+		e_dbg("Error committing the PHY changes\n");
+		return ret_val;
+	}
+
+	if (phy->type == e1000_phy_82578) {
+		ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		/* 82578 PHY - set the downshift count to 1x. */
+		phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
+		phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
+		ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+		if (ret_val)
+			return ret_val;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_copper_link_setup_igp - Setup igp PHYs for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
+ *  igp PHYs.
+ **/
+s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = e1000_phy_hw_reset(hw);
+	if (ret_val) {
+		e_dbg("Error resetting the PHY.\n");
+		return ret_val;
+	}
+
+	/*
+	 * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+	 * timeout issues when LFS is enabled.
+	 */
+	msleep(100);
+
+	/* disable lplu d0 during driver init */
+	ret_val = e1000_set_d0_lplu_state(hw, false);
+	if (ret_val) {
+		e_dbg("Error Disabling LPLU D0\n");
+		return ret_val;
+	}
+	/* Configure mdi-mdix settings */
+	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data);
+	if (ret_val)
+		return ret_val;
+
+	data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+	switch (phy->mdix) {
+	case 1:
+		data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+		break;
+	case 2:
+		data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+		break;
+	case 0:
+	default:
+		data |= IGP01E1000_PSCR_AUTO_MDIX;
+		break;
+	}
+	ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data);
+	if (ret_val)
+		return ret_val;
+
+	/* set auto-master slave resolution settings */
+	if (hw->mac.autoneg) {
+		/*
+		 * when autonegotiation advertisement is only 1000Mbps then we
+		 * should disable SmartSpeed and enable Auto MasterSlave
+		 * resolution as hardware default.
+		 */
+		if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
+			/* Disable SmartSpeed */
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
+			if (ret_val)
+				return ret_val;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
+			if (ret_val)
+				return ret_val;
+
+			/* Set auto Master/Slave resolution process */
+			ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data);
+			if (ret_val)
+				return ret_val;
+
+			data &= ~CR_1000T_MS_ENABLE;
+			ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
+			if (ret_val)
+				return ret_val;
+		}
+
+		ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data);
+		if (ret_val)
+			return ret_val;
+
+		/* load defaults for future use */
+		phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
+			((data & CR_1000T_MS_VALUE) ?
+			e1000_ms_force_master :
+			e1000_ms_force_slave) :
+			e1000_ms_auto;
+
+		switch (phy->ms_type) {
+		case e1000_ms_force_master:
+			data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+			break;
+		case e1000_ms_force_slave:
+			data |= CR_1000T_MS_ENABLE;
+			data &= ~(CR_1000T_MS_VALUE);
+			break;
+		case e1000_ms_auto:
+			data &= ~CR_1000T_MS_ENABLE;
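+			/* fall through */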
+		default:
+			break;
+		}
+		ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the MII auto-neg advertisement register and/or the 1000T control
+ *  register and if the PHY is already setup for auto-negotiation, then
+ *  return successful.  Otherwise, setup advertisement and flow control to
+ *  the appropriate values for the wanted auto-negotiation.
+ **/
+static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 mii_autoneg_adv_reg;
+	u16 mii_1000t_ctrl_reg = 0;
+
+	phy->autoneg_advertised &= phy->autoneg_mask;
+
+	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
+	ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+	if (ret_val)
+		return ret_val;
+
+	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+		/* Read the MII 1000Base-T Control Register (Address 9). */
+		ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/*
+	 * Need to parse both autoneg_advertised and fc and set up
+	 * the appropriate PHY registers.  First we will parse for
+	 * autoneg_advertised software override.  Since we can advertise
+	 * a plethora of combinations, we need to check each bit
+	 * individually.
+	 */
+
+	/*
+	 * First we clear all the 10/100 mb speed bits in the Auto-Neg
+	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
+	 * the  1000Base-T Control Register (Address 9).
+	 */
+	mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
+				 NWAY_AR_100TX_HD_CAPS |
+				 NWAY_AR_10T_FD_CAPS   |
+				 NWAY_AR_10T_HD_CAPS);
+	mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+
+	e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
+
+	/* Do we want to advertise 10 Mb Half Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+		e_dbg("Advertise 10mb Half duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+	}
+
+	/* Do we want to advertise 10 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+		e_dbg("Advertise 10mb Full duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+	}
+
+	/* Do we want to advertise 100 Mb Half Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+		e_dbg("Advertise 100mb Half duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+	}
+
+	/* Do we want to advertise 100 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+		e_dbg("Advertise 100mb Full duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+	}
+
+	/* We do not allow the PHY to advertise 1000 Mb Half Duplex */
+	if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
+		e_dbg("Advertise 1000mb Half duplex request denied!\n");
+
+	/* Do we want to advertise 1000 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+		e_dbg("Advertise 1000mb Full duplex\n");
+		mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+	}
+
+	/*
+	 * Check for a software override of the flow control settings, and
+	 * setup the PHY advertisement registers accordingly.  If
+	 * auto-negotiation is enabled, then software will have to set the
+	 * "PAUSE" bits to the correct value in the Auto-Negotiation
+	 * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+	 * negotiation.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause frames
+	 *	  but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *	  but we do not support receiving pause frames).
+	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+	 *  other:  No software override.  The flow control configuration
+	 *	  in the EEPROM is used.
+	 */
+	switch (hw->fc.current_mode) {
+	case e1000_fc_none:
+		/*
+		 * Flow control (Rx & Tx) is completely disabled by a
+		 * software over-ride.
+		 */
+		mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	case e1000_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled, and Tx Flow control is
+		 * disabled, by a software over-ride.
+		 *
+		 * Since there really isn't a way to advertise that we are
+		 * capable of Rx Pause ONLY, we will advertise that we
+		 * support both symmetric and asymmetric Rx PAUSE.  Later
+		 * (in e1000e_config_fc_after_link_up) we will disable the
+		 * hw's ability to send PAUSE frames.
+		 */
+		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	case e1000_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is
+		 * disabled, by a software over-ride.
+		 */
+		mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+		mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+		break;
+	case e1000_fc_full:
+		/*
+		 * Flow control (both Rx and Tx) is enabled by a software
+		 * over-ride.
+		 */
+		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	default:
+		e_dbg("Flow control param set incorrectly\n");
+		ret_val = -E1000_ERR_CONFIG;
+		return ret_val;
+	}
+
+	ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+	if (ret_val)
+		return ret_val;
+
+	e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+	if (phy->autoneg_mask & ADVERTISE_1000_FULL)
+		ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs initial bounds checking on the autoneg advertisement parameter
+ *  and, if nothing is advertised, defaults to the full capability.  Sets up
+ *  the PHY for autoneg and restarts the negotiation process with the link
+ *  partner.  If autoneg_wait_to_complete is set, waits for autoneg to
+ *  complete before exiting.
+ **/
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_ctrl;
+
+	/*
+	 * Perform some bounds checking on the autoneg advertisement
+	 * parameter.
+	 */
+	phy->autoneg_advertised &= phy->autoneg_mask;
+
+	/*
+	 * If autoneg_advertised is zero, we assume it was not defaulted
+	 * by the calling code so we set to advertise full capability.
+	 */
+	if (phy->autoneg_advertised == 0)
+		phy->autoneg_advertised = phy->autoneg_mask;
+
+	e_dbg("Reconfiguring auto-neg advertisement params\n");
+	ret_val = e1000_phy_setup_autoneg(hw);
+	if (ret_val) {
+		e_dbg("Error Setting up Auto-Negotiation\n");
+		return ret_val;
+	}
+	e_dbg("Restarting Auto-Neg\n");
+
+	/*
+	 * Restart auto-negotiation by setting the Auto Neg Enable bit and
+	 * the Auto Neg Restart bit in the PHY control register.
+	 */
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		return ret_val;
+
+	phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Does the user want to wait for Auto-Neg to complete here, or
+	 * check at a later time (for example, callback routine).
+	 */
+	if (phy->autoneg_wait_to_complete) {
+		ret_val = e1000_wait_autoneg(hw);
+		if (ret_val) {
+			e_dbg("Error while waiting for "
+				 "autoneg to complete\n");
+			return ret_val;
+		}
+	}
+
+	hw->mac.get_link_status = 1;
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_setup_copper_link - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the appropriate function to configure the link for auto-neg or forced
+ *  speed and duplex.  Then we check for link; once link is established, the
+ *  collision distance and flow control are configured.  If link is not
+ *  established, we return -E1000_ERR_PHY (-2).
+ **/
+s32 e1000e_setup_copper_link(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	bool link;
+
+	if (hw->mac.autoneg) {
+		/*
+		 * Setup autoneg and flow control advertisement and perform
+		 * autonegotiation.
+		 */
+		ret_val = e1000_copper_link_autoneg(hw);
+		if (ret_val)
+			return ret_val;
+	} else {
+		/*
+		 * PHY will be set to 10H, 10F, 100H or 100F
+		 * depending on user settings.
+		 */
+		e_dbg("Forcing Speed and Duplex\n");
+		ret_val = e1000_phy_force_speed_duplex(hw);
+		if (ret_val) {
+			e_dbg("Error Forcing Speed and Duplex\n");
+			return ret_val;
+		}
+	}
+
+	/*
+	 * Check link status. Wait up to 100 microseconds for link to become
+	 * valid.
+	 */
+	ret_val = e1000e_phy_has_link_generic(hw,
+					     COPPER_LINK_UP_LIMIT,
+					     10,
+					     &link);
+	if (ret_val)
+		return ret_val;
+
+	if (link) {
+		e_dbg("Valid link established!!!\n");
+		e1000e_config_collision_dist(hw);
+		ret_val = e1000e_config_fc_after_link_up(hw);
+	} else {
+		e_dbg("Unable to establish link!!!\n");
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Waits for link and returns
+ *  success if link comes up, else -E1000_ERR_PHY (-2).
+ **/
+s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Clear Auto-Crossover to force MDI manually.  IGP requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+	phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+	ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	e_dbg("IGP PSCR: %X\n", phy_data);
+
+	udelay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		e_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
+
+		ret_val = e1000e_phy_has_link_generic(hw,
+						     PHY_FORCE_LIMIT,
+						     100000,
+						     &link);
+		if (ret_val)
+			return ret_val;
+
+		if (!link)
+			e_dbg("Link taking longer than expected.\n");
+
+		/* Try once more */
+		ret_val = e1000e_phy_has_link_generic(hw,
+						     PHY_FORCE_LIMIT,
+						     100000,
+						     &link);
+		if (ret_val)
+			return ret_val;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Resets the PHY to commit the
+ *  changes.  If time expires while waiting for link up, we reset the DSP.
+ *  After reset, TX_CLK and CRS on Tx must be set.  Returns 0 upon successful
+ *  completion, else returns the corresponding error code.
+ **/
+s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	/*
+	 * Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	e_dbg("M88E1000 PSCR: %X\n", phy_data);
+
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/* Reset the phy to commit changes. */
+	ret_val = e1000e_commit_phy(hw);
+	if (ret_val)
+		return ret_val;
+
+	if (phy->autoneg_wait_to_complete) {
+		e_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
+
+		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+						     100000, &link);
+		if (ret_val)
+			return ret_val;
+
+		if (!link) {
+			if (hw->phy.type != e1000_phy_m88) {
+				e_dbg("Link taking longer than expected.\n");
+			} else {
+				/*
+				 * We didn't get link.
+				 * Reset the DSP and cross our fingers.
+				 */
+				ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
+						   0x001d);
+				if (ret_val)
+					return ret_val;
+				ret_val = e1000e_phy_reset_dsp(hw);
+				if (ret_val)
+					return ret_val;
+			}
+		}
+
+		/* Try once more */
+		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+						     100000, &link);
+		if (ret_val)
+			return ret_val;
+	}
+
+	if (hw->phy.type != e1000_phy_m88)
+		return 0;
+
+	ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Resetting the phy means we need to re-force TX_CLK in the
+	 * Extended PHY Specific Control Register to 25MHz clock from
+	 * the reset value of 2.5MHz.
+	 */
+	phy_data |= M88E1000_EPSCR_TX_CLK_25;
+	ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * In addition, we must re-enable CRS on Tx for both half and full
+	 * duplex.
+	 */
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
+ *  @hw: pointer to the HW structure
+ *
+ *  Forces the speed and duplex settings of the PHY.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	e1000e_phy_force_speed_duplex_setup(hw, &data);
+
+	ret_val = e1e_wphy(hw, PHY_CONTROL, data);
+	if (ret_val)
+		goto out;
+
+	/* Disable MDI-X support for 10/100 */
+	ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	data &= ~IFE_PMC_AUTO_MDIX;
+	data &= ~IFE_PMC_FORCE_MDIX;
+
+	ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data);
+	if (ret_val)
+		goto out;
+
+	e_dbg("IFE PMC: %X\n", data);
+
+	udelay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		e_dbg("Waiting for forced speed/duplex link on IFE phy.\n");
+
+		ret_val = e1000e_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+
+		if (!link)
+			e_dbg("Link taking longer than expected.\n");
+
+		/* Try once more */
+		ret_val = e1000e_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ *  Forces speed and duplex on the PHY by doing the following: disable flow
+ *  control, force speed/duplex on the MAC, disable auto speed detection,
+ *  disable auto-negotiation, configure duplex, configure speed, configure
+ *  the collision distance, write configuration to CTRL register.  The
+ *  caller must write to the PHY_CONTROL register for these settings to
+ *  take effect.
+ **/
+void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 ctrl;
+
+	/* Turn off flow control when forcing speed/duplex */
+	hw->fc.current_mode = e1000_fc_none;
+
+	/* Force speed/duplex on the mac */
+	ctrl = er32(CTRL);
+	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ctrl &= ~E1000_CTRL_SPD_SEL;
+
+	/* Disable Auto Speed Detection */
+	ctrl &= ~E1000_CTRL_ASDE;
+
+	/* Disable autoneg on the phy */
+	*phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+
+	/* Forcing Full or Half Duplex? */
+	if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
+		ctrl &= ~E1000_CTRL_FD;
+		*phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+		e_dbg("Half Duplex\n");
+	} else {
+		ctrl |= E1000_CTRL_FD;
+		*phy_ctrl |= MII_CR_FULL_DUPLEX;
+		e_dbg("Full Duplex\n");
+	}
+
+	/* Forcing 10mb or 100mb? */
+	if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
+		ctrl |= E1000_CTRL_SPD_100;
+		*phy_ctrl |= MII_CR_SPEED_100;
+		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+		e_dbg("Forcing 100mb\n");
+	} else {
+		ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+		*phy_ctrl |= MII_CR_SPEED_10;
+		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+		e_dbg("Forcing 10mb\n");
+	}
+
+	e1000e_config_collision_dist(hw);
+
+	ew32(CTRL, ctrl);
+}
+
+/**
+ *  e1000e_set_d3_lplu_state - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, failure returns a non-zero error code
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D3
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+	if (ret_val)
+		return ret_val;
+
+	if (!active) {
+		data &= ~IGP02E1000_PM_D3_LPLU;
+		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+		if (ret_val)
+			return ret_val;
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
+			if (ret_val)
+				return ret_val;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
+			if (ret_val)
+				return ret_val;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
+			if (ret_val)
+				return ret_val;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
+			if (ret_val)
+				return ret_val;
+		}
+	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= IGP02E1000_PM_D3_LPLU;
+		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+		if (ret_val)
+			return ret_val;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+		if (ret_val)
+			return ret_val;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_check_downshift - Checks whether a downshift in speed occurred
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, failure returns a non-zero error code
+ *
+ *  A downshift is detected by querying the PHY link health.
+ **/
+s32 e1000e_check_downshift(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, offset, mask;
+
+	switch (phy->type) {
+	case e1000_phy_m88:
+	case e1000_phy_gg82563:
+	case e1000_phy_bm:
+	case e1000_phy_82578:
+		offset	= M88E1000_PHY_SPEC_STATUS;
+		mask	= M88E1000_PSSR_DOWNSHIFT;
+		break;
+	case e1000_phy_igp_2:
+	case e1000_phy_igp_3:
+		offset	= IGP01E1000_PHY_LINK_HEALTH;
+		mask	= IGP01E1000_PLHR_SS_DOWNGRADE;
+		break;
+	default:
+		/* speed downshift not supported */
+		phy->speed_downgraded = false;
+		return 0;
+	}
+
+	ret_val = e1e_rphy(hw, offset, &phy_data);
+
+	if (!ret_val)
+		phy->speed_downgraded = (phy_data & mask);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_m88 - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_igp - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY port status register, and the
+ *  current speed (since there is no polarity at 100Mbps).
+ **/
+s32 e1000_check_polarity_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data, offset, mask;
+
+	/*
+	 * Polarity is determined based on the speed of
+	 * our connection.
+	 */
+	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		return ret_val;
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		offset	= IGP01E1000_PHY_PCS_INIT_REG;
+		mask	= IGP01E1000_PHY_POLARITY_MASK;
+	} else {
+		/*
+		 * This really only applies to 10Mbps since
+		 * there is no polarity for 100Mbps (always 0).
+		 */
+		offset	= IGP01E1000_PHY_PORT_STATUS;
+		mask	= IGP01E1000_PSSR_POLARITY_REVERSED;
+	}
+
+	ret_val = e1e_rphy(hw, offset, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & mask)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_ife - Check cable polarity for IFE PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Polarity is determined based on whether the polarity reversal feature is
+ *  enabled.
+ **/
+s32 e1000_check_polarity_ife(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, offset, mask;
+
+	/*
+	 * Polarity is determined based on the reversal feature being enabled.
+	 */
+	if (phy->polarity_correction) {
+		offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
+		mask = IFE_PESC_POLARITY_REVERSED;
+	} else {
+		offset = IFE_PHY_SPECIAL_CONTROL;
+		mask = IFE_PSC_FORCE_POLARITY;
+	}
+
+	ret_val = e1e_rphy(hw, offset, &phy_data);
+
+	if (!ret_val)
+		phy->cable_polarity = (phy_data & mask)
+		                       ? e1000_rev_polarity_reversed
+		                       : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_wait_autoneg - Wait for auto-neg completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Waits for auto-negotiation to complete or for the auto-negotiation time
+ *  limit to expire, whichever happens first.
+ **/
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 i, phy_status;
+
+	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
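+		/*
+		 * Read the status register twice; some PHYs latch status
+		 * bits until the second read.
+		 */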
+		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_AUTONEG_COMPLETE)
+			break;
+		msleep(100);
+	}
+
+	/*
+	 * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+	 * has completed.
+	 */
+	return ret_val;
+}
+
+/**
+ *  e1000e_phy_has_link_generic - Polls PHY for link
+ *  @hw: pointer to the HW structure
+ *  @iterations: number of times to poll for link
+ *  @usec_interval: delay between polling attempts
+ *  @success: pointer to whether polling was successful or not
+ *
+ *  Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+			       u32 usec_interval, bool *success)
+{
+	s32 ret_val = 0;
+	u16 i, phy_status;
+
+	for (i = 0; i < iterations; i++) {
+		/*
+		 * Some PHYs require the PHY_STATUS register to be read
+		 * twice due to the link bit being sticky.  No harm doing
+		 * it across the board.
+		 */
+		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+		if (ret_val) {
+			/*
+			 * If the first read fails, another entity may have
+			 * ownership of the resources, wait and try again to
+			 * see if they have relinquished the resources yet.
+			 */
+			if (usec_interval >= 1000)
+				mdelay(usec_interval/1000);
+			else
+				udelay(usec_interval);
+		}
+		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_LINK_STATUS)
+			break;
+		if (usec_interval >= 1000)
+			mdelay(usec_interval/1000);
+		else
+			udelay(usec_interval);
+	}
+
+	*success = (i < iterations);
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_get_cable_length_m88 - Determine cable length for m88 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY specific status register to retrieve the cable length
+ *  information.  The cable length is determined by averaging the minimum and
+ *  maximum values to get the "average" cable length.  The m88 PHY has five
+ *  possible cable length values, which are:
+ *	Register Value		Cable Length
+ *	0			< 50 meters
+ *	1			50 - 80 meters
+ *	2			80 - 110 meters
+ *	3			110 - 140 meters
+ *	4			> 140 meters
+ **/
+s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, index;
+
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+	        M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+	if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
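+	/* table[index] is the minimum length, table[index + 1] the maximum */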
+	phy->min_cable_length = e1000_m88_cable_length_table[index];
+	phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  The automatic gain control (agc) normalizes the amplitude of the
+ *  received signal, adjusting for the attenuation produced by the
+ *  cable.  By reading the AGC registers, which represent the
+ *  combination of coarse and fine gain values, the result can be used as an
+ *  index into a lookup table to obtain the approximate cable length for each
+ *  channel.
+ **/
+s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, i, agc_value = 0;
+	u16 cur_agc_index, max_agc_index = 0;
+	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+	static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+	       IGP02E1000_PHY_AGC_A,
+	       IGP02E1000_PHY_AGC_B,
+	       IGP02E1000_PHY_AGC_C,
+	       IGP02E1000_PHY_AGC_D
+	};
+
+	/* Read the AGC registers for all channels */
+	for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+		ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		/*
+		 * Getting bits 15:9, which represent the combination of
+		 * coarse and fine gain values.  The result is a number
+		 * that can be put into the lookup table to obtain the
+		 * approximate cable length.
+		 */
+		cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+				IGP02E1000_AGC_LENGTH_MASK;
+
+		/* Array index bound check. */
+		if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+		    (cur_agc_index == 0))
+			return -E1000_ERR_PHY;
+
+		/* Remove min & max AGC values from calculation. */
+		if (e1000_igp_2_cable_length_table[min_agc_index] >
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			min_agc_index = cur_agc_index;
+		if (e1000_igp_2_cable_length_table[max_agc_index] <
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			max_agc_index = cur_agc_index;
+
+		agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+	}
+
+	agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+		      e1000_igp_2_cable_length_table[max_agc_index]);
+	agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+	/* Calculate cable length with the error range of +/- 10 meters. */
+	phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+				 (agc_value - IGP02E1000_AGC_RANGE) : 0;
+	phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_get_phy_info_m88 - Retrieve PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Valid for only copper links.  Read the PHY status register (sticky read)
+ *  to verify that link is up.  Read the PHY special control register to
+ *  determine the polarity and 10base-T extended distance.  Read the PHY
+ *  special status register to determine MDI/MDIx and current speed.  If
+ *  speed is 1000, then determine cable length, local and remote receiver.
+ **/
+s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32  ret_val;
+	u16 phy_data;
+	bool link;
+
+	if (phy->media_type != e1000_media_type_copper) {
+		e_dbg("Phy info is only valid for copper media\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		return ret_val;
+
+	if (!link) {
+		e_dbg("Phy info is only valid if link is up\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	phy->polarity_correction = (phy_data &
+				    M88E1000_PSCR_POLARITY_REVERSAL);
+
+	ret_val = e1000_check_polarity_m88(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX);
+
+	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+		ret_val = e1000_get_cable_length(hw);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
+				? e1000_1000t_rx_status_ok
+				: e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
+				 ? e1000_1000t_rx_status_ok
+				 : e1000_1000t_rx_status_not_ok;
+	} else {
+		/* Set values to "undefined" */
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_get_phy_info_igp - Retrieve igp PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Read PHY status to determine if link is up.  If link is up, then
+ *  set/determine 10base-T extended distance and polarity correction.  Read
+ *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
+ *  determine the cable length, local and remote receiver.
+ **/
+s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		return ret_val;
+
+	if (!link) {
+		e_dbg("Phy info is only valid if link is up\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	phy->polarity_correction = true;
+
+	ret_val = e1000_check_polarity_igp(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		return ret_val;
+
+	phy->is_mdix = (data & IGP01E1000_PSSR_MDIX);
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		ret_val = e1000_get_cable_length(hw);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
+		if (ret_val)
+			return ret_val;
+
+		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+				? e1000_1000t_rx_status_ok
+				: e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+				 ? e1000_1000t_rx_status_ok
+				 : e1000_1000t_rx_status_not_ok;
+	} else {
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_ife - Retrieves various IFE PHY states
+ *  @hw: pointer to the HW structure
+ *
+ *  Populates "phy" structure with various feature states.
+ **/
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		e_dbg("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data);
+	if (ret_val)
+		goto out;
+	phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
+	                           ? false : true;
+
+	if (phy->polarity_correction) {
+		ret_val = e1000_check_polarity_ife(hw);
+		if (ret_val)
+			goto out;
+	} else {
+		/* Polarity is forced */
+		phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
+		                      ? e1000_rev_polarity_reversed
+		                      : e1000_rev_polarity_normal;
+	}
+
+	ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false;
+
+	/* The following parameters are undefined for 10/100 operation. */
+	phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+	phy->local_rx = e1000_1000t_rx_status_undefined;
+	phy->remote_rx = e1000_1000t_rx_status_undefined;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_phy_sw_reset - PHY software reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Does a software reset of the PHY by reading the PHY control register,
+ *  setting the reset bit and writing the register back to the PHY.
+ **/
+s32 e1000e_phy_sw_reset(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_ctrl;
+
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		return ret_val;
+
+	phy_ctrl |= MII_CR_RESET;
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		return ret_val;
+
+	udelay(1);
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_phy_hw_reset_generic - PHY hardware reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Verify the reset block is not blocking us from resetting.  Acquire
+ *  semaphore (if necessary) and read/set/write the device control reset
+ *  bit in the PHY.  Wait the appropriate delay time for the device to
+ *  reset and release the semaphore (if necessary).
+ **/
+s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u32 ctrl;
+
+	ret_val = e1000_check_reset_block(hw);
+	if (ret_val)
+		return 0;
+
+	ret_val = phy->ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
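+	/* Assert PHY_RST, hold for the PHY-specific delay, then deassert. */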
+	ctrl = er32(CTRL);
+	ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
+	e1e_flush();
+
+	udelay(phy->reset_delay_us);
+
+	ew32(CTRL, ctrl);
+	e1e_flush();
+
+	udelay(150);
+
+	phy->ops.release(hw);
+
+	return e1000_get_phy_cfg_done(hw);
+}
+
+/**
+ *  e1000e_get_cfg_done - Generic configuration done
+ *  @hw: pointer to the HW structure
+ *
+ *  Generic function to wait 10 milliseconds for configuration to complete
+ *  and return success.
+ **/
+s32 e1000e_get_cfg_done(struct e1000_hw *hw)
+{
+	mdelay(10);
+	return 0;
+}
+
+/**
+ *  e1000e_phy_init_script_igp3 - Inits the IGP3 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
+ **/
+s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
+{
+	e_dbg("Running IGP 3 PHY init script\n");
+
+	/* PHY init IGP 3 */
+	/* Enable rise/fall, 10-mode work in class-A */
+	e1e_wphy(hw, 0x2F5B, 0x9018);
+	/* Remove all caps from Replica path filter */
+	e1e_wphy(hw, 0x2F52, 0x0000);
+	/* Bias trimming for ADC, AFE and Driver (Default) */
+	e1e_wphy(hw, 0x2FB1, 0x8B24);
+	/* Increase Hybrid poly bias */
+	e1e_wphy(hw, 0x2FB2, 0xF8F0);
+	/* Add 4% to Tx amplitude in Gig mode */
+	e1e_wphy(hw, 0x2010, 0x10B0);
+	/* Disable trimming (TTT) */
+	e1e_wphy(hw, 0x2011, 0x0000);
+	/* Poly DC correction to 94.6% + 2% for all channels */
+	e1e_wphy(hw, 0x20DD, 0x249A);
+	/* ABS DC correction to 95.9% */
+	e1e_wphy(hw, 0x20DE, 0x00D3);
+	/* BG temp curve trim */
+	e1e_wphy(hw, 0x28B4, 0x04CE);
+	/* Increasing ADC OPAMP stage 1 currents to max */
+	e1e_wphy(hw, 0x2F70, 0x29E4);
+	/* Force 1000 (required for enabling PHY regs configuration) */
+	e1e_wphy(hw, 0x0000, 0x0140);
+	/* Set upd_freq to 6 */
+	e1e_wphy(hw, 0x1F30, 0x1606);
+	/* Disable NPDFE */
+	e1e_wphy(hw, 0x1F31, 0xB814);
+	/* Disable adaptive fixed FFE (Default) */
+	e1e_wphy(hw, 0x1F35, 0x002A);
+	/* Enable FFE hysteresis */
+	e1e_wphy(hw, 0x1F3E, 0x0067);
+	/* Fixed FFE for short cable lengths */
+	e1e_wphy(hw, 0x1F54, 0x0065);
+	/* Fixed FFE for medium cable lengths */
+	e1e_wphy(hw, 0x1F55, 0x002A);
+	/* Fixed FFE for long cable lengths */
+	e1e_wphy(hw, 0x1F56, 0x002A);
+	/* Enable Adaptive Clip Threshold */
+	e1e_wphy(hw, 0x1F72, 0x3FB0);
+	/* AHT reset limit to 1 */
+	e1e_wphy(hw, 0x1F76, 0xC0FF);
+	/* Set AHT master delay to 127 msec */
+	e1e_wphy(hw, 0x1F77, 0x1DEC);
+	/* Set scan bits for AHT */
+	e1e_wphy(hw, 0x1F78, 0xF9EF);
+	/* Set AHT Preset bits */
+	e1e_wphy(hw, 0x1F79, 0x0210);
+	/* Change integ_factor of channel A to 3 */
+	e1e_wphy(hw, 0x1895, 0x0003);
+	/* Change prop_factor of channels BCD to 8 */
+	e1e_wphy(hw, 0x1796, 0x0008);
+	/* Change cg_icount + enable integbp for channels BCD */
+	e1e_wphy(hw, 0x1798, 0xD008);
+	/*
+	 * Change cg_icount + enable integbp + change prop_factor_master
+	 * to 8 for channel A
+	 */
+	e1e_wphy(hw, 0x1898, 0xD918);
+	/* Disable AHT in Slave mode on channel A */
+	e1e_wphy(hw, 0x187A, 0x0800);
+	/*
+	 * Enable LPLU and disable AN to 1000 in non-D0a states,
+	 * Enable SPD+B2B
+	 */
+	e1e_wphy(hw, 0x0019, 0x008D);
+	/* Enable restart AN on an1000_dis change */
+	e1e_wphy(hw, 0x001B, 0x2080);
+	/* Enable wh_fifo read clock in 10/100 modes */
+	e1e_wphy(hw, 0x0014, 0x0045);
+	/* Restart AN, Speed selection is 1000 */
+	e1e_wphy(hw, 0x0000, 0x1340);
+
+	return 0;
+}
+
+/* Internal function pointers */
+
+/**
+ *  e1000_get_phy_cfg_done - Generic PHY configuration done
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns success if the silicon family did not implement a family-specific
+ *  get_cfg_done function.
+ **/
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+	if (hw->phy.ops.get_cfg_done)
+		return hw->phy.ops.get_cfg_done(hw);
+
+	return 0;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex - Generic force PHY speed/duplex
+ *  @hw: pointer to the HW structure
+ *
+ *  When the silicon family has not implemented a forced speed/duplex
+ *  function for the PHY, simply return 0.
+ **/
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+{
+	if (hw->phy.ops.force_speed_duplex)
+		return hw->phy.ops.force_speed_duplex(hw);
+
+	return 0;
+}
+
+/**
+ *  e1000e_get_phy_type_from_id - Get PHY type from id
+ *  @phy_id: phy_id read from the phy
+ *
+ *  Returns the phy type from the id.
+ **/
+enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
+{
+	enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+	switch (phy_id) {
+	case M88E1000_I_PHY_ID:
+	case M88E1000_E_PHY_ID:
+	case M88E1111_I_PHY_ID:
+	case M88E1011_I_PHY_ID:
+		phy_type = e1000_phy_m88;
+		break;
+	case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
+		phy_type = e1000_phy_igp_2;
+		break;
+	case GG82563_E_PHY_ID:
+		phy_type = e1000_phy_gg82563;
+		break;
+	case IGP03E1000_E_PHY_ID:
+		phy_type = e1000_phy_igp_3;
+		break;
+	case IFE_E_PHY_ID:
+	case IFE_PLUS_E_PHY_ID:
+	case IFE_C_E_PHY_ID:
+		phy_type = e1000_phy_ife;
+		break;
+	case BME1000_E_PHY_ID:
+	case BME1000_E_PHY_ID_R2:
+		phy_type = e1000_phy_bm;
+		break;
+	case I82578_E_PHY_ID:
+		phy_type = e1000_phy_82578;
+		break;
+	case I82577_E_PHY_ID:
+		phy_type = e1000_phy_82577;
+		break;
+	case I82579_E_PHY_ID:
+		phy_type = e1000_phy_82579;
+		break;
+	case I217_E_PHY_ID:
+		phy_type = e1000_phy_i217;
+		break;
+	default:
+		phy_type = e1000_phy_unknown;
+		break;
+	}
+	return phy_type;
+}
+
+/**
+ *  e1000e_determine_phy_address - Determines PHY address.
+ *  @hw: pointer to the HW structure
+ *
+ *  This uses a trial and error method to loop through possible PHY
+ *  addresses. It tests each by reading the PHY ID registers and
+ *  checking for a match.
+ **/
+s32 e1000e_determine_phy_address(struct e1000_hw *hw)
+{
+	s32 ret_val = -E1000_ERR_PHY_TYPE;
+	u32 phy_addr = 0;
+	u32 i;
+	enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+	hw->phy.id = phy_type;
+
+	for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
+		hw->phy.addr = phy_addr;
+		i = 0;
+
+		do {
+			e1000e_get_phy_id(hw);
+			phy_type = e1000e_get_phy_type_from_id(hw->phy.id);
+
+			/*
+			 * If phy_type is valid, break - we found our
+			 * PHY address
+			 */
+			if (phy_type != e1000_phy_unknown) {
+				ret_val = 0;
+				goto out;
+			}
+			usleep_range(1000, 2000);
+			i++;
+		} while (i < 10);
+	}
+
+out:
+	return ret_val;
+}
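+
+/*
+ * Usage sketch: a hypothetical caller that has lost track of the PHY
+ * address could probe for it and report what was found, e.g.:
+ *
+ *     if (!e1000e_determine_phy_address(hw))
+ *             e_dbg("PHY found at address %d (id 0x%08x)\n",
+ *                   hw->phy.addr, hw->phy.id);
+ *     else
+ *             e_dbg("no PHY responded at any address\n");
+ */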
+
+/**
+ *  e1000_get_phy_addr_for_bm_page - Retrieve PHY page address
+ *  @page: page to access
+ *  @reg: register to access
+ *
+ *  Returns the PHY address for the page requested.
+ **/
+static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
+{
+	u32 phy_addr = 2;
+
+	if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31))
+		phy_addr = 1;
+
+	return phy_addr;
+}
+
+/**
+ *  e1000e_write_phy_reg_bm - Write BM PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+	u32 page = offset >> IGP_PAGE_SHIFT;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Page 800 works differently than the rest so it has its own func */
+	if (page == BM_WUC_PAGE) {
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+							 false, false);
+		goto out;
+	}
+
+	hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		u32 page_shift, page_select;
+
+		/*
+		 * Page select is register 31 for phy address 1 and 22 for
+		 * phy address 2 and 3. Page select is shifted only for
+		 * phy address 1.
+		 */
+		if (hw->phy.addr == 1) {
+			page_shift = IGP_PAGE_SHIFT;
+			page_select = IGP01E1000_PHY_PAGE_SELECT;
+		} else {
+			page_shift = 0;
+			page_select = BM_PHY_PAGE_SELECT;
+		}
+
+		/* Page is shifted left, PHY expects (page x 32) */
+		ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
+		                                    (page << page_shift));
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+	                                    data);
+
+out:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ *  e1000e_read_phy_reg_bm - Read BM PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore, if necessary, then reads the PHY register at offset
+ *  and stores the retrieved information in data.  Release any acquired
+ *  semaphores before exiting.
+ **/
+s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+	u32 page = offset >> IGP_PAGE_SHIFT;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Page 800 works differently than the rest so it has its own func */
+	if (page == BM_WUC_PAGE) {
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+							 true, false);
+		goto out;
+	}
+
+	hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		u32 page_shift, page_select;
+
+		/*
+		 * Page select is register 31 for phy address 1 and 22 for
+		 * phy address 2 and 3. Page select is shifted only for
+		 * phy address 1.
+		 */
+		if (hw->phy.addr == 1) {
+			page_shift = IGP_PAGE_SHIFT;
+			page_select = IGP01E1000_PHY_PAGE_SELECT;
+		} else {
+			page_shift = 0;
+			page_select = BM_PHY_PAGE_SELECT;
+		}
+
+		/* Page is shifted left, PHY expects (page x 32) */
+		ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
+		                                    (page << page_shift));
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+	                                   data);
+out:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
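+
+/*
+ * For reference, the BM accessors above expect the page number in the
+ * upper bits of the offset and the register number in the lower bits.
+ * A hypothetical read of register 25 on page 3 could therefore be
+ * issued as:
+ *
+ *     u16 val;
+ *     s32 ret = e1000e_read_phy_reg_bm(hw, (3 << IGP_PAGE_SHIFT) | 25, &val);
+ */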
+
+/**
+ *  e1000e_read_phy_reg_bm2 - Read BM PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore, if necessary, then reads the PHY register at offset
+ *  and stores the retrieved information in data.  Release any acquired
+ *  semaphores before exiting.
+ **/
+s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+	u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Page 800 works differently than the rest so it has its own func */
+	if (page == BM_WUC_PAGE) {
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+							 true, false);
+		goto out;
+	}
+
+	hw->phy.addr = 1;
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+
+		/* Page is shifted left, PHY expects (page x 32) */
+		ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
+						    page);
+
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					   data);
+out:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ *  e1000e_write_phy_reg_bm2 - Write BM PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+	u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Page 800 works differently than the rest so it has its own func */
+	if (page == BM_WUC_PAGE) {
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+							 false, false);
+		goto out;
+	}
+
+	hw->phy.addr = 1;
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		/* Page is shifted left, PHY expects (page x 32) */
+		ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
+						    page);
+
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					    data);
+
+out:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ *  e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
+ *  @hw: pointer to the HW structure
+ *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
+ *
+ *  Assumes semaphore already acquired and phy_reg points to a valid memory
+ *  address to store contents of the BM_WUC_ENABLE_REG register.
+ **/
+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
+{
+	s32 ret_val;
+	u16 temp;
+
+	/* All page select, port ctrl and wakeup registers use phy address 1 */
+	hw->phy.addr = 1;
+
+	/* Select Port Control Registers page */
+	ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
+	if (ret_val) {
+		e_dbg("Could not set Port Control page\n");
+		goto out;
+	}
+
+	ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
+	if (ret_val) {
+		e_dbg("Could not read PHY register %d.%d\n",
+		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+		goto out;
+	}
+
+	/*
+	 * Enable both PHY wakeup mode and Wakeup register page writes.
+	 * Prevent a power state change by disabling ME and Host PHY wakeup.
+	 */
+	temp = *phy_reg;
+	temp |= BM_WUC_ENABLE_BIT;
+	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp);
+	if (ret_val) {
+		e_dbg("Could not write PHY register %d.%d\n",
+		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+		goto out;
+	}
+
+	/* Select Host Wakeup Registers page */
+	ret_val = e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT));
+
+	/* caller now able to write registers on the Wakeup registers page */
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
+ *  @hw: pointer to the HW structure
+ *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
+ *
+ *  Restore BM_WUC_ENABLE_REG to its original value.
+ *
+ *  Assumes semaphore already acquired and *phy_reg is the contents of the
+ *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
+ *  caller.
+ **/
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
+{
+	s32 ret_val = 0;
+
+	/* Select Port Control Registers page */
+	ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
+	if (ret_val) {
+		e_dbg("Could not set Port Control page\n");
+		goto out;
+	}
+
+	/* Restore 769.17 to its original value */
+	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg);
+	if (ret_val)
+		e_dbg("Could not restore PHY register %d.%d\n",
+		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read or written
+ *  @data: pointer to the data to read or write
+ *  @read: determines if operation is read or write
+ *  @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ *  Read the PHY register at offset and store the retrieved information in
+ *  data, or write data to PHY register at offset.  Note the procedure to
+ *  access the PHY wakeup registers is different than reading the other PHY
+ *  registers. It works as such:
+ *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
+ *  2) Set page to 800 for host (801 if we were manageability)
+ *  3) Write the address using the address opcode (0x11)
+ *  4) Read or write the data using the data opcode (0x12)
+ *  5) Restore 769.17.2 to its original value
+ *
+ *  Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and
+ *  step 5 is done by e1000_disable_phy_wakeup_reg_access_bm().
+ *
+ *  Assumes semaphore is already acquired.  When page_set==true, assumes
+ *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
+ *  is responsible for calls to e1000_[enable|disable]_phy_wakeup_reg_access_bm()).
+ **/
+static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
+					  u16 *data, bool read, bool page_set)
+{
+	s32 ret_val;
+	u16 reg = BM_PHY_REG_NUM(offset);
+	u16 page = BM_PHY_REG_PAGE(offset);
+	u16 phy_reg = 0;
+
+	/* Gig must be disabled for MDIO accesses to Host Wakeup reg page */
+	if ((hw->mac.type == e1000_pchlan) &&
+	    (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
+		e_dbg("Attempting to access page %d while gig enabled.\n",
+		      page);
+
+	if (!page_set) {
+		/* Enable access to PHY wakeup registers */
+		ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+		if (ret_val) {
+			e_dbg("Could not enable PHY wakeup reg access\n");
+			goto out;
+		}
+	}
+
+	e_dbg("Accessing PHY page %d reg 0x%x\n", page, reg);
+
+	/* Write the Wakeup register page offset value using opcode 0x11 */
+	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
+	if (ret_val) {
+		e_dbg("Could not write address opcode to page %d\n", page);
+		goto out;
+	}
+
+	if (read) {
+		/* Read the Wakeup register page value using opcode 0x12 */
+		ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+		                                   data);
+	} else {
+		/* Write the Wakeup register page value using opcode 0x12 */
+		ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+						    *data);
+	}
+
+	if (ret_val) {
+		e_dbg("Could not access PHY reg %d.%d\n", page, reg);
+		goto out;
+	}
+
+	if (!page_set)
+		ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+out:
+	return ret_val;
+}
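+
+/*
+ * When several wakeup-page registers must be accessed in a row, the
+ * enable/disable helpers above can bracket the accesses so steps 1, 2
+ * and 5 run only once.  A hypothetical sequence (PHY semaphore already
+ * held, reg1/reg2 standing for BM_WUC_PAGE-encoded offsets) could be:
+ *
+ *     u16 saved, val;
+ *
+ *     if (!e1000_enable_phy_wakeup_reg_access_bm(hw, &saved)) {
+ *             e1000_access_phy_wakeup_reg_bm(hw, reg1, &val, true, true);
+ *             e1000_access_phy_wakeup_reg_bm(hw, reg2, &val, true, true);
+ *             e1000_disable_phy_wakeup_reg_access_bm(hw, &saved);
+ *     }
+ */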
+
+/**
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or because wake on LAN is not enabled, restore the link to
+ * its previous settings.
+ **/
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	e1e_rphy(hw, PHY_CONTROL, &mii_reg);
+	mii_reg &= ~MII_CR_POWER_DOWN;
+	e1e_wphy(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * e1000_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down the PHY to save power, to turn off link during a driver unload,
+ * or when wake on LAN is not enabled.
+ **/
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	e1e_rphy(hw, PHY_CONTROL, &mii_reg);
+	mii_reg |= MII_CR_POWER_DOWN;
+	e1e_wphy(hw, PHY_CONTROL, mii_reg);
+	usleep_range(1000, 2000);
+}
+
+/**
+ *  e1000e_commit_phy - Soft PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs a soft PHY reset on those that apply. This is a function pointer
+ *  entry point called by drivers.
+ **/
+s32 e1000e_commit_phy(struct e1000_hw *hw)
+{
+	if (hw->phy.ops.commit)
+		return hw->phy.ops.commit(hw);
+
+	return 0;
+}
+
+/**
+ *  e1000_set_d0_lplu_state - Sets low power link up state for D0
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D0
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D0
+ *  and enable SmartSpeed.  LPLU and SmartSpeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.  This is a function pointer entry point called by drivers.
+ **/
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
+{
+	if (hw->phy.ops.set_d0_lplu_state)
+		return hw->phy.ops.set_d0_lplu_state(hw, active);
+
+	return 0;
+}
+
+/**
+ *  __e1000_read_phy_reg_hv -  Read HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *  @locked: semaphore has already been acquired or not
+ *  @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ *  Acquires semaphore, if necessary, then reads the PHY register at offset
+ *  and stores the retrieved information in data.  Release any acquired
+ *  semaphore before exiting.
+ **/
+static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
+				   bool locked, bool page_set)
+{
+	s32 ret_val;
+	u16 page = BM_PHY_REG_PAGE(offset);
+	u16 reg = BM_PHY_REG_NUM(offset);
+	u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+	if (!locked) {
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/* Page 800 works differently than the rest so it has its own func */
+	if (page == BM_WUC_PAGE) {
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+							 true, page_set);
+		goto out;
+	}
+
+	if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+		ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+		                                         data, true);
+		goto out;
+	}
+
+	if (!page_set) {
+		if (page == HV_INTC_FC_PAGE_START)
+			page = 0;
+
+		if (reg > MAX_PHY_MULTI_PAGE_REG) {
+			/* Page is shifted left, PHY expects (page x 32) */
+			ret_val = e1000_set_page_igp(hw,
+						     (page << IGP_PAGE_SHIFT));
+
+			hw->phy.addr = phy_addr;
+
+			if (ret_val)
+				goto out;
+		}
+	}
+
+	e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+	      page << IGP_PAGE_SHIFT, reg);
+
+	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
+	                                  data);
+out:
+	if (!locked)
+		hw->phy.ops.release(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_read_phy_reg_hv -  Read HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore then reads the PHY register at offset and stores
+ *  the retrieved information in data.  Release the acquired semaphore
+ *  before exiting.
+ **/
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_phy_reg_hv(hw, offset, data, false, false);
+}
+
+/**
+ *  e1000_read_phy_reg_hv_locked -  Read HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset and stores the retrieved information
+ *  in data.  Assumes semaphore already acquired.
+ **/
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ *  e1000_read_phy_reg_page_hv - Read HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset and stores the retrieved information
+ *  in data.  Assumes semaphore already acquired and page already set.
+ **/
+s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_phy_reg_hv(hw, offset, data, true, true);
+}
+
+/**
+ *  __e1000_write_phy_reg_hv - Write HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *  @locked: semaphore has already been acquired or not
+ *  @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
+				    bool locked, bool page_set)
+{
+	s32 ret_val;
+	u16 page = BM_PHY_REG_PAGE(offset);
+	u16 reg = BM_PHY_REG_NUM(offset);
+	u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+	if (!locked) {
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/* Page 800 works differently than the rest so it has its own func */
+	if (page == BM_WUC_PAGE) {
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+							 false, page_set);
+		goto out;
+	}
+
+	if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+		ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+		                                         &data, false);
+		goto out;
+	}
+
+	if (!page_set) {
+		if (page == HV_INTC_FC_PAGE_START)
+			page = 0;
+
+		/*
+		 * Workaround MDIO accesses being disabled after entering IEEE
+		 * Power Down (when bit 11 of the PHY Control register is set)
+		 */
+		if ((hw->phy.type == e1000_phy_82578) &&
+		    (hw->phy.revision >= 1) &&
+		    (hw->phy.addr == 2) &&
+		    ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) {
+			u16 data2 = 0x7EFF;
+			ret_val = e1000_access_phy_debug_regs_hv(hw,
+								 (1 << 6) | 0x3,
+								 &data2, false);
+			if (ret_val)
+				goto out;
+		}
+
+		if (reg > MAX_PHY_MULTI_PAGE_REG) {
+			/* Page is shifted left, PHY expects (page x 32) */
+			ret_val = e1000_set_page_igp(hw,
+						     (page << IGP_PAGE_SHIFT));
+
+			hw->phy.addr = phy_addr;
+
+			if (ret_val)
+				goto out;
+		}
+	}
+
+	e_dbg("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+	      page << IGP_PAGE_SHIFT, reg);
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
+	                                  data);
+
+out:
+	if (!locked)
+		hw->phy.ops.release(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_hv - Write HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore then writes the data to PHY register at the offset.
+ *  Release the acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_phy_reg_hv(hw, offset, data, false, false);
+}
+
+/**
+ *  e1000_write_phy_reg_hv_locked - Write HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset.  Assumes semaphore
+ *  already acquired.
+ **/
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ *  e1000_write_phy_reg_page_hv - Write HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset.  Assumes semaphore
+ *  already acquired and page already set.
+ **/
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_phy_reg_hv(hw, offset, data, true, true);
+}
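+
+/*
+ * The three hv write (and read) variants above differ only in what the
+ * caller already holds: the plain variant acquires and releases the PHY
+ * semaphore itself, the _locked variant assumes the semaphore is held,
+ * and the _page variant additionally assumes the page is already set.
+ * A hypothetical caller batching writes under one acquisition could do:
+ *
+ *     ret_val = hw->phy.ops.acquire(hw);
+ *     if (!ret_val) {
+ *             e1000_write_phy_reg_hv_locked(hw, offset1, data1);
+ *             e1000_write_phy_reg_hv_locked(hw, offset2, data2);
+ *             hw->phy.ops.release(hw);
+ *     }
+ *
+ * where offset1/offset2 and data1/data2 are placeholders.
+ */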
+
+/**
+ *  e1000_get_phy_addr_for_hv_page - Get PHY address based on page
+ *  @page: page to be accessed
+ *
+ *  Returns the PHY address to use for the page requested.
+ **/
+static u32 e1000_get_phy_addr_for_hv_page(u32 page)
+{
+	u32 phy_addr = 2;
+
+	if (page >= HV_INTC_FC_PAGE_START)
+		phy_addr = 1;
+
+	return phy_addr;
+}
+
+/**
+ *  e1000_access_phy_debug_regs_hv - Read/write HV PHY vendor specific high registers
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read or written
+ *  @data: pointer to the data to be read or written
+ *  @read: determines if operation is read or write
+ *
+ *  Reads the PHY register at offset and stores the retrieved information
+ *  in data.  Assumes semaphore already acquired.  Note that the procedure
+ *  to access these regs uses the address port and data port to read/write.
+ *  These accesses are done with PHY address 2 and without using pages.
+ **/
+static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+                                          u16 *data, bool read)
+{
+	s32 ret_val;
+	u32 addr_reg = 0;
+	u32 data_reg = 0;
+
+	/* This takes care of the difference with desktop vs mobile phy */
+	addr_reg = (hw->phy.type == e1000_phy_82578) ?
+	           I82578_ADDR_REG : I82577_ADDR_REG;
+	data_reg = addr_reg + 1;
+
+	/* All operations in this function are phy address 2 */
+	hw->phy.addr = 2;
+
+	/* masking with 0x3F to remove the page from offset */
+	ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
+	if (ret_val) {
+		e_dbg("Could not write the Address Offset port register\n");
+		goto out;
+	}
+
+	/* Read or write the data value next */
+	if (read)
+		ret_val = e1000e_read_phy_reg_mdic(hw, data_reg, data);
+	else
+		ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data);
+
+	if (ret_val) {
+		e_dbg("Could not access the Data port register\n");
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_link_stall_workaround_hv - Si workaround
+ *  @hw: pointer to the HW structure
+ *
+ *  This function works around a Si bug where the link partner can get
+ *  a link up indication before the PHY does.  If small packets are sent
+ *  by the link partner they can be placed in the packet buffer without
+ *  being properly accounted for by the PHY and will stall preventing
+ *  further packets from being received.  The workaround is to clear the
+ *  packet buffer after the PHY detects link up.
+ **/
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 data;
+
+	if (hw->phy.type != e1000_phy_82578)
+		goto out;
+
+	/* Do not apply workaround if PHY loopback (bit 14) is set */
+	e1e_rphy(hw, PHY_CONTROL, &data);
+	if (data & PHY_CONTROL_LB)
+		goto out;
+
+	/* check if link is up and at 1Gbps */
+	ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
+	if (ret_val)
+		goto out;
+
+	data &= BM_CS_STATUS_LINK_UP |
+	        BM_CS_STATUS_RESOLVED |
+	        BM_CS_STATUS_SPEED_MASK;
+
+	if (data != (BM_CS_STATUS_LINK_UP |
+	             BM_CS_STATUS_RESOLVED |
+	             BM_CS_STATUS_SPEED_1000))
+		goto out;
+
+	mdelay(200);
+
+	/* flush the packets in the fifo buffer */
+	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
+			   HV_MUX_DATA_CTRL_FORCE_SPEED);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_82577 - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_82577(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
+		                      ? e1000_rev_polarity_reversed
+		                      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.
+ **/
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	udelay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		e_dbg("Waiting for forced speed/duplex link on 82577 phy\n");
+
+		ret_val = e1000e_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+
+		if (!link)
+			e_dbg("Link taking longer than expected.\n");
+
+		/* Try once more */
+		ret_val = e1000e_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_82577 - Retrieve I82577 PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Read PHY status to determine if link is up.  If link is up, then
+ *  set/determine 10base-T extended distance and polarity correction.  Read
+ *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
+ *  determine the cable length and the local and remote receiver status.
+ **/
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		e_dbg("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	phy->polarity_correction = true;
+
+	ret_val = e1000_check_polarity_82577(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false;
+
+	if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
+	    I82577_PHY_STATUS2_SPEED_1000MBPS) {
+		ret_val = hw->phy.ops.get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
+		if (ret_val)
+			goto out;
+
+		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+		                ? e1000_1000t_rx_status_ok
+		                : e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+		                 ? e1000_1000t_rx_status_ok
+		                 : e1000_1000t_rx_status_not_ok;
+	} else {
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
+ *  @hw: pointer to the HW structure
+ *
+ * Reads the diagnostic status register and verifies result is valid before
+ * placing it in the phy_cable_length field.
+ **/
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, length;
+
+	ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+	         I82577_DSTATUS_CABLE_LENGTH_SHIFT;
+
+	if (length == E1000_CABLE_LENGTH_UNDEFINED)
+		ret_val = -E1000_ERR_PHY;
+
+	phy->cable_length = length;
+
+out:
+	return ret_val;
+}
+++ linux-patched/drivers/xenomai/net/drivers/e1000e/defines.h	2022-03-21 12:58:29.570887196 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000e/lib.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_DEFINES_H_
+#define _E1000_DEFINES_H_
+
+#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP    0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC     0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS     0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS    0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE    0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC    0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC    0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU    0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP    0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP     0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE    0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC    0x00000004 /* Tx Underrun */
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE  8
+#define REQ_RX_DESCRIPTOR_MULTIPLE  8
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME       0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN     0x00000002 /* PME Enable */
+#define E1000_WUC_PHY_WAKE   0x00000100 /* if PHY supports wakeup */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP  0x00000020 /* ARP Request Packet Wakeup Enable */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC         E1000_WUFC_LNKC
+#define E1000_WUS_MAG          E1000_WUFC_MAG
+#define E1000_WUS_EX           E1000_WUFC_EX
+#define E1000_WUS_MC           E1000_WUFC_MC
+#define E1000_WUS_BC           E1000_WUFC_BC
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_LPCD  0x00000004     /* LCD Power Cycle Done */
+#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
+#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000004 /* Force SMBus mode*/
+#define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES  0x00C00000
+#define E1000_CTRL_EXT_EIAME          0x01000000
+#define E1000_CTRL_EXT_DRV_LOAD       0x10000000 /* Driver loaded bit for FW */
+#define E1000_CTRL_EXT_IAME           0x08000000 /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_INT_TIMER_CLR  0x20000000 /* Clear Interrupt timers after IMS clear */
+#define E1000_CTRL_EXT_PBA_CLR        0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_LSECCK         0x00001000
+#define E1000_CTRL_EXT_PHYPDEN        0x00100000
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
+#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
+#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
+#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
+#define E1000_RXD_ERR_CE        0x01    /* CRC Error */
+#define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
+#define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
+#define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
+
+#define E1000_RXDEXT_STATERR_CE    0x01000000
+#define E1000_RXDEXT_STATERR_SE    0x02000000
+#define E1000_RXDEXT_STATERR_SEQ   0x04000000
+#define E1000_RXDEXT_STATERR_CXE   0x10000000
+#define E1000_RXDEXT_STATERR_RXE   0x80000000
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+    E1000_RXD_ERR_CE  |                \
+    E1000_RXD_ERR_SE  |                \
+    E1000_RXD_ERR_SEQ |                \
+    E1000_RXD_ERR_CXE |                \
+    E1000_RXD_ERR_RXE)
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+    E1000_RXDEXT_STATERR_CE  |            \
+    E1000_RXDEXT_STATERR_SE  |            \
+    E1000_RXDEXT_STATERR_SEQ |            \
+    E1000_RXDEXT_STATERR_CXE |            \
+    E1000_RXDEXT_STATERR_RXE)
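+
+/*
+ * A hypothetical receive path using extended descriptors could apply the
+ * mask above to decide whether a completed buffer must be dropped
+ * (staterr standing for the descriptor's status/error word):
+ *
+ *     if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)
+ *             goto drop_packet;
+ */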
+
+#define E1000_RXDPS_HDRSTAT_HDRSP              0x00008000
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN        0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_ARP_EN        0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
+/* Enable MAC address filtering */
+#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000
+/* Enable MNG packets to host memory */
+#define E1000_MANC_EN_MNG2HOST   0x00200000
+
+#define E1000_MANC2H_PORT_623    0x00000020 /* Port 0x26f */
+#define E1000_MANC2H_PORT_664    0x00000040 /* Port 0x298 */
+#define E1000_MDEF_PORT_623      0x00000800 /* Port 0x26f */
+#define E1000_MDEF_PORT_664      0x00000400 /* Port 0x298 */
+
+/* Receive Control */
+#define E1000_RCTL_EN             0x00000002    /* enable */
+#define E1000_RCTL_SBP            0x00000004    /* store bad packet */
+#define E1000_RCTL_UPE            0x00000008    /* unicast promiscuous enable */
+#define E1000_RCTL_MPE            0x00000010    /* multicast promiscuous enab */
+#define E1000_RCTL_LPE            0x00000020    /* long packet enable */
+#define E1000_RCTL_LBM_NO         0x00000000    /* no loopback mode */
+#define E1000_RCTL_LBM_MAC        0x00000040    /* MAC loopback mode */
+#define E1000_RCTL_LBM_TCVR       0x000000C0    /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_PS        0x00000400    /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF     0x00000000    /* Rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT       12            /* multicast offset shift */
+#define E1000_RCTL_MO_3           0x00003000    /* multicast offset 15:4 */
+#define E1000_RCTL_BAM            0x00008000    /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048        0x00000000    /* Rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024        0x00010000    /* Rx buffer size 1024 */
+#define E1000_RCTL_SZ_512         0x00020000    /* Rx buffer size 512 */
+#define E1000_RCTL_SZ_256         0x00030000    /* Rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384       0x00010000    /* Rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192        0x00020000    /* Rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096        0x00030000    /* Rx buffer size 4096 */
+#define E1000_RCTL_VFE            0x00040000    /* vlan filter enable */
+#define E1000_RCTL_CFIEN          0x00080000    /* canonical form enable */
+#define E1000_RCTL_CFI            0x00100000    /* canonical form indicator */
+#define E1000_RCTL_PMCF           0x00800000    /* pass MAC control frames */
+#define E1000_RCTL_BSEX           0x02000000    /* Buffer size extension */
+#define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */
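+
+/*
+ * The SZ_* codes above are interpreted together with E1000_RCTL_BSEX.
+ * For example, a hypothetical 4096-byte receive buffer setup would clear
+ * any previous size code and then select:
+ *
+ *     rctl |= E1000_RCTL_BSEX | E1000_RCTL_SZ_4096;
+ */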
+
+/*
+ * Use byte values for the following shift parameters
+ * Usage:
+ *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE0_MASK) |
+ *                ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE1_MASK) |
+ *                ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE2_MASK) |
+ *                ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256],  default=256
+ *       value1 = [1024..64512], default=4096
+ *       value2 = [0..64512],    default=4096
+ *       value3 = [0..64512],    default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK   0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT  7            /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT  2            /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT  6            /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14            /* Shift _left_ 14 */
+
+/* SWFW_SYNC Definitions */
+#define E1000_SWFW_EEP_SM   0x1
+#define E1000_SWFW_PHY0_SM  0x2
+#define E1000_SWFW_PHY1_SM  0x4
+#define E1000_SWFW_CSR_SM   0x8
+
+/* Device Control */
+#define E1000_CTRL_FD       0x00000001  /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
+#define E1000_CTRL_LRST     0x00000008  /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_ASDE     0x00000020  /* Auto-speed detect enable */
+#define E1000_CTRL_SLU      0x00000040  /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS     0x00000080  /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL  0x00000300  /* Speed Select Mask */
+#define E1000_CTRL_SPD_10   0x00000000  /* Force 10Mb */
+#define E1000_CTRL_SPD_100  0x00000100  /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200  /* Force 1Gb */
+#define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
+#define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
+#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
+#define E1000_CTRL_LANPHYPC_VALUE    0x00020000 /* SW value of LANPHYPC */
+#define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
+#define E1000_CTRL_RST      0x04000000  /* Global reset */
+#define E1000_CTRL_RFCE     0x08000000  /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE     0x10000000  /* Transmit flow control enable */
+#define E1000_CTRL_VME      0x40000000  /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST  0x80000000  /* PHY Reset */
+
+/*
+ * Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+
+/* Device Status */
+#define E1000_STATUS_FD         0x00000001      /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU         0x00000002      /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK  0x0000000C      /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_1     0x00000004      /* Function 1 */
+#define E1000_STATUS_TXOFF      0x00000010      /* transmission paused */
+#define E1000_STATUS_SPEED_10   0x00000000      /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100  0x00000040      /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080      /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200   /* Lan Init Completion by NVM */
+#define E1000_STATUS_PHYRA      0x00000400      /* PHY Reset Asserted */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+
+#define ADVERTISE_10_HALF                 0x0001
+#define ADVERTISE_10_FULL                 0x0002
+#define ADVERTISE_100_HALF                0x0004
+#define ADVERTISE_100_FULL                0x0008
+#define ADVERTISE_1000_HALF               0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL               0x0020
+
+/* 1000/H is not supported, nor spec-compliant. */
+#define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF |   ADVERTISE_10_FULL | \
+				ADVERTISE_100_HALF |  ADVERTISE_100_FULL | \
+						     ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG      ( ADVERTISE_10_HALF |   ADVERTISE_10_FULL | \
+				ADVERTISE_100_HALF |  ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED    (ADVERTISE_100_HALF |  ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED      (ADVERTISE_10_HALF |   ADVERTISE_10_FULL)
+#define E1000_ALL_HALF_DUPLEX   (ADVERTISE_10_HALF |  ADVERTISE_100_HALF)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT   E1000_ALL_SPEED_DUPLEX
+
+/* LED Control */
+#define E1000_PHY_LED0_MODE_MASK          0x00000007
+#define E1000_PHY_LED0_IVRT               0x00000008
+#define E1000_PHY_LED0_MASK               0x0000001F
+
+#define E1000_LEDCTL_LED0_MODE_MASK       0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT      0
+#define E1000_LEDCTL_LED0_IVRT            0x00000040
+#define E1000_LEDCTL_LED0_BLINK           0x00000080
+
+#define E1000_LEDCTL_MODE_LINK_UP       0x2
+#define E1000_LEDCTL_MODE_LED_ON        0xE
+#define E1000_LEDCTL_MODE_LED_OFF       0xF
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D     0x00100000 /* Data Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP    0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC     0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS     0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS    0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE    0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC    0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC    0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU    0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP    0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP     0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE    0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC    0x00000004 /* Tx Underrun */
+
+/* Transmit Control */
+#define E1000_TCTL_EN     0x00000002    /* enable Tx */
+#define E1000_TCTL_PSP    0x00000008    /* pad short packets */
+#define E1000_TCTL_CT     0x00000ff0    /* collision threshold */
+#define E1000_TCTL_COLD   0x003ff000    /* collision distance */
+#define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
+#define E1000_TCTL_MULR   0x10000000    /* Multiple request support */
+
+/* Transmit Arbitration Count */
+
+/* SerDes Control */
+#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_TUOFL     0x00000200   /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPPCSE    0x00001000   /* IP payload checksum enable */
+
+/* Header split receive */
+#define E1000_RFCTL_NFSW_DIS            0x00000040
+#define E1000_RFCTL_NFSR_DIS            0x00000080
+#define E1000_RFCTL_ACK_DIS             0x00001000
+#define E1000_RFCTL_EXTEN               0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS         0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS    0x00020000
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD       15
+#define E1000_CT_SHIFT                  4
+#define E1000_COLLISION_DISTANCE        63
+#define E1000_COLD_SHIFT                12
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK  0x000003FF
+
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT  10
+
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT  20
+
+#define MAX_JUMBO_FRAME_SIZE    0x3F00
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP      0x00000020
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE       0x00000001
+#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE       0x00000008
+#define E1000_EXTCNF_CTRL_SWFLAG                 0x00000020
+#define E1000_EXTCNF_CTRL_GATE_PHY_CFG           0x00000080
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK   0x00FF0000
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT          16
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK   0x0FFF0000
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT          16
+
+#define E1000_PHY_CTRL_D0A_LPLU           0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU        0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE        0x00000040
+
+#define E1000_KABGTXD_BGSQLBIAS           0x00050000
+
+/* PBA constants */
+#define E1000_PBA_8K  0x0008    /* 8KB */
+#define E1000_PBA_16K 0x0010    /* 16KB */
+
+#define E1000_PBS_16K E1000_PBA_16K
+
+#define IFS_MAX       80
+#define IFS_MIN       40
+#define IFS_RATIO     4
+#define IFS_STEP      10
+#define MIN_NUM_XMITS 1000
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI         0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI      0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_DRV_LOAD     0x00000008 /* Driver Loaded Bit */
+
+#define E1000_SWSM2_LOCK        0x00000002 /* Secondary driver semaphore bit */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW          0x00000001 /* Transmit desc written back */
+#define E1000_ICR_LSC           0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ         0x00000008 /* Rx sequence error */
+#define E1000_ICR_RXDMT0        0x00000010 /* Rx desc min. threshold (0) */
+#define E1000_ICR_RXT0          0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_RXQ0          0x00100000 /* Rx Queue 0 Interrupt */
+#define E1000_ICR_RXQ1          0x00200000 /* Rx Queue 1 Interrupt */
+#define E1000_ICR_TXQ0          0x00400000 /* Tx Queue 0 Interrupt */
+#define E1000_ICR_TXQ1          0x00800000 /* Tx Queue 1 Interrupt */
+#define E1000_ICR_OTHER         0x01000000 /* Other Interrupts */
+
+/* PBA ECC Register */
+#define E1000_PBA_ECC_COUNTER_MASK  0xFFF00000 /* ECC counter mask */
+#define E1000_PBA_ECC_COUNTER_SHIFT 20         /* ECC counter shift value */
+#define E1000_PBA_ECC_CORR_EN       0x00000001 /* ECC correction enable */
+#define E1000_PBA_ECC_STAT_CLR      0x00000002 /* Clear ECC error counter */
+#define E1000_PBA_ECC_INT_EN        0x00000004 /* Enable ICR bit 5 for ECC */
+
+/*
+ * This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXT0   = Receiver Timer Interrupt (ring 0)
+ *   o TXDW   = Transmit Descriptor Written Back
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ *   o LSC    = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+    E1000_IMS_RXT0   |    \
+    E1000_IMS_TXDW   |    \
+    E1000_IMS_RXDMT0 |    \
+    E1000_IMS_RXSEQ  |    \
+    E1000_IMS_LSC)
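+
+/*
+ * A hypothetical interrupt-enable path would program this mask into the
+ * IMS register, e.g. (assuming the driver's ew32() register write helper):
+ *
+ *     ew32(IMS, IMS_ENABLE_MASK);
+ */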
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */
+#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */
+#define E1000_IMS_RXT0      E1000_ICR_RXT0      /* Rx timer intr */
+#define E1000_IMS_RXQ0      E1000_ICR_RXQ0      /* Rx Queue 0 Interrupt */
+#define E1000_IMS_RXQ1      E1000_ICR_RXQ1      /* Rx Queue 1 Interrupt */
+#define E1000_IMS_TXQ0      E1000_ICR_TXQ0      /* Tx Queue 0 Interrupt */
+#define E1000_IMS_TXQ1      E1000_ICR_TXQ1      /* Tx Queue 1 Interrupt */
+#define E1000_IMS_OTHER     E1000_ICR_OTHER     /* Other Interrupts */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_ICS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */
+#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN    0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
+/* Enable the counting of desc. still to be processed. */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW  0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE         0x8808
+
+/* 802.1q VLAN Packet Size */
+#define E1000_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
+
+/* Receive Address */
+/*
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots.  However, we reserve one of these spots
+ * (RAR[15]) for our directed address used by controllers with
+ * manageability enabled, allowing us room for 15 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES     15
+#define E1000_RAH_AV  0x80000000        /* Receive descriptor valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2
+
+/* Error Codes */
+#define E1000_ERR_NVM      1
+#define E1000_ERR_PHY      2
+#define E1000_ERR_CONFIG   3
+#define E1000_ERR_PARAM    4
+#define E1000_ERR_MAC_INIT 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET   9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET   12
+#define E1000_ERR_SWFW_SYNC 13
+#define E1000_NOT_IMPLEMENTED 14
+#define E1000_ERR_INVALID_ARGUMENT  16
+#define E1000_ERR_NO_SPACE          17
+#define E1000_ERR_NVM_PBA_SECTION   18
+
+/* Loop limit on how long we wait for auto-negotiation to complete */
+#define FIBER_LINK_UP_LIMIT               50
+#define COPPER_LINK_UP_LIMIT              10
+#define PHY_AUTO_NEG_LIMIT                45
+#define PHY_FORCE_LIMIT                   20
+/* Number of 100 microseconds we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT      800
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT             100
+/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */
+#define MDIO_OWNERSHIP_TIMEOUT      10
+/* Number of milliseconds for NVM auto read done after MAC reset. */
+#define AUTO_READ_DONE_TIMEOUT      10
+
+/* Flow Control */
+#define E1000_FCRTH_RTH  0x0000FFF8     /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTL_RTL  0x0000FFF8     /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000     /* Enable XON frame transmission */
+
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD         0x00000020        /* TXCW full duplex */
+#define E1000_TXCW_PAUSE      0x00000080        /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR    0x00000100        /* TXCW astm pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180        /* TXCW pause request mask */
+#define E1000_TXCW_ANE        0x80000000        /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW         0x0000ffff        /* RxConfigWord mask */
+#define E1000_RXCW_IV         0x08000000        /* Receive config invalid */
+#define E1000_RXCW_C          0x20000000        /* Receive config */
+#define E1000_RXCW_SYNCH      0x40000000        /* Receive config synch */
+
+/* PCI Express Control */
+#define E1000_GCR_RXD_NO_SNOOP          0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP       0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP       0x00000004
+#define E1000_GCR_TXD_NO_SNOOP          0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP       0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP       0x00000020
+
+#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP         | \
+			   E1000_GCR_RXDSCW_NO_SNOOP      | \
+			   E1000_GCR_RXDSCR_NO_SNOOP      | \
+			   E1000_GCR_TXD_NO_SNOOP         | \
+			   E1000_GCR_TXDSCW_NO_SNOOP      | \
+			   E1000_GCR_TXDSCR_NO_SNOOP)
+
+/* PHY Control Register */
+#define MII_CR_FULL_DUPLEX      0x0100  /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200  /* Restart auto negotiation */
+#define MII_CR_POWER_DOWN       0x0800  /* Power down */
+#define MII_CR_AUTO_NEG_EN      0x1000  /* Auto Neg Enable */
+#define MII_CR_LOOPBACK         0x4000  /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET            0x8000  /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000       0x0040
+#define MII_CR_SPEED_100        0x2000
+#define MII_CR_SPEED_10         0x0000
+
+/* PHY Status Register */
+#define MII_SR_LINK_STATUS       0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_COMPLETE  0x0020 /* Auto Neg Complete */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_10T_HD_CAPS      0x0020   /* 10T   Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS      0x0040   /* 10T   Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS    0x0080   /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS    0x0100   /* 100TX Full Duplex Capable */
+#define NWAY_AR_PAUSE            0x0400   /* Pause operation desired */
+#define NWAY_AR_ASM_DIR          0x0800   /* Asymmetric Pause Direction bit */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_100TX_FD_CAPS  0x0100 /* LP 100TX Full Dplx Capable */
+#define NWAY_LPAR_PAUSE          0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR        0x0800 /* LP Asymmetric Pause Direction bit */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS     0x0001 /* LP has Auto Neg Capability */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_HD_CAPS         0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS         0x0200 /* Advertise 1000T FD capability  */
+					/* 0=DTE device */
+#define CR_1000T_MS_VALUE        0x0800 /* 1=Configure PHY as Master */
+					/* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE       0x1000 /* 1=Master/Slave manual config value */
+					/* 0=Automatic Master/Slave config */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS  0x2000 /* Local receiver OK */
+
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL      0x00 /* Control Register */
+#define PHY_STATUS       0x01 /* Status Register */
+#define PHY_ID1          0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2          0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV  0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY   0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP  0x06 /* Autoneg Expansion Reg */
+#define PHY_1000T_CTRL   0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS   0x0F /* Extended Status Reg */
+
+#define PHY_CONTROL_LB   0x4000 /* PHY Loopback bit */
+
+/* NVM Control */
+#define E1000_EECD_SK        0x00000001 /* NVM Clock */
+#define E1000_EECD_CS        0x00000002 /* NVM Chip Select */
+#define E1000_EECD_DI        0x00000004 /* NVM Data In */
+#define E1000_EECD_DO        0x00000008 /* NVM Data Out */
+#define E1000_EECD_REQ       0x00000040 /* NVM Access Request */
+#define E1000_EECD_GNT       0x00000080 /* NVM Access Grant */
+#define E1000_EECD_PRES      0x00000100 /* NVM Present */
+#define E1000_EECD_SIZE      0x00000200 /* NVM Size (0=64 word 1=256 word) */
+/* NVM Addressing bits based on type (0-small, 1-large) */
+#define E1000_EECD_ADDR_BITS 0x00000400
+#define E1000_NVM_GRANT_ATTEMPTS   1000 /* NVM # attempts to gain grant */
+#define E1000_EECD_AUTO_RD          0x00000200  /* NVM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK     0x00007800  /* NVM Size */
+#define E1000_EECD_SIZE_EX_SHIFT     11
+#define E1000_EECD_FLUPD     0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN    0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SEC1VAL   0x00400000 /* Sector One Valid */
+#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
+
+#define E1000_NVM_RW_REG_DATA   16   /* Offset to data in NVM read/write registers */
+#define E1000_NVM_RW_REG_DONE   2    /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START  1    /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2    /* Shift to the address bits */
+#define E1000_NVM_POLL_WRITE    1    /* Flag for polling for write complete */
+#define E1000_NVM_POLL_READ     0    /* Flag for polling for read complete */
+#define E1000_FLASH_UPDATES  2000
+
+/* NVM Word Offsets */
+#define NVM_COMPAT                 0x0003
+#define NVM_ID_LED_SETTINGS        0x0004
+#define NVM_INIT_CONTROL2_REG      0x000F
+#define NVM_INIT_CONTROL3_PORT_B   0x0014
+#define NVM_INIT_3GIO_3            0x001A
+#define NVM_INIT_CONTROL3_PORT_A   0x0024
+#define NVM_CFG                    0x0012
+#define NVM_ALT_MAC_ADDR_PTR       0x0037
+#define NVM_CHECKSUM_REG           0x003F
+
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
+
+#define E1000_NVM_CFG_DONE_PORT_0  0x40000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1  0x80000 /* ...for second port */
+
+/* Mask bits for fields in Word 0x0f of the NVM */
+#define NVM_WORD0F_PAUSE_MASK       0x3000
+#define NVM_WORD0F_PAUSE            0x1000
+#define NVM_WORD0F_ASM_DIR          0x2000
+
+/* Mask bits for fields in Word 0x1a of the NVM */
+#define NVM_WORD1A_ASPM_MASK  0x000C
+
+/* Mask bits for fields in Word 0x03 of the EEPROM */
+#define NVM_COMPAT_LOM    0x0800
+
+/* length of string needed to store PBA number */
+#define E1000_PBANUM_LENGTH             11
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+#define NVM_SUM                    0xBABA
+
+/* PBA (printed board assembly) number words */
+#define NVM_PBA_OFFSET_0           8
+#define NVM_PBA_OFFSET_1           9
+#define NVM_PBA_PTR_GUARD          0xFAFA
+#define NVM_WORD_SIZE_BASE_SHIFT   6
+
+/* NVM Commands - SPI */
+#define NVM_MAX_RETRY_SPI          5000 /* Max wait of 5ms, for RDY signal */
+#define NVM_READ_OPCODE_SPI        0x03 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_SPI       0x02 /* NVM write opcode */
+#define NVM_A8_OPCODE_SPI          0x08 /* opcode bit-3 = address bit-8 */
+#define NVM_WREN_OPCODE_SPI        0x06 /* NVM set Write Enable latch */
+#define NVM_RDSR_OPCODE_SPI        0x05 /* NVM read Status register */
+
+/* SPI NVM Status Register */
+#define NVM_STATUS_RDY_SPI         0x01
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT       ((ID_LED_OFF1_ON2  << 12) | \
+			      (ID_LED_OFF1_OFF2 <<  8) | \
+			      (ID_LED_DEF1_DEF2 <<  4) | \
+			      (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2     0x1
+#define ID_LED_DEF1_ON2      0x2
+#define ID_LED_DEF1_OFF2     0x3
+#define ID_LED_ON1_DEF2      0x4
+#define ID_LED_ON1_ON2       0x5
+#define ID_LED_ON1_OFF2      0x6
+#define ID_LED_OFF1_DEF2     0x7
+#define ID_LED_OFF1_ON2      0x8
+#define ID_LED_OFF1_OFF2     0x9
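+/*
+ * Worked example (illustrative note, not part of the original sources):
+ * with the values above, ID_LED_DEFAULT expands to
+ * (0x8 << 12) | (0x9 << 8) | (0x1 << 4) | 0x1 = 0x8911, i.e. one 4-bit
+ * mode selector per LED, as consumed by e1000e_id_led_init() below.
+ */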
+
+#define IGP_ACTIVITY_LED_MASK   0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE           0x07000000
+
+/* PCI/PCI-X/PCI-EX Config space */
+#define PCI_HEADER_TYPE_REGISTER     0x0E
+#define PCIE_LINK_STATUS             0x12
+
+#define PCI_HEADER_TYPE_MULTIFUNC    0x80
+#define PCIE_LINK_WIDTH_MASK         0x3F0
+#define PCIE_LINK_WIDTH_SHIFT        4
+
+#define PHY_REVISION_MASK      0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS    0x1F  /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+/* Bit definitions for valid PHY IDs. */
+/*
+ * I = Integrated
+ * E = External
+ */
+#define M88E1000_E_PHY_ID    0x01410C50
+#define M88E1000_I_PHY_ID    0x01410C30
+#define M88E1011_I_PHY_ID    0x01410C20
+#define IGP01E1000_I_PHY_ID  0x02A80380
+#define M88E1111_I_PHY_ID    0x01410CC0
+#define GG82563_E_PHY_ID     0x01410CA0
+#define IGP03E1000_E_PHY_ID  0x02A80390
+#define IFE_E_PHY_ID         0x02A80330
+#define IFE_PLUS_E_PHY_ID    0x02A80320
+#define IFE_C_E_PHY_ID       0x02A80310
+#define BME1000_E_PHY_ID     0x01410CB0
+#define BME1000_E_PHY_ID_R2  0x01410CB1
+#define I82577_E_PHY_ID      0x01540050
+#define I82578_E_PHY_ID      0x004DD040
+#define I82579_E_PHY_ID      0x01540090
+#define I217_E_PHY_ID        0x015400A0
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS   0x11  /* PHY Specific Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14  /* Extended PHY Specific Control */
+
+#define M88E1000_PHY_PAGE_SELECT   0x1D  /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL   0x1E  /* Its meaning depends on reg 29 */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define M88E1000_PSCR_MDI_MANUAL_MODE  0x0000  /* MDI Crossover Mode bits 6:5 */
+					       /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020  /* Manual MDIX configuration */
+/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+#define M88E1000_PSCR_AUTO_X_1000T     0x0040
+/* Auto crossover enabled all speeds */
+#define M88E1000_PSCR_AUTO_X_MODE      0x0060
+/*
+ * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
+ * 0=Normal 10BASE-T Rx Threshold
+ */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_REV_POLARITY       0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT          0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX               0x0040 /* 1=MDIX; 0=MDI */
+/* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */
+#define M88E1000_PSSR_CABLE_LENGTH       0x0380
+#define M88E1000_PSSR_SPEED              0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_1000MBS            0x8000 /* 10=1000Mbs */
+
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X   0x0000
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave
+ */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK  0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X    0x0100
+#define M88E1000_EPSCR_TX_CLK_25      0x0070 /* 25  MHz TX_CLK */
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
+
+#define I82578_EPSCR_DOWNSHIFT_ENABLE          0x0020
+#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK    0x001C
+
+/* BME1000 PHY Specific Control Register */
+#define BME1000_PSCR_ENABLE_DOWNSHIFT   0x0800 /* 1 = enable downshift */
+
+
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
+                           ((reg) & MAX_PHY_REG_ADDRESS))
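+/*
+ * Illustrative note (not part of the original sources): PHY_REG(2, 21)
+ * evaluates to (2 << 5) | (21 & 0x1F) = 0x40 | 0x15 = 0x55, i.e. register
+ * 21 on PHY page 2.
+ */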
+
+/*
+ * Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT        5
+#define GG82563_REG(page, reg)    \
+	(((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG       30
+
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL           \
+	GG82563_REG(0, 16) /* PHY Specific Control */
+#define GG82563_PHY_PAGE_SELECT         \
+	GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2         \
+	GG82563_REG(0, 26) /* PHY Specific Control 2 */
+#define GG82563_PHY_PAGE_SELECT_ALT     \
+	GG82563_REG(0, 29) /* Alternate Page Select */
+
+#define GG82563_PHY_MAC_SPEC_CTRL       \
+	GG82563_REG(2, 21) /* MAC Specific Control Register */
+
+#define GG82563_PHY_DSP_DISTANCE    \
+	GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+#define GG82563_PHY_KMRN_MODE_CTRL   \
+	GG82563_REG(193, 16) /* Kumeran Mode Control */
+#define GG82563_PHY_PWR_MGMT_CTRL       \
+	GG82563_REG(193, 20) /* Power Management Control */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_INBAND_CTRL         \
+	GG82563_REG(194, 18) /* Inband Control */
+
+/* MDI Control */
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE  0x04000000
+#define E1000_MDIC_OP_READ   0x08000000
+#define E1000_MDIC_READY     0x10000000
+#define E1000_MDIC_ERROR     0x40000000
+
+/* SerDes Control */
+#define E1000_GEN_POLL_TIMEOUT          640
+
+/* FW Semaphore */
+#define E1000_FWSM_WLOCK_MAC_MASK	0x0380
+#define E1000_FWSM_WLOCK_MAC_SHIFT	7
+
+#endif /* _E1000_DEFINES_H_ */
+++ linux-patched/drivers/xenomai/net/drivers/e1000e/lib.c	2022-03-21 12:58:29.565887245 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000e/82571.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+
+enum e1000_mng_mode {
+	e1000_mng_mode_none = 0,
+	e1000_mng_mode_asf,
+	e1000_mng_mode_pt,
+	e1000_mng_mode_ipmi,
+	e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG		0x20000000
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE		0x544D4149
+
+/**
+ *  e1000e_get_bus_info_pcie - Get PCIe bus information
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines and stores the system bus information for a particular
+ *  network interface.  The following bus information is determined and stored:
+ *  bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_bus_info *bus = &hw->bus;
+	struct e1000_adapter *adapter = hw->adapter;
+	u16 pcie_link_status, cap_offset;
+
+	cap_offset = pci_pcie_cap(adapter->pdev);
+	if (!cap_offset) {
+		bus->width = e1000_bus_width_unknown;
+	} else {
+		pci_read_config_word(adapter->pdev,
+				     cap_offset + PCIE_LINK_STATUS,
+				     &pcie_link_status);
+		bus->width = (enum e1000_bus_width)((pcie_link_status &
+						     PCIE_LINK_WIDTH_MASK) >>
+						    PCIE_LINK_WIDTH_SHIFT);
+	}
+
+	mac->ops.set_lan_id(hw);
+
+	return 0;
+}
+
+/**
+ *  e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines the LAN function id by reading memory-mapped registers
+ *  and swaps the port value if requested.
+ **/
+void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	u32 reg;
+
+	/*
+	 * The status register reports the correct function number
+	 * for the device regardless of function swap state.
+	 */
+	reg = er32(STATUS);
+	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+}
+
+/**
+ *  e1000_set_lan_id_single_port - Set LAN id for a single port device
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the LAN function id to zero for a single port device.
+ **/
+void e1000_set_lan_id_single_port(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+
+	bus->func = 0;
+}
+
+/**
+ *  e1000_clear_vfta_generic - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the register array which contains the VLAN filter table by
+ *  setting all the values to 0.
+ **/
+void e1000_clear_vfta_generic(struct e1000_hw *hw)
+{
+	u32 offset;
+
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+		e1e_flush();
+	}
+}
+
+/**
+ *  e1000_write_vfta_generic - Write value to VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset in VLAN filter table
+ *  @value: register value written to VLAN filter table
+ *
+ *  Writes value at the given offset in the register array which stores
+ *  the VLAN filter table.
+ **/
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+	e1e_flush();
+}
+
+/**
+ *  e1000e_init_rx_addrs - Initialize receive addresses
+ *  @hw: pointer to the HW structure
+ *  @rar_count: number of receive address registers
+ *
+ *  Sets up the receive address registers by setting the base receive address
+ *  register to the device's MAC address and clearing all the other receive
+ *  address registers to 0.
+ **/
+void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
+{
+	u32 i;
+	u8 mac_addr[ETH_ALEN] = {0};
+
+	/* Setup the receive address */
+	e_dbg("Programming MAC Address into RAR[0]\n");
+
+	e1000e_rar_set(hw, hw->mac.addr, 0);
+
+	/* Zero out the other (rar_entry_count - 1) receive addresses */
+	e_dbg("Clearing RAR[1-%u]\n", rar_count-1);
+	for (i = 1; i < rar_count; i++)
+		e1000e_rar_set(hw, mac_addr, i);
+}
+
+/**
+ *  e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the NVM for an alternate MAC address.  An alternate MAC address
+ *  can be set up by pre-boot software and must be treated like a permanent
+ *  address, overriding the actual permanent MAC address. If an
+ *  alternate MAC address is found it is programmed into RAR0, replacing
+ *  the permanent address that was installed into RAR0 by the Si on reset.
+ *  This function will return SUCCESS unless it encounters an error while
+ *  reading the EEPROM.
+ **/
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+{
+	u32 i;
+	s32 ret_val = 0;
+	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+	u8 alt_mac_addr[ETH_ALEN];
+
+	ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
+	if (ret_val)
+		goto out;
+
+	/* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
+	if (!((nvm_data & NVM_COMPAT_LOM) ||
+	      (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
+	      (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) ||
+	      (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES)))
+		goto out;
+
+	ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+	                         &nvm_alt_mac_addr_offset);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
+	    (nvm_alt_mac_addr_offset == 0x0000))
+		/* There is no Alternate MAC Address */
+		goto out;
+
+	if (hw->bus.func == E1000_FUNC_1)
+		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+	for (i = 0; i < ETH_ALEN; i += 2) {
+		offset = nvm_alt_mac_addr_offset + (i >> 1);
+		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
+		if (ret_val) {
+			e_dbg("NVM Read Error\n");
+			goto out;
+		}
+
+		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+	}
+
+	/* if multicast bit is set, the alternate address will not be used */
+	if (is_multicast_ether_addr(alt_mac_addr)) {
+		e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
+		goto out;
+	}
+
+	/*
+	 * We have a valid alternate MAC address, and we want to treat it the
+	 * same as the normal permanent MAC address stored by the HW into the
+	 * RAR. Do this by mapping this address into RAR0.
+	 */
+	e1000e_rar_set(hw, alt_mac_addr, 0);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_rar_set - Set receive address register
+ *  @hw: pointer to the HW structure
+ *  @addr: pointer to the receive address
+ *  @index: receive address array register
+ *
+ *  Sets the receive address array register at index to the address passed
+ *  in by addr.
+ **/
+void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+	u32 rar_low, rar_high;
+
+	/*
+	 * HW expects these in little endian so we reverse the byte order
+	 * from network order (big endian) to little endian
+	 */
+	rar_low = ((u32) addr[0] |
+		   ((u32) addr[1] << 8) |
+		    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+	/* If MAC address zero, no need to set the AV bit */
+	if (rar_low || rar_high)
+		rar_high |= E1000_RAH_AV;
+
+	/*
+	 * Some bridges will combine consecutive 32-bit writes into
+	 * a single burst write, which will malfunction on some parts.
+	 * The flushes avoid this.
+	 */
+	ew32(RAL(index), rar_low);
+	e1e_flush();
+	ew32(RAH(index), rar_high);
+	e1e_flush();
+}
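+
+/*
+ * Illustrative example (hypothetical address, not from the original
+ * sources): for addr = 00:1B:21:12:34:56, e1000e_rar_set() writes
+ * RAL = 0x12211B00 and RAH = 0x80005634, where the E1000_RAH_AV bit
+ * marks the entry valid.
+ */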
+
+/**
+ *  e1000_hash_mc_addr - Generate a multicast hash value
+ *  @hw: pointer to the HW structure
+ *  @mc_addr: pointer to a multicast address
+ *
+ *  Generates a multicast address hash value which is used to determine
+ *  the multicast filter table array address and new table value.  See
+ *  e1000_mta_set_generic()
+ **/
+static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+	u32 hash_value, hash_mask;
+	u8 bit_shift = 0;
+
+	/* Register count multiplied by bits per register */
+	hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+	/*
+	 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
+	 * where 0xFF would still fall within the hash mask.
+	 */
+	while (hash_mask >> bit_shift != 0xFF)
+		bit_shift++;
+
+	/*
+	 * The portion of the address that is used for the hash table
+	 * is determined by the mc_filter_type setting.
+	 * The algorithm is such that there is a total of 8 bits of shifting.
+	 * The bit_shift for a mc_filter_type of 0 represents the number of
+	 * left-shifts where the MSB of mc_addr[5] would still fall within
+	 * the hash_mask.  Case 0 does this exactly.  Since there are a total
+	 * of 8 bits of shifting, then mc_addr[4] will shift right the
+	 * remaining number of bits. Thus 8 - bit_shift.  The rest of the
+	 * cases are a variation of this algorithm...essentially raising the
+	 * number of bits to shift mc_addr[5] left, while still keeping the
+	 * 8-bit shifting total.
+	 *
+	 * For example, given the following Destination MAC Address and an
+	 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+	 * we can see that the bit_shift for case 0 is 4.  These are the hash
+	 * values resulting from each mc_filter_type...
+	 * [0] [1] [2] [3] [4] [5]
+	 * 01  AA  00  12  34  56
+	 * LSB		 MSB
+	 *
+	 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+	 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+	 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+	 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+	 */
+	switch (hw->mac.mc_filter_type) {
+	default:
+	case 0:
+		break;
+	case 1:
+		bit_shift += 1;
+		break;
+	case 2:
+		bit_shift += 2;
+		break;
+	case 3:
+		bit_shift += 4;
+		break;
+	}
+
+	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+				  (((u16) mc_addr[5]) << bit_shift)));
+
+	return hash_value;
+}
+
+/**
+ *  e1000e_update_mc_addr_list_generic - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *
+ *  Updates entire Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
+					u8 *mc_addr_list, u32 mc_addr_count)
+{
+	u32 hash_value, hash_bit, hash_reg;
+	int i;
+
+	/* clear mta_shadow */
+	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+	/* update mta_shadow from mc_addr_list */
+	for (i = 0; (u32) i < mc_addr_count; i++) {
+		hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
+
+		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+		hash_bit = hash_value & 0x1F;
+
+		hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+		mc_addr_list += (ETH_ALEN);
+	}
+
+	/* replace the entire MTA table */
+	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
+	e1e_flush();
+}
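+
+/*
+ * Illustrative note (not part of the original sources): continuing the
+ * worked example in e1000_hash_mc_addr(), a hash_value of 0x563 with 128
+ * MTA registers maps to hash_reg = (0x563 >> 5) & 0x7F = 0x2B and
+ * hash_bit = 0x563 & 0x1F = 0x3, so bit 3 of MTA[43] gets set.
+ */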
+
+/**
+ *  e1000e_clear_hw_cntrs_base - Clear base hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the base hardware counters by reading the counter registers.
+ **/
+void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
+{
+	er32(CRCERRS);
+	er32(SYMERRS);
+	er32(MPC);
+	er32(SCC);
+	er32(ECOL);
+	er32(MCC);
+	er32(LATECOL);
+	er32(COLC);
+	er32(DC);
+	er32(SEC);
+	er32(RLEC);
+	er32(XONRXC);
+	er32(XONTXC);
+	er32(XOFFRXC);
+	er32(XOFFTXC);
+	er32(FCRUC);
+	er32(GPRC);
+	er32(BPRC);
+	er32(MPRC);
+	er32(GPTC);
+	er32(GORCL);
+	er32(GORCH);
+	er32(GOTCL);
+	er32(GOTCH);
+	er32(RNBC);
+	er32(RUC);
+	er32(RFC);
+	er32(ROC);
+	er32(RJC);
+	er32(TORL);
+	er32(TORH);
+	er32(TOTL);
+	er32(TOTH);
+	er32(TPR);
+	er32(TPT);
+	er32(MPTC);
+	er32(BPTC);
+}
+
+/**
+ *  e1000e_check_for_copper_link - Check for link (Copper)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks to see if the link status of the hardware has changed.  If a
+ *  change in link status has been detected, then we read the PHY registers
+ *  to get the current speed/duplex if link exists.
+ **/
+s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	bool link;
+
+	/*
+	 * We only want to go out to the PHY registers to see if Auto-Neg
+	 * has completed and/or if our link status has changed.  The
+	 * get_link_status flag is set upon receiving a Link Status
+	 * Change or Rx Sequence Error interrupt.
+	 */
+	if (!mac->get_link_status)
+		return 0;
+
+	/*
+	 * First we want to see if the MII Status Register reports
+	 * link.  If so, then we want to get the current speed/duplex
+	 * of the PHY.
+	 */
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		return ret_val;
+
+	if (!link)
+		return ret_val; /* No link detected */
+
+	mac->get_link_status = false;
+
+	/*
+	 * Check if there was DownShift, must be checked
+	 * immediately after link-up
+	 */
+	e1000e_check_downshift(hw);
+
+	/*
+	 * If we are forcing speed/duplex, then we simply return since
+	 * we have already determined whether we have link or not.
+	 */
+	if (!mac->autoneg) {
+		ret_val = -E1000_ERR_CONFIG;
+		return ret_val;
+	}
+
+	/*
+	 * Auto-Neg is enabled.  Auto Speed Detection takes care
+	 * of MAC speed/duplex configuration.  So we only need to
+	 * configure Collision Distance in the MAC.
+	 */
+	e1000e_config_collision_dist(hw);
+
+	/*
+	 * Configure Flow Control now that Auto-Neg has completed.
+	 * First, we need to restore the desired flow control
+	 * settings because we may have had to re-autoneg with a
+	 * different link partner.
+	 */
+	ret_val = e1000e_config_fc_after_link_up(hw);
+	if (ret_val)
+		e_dbg("Error configuring flow control\n");
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_check_for_fiber_link - Check for link (Fiber)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks for link up on the hardware.  If link is not up and we have
+ *  a signal, then we need to force link up.
+ **/
+s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 rxcw;
+	u32 ctrl;
+	u32 status;
+	s32 ret_val;
+
+	ctrl = er32(CTRL);
+	status = er32(STATUS);
+	rxcw = er32(RXCW);
+
+	/*
+	 * If we don't have link (auto-negotiation failed or link partner
+	 * cannot auto-negotiate), the cable is plugged in (we have signal),
+	 * and our link partner is not trying to auto-negotiate with us (we
+	 * are receiving idles or data), we need to force link up. We also
+	 * need to give auto-negotiation time to complete, in case the cable
+	 * was just plugged in. The autoneg_failed flag does this.
+	 */
+	/* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+	if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
+	    (!(rxcw & E1000_RXCW_C))) {
+		if (mac->autoneg_failed == 0) {
+			mac->autoneg_failed = 1;
+			return 0;
+		}
+		e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+		/* Disable auto-negotiation in the TXCW register */
+		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+		/* Force link-up and also force full-duplex. */
+		ctrl = er32(CTRL);
+		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+		ew32(CTRL, ctrl);
+
+		/* Configure Flow Control after forcing link up. */
+		ret_val = e1000e_config_fc_after_link_up(hw);
+		if (ret_val) {
+			e_dbg("Error configuring flow control\n");
+			return ret_val;
+		}
+	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+		/*
+		 * If we are forcing link and we are receiving /C/ ordered
+		 * sets, re-enable auto-negotiation in the TXCW register
+		 * and disable forced link in the Device Control register
+		 * in an attempt to auto-negotiate with our link partner.
+		 */
+		e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+		ew32(TXCW, mac->txcw);
+		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+		mac->serdes_has_link = true;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_check_for_serdes_link - Check for link (Serdes)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks for link up on the hardware.  If link is not up and we have
+ *  a signal, then we need to force link up.
+ **/
+s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 rxcw;
+	u32 ctrl;
+	u32 status;
+	s32 ret_val;
+
+	ctrl = er32(CTRL);
+	status = er32(STATUS);
+	rxcw = er32(RXCW);
+
+	/*
+	 * If we don't have link (auto-negotiation failed or link partner
+	 * cannot auto-negotiate), and our link partner is not trying to
+	 * auto-negotiate with us (we are receiving idles or data),
+	 * we need to force link up. We also need to give auto-negotiation
+	 * time to complete.
+	 */
+	/* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+	if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
+		if (mac->autoneg_failed == 0) {
+			mac->autoneg_failed = 1;
+			return 0;
+		}
+		e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+		/* Disable auto-negotiation in the TXCW register */
+		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+		/* Force link-up and also force full-duplex. */
+		ctrl = er32(CTRL);
+		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+		ew32(CTRL, ctrl);
+
+		/* Configure Flow Control after forcing link up. */
+		ret_val = e1000e_config_fc_after_link_up(hw);
+		if (ret_val) {
+			e_dbg("Error configuring flow control\n");
+			return ret_val;
+		}
+	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+		/*
+		 * If we are forcing link and we are receiving /C/ ordered
+		 * sets, re-enable auto-negotiation in the TXCW register
+		 * and disable forced link in the Device Control register
+		 * in an attempt to auto-negotiate with our link partner.
+		 */
+		e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+		ew32(TXCW, mac->txcw);
+		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+		mac->serdes_has_link = true;
+	} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
+		/*
+		 * If we are forcing link for a non-auto-negotiating partner
+		 * (e.g. a switch), check link status based on MAC
+		 * synchronization for the internal serdes media type.
+		 */
+		/* SYNCH bit and IV bit are sticky. */
+		udelay(10);
+		rxcw = er32(RXCW);
+		if (rxcw & E1000_RXCW_SYNCH) {
+			if (!(rxcw & E1000_RXCW_IV)) {
+				mac->serdes_has_link = true;
+				e_dbg("SERDES: Link up - forced.\n");
+			}
+		} else {
+			mac->serdes_has_link = false;
+			e_dbg("SERDES: Link down - force failed.\n");
+		}
+	}
+
+	if (E1000_TXCW_ANE & er32(TXCW)) {
+		status = er32(STATUS);
+		if (status & E1000_STATUS_LU) {
+			/* SYNCH bit and IV bit are sticky, so reread rxcw.  */
+			udelay(10);
+			rxcw = er32(RXCW);
+			if (rxcw & E1000_RXCW_SYNCH) {
+				if (!(rxcw & E1000_RXCW_IV)) {
+					mac->serdes_has_link = true;
+					e_dbg("SERDES: Link up - autoneg "
+					   "completed successfully.\n");
+				} else {
+					mac->serdes_has_link = false;
+					e_dbg("SERDES: Link down - invalid"
+					   "codewords detected in autoneg.\n");
+				}
+			} else {
+				mac->serdes_has_link = false;
+				e_dbg("SERDES: Link down - no sync.\n");
+			}
+		} else {
+			mac->serdes_has_link = false;
+			e_dbg("SERDES: Link down - autoneg failed\n");
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_set_default_fc_generic - Set flow control default values
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the EEPROM for the default values for flow control and store the
+ *  values.
+ **/
+static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 nvm_data;
+
+	/*
+	 * Read and store word 0x0F of the EEPROM. This word contains bits
+	 * that determine the hardware's default PAUSE (flow control) mode,
+	 * a bit that determines whether the HW defaults to enabling or
+	 * disabling auto-negotiation, and the direction of the
+	 * SW defined pins. If there is no SW over-ride of the flow
+	 * control setting, then the variable hw->fc will
+	 * be initialized based on a value in the EEPROM.
+	 */
+	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
+
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		return ret_val;
+	}
+
+	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
+		hw->fc.requested_mode = e1000_fc_none;
+	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
+		 NVM_WORD0F_ASM_DIR)
+		hw->fc.requested_mode = e1000_fc_tx_pause;
+	else
+		hw->fc.requested_mode = e1000_fc_full;
+
+	return 0;
+}
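+
+/*
+ * Illustrative summary (not part of the original sources) of the NVM word
+ * 0x0F decoding above: bits 13:12 clear -> e1000_fc_none; only ASM_DIR
+ * (bit 13) set -> e1000_fc_tx_pause; any combination with PAUSE (bit 12)
+ * set -> e1000_fc_full.
+ */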
+
+/**
+ *  e1000e_setup_link - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+s32 e1000e_setup_link(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+
+	/*
+	 * In the case of the phy reset being blocked, we already have a link.
+	 * We do not need to set it up again.
+	 */
+	if (e1000_check_reset_block(hw))
+		return 0;
+
+	/*
+	 * If requested flow control is set to default, set flow control
+	 * based on the EEPROM flow control settings.
+	 */
+	if (hw->fc.requested_mode == e1000_fc_default) {
+		ret_val = e1000_set_default_fc_generic(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/*
+	 * Save off the requested flow control mode for use later.  Depending
+	 * on the link partner's capabilities, we may or may not use this mode.
+	 */
+	hw->fc.current_mode = hw->fc.requested_mode;
+
+	e_dbg("After fix-ups FlowControl is now = %x\n",
+		hw->fc.current_mode);
+
+	/* Call the necessary media_type subroutine to configure the link. */
+	ret_val = mac->ops.setup_physical_interface(hw);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Initialize the flow control address, type, and PAUSE timer
+	 * registers to their default values.  This is done even if flow
+	 * control is disabled, because it does not hurt anything to
+	 * initialize these registers.
+	 */
+	e_dbg("Initializing the Flow Control address, type and timer regs\n");
+	ew32(FCT, FLOW_CONTROL_TYPE);
+	ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+	ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+	ew32(FCTTV, hw->fc.pause_time);
+
+	return e1000e_set_fc_watermarks(hw);
+}
+
+/**
+ *  e1000_commit_fc_settings_generic - Configure flow control
+ *  @hw: pointer to the HW structure
+ *
+ *  Write the flow control settings to the Transmit Config Word Register (TXCW)
+ *  based on the flow control settings in e1000_mac_info.
+ **/
+static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 txcw;
+
+	/*
+	 * Check for a software override of the flow control settings, and
+	 * setup the device accordingly.  If auto-negotiation is enabled, then
+	 * software will have to set the "PAUSE" bits to the correct value in
+	 * the Transmit Config Word Register (TXCW) and re-start auto-
+	 * negotiation.  However, if auto-negotiation is disabled, then
+	 * software will have to manually configure the two flow control enable
+	 * bits in the CTRL register.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause frames,
+	 *          but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames but we
+	 *          do not support receiving pause frames).
+	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+	 */
+	switch (hw->fc.current_mode) {
+	case e1000_fc_none:
+		/* Flow control completely disabled by a software over-ride. */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+		break;
+	case e1000_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled and Tx Flow control is disabled
+		 * by a software over-ride. Since there really isn't a way to
+		 * advertise that we are capable of Rx Pause ONLY, we will
+		 * advertise that we support both symmetric and asymmetric Rx
+		 * PAUSE.  Later, we will disable the adapter's ability to send
+		 * PAUSE frames.
+		 */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+		break;
+	case e1000_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is disabled,
+		 * by a software over-ride.
+		 */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+		break;
+	case e1000_fc_full:
+		/*
+		 * Flow control (both Rx and Tx) is enabled by a software
+		 * over-ride.
+		 */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+		break;
+	default:
+		e_dbg("Flow control param set incorrectly\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	ew32(TXCW, txcw);
+	mac->txcw = txcw;
+
+	return 0;
+}
+
+/**
+ *  e1000_poll_fiber_serdes_link_generic - Poll for link up
+ *  @hw: pointer to the HW structure
+ *
+ *  Polls for link up by reading the status register, if link fails to come
+ *  up with auto-negotiation, then the link is forced if a signal is detected.
+ **/
+static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 i, status;
+	s32 ret_val;
+
+	/*
+	 * If we have a signal (the cable is plugged in, or assumed true for
+	 * serdes media) then poll for a "Link-Up" indication in the Device
+	 * Status Register.  Time-out if a link isn't seen in 500 milliseconds
+	 * (Auto-negotiation should complete in less than 500
+	 * milliseconds even if the other end is doing it in SW).
+	 */
+	for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
+		usleep_range(10000, 20000);
+		status = er32(STATUS);
+		if (status & E1000_STATUS_LU)
+			break;
+	}
+	if (i == FIBER_LINK_UP_LIMIT) {
+		e_dbg("Never got a valid link from auto-neg!!!\n");
+		mac->autoneg_failed = 1;
+		/*
+		 * AutoNeg failed to achieve a link, so we'll call
+		 * mac->check_for_link. This routine will force the
+		 * link up if we detect a signal. This will allow us to
+		 * communicate with non-autonegotiating link partners.
+		 */
+		ret_val = mac->ops.check_for_link(hw);
+		if (ret_val) {
+			e_dbg("Error while checking for link\n");
+			return ret_val;
+		}
+		mac->autoneg_failed = 0;
+	} else {
+		mac->autoneg_failed = 0;
+		e_dbg("Valid Link Found\n");
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures collision distance and flow control for fiber and serdes
+ *  links.  Upon successful setup, poll for link.
+ **/
+s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+
+	ctrl = er32(CTRL);
+
+	/* Take the link out of reset */
+	ctrl &= ~E1000_CTRL_LRST;
+
+	e1000e_config_collision_dist(hw);
+
+	ret_val = e1000_commit_fc_settings_generic(hw);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Since auto-negotiation is enabled, take the link out of reset (the
+	 * link will be in reset, because we previously reset the chip). This
+	 * will restart auto-negotiation.  If auto-negotiation is successful
+	 * then the link-up status bit will be set and the flow control enable
+	 * bits (RFCE and TFCE) will be set according to their negotiated value.
+	 */
+	e_dbg("Auto-negotiation enabled\n");
+
+	ew32(CTRL, ctrl);
+	e1e_flush();
+	usleep_range(1000, 2000);
+
+	/*
+	 * For these adapters, the SW definable pin 1 is set when the optics
+	 * detect a signal.  If we have a signal, then poll for a "Link-Up"
+	 * indication.
+	 */
+	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
+	    (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
+		ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+	} else {
+		e_dbg("No signal detected\n");
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_config_collision_dist - Configure collision distance
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the collision distance to the default value and is used
+ *  during link setup. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+void e1000e_config_collision_dist(struct e1000_hw *hw)
+{
+	u32 tctl;
+
+	tctl = er32(TCTL);
+
+	tctl &= ~E1000_TCTL_COLD;
+	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+	ew32(TCTL, tctl);
+	e1e_flush();
+}
+
+/**
+ *  e1000e_set_fc_watermarks - Set flow control high/low watermarks
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the flow control high/low threshold (watermark) registers.  If
+ *  flow control XON frame transmission is enabled, then set XON frame
+ *  transmission as well.
+ **/
+s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
+{
+	u32 fcrtl = 0, fcrth = 0;
+
+	/*
+	 * Set the flow control receive threshold registers.  Normally,
+	 * these registers will be set to a default threshold that may be
+	 * adjusted later by the driver's runtime code.  However, if the
+	 * ability to transmit pause frames is not enabled, then these
+	 * registers will be set to 0.
+	 */
+	if (hw->fc.current_mode & e1000_fc_tx_pause) {
+		/*
+		 * We need to set up the Receive Threshold high and low water
+		 * marks as well as (optionally) enabling the transmission of
+		 * XON frames.
+		 */
+		fcrtl = hw->fc.low_water;
+		fcrtl |= E1000_FCRTL_XONE;
+		fcrth = hw->fc.high_water;
+	}
+	ew32(FCRTL, fcrtl);
+	ew32(FCRTH, fcrth);
+
+	return 0;
+}
+
+/**
+ *  e1000e_force_mac_fc - Force the MAC's flow control settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
+ *  device control register to reflect the adapter settings.  TFCE and RFCE
+ *  need to be explicitly set by software when a copper PHY is used because
+ *  autonegotiation is managed by the PHY rather than the MAC.  Software must
+ *  also configure these bits when link is forced on a fiber connection.
+ **/
+s32 e1000e_force_mac_fc(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	ctrl = er32(CTRL);
+
+	/*
+	 * Because we didn't get link via the internal auto-negotiation
+	 * mechanism (we either forced link or we got link via PHY
+	 * auto-neg), we have to manually enable/disable transmit and
+	 * receive flow control.
+	 *
+	 * The "Case" statement below enables/disables flow control
+	 * according to the "hw->fc.current_mode" parameter.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause
+	 *          frames but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *          but we do not receive pause frames).
+	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+	 *  other:  No other values should be possible at this point.
+	 */
+	e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+	switch (hw->fc.current_mode) {
+	case e1000_fc_none:
+		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+		break;
+	case e1000_fc_rx_pause:
+		ctrl &= (~E1000_CTRL_TFCE);
+		ctrl |= E1000_CTRL_RFCE;
+		break;
+	case e1000_fc_tx_pause:
+		ctrl &= (~E1000_CTRL_RFCE);
+		ctrl |= E1000_CTRL_TFCE;
+		break;
+	case e1000_fc_full:
+		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+		break;
+	default:
+		e_dbg("Flow control param set incorrectly\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	ew32(CTRL, ctrl);
+
+	return 0;
+}
+
+/**
+ *  e1000e_config_fc_after_link_up - Configures flow control after link
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the status of auto-negotiation after link up to ensure that the
+ *  speed and duplex were not forced.  If the link needed to be forced, then
+ *  flow control needs to be forced also.  If auto-negotiation is enabled
+ *  and did not fail, then we configure flow control based on our link
+ *  partner.
+ **/
+s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = 0;
+	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+	u16 speed, duplex;
+
+	/*
+	 * Check for the case where we have fiber media and auto-neg failed
+	 * so we had to force link.  In this case, we need to force the
+	 * configuration of the MAC to match the "fc" parameter.
+	 */
+	if (mac->autoneg_failed) {
+		if (hw->phy.media_type == e1000_media_type_fiber ||
+		    hw->phy.media_type == e1000_media_type_internal_serdes)
+			ret_val = e1000e_force_mac_fc(hw);
+	} else {
+		if (hw->phy.media_type == e1000_media_type_copper)
+			ret_val = e1000e_force_mac_fc(hw);
+	}
+
+	if (ret_val) {
+		e_dbg("Error forcing flow control settings\n");
+		return ret_val;
+	}
+
+	/*
+	 * Check for the case where we have copper media and auto-neg is
+	 * enabled.  In this case, we need to check and see if Auto-Neg
+	 * has completed, and if so, how the PHY and link partner has
+	 * flow control configured.
+	 */
+	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
+		/*
+		 * Read the MII Status Register and check to see if AutoNeg
+		 * has completed.  We read this twice because this reg has
+		 * some "sticky" (latched) bits.
+		 */
+		ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			return ret_val;
+		ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			return ret_val;
+
+		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+			e_dbg("Copper PHY and Auto Neg "
+				 "has not completed.\n");
+			return ret_val;
+		}
+
+		/*
+		 * The AutoNeg process has completed, so we now need to
+		 * read both the Auto Negotiation Advertisement
+		 * Register (Address 4) and the Auto-Negotiation Base
+		 * Page Ability Register (Address 5) to determine how
+		 * flow control was negotiated.
+		 */
+		ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
+		if (ret_val)
+			return ret_val;
+		ret_val =
+		    e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
+		if (ret_val)
+			return ret_val;
+
+		/*
+		 * Two bits in the Auto Negotiation Advertisement Register
+		 * (Address 4) and two bits in the Auto Negotiation Base
+		 * Page Ability Register (Address 5) determine flow control
+		 * for both the PHY and the link partner.  The following
+		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+		 * 1999, describes these PAUSE resolution bits and how flow
+		 * control is determined based upon these settings.
+		 * NOTE:  DC = Don't Care
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+		 *-------|---------|-------|---------|--------------------
+		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
+		 *   0   |    1    |   0   |   DC    | e1000_fc_none
+		 *   0   |    1    |   1   |    0    | e1000_fc_none
+		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+		 *   1   |    0    |   0   |   DC    | e1000_fc_none
+		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+		 *   1   |    1    |   0   |    0    | e1000_fc_none
+		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+		 *
+		 * Are both PAUSE bits set to 1?  If so, this implies
+		 * Symmetric Flow Control is enabled at both ends.  The
+		 * ASM_DIR bits are irrelevant per the spec.
+		 *
+		 * For Symmetric Flow Control:
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   1   |   DC    |   1   |   DC    | E1000_fc_full
+		 *
+		 */
+		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+			/*
+			 * Now we need to check if the user selected Rx-only
+			 * pause frames.  In this case, we had to advertise
+			 * FULL flow control because we could not advertise Rx
+			 * ONLY. Hence, we must now check to see if we need to
+			 * turn OFF the TRANSMISSION of PAUSE frames.
+			 */
+			if (hw->fc.requested_mode == e1000_fc_full) {
+				hw->fc.current_mode = e1000_fc_full;
+				e_dbg("Flow Control = FULL.\r\n");
+			} else {
+				hw->fc.current_mode = e1000_fc_rx_pause;
+				e_dbg("Flow Control = "
+				      "Rx PAUSE frames only.\r\n");
+			}
+		}
+		/*
+		 * For receiving PAUSE frames ONLY.
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+		 */
+		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+			  (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+			  (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+			  (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+			hw->fc.current_mode = e1000_fc_tx_pause;
+			e_dbg("Flow Control = Tx PAUSE frames only.\r\n");
+		}
+		/*
+		 * For transmitting PAUSE frames ONLY.
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+		 */
+		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+			hw->fc.current_mode = e1000_fc_rx_pause;
+			e_dbg("Flow Control = Rx PAUSE frames only.\r\n");
+		} else {
+			/*
+			 * Per the IEEE spec, at this point flow control
+			 * should be disabled.
+			 */
+			hw->fc.current_mode = e1000_fc_none;
+			e_dbg("Flow Control = NONE.\r\n");
+		}
+
+		/*
+		 * Now we need to do one last check...  If we auto-
+		 * negotiated to HALF DUPLEX, flow control should not be
+		 * enabled per IEEE 802.3 spec.
+		 */
+		ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
+		if (ret_val) {
+			e_dbg("Error getting link speed and duplex\n");
+			return ret_val;
+		}
+
+		if (duplex == HALF_DUPLEX)
+			hw->fc.current_mode = e1000_fc_none;
+
+		/*
+		 * Now we call a subroutine to actually force the MAC
+		 * controller to use the correct flow control settings.
+		 */
+		ret_val = e1000e_force_mac_fc(hw);
+		if (ret_val) {
+			e_dbg("Error forcing flow control settings\n");
+			return ret_val;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Read the status register for the current speed/duplex and store the current
+ *  speed and duplex for copper connections.
+ **/
+s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+	u32 status;
+
+	status = er32(STATUS);
+	if (status & E1000_STATUS_SPEED_1000)
+		*speed = SPEED_1000;
+	else if (status & E1000_STATUS_SPEED_100)
+		*speed = SPEED_100;
+	else
+		*speed = SPEED_10;
+
+	if (status & E1000_STATUS_FD)
+		*duplex = FULL_DUPLEX;
+	else
+		*duplex = HALF_DUPLEX;
+
+	e_dbg("%u Mbps, %s Duplex\n",
+	      *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10,
+	      *duplex == FULL_DUPLEX ? "Full" : "Half");
+
+	return 0;
+}
+
+/**
+ *  e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Sets the speed and duplex to gigabit full duplex (the only possible option)
+ *  for fiber/serdes links.
+ **/
+s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+	*speed = SPEED_1000;
+	*duplex = FULL_DUPLEX;
+
+	return 0;
+}
+
+/**
+ *  e1000e_get_hw_semaphore - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM
+ **/
+s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
+{
+	u32 swsm;
+	s32 timeout = hw->nvm.word_size + 1;
+	s32 i = 0;
+
+	/* Get the SW semaphore */
+	while (i < timeout) {
+		swsm = er32(SWSM);
+		if (!(swsm & E1000_SWSM_SMBI))
+			break;
+
+		udelay(50);
+		i++;
+	}
+
+	if (i == timeout) {
+		e_dbg("Driver can't access device - SMBI bit is set.\n");
+		return -E1000_ERR_NVM;
+	}
+
+	/* Get the FW semaphore. */
+	for (i = 0; i < timeout; i++) {
+		swsm = er32(SWSM);
+		ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
+
+		/* Semaphore acquired if bit latched */
+		if (er32(SWSM) & E1000_SWSM_SWESMBI)
+			break;
+
+		udelay(50);
+	}
+
+	if (i == timeout) {
+		/* Release semaphores */
+		e1000e_put_hw_semaphore(hw);
+		e_dbg("Driver can't access the NVM\n");
+		return -E1000_ERR_NVM;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_put_hw_semaphore - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used to access the PHY or NVM
+ **/
+void e1000e_put_hw_semaphore(struct e1000_hw *hw)
+{
+	u32 swsm;
+
+	swsm = er32(SWSM);
+	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+	ew32(SWSM, swsm);
+}
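+
+/*
+ * Sketch of the typical acquire/release pairing around NVM or PHY accesses
+ * (illustrative only, not part of the original sources):
+ *
+ *	ret_val = e1000e_get_hw_semaphore(hw);
+ *	if (ret_val)
+ *		return ret_val;
+ *	... access NVM or PHY registers ...
+ *	e1000e_put_hw_semaphore(hw);
+ */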
+
+/**
+ *  e1000e_get_auto_rd_done - Check for auto read completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Check EEPROM for Auto Read done bit.
+ **/
+s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
+{
+	s32 i = 0;
+
+	while (i < AUTO_READ_DONE_TIMEOUT) {
+		if (er32(EECD) & E1000_EECD_AUTO_RD)
+			break;
+		usleep_range(1000, 2000);
+		i++;
+	}
+
+	if (i == AUTO_READ_DONE_TIMEOUT) {
+		e_dbg("Auto read by HW from NVM has not completed.\n");
+		return -E1000_ERR_RESET;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_valid_led_default - Verify a valid default LED config
+ *  @hw: pointer to the HW structure
+ *  @data: pointer to the NVM (EEPROM)
+ *
+ *  Read the EEPROM for the current default LED configuration.  If the
+ *  LED configuration is not valid, set to a valid LED configuration.
+ **/
+s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		return ret_val;
+	}
+
+	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
+		*data = ID_LED_DEFAULT;
+
+	return 0;
+}
+
+/**
+ *  e1000e_id_led_init - Initialize ID LED settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the ID LED configuration from the NVM and derives the LEDCTL
+ *  values for LED modes 1 and 2 from the default LEDCTL register value.
+ **/
+s32 e1000e_id_led_init(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	const u32 ledctl_mask = 0x000000FF;
+	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+	u16 data, i, temp;
+	const u16 led_mask = 0x0F;
+
+	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+	if (ret_val)
+		return ret_val;
+
+	mac->ledctl_default = er32(LEDCTL);
+	mac->ledctl_mode1 = mac->ledctl_default;
+	mac->ledctl_mode2 = mac->ledctl_default;
+
+	for (i = 0; i < 4; i++) {
+		temp = (data >> (i << 2)) & led_mask;
+		switch (temp) {
+		case ID_LED_ON1_DEF2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_ON1_OFF2:
+			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode1 |= ledctl_on << (i << 3);
+			break;
+		case ID_LED_OFF1_DEF2:
+		case ID_LED_OFF1_ON2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode1 |= ledctl_off << (i << 3);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		switch (temp) {
+		case ID_LED_DEF1_ON2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_OFF1_ON2:
+			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode2 |= ledctl_on << (i << 3);
+			break;
+		case ID_LED_DEF1_OFF2:
+		case ID_LED_ON1_OFF2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode2 |= ledctl_off << (i << 3);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+	}
+
+	return 0;
+}
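+
+/*
+ * Layout note derived from the loop above (illustrative, not a datasheet
+ * quote): the ID LED word holds four 4-bit fields, nibble i (bits 4*i..4*i+3)
+ * describing LED i.  Each field selects whether byte i of LEDCTL is forced
+ * on or off in ledctl_mode1 (used below as the LED "off"/setup state) and in
+ * ledctl_mode2 (used below as the LED "on" state).
+ */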
+
+/**
+ *  e1000e_setup_led_generic - Configures SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This prepares the SW controllable LED for use and saves the current state
+ *  of the LED so it can be later restored.
+ **/
+s32 e1000e_setup_led_generic(struct e1000_hw *hw)
+{
+	u32 ledctl;
+
+	if (hw->mac.ops.setup_led != e1000e_setup_led_generic)
+		return -E1000_ERR_CONFIG;
+
+	if (hw->phy.media_type == e1000_media_type_fiber) {
+		ledctl = er32(LEDCTL);
+		hw->mac.ledctl_default = ledctl;
+		/* Turn off LED0 */
+		ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+		            E1000_LEDCTL_LED0_BLINK |
+		            E1000_LEDCTL_LED0_MODE_MASK);
+		ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+		           E1000_LEDCTL_LED0_MODE_SHIFT);
+		ew32(LEDCTL, ledctl);
+	} else if (hw->phy.media_type == e1000_media_type_copper) {
+		ew32(LEDCTL, hw->mac.ledctl_mode1);
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_cleanup_led_generic - Set LED config to default operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Remove the current LED configuration and set the LED configuration
+ *  to the default value, saved from the EEPROM.
+ **/
+s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
+{
+	ew32(LEDCTL, hw->mac.ledctl_default);
+	return 0;
+}
+
+/**
+ *  e1000e_blink_led_generic - Blink LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Blink the LEDs which are set to be on.
+ **/
+s32 e1000e_blink_led_generic(struct e1000_hw *hw)
+{
+	u32 ledctl_blink = 0;
+	u32 i;
+
+	if (hw->phy.media_type == e1000_media_type_fiber) {
+		/* always blink LED0 for PCI-E fiber */
+		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+	} else {
+		/*
+		 * set the blink bit for each LED that's "on" (0x0E)
+		 * in ledctl_mode2
+		 */
+		ledctl_blink = hw->mac.ledctl_mode2;
+		for (i = 0; i < 4; i++)
+			if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+			    E1000_LEDCTL_MODE_LED_ON)
+				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
+						 (i * 8));
+	}
+
+	ew32(LEDCTL, ledctl_blink);
+
+	return 0;
+}
+
+/**
+ *  e1000e_led_on_generic - Turn LED on
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED on.
+ **/
+s32 e1000e_led_on_generic(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	switch (hw->phy.media_type) {
+	case e1000_media_type_fiber:
+		ctrl = er32(CTRL);
+		ctrl &= ~E1000_CTRL_SWDPIN0;
+		ctrl |= E1000_CTRL_SWDPIO0;
+		ew32(CTRL, ctrl);
+		break;
+	case e1000_media_type_copper:
+		ew32(LEDCTL, hw->mac.ledctl_mode2);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_led_off_generic - Turn LED off
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED off.
+ **/
+s32 e1000e_led_off_generic(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	switch (hw->phy.media_type) {
+	case e1000_media_type_fiber:
+		ctrl = er32(CTRL);
+		ctrl |= E1000_CTRL_SWDPIN0;
+		ctrl |= E1000_CTRL_SWDPIO0;
+		ew32(CTRL, ctrl);
+		break;
+	case e1000_media_type_copper:
+		ew32(LEDCTL, hw->mac.ledctl_mode1);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_set_pcie_no_snoop - Set PCI-express capabilities
+ *  @hw: pointer to the HW structure
+ *  @no_snoop: bitmap of no-snoop events to enable
+ *
+ *  Set the PCI-Express GCR no-snoop bits for the events enabled in 'no_snoop'.
+ **/
+void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
+{
+	u32 gcr;
+
+	if (no_snoop) {
+		gcr = er32(GCR);
+		gcr &= ~(PCIE_NO_SNOOP_ALL);
+		gcr |= no_snoop;
+		ew32(GCR, gcr);
+	}
+}
+
+/**
+ *  e1000e_disable_pcie_master - Disables PCI-express master access
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns 0 if successful, else returns -10
+ *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
+ *  the master requests to be disabled.
+ *
+ *  Disables PCI-Express master access and verifies there are no pending
+ *  requests.
+ **/
+s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 timeout = MASTER_DISABLE_TIMEOUT;
+
+	ctrl = er32(CTRL);
+	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+	ew32(CTRL, ctrl);
+
+	while (timeout) {
+		if (!(er32(STATUS) &
+		      E1000_STATUS_GIO_MASTER_ENABLE))
+			break;
+		udelay(100);
+		timeout--;
+	}
+
+	if (!timeout) {
+		e_dbg("Master requests are pending.\n");
+		return -E1000_ERR_MASTER_REQUESTS_PENDING;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_reset_adaptive - Reset Adaptive Interframe Spacing
+ *  @hw: pointer to the HW structure
+ *
+ *  Reset the Adaptive Interframe Spacing throttle to default values.
+ **/
+void e1000e_reset_adaptive(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+
+	if (!mac->adaptive_ifs) {
+		e_dbg("Not in Adaptive IFS mode!\n");
+		goto out;
+	}
+
+	mac->current_ifs_val = 0;
+	mac->ifs_min_val = IFS_MIN;
+	mac->ifs_max_val = IFS_MAX;
+	mac->ifs_step_size = IFS_STEP;
+	mac->ifs_ratio = IFS_RATIO;
+
+	mac->in_ifs_mode = false;
+	ew32(AIT, 0);
+out:
+	return;
+}
+
+/**
+ *  e1000e_update_adaptive - Update Adaptive Interframe Spacing
+ *  @hw: pointer to the HW structure
+ *
+ *  Update the Adaptive Interframe Spacing Throttle value based on the
+ *  time between transmitted packets and time between collisions.
+ **/
+void e1000e_update_adaptive(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+
+	if (!mac->adaptive_ifs) {
+		e_dbg("Not in Adaptive IFS mode!\n");
+		goto out;
+	}
+
+	if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
+		if (mac->tx_packet_delta > MIN_NUM_XMITS) {
+			mac->in_ifs_mode = true;
+			if (mac->current_ifs_val < mac->ifs_max_val) {
+				if (!mac->current_ifs_val)
+					mac->current_ifs_val = mac->ifs_min_val;
+				else
+					mac->current_ifs_val +=
+						mac->ifs_step_size;
+				ew32(AIT, mac->current_ifs_val);
+			}
+		}
+	} else {
+		if (mac->in_ifs_mode &&
+		    (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
+			mac->current_ifs_val = 0;
+			mac->in_ifs_mode = false;
+			ew32(AIT, 0);
+		}
+	}
+out:
+	return;
+}
+
+/**
+ *  e1000_raise_eec_clk - Raise EEPROM clock
+ *  @hw: pointer to the HW structure
+ *  @eecd: pointer to the EEPROM
+ *
+ *  Enable/Raise the EEPROM clock bit.
+ **/
+static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd | E1000_EECD_SK;
+	ew32(EECD, *eecd);
+	e1e_flush();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ *  e1000_lower_eec_clk - Lower EEPROM clock
+ *  @hw: pointer to the HW structure
+ *  @eecd: pointer to the EEPROM
+ *
+ *  Clear/Lower the EEPROM clock bit.
+ **/
+static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd & ~E1000_EECD_SK;
+	ew32(EECD, *eecd);
+	e1e_flush();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ *  e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
+ *  @hw: pointer to the HW structure
+ *  @data: data to send to the EEPROM
+ *  @count: number of bits to shift out
+ *
+ *  We need to shift 'count' bits out to the EEPROM.  So, the value in the
+ *  "data" parameter will be shifted out to the EEPROM one bit at a time.
+ *  In order to do this, "data" must be broken down into bits.
+ **/
+static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = er32(EECD);
+	u32 mask;
+
+	mask = 0x01 << (count - 1);
+	if (nvm->type == e1000_nvm_eeprom_spi)
+		eecd |= E1000_EECD_DO;
+
+	do {
+		eecd &= ~E1000_EECD_DI;
+
+		if (data & mask)
+			eecd |= E1000_EECD_DI;
+
+		ew32(EECD, eecd);
+		e1e_flush();
+
+		udelay(nvm->delay_usec);
+
+		e1000_raise_eec_clk(hw, &eecd);
+		e1000_lower_eec_clk(hw, &eecd);
+
+		mask >>= 1;
+	} while (mask);
+
+	eecd &= ~E1000_EECD_DI;
+	ew32(EECD, eecd);
+}
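+
+/*
+ * Example of the MSB-first shifting done above (illustrative): with
+ * data = 0xA5 and count = 8, the mask starts at 0x80 and the bits
+ * 1,0,1,0,0,1,0,1 are presented on DI in turn, each latched by one
+ * raise/lower cycle of the SK clock.
+ */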
+
+/**
+ *  e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
+ *  @hw: pointer to the HW structure
+ *  @count: number of bits to shift in
+ *
+ *  In order to read a register from the EEPROM, we need to shift 'count' bits
+ *  in from the EEPROM.  Bits are "shifted in" by raising the clock input to
+ *  the EEPROM (setting the SK bit), and then reading the value of the data out
+ *  "DO" bit.  During this "shifting in" process the data in "DI" bit should
+ *  always be clear.
+ **/
+static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+{
+	u32 eecd;
+	u32 i;
+	u16 data;
+
+	eecd = er32(EECD);
+
+	eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+	data = 0;
+
+	for (i = 0; i < count; i++) {
+		data <<= 1;
+		e1000_raise_eec_clk(hw, &eecd);
+
+		eecd = er32(EECD);
+
+		eecd &= ~E1000_EECD_DI;
+		if (eecd & E1000_EECD_DO)
+			data |= 1;
+
+		e1000_lower_eec_clk(hw, &eecd);
+	}
+
+	return data;
+}
+
+/**
+ *  e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ *  @hw: pointer to the HW structure
+ *  @ee_reg: EEPROM flag for polling
+ *
+ *  Polls the EEPROM status bit for either read or write completion based
+ *  upon the value of 'ee_reg'.
+ **/
+s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+{
+	u32 attempts = 100000;
+	u32 i, reg = 0;
+
+	for (i = 0; i < attempts; i++) {
+		if (ee_reg == E1000_NVM_POLL_READ)
+			reg = er32(EERD);
+		else
+			reg = er32(EEWR);
+
+		if (reg & E1000_NVM_RW_REG_DONE)
+			return 0;
+
+		udelay(5);
+	}
+
+	return -E1000_ERR_NVM;
+}
+
+/**
+ *  e1000e_acquire_nvm - Generic request for access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ *  Return successful if access grant bit set, else clear the request for
+ *  EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+s32 e1000e_acquire_nvm(struct e1000_hw *hw)
+{
+	u32 eecd = er32(EECD);
+	s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
+
+	ew32(EECD, eecd | E1000_EECD_REQ);
+	eecd = er32(EECD);
+
+	while (timeout) {
+		if (eecd & E1000_EECD_GNT)
+			break;
+		udelay(5);
+		eecd = er32(EECD);
+		timeout--;
+	}
+
+	if (!timeout) {
+		eecd &= ~E1000_EECD_REQ;
+		ew32(EECD, eecd);
+		e_dbg("Could not acquire NVM grant\n");
+		return -E1000_ERR_NVM;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_standby_nvm - Return EEPROM to standby state
+ *  @hw: pointer to the HW structure
+ *
+ *  Return the EEPROM to a standby state.
+ **/
+static void e1000_standby_nvm(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = er32(EECD);
+
+	if (nvm->type == e1000_nvm_eeprom_spi) {
+		/* Toggle CS to flush commands */
+		eecd |= E1000_EECD_CS;
+		ew32(EECD, eecd);
+		e1e_flush();
+		udelay(nvm->delay_usec);
+		eecd &= ~E1000_EECD_CS;
+		ew32(EECD, eecd);
+		e1e_flush();
+		udelay(nvm->delay_usec);
+	}
+}
+
+/**
+ *  e1000_stop_nvm - Terminate EEPROM command
+ *  @hw: pointer to the HW structure
+ *
+ *  Terminates the current command by inverting the EEPROM's chip select pin.
+ **/
+static void e1000_stop_nvm(struct e1000_hw *hw)
+{
+	u32 eecd;
+
+	eecd = er32(EECD);
+	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+		/* Pull CS high */
+		eecd |= E1000_EECD_CS;
+		e1000_lower_eec_clk(hw, &eecd);
+	}
+}
+
+/**
+ *  e1000e_release_nvm - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void e1000e_release_nvm(struct e1000_hw *hw)
+{
+	u32 eecd;
+
+	e1000_stop_nvm(hw);
+
+	eecd = er32(EECD);
+	eecd &= ~E1000_EECD_REQ;
+	ew32(EECD, eecd);
+}
+
+/**
+ *  e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up the EEPROM for reading and writing.
+ **/
+static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = er32(EECD);
+	u8 spi_stat_reg;
+
+	if (nvm->type == e1000_nvm_eeprom_spi) {
+		u16 timeout = NVM_MAX_RETRY_SPI;
+
+		/* Clear SK and CS */
+		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+		ew32(EECD, eecd);
+		e1e_flush();
+		udelay(1);
+
+		/*
+		 * Read "Status Register" repeatedly until the LSB is cleared.
+		 * The EEPROM will signal that the command has been completed
+		 * by clearing bit 0 of the internal status register.  If it's
+		 * not cleared within 'timeout', then error out.
+		 */
+		while (timeout) {
+			e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+						 hw->nvm.opcode_bits);
+			spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
+			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+				break;
+
+			udelay(5);
+			e1000_standby_nvm(hw);
+			timeout--;
+		}
+
+		if (!timeout) {
+			e_dbg("SPI NVM Status error\n");
+			return -E1000_ERR_NVM;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_read_nvm_eerd - Reads EEPROM using EERD register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i, eerd = 0;
+	s32 ret_val = 0;
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * too many words for the offset, and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		e_dbg("nvm parameter(s) out of bounds\n");
+		return -E1000_ERR_NVM;
+	}
+
+	for (i = 0; i < words; i++) {
+		eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
+		       E1000_NVM_RW_REG_START;
+
+		ew32(EERD, eerd);
+		ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+		if (ret_val)
+			break;
+
+		data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_write_nvm_spi - Write to EEPROM using SPI
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  Writes data to EEPROM at offset using SPI interface.
+ *
+ *  If e1000e_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32 ret_val;
+	u16 widx = 0;
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		e_dbg("nvm parameter(s) out of bounds\n");
+		return -E1000_ERR_NVM;
+	}
+
+	ret_val = nvm->ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	while (widx < words) {
+		u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+		ret_val = e1000_ready_nvm_eeprom(hw);
+		if (ret_val) {
+			nvm->ops.release(hw);
+			return ret_val;
+		}
+
+		e1000_standby_nvm(hw);
+
+		/* Send the WRITE ENABLE command (8 bit opcode) */
+		e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+					 nvm->opcode_bits);
+
+		e1000_standby_nvm(hw);
+
+		/*
+		 * Some SPI eeproms use the 8th address bit embedded in the
+		 * opcode
+		 */
+		if ((nvm->address_bits == 8) && (offset >= 128))
+			write_opcode |= NVM_A8_OPCODE_SPI;
+
+		/* Send the Write command (8-bit opcode + addr) */
+		e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+		e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+					 nvm->address_bits);
+
+		/* Loop to allow for up to whole page write of eeprom */
+		while (widx < words) {
+			u16 word_out = data[widx];
+			word_out = (word_out >> 8) | (word_out << 8);
+			e1000_shift_out_eec_bits(hw, word_out, 16);
+			widx++;
+
+			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+				e1000_standby_nvm(hw);
+				break;
+			}
+		}
+	}
+
+	usleep_range(10000, 20000);
+	nvm->ops.release(hw);
+	return 0;
+}
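+
+/*
+ * Hypothetical caller sketch (not part of the driver): as noted above, a
+ * word write is normally followed by a checksum update so the NVM stays
+ * consistent, e.g.:
+ *
+ *	ret_val = e1000_write_nvm(hw, offset, 1, &word);
+ *	if (!ret_val)
+ *		ret_val = e1000e_update_nvm_checksum(hw);
+ */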
+
+/**
+ *  e1000_read_pba_string_generic - Read device part number
+ *  @hw: pointer to the HW structure
+ *  @pba_num: pointer to device part number
+ *  @pba_num_size: size of part number buffer
+ *
+ *  Reads the product board assembly (PBA) number from the EEPROM and stores
+ *  the value in pba_num.
+ **/
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+				  u32 pba_num_size)
+{
+	s32 ret_val;
+	u16 nvm_data;
+	u16 pba_ptr;
+	u16 offset;
+	u16 length;
+
+	if (pba_num == NULL) {
+		e_dbg("PBA string buffer was null\n");
+		ret_val = E1000_ERR_INVALID_ARGUMENT;
+		goto out;
+	}
+
+	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	/*
+	 * If nvm_data is not the pointer guard, the PBA is stored in the
+	 * legacy format: pba_ptr is then the second data word of the PBA
+	 * number and can be decoded into an ASCII string.
+	 */
+	if (nvm_data != NVM_PBA_PTR_GUARD) {
+		e_dbg("NVM PBA number is not stored as string\n");
+
+		/* we will need 11 characters to store the PBA */
+		if (pba_num_size < 11) {
+			e_dbg("PBA string buffer too small\n");
+			return E1000_ERR_NO_SPACE;
+		}
+
+		/* extract hex string from data and pba_ptr */
+		pba_num[0] = (nvm_data >> 12) & 0xF;
+		pba_num[1] = (nvm_data >> 8) & 0xF;
+		pba_num[2] = (nvm_data >> 4) & 0xF;
+		pba_num[3] = nvm_data & 0xF;
+		pba_num[4] = (pba_ptr >> 12) & 0xF;
+		pba_num[5] = (pba_ptr >> 8) & 0xF;
+		pba_num[6] = '-';
+		pba_num[7] = 0;
+		pba_num[8] = (pba_ptr >> 4) & 0xF;
+		pba_num[9] = pba_ptr & 0xF;
+
+		/* put a null character on the end of our string */
+		pba_num[10] = '\0';
+
+		/* switch all the data but the '-' to hex char */
+		for (offset = 0; offset < 10; offset++) {
+			if (pba_num[offset] < 0xA)
+				pba_num[offset] += '0';
+			else if (pba_num[offset] < 0x10)
+				pba_num[offset] += 'A' - 0xA;
+		}
+
+		goto out;
+	}
+
+	ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if (length == 0xFFFF || length == 0) {
+		e_dbg("NVM PBA number section invalid length\n");
+		ret_val = E1000_ERR_NVM_PBA_SECTION;
+		goto out;
+	}
+	/* check if pba_num buffer is big enough */
+	if (pba_num_size < (((u32)length * 2) - 1)) {
+		e_dbg("PBA string buffer too small\n");
+		ret_val = E1000_ERR_NO_SPACE;
+		goto out;
+	}
+
+	/* trim pba length from start of string */
+	pba_ptr++;
+	length--;
+
+	for (offset = 0; offset < length; offset++) {
+		ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data);
+		if (ret_val) {
+			e_dbg("NVM Read Error\n");
+			goto out;
+		}
+		pba_num[offset * 2] = (u8)(nvm_data >> 8);
+		pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+	}
+	pba_num[offset * 2] = '\0';
+
+out:
+	return ret_val;
+}
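+
+/*
+ * Worked example of the legacy decode above (illustrative): with
+ * nvm_data = 0x1234 and pba_ptr = 0xABCD the buffer is filled with the
+ * nibbles 1,2,3,4,A,B,'-',0,C,D, which the hex-to-ASCII loop turns into
+ * the string "1234AB-0CD".
+ */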
+
+/**
+ *  e1000_read_mac_addr_generic - Read device MAC address
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the device MAC address from the EEPROM and stores the value.
+ *  Since devices with two ports use the same EEPROM, we increment the
+ *  last bit in the MAC address for the second port.
+ **/
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
+{
+	u32 rar_high;
+	u32 rar_low;
+	u16 i;
+
+	rar_high = er32(RAH(0));
+	rar_low = er32(RAL(0));
+
+	for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+		hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
+
+	for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+		hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
+
+	for (i = 0; i < ETH_ALEN; i++)
+		hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+	return 0;
+}
+
+/**
+ *  e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			e_dbg("NVM Read Error\n");
+			return ret_val;
+		}
+		checksum += nvm_data;
+	}
+
+	if (checksum != (u16) NVM_SUM) {
+		e_dbg("NVM Checksum Invalid\n");
+		return -E1000_ERR_NVM;
+	}
+
+	return 0;
+}
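+
+/*
+ * Invariant checked above (illustrative restatement): the 16-bit sum of
+ * NVM words 0 through NVM_CHECKSUM_REG, including the checksum word
+ * itself, must equal NVM_SUM (0xBABA).  The update routine below picks
+ * the checksum word so that this holds.
+ */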
+
+/**
+ *  e1000e_update_nvm_checksum_generic - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM.
+ **/
+s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			e_dbg("NVM Read Error while updating checksum.\n");
+			return ret_val;
+		}
+		checksum += nvm_data;
+	}
+	checksum = (u16) NVM_SUM - checksum;
+	ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
+	if (ret_val)
+		e_dbg("NVM Write Error while updating checksum.\n");
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_reload_nvm - Reloads EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ *  extended control register.
+ **/
+void e1000e_reload_nvm(struct e1000_hw *hw)
+{
+	u32 ctrl_ext;
+
+	udelay(10);
+	ctrl_ext = er32(CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+	ew32(CTRL_EXT, ctrl_ext);
+	e1e_flush();
+}
+
+/**
+ *  e1000_calculate_checksum - Calculate checksum for buffer
+ *  @buffer: pointer to EEPROM
+ *  @length: size of EEPROM to calculate a checksum for
+ *
+ *  Calculates the checksum over a buffer of the specified length.  The
+ *  calculated checksum is returned.
+ **/
+static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
+{
+	u32 i;
+	u8  sum = 0;
+
+	if (!buffer)
+		return 0;
+
+	for (i = 0; i < length; i++)
+		sum += buffer[i];
+
+	return (u8) (0 - sum);
+}
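+
+/*
+ * Example (illustrative): for buffer = { 0x10, 0x20, 0x30 } the byte sum
+ * is 0x60, so the value returned is (u8)(0 - 0x60) = 0xA0; summing the
+ * buffer together with this checksum then gives 0 modulo 256.
+ */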
+
+/**
+ *  e1000_mng_enable_host_if - Checks that the host interface is enabled
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ *  This function checks whether the host interface is enabled for command
+ *  operation and whether the previous command has completed.  It busy-waits
+ *  while the previous command has not completed.
+ **/
+static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
+{
+	u32 hicr;
+	u8 i;
+
+	if (!(hw->mac.arc_subsystem_valid)) {
+		e_dbg("ARC subsystem not valid.\n");
+		return -E1000_ERR_HOST_INTERFACE_COMMAND;
+	}
+
+	/* Check that the host interface is enabled. */
+	hicr = er32(HICR);
+	if ((hicr & E1000_HICR_EN) == 0) {
+		e_dbg("E1000_HOST_EN bit disabled.\n");
+		return -E1000_ERR_HOST_INTERFACE_COMMAND;
+	}
+	/* check the previous command is completed */
+	for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+		hicr = er32(HICR);
+		if (!(hicr & E1000_HICR_C))
+			break;
+		mdelay(1);
+	}
+
+	if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+		e_dbg("Previous command timeout failed .\n");
+		return -E1000_ERR_HOST_INTERFACE_COMMAND;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_check_mng_mode_generic - check management mode
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the firmware semaphore register and returns true (>0) if
+ *  manageability is enabled, else false (0).
+ **/
+bool e1000e_check_mng_mode_generic(struct e1000_hw *hw)
+{
+	u32 fwsm = er32(FWSM);
+
+	return (fwsm & E1000_FWSM_MODE_MASK) ==
+		(E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
+}
+
+/**
+ *  e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
+ *  @hw: pointer to the HW structure
+ *
+ *  Enables packet filtering on transmit packets if manageability is enabled
+ *  and host interface is enabled.
+ **/
+bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+	struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
+	u32 *buffer = (u32 *)&hw->mng_cookie;
+	u32 offset;
+	s32 ret_val, hdr_csum, csum;
+	u8 i, len;
+
+	hw->mac.tx_pkt_filtering = true;
+
+	/* No manageability, no filtering */
+	if (!e1000e_check_mng_mode(hw)) {
+		hw->mac.tx_pkt_filtering = false;
+		goto out;
+	}
+
+	/*
+	 * If we can't read from the host interface for whatever
+	 * reason, disable filtering.
+	 */
+	ret_val = e1000_mng_enable_host_if(hw);
+	if (ret_val) {
+		hw->mac.tx_pkt_filtering = false;
+		goto out;
+	}
+
+	/* Read in the header.  Length and offset are in dwords. */
+	len    = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
+	offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
+	for (i = 0; i < len; i++)
+		*(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i);
+	hdr_csum = hdr->checksum;
+	hdr->checksum = 0;
+	csum = e1000_calculate_checksum((u8 *)hdr,
+					E1000_MNG_DHCP_COOKIE_LENGTH);
+	/*
+	 * If either the checksums or signature don't match, then
+	 * the cookie area isn't considered valid, in which case we
+	 * take the safe route of assuming Tx filtering is enabled.
+	 */
+	if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
+		hw->mac.tx_pkt_filtering = true;
+		goto out;
+	}
+
+	/* Cookie area is valid, make the final check for filtering. */
+	if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
+		hw->mac.tx_pkt_filtering = false;
+		goto out;
+	}
+
+out:
+	return hw->mac.tx_pkt_filtering;
+}
+
+/**
+ *  e1000_mng_write_cmd_header - Writes manageability command header
+ *  @hw: pointer to the HW structure
+ *  @hdr: pointer to the host interface command header
+ *
+ *  Writes the command header after performing the checksum calculation.
+ **/
+static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+				  struct e1000_host_mng_command_header *hdr)
+{
+	u16 i, length = sizeof(struct e1000_host_mng_command_header);
+
+	/* Write the whole command header structure with new checksum. */
+
+	hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
+
+	length >>= 2;
+	/* Write the relevant command block into the ram area. */
+	for (i = 0; i < length; i++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i,
+					    *((u32 *) hdr + i));
+		e1e_flush();
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_mng_host_if_write - Write to the manageability host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface buffer
+ *  @length: size of the buffer
+ *  @offset: location in the buffer to write to
+ *  @sum: sum of the data (not checksum)
+ *
+ *  This function writes the buffer contents to the host interface at the
+ *  given offset.  It handles unaligned leading and trailing bytes so the
+ *  writes can be done as 32-bit accesses, and accumulates the byte sum of
+ *  the buffer into the *sum parameter.
+ **/
+static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
+				   u16 length, u16 offset, u8 *sum)
+{
+	u8 *tmp;
+	u8 *bufptr = buffer;
+	u32 data = 0;
+	u16 remaining, i, j, prev_bytes;
+
+	/* sum = only sum of the data and it is not checksum */
+
+	if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
+		return -E1000_ERR_PARAM;
+
+	tmp = (u8 *)&data;
+	prev_bytes = offset & 0x3;
+	offset >>= 2;
+
+	if (prev_bytes) {
+		data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
+		for (j = prev_bytes; j < sizeof(u32); j++) {
+			*(tmp + j) = *bufptr++;
+			*sum += *(tmp + j);
+		}
+		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
+		length -= j - prev_bytes;
+		offset++;
+	}
+
+	remaining = length & 0x3;
+	length -= remaining;
+
+	/* Calculate length in DWORDs */
+	length >>= 2;
+
+	/*
+	 * The device driver writes the relevant command block into the
+	 * ram area.
+	 */
+	for (i = 0; i < length; i++) {
+		for (j = 0; j < sizeof(u32); j++) {
+			*(tmp + j) = *bufptr++;
+			*sum += *(tmp + j);
+		}
+
+		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
+	}
+	if (remaining) {
+		for (j = 0; j < sizeof(u32); j++) {
+			if (j < remaining)
+				*(tmp + j) = *bufptr++;
+			else
+				*(tmp + j) = 0;
+
+			*sum += *(tmp + j);
+		}
+		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface
+ *  @length: size of the buffer
+ *
+ *  Writes the DHCP information to the host interface.
+ **/
+s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
+{
+	struct e1000_host_mng_command_header hdr;
+	s32 ret_val;
+	u32 hicr;
+
+	hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+	hdr.command_length = length;
+	hdr.reserved1 = 0;
+	hdr.reserved2 = 0;
+	hdr.checksum = 0;
+
+	/* Enable the host interface */
+	ret_val = e1000_mng_enable_host_if(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Populate the host interface with the contents of "buffer". */
+	ret_val = e1000_mng_host_if_write(hw, buffer, length,
+					  sizeof(hdr), &(hdr.checksum));
+	if (ret_val)
+		return ret_val;
+
+	/* Write the manageability command header */
+	ret_val = e1000_mng_write_cmd_header(hw, &hdr);
+	if (ret_val)
+		return ret_val;
+
+	/* Tell the ARC a new command is pending. */
+	hicr = er32(HICR);
+	ew32(HICR, hicr | E1000_HICR_C);
+
+	return 0;
+}
+
+/**
+ *  e1000e_enable_mng_pass_thru - Check if management passthrough is needed
+ *  @hw: pointer to the HW structure
+ *
+ *  Verifies whether the hardware needs to leave the interface enabled so that
+ *  frames can be directed to and from the management interface.
+ **/
+bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+	u32 manc;
+	u32 fwsm, factps;
+	bool ret_val = false;
+
+	manc = er32(MANC);
+
+	if (!(manc & E1000_MANC_RCV_TCO_EN))
+		goto out;
+
+	if (hw->mac.has_fwsm) {
+		fwsm = er32(FWSM);
+		factps = er32(FACTPS);
+
+		if (!(factps & E1000_FACTPS_MNGCG) &&
+		    ((fwsm & E1000_FWSM_MODE_MASK) ==
+		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
+			ret_val = true;
+			goto out;
+		}
+	} else if ((hw->mac.type == e1000_82574) ||
+		   (hw->mac.type == e1000_82583)) {
+		u16 data;
+
+		factps = er32(FACTPS);
+		e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+
+		if (!(factps & E1000_FACTPS_MNGCG) &&
+		    ((data & E1000_NVM_INIT_CTRL2_MNGM) ==
+		     (e1000_mng_mode_pt << 13))) {
+			ret_val = true;
+			goto out;
+		}
+	} else if ((manc & E1000_MANC_SMBUS_EN) &&
+		    !(manc & E1000_MANC_ASF_EN)) {
+			ret_val = true;
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+++ linux-patched/drivers/xenomai/net/drivers/e1000e/82571.c	2022-03-21 12:58:29.561887284 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000e/80003es2lan.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * 82571EB Gigabit Ethernet Controller
+ * 82571EB Gigabit Ethernet Controller (Copper)
+ * 82571EB Gigabit Ethernet Controller (Fiber)
+ * 82571EB Dual Port Gigabit Mezzanine Adapter
+ * 82571EB Quad Port Gigabit Mezzanine Adapter
+ * 82571PT Gigabit PT Quad Port Server ExpressModule
+ * 82572EI Gigabit Ethernet Controller (Copper)
+ * 82572EI Gigabit Ethernet Controller (Fiber)
+ * 82572EI Gigabit Ethernet Controller
+ * 82573V Gigabit Ethernet Controller (Copper)
+ * 82573E Gigabit Ethernet Controller (Copper)
+ * 82573L Gigabit Ethernet Controller
+ * 82574L Gigabit Network Connection
+ * 82583V Gigabit Network Connection
+ */
+
+#include "e1000.h"
+
+#define ID_LED_RESERVED_F746 0xF746
+#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
+			      (ID_LED_OFF1_ON2  <<  8) | \
+			      (ID_LED_DEF1_DEF2 <<  4) | \
+			      (ID_LED_DEF1_DEF2))
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define AN_RETRY_COUNT          5 /* Autoneg Retry Count value */
+#define E1000_BASE1000T_STATUS          10
+#define E1000_IDLE_ERROR_COUNT_MASK     0xFF
+#define E1000_RECEIVE_ERROR_COUNTER     21
+#define E1000_RECEIVE_ERROR_MAX         0xFFFF
+
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
+
+static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
+static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
+static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
+static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw);
+static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
+				      u16 words, u16 *data);
+static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
+static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
+static s32 e1000_setup_link_82571(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
+static void e1000_clear_vfta_82571(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
+static s32 e1000_led_on_82574(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active);
+
+/**
+ *  e1000_init_phy_params_82571 - Init PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+
+	if (hw->phy.media_type != e1000_media_type_copper) {
+		phy->type = e1000_phy_none;
+		return 0;
+	}
+
+	phy->addr			 = 1;
+	phy->autoneg_mask		 = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+	phy->reset_delay_us		 = 100;
+
+	phy->ops.power_up		 = e1000_power_up_phy_copper;
+	phy->ops.power_down		 = e1000_power_down_phy_copper_82571;
+
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		phy->type		 = e1000_phy_igp_2;
+		break;
+	case e1000_82573:
+		phy->type		 = e1000_phy_m88;
+		break;
+	case e1000_82574:
+	case e1000_82583:
+		phy->type		 = e1000_phy_bm;
+		phy->ops.acquire = e1000_get_hw_semaphore_82574;
+		phy->ops.release = e1000_put_hw_semaphore_82574;
+		phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
+		phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
+		break;
+	default:
+		return -E1000_ERR_PHY;
+		break;
+	}
+
+	/* This can only be done after all function pointers are setup. */
+	ret_val = e1000_get_phy_id_82571(hw);
+	if (ret_val) {
+		e_dbg("Error getting PHY ID\n");
+		return ret_val;
+	}
+
+	/* Verify phy id */
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		if (phy->id != IGP01E1000_I_PHY_ID)
+			ret_val = -E1000_ERR_PHY;
+		break;
+	case e1000_82573:
+		if (phy->id != M88E1111_I_PHY_ID)
+			ret_val = -E1000_ERR_PHY;
+		break;
+	case e1000_82574:
+	case e1000_82583:
+		if (phy->id != BME1000_E_PHY_ID_R2)
+			ret_val = -E1000_ERR_PHY;
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		break;
+	}
+
+	if (ret_val)
+		e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_82571 - Init NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = er32(EECD);
+	u16 size;
+
+	nvm->opcode_bits = 8;
+	nvm->delay_usec = 1;
+	switch (nvm->override) {
+	case e1000_nvm_override_spi_large:
+		nvm->page_size = 32;
+		nvm->address_bits = 16;
+		break;
+	case e1000_nvm_override_spi_small:
+		nvm->page_size = 8;
+		nvm->address_bits = 8;
+		break;
+	default:
+		nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+		break;
+	}
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		if (((eecd >> 15) & 0x3) == 0x3) {
+			nvm->type = e1000_nvm_flash_hw;
+			nvm->word_size = 2048;
+			/*
+			 * Autonomous Flash update bit must be cleared due
+			 * to Flash update issue.
+			 */
+			eecd &= ~E1000_EECD_AUPDEN;
+			ew32(EECD, eecd);
+			break;
+		}
+		fallthrough;
+	default:
+		nvm->type = e1000_nvm_eeprom_spi;
+		size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+				  E1000_EECD_SIZE_EX_SHIFT);
+		/*
+		 * Added to a constant, "size" becomes the left-shift value
+		 * for setting word_size.
+		 */
+		size += NVM_WORD_SIZE_BASE_SHIFT;
+
+		/* EEPROM access above 16k is unsupported */
+		if (size > 14)
+			size = 14;
+		nvm->word_size	= 1 << size;
+		break;
+	}
+
+	/* Function Pointers */
+	switch (hw->mac.type) {
+	case e1000_82574:
+	case e1000_82583:
+		nvm->ops.acquire = e1000_get_hw_semaphore_82574;
+		nvm->ops.release = e1000_put_hw_semaphore_82574;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_init_mac_params_82571 - Init MAC func ptrs.
+ *  @adapter: pointer to the board private structure
+ **/
+static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_mac_operations *func = &mac->ops;
+	u32 swsm = 0;
+	u32 swsm2 = 0;
+	bool force_clear_smbi = false;
+
+	/* Set media type */
+	switch (adapter->pdev->device) {
+	case E1000_DEV_ID_82571EB_FIBER:
+	case E1000_DEV_ID_82572EI_FIBER:
+	case E1000_DEV_ID_82571EB_QUAD_FIBER:
+		hw->phy.media_type = e1000_media_type_fiber;
+		break;
+	case E1000_DEV_ID_82571EB_SERDES:
+	case E1000_DEV_ID_82572EI_SERDES:
+	case E1000_DEV_ID_82571EB_SERDES_DUAL:
+	case E1000_DEV_ID_82571EB_SERDES_QUAD:
+		hw->phy.media_type = e1000_media_type_internal_serdes;
+		break;
+	default:
+		hw->phy.media_type = e1000_media_type_copper;
+		break;
+	}
+
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_RAR_ENTRIES;
+	/* Adaptive IFS supported */
+	mac->adaptive_ifs = true;
+
+	/* check for link */
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		func->setup_physical_interface = e1000_setup_copper_link_82571;
+		func->check_for_link = e1000e_check_for_copper_link;
+		func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
+		break;
+	case e1000_media_type_fiber:
+		func->setup_physical_interface =
+			e1000_setup_fiber_serdes_link_82571;
+		func->check_for_link = e1000e_check_for_fiber_link;
+		func->get_link_up_info =
+			e1000e_get_speed_and_duplex_fiber_serdes;
+		break;
+	case e1000_media_type_internal_serdes:
+		func->setup_physical_interface =
+			e1000_setup_fiber_serdes_link_82571;
+		func->check_for_link = e1000_check_for_serdes_link_82571;
+		func->get_link_up_info =
+			e1000e_get_speed_and_duplex_fiber_serdes;
+		break;
+	default:
+		return -E1000_ERR_CONFIG;
+		break;
+	}
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+		func->set_lan_id = e1000_set_lan_id_single_port;
+		func->check_mng_mode = e1000e_check_mng_mode_generic;
+		func->led_on = e1000e_led_on_generic;
+		func->blink_led = e1000e_blink_led_generic;
+
+		/* FWSM register */
+		mac->has_fwsm = true;
+		/*
+		 * ARC supported; valid only if manageability features are
+		 * enabled.
+		 */
+		mac->arc_subsystem_valid =
+			(er32(FWSM) & E1000_FWSM_MODE_MASK)
+			? true : false;
+		break;
+	case e1000_82574:
+	case e1000_82583:
+		func->set_lan_id = e1000_set_lan_id_single_port;
+		func->check_mng_mode = e1000_check_mng_mode_82574;
+		func->led_on = e1000_led_on_82574;
+		break;
+	default:
+		func->check_mng_mode = e1000e_check_mng_mode_generic;
+		func->led_on = e1000e_led_on_generic;
+		func->blink_led = e1000e_blink_led_generic;
+
+		/* FWSM register */
+		mac->has_fwsm = true;
+		break;
+	}
+
+	/*
+	 * Ensure that the inter-port SWSM.SMBI lock bit is clear before
+	 * first NVM or PHY access. This should be done for single-port
+	 * devices, and for one port only on dual-port devices so that
+	 * for those devices we can still use the SMBI lock to synchronize
+	 * inter-port accesses to the PHY & NVM.
+	 */
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		swsm2 = er32(SWSM2);
+
+		if (!(swsm2 & E1000_SWSM2_LOCK)) {
+			/* Only do this for the first interface on this card */
+			ew32(SWSM2,
+			    swsm2 | E1000_SWSM2_LOCK);
+			force_clear_smbi = true;
+		} else
+			force_clear_smbi = false;
+		break;
+	default:
+		force_clear_smbi = true;
+		break;
+	}
+
+	if (force_clear_smbi) {
+		/* Make sure SWSM.SMBI is clear */
+		swsm = er32(SWSM);
+		if (swsm & E1000_SWSM_SMBI) {
+			/* This bit should not be set on a first interface, and
+			 * indicates that the bootagent or EFI code has
+			 * improperly left this bit enabled
+			 */
+			e_dbg("Please update your 82571 Bootagent\n");
+		}
+		ew32(SWSM, swsm & ~E1000_SWSM_SMBI);
+	}
+
+	/*
+	 * Initialize device specific counter of SMBI acquisition
+	 * timeouts.
+	 */
+	 hw->dev_spec.e82571.smb_counter = 0;
+
+	return 0;
+}
+
+static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	static int global_quad_port_a; /* global port a indication */
+	struct pci_dev *pdev = adapter->pdev;
+	int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
+	s32 rc;
+
+	rc = e1000_init_mac_params_82571(adapter);
+	if (rc)
+		return rc;
+
+	rc = e1000_init_nvm_params_82571(hw);
+	if (rc)
+		return rc;
+
+	rc = e1000_init_phy_params_82571(hw);
+	if (rc)
+		return rc;
+
+	/* tag quad port adapters first, it's used below */
+	switch (pdev->device) {
+	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82571EB_QUAD_FIBER:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
+	case E1000_DEV_ID_82571PT_QUAD_COPPER:
+		adapter->flags |= FLAG_IS_QUAD_PORT;
+		/* mark the first port */
+		if (global_quad_port_a == 0)
+			adapter->flags |= FLAG_IS_QUAD_PORT_A;
+		/* Reset for multiple quad port adapters */
+		global_quad_port_a++;
+		if (global_quad_port_a == 4)
+			global_quad_port_a = 0;
+		break;
+	default:
+		break;
+	}
+
+	switch (adapter->hw.mac.type) {
+	case e1000_82571:
+		/* these dual ports don't have WoL on port B at all */
+		if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) ||
+		     (pdev->device == E1000_DEV_ID_82571EB_SERDES) ||
+		     (pdev->device == E1000_DEV_ID_82571EB_COPPER)) &&
+		    (is_port_b))
+			adapter->flags &= ~FLAG_HAS_WOL;
+		/* quad ports only support WoL on port A */
+		if (adapter->flags & FLAG_IS_QUAD_PORT &&
+		    (!(adapter->flags & FLAG_IS_QUAD_PORT_A)))
+			adapter->flags &= ~FLAG_HAS_WOL;
+		/* Does not support WoL on any port */
+		if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)
+			adapter->flags &= ~FLAG_HAS_WOL;
+		break;
+	case e1000_82573:
+		if (pdev->device == E1000_DEV_ID_82573L) {
+			adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
+			adapter->max_hw_frame_size = DEFAULT_JUMBO;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_get_phy_id_82571 - Retrieve the PHY ID and revision
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY registers and stores the PHY ID and possibly the PHY
+ *  revision in the hardware structure.
+ **/
+static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_id = 0;
+
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		/*
+		 * The 82571 firmware may still be configuring the PHY.
+		 * In this case, we cannot access the PHY until the
+		 * configuration is done.  So we explicitly set the
+		 * PHY ID.
+		 */
+		phy->id = IGP01E1000_I_PHY_ID;
+		break;
+	case e1000_82573:
+		return e1000e_get_phy_id(hw);
+		break;
+	case e1000_82574:
+	case e1000_82583:
+		ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
+		if (ret_val)
+			return ret_val;
+
+		phy->id = (u32)(phy_id << 16);
+		udelay(20);
+		ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
+		if (ret_val)
+			return ret_val;
+
+		phy->id |= (u32)(phy_id);
+		phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+		break;
+	default:
+		return -E1000_ERR_PHY;
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_get_hw_semaphore_82571 - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM
+ **/
+static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
+{
+	u32 swsm;
+	s32 sw_timeout = hw->nvm.word_size + 1;
+	s32 fw_timeout = hw->nvm.word_size + 1;
+	s32 i = 0;
+
+	/*
+	 * If we have timed out 3 times trying to acquire the
+	 * inter-port SMBI semaphore, there is old code operating
+	 * on the other port that is not releasing SMBI.  Modify
+	 * the number of times that we try for the semaphore to
+	 * interoperate with this older code.
+	 */
+	if (hw->dev_spec.e82571.smb_counter > 2)
+		sw_timeout = 1;
+
+	/* Get the SW semaphore */
+	while (i < sw_timeout) {
+		swsm = er32(SWSM);
+		if (!(swsm & E1000_SWSM_SMBI))
+			break;
+
+		udelay(50);
+		i++;
+	}
+
+	if (i == sw_timeout) {
+		e_dbg("Driver can't access device - SMBI bit is set.\n");
+		hw->dev_spec.e82571.smb_counter++;
+	}
+	/* Get the FW semaphore. */
+	for (i = 0; i < fw_timeout; i++) {
+		swsm = er32(SWSM);
+		ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
+
+		/* Semaphore acquired if bit latched */
+		if (er32(SWSM) & E1000_SWSM_SWESMBI)
+			break;
+
+		udelay(50);
+	}
+
+	if (i == fw_timeout) {
+		/* Release semaphores */
+		e1000_put_hw_semaphore_82571(hw);
+		e_dbg("Driver can't access the NVM\n");
+		return -E1000_ERR_NVM;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_put_hw_semaphore_82571 - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used to access the PHY or NVM
+ **/
+static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
+{
+	u32 swsm;
+
+	swsm = er32(SWSM);
+	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+	ew32(SWSM, swsm);
+}
+/**
+ *  e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore during reset.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
+{
+	u32 extcnf_ctrl;
+	s32 ret_val = 0;
+	s32 i = 0;
+
+	extcnf_ctrl = er32(EXTCNF_CTRL);
+	extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+	do {
+		ew32(EXTCNF_CTRL, extcnf_ctrl);
+		extcnf_ctrl = er32(EXTCNF_CTRL);
+
+		if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+			break;
+
+		extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+		usleep_range(2000, 4000);
+		i++;
+	} while (i < MDIO_OWNERSHIP_TIMEOUT);
+
+	if (i == MDIO_OWNERSHIP_TIMEOUT) {
+		/* Release semaphores */
+		e1000_put_hw_semaphore_82573(hw);
+		e_dbg("Driver can't access the PHY\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_put_hw_semaphore_82573 - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used during reset.
+ *
+ **/
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
+{
+	u32 extcnf_ctrl;
+
+	extcnf_ctrl = er32(EXTCNF_CTRL);
+	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+	ew32(EXTCNF_CTRL, extcnf_ctrl);
+}
+
+static DEFINE_MUTEX(swflag_mutex);
+
+/**
+ *  e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	mutex_lock(&swflag_mutex);
+	ret_val = e1000_get_hw_semaphore_82573(hw);
+	if (ret_val)
+		mutex_unlock(&swflag_mutex);
+	return ret_val;
+}
+
+/**
+ *  e1000_put_hw_semaphore_82574 - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used to access the PHY or NVM
+ *
+ **/
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
+{
+	e1000_put_hw_semaphore_82573(hw);
+	mutex_unlock(&swflag_mutex);
+}
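+
+/*
+ * Note derived from the code in this file (illustrative): on 82574/82583
+ * these two helpers are installed as phy->ops.acquire/release and
+ * nvm->ops.acquire/release, so every PHY or NVM access is serialized by
+ * swflag_mutex in addition to the EXTCNF_CTRL MDIO ownership bit taken in
+ * e1000_get_hw_semaphore_82573().
+ */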
+
+/**
+ *  e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.
+ *  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+	u32 data = er32(POEMB);
+
+	if (active)
+		data |= E1000_PHY_CTRL_D0A_LPLU;
+	else
+		data &= ~E1000_PHY_CTRL_D0A_LPLU;
+
+	ew32(POEMB, data);
+	return 0;
+}
+
+/**
+ *  e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  when active is true, else clear lplu for D3. LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+	u32 data = er32(POEMB);
+
+	if (!active) {
+		data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+	} else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+		   (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
+		   (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= E1000_PHY_CTRL_NOND0A_LPLU;
+	}
+
+	ew32(POEMB, data);
+	return 0;
+}
+
+/**
+ *  e1000_acquire_nvm_82571 - Request for access to the EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  To gain access to the EEPROM, first we must obtain a hardware semaphore.
+ *  Then for non-82573 hardware, set the EEPROM access request bit and wait
+ *  for EEPROM access grant bit.  If the access grant bit is not set, release
+ *  hardware semaphore.
+ **/
+static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	ret_val = e1000_get_hw_semaphore_82571(hw);
+	if (ret_val)
+		return ret_val;
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+		break;
+	default:
+		ret_val = e1000e_acquire_nvm(hw);
+		break;
+	}
+
+	if (ret_val)
+		e1000_put_hw_semaphore_82571(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_release_nvm_82571 - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+static void e1000_release_nvm_82571(struct e1000_hw *hw)
+{
+	e1000e_release_nvm(hw);
+	e1000_put_hw_semaphore_82571(hw);
+}
+
+/**
+ *  e1000_write_nvm_82571 - Write to EEPROM using appropriate interface
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  For non-82573 silicon, write data to EEPROM at offset using SPI interface.
+ *
+ *  If e1000e_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
+				 u16 *data)
+{
+	s32 ret_val;
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
+		break;
+	case e1000_82571:
+	case e1000_82572:
+		ret_val = e1000e_write_nvm_spi(hw, offset, words, data);
+		break;
+	default:
+		ret_val = -E1000_ERR_NVM;
+		break;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_update_nvm_checksum_82571 - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM.
+ **/
+static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
+{
+	u32 eecd;
+	s32 ret_val;
+	u16 i;
+
+	ret_val = e1000e_update_nvm_checksum_generic(hw);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * If our nvm is an EEPROM, then we're done
+	 * otherwise, commit the checksum to the flash NVM.
+	 */
+	if (hw->nvm.type != e1000_nvm_flash_hw)
+		return ret_val;
+
+	/* Check for pending operations. */
+	for (i = 0; i < E1000_FLASH_UPDATES; i++) {
+		usleep_range(1000, 2000);
+		if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
+			break;
+	}
+
+	if (i == E1000_FLASH_UPDATES)
+		return -E1000_ERR_NVM;
+
+	/* Reset the firmware if using STM opcode. */
+	if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) {
+		/*
+		 * The enabling of and the actual reset must be done
+		 * in two write cycles.
+		 */
+		ew32(HICR, E1000_HICR_FW_RESET_ENABLE);
+		e1e_flush();
+		ew32(HICR, E1000_HICR_FW_RESET);
+	}
+
+	/* Commit the write to flash */
+	eecd = er32(EECD) | E1000_EECD_FLUPD;
+	ew32(EECD, eecd);
+
+	for (i = 0; i < E1000_FLASH_UPDATES; i++) {
+		usleep_range(1000, 2000);
+		if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
+			break;
+	}
+
+	if (i == E1000_FLASH_UPDATES)
+		return -E1000_ERR_NVM;
+
+	return 0;
+}
+
+/**
+ *  e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw)
+{
+	if (hw->nvm.type == e1000_nvm_flash_hw)
+		e1000_fix_nvm_checksum_82571(hw);
+
+	return e1000e_validate_nvm_checksum_generic(hw);
+}
+
+/**
+ *  e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  After checking for invalid values, poll the EEPROM to ensure the previous
+ *  command has completed before trying to write the next word.  After write
+ *  poll for completion.
+ *
+ *  If e1000e_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
+				      u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i, eewr = 0;
+	s32 ret_val = 0;
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		e_dbg("nvm parameter(s) out of bounds\n");
+		return -E1000_ERR_NVM;
+	}
+
+	for (i = 0; i < words; i++) {
+		eewr = (data[i] << E1000_NVM_RW_REG_DATA) |
+		       ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
+		       E1000_NVM_RW_REG_START;
+
+		ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
+		if (ret_val)
+			break;
+
+		ew32(EEWR, eewr);
+
+		ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
+		if (ret_val)
+			break;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_get_cfg_done_82571 - Poll for configuration done
+ *  @hw: pointer to the HW structure
+ *
+ *  Polls the management control register until the config done bit is set.
+ **/
+static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
+{
+	s32 timeout = PHY_CFG_TIMEOUT;
+
+	while (timeout) {
+		if (er32(EEMNGCTL) &
+		    E1000_NVM_CFG_DONE_PORT_0)
+			break;
+		usleep_range(1000, 2000);
+		timeout--;
+	}
+	if (!timeout) {
+		e_dbg("MNG configuration cycle has not completed.\n");
+		return -E1000_ERR_RESET;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.  When activating LPLU
+ *  Sets the LPLU D0 state according to the active flag.  When activating LPLU
+ *  this function also disables smart speed and vice versa.  LPLU will not be
+ *  activated unless the device autonegotiation advertisement is 10, 10/100,
+ *  or 10/100/1000 at all duplexes.  This is a function
+ **/
+static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+	if (ret_val)
+		return ret_val;
+
+	if (active) {
+		data |= IGP02E1000_PM_D0_LPLU;
+		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+		if (ret_val)
+			return ret_val;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+		if (ret_val)
+			return ret_val;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
+		if (ret_val)
+			return ret_val;
+	} else {
+		data &= ~IGP02E1000_PM_D0_LPLU;
+		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
+			if (ret_val)
+				return ret_val;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
+			if (ret_val)
+				return ret_val;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
+			if (ret_val)
+				return ret_val;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
+			if (ret_val)
+				return ret_val;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_reset_hw_82571 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state.
+ **/
+static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
+{
+	u32 ctrl, ctrl_ext;
+	s32 ret_val;
+
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = e1000e_disable_pcie_master(hw);
+	if (ret_val)
+		e_dbg("PCI-E Master disable polling has failed.\n");
+
+	e_dbg("Masking off all interrupts\n");
+	ew32(IMC, 0xffffffff);
+
+	ew32(RCTL, 0);
+	ew32(TCTL, E1000_TCTL_PSP);
+	e1e_flush();
+
+	usleep_range(10000, 20000);
+
+	/*
+	 * Must acquire the MDIO ownership before MAC reset.
+	 * Ownership defaults to firmware after a reset.
+	 */
+	switch (hw->mac.type) {
+	case e1000_82573:
+		ret_val = e1000_get_hw_semaphore_82573(hw);
+		break;
+	case e1000_82574:
+	case e1000_82583:
+		ret_val = e1000_get_hw_semaphore_82574(hw);
+		break;
+	default:
+		break;
+	}
+	if (ret_val)
+		e_dbg("Cannot acquire MDIO ownership\n");
+
+	ctrl = er32(CTRL);
+
+	e_dbg("Issuing a global reset to MAC\n");
+	ew32(CTRL, ctrl | E1000_CTRL_RST);
+
+	/* Must release MDIO ownership and mutex after MAC reset. */
+	switch (hw->mac.type) {
+	case e1000_82574:
+	case e1000_82583:
+		e1000_put_hw_semaphore_82574(hw);
+		break;
+	default:
+		break;
+	}
+
+	if (hw->nvm.type == e1000_nvm_flash_hw) {
+		udelay(10);
+		ctrl_ext = er32(CTRL_EXT);
+		ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+		ew32(CTRL_EXT, ctrl_ext);
+		e1e_flush();
+	}
+
+	ret_val = e1000e_get_auto_rd_done(hw);
+	if (ret_val)
+		/* We don't want to continue accessing MAC registers. */
+		return ret_val;
+
+	/*
+	 * Phy configuration from NVM just starts after EECD_AUTO_RD is set.
+	 * Need to wait for Phy configuration completion before accessing
+	 * NVM and Phy.
+	 */
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		msleep(25);
+		break;
+	default:
+		break;
+	}
+
+	/* Clear any pending interrupt events. */
+	ew32(IMC, 0xffffffff);
+	er32(ICR);
+
+	if (hw->mac.type == e1000_82571) {
+		/* Install any alternate MAC address into RAR0 */
+		ret_val = e1000_check_alt_mac_addr_generic(hw);
+		if (ret_val)
+			return ret_val;
+
+		e1000e_set_laa_state_82571(hw, true);
+	}
+
+	/* Reinitialize the 82571 serdes link state machine */
+	if (hw->phy.media_type == e1000_media_type_internal_serdes)
+		hw->mac.serdes_link_state = e1000_serdes_link_down;
+
+	return 0;
+}
+
+/**
+ *  e1000_init_hw_82571 - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This initializes the hardware, readying it for operation.
+ **/
+static s32 e1000_init_hw_82571(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 reg_data;
+	s32 ret_val;
+	u16 i, rar_count = mac->rar_entry_count;
+
+	e1000_initialize_hw_bits_82571(hw);
+
+	/* Initialize identification LED */
+	ret_val = e1000e_id_led_init(hw);
+	if (ret_val)
+		e_dbg("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
+
+	/* Disabling VLAN filtering */
+	e_dbg("Initializing the IEEE VLAN\n");
+	mac->ops.clear_vfta(hw);
+
+	/* Setup the receive address. */
+	/*
+	 * If, however, a locally administered address was assigned to the
+	 * 82571, we must reserve a RAR for it to work around an issue where
+	 * resetting one port will reload the MAC on the other port.
+	 */
+	if (e1000e_get_laa_state_82571(hw))
+		rar_count--;
+	e1000e_init_rx_addrs(hw, rar_count);
+
+	/* Zero out the Multicast HASH table */
+	e_dbg("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link_82571(hw);
+
+	/* Set the transmit descriptor write-back policy */
+	reg_data = er32(TXDCTL(0));
+	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+		   E1000_TXDCTL_FULL_TX_DESC_WB |
+		   E1000_TXDCTL_COUNT_DESC;
+	ew32(TXDCTL(0), reg_data);
+
+	/* ...for both queues. */
+	switch (mac->type) {
+	case e1000_82573:
+		e1000e_enable_tx_pkt_filtering(hw);
+		fallthrough;
+	case e1000_82574:
+	case e1000_82583:
+		reg_data = er32(GCR);
+		reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
+		ew32(GCR, reg_data);
+		break;
+	default:
+		reg_data = er32(TXDCTL(1));
+		reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+			   E1000_TXDCTL_FULL_TX_DESC_WB |
+			   E1000_TXDCTL_COUNT_DESC;
+		ew32(TXDCTL(1), reg_data);
+		break;
+	}
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_82571(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes required hardware-dependent bits needed for normal operation.
+ **/
+static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
+{
+	u32 reg;
+
+	/* Transmit Descriptor Control 0 */
+	reg = er32(TXDCTL(0));
+	reg |= (1 << 22);
+	ew32(TXDCTL(0), reg);
+
+	/* Transmit Descriptor Control 1 */
+	reg = er32(TXDCTL(1));
+	reg |= (1 << 22);
+	ew32(TXDCTL(1), reg);
+
+	/* Transmit Arbitration Control 0 */
+	reg = er32(TARC(0));
+	reg &= ~(0xF << 27); /* 30:27 */
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
+		break;
+	default:
+		break;
+	}
+	ew32(TARC(0), reg);
+
+	/* Transmit Arbitration Control 1 */
+	reg = er32(TARC(1));
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		reg &= ~((1 << 29) | (1 << 30));
+		reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
+		if (er32(TCTL) & E1000_TCTL_MULR)
+			reg &= ~(1 << 28);
+		else
+			reg |= (1 << 28);
+		ew32(TARC(1), reg);
+		break;
+	default:
+		break;
+	}
+
+	/* Device Control */
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		reg = er32(CTRL);
+		reg &= ~(1 << 29);
+		ew32(CTRL, reg);
+		break;
+	default:
+		break;
+	}
+
+	/* Extended Device Control */
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		reg = er32(CTRL_EXT);
+		reg &= ~(1 << 23);
+		reg |= (1 << 22);
+		ew32(CTRL_EXT, reg);
+		break;
+	default:
+		break;
+	}
+
+	if (hw->mac.type == e1000_82571) {
+		reg = er32(PBA_ECC);
+		reg |= E1000_PBA_ECC_CORR_EN;
+		ew32(PBA_ECC, reg);
+	}
+	/*
+	 * Workaround for hardware errata.
+	 * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572.
+	 */
+	if ((hw->mac.type == e1000_82571) ||
+	    (hw->mac.type == e1000_82572)) {
+		reg = er32(CTRL_EXT);
+		reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+		ew32(CTRL_EXT, reg);
+	}
+
+	/* PCI-Ex Control Registers */
+	switch (hw->mac.type) {
+	case e1000_82574:
+	case e1000_82583:
+		reg = er32(GCR);
+		reg |= (1 << 22);
+		ew32(GCR, reg);
+
+		/*
+		 * Workaround for hardware errata documented in the errata
+		 * sheets: some error-prone or unreliable PCIe completions
+		 * occur, particularly with ASPM enabled.  Without this fix,
+		 * the issue can cause Tx timeouts.
+		 */
+		reg = er32(GCR2);
+		reg |= 1;
+		ew32(GCR2, reg);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ *  e1000_clear_vfta_82571 - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the register array which contains the VLAN filter table by
+ *  setting all the values to 0.
+ **/
+static void e1000_clear_vfta_82571(struct e1000_hw *hw)
+{
+	u32 offset;
+	u32 vfta_value = 0;
+	u32 vfta_offset = 0;
+	u32 vfta_bit_in_reg = 0;
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		if (hw->mng_cookie.vlan_id != 0) {
+			/*
+			 * The VFTA is a 4096-bit field; each bit identifies
+			 * a single VLAN ID.  The following operations
+			 * determine which 32-bit entry (i.e. offset) of the
+			 * array, and which bit within it, to set for the
+			 * VLAN ID of the manageability unit.
+			 */
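+			/*
+			 * For example (hypothetical value): VLAN ID 0x123
+			 * selects dword 0x123 >> 5 = 9 of the table and bit
+			 * 0x123 & 0x1f = 3 within that dword, assuming the
+			 * usual 5-bit entry shift and 0x1f bit mask.
+			 */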
+			vfta_offset = (hw->mng_cookie.vlan_id >>
+				       E1000_VFTA_ENTRY_SHIFT) &
+				      E1000_VFTA_ENTRY_MASK;
+			vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
+					       E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+		}
+		break;
+	default:
+		break;
+	}
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		/*
+		 * If the offset we want to clear is the same offset of the
+		 * manageability VLAN ID, then clear all bits except that of
+		 * the manageability unit.
+		 */
+		vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value);
+		e1e_flush();
+	}
+}
+
+/**
+ *  e1000_check_mng_mode_82574 - Check manageability is enabled
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the NVM Initialization Control Word 2 and returns true
+ *  (>0) if any manageability is enabled, else false (0).
+ **/
+static bool e1000_check_mng_mode_82574(struct e1000_hw *hw)
+{
+	u16 data;
+
+	e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+	return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0;
+}
+
+/**
+ *  e1000_led_on_82574 - Turn LED on
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED on.
+ **/
+static s32 e1000_led_on_82574(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	u32 i;
+
+	ctrl = hw->mac.ledctl_mode2;
+	if (!(E1000_STATUS_LU & er32(STATUS))) {
+		/*
+		 * If no link, then turn LED on by setting the invert bit
+		 * for each LED that's "on" (0x0E) in ledctl_mode2.
+		 */
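+		/*
+		 * For example (hypothetical register value): if bits 15:8 of
+		 * ledctl_mode2 hold the LED_ON mode, the invert bit for LED1
+		 * (LED0_IVRT shifted left by 8) is set in the value written
+		 * to LEDCTL.
+		 */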
+		for (i = 0; i < 4; i++)
+			if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+			    E1000_LEDCTL_MODE_LED_ON)
+				ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8));
+	}
+	ew32(LEDCTL, ctrl);
+
+	return 0;
+}
+
+/**
+ *  e1000_check_phy_82574 - check 82574 phy hung state
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns whether phy is hung or not
+ **/
+bool e1000_check_phy_82574(struct e1000_hw *hw)
+{
+	u16 status_1kbt = 0;
+	u16 receive_errors = 0;
+	bool phy_hung = false;
+	s32 ret_val = 0;
+
+	/*
+	 * Read the PHY Receive Error counter first; if it is at its maximum
+	 * (all F's), read the Base1000T status register.  If both are at
+	 * their maximum, the PHY is hung.
+	 */
+	ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
+
+	if (ret_val)
+		goto out;
+	if (receive_errors == E1000_RECEIVE_ERROR_MAX)  {
+		ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
+		if (ret_val)
+			goto out;
+		if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
+		    E1000_IDLE_ERROR_COUNT_MASK)
+			phy_hung = true;
+	}
+out:
+	return phy_hung;
+}
+
+/**
+ *  e1000_setup_link_82571 - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+static s32 e1000_setup_link_82571(struct e1000_hw *hw)
+{
+	/*
+	 * 82573 does not have a word in the NVM to determine
+	 * the default flow control setting, so we explicitly
+	 * set it to full.
+	 */
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		if (hw->fc.requested_mode == e1000_fc_default)
+			hw->fc.requested_mode = e1000_fc_full;
+		break;
+	default:
+		break;
+	}
+
+	return e1000e_setup_link(hw);
+}
+
+/**
+ *  e1000_setup_copper_link_82571 - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the link for auto-neg or forced speed and duplex.  Then we check
+ *  for link, once link is established calls to configure collision distance
+ *  and flow control are called.
+ **/
+static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+
+	ctrl = er32(CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ew32(CTRL, ctrl);
+
+	switch (hw->phy.type) {
+	case e1000_phy_m88:
+	case e1000_phy_bm:
+		ret_val = e1000e_copper_link_setup_m88(hw);
+		break;
+	case e1000_phy_igp_2:
+		ret_val = e1000e_copper_link_setup_igp(hw);
+		break;
+	default:
+		return -E1000_ERR_PHY;
+	}
+
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000e_setup_copper_link(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures collision distance and flow control for fiber and serdes links.
+ *  Upon successful setup, poll for link.
+ **/
+static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
+{
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		/*
+		 * If SerDes loopback mode is entered, there is no form
+		 * of reset to take the adapter out of that mode.  So we
+		 * have to explicitly take the adapter out of loopback
+		 * mode.  This prevents drivers from twiddling their thumbs
+		 * if another tool failed to take it out of loopback mode.
+		 */
+		ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+		break;
+	default:
+		break;
+	}
+
+	return e1000e_setup_fiber_serdes_link(hw);
+}
+
+/**
+ *  e1000_check_for_serdes_link_82571 - Check for link (Serdes)
+ *  @hw: pointer to the HW structure
+ *
+ *  Reports the link state as up or down.
+ *
+ *  If autonegotiation is supported by the link partner, the link state is
+ *  determined by the result of autonegotiation. This is the most likely case.
+ *  If autonegotiation is not supported by the link partner, and the link
+ *  has a valid signal, force the link up.
+ *
+ *  The link state is represented internally here by 4 states:
+ *
+ *  1) down
+ *  2) autoneg_progress
+ *  3) autoneg_complete (the link successfully autonegotiated)
+ *  4) forced_up (the link has been forced up, it did not autonegotiate)
+ *
+ **/
+static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 rxcw;
+	u32 ctrl;
+	u32 status;
+	u32 txcw;
+	u32 i;
+	s32 ret_val = 0;
+
+	ctrl = er32(CTRL);
+	status = er32(STATUS);
+	rxcw = er32(RXCW);
+
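+	/*
+	 * Summary of the state transitions handled below:
+	 *   DOWN      -> AN_PROG   when sync is regained
+	 *   AN_PROG   -> AN_UP     when /C/ is received and LU is set
+	 *   AN_PROG   -> FORCED_UP when the partner does not autonegotiate
+	 *   FORCED_UP -> AN_PROG   when /C/ or a null code word is seen
+	 *   AN_UP     -> AN_PROG   when link is lost
+	 *   ANYSTATE  -> DOWN      when sync is lost
+	 */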
+	if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
+
+		/* Receiver is synchronized with no invalid bits.  */
+		switch (mac->serdes_link_state) {
+		case e1000_serdes_link_autoneg_complete:
+			if (!(status & E1000_STATUS_LU)) {
+				/*
+				 * We have lost link, retry autoneg before
+				 * reporting link failure
+				 */
+				mac->serdes_link_state =
+				    e1000_serdes_link_autoneg_progress;
+				mac->serdes_has_link = false;
+				e_dbg("AN_UP     -> AN_PROG\n");
+			} else {
+				mac->serdes_has_link = true;
+			}
+			break;
+
+		case e1000_serdes_link_forced_up:
+			/*
+			 * If we are receiving /C/ ordered sets, re-enable
+			 * auto-negotiation in the TXCW register and disable
+			 * forced link in the Device Control register in an
+			 * attempt to auto-negotiate with our link partner.
+			 * If the partner code word is null, stop forcing
+			 * and restart auto negotiation.
+			 */
+			if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW))  {
+				/* Enable autoneg, and unforce link up */
+				ew32(TXCW, mac->txcw);
+				ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+				mac->serdes_link_state =
+				    e1000_serdes_link_autoneg_progress;
+				mac->serdes_has_link = false;
+				e_dbg("FORCED_UP -> AN_PROG\n");
+			} else {
+				mac->serdes_has_link = true;
+			}
+			break;
+
+		case e1000_serdes_link_autoneg_progress:
+			if (rxcw & E1000_RXCW_C) {
+				/*
+				 * We received /C/ ordered sets, meaning the
+				 * link partner has autonegotiated, and we can
+				 * trust the Link Up (LU) status bit.
+				 */
+				if (status & E1000_STATUS_LU) {
+					mac->serdes_link_state =
+					    e1000_serdes_link_autoneg_complete;
+					e_dbg("AN_PROG   -> AN_UP\n");
+					mac->serdes_has_link = true;
+				} else {
+					/* Autoneg completed, but failed. */
+					mac->serdes_link_state =
+					    e1000_serdes_link_down;
+					e_dbg("AN_PROG   -> DOWN\n");
+				}
+			} else {
+				/*
+				 * The link partner did not autoneg.
+				 * Force link up and full duplex, and change
+				 * state to forced.
+				 */
+				ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+				ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+				ew32(CTRL, ctrl);
+
+				/* Configure Flow Control after link up. */
+				ret_val = e1000e_config_fc_after_link_up(hw);
+				if (ret_val) {
+					e_dbg("Error config flow control\n");
+					break;
+				}
+				mac->serdes_link_state =
+				    e1000_serdes_link_forced_up;
+				mac->serdes_has_link = true;
+				e_dbg("AN_PROG   -> FORCED_UP\n");
+			}
+			break;
+
+		case e1000_serdes_link_down:
+		default:
+			/*
+			 * The link was down but the receiver has now gained
+			 * valid sync, so let's see if we can bring the link
+			 * up.
+			 */
+			ew32(TXCW, mac->txcw);
+			ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+			mac->serdes_link_state =
+			    e1000_serdes_link_autoneg_progress;
+			mac->serdes_has_link = false;
+			e_dbg("DOWN      -> AN_PROG\n");
+			break;
+		}
+	} else {
+		if (!(rxcw & E1000_RXCW_SYNCH)) {
+			mac->serdes_has_link = false;
+			mac->serdes_link_state = e1000_serdes_link_down;
+			e_dbg("ANYSTATE  -> DOWN\n");
+		} else {
+			/*
+			 * Check several times, if Sync and Config
+			 * both are consistently 1 then simply ignore
+			 * the Invalid bit and restart Autoneg
+			 */
+			for (i = 0; i < AN_RETRY_COUNT; i++) {
+				udelay(10);
+				rxcw = er32(RXCW);
+				if ((rxcw & E1000_RXCW_IV) &&
+				    !((rxcw & E1000_RXCW_SYNCH) &&
+				      (rxcw & E1000_RXCW_C))) {
+					mac->serdes_has_link = false;
+					mac->serdes_link_state =
+					    e1000_serdes_link_down;
+					e_dbg("ANYSTATE  -> DOWN\n");
+					break;
+				}
+			}
+
+			if (i == AN_RETRY_COUNT) {
+				txcw = er32(TXCW);
+				txcw |= E1000_TXCW_ANE;
+				ew32(TXCW, txcw);
+				mac->serdes_link_state =
+				    e1000_serdes_link_autoneg_progress;
+				mac->serdes_has_link = false;
+				e_dbg("ANYSTATE  -> AN_PROG\n");
+			}
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_valid_led_default_82571 - Verify a valid default LED config
+ *  @hw: pointer to the HW structure
+ *  @data: pointer to the NVM (EEPROM)
+ *
+ *  Read the EEPROM for the current default LED configuration.  If the
+ *  LED configuration is not valid, set to a valid LED configuration.
+ **/
+static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		return ret_val;
+	}
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		if (*data == ID_LED_RESERVED_F746)
+			*data = ID_LED_DEFAULT_82573;
+		break;
+	default:
+		if (*data == ID_LED_RESERVED_0000 ||
+		    *data == ID_LED_RESERVED_FFFF)
+			*data = ID_LED_DEFAULT;
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_get_laa_state_82571 - Get locally administered address state
+ *  @hw: pointer to the HW structure
+ *
+ *  Retrieve and return the current locally administered address state.
+ **/
+bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
+{
+	if (hw->mac.type != e1000_82571)
+		return false;
+
+	return hw->dev_spec.e82571.laa_is_present;
+}
+
+/**
+ *  e1000e_set_laa_state_82571 - Set locally administered address state
+ *  @hw: pointer to the HW structure
+ *  @state: enable/disable locally administered address
+ *
+ *  Enable/Disable the current locally administered address state.
+ **/
+void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
+{
+	if (hw->mac.type != e1000_82571)
+		return;
+
+	hw->dev_spec.e82571.laa_is_present = state;
+
+	/* If workaround is activated... */
+	if (state)
+		/*
+		 * Hold a copy of the LAA in RAR[14].  This is done so that
+		 * between the time RAR[0] gets clobbered and the time it
+		 * gets fixed, the actual LAA is in one of the RARs and no
+		 * incoming packets directed to this port are dropped.
+		 * Eventually the LAA will be in RAR[0] and RAR[14].
+		 */
+		e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1);
+}
+
+/**
+ *  e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Verifies that the EEPROM has completed the update.  After updating the
+ *  EEPROM, we need to check bit 15 in word 0x23 for the checksum fix.  If
+ *  the checksum fix is not implemented, we need to set the bit and update
+ *  the checksum.  Otherwise, if bit 15 is set and the checksum is incorrect,
+ *  we need to return bad checksum.
+ **/
+static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32 ret_val;
+	u16 data;
+
+	if (nvm->type != e1000_nvm_flash_hw)
+		return 0;
+
+	/*
+	 * Check bit 4 of word 10h.  If it is 0, firmware is done updating
+	 * 10h-12h.  Checksum may need to be fixed.
+	 */
+	ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
+	if (ret_val)
+		return ret_val;
+
+	if (!(data & 0x10)) {
+		/*
+		 * Read 0x23 and check bit 15.  This bit is a 1
+		 * when the checksum has already been fixed.  If
+		 * the checksum is still wrong and this bit is a
+		 * 1, we need to return bad checksum.  Otherwise,
+		 * we need to set this bit to a 1 and update the
+		 * checksum.
+		 */
+		ret_val = e1000_read_nvm(hw, 0x23, 1, &data);
+		if (ret_val)
+			return ret_val;
+
+		if (!(data & 0x8000)) {
+			data |= 0x8000;
+			ret_val = e1000_write_nvm(hw, 0x23, 1, &data);
+			if (ret_val)
+				return ret_val;
+			ret_val = e1000e_update_nvm_checksum(hw);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_read_mac_addr_82571 - Read device MAC address
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	if (hw->mac.type == e1000_82571) {
+		/*
+		 * If there's an alternate MAC address place it in RAR0
+		 * so that it will override the Si installed default perm
+		 * address.
+		 */
+		ret_val = e1000_check_alt_mac_addr_generic(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_82571 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or when wake on LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_mac_info *mac = &hw->mac;
+
+	if (!(phy->ops.check_reset_block))
+		return;
+
+	/* If the management interface is not enabled, then power down */
+	if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+}
+
+/**
+ *  e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
+{
+	e1000e_clear_hw_cntrs_base(hw);
+
+	er32(PRC64);
+	er32(PRC127);
+	er32(PRC255);
+	er32(PRC511);
+	er32(PRC1023);
+	er32(PRC1522);
+	er32(PTC64);
+	er32(PTC127);
+	er32(PTC255);
+	er32(PTC511);
+	er32(PTC1023);
+	er32(PTC1522);
+
+	er32(ALGNERRC);
+	er32(RXERRC);
+	er32(TNCRS);
+	er32(CEXTERR);
+	er32(TSCTC);
+	er32(TSCTFC);
+
+	er32(MGTPRC);
+	er32(MGTPDC);
+	er32(MGTPTC);
+
+	er32(IAC);
+	er32(ICRXOC);
+
+	er32(ICRXPTC);
+	er32(ICRXATC);
+	er32(ICTXPTC);
+	er32(ICTXATC);
+	er32(ICTXQEC);
+	er32(ICTXQMTC);
+	er32(ICRXDMTC);
+}
+
+static const struct e1000_mac_operations e82571_mac_ops = {
+	/* .check_mng_mode: mac type dependent */
+	/* .check_for_link: media type dependent */
+	.id_led_init		= e1000e_id_led_init,
+	.cleanup_led		= e1000e_cleanup_led_generic,
+	.clear_hw_cntrs		= e1000_clear_hw_cntrs_82571,
+	.get_bus_info		= e1000e_get_bus_info_pcie,
+	.set_lan_id		= e1000_set_lan_id_multi_port_pcie,
+	/* .get_link_up_info: media type dependent */
+	/* .led_on: mac type dependent */
+	.led_off		= e1000e_led_off_generic,
+	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
+	.write_vfta		= e1000_write_vfta_generic,
+	.clear_vfta		= e1000_clear_vfta_82571,
+	.reset_hw		= e1000_reset_hw_82571,
+	.init_hw		= e1000_init_hw_82571,
+	.setup_link		= e1000_setup_link_82571,
+	/* .setup_physical_interface: media type dependent */
+	.setup_led		= e1000e_setup_led_generic,
+	.read_mac_addr		= e1000_read_mac_addr_82571,
+};
+
+static const struct e1000_phy_operations e82_phy_ops_igp = {
+	.acquire		= e1000_get_hw_semaphore_82571,
+	.check_polarity		= e1000_check_polarity_igp,
+	.check_reset_block	= e1000e_check_reset_block_generic,
+	.commit			= NULL,
+	.force_speed_duplex	= e1000e_phy_force_speed_duplex_igp,
+	.get_cfg_done		= e1000_get_cfg_done_82571,
+	.get_cable_length	= e1000e_get_cable_length_igp_2,
+	.get_info		= e1000e_get_phy_info_igp,
+	.read_reg		= e1000e_read_phy_reg_igp,
+	.release		= e1000_put_hw_semaphore_82571,
+	.reset			= e1000e_phy_hw_reset_generic,
+	.set_d0_lplu_state	= e1000_set_d0_lplu_state_82571,
+	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
+	.write_reg		= e1000e_write_phy_reg_igp,
+	.cfg_on_link_up		= NULL,
+};
+
+static const struct e1000_phy_operations e82_phy_ops_m88 = {
+	.acquire		= e1000_get_hw_semaphore_82571,
+	.check_polarity		= e1000_check_polarity_m88,
+	.check_reset_block	= e1000e_check_reset_block_generic,
+	.commit			= e1000e_phy_sw_reset,
+	.force_speed_duplex	= e1000e_phy_force_speed_duplex_m88,
+	.get_cfg_done		= e1000e_get_cfg_done,
+	.get_cable_length	= e1000e_get_cable_length_m88,
+	.get_info		= e1000e_get_phy_info_m88,
+	.read_reg		= e1000e_read_phy_reg_m88,
+	.release		= e1000_put_hw_semaphore_82571,
+	.reset			= e1000e_phy_hw_reset_generic,
+	.set_d0_lplu_state	= e1000_set_d0_lplu_state_82571,
+	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
+	.write_reg		= e1000e_write_phy_reg_m88,
+	.cfg_on_link_up		= NULL,
+};
+
+static const struct e1000_phy_operations e82_phy_ops_bm = {
+	.acquire		= e1000_get_hw_semaphore_82571,
+	.check_polarity		= e1000_check_polarity_m88,
+	.check_reset_block	= e1000e_check_reset_block_generic,
+	.commit			= e1000e_phy_sw_reset,
+	.force_speed_duplex	= e1000e_phy_force_speed_duplex_m88,
+	.get_cfg_done		= e1000e_get_cfg_done,
+	.get_cable_length	= e1000e_get_cable_length_m88,
+	.get_info		= e1000e_get_phy_info_m88,
+	.read_reg		= e1000e_read_phy_reg_bm2,
+	.release		= e1000_put_hw_semaphore_82571,
+	.reset			= e1000e_phy_hw_reset_generic,
+	.set_d0_lplu_state	= e1000_set_d0_lplu_state_82571,
+	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
+	.write_reg		= e1000e_write_phy_reg_bm2,
+	.cfg_on_link_up		= NULL,
+};
+
+static const struct e1000_nvm_operations e82571_nvm_ops = {
+	.acquire		= e1000_acquire_nvm_82571,
+	.read			= e1000e_read_nvm_eerd,
+	.release		= e1000_release_nvm_82571,
+	.update			= e1000_update_nvm_checksum_82571,
+	.valid_led_default	= e1000_valid_led_default_82571,
+	.validate		= e1000_validate_nvm_checksum_82571,
+	.write			= e1000_write_nvm_82571,
+};
+
+const struct e1000_info e1000_82571_info = {
+	.mac			= e1000_82571,
+	.flags			= FLAG_HAS_HW_VLAN_FILTER
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_HAS_WOL
+				  | FLAG_APME_IN_CTRL3
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_HAS_SMART_POWER_DOWN
+				  | FLAG_RESET_OVERWRITES_LAA /* errata */
+				  | FLAG_TARC_SPEED_MODE_BIT /* errata */
+				  | FLAG_APME_CHECK_PORT_B,
+	.flags2			= FLAG2_DISABLE_ASPM_L1 /* errata 13 */
+				  | FLAG2_DMA_BURST,
+	.pba			= 38,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_82571,
+	.mac_ops		= &e82571_mac_ops,
+	.phy_ops		= &e82_phy_ops_igp,
+	.nvm_ops		= &e82571_nvm_ops,
+};
+
+const struct e1000_info e1000_82572_info = {
+	.mac			= e1000_82572,
+	.flags			= FLAG_HAS_HW_VLAN_FILTER
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_HAS_WOL
+				  | FLAG_APME_IN_CTRL3
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_TARC_SPEED_MODE_BIT, /* errata */
+	.flags2			= FLAG2_DISABLE_ASPM_L1 /* errata 13 */
+				  | FLAG2_DMA_BURST,
+	.pba			= 38,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_82571,
+	.mac_ops		= &e82571_mac_ops,
+	.phy_ops		= &e82_phy_ops_igp,
+	.nvm_ops		= &e82571_nvm_ops,
+};
+
+const struct e1000_info e1000_82573_info = {
+	.mac			= e1000_82573,
+	.flags			= FLAG_HAS_HW_VLAN_FILTER
+				  | FLAG_HAS_WOL
+				  | FLAG_APME_IN_CTRL3
+				  | FLAG_HAS_SMART_POWER_DOWN
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_SWSM_ON_LOAD,
+	.flags2			= FLAG2_DISABLE_ASPM_L1
+				  | FLAG2_DISABLE_ASPM_L0S,
+	.pba			= 20,
+	.max_hw_frame_size	= ETH_FRAME_LEN + ETH_FCS_LEN,
+	.get_variants		= e1000_get_variants_82571,
+	.mac_ops		= &e82571_mac_ops,
+	.phy_ops		= &e82_phy_ops_m88,
+	.nvm_ops		= &e82571_nvm_ops,
+};
+
+const struct e1000_info e1000_82574_info = {
+	.mac			= e1000_82574,
+	.flags			= FLAG_HAS_HW_VLAN_FILTER
+				  | FLAG_HAS_MSIX
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_HAS_WOL
+				  | FLAG_APME_IN_CTRL3
+				  | FLAG_HAS_SMART_POWER_DOWN
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_CTRLEXT_ON_LOAD,
+	.flags2			= FLAG2_CHECK_PHY_HANG
+				  | FLAG2_DISABLE_ASPM_L0S
+				  | FLAG2_NO_DISABLE_RX,
+	.pba			= 32,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_82571,
+	.mac_ops		= &e82571_mac_ops,
+	.phy_ops		= &e82_phy_ops_bm,
+	.nvm_ops		= &e82571_nvm_ops,
+};
+
+const struct e1000_info e1000_82583_info = {
+	.mac			= e1000_82583,
+	.flags			= FLAG_HAS_HW_VLAN_FILTER
+				  | FLAG_HAS_WOL
+				  | FLAG_APME_IN_CTRL3
+				  | FLAG_HAS_SMART_POWER_DOWN
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_HAS_CTRLEXT_ON_LOAD,
+	.flags2			= FLAG2_DISABLE_ASPM_L0S
+				  | FLAG2_NO_DISABLE_RX,
+	.pba			= 32,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_82571,
+	.mac_ops		= &e82571_mac_ops,
+	.phy_ops		= &e82_phy_ops_bm,
+	.nvm_ops		= &e82571_nvm_ops,
+};
+
+++ linux-patched/drivers/xenomai/net/drivers/e1000e/80003es2lan.c	2022-03-21 12:58:29.556887333 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000e/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * 80003ES2LAN Gigabit Ethernet Controller (Copper)
+ * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
+ */
+
+#include "e1000.h"
+
+#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL	 0x00
+#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL	 0x02
+#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL	 0x10
+#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE	 0x1F
+
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS	 0x0008
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS	 0x0800
+#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING	 0x0010
+
+#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
+#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT	 0x0000
+#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE		 0x2000
+
+#define E1000_KMRNCTRLSTA_OPMODE_MASK		 0x000C
+#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO	 0x0004
+
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
+#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN	 0x00010000
+
+#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN	 0x8
+#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN	 0x9
+
+/* GG82563 PHY Specific Status Register (Page 0, Register 16) */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE	 0x0002 /* 1=Reversal Disab. */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK	 0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI		 0x0000 /* 00=Manual MDI */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX	 0x0020 /* 01=Manual MDIX */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO	 0x0060 /* 11=Auto crossover */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG		 0x2000
+						/* 1=Reverse Auto-Negotiation */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK		 0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5		 0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25		 0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_25		 0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX		 0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26) */
+#define GG82563_DSPD_CABLE_LENGTH		 0x0007 /* 0 = <50M
+							   1 = 50-80M
+							   2 = 80-110M
+							   3 = 110-140M
+							   4 = >140M */
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PASS_FALSE_CARRIER		 0x0800
+
+/* Max number of times Kumeran read/write should be validated */
+#define GG82563_MAX_KMRN_RETRY  0x5
+
+/* Power Management Control Register (Page 193, Register 20) */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE	 0x0001
+					   /* 1=Enable SERDES Electrical Idle */
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING			 0x0010 /* Disable Padding */
+
+/*
+ * A table for the GG82563 cable length where the range is defined
+ * with a lower bound at "index" and the upper bound at
+ * "index + 5".
+ */
+static const u16 e1000_gg82563_cable_length_table[] = {
+	 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
+#define GG82563_CABLE_LENGTH_TABLE_SIZE \
+		ARRAY_SIZE(e1000_gg82563_cable_length_table)
+
+static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw);
+static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
+static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
+static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
+static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
+static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
+static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw);
+static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+					   u16 *data);
+static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+					    u16 data);
+static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);
+
+/**
+ *  e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+
+	if (hw->phy.media_type != e1000_media_type_copper) {
+		phy->type	= e1000_phy_none;
+		return 0;
+	} else {
+		phy->ops.power_up = e1000_power_up_phy_copper;
+		phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan;
+	}
+
+	phy->addr		= 1;
+	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;
+	phy->reset_delay_us      = 100;
+	phy->type		= e1000_phy_gg82563;
+
+	/* This can only be done after all function pointers are setup. */
+	ret_val = e1000e_get_phy_id(hw);
+
+	/* Verify phy id */
+	if (phy->id != GG82563_E_PHY_ID)
+		return -E1000_ERR_PHY;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = er32(EECD);
+	u16 size;
+
+	nvm->opcode_bits	= 8;
+	nvm->delay_usec	 = 1;
+	switch (nvm->override) {
+	case e1000_nvm_override_spi_large:
+		nvm->page_size    = 32;
+		nvm->address_bits = 16;
+		break;
+	case e1000_nvm_override_spi_small:
+		nvm->page_size    = 8;
+		nvm->address_bits = 8;
+		break;
+	default:
+		nvm->page_size    = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+		break;
+	}
+
+	nvm->type = e1000_nvm_eeprom_spi;
+
+	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+			  E1000_EECD_SIZE_EX_SHIFT);
+
+	/*
+	 * Added to a constant, "size" becomes the left-shift value
+	 * for setting word_size.
+	 */
+	size += NVM_WORD_SIZE_BASE_SHIFT;
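+	/*
+	 * For example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as on other
+	 * e1000e parts), a size field of 2 read from EECD gives
+	 * word_size = 1 << (2 + 6) = 256 words.
+	 */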
+
+	/* EEPROM access above 16k is unsupported */
+	if (size > 14)
+		size = 14;
+	nvm->word_size	= 1 << size;
+
+	return 0;
+}
+
+/**
+ *  e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_mac_operations *func = &mac->ops;
+
+	/* Set media type */
+	switch (adapter->pdev->device) {
+	case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+		hw->phy.media_type = e1000_media_type_internal_serdes;
+		break;
+	default:
+		hw->phy.media_type = e1000_media_type_copper;
+		break;
+	}
+
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_RAR_ENTRIES;
+	/* FWSM register */
+	mac->has_fwsm = true;
+	/* ARC supported; valid only if manageability features are enabled. */
+	mac->arc_subsystem_valid =
+		(er32(FWSM) & E1000_FWSM_MODE_MASK) ? true : false;
+	/* Adaptive IFS not supported */
+	mac->adaptive_ifs = false;
+
+	/* check for link */
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
+		func->check_for_link = e1000e_check_for_copper_link;
+		break;
+	case e1000_media_type_fiber:
+		func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
+		func->check_for_link = e1000e_check_for_fiber_link;
+		break;
+	case e1000_media_type_internal_serdes:
+		func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
+		func->check_for_link = e1000e_check_for_serdes_link;
+		break;
+	default:
+		return -E1000_ERR_CONFIG;
+	}
+
+	/* set lan id for port to determine which phy lock to use */
+	hw->mac.ops.set_lan_id(hw);
+
+	return 0;
+}
+
+static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	s32 rc;
+
+	rc = e1000_init_mac_params_80003es2lan(adapter);
+	if (rc)
+		return rc;
+
+	rc = e1000_init_nvm_params_80003es2lan(hw);
+	if (rc)
+		return rc;
+
+	rc = e1000_init_phy_params_80003es2lan(hw);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+/**
+ *  e1000_acquire_phy_80003es2lan - Acquire rights to access PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  A wrapper to acquire access rights to the correct PHY.
+ **/
+static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
+{
+	u16 mask;
+
+	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+	return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ *  e1000_release_phy_80003es2lan - Release rights to access PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  A wrapper to release access rights to the correct PHY.
+ **/
+static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
+{
+	u16 mask;
+
+	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+	e1000_release_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ *  e1000_acquire_mac_csr_80003es2lan - Acquire rights to access Kumeran register
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the semaphore to access the Kumeran interface.
+ *
+ **/
+static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw)
+{
+	u16 mask;
+
+	mask = E1000_SWFW_CSR_SM;
+
+	return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ *  e1000_release_mac_csr_80003es2lan - Release rights to access Kumeran Register
+ *  @hw: pointer to the HW structure
+ *
+ *  Release the semaphore used to access the Kumeran interface
+ **/
+static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw)
+{
+	u16 mask;
+
+	mask = E1000_SWFW_CSR_SM;
+
+	e1000_release_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ *  e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the semaphore to access the EEPROM.
+ **/
+static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000e_acquire_nvm(hw);
+
+	if (ret_val)
+		e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_release_nvm_80003es2lan - Relinquish rights to access NVM
+ *  @hw: pointer to the HW structure
+ *
+ *  Release the semaphore used to access the EEPROM.
+ **/
+static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
+{
+	e1000e_release_nvm(hw);
+	e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ *  e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
+ *  will also specify which port we're acquiring the lock for.
+ **/
+static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+	u32 swmask = mask;
+	u32 fwmask = mask << 16;
+	s32 i = 0;
+	s32 timeout = 50;
+
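+	/*
+	 * The software ownership bits live in the low 16 bits of SW_FW_SYNC
+	 * and the firmware ownership bits in the upper 16, hence
+	 * fwmask = mask << 16.
+	 */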
+	while (i < timeout) {
+		if (e1000e_get_hw_semaphore(hw))
+			return -E1000_ERR_SWFW_SYNC;
+
+		swfw_sync = er32(SW_FW_SYNC);
+		if (!(swfw_sync & (fwmask | swmask)))
+			break;
+
+		/*
+		 * Firmware currently using resource (fwmask)
+		 * or other software thread using resource (swmask)
+		 */
+		e1000e_put_hw_semaphore(hw);
+		mdelay(5);
+		i++;
+	}
+
+	if (i == timeout) {
+		e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
+		return -E1000_ERR_SWFW_SYNC;
+	}
+
+	swfw_sync |= swmask;
+	ew32(SW_FW_SYNC, swfw_sync);
+
+	e1000e_put_hw_semaphore(hw);
+
+	return 0;
+}
+
+/**
+ *  e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
+ *  will also specify which port we're releasing the lock for.
+ **/
+static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+
+	while (e1000e_get_hw_semaphore(hw) != 0)
+		; /* Empty */
+
+	swfw_sync = er32(SW_FW_SYNC);
+	swfw_sync &= ~mask;
+	ew32(SW_FW_SYNC, swfw_sync);
+
+	e1000e_put_hw_semaphore(hw);
+}
+
+/**
+ *  e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of the register to read
+ *  @data: pointer to the data returned from the operation
+ *
+ *  Read the GG82563 PHY register.
+ **/
+static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+						  u32 offset, u16 *data)
+{
+	s32 ret_val;
+	u32 page_select;
+	u16 temp;
+
+	ret_val = e1000_acquire_phy_80003es2lan(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Select Configuration Page */
+	if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+		page_select = GG82563_PHY_PAGE_SELECT;
+	} else {
+		/*
+		 * Use Alternative Page Select register to access
+		 * registers 30 and 31
+		 */
+		page_select = GG82563_PHY_PAGE_SELECT_ALT;
+	}
+
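+	/*
+	 * The page number is carried in the upper bits of "offset"; e.g. a
+	 * register on page 2 would be passed in with the page number above
+	 * GG82563_PAGE_SHIFT (illustrative; the caller-side encoding macro
+	 * is not shown in this file).
+	 */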
+	temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
+	ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
+	if (ret_val) {
+		e1000_release_phy_80003es2lan(hw);
+		return ret_val;
+	}
+
+	if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
+		/*
+		 * The "ready" bit in the MDIC register may be incorrectly set
+		 * before the device has completed the "Page Select" MDI
+		 * transaction.  So we wait 200us after each MDI command...
+		 */
+		udelay(200);
+
+		/* ...and verify the command was successful. */
+		ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
+
+		if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+			ret_val = -E1000_ERR_PHY;
+			e1000_release_phy_80003es2lan(hw);
+			return ret_val;
+		}
+
+		udelay(200);
+
+		ret_val = e1000e_read_phy_reg_mdic(hw,
+		                                  MAX_PHY_REG_ADDRESS & offset,
+		                                  data);
+
+		udelay(200);
+	} else {
+		ret_val = e1000e_read_phy_reg_mdic(hw,
+		                                  MAX_PHY_REG_ADDRESS & offset,
+		                                  data);
+	}
+
+	e1000_release_phy_80003es2lan(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of the register to read
+ *  @data: value to write to the register
+ *
+ *  Write to the GG82563 PHY register.
+ **/
+static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+						   u32 offset, u16 data)
+{
+	s32 ret_val;
+	u32 page_select;
+	u16 temp;
+
+	ret_val = e1000_acquire_phy_80003es2lan(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Select Configuration Page */
+	if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+		page_select = GG82563_PHY_PAGE_SELECT;
+	} else {
+		/*
+		 * Use Alternative Page Select register to access
+		 * registers 30 and 31
+		 */
+		page_select = GG82563_PHY_PAGE_SELECT_ALT;
+	}
+
+	temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
+	ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
+	if (ret_val) {
+		e1000_release_phy_80003es2lan(hw);
+		return ret_val;
+	}
+
+	if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
+		/*
+		 * The "ready" bit in the MDIC register may be incorrectly set
+		 * before the device has completed the "Page Select" MDI
+		 * transaction.  So we wait 200us after each MDI command...
+		 */
+		udelay(200);
+
+		/* ...and verify the command was successful. */
+		ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
+
+		if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+			e1000_release_phy_80003es2lan(hw);
+			return -E1000_ERR_PHY;
+		}
+
+		udelay(200);
+
+		ret_val = e1000e_write_phy_reg_mdic(hw,
+		                                  MAX_PHY_REG_ADDRESS & offset,
+		                                  data);
+
+		udelay(200);
+	} else {
+		ret_val = e1000e_write_phy_reg_mdic(hw,
+		                                  MAX_PHY_REG_ADDRESS & offset,
+		                                  data);
+	}
+
+	e1000_release_phy_80003es2lan(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_write_nvm_80003es2lan - Write to ESB2 NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of the register to read
+ *  @words: number of words to write
+ *  @data: buffer of data to write to the NVM
+ *
+ *  Write "words" of data to the ESB2 NVM.
+ **/
+static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
+				       u16 words, u16 *data)
+{
+	return e1000e_write_nvm_spi(hw, offset, words, data);
+}
+
+/**
+ *  e1000_get_cfg_done_80003es2lan - Wait for configuration to complete
+ *  @hw: pointer to the HW structure
+ *
+ *  Wait a specific amount of time for manageability processes to complete.
+ *  This is a function pointer entry point called by the phy module.
+ **/
+static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
+{
+	s32 timeout = PHY_CFG_TIMEOUT;
+	u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+	if (hw->bus.func == 1)
+		mask = E1000_NVM_CFG_DONE_PORT_1;
+
+	while (timeout) {
+		if (er32(EEMNGCTL) & mask)
+			break;
+		usleep_range(1000, 2000);
+		timeout--;
+	}
+	if (!timeout) {
+		e_dbg("MNG configuration cycle has not completed.\n");
+		return -E1000_ERR_RESET;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex
+ *  @hw: pointer to the HW structure
+ *
+ *  Force the speed and duplex settings onto the PHY.  This is a
+ *  function pointer entry point called by the phy module.
+ **/
+static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	/*
+	 * Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO;
+	ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	e_dbg("GG82563 PSCR: %X\n", phy_data);
+
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	/* Reset the phy to commit changes. */
+	phy_data |= MII_CR_RESET;
+
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	udelay(1);
+
+	if (hw->phy.autoneg_wait_to_complete) {
+		e_dbg("Waiting for forced speed/duplex link "
+			 "on GG82563 phy.\n");
+
+		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+						     100000, &link);
+		if (ret_val)
+			return ret_val;
+
+		if (!link) {
+			/*
+			 * We didn't get link.
+			 * Reset the DSP and cross our fingers.
+			 */
+			ret_val = e1000e_phy_reset_dsp(hw);
+			if (ret_val)
+				return ret_val;
+		}
+
+		/* Try once more */
+		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+						     100000, &link);
+		if (ret_val)
+			return ret_val;
+	}
+
+	ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Resetting the phy means we need to verify the TX_CLK corresponds
+	 * to the link speed.  10Mbps -> 2.5MHz, else 25MHz.
+	 */
+	phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
+	if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED)
+		phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5;
+	else
+		phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
+
+	/*
+	 * In addition, we must re-enable CRS on Tx for both half and full
+	 * duplex.
+	 */
+	phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+	ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_get_cable_length_80003es2lan - Set approximate cable length
+ *  @hw: pointer to the HW structure
+ *
+ *  Find the approximate cable length as measured by the GG82563 PHY.
+ *  This is a function pointer entry point called by the phy module.
+ **/
+static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 phy_data, index;
+
+	ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
+	if (ret_val)
+		goto out;
+
+	index = phy_data & GG82563_DSPD_CABLE_LENGTH;
+
+	if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
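+	/*
+	 * For example, an index of 0 maps to the 0-60 meter range
+	 * (table[0] = 0, table[0 + 5] = 60).
+	 */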
+	phy->min_cable_length = e1000_gg82563_cable_length_table[index];
+	phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5];
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_link_up_info_80003es2lan - Report speed and duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: pointer to speed buffer
+ *  @duplex: pointer to duplex buffer
+ *
+ *  Retrieve the current speed and duplex configuration.
+ **/
+static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
+					      u16 *duplex)
+{
+	s32 ret_val;
+
+	if (hw->phy.media_type == e1000_media_type_copper) {
+		ret_val = e1000e_get_speed_and_duplex_copper(hw, speed,
+							     duplex);
+		hw->phy.ops.cfg_on_link_up(hw);
+	} else {
+		ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw, speed,
+								   duplex);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_reset_hw_80003es2lan - Reset the ESB2 controller
+ *  @hw: pointer to the HW structure
+ *
+ *  Perform a global reset to the ESB2 controller.
+ **/
+static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = e1000e_disable_pcie_master(hw);
+	if (ret_val)
+		e_dbg("PCI-E Master disable polling has failed.\n");
+
+	e_dbg("Masking off all interrupts\n");
+	ew32(IMC, 0xffffffff);
+
+	ew32(RCTL, 0);
+	ew32(TCTL, E1000_TCTL_PSP);
+	e1e_flush();
+
+	usleep_range(10000, 20000);
+
+	ctrl = er32(CTRL);
+
+	ret_val = e1000_acquire_phy_80003es2lan(hw);
+	e_dbg("Issuing a global reset to MAC\n");
+	ew32(CTRL, ctrl | E1000_CTRL_RST);
+	e1000_release_phy_80003es2lan(hw);
+
+	ret_val = e1000e_get_auto_rd_done(hw);
+	if (ret_val)
+		/* We don't want to continue accessing MAC registers. */
+		return ret_val;
+
+	/* Clear any pending interrupt events. */
+	ew32(IMC, 0xffffffff);
+	er32(ICR);
+
+	ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_hw_80003es2lan - Initialize the ESB2 controller
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize the hw bits, LED, VFTA, MTA, link and hw counters.
+ **/
+static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 reg_data;
+	s32 ret_val;
+	u16 kum_reg_data;
+	u16 i;
+
+	e1000_initialize_hw_bits_80003es2lan(hw);
+
+	/* Initialize identification LED */
+	ret_val = e1000e_id_led_init(hw);
+	if (ret_val)
+		e_dbg("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
+
+	/* Disabling VLAN filtering */
+	e_dbg("Initializing the IEEE VLAN\n");
+	mac->ops.clear_vfta(hw);
+
+	/* Setup the receive address. */
+	e1000e_init_rx_addrs(hw, mac->rar_entry_count);
+
+	/* Zero out the Multicast HASH table */
+	e_dbg("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+	/* Setup link and flow control */
+	ret_val = e1000e_setup_link(hw);
+
+	/* Disable IBIST slave mode (far-end loopback) */
+	e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+					&kum_reg_data);
+	kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+	e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+					 kum_reg_data);
+
+	/* Set the transmit descriptor write-back policy */
+	reg_data = er32(TXDCTL(0));
+	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+		   E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+	ew32(TXDCTL(0), reg_data);
+
+	/* ...for both queues. */
+	reg_data = er32(TXDCTL(1));
+	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+		   E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+	ew32(TXDCTL(1), reg_data);
+
+	/* Enable retransmit on late collisions */
+	reg_data = er32(TCTL);
+	reg_data |= E1000_TCTL_RTLC;
+	ew32(TCTL, reg_data);
+
+	/* Configure Gigabit Carry Extend Padding */
+	reg_data = er32(TCTL_EXT);
+	reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
+	reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN;
+	ew32(TCTL_EXT, reg_data);
+
+	/* Configure Transmit Inter-Packet Gap */
+	reg_data = er32(TIPG);
+	reg_data &= ~E1000_TIPG_IPGT_MASK;
+	reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+	ew32(TIPG, reg_data);
+
+	reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001);
+	reg_data &= ~0x00100000;
+	E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);
+
+	/* default to true to enable the MDIC W/A */
+	hw->dev_spec.e80003es2lan.mdic_wa_enable = true;
+
+	ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+	                              E1000_KMRNCTRLSTA_OFFSET >>
+	                              E1000_KMRNCTRLSTA_OFFSET_SHIFT,
+	                              &i);
+	if (!ret_val) {
+		if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
+		     E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
+			hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
+	}
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_80003es2lan(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes required hardware-dependent bits needed for normal operation.
+ **/
+static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
+{
+	u32 reg;
+
+	/* Transmit Descriptor Control 0 */
+	reg = er32(TXDCTL(0));
+	reg |= (1 << 22);
+	ew32(TXDCTL(0), reg);
+
+	/* Transmit Descriptor Control 1 */
+	reg = er32(TXDCTL(1));
+	reg |= (1 << 22);
+	ew32(TXDCTL(1), reg);
+
+	/* Transmit Arbitration Control 0 */
+	reg = er32(TARC(0));
+	reg &= ~(0xF << 27); /* 30:27 */
+	if (hw->phy.media_type != e1000_media_type_copper)
+		reg &= ~(1 << 20);
+	ew32(TARC(0), reg);
+
+	/* Transmit Arbitration Control 1 */
+	reg = er32(TARC(1));
+	if (er32(TCTL) & E1000_TCTL_MULR)
+		reg &= ~(1 << 28);
+	else
+		reg |= (1 << 28);
+	ew32(TARC(1), reg);
+}
+
+/**
+ *  e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link
+ *  @hw: pointer to the HW structure
+ *
+ *  Setup some GG82563 PHY registers for obtaining link
+ **/
+static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u32 ctrl_ext;
+	u16 data;
+
+	ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
+	if (ret_val)
+		return ret_val;
+
+	data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+	/* Use 25MHz for both link down and 1000Base-T for Tx clock. */
+	data |= GG82563_MSCR_TX_CLK_1000MBPS_25;
+
+	ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, data);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Options:
+	 *   MDI/MDI-X = 0 (default)
+	 *   0 - Auto for all speeds
+	 *   1 - MDI mode
+	 *   2 - MDI-X mode
+	 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+	 */
+	ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data);
+	if (ret_val)
+		return ret_val;
+
+	data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
+
+	switch (phy->mdix) {
+	case 1:
+		data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
+		break;
+	case 2:
+		data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
+		break;
+	case 0:
+	default:
+		data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
+		break;
+	}
+
+	/*
+	 * Options:
+	 *   disable_polarity_correction = 0 (default)
+	 *       Automatic Correction for Reversed Cable Polarity
+	 *   0 - Disabled
+	 *   1 - Enabled
+	 */
+	data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+	if (phy->disable_polarity_correction)
+		data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+
+	ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data);
+	if (ret_val)
+		return ret_val;
+
+	/* SW Reset the PHY so all changes take effect */
+	ret_val = e1000e_commit_phy(hw);
+	if (ret_val) {
+		e_dbg("Error Resetting the PHY\n");
+		return ret_val;
+	}
+
+	/* Bypass Rx and Tx FIFO's */
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+					E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
+					E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
+					E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+				       E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
+				       &data);
+	if (ret_val)
+		return ret_val;
+	data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+					E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
+					data);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data);
+	if (ret_val)
+		return ret_val;
+
+	data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
+	ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data);
+	if (ret_val)
+		return ret_val;
+
+	ctrl_ext = er32(CTRL_EXT);
+	ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
+	ew32(CTRL_EXT, ctrl_ext);
+
+	ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Do not init these registers when the HW is in IAMT mode, since the
+	 * firmware will have already initialized them.  We only initialize
+	 * them if the HW is not in IAMT mode.
+	 */
+	if (!e1000e_check_mng_mode(hw)) {
+		/* Enable Electrical Idle on the PHY */
+		data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+		ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data);
+		if (ret_val)
+			return ret_val;
+
+		data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+		ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/*
+	 * Workaround: Disable padding in Kumeran interface in the MAC
+	 * and in the PHY to avoid CRC errors.
+	 */
+	ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
+	if (ret_val)
+		return ret_val;
+
+	data |= GG82563_ICR_DIS_PADDING;
+	ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data);
+	if (ret_val)
+		return ret_val;
+
+	return 0;
+}
+
+/**
+ *  e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
+ *  @hw: pointer to the HW structure
+ *
+ *  Essentially a wrapper for setting up all things "copper" related.
+ *  This is a function pointer entry point called by the mac module.
+ **/
+static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+	u16 reg_data;
+
+	ctrl = er32(CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ew32(CTRL, ctrl);
+
+	/*
+	 * Set the mac to wait the maximum time between each
+	 * iteration and increase the max iterations when
+	 * polling the phy; this fixes erroneous timeouts at 10Mbps.
+	 */
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
+	                                           0xFFFF);
+	if (ret_val)
+		return ret_val;
+	ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+	                                          &reg_data);
+	if (ret_val)
+		return ret_val;
+	reg_data |= 0x3F;
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+	                                           reg_data);
+	if (ret_val)
+		return ret_val;
+	ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+				      E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+				      &reg_data);
+	if (ret_val)
+		return ret_val;
+	reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+					E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+					reg_data);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000e_setup_copper_link(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
+ *  @hw: pointer to the HW structure
+ *
+ *  Configure the KMRN interface by applying last minute quirks for
+ *  10/100 or gigabit operation once link is up.
+ **/
+static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 speed;
+	u16 duplex;
+
+	if (hw->phy.media_type == e1000_media_type_copper) {
+		ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
+		                                             &duplex);
+		if (ret_val)
+			return ret_val;
+
+		if (speed == SPEED_1000)
+			ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
+		else
+			ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
+ *  @hw: pointer to the HW structure
+ *  @duplex: current duplex setting
+ *
+ *  Configure the KMRN interface by applying last minute quirks for
+ *  10/100 operation.
+ **/
+static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
+{
+	s32 ret_val;
+	u32 tipg;
+	u32 i = 0;
+	u16 reg_data, reg_data2;
+
+	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+	                               E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+	                               reg_data);
+	if (ret_val)
+		return ret_val;
+
+	/* Configure Transmit Inter-Packet Gap */
+	tipg = er32(TIPG);
+	tipg &= ~E1000_TIPG_IPGT_MASK;
+	tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
+	ew32(TIPG, tipg);
+
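+	/*
+	 * The PHY KMRN mode control register is read twice per iteration and
+	 * only trusted once two consecutive reads agree (or the retry limit
+	 * is reached).
+	 */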
+	do {
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+		if (ret_val)
+			return ret_val;
+		i++;
+	} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+	if (duplex == HALF_DUPLEX)
+		reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+	else
+		reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+	ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Configure the KMRN interface by applying last minute quirks for
+ *  gigabit operation.
+ **/
+static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 reg_data, reg_data2;
+	u32 tipg;
+	u32 i = 0;
+
+	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+	                               E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+	                               reg_data);
+	if (ret_val)
+		return ret_val;
+
+	/* Configure Transmit Inter-Packet Gap */
+	tipg = er32(TIPG);
+	tipg &= ~E1000_TIPG_IPGT_MASK;
+	tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+	ew32(TIPG, tipg);
+
+	do {
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+		if (ret_val)
+			return ret_val;
+		i++;
+	} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+	reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+	ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_read_kmrn_reg_80003es2lan - Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquire semaphore, then read the PHY register at offset
+ *  using the kumeran interface.  The information retrieved is stored in data.
+ *  Release the semaphore before exiting.
+ **/
+static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+					   u16 *data)
+{
+	u32 kmrnctrlsta;
+	s32 ret_val = 0;
+
+	ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
+	if (ret_val)
+		return ret_val;
+
+	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+	               E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+	ew32(KMRNCTRLSTA, kmrnctrlsta);
+	e1e_flush();
+
+	udelay(2);
+
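+	/* The requested register value is returned in the low 16 bits. */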
+	kmrnctrlsta = er32(KMRNCTRLSTA);
+	*data = (u16)kmrnctrlsta;
+
+	e1000_release_mac_csr_80003es2lan(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_write_kmrn_reg_80003es2lan - Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquire semaphore, then write the data to PHY register
+ *  at the offset using the kumeran interface.  Release semaphore
+ *  before exiting.
+ **/
+static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+					    u16 data)
+{
+	u32 kmrnctrlsta;
+	s32 ret_val = 0;
+
+	ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
+	if (ret_val)
+		return ret_val;
+
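+	/*
+	 * Unlike the read path, E1000_KMRNCTRLSTA_REN is left clear here;
+	 * the low 16 bits carry the data to be written.
+	 */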
+	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+	               E1000_KMRNCTRLSTA_OFFSET) | data;
+	ew32(KMRNCTRLSTA, kmrnctrlsta);
+	e1e_flush();
+
+	udelay(2);
+
+	e1000_release_mac_csr_80003es2lan(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_read_mac_addr_80003es2lan - Read device MAC address
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	/*
+	 * If there's an alternate MAC address place it in RAR0
+	 * so that it will override the Si installed default perm
+	 * address.
+	 */
+	ret_val = e1000_check_alt_mac_addr_generic(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off the link during
+ * a driver unload, or when wake on lan is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(hw->mac.ops.check_mng_mode(hw) ||
+	      hw->phy.ops.check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+}
+
+/**
+ *  e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
+{
+	e1000e_clear_hw_cntrs_base(hw);
+
+	er32(PRC64);
+	er32(PRC127);
+	er32(PRC255);
+	er32(PRC511);
+	er32(PRC1023);
+	er32(PRC1522);
+	er32(PTC64);
+	er32(PTC127);
+	er32(PTC255);
+	er32(PTC511);
+	er32(PTC1023);
+	er32(PTC1522);
+
+	er32(ALGNERRC);
+	er32(RXERRC);
+	er32(TNCRS);
+	er32(CEXTERR);
+	er32(TSCTC);
+	er32(TSCTFC);
+
+	er32(MGTPRC);
+	er32(MGTPDC);
+	er32(MGTPTC);
+
+	er32(IAC);
+	er32(ICRXOC);
+
+	er32(ICRXPTC);
+	er32(ICRXATC);
+	er32(ICTXPTC);
+	er32(ICTXATC);
+	er32(ICTXQEC);
+	er32(ICTXQMTC);
+	er32(ICRXDMTC);
+}
+
+static const struct e1000_mac_operations es2_mac_ops = {
+	.read_mac_addr		= e1000_read_mac_addr_80003es2lan,
+	.id_led_init		= e1000e_id_led_init,
+	.blink_led		= e1000e_blink_led_generic,
+	.check_mng_mode		= e1000e_check_mng_mode_generic,
+	/* check_for_link dependent on media type */
+	.cleanup_led		= e1000e_cleanup_led_generic,
+	.clear_hw_cntrs		= e1000_clear_hw_cntrs_80003es2lan,
+	.get_bus_info		= e1000e_get_bus_info_pcie,
+	.set_lan_id		= e1000_set_lan_id_multi_port_pcie,
+	.get_link_up_info	= e1000_get_link_up_info_80003es2lan,
+	.led_on			= e1000e_led_on_generic,
+	.led_off		= e1000e_led_off_generic,
+	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
+	.write_vfta		= e1000_write_vfta_generic,
+	.clear_vfta		= e1000_clear_vfta_generic,
+	.reset_hw		= e1000_reset_hw_80003es2lan,
+	.init_hw		= e1000_init_hw_80003es2lan,
+	.setup_link		= e1000e_setup_link,
+	/* setup_physical_interface dependent on media type */
+	.setup_led		= e1000e_setup_led_generic,
+};
+
+static const struct e1000_phy_operations es2_phy_ops = {
+	.acquire		= e1000_acquire_phy_80003es2lan,
+	.check_polarity		= e1000_check_polarity_m88,
+	.check_reset_block	= e1000e_check_reset_block_generic,
+	.commit		 	= e1000e_phy_sw_reset,
+	.force_speed_duplex 	= e1000_phy_force_speed_duplex_80003es2lan,
+	.get_cfg_done       	= e1000_get_cfg_done_80003es2lan,
+	.get_cable_length   	= e1000_get_cable_length_80003es2lan,
+	.get_info       	= e1000e_get_phy_info_m88,
+	.read_reg       	= e1000_read_phy_reg_gg82563_80003es2lan,
+	.release		= e1000_release_phy_80003es2lan,
+	.reset		  	= e1000e_phy_hw_reset_generic,
+	.set_d0_lplu_state  	= NULL,
+	.set_d3_lplu_state  	= e1000e_set_d3_lplu_state,
+	.write_reg      	= e1000_write_phy_reg_gg82563_80003es2lan,
+	.cfg_on_link_up      	= e1000_cfg_on_link_up_80003es2lan,
+};
+
+static const struct e1000_nvm_operations es2_nvm_ops = {
+	.acquire		= e1000_acquire_nvm_80003es2lan,
+	.read			= e1000e_read_nvm_eerd,
+	.release		= e1000_release_nvm_80003es2lan,
+	.update			= e1000e_update_nvm_checksum_generic,
+	.valid_led_default	= e1000e_valid_led_default,
+	.validate		= e1000e_validate_nvm_checksum_generic,
+	.write			= e1000_write_nvm_80003es2lan,
+};
+
+const struct e1000_info e1000_es2_info = {
+	.mac			= e1000_80003es2lan,
+	.flags			= FLAG_HAS_HW_VLAN_FILTER
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_HAS_WOL
+				  | FLAG_APME_IN_CTRL3
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_RX_NEEDS_RESTART /* errata */
+				  | FLAG_TARC_SET_BIT_ZERO /* errata */
+				  | FLAG_APME_CHECK_PORT_B
+				  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
+				  | FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
+	.flags2			= FLAG2_DMA_BURST,
+	.pba			= 38,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_80003es2lan,
+	.mac_ops		= &es2_mac_ops,
+	.phy_ops		= &es2_phy_ops,
+	.nvm_ops		= &es2_nvm_ops,
+};
+
+++ linux-patched/drivers/xenomai/net/drivers/e1000e/Makefile	2022-03-21 12:58:29.551887382 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000e/param.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_E1000E) += rt_e1000e.o
+
+rt_e1000e-y := \
+	82571.o \
+	80003es2lan.o \
+	ich8lan.o \
+	lib.o \
+	netdev.o \
+	param.o \
+	phy.o
+++ linux-patched/drivers/xenomai/net/drivers/e1000e/param.c	2022-03-21 12:58:29.546887430 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000e/ich8lan.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+#include "e1000.h"
+
+/*
+ * This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define E1000_MAX_NIC 32
+
+#define OPTION_UNSET   -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED  1
+
+#define COPYBREAK_DEFAULT 256
+unsigned int copybreak = COPYBREAK_DEFAULT;
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+	"Maximum size of packet that is copied to a new buffer on receive");
+
+/*
+ * All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
+#define E1000_PARAM(X, desc)					\
+	static int X[E1000_MAX_NIC+1]		\
+		= E1000_PARAM_INIT;				\
+	static unsigned int num_##X;				\
+	module_param_array_named(X, X, int, &num_##X, 0);	\
+	MODULE_PARM_DESC(X, desc);
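+
+/*
+ * As a rough sketch, E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay")
+ * expands to:
+ *
+ *	static int TxIntDelay[E1000_MAX_NIC + 1] =
+ *		{ [0 ... E1000_MAX_NIC] = OPTION_UNSET };
+ *	static unsigned int num_TxIntDelay;
+ *	module_param_array_named(TxIntDelay, TxIntDelay, int,
+ *				 &num_TxIntDelay, 0);
+ *	MODULE_PARM_DESC(TxIntDelay, "Transmit Interrupt Delay");
+ *
+ * The [0 ... N] form is the GCC designated-range initializer, so every
+ * per-board slot starts out as OPTION_UNSET.
+ */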
+
+/*
+ * Transmit Interrupt Delay in units of 1.024 microseconds
+ * Tx interrupt delay typically needs to be set to something non-zero
+ *
+ * Valid Range: 0-65535
+ * 
+ * Default Value: 0 for rtnet
+ */
+E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+#define DEFAULT_TIDV 0
+#define MAX_TXDELAY 0xFFFF
+#define MIN_TXDELAY 0
+
+/*
+ * Transmit Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ * 
+ * Default Value: 0 for rtnet
+ */
+E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
+#define DEFAULT_TADV 0
+#define MAX_TXABSDELAY 0xFFFF
+#define MIN_TXABSDELAY 0
+
+/*
+ * Receive Interrupt Delay in units of 1.024 microseconds
+ * hardware will likely hang if you set this to anything but zero.
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+#define MAX_RXDELAY 0xFFFF
+#define MIN_RXDELAY 0
+
+/*
+ * Receive Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+#define MAX_RXABSDELAY 0xFFFF
+#define MIN_RXABSDELAY 0
+
+/*
+ * Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+ * 
+ * Default Value: 0 for rtnet
+ */
+E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+#define DEFAULT_ITR 0
+#define MAX_ITR 100000
+#define MIN_ITR 100
+
+/* IntMode (Interrupt Mode)
+ *
+ * Valid Range: 0 - 2
+ *
+ * Default Value: 2 (MSI-X)
+ */
+E1000_PARAM(IntMode, "Interrupt Mode");
+#define MAX_INTMODE	2
+#define MIN_INTMODE	0
+
+/*
+ * Enable Smart Power Down of the PHY
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled)
+ */
+E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
+
+/*
+ * Enable Kumeran Lock Loss workaround
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
+
+/*
+ * Write Protect NVM
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
+
+/*
+ * Enable CRC Stripping
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \
+                          "the CRC");
+
+struct e1000_option {
+	enum { enable_option, range_option, list_option } type;
+	const char *name;
+	const char *err;
+	int def;
+	union {
+		struct { /* range_option info */
+			int min;
+			int max;
+		} r;
+		struct { /* list_option info */
+			int nr;
+			struct e1000_opt_list { int i; char *str; } *p;
+		} l;
+	} arg;
+};
+
+static int e1000_validate_option(unsigned int *value,
+					   const struct e1000_option *opt,
+					   struct e1000_adapter *adapter)
+{
+	if (*value == OPTION_UNSET) {
+		*value = opt->def;
+		return 0;
+	}
+
+	switch (opt->type) {
+	case enable_option:
+		switch (*value) {
+		case OPTION_ENABLED:
+			e_info("%s Enabled\n", opt->name);
+			return 0;
+		case OPTION_DISABLED:
+			e_info("%s Disabled\n", opt->name);
+			return 0;
+		}
+		break;
+	case range_option:
+		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+			e_info("%s set to %i\n", opt->name, *value);
+			return 0;
+		}
+		break;
+	case list_option: {
+		int i;
+		struct e1000_opt_list *ent;
+
+		for (i = 0; i < opt->arg.l.nr; i++) {
+			ent = &opt->arg.l.p[i];
+			if (*value == ent->i) {
+				if (ent->str[0] != '\0')
+					e_info("%s\n", ent->str);
+				return 0;
+			}
+		}
+	}
+		break;
+	default:
+		BUG();
+	}
+
+	e_info("Invalid %s value specified (%i) %s\n", opt->name, *value,
+	       opt->err);
+	*value = opt->def;
+	return -1;
+}
+
+/**
+ * e1000e_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input.  If an invalid value is given, or if no user specified
+ * value exists, a default value is used.  The final value is stored
+ * in a variable in the adapter structure.
+ **/
+void e1000e_check_options(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	int bd = adapter->bd_number;
+
+	if (bd >= E1000_MAX_NIC) {
+		e_notice("Warning: no configuration for board #%i\n", bd);
+		e_notice("Using defaults for all values\n");
+	}
+
+	{ /* Transmit Interrupt Delay */
+		static const struct e1000_option opt = {
+			.type = range_option,
+			.name = "Transmit Interrupt Delay",
+			.err  = "using default of "
+				__MODULE_STRING(DEFAULT_TIDV),
+			.def  = DEFAULT_TIDV,
+			.arg  = { .r = { .min = MIN_TXDELAY,
+					 .max = MAX_TXDELAY } }
+		};
+
+		if (num_TxIntDelay > bd) {
+			adapter->tx_int_delay = TxIntDelay[bd];
+			e1000_validate_option(&adapter->tx_int_delay, &opt,
+					      adapter);
+		} else {
+			adapter->tx_int_delay = opt.def;
+		}
+	}
+	{ /* Transmit Absolute Interrupt Delay */
+		static const struct e1000_option opt = {
+			.type = range_option,
+			.name = "Transmit Absolute Interrupt Delay",
+			.err  = "using default of "
+				__MODULE_STRING(DEFAULT_TADV),
+			.def  = DEFAULT_TADV,
+			.arg  = { .r = { .min = MIN_TXABSDELAY,
+					 .max = MAX_TXABSDELAY } }
+		};
+
+		if (num_TxAbsIntDelay > bd) {
+			adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+			e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+					      adapter);
+		} else {
+			adapter->tx_abs_int_delay = opt.def;
+		}
+	}
+	{ /* Receive Interrupt Delay */
+		static struct e1000_option opt = {
+			.type = range_option,
+			.name = "Receive Interrupt Delay",
+			.err  = "using default of "
+				__MODULE_STRING(DEFAULT_RDTR),
+			.def  = DEFAULT_RDTR,
+			.arg  = { .r = { .min = MIN_RXDELAY,
+					 .max = MAX_RXDELAY } }
+		};
+
+		if (num_RxIntDelay > bd) {
+			adapter->rx_int_delay = RxIntDelay[bd];
+			e1000_validate_option(&adapter->rx_int_delay, &opt,
+					      adapter);
+		} else {
+			adapter->rx_int_delay = opt.def;
+		}
+	}
+	{ /* Receive Absolute Interrupt Delay */
+		static const struct e1000_option opt = {
+			.type = range_option,
+			.name = "Receive Absolute Interrupt Delay",
+			.err  = "using default of "
+				__MODULE_STRING(DEFAULT_RADV),
+			.def  = DEFAULT_RADV,
+			.arg  = { .r = { .min = MIN_RXABSDELAY,
+					 .max = MAX_RXABSDELAY } }
+		};
+
+		if (num_RxAbsIntDelay > bd) {
+			adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+			e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+					      adapter);
+		} else {
+			adapter->rx_abs_int_delay = opt.def;
+		}
+	}
+	{ /* Interrupt Throttling Rate */
+		static const struct e1000_option opt = {
+			.type = range_option,
+			.name = "Interrupt Throttling Rate (ints/sec)",
+			.err  = "using default of "
+				__MODULE_STRING(DEFAULT_ITR),
+			.def  = DEFAULT_ITR,
+			.arg  = { .r = { .min = MIN_ITR,
+					 .max = MAX_ITR } }
+		};
+
+		if (num_InterruptThrottleRate > bd) {
+			adapter->itr = InterruptThrottleRate[bd];
+			switch (adapter->itr) {
+			case 0:
+				e_info("%s turned off\n", opt.name);
+				break;
+			case 1:
+				e_info("%s set to dynamic mode\n", opt.name);
+				adapter->itr_setting = adapter->itr;
+				adapter->itr = 20000;
+				break;
+			case 3:
+				e_info("%s set to dynamic conservative mode\n",
+					opt.name);
+				adapter->itr_setting = adapter->itr;
+				adapter->itr = 20000;
+				break;
+			case 4:
+				e_info("%s set to simplified (2000-8000 ints) "
+				       "mode\n", opt.name);
+				adapter->itr_setting = 4;
+				break;
+			default:
+				/*
+				 * Save the setting, because the dynamic bits
+				 * change itr.
+				 */
+				if (e1000_validate_option(&adapter->itr, &opt,
+							  adapter) &&
+				    (adapter->itr == 3)) {
+					/*
+					 * In case of invalid user value,
+					 * default to conservative mode.
+					 */
+					adapter->itr_setting = adapter->itr;
+					adapter->itr = 20000;
+				} else {
+					/*
+					 * Clear the lower two bits because
+					 * they are used as control.
+					 */
+					adapter->itr_setting =
+						adapter->itr & ~3;
+				}
+				break;
+			}
+		} else {
+			adapter->itr_setting = opt.def;
+			adapter->itr = 0;
+		}
+	}
+	{ /* Interrupt Mode */
+		static struct e1000_option opt = {
+			.type = range_option,
+			.name = "Interrupt Mode",
+			.err  = "defaulting to 2 (MSI-X)",
+			.def  = E1000E_INT_MODE_MSIX,
+			.arg  = { .r = { .min = MIN_INTMODE,
+					 .max = MAX_INTMODE } }
+		};
+
+		if (num_IntMode > bd) {
+			unsigned int int_mode = IntMode[bd];
+			e1000_validate_option(&int_mode, &opt, adapter);
+			adapter->int_mode = int_mode;
+		} else {
+			adapter->int_mode = opt.def;
+		}
+	}
+	{ /* Smart Power Down */
+		static const struct e1000_option opt = {
+			.type = enable_option,
+			.name = "PHY Smart Power Down",
+			.err  = "defaulting to Disabled",
+			.def  = OPTION_DISABLED
+		};
+
+		if (num_SmartPowerDownEnable > bd) {
+			unsigned int spd = SmartPowerDownEnable[bd];
+			e1000_validate_option(&spd, &opt, adapter);
+			if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN)
+			    && spd)
+				adapter->flags |= FLAG_SMART_POWER_DOWN;
+		}
+	}
+	{ /* CRC Stripping */
+		static const struct e1000_option opt = {
+			.type = enable_option,
+			.name = "CRC Stripping",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+		if (num_CrcStripping > bd) {
+			unsigned int crc_stripping = CrcStripping[bd];
+			e1000_validate_option(&crc_stripping, &opt, adapter);
+			if (crc_stripping == OPTION_ENABLED)
+				adapter->flags2 |= FLAG2_CRC_STRIPPING;
+		} else {
+			adapter->flags2 |= FLAG2_CRC_STRIPPING;
+		}
+	}
+	{ /* Kumeran Lock Loss Workaround */
+		static const struct e1000_option opt = {
+			.type = enable_option,
+			.name = "Kumeran Lock Loss Workaround",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+		if (num_KumeranLockLoss > bd) {
+			unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
+			e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
+			if (hw->mac.type == e1000_ich8lan)
+				e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
+								kmrn_lock_loss);
+		} else {
+			if (hw->mac.type == e1000_ich8lan)
+				e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
+								       opt.def);
+		}
+	}
+	{ /* Write-protect NVM */
+		static const struct e1000_option opt = {
+			.type = enable_option,
+			.name = "Write-protect NVM",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+		if (adapter->flags & FLAG_IS_ICH) {
+			if (num_WriteProtectNVM > bd) {
+				unsigned int write_protect_nvm = WriteProtectNVM[bd];
+				e1000_validate_option(&write_protect_nvm, &opt,
+						      adapter);
+				if (write_protect_nvm)
+					adapter->flags |= FLAG_READ_ONLY_NVM;
+			} else {
+				if (opt.def)
+					adapter->flags |= FLAG_READ_ONLY_NVM;
+			}
+		}
+	}
+}
+++ linux-patched/drivers/xenomai/net/drivers/e1000e/ich8lan.c	2022-03-21 12:58:29.541887479 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000e/e1000.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * 82562G 10/100 Network Connection
+ * 82562G-2 10/100 Network Connection
+ * 82562GT 10/100 Network Connection
+ * 82562GT-2 10/100 Network Connection
+ * 82562V 10/100 Network Connection
+ * 82562V-2 10/100 Network Connection
+ * 82566DC-2 Gigabit Network Connection
+ * 82566DC Gigabit Network Connection
+ * 82566DM-2 Gigabit Network Connection
+ * 82566DM Gigabit Network Connection
+ * 82566MC Gigabit Network Connection
+ * 82566MM Gigabit Network Connection
+ * 82567LM Gigabit Network Connection
+ * 82567LF Gigabit Network Connection
+ * 82567V Gigabit Network Connection
+ * 82567LM-2 Gigabit Network Connection
+ * 82567LF-2 Gigabit Network Connection
+ * 82567V-2 Gigabit Network Connection
+ * 82567LF-3 Gigabit Network Connection
+ * 82567LM-3 Gigabit Network Connection
+ * 82567LM-4 Gigabit Network Connection
+ * 82577LM Gigabit Network Connection
+ * 82577LC Gigabit Network Connection
+ * 82578DM Gigabit Network Connection
+ * 82578DC Gigabit Network Connection
+ * 82579LM Gigabit Network Connection
+ * 82579V Gigabit Network Connection
+ */
+
+#include "e1000.h"
+
+#define ICH_FLASH_GFPREG		0x0000
+#define ICH_FLASH_HSFSTS		0x0004
+#define ICH_FLASH_HSFCTL		0x0006
+#define ICH_FLASH_FADDR			0x0008
+#define ICH_FLASH_FDATA0		0x0010
+#define ICH_FLASH_PR0			0x0074
+
+#define ICH_FLASH_READ_COMMAND_TIMEOUT	500
+#define ICH_FLASH_WRITE_COMMAND_TIMEOUT	500
+#define ICH_FLASH_ERASE_COMMAND_TIMEOUT	3000000
+#define ICH_FLASH_LINEAR_ADDR_MASK	0x00FFFFFF
+#define ICH_FLASH_CYCLE_REPEAT_COUNT	10
+
+#define ICH_CYCLE_READ			0
+#define ICH_CYCLE_WRITE			2
+#define ICH_CYCLE_ERASE			3
+
+#define FLASH_GFPREG_BASE_MASK		0x1FFF
+#define FLASH_SECTOR_ADDR_SHIFT		12
+
+#define ICH_FLASH_SEG_SIZE_256		256
+#define ICH_FLASH_SEG_SIZE_4K		4096
+#define ICH_FLASH_SEG_SIZE_8K		8192
+#define ICH_FLASH_SEG_SIZE_64K		65536
+
+
+#define E1000_ICH_FWSM_RSPCIPHY	0x00000040 /* Reset PHY on PCI Reset */
+/* FW established a valid mode */
+#define E1000_ICH_FWSM_FW_VALID		0x00008000
+
+#define E1000_ICH_MNG_IAMT_MODE		0x2
+
+#define ID_LED_DEFAULT_ICH8LAN  ((ID_LED_DEF1_DEF2 << 12) | \
+				 (ID_LED_DEF1_OFF2 <<  8) | \
+				 (ID_LED_DEF1_ON2  <<  4) | \
+				 (ID_LED_DEF1_DEF2))
+
+#define E1000_ICH_NVM_SIG_WORD		0x13
+#define E1000_ICH_NVM_SIG_MASK		0xC000
+#define E1000_ICH_NVM_VALID_SIG_MASK    0xC0
+#define E1000_ICH_NVM_SIG_VALUE         0x80
+
+#define E1000_ICH8_LAN_INIT_TIMEOUT	1500
+
+#define E1000_FEXTNVM_SW_CONFIG		1
+#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
+
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK    0x0C000000
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC  0x08000000
+
+#define E1000_FEXTNVM4_BEACON_DURATION_MASK    0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_8USEC   0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_16USEC  0x3
+
+#define PCIE_ICH8_SNOOP_ALL		PCIE_NO_SNOOP_ALL
+
+#define E1000_ICH_RAR_ENTRIES		7
+#define E1000_PCH2_RAR_ENTRIES		5 /* RAR[0], SHRA[0-3] */
+#define E1000_PCH_LPT_RAR_ENTRIES	12 /* RAR[0], SHRA[0-10] */
+
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
+			   ((reg) & MAX_PHY_REG_ADDRESS))
+#define IGP3_KMRN_DIAG  PHY_REG(770, 19) /* KMRN Diagnostic */
+#define IGP3_VR_CTRL    PHY_REG(776, 18) /* Voltage Regulator Control */
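+/* Example: PHY_REG(770, 19) == ((770 << 5) | 19) == 0x6053 (IGP3_KMRN_DIAG). */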
+
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS	0x0002
+#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
+#define IGP3_VR_CTRL_MODE_SHUTDOWN	0x0200
+
+#define HV_LED_CONFIG		PHY_REG(768, 30) /* LED Configuration */
+
+#define SW_FLAG_TIMEOUT    1000 /* SW Semaphore flag timeout in milliseconds */
+
+/* SMBus Control Phy Register */
+#define CV_SMB_CTRL		PHY_REG(769, 23)
+#define CV_SMB_CTRL_FORCE_SMBUS	0x0001
+
+/* SMBus Address Phy Register */
+#define HV_SMB_ADDR            PHY_REG(768, 26)
+#define HV_SMB_ADDR_MASK       0x007F
+#define HV_SMB_ADDR_PEC_EN     0x0200
+#define HV_SMB_ADDR_VALID      0x0080
+#define HV_SMB_ADDR_FREQ_MASK           0x1100
+#define HV_SMB_ADDR_FREQ_LOW_SHIFT      8
+#define HV_SMB_ADDR_FREQ_HIGH_SHIFT     12
+
+/* PHY Power Management Control */
+#define HV_PM_CTRL		PHY_REG(770, 17)
+#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA	0x100
+
+/* PHY Low Power Idle Control */
+#define I82579_LPI_CTRL				PHY_REG(772, 20)
+#define I82579_LPI_CTRL_ENABLE_MASK		0x6000
+#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT	0x80
+
+/* EMI Registers */
+#define I82579_EMI_ADDR         0x10
+#define I82579_EMI_DATA         0x11
+#define I82579_LPI_UPDATE_TIMER 0x4805	/* in 40ns units + 40 ns base value */
+
+#define I217_EEE_ADVERTISEMENT  0x8001	/* IEEE MMD Register 7.60 */
+#define I217_EEE_LP_ABILITY     0x8002	/* IEEE MMD Register 7.61 */
+#define I217_EEE_100_SUPPORTED  (1 << 1)	/* 100BaseTx EEE supported */
+
+/* Intel Rapid Start Technology Support */
+#define I217_PROXY_CTRL                 PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL_AUTO_DISABLE    0x0080
+#define I217_SxCTRL                     PHY_REG(BM_PORT_CTRL_PAGE, 28)
+#define I217_SxCTRL_MASK                0x1000
+#define I217_CGFREG                     PHY_REG(772, 29)
+#define I217_CGFREG_MASK                0x0002
+#define I217_MEMPWR                     PHY_REG(772, 26)
+#define I217_MEMPWR_MASK                0x0010
+
+/* Strapping Option Register - RO */
+#define E1000_STRAP                     0x0000C
+#define E1000_STRAP_SMBUS_ADDRESS_MASK  0x00FE0000
+#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
+#define E1000_STRAP_SMT_FREQ_MASK       0x00003000
+#define E1000_STRAP_SMT_FREQ_SHIFT      12
+
+/* OEM Bits Phy Register */
+#define HV_OEM_BITS            PHY_REG(768, 25)
+#define HV_OEM_BITS_LPLU       0x0004 /* Low Power Link Up */
+#define HV_OEM_BITS_GBE_DIS    0x0040 /* Gigabit Disable */
+#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
+
+#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
+#define E1000_NVM_K1_ENABLE 0x1  /* NVM Enable K1 bit */
+
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL      PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW      0x0400
+
+/* KMRN FIFO Control and Status */
+#define HV_KMRN_FIFO_CTRLSTA                  PHY_REG(770, 16)
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK    0x7000
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT   12
+
+/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+	struct ich8_hsfsts {
+		u16 flcdone    :1; /* bit 0 Flash Cycle Done */
+		u16 flcerr     :1; /* bit 1 Flash Cycle Error */
+		u16 dael       :1; /* bit 2 Direct Access error Log */
+		u16 berasesz   :2; /* bit 4:3 Sector Erase Size */
+		u16 flcinprog  :1; /* bit 5 flash cycle in Progress */
+		u16 reserved1  :2; /* bit 7:6 Reserved */
+		u16 reserved2  :6; /* bit 13:8 Reserved */
+		u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
+		u16 flockdn    :1; /* bit 15 Flash Config Lock-Down */
+	} hsf_status;
+	u16 regval;
+};
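+
+/*
+ * Usage sketch: read the 16-bit HSFSTS register into .regval, then test the
+ * individual fields, e.g.
+ *
+ *	union ich8_hws_flash_status hsfsts;
+ *
+ *	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ *	if (hsfsts.hsf_status.flcdone)
+ *		... the previous flash cycle has completed ...
+ */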
+
+/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h FLCTL */
+union ich8_hws_flash_ctrl {
+	struct ich8_hsflctl {
+		u16 flcgo      :1;   /* 0 Flash Cycle Go */
+		u16 flcycle    :2;   /* 2:1 Flash Cycle */
+		u16 reserved   :5;   /* 7:3 Reserved  */
+		u16 fldbcount  :2;   /* 9:8 Flash Data Byte Count */
+		u16 flockdn    :6;   /* 15:10 Reserved */
+	} hsf_ctrl;
+	u16 regval;
+};
+
+/* ICH Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+	struct ich8_flracc {
+		u32 grra      :8; /* 0:7 GbE region Read Access */
+		u32 grwa      :8; /* 8:15 GbE region Write Access */
+		u32 gmrag     :8; /* 23:16 GbE Master Read Access Grant */
+		u32 gmwag     :8; /* 31:24 GbE Master Write Access Grant */
+	} hsf_flregacc;
+	u16 regval;
+};
+
+/* ICH Flash Protected Region */
+union ich8_flash_protected_range {
+	struct ich8_pr {
+		u32 base:13;     /* 0:12 Protected Range Base */
+		u32 reserved1:2; /* 13:14 Reserved */
+		u32 rpe:1;       /* 15 Read Protection Enable */
+		u32 limit:13;    /* 16:28 Protected Range Limit */
+		u32 reserved2:2; /* 29:30 Reserved */
+		u32 wpe:1;       /* 31 Write Protection Enable */
+	} range;
+	u32 regval;
+};
+
+static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
+static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
+static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
+static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+						u32 offset, u8 byte);
+static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+					 u8 *data);
+static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
+					 u16 *data);
+static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+					 u8 size, u16 *data);
+static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
+static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
+static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
+static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
+static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
+static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
+static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
+static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
+static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
+static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
+static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
+static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
+static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
+static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
+static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
+
+static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
+{
+	return readw(hw->flash_address + reg);
+}
+
+static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
+{
+	return readl(hw->flash_address + reg);
+}
+
+static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
+{
+	writew(val, hw->flash_address + reg);
+}
+
+static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
+{
+	writel(val, hw->flash_address + reg);
+}
+
+#define er16flash(reg)		__er16flash(hw, (reg))
+#define er32flash(reg)		__er32flash(hw, (reg))
+#define ew16flash(reg,val)	__ew16flash(hw, (reg), (val))
+#define ew32flash(reg,val)	__ew32flash(hw, (reg), (val))
+
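+/*
+ * Briefly assert the LANPHYPC override with the value bit cleared, which
+ * forces the MAC-PHY interconnect out of SMBus mode (see the Sx->S0 handling
+ * in e1000_init_phy_params_pchlan() below).
+ */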
+static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	ctrl = er32(CTRL);
+	ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
+	ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
+	ew32(CTRL, ctrl);
+	e1e_flush();
+	udelay(10);
+	ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+	ew32(CTRL, ctrl);
+}
+
+/**
+ *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize family-specific PHY parameters and function pointers.
+ **/
+static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 fwsm;
+	s32 ret_val = 0;
+
+	phy->addr                     = 1;
+	phy->reset_delay_us           = 100;
+
+	phy->ops.set_page             = e1000_set_page_igp;
+	phy->ops.read_reg             = e1000_read_phy_reg_hv;
+	phy->ops.read_reg_locked      = e1000_read_phy_reg_hv_locked;
+	phy->ops.read_reg_page        = e1000_read_phy_reg_page_hv;
+	phy->ops.set_d0_lplu_state    = e1000_set_lplu_state_pchlan;
+	phy->ops.set_d3_lplu_state    = e1000_set_lplu_state_pchlan;
+	phy->ops.write_reg            = e1000_write_phy_reg_hv;
+	phy->ops.write_reg_locked     = e1000_write_phy_reg_hv_locked;
+	phy->ops.write_reg_page       = e1000_write_phy_reg_page_hv;
+	phy->ops.power_up             = e1000_power_up_phy_copper;
+	phy->ops.power_down           = e1000_power_down_phy_copper_ich8lan;
+	phy->autoneg_mask             = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+	/*
+	 * The MAC-PHY interconnect may still be in SMBus mode
+	 * after Sx->S0.  If the manageability engine (ME) is
+	 * disabled, then toggle the LANPHYPC Value bit to force
+	 * the interconnect to PCIe mode.
+	 */
+	fwsm = er32(FWSM);
+	if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) {
+		e1000_toggle_lanphypc_value_ich8lan(hw);
+		msleep(50);
+
+		/*
+		 * Gate automatic PHY configuration by hardware on
+		 * non-managed 82579
+		 */
+		if (hw->mac.type == e1000_pch2lan)
+			e1000_gate_hw_phy_config_ich8lan(hw, true);
+	}
+
+	/*
+	 * Reset the PHY before any access to it.  Doing so ensures that
+	 * the PHY is in a known good state before we read/write PHY registers.
+	 * The generic reset is sufficient here, because we haven't determined
+	 * the PHY type yet.
+	 */
+	ret_val = e1000e_phy_hw_reset_generic(hw);
+	if (ret_val)
+		goto out;
+
+	/* Ungate automatic PHY configuration on non-managed 82579 */
+	if ((hw->mac.type == e1000_pch2lan) &&
+	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
+		usleep_range(10000, 20000);
+		e1000_gate_hw_phy_config_ich8lan(hw, false);
+	}
+
+	phy->id = e1000_phy_unknown;
+	switch (hw->mac.type) {
+	default:
+		ret_val = e1000e_get_phy_id(hw);
+		if (ret_val)
+			goto out;
+		if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
+			break;
+		fallthrough;
+	case e1000_pch2lan:
+	case e1000_pch_lpt:
+		/*
+		 * In case the PHY needs to be in mdio slow mode,
+		 * set slow mode and try to get the PHY id again.
+		 */
+		ret_val = e1000_set_mdio_slow_mode_hv(hw);
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_get_phy_id(hw);
+		if (ret_val)
+			goto out;
+		break;
+	}
+	phy->type = e1000e_get_phy_type_from_id(phy->id);
+
+	switch (phy->type) {
+	case e1000_phy_82577:
+	case e1000_phy_82579:
+	case e1000_phy_i217:
+		phy->ops.check_polarity = e1000_check_polarity_82577;
+		phy->ops.force_speed_duplex =
+		    e1000_phy_force_speed_duplex_82577;
+		phy->ops.get_cable_length = e1000_get_cable_length_82577;
+		phy->ops.get_info = e1000_get_phy_info_82577;
+		phy->ops.commit = e1000e_phy_sw_reset;
+		break;
+	case e1000_phy_82578:
+		phy->ops.check_polarity = e1000_check_polarity_m88;
+		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
+		phy->ops.get_cable_length = e1000e_get_cable_length_m88;
+		phy->ops.get_info = e1000e_get_phy_info_m88;
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		break;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize family-specific PHY parameters and function pointers.
+ **/
+static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 i = 0;
+
+	phy->addr			= 1;
+	phy->reset_delay_us		= 100;
+
+	phy->ops.power_up               = e1000_power_up_phy_copper;
+	phy->ops.power_down             = e1000_power_down_phy_copper_ich8lan;
+
+	/*
+	 * We may need to do this twice - once for IGP and if that fails,
+	 * we'll set BM func pointers and try again
+	 */
+	ret_val = e1000e_determine_phy_address(hw);
+	if (ret_val) {
+		phy->ops.write_reg = e1000e_write_phy_reg_bm;
+		phy->ops.read_reg  = e1000e_read_phy_reg_bm;
+		ret_val = e1000e_determine_phy_address(hw);
+		if (ret_val) {
+			e_dbg("Cannot determine PHY addr. Erroring out\n");
+			return ret_val;
+		}
+	}
+
+	phy->id = 0;
+	while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
+	       (i++ < 100)) {
+		usleep_range(1000, 2000);
+		ret_val = e1000e_get_phy_id(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/* Verify phy id */
+	switch (phy->id) {
+	case IGP03E1000_E_PHY_ID:
+		phy->type = e1000_phy_igp_3;
+		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+		phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
+		phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
+		phy->ops.get_info = e1000e_get_phy_info_igp;
+		phy->ops.check_polarity = e1000_check_polarity_igp;
+		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
+		break;
+	case IFE_E_PHY_ID:
+	case IFE_PLUS_E_PHY_ID:
+	case IFE_C_E_PHY_ID:
+		phy->type = e1000_phy_ife;
+		phy->autoneg_mask = E1000_ALL_NOT_GIG;
+		phy->ops.get_info = e1000_get_phy_info_ife;
+		phy->ops.check_polarity = e1000_check_polarity_ife;
+		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
+		break;
+	case BME1000_E_PHY_ID:
+		phy->type = e1000_phy_bm;
+		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+		phy->ops.read_reg = e1000e_read_phy_reg_bm;
+		phy->ops.write_reg = e1000e_write_phy_reg_bm;
+		phy->ops.commit = e1000e_phy_sw_reset;
+		phy->ops.get_info = e1000e_get_phy_info_m88;
+		phy->ops.check_polarity = e1000_check_polarity_m88;
+		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
+		break;
+	default:
+		return -E1000_ERR_PHY;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize family-specific NVM parameters and function
+ *  pointers.
+ **/
+static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u32 gfpreg, sector_base_addr, sector_end_addr;
+	u16 i;
+
+	/* Can't read flash registers if the register set isn't mapped. */
+	if (!hw->flash_address) {
+		e_dbg("ERROR: Flash registers not mapped\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	nvm->type = e1000_nvm_flash_sw;
+
+	gfpreg = er32flash(ICH_FLASH_GFPREG);
+
+	/*
+	 * sector_X_addr is a "sector"-aligned address (4096 bytes)
+	 * Add 1 to sector_end_addr since this sector is included in
+	 * the overall size.
+	 */
+	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
+	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
+
+	/* flash_base_addr is byte-aligned */
+	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
+
+	/*
+	 * find total size of the NVM, then cut in half since the total
+	 * size represents two separate NVM banks.
+	 */
+	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
+				<< FLASH_SECTOR_ADDR_SHIFT;
+	nvm->flash_bank_size /= 2;
+	/* Adjust to word count */
+	nvm->flash_bank_size /= sizeof(u16);
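+	/*
+	 * Example with a hypothetical gfpreg of 0x00020001: base sector 1,
+	 * end sector 3, giving flash_base_addr = 0x1000 and flash_bank_size =
+	 * (2 sectors * 4096 bytes) / 2 banks / 2 bytes per word = 2048 words.
+	 */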
+
+	nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
+
+	/* Clear shadow ram */
+	for (i = 0; i < nvm->word_size; i++) {
+		dev_spec->shadow_ram[i].modified = false;
+		dev_spec->shadow_ram[i].value    = 0xFFFF;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
+ *  @adapter: board private structure
+ *
+ *  Initialize family-specific MAC parameters and function
+ *  pointers.
+ **/
+static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_mac_info *mac = &hw->mac;
+
+	/* Set media type function pointer */
+	hw->phy.media_type = e1000_media_type_copper;
+
+	/* Set mta register count */
+	mac->mta_reg_count = 32;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
+	if (mac->type == e1000_ich8lan)
+		mac->rar_entry_count--;
+	/* FWSM register */
+	mac->has_fwsm = true;
+	/* ARC subsystem not supported */
+	mac->arc_subsystem_valid = false;
+	/* Adaptive IFS supported */
+	mac->adaptive_ifs = true;
+
+	/* LED operations */
+	switch (mac->type) {
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+	case e1000_ich10lan:
+		/* check management mode */
+		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
+		/* ID LED init */
+		mac->ops.id_led_init = e1000e_id_led_init;
+		/* blink LED */
+		mac->ops.blink_led = e1000e_blink_led_generic;
+		/* setup LED */
+		mac->ops.setup_led = e1000e_setup_led_generic;
+		/* cleanup LED */
+		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
+		/* turn on/off LED */
+		mac->ops.led_on = e1000_led_on_ich8lan;
+		mac->ops.led_off = e1000_led_off_ich8lan;
+		break;
+	case e1000_pch_lpt:
+	case e1000_pchlan:
+	case e1000_pch2lan:
+		/* check management mode */
+		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
+		/* ID LED init */
+		mac->ops.id_led_init = e1000_id_led_init_pchlan;
+		/* setup LED */
+		mac->ops.setup_led = e1000_setup_led_pchlan;
+		/* cleanup LED */
+		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
+		/* turn on/off LED */
+		mac->ops.led_on = e1000_led_on_pchlan;
+		mac->ops.led_off = e1000_led_off_pchlan;
+		break;
+	default:
+		break;
+	}
+
+	if (mac->type == e1000_pch_lpt) {
+		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
+		mac->ops.rar_set = e1000_rar_set_pch_lpt;
+	}
+
+	/* Enable PCS Lock-loss workaround for ICH8 */
+	if (mac->type == e1000_ich8lan)
+		e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
+
+	/* Gate automatic PHY configuration by hardware on managed
+	 * 82579 and i217
+	 */
+	if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) &&
+	    (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+		e1000_gate_hw_phy_config_ich8lan(hw, true);
+
+	return 0;
+}
+
+/**
+ *  e1000_set_eee_pchlan - Enable/disable EEE support
+ *  @hw: pointer to the HW structure
+ *
+ *  Enable/disable EEE based on setting in dev_spec structure.  The bits in
+ *  the LPI Control register will remain set only if/when link is up.
+ **/
+static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	s32 ret_val = 0;
+	u16 phy_reg;
+
+	if ((hw->phy.type != e1000_phy_82579) &&
+	    (hw->phy.type != e1000_phy_i217))
+		return ret_val;
+
+	ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
+	if (ret_val)
+		return ret_val;
+
+	if (dev_spec->eee_disable)
+		phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
+	else
+		phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
+
+	ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
+
+	if (ret_val)
+		return ret_val;
+
+	if ((hw->phy.type == e1000_phy_i217) && !dev_spec->eee_disable) {
+		/* Save off link partner's EEE ability */
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			return ret_val;
+		ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
+					  I217_EEE_LP_ABILITY);
+		if (ret_val)
+			goto release;
+		e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability);
+
+		/* EEE is not supported in 100Half, so ignore partner's EEE
+		 * in 100 ability if full-duplex is not advertised.
+		 */
+		e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg);
+		if (!(phy_reg & NWAY_LPAR_100TX_FD_CAPS))
+			dev_spec->eee_lp_ability &= ~I217_EEE_100_SUPPORTED;
+release:
+		hw->phy.ops.release(hw);
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks to see if the link status of the hardware has changed.  If a
+ *  change in link status has been detected, then we read the PHY registers
+ *  to get the current speed/duplex if link exists.
+ **/
+static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	bool link;
+	u16 phy_reg;
+
+	/*
+	 * We only want to go out to the PHY registers to see if Auto-Neg
+	 * has completed and/or if our link status has changed.  The
+	 * get_link_status flag is set upon receiving a Link Status
+	 * Change or Rx Sequence Error interrupt.
+	 */
+	if (!mac->get_link_status) {
+		ret_val = 0;
+		goto out;
+	}
+
+	/*
+	 * First we want to see if the MII Status Register reports
+	 * link.  If so, then we want to get the current speed/duplex
+	 * of the PHY.
+	 */
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (hw->mac.type == e1000_pchlan) {
+		ret_val = e1000_k1_gig_workaround_hv(hw, link);
+		if (ret_val)
+			goto out;
+	}
+
+	/* Clear link partner's EEE ability */
+	hw->dev_spec.ich8lan.eee_lp_ability = 0;
+
+	if (!link)
+		goto out; /* No link detected */
+
+	mac->get_link_status = false;
+
+	switch (hw->mac.type) {
+	case e1000_pch2lan:
+		ret_val = e1000_k1_workaround_lv(hw);
+		if (ret_val)
+			goto out;
+		fallthrough;
+	case e1000_pchlan:
+		if (hw->phy.type == e1000_phy_82578) {
+			ret_val = e1000_link_stall_workaround_hv(hw);
+			if (ret_val)
+				goto out;
+		}
+
+		/*
+		 * Workaround for PCHx parts in half-duplex:
+		 * Set the number of preambles removed from the packet
+		 * when it is passed from the PHY to the MAC to prevent
+		 * the MAC from misinterpreting the packet type.
+		 */
+		e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
+		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
+
+		if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
+			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
+
+		e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * Check if there was DownShift, must be checked
+	 * immediately after link-up
+	 */
+	e1000e_check_downshift(hw);
+
+	/* Enable/Disable EEE after link up */
+	ret_val = e1000_set_eee_pchlan(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * If we are forcing speed/duplex, then we simply return since
+	 * we have already determined whether we have link or not.
+	 */
+	if (!mac->autoneg) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	/*
+	 * Auto-Neg is enabled.  Auto Speed Detection takes care
+	 * of MAC speed/duplex configuration.  So we only need to
+	 * configure Collision Distance in the MAC.
+	 */
+	e1000e_config_collision_dist(hw);
+
+	/*
+	 * Configure Flow Control now that Auto-Neg has completed.
+	 * First, we need to restore the desired flow control
+	 * settings because we may have had to re-autoneg with a
+	 * different link partner.
+	 */
+	ret_val = e1000e_config_fc_after_link_up(hw);
+	if (ret_val)
+		e_dbg("Error configuring flow control\n");
+
+out:
+	return ret_val;
+}
+
+static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	s32 rc;
+
+	rc = e1000_init_mac_params_ich8lan(adapter);
+	if (rc)
+		return rc;
+
+	rc = e1000_init_nvm_params_ich8lan(hw);
+	if (rc)
+		return rc;
+
+	switch (hw->mac.type) {
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+	case e1000_ich10lan:
+		rc = e1000_init_phy_params_ich8lan(hw);
+		break;
+	case e1000_pchlan:
+	case e1000_pch2lan:
+	case e1000_pch_lpt:
+		rc = e1000_init_phy_params_pchlan(hw);
+		break;
+	default:
+		break;
+	}
+	if (rc)
+		return rc;
+
+	/*
+	 * Disable Jumbo Frame support on parts with Intel 10/100 PHY or
+	 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
+	 */
+	if ((adapter->hw.phy.type == e1000_phy_ife) ||
+	    ((adapter->hw.mac.type >= e1000_pch2lan) &&
+	     (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
+		adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
+		adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
+
+		hw->mac.ops.blink_led = NULL;
+	}
+
+	if ((adapter->hw.mac.type == e1000_ich8lan) &&
+	    (adapter->hw.phy.type != e1000_phy_ife))
+		adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
+
+	/* Enable workaround for 82579 w/ ME enabled */
+	if ((adapter->hw.mac.type == e1000_pch2lan) &&
+	    (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+		adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
+
+	/* Disable EEE by default until IEEE802.3az spec is finalized */
+	if (adapter->flags2 & FLAG2_HAS_EEE)
+		adapter->hw.dev_spec.ich8lan.eee_disable = true;
+
+	return 0;
+}
+
+static DEFINE_MUTEX(nvm_mutex);
+
+/**
+ *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquires the mutex for performing NVM operations.
+ **/
+static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
+{
+	mutex_lock(&nvm_mutex);
+
+	return 0;
+}
+
+/**
+ *  e1000_release_nvm_ich8lan - Release NVM mutex
+ *  @hw: pointer to the HW structure
+ *
+ *  Releases the mutex used while performing NVM operations.
+ **/
+static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
+{
+	mutex_unlock(&nvm_mutex);
+}
+
+/**
+ *  e1000_acquire_swflag_ich8lan - Acquire software control flag
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquires the software control flag for performing PHY and select
+ *  MAC CSR accesses.
+ **/
+static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
+{
+	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
+	s32 ret_val = 0;
+
+	if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
+			     &hw->adapter->state)) {
+		WARN(1, "e1000e: %s: contention for Phy access\n",
+		     hw->adapter->netdev->name);
+		return -E1000_ERR_PHY;
+	}
+
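+	/* Wait for any current owner of the SW flag to release it. */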
+	while (timeout) {
+		extcnf_ctrl = er32(EXTCNF_CTRL);
+		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
+			break;
+
+		mdelay(1);
+		timeout--;
+	}
+
+	if (!timeout) {
+		e_dbg("SW has already locked the resource.\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	timeout = SW_FLAG_TIMEOUT;
+
+	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+	ew32(EXTCNF_CTRL, extcnf_ctrl);
+
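+	/* Verify the flag bit actually latched; firmware may still own it. */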
+	while (timeout) {
+		extcnf_ctrl = er32(EXTCNF_CTRL);
+		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+			break;
+
+		mdelay(1);
+		timeout--;
+	}
+
+	if (!timeout) {
+		e_dbg("Failed to acquire the semaphore, FW or HW has it: "
+		      "FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
+		      er32(FWSM), extcnf_ctrl);
+		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+		ew32(EXTCNF_CTRL, extcnf_ctrl);
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+out:
+	if (ret_val)
+		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_release_swflag_ich8lan - Release software control flag
+ *  @hw: pointer to the HW structure
+ *
+ *  Releases the software control flag for performing PHY and select
+ *  MAC CSR accesses.
+ **/
+static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
+{
+	u32 extcnf_ctrl;
+
+	extcnf_ctrl = er32(EXTCNF_CTRL);
+
+	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
+		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+		ew32(EXTCNF_CTRL, extcnf_ctrl);
+	} else {
+		e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
+	}
+
+	clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
+}
+
+/**
+ *  e1000_check_mng_mode_ich8lan - Checks management mode
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks if the adapter has any manageability enabled.
+ *  This is a function pointer entry point only called by read/write
+ *  routines for the PHY and NVM parts.
+ **/
+static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
+{
+	u32 fwsm;
+
+	fwsm = er32(FWSM);
+	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+	       ((fwsm & E1000_FWSM_MODE_MASK) ==
+		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ *  e1000_check_mng_mode_pchlan - Checks management mode
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks if the adapter has iAMT enabled.
+ *  This is a function pointer entry point only called by read/write
+ *  routines for the PHY and NVM parts.
+ **/
+static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
+{
+	u32 fwsm;
+
+	fwsm = er32(FWSM);
+	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ *  e1000_rar_set_pch_lpt - Set receive address registers
+ *  @hw: pointer to the HW structure
+ *  @addr: pointer to the receive address
+ *  @index: receive address array register
+ *
+ *  Sets the receive address register array at index to the address passed
+ *  in by addr. For LPT, RAR[0] is the base address register that is to
+ *  contain the MAC address. SHRA[0-10] are the shared receive address
+ *  registers that are shared between the Host and manageability engine (ME).
+ **/
+static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+	u32 rar_low, rar_high;
+	u32 wlock_mac;
+
+	/* HW expects these in little endian so we reverse the byte order
+	 * from network order (big endian) to little endian
+	 */
+	rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
+		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
+
+	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
+
+	/* If MAC address zero, no need to set the AV bit */
+	if (rar_low || rar_high)
+		rar_high |= E1000_RAH_AV;
+
+	if (index == 0) {
+		ew32(RAL(index), rar_low);
+		e1e_flush();
+		ew32(RAH(index), rar_high);
+		e1e_flush();
+		return;
+	}
+
+	/* The manageability engine (ME) can lock certain SHRAR registers that
+	 * it is using - those registers are unavailable for use.
+	 */
+	if (index < hw->mac.rar_entry_count) {
+		wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
+		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
+
+		/* Check if all SHRAR registers are locked */
+		if (wlock_mac == 1)
+			goto out;
+
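+		/* wlock_mac == 0 means no SHRAR entry is locked; otherwise
+		 * only entries up to the wlock_mac boundary are writable.
+		 */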
+		if ((wlock_mac == 0) || (index <= wlock_mac)) {
+			s32 ret_val;
+
+			ret_val = e1000_acquire_swflag_ich8lan(hw);
+
+			if (ret_val)
+				goto out;
+
+			ew32(SHRAL_PCH_LPT(index - 1), rar_low);
+			e1e_flush();
+			ew32(SHRAH_PCH_LPT(index - 1), rar_high);
+			e1e_flush();
+
+			e1000_release_swflag_ich8lan(hw);
+
+			/* verify the register updates */
+			if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
+			    (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
+				return;
+		}
+	}
+
+out:
+	e_dbg("Failed to write receive address at index %d\n", index);
+}
+
+/**
+ *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks if firmware is blocking the reset of the PHY.
+ *  This is a function pointer entry point only called by
+ *  reset routines.
+ **/
+static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
+{
+	u32 fwsm;
+
+	fwsm = er32(FWSM);
+
+	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
+}
+
+/**
+ *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
+ *  @hw: pointer to the HW structure
+ *
+ *  Assumes semaphore already acquired.
+ *
+ **/
+static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
+{
+	u16 phy_data;
+	u32 strap = er32(STRAP);
+	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
+	    E1000_STRAP_SMT_FREQ_SHIFT;
+	s32 ret_val = 0;
+
+	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+
+	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~HV_SMB_ADDR_MASK;
+	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
+	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+
+	if (hw->phy.type == e1000_phy_i217) {
+		/* Restore SMBus frequency */
+		if (freq--) {
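+			/* freq was post-decremented by the test above; its two
+			 * low bits are copied into the PHY's SMBus frequency
+			 * field.
+			 */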
+			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
+			phy_data |= (freq & (1 << 0)) <<
+			    HV_SMB_ADDR_FREQ_LOW_SHIFT;
+			phy_data |= (freq & (1 << 1)) <<
+			    (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
+		} else {
+			e_dbg("Unsupported SMB frequency in PHY\n");
+		}
+	}
+
+	ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
+ *  @hw:   pointer to the HW structure
+ *
+ *  SW should configure the LCD from the NVM extended configuration region
+ *  as a workaround for certain parts.
+ **/
+static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
+	s32 ret_val = 0;
+	u16 word_addr, reg_data, reg_addr, phy_page = 0;
+
+	/*
+	 * Initialize the PHY from the NVM on ICH platforms.  This
+	 * is needed due to an issue where the NVM configuration is
+	 * not properly autoloaded after power transitions.
+	 * Therefore, after each PHY reset, we will load the
+	 * configuration data out of the NVM manually.
+	 */
+	switch (hw->mac.type) {
+	case e1000_ich8lan:
+		if (phy->type != e1000_phy_igp_3)
+			return ret_val;
+
+		if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
+		    (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
+			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+			break;
+		}
+		fallthrough;
+	case e1000_pchlan:
+	case e1000_pch2lan:
+	case e1000_pch_lpt:
+		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
+		break;
+	default:
+		return ret_val;
+	}
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	data = er32(FEXTNVM);
+	if (!(data & sw_cfg_mask))
+		goto out;
+
+	/*
+	 * Make sure HW does not configure LCD from PHY
+	 * extended configuration before SW configuration
+	 */
+	data = er32(EXTCNF_CTRL);
+	if ((hw->mac.type < e1000_pch2lan) &&
+	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
+		goto out;
+
+	cnf_size = er32(EXTCNF_SIZE);
+	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
+	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
+	if (!cnf_size)
+		goto out;
+
+	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
+	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
+
+	if (((hw->mac.type == e1000_pchlan) &&
+	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
+	    (hw->mac.type > e1000_pchlan)) {
+		/*
+		 * HW configures the SMBus address and LEDs when the
+		 * OEM and LCD Write Enable bits are set in the NVM.
+		 * When both NVM bits are cleared, SW will configure
+		 * them instead.
+		 */
+		ret_val = e1000_write_smbus_addr(hw);
+		if (ret_val)
+			goto out;
+
+		data = er32(LEDCTL);
+		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
+							(u16)data);
+		if (ret_val)
+			goto out;
+	}
+
+	/* Configure LCD from extended configuration region. */
+
+	/* cnf_base_addr is in DWORD */
+	word_addr = (u16)(cnf_base_addr << 1);
+
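+	/* Each extended configuration record is a pair of NVM words:
+	 * the data word followed by the target PHY register address.
+	 */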
+	for (i = 0; i < cnf_size; i++) {
+		ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
+					 &reg_data);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
+					 1, &reg_addr);
+		if (ret_val)
+			goto out;
+
+		/* Save off the PHY page for future writes. */
+		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
+			phy_page = reg_data;
+			continue;
+		}
+
+		reg_addr &= PHY_REG_MASK;
+		reg_addr |= phy_page;
+
+		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
+						    reg_data);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ *  e1000_k1_gig_workaround_hv - K1 Si workaround
+ *  @hw:   pointer to the HW structure
+ *  @link: link up bool flag
+ *
+ *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
+ *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
+ *  If link is down, the function will restore the default K1 setting located
+ *  in the NVM.
+ **/
+static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
+{
+	s32 ret_val = 0;
+	u16 status_reg = 0;
+	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
+
+	if (hw->mac.type != e1000_pchlan)
+		goto out;
+
+	/* Wrap the whole flow with the sw flag */
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
+	if (link) {
+		if (hw->phy.type == e1000_phy_82578) {
+			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
+			                                          &status_reg);
+			if (ret_val)
+				goto release;
+
+			status_reg &= BM_CS_STATUS_LINK_UP |
+			              BM_CS_STATUS_RESOLVED |
+			              BM_CS_STATUS_SPEED_MASK;
+
+			if (status_reg == (BM_CS_STATUS_LINK_UP |
+			                   BM_CS_STATUS_RESOLVED |
+			                   BM_CS_STATUS_SPEED_1000))
+				k1_enable = false;
+		}
+
+		if (hw->phy.type == e1000_phy_82577) {
+			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
+			                                          &status_reg);
+			if (ret_val)
+				goto release;
+
+			status_reg &= HV_M_STATUS_LINK_UP |
+			              HV_M_STATUS_AUTONEG_COMPLETE |
+			              HV_M_STATUS_SPEED_MASK;
+
+			if (status_reg == (HV_M_STATUS_LINK_UP |
+			                   HV_M_STATUS_AUTONEG_COMPLETE |
+			                   HV_M_STATUS_SPEED_1000))
+				k1_enable = false;
+		}
+
+		/* Link stall fix for link up */
+		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
+		                                           0x0100);
+		if (ret_val)
+			goto release;
+
+	} else {
+		/* Link stall fix for link down */
+		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
+		                                           0x4100);
+		if (ret_val)
+			goto release;
+	}
+
+	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
+
+release:
+	hw->phy.ops.release(hw);
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_configure_k1_ich8lan - Configure K1 power state
+ *  @hw: pointer to the HW structure
+ *  @k1_enable: K1 state to configure
+ *
+ *  Configure the K1 power state based on the provided parameter.
+ *  Assumes semaphore already acquired.
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ **/
+s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
+{
+	s32 ret_val = 0;
+	u32 ctrl_reg = 0;
+	u32 ctrl_ext = 0;
+	u32 reg = 0;
+	u16 kmrn_reg = 0;
+
+	ret_val = e1000e_read_kmrn_reg_locked(hw,
+	                                     E1000_KMRNCTRLSTA_K1_CONFIG,
+	                                     &kmrn_reg);
+	if (ret_val)
+		goto out;
+
+	if (k1_enable)
+		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
+	else
+		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
+
+	ret_val = e1000e_write_kmrn_reg_locked(hw,
+	                                      E1000_KMRNCTRLSTA_K1_CONFIG,
+	                                      kmrn_reg);
+	if (ret_val)
+		goto out;
+
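+	/* Briefly force the MAC speed (FRCSPD) and bypass the PHY-detected
+	 * speed (SPD_BYPS) so the new K1 setting takes effect, then restore
+	 * the original CTRL and CTRL_EXT values.
+	 */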
+	udelay(20);
+	ctrl_ext = er32(CTRL_EXT);
+	ctrl_reg = er32(CTRL);
+
+	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+	reg |= E1000_CTRL_FRCSPD;
+	ew32(CTRL, reg);
+
+	ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
+	e1e_flush();
+	udelay(20);
+	ew32(CTRL, ctrl_reg);
+	ew32(CTRL_EXT, ctrl_ext);
+	e1e_flush();
+	udelay(20);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
+ *  @hw:       pointer to the HW structure
+ *  @d0_state: boolean if entering d0 or d3 device state
+ *
+ *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
+ *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
+ *  in NVM determine whether HW should configure LPLU and Gbe Disable.
+ **/
+static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
+{
+	s32 ret_val = 0;
+	u32 mac_reg;
+	u16 oem_reg;
+
+	if (hw->mac.type < e1000_pchlan)
+		return ret_val;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	if (hw->mac.type == e1000_pchlan) {
+		mac_reg = er32(EXTCNF_CTRL);
+		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
+			goto out;
+	}
+
+	mac_reg = er32(FEXTNVM);
+	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
+		goto out;
+
+	mac_reg = er32(PHY_CTRL);
+
+	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
+	if (ret_val)
+		goto out;
+
+	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
+
+	if (d0_state) {
+		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
+			oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
+			oem_reg |= HV_OEM_BITS_LPLU;
+
+		/* Set Restart auto-neg to activate the bits */
+		if (!e1000_check_reset_block(hw))
+			oem_reg |= HV_OEM_BITS_RESTART_AN;
+	} else {
+		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
+			       E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
+			oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
+			       E1000_PHY_CTRL_NOND0A_LPLU))
+			oem_reg |= HV_OEM_BITS_LPLU;
+	}
+
+	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
+
+out:
+	hw->phy.ops.release(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
+ *  @hw:   pointer to the HW structure
+ **/
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 data;
+
+	ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
+	if (ret_val)
+		return ret_val;
+
+	data |= HV_KMRN_MDIO_SLOW;
+
+	ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_hv_phy_workarounds_ich8lan - PHY workarounds to be done after
+ *  every PHY reset
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 phy_data;
+
+	if (hw->mac.type != e1000_pchlan)
+		return ret_val;
+
+	/* Set MDIO slow mode before any other MDIO access */
+	if (hw->phy.type == e1000_phy_82577) {
+		ret_val = e1000_set_mdio_slow_mode_hv(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	if (((hw->phy.type == e1000_phy_82577) &&
+	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
+	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
+		/* Disable generation of early preamble */
+		ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
+		if (ret_val)
+			return ret_val;
+
+		/* Preamble tuning for SSC */
+		ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
+		if (ret_val)
+			return ret_val;
+	}
+
+	if (hw->phy.type == e1000_phy_82578) {
+		/*
+		 * Return registers to default by doing a soft reset then
+		 * writing 0x3140 to the control register.
+		 */
+		if (hw->phy.revision < 2) {
+			e1000e_phy_sw_reset(hw);
+			ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140);
+		}
+	}
+
+	/* Select page 0 */
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	hw->phy.addr = 1;
+	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+	hw->phy.ops.release(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Configure the K1 Si workaround during phy reset assuming there is
+	 * link so that it disables K1 if link is in 1Gbps.
+	 */
+	ret_val = e1000_k1_gig_workaround_hv(hw, true);
+	if (ret_val)
+		goto out;
+
+	/* Workaround for link disconnects on a busy hub in half duplex */
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
+	if (ret_val)
+		goto release;
+	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
+					       phy_data & 0x00FF);
+release:
+	hw->phy.ops.release(hw);
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
+ *  @hw:   pointer to the HW structure
+ **/
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
+{
+	u32 mac_reg;
+	u16 i, phy_reg = 0;
+	s32 ret_val;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return;
+	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+	if (ret_val)
+		goto release;
+
+	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
+	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+		mac_reg = er32(RAL(i));
+		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
+					   (u16)(mac_reg & 0xFFFF));
+		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
+					   (u16)((mac_reg >> 16) & 0xFFFF));
+
+		mac_reg = er32(RAH(i));
+		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
+					   (u16)(mac_reg & 0xFFFF));
+		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
+					   (u16)((mac_reg & E1000_RAH_AV)
+						 >> 16));
+	}
+
+	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+release:
+	hw->phy.ops.release(hw);
+}
+
+/**
+ *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
+ *  with 82579 PHY
+ *  @hw: pointer to the HW structure
+ *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
+ **/
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
+{
+	s32 ret_val = 0;
+	u16 phy_reg, data;
+	u32 mac_reg;
+	u16 i;
+
+	if (hw->mac.type < e1000_pch2lan)
+		goto out;
+
+	/* disable Rx path while enabling/disabling workaround */
+	e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
+	ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
+	if (ret_val)
+		goto out;
+
+	if (enable) {
+		/*
+		 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
+		 * SHRAL/H) and initial CRC values to the MAC
+		 */
+		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+			u8 mac_addr[ETH_ALEN] = {0};
+			u32 addr_high, addr_low;
+
+			addr_high = er32(RAH(i));
+			if (!(addr_high & E1000_RAH_AV))
+				continue;
+			addr_low = er32(RAL(i));
+			mac_addr[0] = (addr_low & 0xFF);
+			mac_addr[1] = ((addr_low >> 8) & 0xFF);
+			mac_addr[2] = ((addr_low >> 16) & 0xFF);
+			mac_addr[3] = ((addr_low >> 24) & 0xFF);
+			mac_addr[4] = (addr_high & 0xFF);
+			mac_addr[5] = ((addr_high >> 8) & 0xFF);
+
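+			/* Seed the MAC's initial receive-address CRC for this
+			 * entry with the inverted little-endian CRC-32 of the
+			 * address.
+			 */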
+			ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
+		}
+
+		/* Write Rx addresses to the PHY */
+		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
+
+		/* Enable jumbo frame workaround in the MAC */
+		mac_reg = er32(FFLT_DBG);
+		mac_reg &= ~(1 << 14);
+		mac_reg |= (7 << 15);
+		ew32(FFLT_DBG, mac_reg);
+
+		mac_reg = er32(RCTL);
+		mac_reg |= E1000_RCTL_SECRC;
+		ew32(RCTL, mac_reg);
+
+		ret_val = e1000e_read_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_CTRL_OFFSET,
+						&data);
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_write_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_CTRL_OFFSET,
+						data | (1 << 0));
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_read_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_HD_CTRL,
+						&data);
+		if (ret_val)
+			goto out;
+		data &= ~(0xF << 8);
+		data |= (0xB << 8);
+		ret_val = e1000e_write_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_HD_CTRL,
+						data);
+		if (ret_val)
+			goto out;
+
+		/* Enable jumbo frame workaround in the PHY */
+		e1e_rphy(hw, PHY_REG(769, 23), &data);
+		data &= ~(0x7F << 5);
+		data |= (0x37 << 5);
+		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, PHY_REG(769, 16), &data);
+		data &= ~(1 << 13);
+		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, PHY_REG(776, 20), &data);
+		data &= ~(0x3FF << 2);
+		data |= (0x1A << 2);
+		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
+		if (ret_val)
+			goto out;
+		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, HV_PM_CTRL, &data);
+		ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
+		if (ret_val)
+			goto out;
+	} else {
+		/* Write MAC register values back to h/w defaults */
+		mac_reg = er32(FFLT_DBG);
+		mac_reg &= ~(0xF << 14);
+		ew32(FFLT_DBG, mac_reg);
+
+		mac_reg = er32(RCTL);
+		mac_reg &= ~E1000_RCTL_SECRC;
+		ew32(RCTL, mac_reg);
+
+		ret_val = e1000e_read_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_CTRL_OFFSET,
+						&data);
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_write_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_CTRL_OFFSET,
+						data & ~(1 << 0));
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_read_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_HD_CTRL,
+						&data);
+		if (ret_val)
+			goto out;
+		data &= ~(0xF << 8);
+		data |= (0xB << 8);
+		ret_val = e1000e_write_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_HD_CTRL,
+						data);
+		if (ret_val)
+			goto out;
+
+		/* Write PHY register values back to h/w defaults */
+		e1e_rphy(hw, PHY_REG(769, 23), &data);
+		data &= ~(0x7F << 5);
+		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, PHY_REG(769, 16), &data);
+		data |= (1 << 13);
+		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, PHY_REG(776, 20), &data);
+		data &= ~(0x3FF << 2);
+		data |= (0x8 << 2);
+		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
+		if (ret_val)
+			goto out;
+		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, HV_PM_CTRL, &data);
+		ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
+		if (ret_val)
+			goto out;
+	}
+
+	/* re-enable Rx path after enabling/disabling workaround */
+	ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_lv_phy_workarounds_ich8lan - PHY workarounds to be done after
+ *  every PHY reset
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	if (hw->mac.type < e1000_pch2lan)
+		goto out;
+
+	/* Set MDIO slow mode before any other MDIO access */
+	ret_val = e1000_set_mdio_slow_mode_hv(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_k1_workaround_lv - K1 Si workaround
+ *  @hw:   pointer to the HW structure
+ *
+ *  Workaround to set the K1 beacon duration for 82579 parts
+ **/
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 status_reg = 0;
+	u32 mac_reg;
+	u16 phy_reg;
+
+	if (hw->mac.type != e1000_pch2lan)
+		goto out;
+
+	/* Set K1 beacon duration based on 1Gbps speed or otherwise */
+	ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
+	if (ret_val)
+		goto out;
+
+	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
+	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
+		mac_reg = er32(FEXTNVM4);
+		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
+
+		ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
+		if (ret_val)
+			goto out;
+
+		if (status_reg & HV_M_STATUS_SPEED_1000) {
+			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
+			phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+		} else {
+			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
+			phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+		}
+		ew32(FEXTNVM4, mac_reg);
+		ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
+ *  @hw:   pointer to the HW structure
+ *  @gate: boolean set to true to gate, false to ungate
+ *
+ *  Gate/ungate the automatic PHY configuration via hardware; perform
+ *  the configuration via software instead.
+ **/
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
+{
+	u32 extcnf_ctrl;
+
+	if (hw->mac.type != e1000_pch2lan)
+		return;
+
+	extcnf_ctrl = er32(EXTCNF_CTRL);
+
+	if (gate)
+		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+	else
+		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+
+	ew32(EXTCNF_CTRL, extcnf_ctrl);
+}
+
+/**
+ *  e1000_lan_init_done_ich8lan - Check for PHY config completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Check the appropriate indication the MAC has finished configuring the
+ *  PHY after a software reset.
+ **/
+static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
+{
+	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
+
+	/* Wait for basic configuration to complete before proceeding */
+	do {
+		data = er32(STATUS);
+		data &= E1000_STATUS_LAN_INIT_DONE;
+		udelay(100);
+	} while ((!data) && --loop);
+
+	/*
+	 * If basic configuration is still incomplete when the above loop
+	 * count reaches 0, loading the configuration from NVM will
+	 * leave the PHY in a bad state possibly resulting in no link.
+	 */
+	if (loop == 0)
+		e_dbg("LAN_INIT_DONE not set, increase timeout\n");
+
+	/* Clear the Init Done bit for the next init event */
+	data = er32(STATUS);
+	data &= ~E1000_STATUS_LAN_INIT_DONE;
+	ew32(STATUS, data);
+}
+
+/**
+ *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 reg;
+
+	if (e1000_check_reset_block(hw))
+		goto out;
+
+	/* Allow time for h/w to get to quiescent state after reset */
+	usleep_range(10000, 20000);
+
+	/* Perform any necessary post-reset workarounds */
+	switch (hw->mac.type) {
+	case e1000_pchlan:
+		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+		if (ret_val)
+			goto out;
+		break;
+	case e1000_pch2lan:
+		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
+		if (ret_val)
+			goto out;
+		break;
+	default:
+		break;
+	}
+
+	/* Clear the host wakeup bit after lcd reset */
+	if (hw->mac.type >= e1000_pchlan) {
+		e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
+		reg &= ~BM_WUC_HOST_WU_BIT;
+		e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
+	}
+
+	/* Configure the LCD with the extended configuration region in NVM */
+	ret_val = e1000_sw_lcd_config_ich8lan(hw);
+	if (ret_val)
+		goto out;
+
+	/* Configure the LCD with the OEM bits in NVM */
+	ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+
+	if (hw->mac.type == e1000_pch2lan) {
+		/* Ungate automatic PHY configuration on non-managed 82579 */
+		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+			usleep_range(10000, 20000);
+			e1000_gate_hw_phy_config_ich8lan(hw, false);
+		}
+
+		/* Set EEE LPI Update Timer to 200usec */
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			goto out;
+		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
+						       I82579_LPI_UPDATE_TIMER);
+		if (ret_val)
+			goto release;
+		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
+						       0x1387);
+release:
+		hw->phy.ops.release(hw);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Resets the PHY
+ *  This is a function pointer entry point called by drivers
+ *  or other shared routines.
+ **/
+static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
+	if ((hw->mac.type == e1000_pch2lan) &&
+	    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+		e1000_gate_hw_phy_config_ich8lan(hw, true);
+
+	ret_val = e1000e_phy_hw_reset_generic(hw);
+	if (ret_val)
+		return ret_val;
+
+	return e1000_post_phy_reset_ich8lan(hw);
+}
+
+/**
+ *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU state according to the active flag.  For PCH, if the OEM write
+ *  bit is disabled in the NVM, writing the LPLU bits in the MAC will not set
+ *  the phy speed. This function will manually set the LPLU bit and restart
+ *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
+ *  since it configures the same bit.
+ **/
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
+{
+	s32 ret_val = 0;
+	u16 oem_reg;
+
+	ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
+	if (ret_val)
+		goto out;
+
+	if (active)
+		oem_reg |= HV_OEM_BITS_LPLU;
+	else
+		oem_reg &= ~HV_OEM_BITS_LPLU;
+
+	oem_reg |= HV_OEM_BITS_RESTART_AN;
+	ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.  When
+ *  activating LPLU this function also disables smart speed
+ *  and vice versa.  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 phy_ctrl;
+	s32 ret_val = 0;
+	u16 data;
+
+	if (phy->type == e1000_phy_ife)
+		return ret_val;
+
+	phy_ctrl = er32(PHY_CTRL);
+
+	if (active) {
+		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
+		ew32(PHY_CTRL, phy_ctrl);
+
+		if (phy->type != e1000_phy_igp_3)
+			return 0;
+
+		/*
+		 * Call gig speed drop workaround on LPLU before accessing
+		 * any PHY registers
+		 */
+		if (hw->mac.type == e1000_ich8lan)
+			e1000e_gig_downshift_workaround_ich8lan(hw);
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+		if (ret_val)
+			return ret_val;
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
+		if (ret_val)
+			return ret_val;
+	} else {
+		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
+		ew32(PHY_CTRL, phy_ctrl);
+
+		if (phy->type != e1000_phy_igp_3)
+			return 0;
+
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
+			if (ret_val)
+				return ret_val;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
+			if (ret_val)
+				return ret_val;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
+			if (ret_val)
+				return ret_val;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
+			if (ret_val)
+				return ret_val;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU D3 state according to the active flag.  When
+ *  activating LPLU this function also disables smart speed
+ *  and vice versa.  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 phy_ctrl;
+	s32 ret_val;
+	u16 data;
+
+	phy_ctrl = er32(PHY_CTRL);
+
+	if (!active) {
+		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+		ew32(PHY_CTRL, phy_ctrl);
+
+		if (phy->type != e1000_phy_igp_3)
+			return 0;
+
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
+			if (ret_val)
+				return ret_val;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
+			if (ret_val)
+				return ret_val;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
+			if (ret_val)
+				return ret_val;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
+			if (ret_val)
+				return ret_val;
+		}
+	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
+		ew32(PHY_CTRL, phy_ctrl);
+
+		if (phy->type != e1000_phy_igp_3)
+			return 0;
+
+		/*
+		 * Call gig speed drop workaround on LPLU before accessing
+		 * any PHY registers
+		 */
+		if (hw->mac.type == e1000_ich8lan)
+			e1000e_gig_downshift_workaround_ich8lan(hw);
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+		if (ret_val)
+			return ret_val;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
+ *  @hw: pointer to the HW structure
+ *  @bank:  pointer to the variable that returns the active bank
+ *
+ *  Reads signature byte from the NVM using the flash access registers.
+ *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
+ **/
+static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
+{
+	u32 eecd;
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
+	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
+	u8 sig_byte = 0;
+	s32 ret_val = 0;
+
+	switch (hw->mac.type) {
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		eecd = er32(EECD);
+		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
+		    E1000_EECD_SEC1VAL_VALID_MASK) {
+			if (eecd & E1000_EECD_SEC1VAL)
+				*bank = 1;
+			else
+				*bank = 0;
+
+			return 0;
+		}
+		e_dbg("Unable to determine valid NVM bank via EEC - "
+		       "reading flash signature\n");
+		fallthrough;
+	default:
+		/* set bank to 0 in case flash read fails */
+		*bank = 0;
+
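+		/* The bank signature sits in bits 15:14 of word 0x13, so
+		 * probe the high byte of that word in each bank.
+		 */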
+		/* Check bank 0 */
+		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
+		                                        &sig_byte);
+		if (ret_val)
+			return ret_val;
+		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+		    E1000_ICH_NVM_SIG_VALUE) {
+			*bank = 0;
+			return 0;
+		}
+
+		/* Check bank 1 */
+		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
+		                                        bank1_offset,
+		                                        &sig_byte);
+		if (ret_val)
+			return ret_val;
+		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+		    E1000_ICH_NVM_SIG_VALUE) {
+			*bank = 1;
+			return 0;
+		}
+
+		e_dbg("ERROR: No valid NVM bank present\n");
+		return -E1000_ERR_NVM;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset (in bytes) of the word(s) to read.
+ *  @words: Size of data to read in words
+ *  @data: Pointer to the word(s) to read at offset.
+ *
+ *  Reads a word(s) from the NVM using the flash access registers.
+ **/
+static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
+				  u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u32 act_offset;
+	s32 ret_val = 0;
+	u32 bank = 0;
+	u16 i, word;
+
+	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+	    (words == 0)) {
+		e_dbg("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	nvm->ops.acquire(hw);
+
+	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+	if (ret_val) {
+		e_dbg("Could not detect valid bank, assuming bank 0\n");
+		bank = 0;
+	}
+
+	act_offset = (bank) ? nvm->flash_bank_size : 0;
+	act_offset += offset;
+
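+	/* act_offset is a word offset; reads hit the shadow RAM copy first
+	 * and fall back to the flash only for unmodified words.
+	 */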
+	ret_val = 0;
+	for (i = 0; i < words; i++) {
+		if (dev_spec->shadow_ram[offset+i].modified) {
+			data[i] = dev_spec->shadow_ram[offset+i].value;
+		} else {
+			ret_val = e1000_read_flash_word_ich8lan(hw,
+								act_offset + i,
+								&word);
+			if (ret_val)
+				break;
+			data[i] = word;
+		}
+	}
+
+	nvm->ops.release(hw);
+
+out:
+	if (ret_val)
+		e_dbg("NVM read error: %d\n", ret_val);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_flash_cycle_init_ich8lan - Initialize flash
+ *  @hw: pointer to the HW structure
+ *
+ *  This function does initial flash setup so that a new read/write/erase cycle
+ *  can be started.
+ **/
+static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
+{
+	union ich8_hws_flash_status hsfsts;
+	s32 ret_val = -E1000_ERR_NVM;
+
+	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+
+	/* Check if the flash descriptor is valid */
+	if (hsfsts.hsf_status.fldesvalid == 0) {
+		e_dbg("Flash descriptor invalid.  "
+			 "SW Sequencing must be used.\n");
+		return -E1000_ERR_NVM;
+	}
+
+	/* Clear FCERR and DAEL in hw status by writing 1 */
+	hsfsts.hsf_status.flcerr = 1;
+	hsfsts.hsf_status.dael = 1;
+
+	ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+
+	/*
+	 * Either we should have a hardware SPI cycle in progress
+	 * bit to check against, in order to start a new cycle or
+	 * FDONE bit should be changed in the hardware so that it
+	 * is 1 after hardware reset, which can then be used as an
+	 * indication whether a cycle is in progress or has been
+	 * completed.
+	 */
+
+	if (hsfsts.hsf_status.flcinprog == 0) {
+		/*
+		 * There is no cycle running at present,
+		 * so we can start a cycle.
+		 * Begin by setting Flash Cycle Done.
+		 */
+		hsfsts.hsf_status.flcdone = 1;
+		ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+		ret_val = 0;
+	} else {
+		s32 i = 0;
+
+		/*
+		 * Otherwise poll for sometime so the current
+		 * cycle has a chance to end before giving up.
+		 */
+		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
+			hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS);
+			if (hsfsts.hsf_status.flcinprog == 0) {
+				ret_val = 0;
+				break;
+			}
+			udelay(1);
+		}
+		if (ret_val == 0) {
+			/*
+			 * The previous cycle finished within the timeout,
+			 * so now set the Flash Cycle Done.
+			 */
+			hsfsts.hsf_status.flcdone = 1;
+			ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+		} else {
+			e_dbg("Flash controller busy, cannot get access\n");
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
+ *  @hw: pointer to the HW structure
+ *  @timeout: maximum time to wait for completion
+ *
+ *  This function starts a flash cycle and waits for its completion.
+ **/
+static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
+{
+	union ich8_hws_flash_ctrl hsflctl;
+	union ich8_hws_flash_status hsfsts;
+	s32 ret_val = -E1000_ERR_NVM;
+	u32 i = 0;
+
+	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
+	hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+	hsflctl.hsf_ctrl.flcgo = 1;
+	ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+	/* wait till FDONE bit is set to 1 */
+	do {
+		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+		if (hsfsts.hsf_status.flcdone == 1)
+			break;
+		udelay(1);
+	} while (i++ < timeout);
+
+	if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
+		return 0;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_read_flash_word_ich8lan - Read word from flash
+ *  @hw: pointer to the HW structure
+ *  @offset: offset to data location
+ *  @data: pointer to the location for storing the data
+ *
+ *  Reads the flash word at offset into data.  Offset is converted
+ *  to bytes before read.
+ **/
+static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
+					 u16 *data)
+{
+	/* Must convert offset into bytes. */
+	offset <<= 1;
+
+	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
+}
+
+/**
+ *  e1000_read_flash_byte_ich8lan - Read byte from flash
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset of the byte to read.
+ *  @data: Pointer to a byte to store the value read.
+ *
+ *  Reads a single byte from the NVM using the flash access registers.
+ **/
+static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+					 u8 *data)
+{
+	s32 ret_val;
+	u16 word = 0;
+
+	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+	if (ret_val)
+		return ret_val;
+
+	*data = (u8)word;
+
+	return 0;
+}
+
+/**
+ *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset (in bytes) of the byte or word to read.
+ *  @size: Size of data to read, 1=byte 2=word
+ *  @data: Pointer to the word to store the value read.
+ *
+ *  Reads a byte or word from the NVM using the flash access registers.
+ **/
+static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+					 u8 size, u16 *data)
+{
+	union ich8_hws_flash_status hsfsts;
+	union ich8_hws_flash_ctrl hsflctl;
+	u32 flash_linear_addr;
+	u32 flash_data = 0;
+	s32 ret_val = -E1000_ERR_NVM;
+	u8 count = 0;
+
+	if (size < 1  || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+		return -E1000_ERR_NVM;
+
+	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+			    hw->nvm.flash_base_addr;
+
+	do {
+		udelay(1);
+		/* Steps */
+		ret_val = e1000_flash_cycle_init_ich8lan(hw);
+		if (ret_val != 0)
+			break;
+
+		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+		hsflctl.hsf_ctrl.fldbcount = size - 1;
+		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+		ret_val = e1000_flash_cycle_ich8lan(hw,
+						ICH_FLASH_READ_COMMAND_TIMEOUT);
+
+		/*
+		 * Check if FCERR is set to 1, if set to 1, clear it
+		 * and try the whole sequence a few more times, else
+		 * read in (shift in) the Flash Data0, least significant
+		 * byte first.
+		 */
+		if (ret_val == 0) {
+			flash_data = er32flash(ICH_FLASH_FDATA0);
+			if (size == 1)
+				*data = (u8)(flash_data & 0x000000FF);
+			else if (size == 2)
+				*data = (u16)(flash_data & 0x0000FFFF);
+			break;
+		} else {
+			/*
+			 * If we've gotten here, then things are probably
+			 * completely hosed, but if the error condition is
+			 * detected, it won't hurt to give it another try...
+			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
+			 */
+			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+			if (hsfsts.hsf_status.flcerr == 1) {
+				/* Repeat for some time before giving up. */
+				continue;
+			} else if (hsfsts.hsf_status.flcdone == 0) {
+				e_dbg("Timeout error - flash cycle "
+					 "did not complete.\n");
+				break;
+			}
+		}
+	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset (in bytes) of the word(s) to write.
+ *  @words: Size of data to write in words
+ *  @data: Pointer to the word(s) to write at offset.
+ *
+ *  Writes a byte or word to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
+				   u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u16 i;
+
+	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+	    (words == 0)) {
+		e_dbg("nvm parameter(s) out of bounds\n");
+		return -E1000_ERR_NVM;
+	}
+
+	nvm->ops.acquire(hw);
+
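+	/* Writes only touch the shadow RAM; the flash itself is updated
+	 * later when the NVM checksum is committed.
+	 */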
+	for (i = 0; i < words; i++) {
+		dev_spec->shadow_ram[offset+i].modified = true;
+		dev_spec->shadow_ram[offset+i].value = data[i];
+	}
+
+	nvm->ops.release(hw);
+
+	return 0;
+}
+
+/**
+ *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
+ *  @hw: pointer to the HW structure
+ *
+ *  The NVM checksum is updated by calling the generic update_nvm_checksum,
+ *  which writes the checksum to the shadow ram.  The changes in the shadow
+ *  ram are then committed to the EEPROM by processing each bank at a time
+ *  checking for the modified bit and writing only the pending changes.
+ *  After a successful commit, the shadow ram is cleared and is ready for
+ *  future writes.
+ **/
+static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = e1000e_update_nvm_checksum_generic(hw);
+	if (ret_val)
+		goto out;
+
+	if (nvm->type != e1000_nvm_flash_sw)
+		goto out;
+
+	nvm->ops.acquire(hw);
+
+	/*
+	 * We're writing to the opposite bank so if we're on bank 1,
+	 * write to bank 0 etc.  We also need to erase the segment that
+	 * is going to be written
+	 */
+	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+	if (ret_val) {
+		e_dbg("Could not detect valid bank, assuming bank 0\n");
+		bank = 0;
+	}
+
+	if (bank == 0) {
+		new_bank_offset = nvm->flash_bank_size;
+		old_bank_offset = 0;
+		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
+		if (ret_val)
+			goto release;
+	} else {
+		old_bank_offset = nvm->flash_bank_size;
+		new_bank_offset = 0;
+		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
+		if (ret_val)
+			goto release;
+	}
+
+	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+		/*
+		 * Determine whether to write the value stored
+		 * in the other NVM bank or a modified value stored
+		 * in the shadow RAM
+		 */
+		if (dev_spec->shadow_ram[i].modified) {
+			data = dev_spec->shadow_ram[i].value;
+		} else {
+			ret_val = e1000_read_flash_word_ich8lan(hw, i +
+			                                        old_bank_offset,
+			                                        &data);
+			if (ret_val)
+				break;
+		}
+
+		/*
+		 * If the word is 0x13, then make sure the signature bits
+		 * (15:14) are 11b until the commit has completed.
+		 * This will allow us to write 10b which indicates the
+		 * signature is valid.  We want to do this after the write
+		 * has completed so that we don't mark the segment valid
+		 * while the write is still in progress
+		 */
+		if (i == E1000_ICH_NVM_SIG_WORD)
+			data |= E1000_ICH_NVM_SIG_MASK;
+
+		/* Convert offset to bytes. */
+		act_offset = (i + new_bank_offset) << 1;
+
+		udelay(100);
+		/* Write the bytes to the new bank. */
+		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+							       act_offset,
+							       (u8)data);
+		if (ret_val)
+			break;
+
+		udelay(100);
+		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+							  act_offset + 1,
+							  (u8)(data >> 8));
+		if (ret_val)
+			break;
+	}
+
+	/*
+	 * Don't bother writing the segment valid bits if sector
+	 * programming failed.
+	 */
+	if (ret_val) {
+		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
+		e_dbg("Flash commit failed.\n");
+		goto release;
+	}
+
+	/*
+	 * Finally validate the new segment by setting bits 15:14
+	 * to 10b in word 0x13.  This can be done without an
+	 * erase as well since these bits are 11 to start with
+	 * and we need to change bit 14 to 0b.
+	 */
+	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
+	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
+	if (ret_val)
+		goto release;
+
+	data &= 0xBFFF;
+	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+						       act_offset * 2 + 1,
+						       (u8)(data >> 8));
+	if (ret_val)
+		goto release;
+
+	/*
+	 * And invalidate the previously valid segment by setting
+	 * its signature word (0x13) high_byte to 0b. This can be
+	 * done without an erase because flash erase sets all bits
+	 * to 1's. We can write 1's to 0's without an erase
+	 */
+	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
+	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
+	if (ret_val)
+		goto release;
+
+	/* Great!  Everything worked, we can now clear the cached entries. */
+	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+		dev_spec->shadow_ram[i].modified = false;
+		dev_spec->shadow_ram[i].value = 0xFFFF;
+	}
+
+release:
+	nvm->ops.release(hw);
+
+	/*
+	 * Reload the EEPROM, or else modifications will not appear
+	 * until after the next adapter reset.
+	 */
+	if (!ret_val) {
+		e1000e_reload_nvm(hw);
+		usleep_range(10000, 20000);
+	}
+
+out:
+	if (ret_val)
+		e_dbg("NVM update error: %d\n", ret_val);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
+ *  If the bit is 0, the EEPROM had been modified but the checksum was not
+ *  calculated, in which case we need to calculate the checksum and set bit 6.
+ **/
+static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 data;
+
+	/*
+	 * Read 0x19 and check bit 6.  If this bit is 0, the checksum
+	 * needs to be fixed.  This bit is an indication that the NVM
+	 * was prepared by OEM software and did not calculate the
+	 * checksum...a likely scenario.
+	 */
+	ret_val = e1000_read_nvm(hw, 0x19, 1, &data);
+	if (ret_val)
+		return ret_val;
+
+	if ((data & 0x40) == 0) {
+		data |= 0x40;
+		ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
+		if (ret_val)
+			return ret_val;
+		ret_val = e1000e_update_nvm_checksum(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	return e1000e_validate_nvm_checksum_generic(hw);
+}
+
+/**
+ *  e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
+ *  @hw: pointer to the HW structure
+ *
+ *  To prevent malicious write/erase of the NVM, set it to be read-only
+ *  so that the hardware ignores all write/erase cycles of the NVM via
+ *  the flash control registers.  The shadow-ram copy of the NVM will
+ *  still be updated, however any updates to this copy will not stick
+ *  across driver reloads.
+ **/
+void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	union ich8_flash_protected_range pr0;
+	union ich8_hws_flash_status hsfsts;
+	u32 gfpreg;
+
+	nvm->ops.acquire(hw);
+
+	gfpreg = er32flash(ICH_FLASH_GFPREG);
+
+	/* Write-protect GbE Sector of NVM */
+	pr0.regval = er32flash(ICH_FLASH_PR0);
+	pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
+	pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
+	pr0.range.wpe = true;
+	ew32flash(ICH_FLASH_PR0, pr0.regval);
+
+	/*
+	 * Lock down a subset of GbE Flash Control Registers, e.g.
+	 * PR0 to prevent the write-protection from being lifted.
+	 * Once FLOCKDN is set, the registers protected by it cannot
+	 * be written until FLOCKDN is cleared by a hardware reset.
+	 */
+	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+	hsfsts.hsf_status.flockdn = true;
+	ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+
+	nvm->ops.release(hw);
+}
+
+/**
+ *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset (in bytes) of the byte/word to write.
+ *  @size: Size of data to write, 1=byte 2=word
+ *  @data: The byte(s) to write to the NVM.
+ *
+ *  Writes one/two bytes to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+					  u8 size, u16 data)
+{
+	union ich8_hws_flash_status hsfsts;
+	union ich8_hws_flash_ctrl hsflctl;
+	u32 flash_linear_addr;
+	u32 flash_data = 0;
+	s32 ret_val;
+	u8 count = 0;
+
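+	/*
+	 * Reject unsupported sizes, data wider than the requested size, and
+	 * offsets beyond the flash linear address space.
+	 */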
+	if (size < 1 || size > 2 || data > size * 0xff ||
+	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
+		return -E1000_ERR_NVM;
+
+	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+			    hw->nvm.flash_base_addr;
+
+	do {
+		udelay(1);
+		/* Steps */
+		ret_val = e1000_flash_cycle_init_ich8lan(hw);
+		if (ret_val)
+			break;
+
+		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+		hsflctl.hsf_ctrl.fldbcount = size - 1;
+		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+		if (size == 1)
+			flash_data = (u32)data & 0x00FF;
+		else
+			flash_data = (u32)data;
+
+		ew32flash(ICH_FLASH_FDATA0, flash_data);
+
+		/*
+		 * Check if FCERR is set to 1.  If so, clear it and try the
+		 * whole sequence a few more times, otherwise we are done.
+		 */
+		ret_val = e1000_flash_cycle_ich8lan(hw,
+					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+		if (!ret_val)
+			break;
+
+		/*
+		 * If we're here, then things are most likely
+		 * completely hosed, but if the error condition
+		 * is detected, it won't hurt to give it another
+		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+		 */
+		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+		if (hsfsts.hsf_status.flcerr == 1)
+			/* Repeat for some time before giving up. */
+			continue;
+		if (hsfsts.hsf_status.flcdone == 0) {
+			e_dbg("Timeout error - flash cycle "
+				 "did not complete.");
+			break;
+		}
+	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The index of the byte to write.
+ *  @data: The byte to write to the NVM.
+ *
+ *  Writes a single byte to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+					  u8 data)
+{
+	u16 word = (u16)data;
+
+	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
+}
+
+/**
+ *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset of the byte to write.
+ *  @byte: The byte to write to the NVM.
+ *
+ *  Writes a single byte to the NVM using the flash access registers.
+ *  Goes through a retry algorithm before giving up.
+ **/
+static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+						u32 offset, u8 byte)
+{
+	s32 ret_val;
+	u16 program_retries;
+
+	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
+	if (!ret_val)
+		return ret_val;
+
+	for (program_retries = 0; program_retries < 100; program_retries++) {
+		e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
+		udelay(100);
+		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
+		if (!ret_val)
+			break;
+	}
+	if (program_retries == 100)
+		return -E1000_ERR_NVM;
+
+	return 0;
+}
+
+/**
+ *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
+ *  @hw: pointer to the HW structure
+ *  @bank: 0 for first bank, 1 for second bank, etc.
+ *
+ *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
+ *  bank N is 4096 * N + flash_reg_addr.
+ **/
+static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	union ich8_hws_flash_status hsfsts;
+	union ich8_hws_flash_ctrl hsflctl;
+	u32 flash_linear_addr;
+	/* bank size is in 16bit words - adjust to bytes */
+	u32 flash_bank_size = nvm->flash_bank_size * 2;
+	s32 ret_val;
+	s32 count = 0;
+	s32 j, iteration, sector_size;
+
+	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+
+	/*
+	 * Determine HW Sector size: Read BERASE bits of hw flash status
+	 * register
+	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
+	 *     consecutive sectors.  The start index for the nth Hw sector
+	 *     can be calculated as = bank * 4096 + n * 256
+	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
+	 *     The start index for the nth Hw sector can be calculated
+	 *     as = bank * 4096
+	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
+	 *     (ich9 only, otherwise error condition)
+	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
+	 */
+	switch (hsfsts.hsf_status.berasesz) {
+	case 0:
+		/* Hw sector size 256 */
+		sector_size = ICH_FLASH_SEG_SIZE_256;
+		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
+		break;
+	case 1:
+		sector_size = ICH_FLASH_SEG_SIZE_4K;
+		iteration = 1;
+		break;
+	case 2:
+		sector_size = ICH_FLASH_SEG_SIZE_8K;
+		iteration = 1;
+		break;
+	case 3:
+		sector_size = ICH_FLASH_SEG_SIZE_64K;
+		iteration = 1;
+		break;
+	default:
+		return -E1000_ERR_NVM;
+	}
+
+	/* Start with the base address, then add the sector offset. */
+	flash_linear_addr = hw->nvm.flash_base_addr;
+	flash_linear_addr += (bank) ? flash_bank_size : 0;
+
+	for (j = 0; j < iteration; j++) {
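+		/*
+		 * Retry the erase cycle for this sector up to
+		 * ICH_FLASH_CYCLE_REPEAT_COUNT times, re-initializing the
+		 * flash cycle before each attempt.
+		 */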
+		do {
+			/* Steps */
+			ret_val = e1000_flash_cycle_init_ich8lan(hw);
+			if (ret_val)
+				return ret_val;
+
+			/*
+			 * Write a value 11 (block Erase) in Flash
+			 * Cycle field in hw flash control
+			 */
+			hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
+			ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+			/*
+			 * Write the last 24 bits of an index within the
+			 * block into Flash Linear address field in Flash
+			 * Address.
+			 */
+			flash_linear_addr += (j * sector_size);
+			ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+			ret_val = e1000_flash_cycle_ich8lan(hw,
+					       ICH_FLASH_ERASE_COMMAND_TIMEOUT);
+			if (ret_val == 0)
+				break;
+
+			/*
+			 * Check if FCERR is set to 1.  If 1,
+			 * clear it and try the whole sequence
+			 * a few more times else Done
+			 */
+			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+			if (hsfsts.hsf_status.flcerr == 1)
+				/* repeat for some time before giving up */
+				continue;
+			else if (hsfsts.hsf_status.flcdone == 0)
+				return ret_val;
+		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_valid_led_default_ich8lan - Set the default LED settings
+ *  @hw: pointer to the HW structure
+ *  @data: Pointer to the LED settings
+ *
+ *  Reads the LED default settings from the NVM into data.  If the NVM LED
+ *  setting is all 0's or F's, set the LED default to a valid LED default
+ *  setting.
+ **/
+static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		return ret_val;
+	}
+
+	if (*data == ID_LED_RESERVED_0000 ||
+	    *data == ID_LED_RESERVED_FFFF)
+		*data = ID_LED_DEFAULT_ICH8LAN;
+
+	return 0;
+}
+
+/**
+ *  e1000_id_led_init_pchlan - store LED configurations
+ *  @hw: pointer to the HW structure
+ *
+ *  PCH does not control LEDs via the LEDCTL register, rather it uses
+ *  the PHY LED configuration register.
+ *
+ *  PCH also does not have an "always on" or "always off" mode which
+ *  complicates the ID feature.  Instead of using the "on" mode to indicate
+ *  in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init()),
+ *  use "link_up" mode.  The LEDs will still ID on request if there is no
+ *  link based on logic in e1000_led_[on|off]_pchlan().
+ **/
+static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
+	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
+	u16 data, i, temp, shift;
+
+	/* Get default ID LED modes */
+	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+	if (ret_val)
+		goto out;
+
+	mac->ledctl_default = er32(LEDCTL);
+	mac->ledctl_mode1 = mac->ledctl_default;
+	mac->ledctl_mode2 = mac->ledctl_default;
+
+	for (i = 0; i < 4; i++) {
+		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
+		shift = (i * 5);
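+		/*
+		 * Each ID LED setting occupies a 4-bit nibble in the NVM
+		 * word, while each LED config field in the PHY register is
+		 * 5 bits wide, hence the separate shifts.
+		 */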
+		switch (temp) {
+		case ID_LED_ON1_DEF2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_ON1_OFF2:
+			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+			mac->ledctl_mode1 |= (ledctl_on << shift);
+			break;
+		case ID_LED_OFF1_DEF2:
+		case ID_LED_OFF1_ON2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+			mac->ledctl_mode1 |= (ledctl_off << shift);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		switch (temp) {
+		case ID_LED_DEF1_ON2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_OFF1_ON2:
+			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+			mac->ledctl_mode2 |= (ledctl_on << shift);
+			break;
+		case ID_LED_DEF1_OFF2:
+		case ID_LED_ON1_OFF2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+			mac->ledctl_mode2 |= (ledctl_off << shift);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
+ *  @hw: pointer to the HW structure
+ *
+ *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
+ *  register, so the bus width is hard coded.
+ **/
+static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	s32 ret_val;
+
+	ret_val = e1000e_get_bus_info_pcie(hw);
+
+	/*
+	 * ICH devices are "PCI Express"-ish.  They have
+	 * a configuration space, but do not contain
+	 * PCI Express Capability registers, so bus width
+	 * must be hardcoded.
+	 */
+	if (bus->width == e1000_bus_width_unknown)
+		bus->width = e1000_bus_width_pcie_x1;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_reset_hw_ich8lan - Reset the hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  Does a full reset of the hardware which includes a reset of the PHY and
+ *  MAC.
+ **/
+static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u16 reg;
+	u32 ctrl, kab;
+	s32 ret_val;
+
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = e1000e_disable_pcie_master(hw);
+	if (ret_val)
+		e_dbg("PCI-E Master disable polling has failed.\n");
+
+	e_dbg("Masking off all interrupts\n");
+	ew32(IMC, 0xffffffff);
+
+	/*
+	 * Disable the Transmit and Receive units.  Then delay to allow
+	 * any pending transactions to complete before we hit the MAC
+	 * with the global reset.
+	 */
+	ew32(RCTL, 0);
+	ew32(TCTL, E1000_TCTL_PSP);
+	e1e_flush();
+
+	usleep_range(10000, 20000);
+
+	/* Workaround for ICH8 bit corruption issue in FIFO memory */
+	if (hw->mac.type == e1000_ich8lan) {
+		/* Set Tx and Rx buffer allocation to 8k apiece. */
+		ew32(PBA, E1000_PBA_8K);
+		/* Set Packet Buffer Size to 16k. */
+		ew32(PBS, E1000_PBS_16K);
+	}
+
+	if (hw->mac.type == e1000_pchlan) {
+		/* Save the NVM K1 bit setting */
+		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
+		if (ret_val)
+			return ret_val;
+
+		if (reg & E1000_NVM_K1_ENABLE)
+			dev_spec->nvm_k1_enabled = true;
+		else
+			dev_spec->nvm_k1_enabled = false;
+	}
+
+	ctrl = er32(CTRL);
+
+	if (!e1000_check_reset_block(hw)) {
+		/*
+		 * Full-chip reset requires MAC and PHY reset at the same
+		 * time to make sure the interface between MAC and the
+		 * external PHY is reset.
+		 */
+		ctrl |= E1000_CTRL_PHY_RST;
+
+		/*
+		 * Gate automatic PHY configuration by hardware on
+		 * non-managed 82579
+		 */
+		if ((hw->mac.type == e1000_pch2lan) &&
+		    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+			e1000_gate_hw_phy_config_ich8lan(hw, true);
+	}
+	ret_val = e1000_acquire_swflag_ich8lan(hw);
+	e_dbg("Issuing a global reset to ich8lan\n");
+	ew32(CTRL, (ctrl | E1000_CTRL_RST));
+	/* cannot issue a flush here because it hangs the hardware */
+	msleep(20);
+
+	if (!ret_val)
+		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
+
+	if (ctrl & E1000_CTRL_PHY_RST) {
+		ret_val = hw->phy.ops.get_cfg_done(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_post_phy_reset_ich8lan(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	/*
+	 * For PCH, this write will make sure that any noise
+	 * will be detected as a CRC error and be dropped rather than show up
+	 * as a bad packet to the DMA engine.
+	 */
+	if (hw->mac.type == e1000_pchlan)
+		ew32(CRC_OFFSET, 0x65656565);
+
+	ew32(IMC, 0xffffffff);
+	er32(ICR);
+
+	kab = er32(KABGTXD);
+	kab |= E1000_KABGTXD_BGSQLBIAS;
+	ew32(KABGTXD, kab);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_hw_ich8lan - Initialize the hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  Prepares the hardware for transmit and receive by doing the following:
+ *   - initialize hardware bits
+ *   - initialize LED identification
+ *   - setup receive address registers
+ *   - setup flow control
+ *   - setup transmit descriptors
+ *   - clear statistics
+ **/
+static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 ctrl_ext, txdctl, snoop;
+	s32 ret_val;
+	u16 i;
+
+	e1000_initialize_hw_bits_ich8lan(hw);
+
+	/* Initialize identification LED */
+	ret_val = mac->ops.id_led_init(hw);
+	if (ret_val)
+		e_dbg("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
+
+	/* Setup the receive address. */
+	e1000e_init_rx_addrs(hw, mac->rar_entry_count);
+
+	/* Zero out the Multicast HASH table */
+	e_dbg("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+	/*
+	 * The 82578 Rx buffer will stall if wakeup is enabled in host and
+	 * the ME.  Disable wakeup by clearing the host wakeup bit.
+	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
+	 */
+	if (hw->phy.type == e1000_phy_82578) {
+		e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
+		i &= ~BM_WUC_HOST_WU_BIT;
+		e1e_wphy(hw, BM_PORT_GEN_CFG, i);
+		ret_val = e1000_phy_hw_reset_ich8lan(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link_ich8lan(hw);
+
+	/* Set the transmit descriptor write-back policy for both queues */
+	txdctl = er32(TXDCTL(0));
+	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+		 E1000_TXDCTL_FULL_TX_DESC_WB;
+	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
+		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+	ew32(TXDCTL(0), txdctl);
+	txdctl = er32(TXDCTL(1));
+	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+		 E1000_TXDCTL_FULL_TX_DESC_WB;
+	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
+		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+	ew32(TXDCTL(1), txdctl);
+
+	/*
+	 * ICH8 has opposite polarity of no_snoop bits.
+	 * By default, we should use snoop behavior.
+	 */
+	if (mac->type == e1000_ich8lan)
+		snoop = PCIE_ICH8_SNOOP_ALL;
+	else
+		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
+	e1000e_set_pcie_no_snoop(hw, snoop);
+
+	ctrl_ext = er32(CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+	ew32(CTRL_EXT, ctrl_ext);
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_ich8lan(hw);
+
+	return 0;
+}
+
+/**
+ *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets/Clears required hardware bits necessary for correctly setting up the
+ *  hardware for transmit and receive.
+ **/
+static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
+{
+	u32 reg;
+
+	/* Extended Device Control */
+	reg = er32(CTRL_EXT);
+	reg |= (1 << 22);
+	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
+	if (hw->mac.type >= e1000_pchlan)
+		reg |= E1000_CTRL_EXT_PHYPDEN;
+	ew32(CTRL_EXT, reg);
+
+	/* Transmit Descriptor Control 0 */
+	reg = er32(TXDCTL(0));
+	reg |= (1 << 22);
+	ew32(TXDCTL(0), reg);
+
+	/* Transmit Descriptor Control 1 */
+	reg = er32(TXDCTL(1));
+	reg |= (1 << 22);
+	ew32(TXDCTL(1), reg);
+
+	/* Transmit Arbitration Control 0 */
+	reg = er32(TARC(0));
+	if (hw->mac.type == e1000_ich8lan)
+		reg |= (1 << 28) | (1 << 29);
+	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
+	ew32(TARC(0), reg);
+
+	/* Transmit Arbitration Control 1 */
+	reg = er32(TARC(1));
+	if (er32(TCTL) & E1000_TCTL_MULR)
+		reg &= ~(1 << 28);
+	else
+		reg |= (1 << 28);
+	reg |= (1 << 24) | (1 << 26) | (1 << 30);
+	ew32(TARC(1), reg);
+
+	/* Device Status */
+	if (hw->mac.type == e1000_ich8lan) {
+		reg = er32(STATUS);
+		reg &= ~(1 << 31);
+		ew32(STATUS, reg);
+	}
+
+	/*
+	 * work-around descriptor data corruption issue during nfs v2 udp
+	 * traffic, just disable the nfs filtering capability
+	 */
+	reg = er32(RFCTL);
+	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
+	ew32(RFCTL, reg);
+}
+
+/**
+ *  e1000_setup_link_ich8lan - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	if (e1000_check_reset_block(hw))
+		return 0;
+
+	/*
+	 * ICH parts do not have a word in the NVM to determine
+	 * the default flow control setting, so we explicitly
+	 * set it to full.
+	 */
+	if (hw->fc.requested_mode == e1000_fc_default) {
+		/* Workaround h/w hang when Tx flow control enabled */
+		if (hw->mac.type == e1000_pchlan)
+			hw->fc.requested_mode = e1000_fc_rx_pause;
+		else
+			hw->fc.requested_mode = e1000_fc_full;
+	}
+
+	/*
+	 * Save off the requested flow control mode for use later.  Depending
+	 * on the link partner's capabilities, we may or may not use this mode.
+	 */
+	hw->fc.current_mode = hw->fc.requested_mode;
+
+	e_dbg("After fix-ups FlowControl is now = %x\n",
+		hw->fc.current_mode);
+
+	/* Continue to configure the copper link. */
+	ret_val = e1000_setup_copper_link_ich8lan(hw);
+	if (ret_val)
+		return ret_val;
+
+	ew32(FCTTV, hw->fc.pause_time);
+	if ((hw->phy.type == e1000_phy_82578) ||
+	    (hw->phy.type == e1000_phy_82579) ||
+	    (hw->phy.type == e1000_phy_i217) ||
+	    (hw->phy.type == e1000_phy_82577)) {
+		ew32(FCRTV_PCH, hw->fc.refresh_time);
+
+		ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
+				   hw->fc.pause_time);
+		if (ret_val)
+			return ret_val;
+	}
+
+	return e1000e_set_fc_watermarks(hw);
+}
+
+/**
+ *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the kumeran interface to the PHY to wait the appropriate time
+ *  when polling the PHY, then call the generic setup_copper_link to finish
+ *  configuring the copper link.
+ **/
+static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+	u16 reg_data;
+
+	ctrl = er32(CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ew32(CTRL, ctrl);
+
+	/*
+	 * Set the mac to wait the maximum time between each iteration
+	 * and increase the max iterations when polling the phy;
+	 * this fixes erroneous timeouts at 10Mbps.
+	 */
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
+	if (ret_val)
+		return ret_val;
+	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+	                               &reg_data);
+	if (ret_val)
+		return ret_val;
+	reg_data |= 0x3F;
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+	                                reg_data);
+	if (ret_val)
+		return ret_val;
+
+	switch (hw->phy.type) {
+	case e1000_phy_igp_3:
+		ret_val = e1000e_copper_link_setup_igp(hw);
+		if (ret_val)
+			return ret_val;
+		break;
+	case e1000_phy_bm:
+	case e1000_phy_82578:
+		ret_val = e1000e_copper_link_setup_m88(hw);
+		if (ret_val)
+			return ret_val;
+		break;
+	case e1000_phy_82577:
+	case e1000_phy_82579:
+	case e1000_phy_i217:
+		ret_val = e1000_copper_link_setup_82577(hw);
+		if (ret_val)
+			return ret_val;
+		break;
+	case e1000_phy_ife:
+		ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
+		if (ret_val)
+			return ret_val;
+
+		reg_data &= ~IFE_PMC_AUTO_MDIX;
+
+		switch (hw->phy.mdix) {
+		case 1:
+			reg_data &= ~IFE_PMC_FORCE_MDIX;
+			break;
+		case 2:
+			reg_data |= IFE_PMC_FORCE_MDIX;
+			break;
+		case 0:
+		default:
+			reg_data |= IFE_PMC_AUTO_MDIX;
+			break;
+		}
+		ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
+		if (ret_val)
+			return ret_val;
+		break;
+	default:
+		break;
+	}
+	return e1000e_setup_copper_link(hw);
+}
+
+/**
+ *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: pointer to store current link speed
+ *  @duplex: pointer to store the current link duplex
+ *
+ *  Calls the generic get_speed_and_duplex to retrieve the current link
+ *  information and then calls the Kumeran lock loss workaround for links at
+ *  gigabit speeds.
+ **/
+static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
+					  u16 *duplex)
+{
+	s32 ret_val;
+
+	ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
+	if (ret_val)
+		return ret_val;
+
+	if ((hw->mac.type == e1000_ich8lan) &&
+	    (hw->phy.type == e1000_phy_igp_3) &&
+	    (*speed == SPEED_1000)) {
+		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
+ *  @hw: pointer to the HW structure
+ *
+ *  Work-around for 82566 Kumeran PCS lock loss:
+ *  On link status change (i.e. PCI reset, speed change) and link is up and
+ *  speed is gigabit-
+ *    0) if workaround is optionally disabled do nothing
+ *    1) wait 1ms for Kumeran link to come up
+ *    2) check Kumeran Diagnostic register PCS lock loss bit
+ *    3) if not set the link is locked (all is good), otherwise...
+ *    4) reset the PHY
+ *    5) repeat up to 10 times
+ *  Note: this is only called for IGP3 copper when speed is 1gb.
+ **/
+static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u32 phy_ctrl;
+	s32 ret_val;
+	u16 i, data;
+	bool link;
+
+	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
+		return 0;
+
+	/*
+	 * Make sure link is up before proceeding.  If not, just return.
+	 * Attempting this while the link is negotiating has been seen to
+	 * foul up link stability.
+	 */
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (!link)
+		return 0;
+
+	for (i = 0; i < 10; i++) {
+		/* read once to clear */
+		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
+		if (ret_val)
+			return ret_val;
+		/* and again to get new status */
+		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
+		if (ret_val)
+			return ret_val;
+
+		/* check for PCS lock */
+		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
+			return 0;
+
+		/* Issue PHY reset */
+		e1000_phy_hw_reset(hw);
+		mdelay(5);
+	}
+	/* Disable GigE link negotiation */
+	phy_ctrl = er32(PHY_CTRL);
+	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
+		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+	ew32(PHY_CTRL, phy_ctrl);
+
+	/*
+	 * Call gig speed drop workaround on Gig disable before accessing
+	 * any PHY registers
+	 */
+	e1000e_gig_downshift_workaround_ich8lan(hw);
+
+	/* unable to acquire PCS lock */
+	return -E1000_ERR_PHY;
+}
+
+/**
+ *  e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
+ *  @hw: pointer to the HW structure
+ *  @state: boolean value used to set the current Kumeran workaround state
+ *
+ *  If ICH8, set the current Kumeran workaround state (enabled - true
+ *  /disabled - false).
+ **/
+void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+						 bool state)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+
+	if (hw->mac.type != e1000_ich8lan) {
+		e_dbg("Workaround applies to ICH8 only.\n");
+		return;
+	}
+
+	dev_spec->kmrn_lock_loss_workaround_enabled = state;
+}
+
+/**
+ *  e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
+ *  @hw: pointer to the HW structure
+ *
+ *  Workaround for 82566 power-down on D3 entry:
+ *    1) disable gigabit link
+ *    2) write VR power-down enable
+ *    3) read it back
+ *  Continue if successful, else issue LCD reset and repeat
+ **/
+void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
+{
+	u32 reg;
+	u16 data;
+	u8  retry = 0;
+
+	if (hw->phy.type != e1000_phy_igp_3)
+		return;
+
+	/* Try the workaround twice (if needed) */
+	do {
+		/* Disable link */
+		reg = er32(PHY_CTRL);
+		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
+			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+		ew32(PHY_CTRL, reg);
+
+		/*
+		 * Call gig speed drop workaround on Gig disable before
+		 * accessing any PHY registers
+		 */
+		if (hw->mac.type == e1000_ich8lan)
+			e1000e_gig_downshift_workaround_ich8lan(hw);
+
+		/* Write VR power-down enable */
+		e1e_rphy(hw, IGP3_VR_CTRL, &data);
+		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+		e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
+
+		/* Read it back and test */
+		e1e_rphy(hw, IGP3_VR_CTRL, &data);
+		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
+			break;
+
+		/* Issue PHY reset and repeat at most one more time */
+		reg = er32(CTRL);
+		ew32(CTRL, reg | E1000_CTRL_PHY_RST);
+		retry++;
+	} while (retry);
+}
+
+/**
+ *  e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
+ *  @hw: pointer to the HW structure
+ *
+ *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
+ *  LPLU, Gig disable, MDIC PHY reset):
+ *    1) Set Kumeran Near-end loopback
+ *    2) Clear Kumeran Near-end loopback
+ *  Should only be called for ICH8[m] devices with any 1G Phy.
+ **/
+void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 reg_data;
+
+	if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
+		return;
+
+	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+				      &reg_data);
+	if (ret_val)
+		return;
+	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+				       reg_data);
+	if (ret_val)
+		return;
+	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+				       reg_data);
+}
+
+/**
+ *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
+ *  @hw: pointer to the HW structure
+ *
+ *  During S0 to Sx transition, it is possible the link remains at gig
+ *  instead of negotiating to a lower speed.  Before going to Sx, set
+ *  'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
+ *  to a lower speed.  For PCH and newer parts, the OEM bits PHY register
+ *  (LED, GbE disable and LPLU configurations) also needs to be written.
+ *  Parts that support (and are linked to a partner which supports) EEE in
+ *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
+ *  than 10Mbps w/o EEE.
+ **/
+void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u32 phy_ctrl;
+	s32 ret_val;
+
+	phy_ctrl = er32(PHY_CTRL);
+	phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
+
+	if (hw->phy.type == e1000_phy_i217) {
+		u16 phy_reg;
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			goto out;
+
+		if (!dev_spec->eee_disable) {
+			u16 eee_advert;
+
+			ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
+						  I217_EEE_ADVERTISEMENT);
+			if (ret_val)
+				goto release;
+			e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert);
+
+			/* Disable LPLU if both link partners support 100BaseT
+			 * EEE and 100Full is advertised on both ends of the
+			 * link.
+			 */
+			if ((eee_advert & I217_EEE_100_SUPPORTED) &&
+			    (dev_spec->eee_lp_ability &
+			     I217_EEE_100_SUPPORTED) &&
+			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
+				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
+					      E1000_PHY_CTRL_NOND0A_LPLU);
+		}
+
+		/* For i217 Intel Rapid Start Technology support,
+		 * when the system is going into Sx and no manageability engine
+		 * is present, the driver must configure proxy to reset only on
+		 * power good.  LPI (Low Power Idle) state must also reset only
+		 * on power good, as well as the MTA (Multicast table array).
+		 * The SMBus release must also be disabled on LCD reset.
+		 */
+		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+
+			/* Enable proxy to reset only on power good. */
+			e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
+			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
+			e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
+
+			/* Set bit enable LPI (EEE) to reset only on
+			 * power good.
+			 */
+			e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
+			phy_reg |= I217_SxCTRL_MASK;
+			e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
+
+			/* Disable the SMB release on LCD reset. */
+			e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
+			phy_reg &= ~I217_MEMPWR;
+			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
+		}
+
+		/* Enable MTA to reset for Intel Rapid Start Technology
+		 * Support
+		 */
+		e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
+		phy_reg |= I217_CGFREG_MASK;
+		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
+
+release:
+		hw->phy.ops.release(hw);
+	}
+out:
+	ew32(PHY_CTRL, phy_ctrl);
+
+	if (hw->mac.type == e1000_ich8lan)
+		e1000e_gig_downshift_workaround_ich8lan(hw);
+
+	if (hw->mac.type >= e1000_pchlan) {
+		e1000_oem_bits_config_ich8lan(hw, false);
+		e1000_phy_hw_reset_ich8lan(hw);
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			return;
+		e1000_write_smbus_addr(hw);
+		hw->phy.ops.release(hw);
+	}
+}
+
+/**
+ *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
+ *  @hw: pointer to the HW structure
+ *
+ *  During Sx to S0 transitions on non-managed devices or managed devices
+ *  on which PHY resets are not blocked, if the PHY registers cannot be
+ *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
+ *  the PHY.
+ *  On i217, setup Intel Rapid Start Technology.
+ **/
+void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
+{
+	u32 fwsm;
+
+	if (hw->mac.type != e1000_pch2lan)
+		return;
+
+	fwsm = er32(FWSM);
+	if (!(fwsm & E1000_ICH_FWSM_FW_VALID) || !e1000_check_reset_block(hw)) {
+		u16 phy_id1, phy_id2;
+		s32 ret_val;
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val) {
+			e_dbg("Failed to acquire PHY semaphore in resume\n");
+			return;
+		}
+
+		/* For i217 Intel Rapid Start Technology support when the system
+		 * is transitioning from Sx and no manageability engine is present
+		 * configure SMBus to restore on reset, disable proxy, and enable
+		 * the reset on MTA (Multicast table array).
+		 */
+		if (hw->phy.type == e1000_phy_i217) {
+			u16 phy_reg;
+
+			ret_val = hw->phy.ops.acquire(hw);
+			if (ret_val) {
+				e_dbg("Failed to setup iRST\n");
+				return;
+			}
+
+			if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+				/* Restore clear on SMB if no manageability engine
+				 * is present
+				 */
+				ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
+				if (ret_val)
+					goto _release;
+				phy_reg |= I217_MEMPWR_MASK;
+				e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
+
+				/* Disable Proxy */
+				e1e_wphy_locked(hw, I217_PROXY_CTRL, 0);
+			}
+			/* Enable reset on MTA */
+			ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
+			if (ret_val)
+				goto _release;
+			phy_reg &= ~I217_CGFREG_MASK;
+			e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
+_release:
+			if (ret_val)
+				e_dbg("Error %d in resume workarounds\n", ret_val);
+			hw->phy.ops.release(hw);
+		}
+
+		/* Test access to the PHY registers by reading the ID regs */
+		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1);
+		if (ret_val)
+			goto release;
+		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2);
+		if (ret_val)
+			goto release;
+
+		if (hw->phy.id == ((u32)(phy_id1 << 16) |
+				   (u32)(phy_id2 & PHY_REVISION_MASK)))
+			goto release;
+
+		e1000_toggle_lanphypc_value_ich8lan(hw);
+
+		hw->phy.ops.release(hw);
+		msleep(50);
+		e1000_phy_hw_reset(hw);
+		msleep(50);
+		return;
+	}
+
+release:
+	hw->phy.ops.release(hw);
+
+	return;
+}
+
+/**
+ *  e1000_cleanup_led_ich8lan - Restore the default LED operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Return the LED back to the default configuration.
+ **/
+static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
+{
+	if (hw->phy.type == e1000_phy_ife)
+		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
+
+	ew32(LEDCTL, hw->mac.ledctl_default);
+	return 0;
+}
+
+/**
+ *  e1000_led_on_ich8lan - Turn LEDs on
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn on the LEDs.
+ **/
+static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
+{
+	if (hw->phy.type == e1000_phy_ife)
+		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+
+	ew32(LEDCTL, hw->mac.ledctl_mode2);
+	return 0;
+}
+
+/**
+ *  e1000_led_off_ich8lan - Turn LEDs off
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn off the LEDs.
+ **/
+static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
+{
+	if (hw->phy.type == e1000_phy_ife)
+		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+				(IFE_PSCL_PROBE_MODE |
+				 IFE_PSCL_PROBE_LEDS_OFF));
+
+	ew32(LEDCTL, hw->mac.ledctl_mode1);
+	return 0;
+}
+
+/**
+ *  e1000_setup_led_pchlan - Configures SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This prepares the SW controllable LED for use.
+ **/
+static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
+{
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
+}
+
+/**
+ *  e1000_cleanup_led_pchlan - Restore the default LED operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Return the LED back to the default configuration.
+ **/
+static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
+{
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
+}
+
+/**
+ *  e1000_led_on_pchlan - Turn LEDs on
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn on the LEDs.
+ **/
+static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
+{
+	u16 data = (u16)hw->mac.ledctl_mode2;
+	u32 i, led;
+
+	/*
+	 * If there is no link, turn the LED on by setting the invert bit
+	 * for each LED whose mode is "link_up" in ledctl_mode2.
+	 */
+	if (!(er32(STATUS) & E1000_STATUS_LU)) {
+		for (i = 0; i < 3; i++) {
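+			/*
+			 * Each LED occupies a 5-bit field; flip the invert
+			 * bit only for LEDs configured in "link_up" mode.
+			 */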
+			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+			if ((led & E1000_PHY_LED0_MODE_MASK) !=
+			    E1000_LEDCTL_MODE_LINK_UP)
+				continue;
+			if (led & E1000_PHY_LED0_IVRT)
+				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+			else
+				data |= (E1000_PHY_LED0_IVRT << (i * 5));
+		}
+	}
+
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
+}
+
+/**
+ *  e1000_led_off_pchlan - Turn LEDs off
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn off the LEDs.
+ **/
+static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
+{
+	u16 data = (u16)hw->mac.ledctl_mode1;
+	u32 i, led;
+
+	/*
+	 * If there is no link, turn the LED off by clearing the invert bit
+	 * for each LED whose mode is "link_up" in ledctl_mode1.
+	 */
+	if (!(er32(STATUS) & E1000_STATUS_LU)) {
+		for (i = 0; i < 3; i++) {
+			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+			if ((led & E1000_PHY_LED0_MODE_MASK) !=
+			    E1000_LEDCTL_MODE_LINK_UP)
+				continue;
+			if (led & E1000_PHY_LED0_IVRT)
+				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+			else
+				data |= (E1000_PHY_LED0_IVRT << (i * 5));
+		}
+	}
+
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
+}
+
+/**
+ *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Read appropriate register for the config done bit for completion status
+ *  and configure the PHY through s/w for EEPROM-less parts.
+ *
+ *  NOTE: some silicon which is EEPROM-less will fail trying to read the
+ *  config done bit, so only an error is logged and initialization continues.
+ *  If we were to return with an error, EEPROM-less silicon would not be able
+ *  to be reset or change link.
+ **/
+static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u32 bank = 0;
+	u32 status;
+
+	e1000e_get_cfg_done(hw);
+
+	/* Wait for indication from h/w that it has completed basic config */
+	if (hw->mac.type >= e1000_ich10lan) {
+		e1000_lan_init_done_ich8lan(hw);
+	} else {
+		ret_val = e1000e_get_auto_rd_done(hw);
+		if (ret_val) {
+			/*
+			 * When the auto config read does not complete, do not
+			 * return an error.  This can happen when there is no
+			 * EEPROM, and returning an error would prevent getting
+			 * link.
+			 */
+			e_dbg("Auto Read Done did not complete\n");
+			ret_val = 0;
+		}
+	}
+
+	/* Clear PHY Reset Asserted bit */
+	status = er32(STATUS);
+	if (status & E1000_STATUS_PHYRA)
+		ew32(STATUS, status & ~E1000_STATUS_PHYRA);
+	else
+		e_dbg("PHY Reset Asserted not set - needs delay\n");
+
+	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
+	if (hw->mac.type <= e1000_ich9lan) {
+		if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
+		    (hw->phy.type == e1000_phy_igp_3)) {
+			e1000e_phy_init_script_igp3(hw);
+		}
+	} else {
+		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
+			/* Maybe we should do a basic PHY config */
+			e_dbg("EEPROM not present\n");
+			ret_val = -E1000_ERR_CONFIG;
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or when Wake-on-LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(hw->mac.ops.check_mng_mode(hw) ||
+	      hw->phy.ops.check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+}
+
+/**
+ *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears hardware counters specific to the silicon family and calls
+ *  clear_hw_cntrs_generic to clear all general purpose counters.
+ **/
+static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
+{
+	u16 phy_data;
+	s32 ret_val;
+
+	e1000e_clear_hw_cntrs_base(hw);
+
+	er32(ALGNERRC);
+	er32(RXERRC);
+	er32(TNCRS);
+	er32(CEXTERR);
+	er32(TSCTC);
+	er32(TSCTFC);
+
+	er32(MGTPRC);
+	er32(MGTPDC);
+	er32(MGTPTC);
+
+	er32(IAC);
+	er32(ICRXOC);
+
+	/* Clear PHY statistics registers */
+	if ((hw->phy.type == e1000_phy_82578) ||
+	    (hw->phy.type == e1000_phy_82579) ||
+	    (hw->phy.type == e1000_phy_i217) ||
+	    (hw->phy.type == e1000_phy_82577)) {
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			return;
+		ret_val = hw->phy.ops.set_page(hw,
+					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
+		if (ret_val)
+			goto release;
+		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
+release:
+		hw->phy.ops.release(hw);
+	}
+}
+
+static const struct e1000_mac_operations ich8_mac_ops = {
+	.id_led_init		= e1000e_id_led_init,
+	/* check_mng_mode dependent on mac type */
+	.check_for_link		= e1000_check_for_copper_link_ich8lan,
+	/* cleanup_led dependent on mac type */
+	.clear_hw_cntrs		= e1000_clear_hw_cntrs_ich8lan,
+	.get_bus_info		= e1000_get_bus_info_ich8lan,
+	.set_lan_id		= e1000_set_lan_id_single_port,
+	.get_link_up_info	= e1000_get_link_up_info_ich8lan,
+	/* led_on dependent on mac type */
+	/* led_off dependent on mac type */
+	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
+	.reset_hw		= e1000_reset_hw_ich8lan,
+	.init_hw		= e1000_init_hw_ich8lan,
+	.setup_link		= e1000_setup_link_ich8lan,
+	.setup_physical_interface = e1000_setup_copper_link_ich8lan,
+	/* id_led_init dependent on mac type */
+};
+
+static const struct e1000_phy_operations ich8_phy_ops = {
+	.acquire		= e1000_acquire_swflag_ich8lan,
+	.check_reset_block	= e1000_check_reset_block_ich8lan,
+	.commit			= NULL,
+	.get_cfg_done		= e1000_get_cfg_done_ich8lan,
+	.get_cable_length	= e1000e_get_cable_length_igp_2,
+	.read_reg		= e1000e_read_phy_reg_igp,
+	.release		= e1000_release_swflag_ich8lan,
+	.reset			= e1000_phy_hw_reset_ich8lan,
+	.set_d0_lplu_state	= e1000_set_d0_lplu_state_ich8lan,
+	.set_d3_lplu_state	= e1000_set_d3_lplu_state_ich8lan,
+	.write_reg		= e1000e_write_phy_reg_igp,
+};
+
+static const struct e1000_nvm_operations ich8_nvm_ops = {
+	.acquire		= e1000_acquire_nvm_ich8lan,
+	.read			= e1000_read_nvm_ich8lan,
+	.release		= e1000_release_nvm_ich8lan,
+	.update			= e1000_update_nvm_checksum_ich8lan,
+	.valid_led_default	= e1000_valid_led_default_ich8lan,
+	.validate		= e1000_validate_nvm_checksum_ich8lan,
+	.write			= e1000_write_nvm_ich8lan,
+};
+
+const struct e1000_info e1000_ich8_info = {
+	.mac			= e1000_ich8lan,
+	.flags			= FLAG_HAS_WOL
+				  | FLAG_IS_ICH
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_FLASH
+				  | FLAG_APME_IN_WUC,
+	.pba			= 8,
+	.max_hw_frame_size	= ETH_FRAME_LEN + ETH_FCS_LEN,
+	.get_variants		= e1000_get_variants_ich8lan,
+	.mac_ops		= &ich8_mac_ops,
+	.phy_ops		= &ich8_phy_ops,
+	.nvm_ops		= &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_ich9_info = {
+	.mac			= e1000_ich9lan,
+	.flags			= FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_IS_ICH
+				  | FLAG_HAS_WOL
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_ERT
+				  | FLAG_HAS_FLASH
+				  | FLAG_APME_IN_WUC,
+	.pba			= 10,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_ich8lan,
+	.mac_ops		= &ich8_mac_ops,
+	.phy_ops		= &ich8_phy_ops,
+	.nvm_ops		= &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_ich10_info = {
+	.mac			= e1000_ich10lan,
+	.flags			= FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_IS_ICH
+				  | FLAG_HAS_WOL
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_ERT
+				  | FLAG_HAS_FLASH
+				  | FLAG_APME_IN_WUC,
+	.pba			= 10,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_ich8lan,
+	.mac_ops		= &ich8_mac_ops,
+	.phy_ops		= &ich8_phy_ops,
+	.nvm_ops		= &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_pch_info = {
+	.mac			= e1000_pchlan,
+	.flags			= FLAG_IS_ICH
+				  | FLAG_HAS_WOL
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_FLASH
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
+				  | FLAG_APME_IN_WUC,
+	.flags2			= FLAG2_HAS_PHY_STATS,
+	.pba			= 26,
+	.max_hw_frame_size	= 4096,
+	.get_variants		= e1000_get_variants_ich8lan,
+	.mac_ops		= &ich8_mac_ops,
+	.phy_ops		= &ich8_phy_ops,
+	.nvm_ops		= &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_pch2_info = {
+	.mac			= e1000_pch2lan,
+	.flags			= FLAG_IS_ICH
+				  | FLAG_HAS_WOL
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_FLASH
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_APME_IN_WUC,
+	.flags2			= FLAG2_HAS_PHY_STATS
+				  | FLAG2_HAS_EEE,
+	.pba			= 26,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_ich8lan,
+	.mac_ops		= &ich8_mac_ops,
+	.phy_ops		= &ich8_phy_ops,
+	.nvm_ops		= &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_pch_lpt_info = {
+	.mac			= e1000_pch_lpt,
+	.flags			= FLAG_IS_ICH
+				  | FLAG_HAS_WOL
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_FLASH
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_APME_IN_WUC,
+	.flags2			= FLAG2_HAS_PHY_STATS
+				  | FLAG2_HAS_EEE,
+	.pba			= 26,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_ich8lan,
+	.mac_ops		= &ich8_mac_ops,
+	.phy_ops		= &ich8_phy_ops,
+	.nvm_ops		= &ich8_nvm_ops,
+};
+++ linux-patched/drivers/xenomai/net/drivers/e1000e/e1000.h	2022-03-21 12:58:29.536887528 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/rt_eth1394.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _E1000_H_
+#define _E1000_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/crc32.h>
+#include <linux/if_vlan.h>
+#include <linux/ethtool.h>
+
+#include <rtnet_port.h>
+
+#include "hw.h"
+
+struct e1000_info;
+
+#define e_dbg(format, arg...) \
+	pr_debug(format, ## arg)
+#define e_err(format, arg...) \
+	pr_err(format, ## arg)
+#define e_info(format, arg...) \
+	pr_info(format, ## arg)
+#define e_warn(format, arg...) \
+	pr_warn(format, ## arg)
+#define e_notice(format, arg...) \
+	pr_notice(format, ## arg)
+
+
+/* Interrupt modes, as used by the IntMode parameter */
+#define E1000E_INT_MODE_LEGACY		0
+#define E1000E_INT_MODE_MSI		1
+#define E1000E_INT_MODE_MSIX		2
+
+/* Tx/Rx descriptor defines */
+#define E1000_DEFAULT_TXD		256
+#define E1000_MAX_TXD			4096
+#define E1000_MIN_TXD			64
+
+#define E1000_DEFAULT_RXD		256
+#define E1000_MAX_RXD			4096
+#define E1000_MIN_RXD			64
+
+#define E1000_MIN_ITR_USECS		10 /* 100000 irq/sec */
+#define E1000_MAX_ITR_USECS		10000 /* 100    irq/sec */
+
+/* Early Receive defines */
+#define E1000_ERT_2048			0x100
+
+#define E1000_FC_PAUSE_TIME		0x0680 /* 858 usec */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue? */
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define E1000_RX_BUFFER_WRITE		16 /* Must be power of 2 */
+
+#define AUTO_ALL_MODES			0
+#define E1000_EEPROM_APME		0x0400
+
+#define E1000_MNG_VLAN_NONE		(-1)
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS			(MAX_PS_BUFFERS - 1)
+
+#define DEFAULT_JUMBO			9234
+
+/* BM/HV Specific Registers */
+#define BM_PORT_CTRL_PAGE                 769
+
+#define PHY_UPPER_SHIFT                   21
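+/*
+ * Build a BM/HV PHY register identifier: the low bits hold the register
+ * offset within the page, the page number is placed above PHY_PAGE_SHIFT,
+ * and register bits beyond MAX_PHY_REG_ADDRESS are relocated above
+ * PHY_UPPER_SHIFT.
+ */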
+#define BM_PHY_REG(page, reg) \
+	(((reg) & MAX_PHY_REG_ADDRESS) |\
+	 (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
+	 (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
+
+/* PHY Wakeup Registers and defines */
+#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
+#define BM_RCTL         PHY_REG(BM_WUC_PAGE, 0)
+#define BM_WUC          PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC         PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS          PHY_REG(BM_WUC_PAGE, 3)
+#define BM_RAR_L(_i)    (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
+#define BM_RAR_M(_i)    (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
+#define BM_RAR_H(_i)    (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
+#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
+#define BM_MTA(_i)      (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
+
+#define BM_RCTL_UPE           0x0001          /* Unicast Promiscuous Mode */
+#define BM_RCTL_MPE           0x0002          /* Multicast Promiscuous Mode */
+#define BM_RCTL_MO_SHIFT      3               /* Multicast Offset Shift */
+#define BM_RCTL_MO_MASK       (3 << 3)        /* Multicast Offset Mask */
+#define BM_RCTL_BAM           0x0020          /* Broadcast Accept Mode */
+#define BM_RCTL_PMCF          0x0040          /* Pass MAC Control Frames */
+#define BM_RCTL_RFCE          0x0080          /* Rx Flow Control Enable */
+
+#define HV_STATS_PAGE	778
+#define HV_SCC_UPPER	PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */
+#define HV_SCC_LOWER	PHY_REG(HV_STATS_PAGE, 17)
+#define HV_ECOL_UPPER	PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */
+#define HV_ECOL_LOWER	PHY_REG(HV_STATS_PAGE, 19)
+#define HV_MCC_UPPER	PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. Count */
+#define HV_MCC_LOWER	PHY_REG(HV_STATS_PAGE, 21)
+#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */
+#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
+#define HV_COLC_UPPER	PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */
+#define HV_COLC_LOWER	PHY_REG(HV_STATS_PAGE, 26)
+#define HV_DC_UPPER	PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
+#define HV_DC_LOWER	PHY_REG(HV_STATS_PAGE, 28)
+#define HV_TNCRS_UPPER	PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */
+#define HV_TNCRS_LOWER	PHY_REG(HV_STATS_PAGE, 30)
+
+#define E1000_FCRTV_PCH     0x05F40 /* PCH Flow Control Refresh Timer Value */
+
+/* BM PHY Copper Specific Status */
+#define BM_CS_STATUS                      17
+#define BM_CS_STATUS_LINK_UP              0x0400
+#define BM_CS_STATUS_RESOLVED             0x0800
+#define BM_CS_STATUS_SPEED_MASK           0xC000
+#define BM_CS_STATUS_SPEED_1000           0x8000
+
+/* 82577 Mobile Phy Status Register */
+#define HV_M_STATUS                       26
+#define HV_M_STATUS_AUTONEG_COMPLETE      0x1000
+#define HV_M_STATUS_SPEED_MASK            0x0300
+#define HV_M_STATUS_SPEED_1000            0x0200
+#define HV_M_STATUS_LINK_UP               0x0040
+
+#define E1000_ICH_FWSM_PCIM2PCI		0x01000000 /* ME PCIm-to-PCI active */
+#define E1000_ICH_FWSM_PCIM2PCI_COUNT	2000
+
+/* Time to wait before putting the device into D3 if there's no link (in ms). */
+#define LINK_TIMEOUT		100
+
+#define DEFAULT_RDTR			0
+#define DEFAULT_RADV			8
+#define BURST_RDTR			0x20
+#define BURST_RADV			0x20
+
+/*
+ * in the case of WTHRESH, it appears at least the 82571/2 hardware
+ * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
+ * WTHRESH=4, and since we want 64 bytes at a time written back, set
+ * it to 5
+ */
+#define E1000_TXDCTL_DMA_BURST_ENABLE                          \
+	(E1000_TXDCTL_GRAN | /* set descriptor granularity */  \
+	 E1000_TXDCTL_COUNT_DESC |                             \
+	 (5 << 16) | /* wthresh must be +1 more than desired */\
+	 (1 << 8)  | /* hthresh */                             \
+	 0x1f)       /* pthresh */
+
+#define E1000_RXDCTL_DMA_BURST_ENABLE                          \
+	(0x01000000 | /* set descriptor granularity */         \
+	 (4 << 16)  | /* set writeback threshold    */         \
+	 (4 << 8)   | /* set prefetch threshold     */         \
+	 0x20)        /* set hthresh                */
+
+#define E1000_TIDV_FPD (1 << 31)
+#define E1000_RDTR_FPD (1 << 31)
+
+enum e1000_boards {
+	board_82571,
+	board_82572,
+	board_82573,
+	board_82574,
+	board_82583,
+	board_80003es2lan,
+	board_ich8lan,
+	board_ich9lan,
+	board_ich10lan,
+	board_pchlan,
+	board_pch2lan,
+	board_pch_lpt,
+};
+
+struct e1000_ps_page {
+	struct page *page;
+	u64 dma; /* must be u64 - written to hw */
+};
+
+/*
+ * wrappers around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct e1000_buffer {
+	dma_addr_t dma;
+	struct rtskb *skb;
+	union {
+		/* Tx */
+		struct {
+			unsigned long time_stamp;
+			u16 length;
+			u16 next_to_watch;
+			unsigned int segs;
+			unsigned int bytecount;
+			u16 mapped_as_page;
+		};
+		/* Rx */
+		struct {
+			/* arrays of page information for packet split */
+			struct e1000_ps_page *ps_pages;
+			struct page *page;
+		};
+	};
+};
+
+struct e1000_ring {
+	void *desc;			/* pointer to ring memory  */
+	dma_addr_t dma;			/* phys address of ring    */
+	unsigned int size;		/* length of ring in bytes */
+	unsigned int count;		/* number of desc. in ring */
+
+	u16 next_to_use;
+	u16 next_to_clean;
+
+	u16 head;
+	u16 tail;
+
+	/* array of buffer information structs */
+	struct e1000_buffer *buffer_info;
+
+	char name[IFNAMSIZ + 5];
+	u32 ims_val;
+	u32 itr_val;
+	u16 itr_register;
+	int set_itr;
+
+	struct rtskb *rx_skb_top;
+
+	rtdm_lock_t lock;
+};
+
+/* PHY register snapshot values */
+struct e1000_phy_regs {
+	u16 bmcr;		/* basic mode control register    */
+	u16 bmsr;		/* basic mode status register     */
+	u16 advertise;		/* auto-negotiation advertisement */
+	u16 lpa;		/* link partner ability register  */
+	u16 expansion;		/* auto-negotiation expansion reg */
+	u16 ctrl1000;		/* 1000BASE-T control register    */
+	u16 stat1000;		/* 1000BASE-T status register     */
+	u16 estatus;		/* extended status register       */
+};
+
+/* board specific private data structure */
+struct e1000_adapter {
+	struct timer_list watchdog_timer;
+	struct timer_list phy_info_timer;
+	struct timer_list blink_timer;
+
+	struct work_struct reset_task;
+	struct work_struct watchdog_task;
+
+	const struct e1000_info *ei;
+
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	u32 bd_number;
+	u32 rx_buffer_len;
+	u16 mng_vlan_id;
+	u16 link_speed;
+	u16 link_duplex;
+	u16 eeprom_vers;
+
+	/* track device up/down/testing state */
+	unsigned long state;
+
+	/* Interrupt Throttle Rate */
+	u32 itr;
+	u32 itr_setting;
+	u16 tx_itr;
+	u16 rx_itr;
+
+	/*
+	 * Tx
+	 */
+	struct e1000_ring *tx_ring /* One per active queue */
+						____cacheline_aligned_in_smp;
+
+	struct napi_struct napi;
+
+	unsigned int restart_queue;
+	u32 txd_cmd;
+
+	bool detect_tx_hung;
+	u8 tx_timeout_factor;
+
+	u32 tx_int_delay;
+	u32 tx_abs_int_delay;
+
+	unsigned int total_tx_bytes;
+	unsigned int total_tx_packets;
+	unsigned int total_rx_bytes;
+	unsigned int total_rx_packets;
+
+	/* Tx stats */
+	u64 tpt_old;
+	u64 colc_old;
+	u32 gotc;
+	u64 gotc_old;
+	u32 tx_timeout_count;
+	u32 tx_fifo_head;
+	u32 tx_head_addr;
+	u32 tx_fifo_size;
+	u32 tx_dma_failed;
+
+	/*
+	 * Rx
+	 */
+	bool (*clean_rx) (struct e1000_adapter *adapter,
+			  nanosecs_abs_t *time_stamp)
+						____cacheline_aligned_in_smp;
+	void (*alloc_rx_buf) (struct e1000_adapter *adapter,
+			      int cleaned_count, gfp_t gfp);
+	struct e1000_ring *rx_ring;
+
+	u32 rx_int_delay;
+	u32 rx_abs_int_delay;
+
+	/* Rx stats */
+	u64 hw_csum_err;
+	u64 hw_csum_good;
+	u64 rx_hdr_split;
+	u32 gorc;
+	u64 gorc_old;
+	u32 alloc_rx_buff_failed;
+	u32 rx_dma_failed;
+
+	unsigned int rx_ps_pages;
+	u16 rx_ps_bsize0;
+	u32 max_frame_size;
+	u32 min_frame_size;
+
+	/* OS defined structs */
+	struct rtnet_device *netdev;
+	struct pci_dev *pdev;
+
+	rtdm_irq_t irq_handle;
+	rtdm_irq_t rx_irq_handle;
+	rtdm_irq_t tx_irq_handle;
+	rtdm_nrtsig_t mod_timer_sig;
+	rtdm_nrtsig_t downshift_sig;
+
+	/* structs defined in e1000_hw.h */
+	struct e1000_hw hw;
+
+	spinlock_t stats64_lock;
+	struct e1000_hw_stats stats;
+	struct e1000_phy_info phy_info;
+	struct e1000_phy_stats phy_stats;
+
+	/* Snapshot of PHY registers */
+	struct e1000_phy_regs phy_regs;
+
+	struct e1000_ring test_tx_ring;
+	struct e1000_ring test_rx_ring;
+	u32 test_icr;
+
+	u32 msg_enable;
+	unsigned int num_vectors;
+	struct msix_entry *msix_entries;
+	int int_mode;
+	u32 eiac_mask;
+
+	u32 eeprom_wol;
+	u32 wol;
+	u32 pba;
+	u32 max_hw_frame_size;
+
+	bool fc_autoneg;
+
+	unsigned int flags;
+	unsigned int flags2;
+	struct work_struct downshift_task;
+	struct work_struct update_phy_task;
+	struct work_struct print_hang_task;
+
+	bool idle_check;
+	int phy_hang_count;
+};
+
+struct e1000_info {
+	enum e1000_mac_type	mac;
+	unsigned int		flags;
+	unsigned int		flags2;
+	u32			pba;
+	u32			max_hw_frame_size;
+	s32			(*get_variants)(struct e1000_adapter *);
+	const struct e1000_mac_operations *mac_ops;
+	const struct e1000_phy_operations *phy_ops;
+	const struct e1000_nvm_operations *nvm_ops;
+};
+
+/* hardware capability, feature, and workaround flags */
+#define FLAG_HAS_AMT                      (1 << 0)
+#define FLAG_HAS_FLASH                    (1 << 1)
+#define FLAG_HAS_HW_VLAN_FILTER           (1 << 2)
+#define FLAG_HAS_WOL                      (1 << 3)
+#define FLAG_HAS_ERT                      (1 << 4)
+#define FLAG_HAS_CTRLEXT_ON_LOAD          (1 << 5)
+#define FLAG_HAS_SWSM_ON_LOAD             (1 << 6)
+#define FLAG_HAS_JUMBO_FRAMES             (1 << 7)
+#define FLAG_READ_ONLY_NVM                (1 << 8)
+#define FLAG_IS_ICH                       (1 << 9)
+#define FLAG_HAS_MSIX                     (1 << 10)
+#define FLAG_HAS_SMART_POWER_DOWN         (1 << 11)
+#define FLAG_IS_QUAD_PORT_A               (1 << 12)
+#define FLAG_IS_QUAD_PORT                 (1 << 13)
+#define FLAG_TIPG_MEDIUM_FOR_80003ESLAN   (1 << 14)
+#define FLAG_APME_IN_WUC                  (1 << 15)
+#define FLAG_APME_IN_CTRL3                (1 << 16)
+#define FLAG_APME_CHECK_PORT_B            (1 << 17)
+#define FLAG_DISABLE_FC_PAUSE_TIME        (1 << 18)
+#define FLAG_NO_WAKE_UCAST                (1 << 19)
+#define FLAG_MNG_PT_ENABLED               (1 << 20)
+#define FLAG_RESET_OVERWRITES_LAA         (1 << 21)
+#define FLAG_TARC_SPEED_MODE_BIT          (1 << 22)
+#define FLAG_TARC_SET_BIT_ZERO            (1 << 23)
+#define FLAG_RX_NEEDS_RESTART             (1 << 24)
+#define FLAG_LSC_GIG_SPEED_DROP           (1 << 25)
+#define FLAG_SMART_POWER_DOWN             (1 << 26)
+#define FLAG_MSI_ENABLED                  (1 << 27)
+/* reserved (1 << 28) */
+#define FLAG_TSO_FORCE                    (1 << 29)
+#define FLAG_RX_RESTART_NOW               (1 << 30)
+#define FLAG_MSI_TEST_FAILED              (1 << 31)
+
+#define FLAG2_CRC_STRIPPING               (1 << 0)
+#define FLAG2_HAS_PHY_WAKEUP              (1 << 1)
+#define FLAG2_IS_DISCARDING               (1 << 2)
+#define FLAG2_DISABLE_ASPM_L1             (1 << 3)
+#define FLAG2_HAS_PHY_STATS               (1 << 4)
+#define FLAG2_HAS_EEE                     (1 << 5)
+#define FLAG2_DMA_BURST                   (1 << 6)
+#define FLAG2_DISABLE_ASPM_L0S            (1 << 7)
+#define FLAG2_DISABLE_AIM                 (1 << 8)
+#define FLAG2_CHECK_PHY_HANG              (1 << 9)
+#define FLAG2_NO_DISABLE_RX               (1 << 10)
+#define FLAG2_PCIM2PCI_ARBITER_WA         (1 << 11)
+
+#define E1000_RX_DESC_PS(R, i)	    \
+	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+#define E1000_RX_DESC_EXT(R, i)	    \
+	(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
+#define E1000_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
+#define E1000_TX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_tx_desc)
+#define E1000_CONTEXT_DESC(R, i)	E1000_GET_DESC(R, i, e1000_context_desc)
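+/*
+ * Usage sketch (illustrative only): the ring argument is dereferenced by
+ * the macros above, so a descriptor lookup typically looks like
+ *	struct e1000_tx_desc *txd = E1000_TX_DESC(*adapter->tx_ring, i);
+ */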
+
+enum e1000_state_t {
+	__E1000_TESTING,
+	__E1000_RESETTING,
+	__E1000_ACCESS_SHARED_RESOURCE,
+	__E1000_DOWN
+};
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+extern char e1000e_driver_name[];
+extern const char e1000e_driver_version[];
+
+extern void e1000e_check_options(struct e1000_adapter *adapter);
+extern void e1000e_set_ethtool_ops(struct net_device *netdev);
+
+extern int e1000e_up(struct e1000_adapter *adapter);
+extern void e1000e_down(struct e1000_adapter *adapter);
+extern void e1000e_reinit_locked(struct e1000_adapter *adapter);
+extern void e1000e_reset(struct e1000_adapter *adapter);
+extern void e1000e_power_up_phy(struct e1000_adapter *adapter);
+extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
+extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
+extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
+extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
+extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+						    struct rtnl_link_stats64
+						    *stats);
+extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
+extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
+
+extern unsigned int copybreak;
+
+extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw);
+
+extern const struct e1000_info e1000_82571_info;
+extern const struct e1000_info e1000_82572_info;
+extern const struct e1000_info e1000_82573_info;
+extern const struct e1000_info e1000_82574_info;
+extern const struct e1000_info e1000_82583_info;
+extern const struct e1000_info e1000_ich8_info;
+extern const struct e1000_info e1000_ich9_info;
+extern const struct e1000_info e1000_ich10_info;
+extern const struct e1000_info e1000_pch_info;
+extern const struct e1000_info e1000_pch2_info;
+extern const struct e1000_info e1000_pch_lpt_info;
+extern const struct e1000_info e1000_es2_info;
+
+extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+					 u32 pba_num_size);
+
+extern s32  e1000e_commit_phy(struct e1000_hw *hw);
+
+extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
+
+extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
+extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
+
+extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
+extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+						 bool state);
+extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
+extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
+extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
+extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
+extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
+extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
+extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
+
+extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
+extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
+extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
+extern s32 e1000e_setup_led_generic(struct e1000_hw *hw);
+extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
+extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
+extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
+extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
+extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
+extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
+extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
+extern s32 e1000e_id_led_init(struct e1000_hw *hw);
+extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
+extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
+extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
+extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
+extern s32 e1000e_setup_link(struct e1000_hw *hw);
+extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
+extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
+extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
+					       u8 *mc_addr_list,
+					       u32 mc_addr_count);
+extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
+extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
+extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
+extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
+extern void e1000e_config_collision_dist(struct e1000_hw *hw);
+extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
+extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
+extern s32 e1000e_blink_led_generic(struct e1000_hw *hw);
+extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
+extern void e1000e_reset_adaptive(struct e1000_hw *hw);
+extern void e1000e_update_adaptive(struct e1000_hw *hw);
+
+extern s32 e1000e_setup_copper_link(struct e1000_hw *hw);
+extern s32 e1000e_get_phy_id(struct e1000_hw *hw);
+extern void e1000e_put_hw_semaphore(struct e1000_hw *hw);
+extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
+extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
+extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
+extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
+extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+					  u16 *data);
+extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
+extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+					   u16 data);
+extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
+extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
+extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
+extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
+extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
+extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
+extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
+extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
+						 u16 *phy_reg);
+extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
+						  u16 *phy_reg);
+extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
+extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
+extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+					u16 data);
+extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+				       u16 *data);
+extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+			       u32 usec_interval, bool *success);
+extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
+extern void e1000_power_up_phy_copper(struct e1000_hw *hw);
+extern void e1000_power_down_phy_copper(struct e1000_hw *hw);
+extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_check_downshift(struct e1000_hw *hw);
+extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+					u16 *data);
+extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
+				      u16 *data);
+extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+					 u16 data);
+extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
+				       u16 data);
+extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
+extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
+extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
+extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
+extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
+extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
+
+extern s32 e1000_check_polarity_m88(struct e1000_hw *hw);
+extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
+extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
+extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
+extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+extern bool e1000_check_phy_82574(struct e1000_hw *hw);
+
+static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+	return hw->phy.ops.reset(hw);
+}
+
+static inline s32 e1000_check_reset_block(struct e1000_hw *hw)
+{
+	return hw->phy.ops.check_reset_block(hw);
+}
+
+static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return hw->phy.ops.read_reg(hw, offset, data);
+}
+
+static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return hw->phy.ops.read_reg_locked(hw, offset, data);
+}
+
+static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return hw->phy.ops.write_reg(hw, offset, data);
+}
+
+static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return hw->phy.ops.write_reg_locked(hw, offset, data);
+}
+
+static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
+{
+	return hw->phy.ops.get_cable_length(hw);
+}
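+
+/*
+ * Wrapper usage sketch (illustrative): reading the standard PHY control
+ * register via the e1e_rphy() helper above (MII_BMCR from <linux/mii.h>):
+ *	u16 bmcr;
+ *	s32 ret = e1e_rphy(hw, MII_BMCR, &bmcr);
+ */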
+
+extern s32 e1000e_acquire_nvm(struct e1000_hw *hw);
+extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
+extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
+extern void e1000e_release_nvm(struct e1000_hw *hw);
+extern void e1000e_reload_nvm(struct e1000_hw *hw);
+extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+
+static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
+{
+	if (hw->mac.ops.read_mac_addr)
+		return hw->mac.ops.read_mac_addr(hw);
+
+	return e1000_read_mac_addr_generic(hw);
+}
+
+static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
+{
+	return hw->nvm.ops.validate(hw);
+}
+
+static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
+{
+	return hw->nvm.ops.update(hw);
+}
+
+static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	return hw->nvm.ops.read(hw, offset, words, data);
+}
+
+static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	return hw->nvm.ops.write(hw, offset, words, data);
+}
+
+static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
+{
+	return hw->phy.ops.get_info(hw);
+}
+
+static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw)
+{
+	return hw->mac.ops.check_mng_mode(hw);
+}
+
+extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
+extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
+extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
+
+static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
+{
+	return readl(hw->hw_addr + reg);
+}
+
+static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
+{
+	writel(val, hw->hw_addr + reg);
+}
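+
+/*
+ * MMIO access sketch (illustrative, assuming the usual E1000_ICR/E1000_IMC
+ * offsets from the hardware header): read the interrupt cause register and
+ * mask all interrupts:
+ *	u32 icr = __er32(hw, E1000_ICR);
+ *	__ew32(hw, E1000_IMC, ~0);
+ */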
+
+#endif /* _E1000_H_ */
+++ linux-patched/drivers/xenomai/net/drivers/rt_eth1394.h	2022-03-21 12:58:29.531887577 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/fec.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * eth1394.h -- Driver for Ethernet emulation over FireWire, (adapted from Linux1394)
+ *		working under RTnet.
+ *
+ * Copyright (C) 2005	Zhang Yuchen <yuchen623@gmail.com>
+ *
+ * Mainly based on work by Emanuel Pirker and Andreas E. Bombe
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __ETH1394_H
+#define __ETH1394_H
+
+#include <ieee1394.h>
+#include <rtskb.h>
+#include <linux/netdevice.h>
+#include <rtnet_port.h>
+
+
+/* Register for incoming packets. This is 4096 bytes, which supports up to
+ * S3200 (per Table 16-3 of IEEE 1394b-2002). */
+#define ETHER1394_REGION_ADDR_LEN	4096
+#define ETHER1394_REGION_ADDR		0xfffff0200000ULL
+#define ETHER1394_REGION_ADDR_END	(ETHER1394_REGION_ADDR + ETHER1394_REGION_ADDR_LEN)
+
+/* GASP identifier numbers for IPv4 over IEEE 1394 */
+#define ETHER1394_GASP_SPECIFIER_ID	0x00005E
+#define ETHER1394_GASP_SPECIFIER_ID_HI	((ETHER1394_GASP_SPECIFIER_ID >> 8) & 0xffff)
+#define ETHER1394_GASP_SPECIFIER_ID_LO	(ETHER1394_GASP_SPECIFIER_ID & 0xff)
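+/* e.g. 0x00005E splits into HI = 0x0000 and LO = 0x5E within the GASP header */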
+#define ETHER1394_GASP_VERSION		1
+
+#define ETHER1394_GASP_OVERHEAD (2 * sizeof(quadlet_t))  /* GASP header overhead */
+
+#define ETHER1394_GASP_BUFFERS 16
+
+#define ETH1394_BC_CHANNEL 31
+
+#define ALL_NODES	0x003f //stolen from ieee1394_types.h
+/* Node set == 64 */
+#define NODE_SET			(ALL_NODES + 1)
+
+enum eth1394_bc_states { ETHER1394_BC_CLOSED, ETHER1394_BC_OPENED,
+			 ETHER1394_BC_CHECK, ETHER1394_BC_ERROR,
+			 ETHER1394_BC_RUNNING,
+			 ETHER1394_BC_STOPPED  };
+
+#define TX_RING_SIZE	32
+#define RX_RING_SIZE	8 /* RX_RING_SIZE*2 rtskbs will be preallocated */
+
+struct pdg_list {
+	struct list_head list;		/* partial datagram list per node */
+	unsigned int sz;		/* partial datagram list size per node	*/
+	rtdm_lock_t lock;		/* partial datagram lock		*/
+};
+
+/* IP1394 headers */
+#include <asm/byteorder.h>
+
+/* Unfragmented */
+#if defined __BIG_ENDIAN_BITFIELD
+struct eth1394_uf_hdr {
+	u16 lf:2;
+	u16 res:14;
+	u16 ether_type;		/* Ethernet packet type */
+} __attribute__((packed));
+#elif defined __LITTLE_ENDIAN_BITFIELD
+struct eth1394_uf_hdr {
+	u16 res:14;
+	u16 lf:2;
+	u16 ether_type;
+} __attribute__((packed));
+#else
+#error Unknown bit field type
+#endif
+
+/* End of IP1394 headers */
+
+/* Fragment types */
+#define ETH1394_HDR_LF_UF	0	/* unfragmented		*/
+#define ETH1394_HDR_LF_FF	1	/* first fragment	*/
+#define ETH1394_HDR_LF_LF	2	/* last fragment	*/
+#define ETH1394_HDR_LF_IF	3	/* interior fragment	*/
+
+#define IP1394_HW_ADDR_LEN	2	/* The RFC uses 16; here we use the value for the modified spec	*/
+
+/* Our arp packet (ARPHRD_IEEE1394) */
+struct eth1394_arp {
+	u16 hw_type;		/* 0x0018	*/
+	u16 proto_type;		/* 0x0080	*/
+	u8 hw_addr_len;		/* 2		*/
+	u8 ip_addr_len;		/* 4		*/
+	u16 opcode;		/* ARP Opcode: 1 for req, 2 for resp	*/
+	/* Above is exactly the same format as struct arphdr */
+
+	unsigned char s_uniq_id[ETH_ALEN];	/* Sender's node id padded with zeros	*/
+	u8 max_rec;		/* Sender's max packet size		*/
+	u8 sspd;		/* Sender's max speed			*/
+	u32 sip;		/* Sender's IP Address			*/
+	u32 tip;		/* IP Address of requested hw addr	*/
+};
+
+
+/* Network timeout */
+#define ETHER1394_TIMEOUT	100000
+
+/* First fragment */
+#if defined __BIG_ENDIAN_BITFIELD
+struct eth1394_ff_hdr {
+	u16 lf:2;
+	u16 res1:2;
+	u16 dg_size:12;		/* Datagram size */
+	u16 ether_type;		/* Ethernet packet type */
+	u16 dgl;		/* Datagram label */
+	u16 res2;
+} __attribute__((packed));
+#elif defined __LITTLE_ENDIAN_BITFIELD
+struct eth1394_ff_hdr {
+	u16 dg_size:12;
+	u16 res1:2;
+	u16 lf:2;
+	u16 ether_type;
+	u16 dgl;
+	u16 res2;
+} __attribute__((packed));
+#else
+#error Unknown bit field type
+#endif
+
+/* XXX: Subsequent fragments, including last */
+#if defined __BIG_ENDIAN_BITFIELD
+struct eth1394_sf_hdr {
+	u16 lf:2;
+	u16 res1:2;
+	u16 dg_size:12;		/* Datagram size */
+	u16 res2:4;
+	u16 fg_off:12;		/* Fragment offset */
+	u16 dgl;		/* Datagram label */
+	u16 res3;
+} __attribute__((packed));
+#elif defined __LITTLE_ENDIAN_BITFIELD
+struct eth1394_sf_hdr {
+	u16 dg_size:12;
+	u16 res1:2;
+	u16 lf:2;
+	u16 fg_off:12;
+	u16 res2:4;
+	u16 dgl;
+	u16 res3;
+} __attribute__((packed));
+#else
+#error Unknown bit field type
+#endif
+
+#if defined __BIG_ENDIAN_BITFIELD
+struct eth1394_common_hdr {
+	u16 lf:2;
+	u16 pad1:14;
+} __attribute__((packed));
+#elif defined __LITTLE_ENDIAN_BITFIELD
+struct eth1394_common_hdr {
+	u16 pad1:14;
+	u16 lf:2;
+} __attribute__((packed));
+#else
+#error Unknown bit field type
+#endif
+
+struct eth1394_hdr_words {
+	u16 word1;
+	u16 word2;
+	u16 word3;
+	u16 word4;
+};
+
+union eth1394_hdr {
+	struct eth1394_common_hdr common;
+	struct eth1394_uf_hdr uf;
+	struct eth1394_ff_hdr ff;
+	struct eth1394_sf_hdr sf;
+	struct eth1394_hdr_words words;
+};
+
+typedef enum {ETH1394_GASP, ETH1394_WRREQ} eth1394_tx_type;
+
+/* This is our task struct. It's used for the packet complete callback.  */
+struct packet_task {
+	struct list_head lh;
+	struct rtskb *skb;
+	int outstanding_pkts;
+	eth1394_tx_type tx_type;
+	int max_payload;
+	struct hpsb_packet *packet;
+	struct eth1394_priv *priv;
+	union eth1394_hdr hdr;
+	u64 addr;
+	u16 dest_node;
+	unsigned int priority; //the priority mapped onto the 1394 transaction priority
+};
+
+/* Private structure for our ethernet driver */
+struct eth1394_priv {
+	struct net_device_stats stats;	/* Device stats			 */
+	struct hpsb_host *host;		/* The card for this dev	 */
+	u16 maxpayload[NODE_SET];	/* Max payload per node		 */
+	unsigned char sspd[NODE_SET];	/* Max speed per node		 */
+	rtdm_lock_t lock;		/* Private lock			 */
+	int broadcast_channel;		/* Async stream Broadcast Channel */
+	enum eth1394_bc_states bc_state; /* broadcast channel state	 */
+	struct hpsb_iso	*iso;
+	struct pdg_list pdg[ALL_NODES]; /* partial RX datagram lists     */
+	int dgl[NODE_SET];              /* Outgoing datagram label per node */
+
+	/* The addresses of Tx/Rx-in-place packets/buffers. */
+	struct rtskb *tx_skbuff[TX_RING_SIZE];
+	struct rtskb *rx_skbuff[RX_RING_SIZE];
+	struct packet_task ptask_list[20]; //list of pre-allocated ptask structures
+};
+
+
+
+struct host_info {
+	struct hpsb_host *host;
+	struct rtnet_device *dev;
+};
+
+
+#endif /* __ETH1394_H */
+++ linux-patched/drivers/xenomai/net/drivers/fec.c	2022-03-21 12:58:29.526887625 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/rt_at91_ether.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ *
+ * Right now, I am very wasteful with the buffers.  I allocate memory
+ * pages and then divide them into 2K frame buffers.  This way I know I
+ * have buffers large enough to hold one frame within one buffer descriptor.
+ * Once I get this working, I will use 64 or 128 byte CPM buffers, which
+ * will be much more memory efficient and will easily handle lots of
+ * small packets.
+ *
+ * Much better multiple PHY support by Magnus Damm.
+ * Copyright (c) 2000 Ericsson Radio Systems AB.
+ *
+ * Support for FEC controller of ColdFire processors.
+ * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
+ *
+ * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
+ * Copyright (c) 2004-2006 Macq Electronique SA.
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ *
+ * Ported from v3.5 Linux drivers/net/ethernet/freescale/fec.[ch]
+ * (git tag v3.5-709-ga6be1fc)
+ *
+ * Copyright (c) 2012 Wolfgang Grandegger <wg@denx.de>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/fec.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <asm/cacheflush.h>
+
+#ifndef CONFIG_ARM
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#endif
+
+/* RTnet */
+#include <rtnet_port.h>
+#include <rtskb.h>
+
+/* RTnet */
+#include "rt_fec.h"
+
+MODULE_AUTHOR("Maintainer: Wolfgang Grandegger <wg@denx.de>");
+MODULE_DESCRIPTION("RTnet driver for the FEC Ethernet");
+MODULE_LICENSE("GPL");
+
+#if defined(CONFIG_ARM)
+#define FEC_ALIGNMENT	0xf
+#else
+#define FEC_ALIGNMENT	0x3
+#endif
+
+#define DRIVER_NAME	"rt_fec"
+
+/* Controller is ENET-MAC */
+#define FEC_QUIRK_ENET_MAC		(1 << 0)
+/* Controller needs driver to swap frame */
+#define FEC_QUIRK_SWAP_FRAME		(1 << 1)
+/* Controller uses gasket */
+#define FEC_QUIRK_USE_GASKET		(1 << 2)
+/* Controller has GBIT support */
+#define FEC_QUIRK_HAS_GBIT		(1 << 3)
+
+static struct platform_device_id fec_devtype[] = {
+	{
+		.name = "fec",
+/* For legacy (non-devicetree) support */
+#if defined(CONFIG_SOC_IMX6Q)
+		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT,
+#elif defined(CONFIG_SOC_IMX28)
+		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+#elif defined(CONFIG_SOC_IMX25)
+		.driver_data = FEC_QUIRK_USE_GASKET,
+#else
+		/* keep it for coldfire */
+		.driver_data = 0,
+#endif
+	}, {
+		.name = "imx25-fec",
+		.driver_data = FEC_QUIRK_USE_GASKET,
+	}, {
+		.name = "imx27-fec",
+		.driver_data = 0,
+	}, {
+		.name = "imx28-fec",
+		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+	}, {
+		.name = "imx6q-fec",
+		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT,
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(platform, fec_devtype);
+
+enum imx_fec_type {
+	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
+	IMX27_FEC,	/* runs on i.mx27/35/51 */
+	IMX28_FEC,
+	IMX6Q_FEC,
+};
+
+static const struct of_device_id fec_dt_ids[] = {
+	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
+	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
+	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
+	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fec_dt_ids);
+
+static unsigned char macaddr[ETH_ALEN];
+module_param_array(macaddr, byte, NULL, 0);
+MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+
+#if defined(CONFIG_M5272)
+/*
+ * Some hardware gets its MAC address out of local flash memory.
+ * If this is non-zero, assume it is the address to read the MAC from.
+ */
+#if defined(CONFIG_NETtel)
+#define	FEC_FLASHMAC	0xf0006006
+#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
+#define	FEC_FLASHMAC	0xf0006000
+#elif defined(CONFIG_CANCam)
+#define	FEC_FLASHMAC	0xf0020000
+#elif defined (CONFIG_M5272C3)
+#define	FEC_FLASHMAC	(0xffe04000 + 4)
+#elif defined(CONFIG_MOD5272)
+#define FEC_FLASHMAC	0xffc0406b
+#else
+#define	FEC_FLASHMAC	0
+#endif
+#endif /* CONFIG_M5272 */
+
+/* The number of Tx and Rx buffers.  These are allocated from the page
+ * pool.  The code may assume these are powers of two, so it is best
+ * to keep them that size.
+ * We don't need to allocate pages for the transmitter.  We just use
+ * the skbuffer directly.
+ */
+#define FEC_ENET_RX_PAGES	8
+#define FEC_ENET_RX_FRSIZE	RTSKB_SIZE /* Maximum size for RTnet */
+#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
+#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
+#define FEC_ENET_TX_FRSIZE	2048
+#define FEC_ENET_TX_FRPPG	(PAGE_SIZE / FEC_ENET_TX_FRSIZE)
+#define TX_RING_SIZE		16	/* Must be power of two */
+#define TX_RING_MOD_MASK	15	/*   for this to work */
+
+#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
+#error "FEC: descriptor ring size constants too large"
+#endif
+
+/* Interrupt events/masks. */
+#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
+#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
+#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
+#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
+#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
+#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
+#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
+#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
+#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
+#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */
+
+#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
+
+/* The FEC stores dest/src/type, data, and checksum for receive packets.
+ */
+#define PKT_MAXBUF_SIZE		1518
+#define PKT_MINBUF_SIZE		64
+#define PKT_MAXBLR_SIZE		1520
+
+/* This device has up to three irqs on some platforms */
+#define FEC_IRQ_NUM		3
+
+/*
+ * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
+ * size bits. Other FEC hardware does not, so we need to take that into
+ * account when setting it.
+ */
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
+#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
+#else
+#define	OPT_FRAME_SIZE	0
+#endif
+
+static unsigned int rx_pool_size = 2 * RX_RING_SIZE;
+module_param(rx_pool_size, int, 0444);
+MODULE_PARM_DESC(rx_pool_size, "Receive buffer pool size");
+
+#ifndef rtnetdev_priv
+#define rtnetdev_priv(ndev) (ndev)->priv
+#endif
+
+/* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors.  The
+ * cur_rx and cur_tx point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller.  The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions.  The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
+struct fec_enet_private {
+	/* Hardware registers of the FEC device */
+	void __iomem *hwp;
+
+	struct net_device *netdev; /* linux netdev needed for phy handling */
+
+	struct clk *clk_ipg;
+	struct clk *clk_ahb;
+
+	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
+	unsigned char *tx_bounce[TX_RING_SIZE];
+	struct	rtskb *tx_skbuff[TX_RING_SIZE];
+	struct	rtskb *rx_skbuff[RX_RING_SIZE];
+	ushort	skb_cur;
+	ushort	skb_dirty;
+
+	/* CPM dual port RAM relative addresses */
+	dma_addr_t	bd_dma;
+	/* Address of Rx and Tx buffers */
+	struct bufdesc	*rx_bd_base;
+	struct bufdesc	*tx_bd_base;
+	/* The next free ring entry */
+	struct bufdesc	*cur_rx, *cur_tx;
+	/* The ring entries to be free()ed */
+	struct bufdesc	*dirty_tx;
+
+	uint	tx_full;
+	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
+	rtdm_lock_t hw_lock;
+
+	struct	platform_device *pdev;
+
+	int	opened;
+	int	dev_id;
+
+	/* Phylib and MDIO interface */
+	struct	mii_bus *mii_bus;
+	struct	phy_device *phy_dev;
+	int	mii_timeout;
+	uint	phy_speed;
+	phy_interface_t	phy_interface;
+	int	link;
+	int	full_duplex;
+	struct	completion mdio_done;
+	int	irq[FEC_IRQ_NUM];
+
+	/* RTnet */
+	struct device *dev;
+	rtdm_irq_t irq_handle[3];
+	rtdm_nrtsig_t mdio_done_sig;
+	struct net_device_stats stats;
+};
+
+/* For phy handling */
+struct fec_enet_netdev_priv {
+	struct rtnet_device *rtdev;
+};
+
+/* FEC MII MMFR bits definition */
+#define FEC_MMFR_ST		(1 << 30)
+#define FEC_MMFR_OP_READ	(2 << 28)
+#define FEC_MMFR_OP_WRITE	(1 << 28)
+#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
+#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
+#define FEC_MMFR_TA		(2 << 16)
+#define FEC_MMFR_DATA(v)	(v & 0xffff)
+
+#define FEC_MII_TIMEOUT		30000 /* us */
+
+/* Transmitter timeout */
+#define TX_TIMEOUT (2 * HZ)
+
+static int mii_cnt;
+
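+/* Byte-swap a frame buffer in place, one 32-bit word at a time (len rounded up). */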
+static void *swap_buffer(void *bufaddr, int len)
+{
+	int i;
+	unsigned int *buf = bufaddr;
+
+	for (i = 0; i < (len + 3) / 4; i++, buf++)
+		*buf = cpu_to_be32(*buf);
+
+	return bufaddr;
+}
+
+static int
+fec_enet_start_xmit(struct rtskb *skb, struct rtnet_device *ndev)
+{
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
+	struct bufdesc *bdp;
+	void *bufaddr;
+	unsigned short	status;
+	unsigned long context;
+
+	if (!fep->link) {
+		/* Link is down or autonegotiation is in progress. */
+		printk("%s: tx link down!.\n", ndev->name);
+		rtnetif_stop_queue(ndev);
+		return 1;	/* RTnet: will call kfree_rtskb() */
+	}
+
+	rtdm_lock_get_irqsave(&fep->hw_lock, context);
+
+	/* RTnet */
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() +
+					       *skb->xmit_stamp);
+
+	/* Fill in a Tx ring entry */
+	bdp = fep->cur_tx;
+
+	status = bdp->cbd_sc;
+
+	if (status & BD_ENET_TX_READY) {
+		/* Ooops.  All transmit buffers are full.  Bail out.
+		 * This should not happen, since ndev->tbusy should be set.
+		 */
+		printk("%s: tx queue full!.\n", ndev->name);
+		rtdm_lock_put_irqrestore(&fep->hw_lock, context);
+		return 1;	/* RTnet: will call kfree_rtskb() */
+	}
+
+	/* Clear all of the status flags */
+	status &= ~BD_ENET_TX_STATS;
+
+	/* Set buffer length and buffer pointer */
+	bufaddr = skb->data;
+	bdp->cbd_datlen = skb->len;
+
+	/*
+	 * On some FEC implementations data must be aligned on
+	 * 4-byte boundaries. Use bounce buffers to copy data
+	 * and get it aligned. Ugh.
+	 */
+	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
+		unsigned int index;
+		index = bdp - fep->tx_bd_base;
+		memcpy(fep->tx_bounce[index], skb->data, skb->len);
+		bufaddr = fep->tx_bounce[index];
+	}
+
+	/*
+	 * Some designs made an incorrect assumption about the endianness
+	 * of the system they run on. As a result, the driver has to swap
+	 * every frame going to and coming from the controller.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+		swap_buffer(bufaddr, skb->len);
+
+	/* Save skb pointer */
+	fep->tx_skbuff[fep->skb_cur] = skb;
+
+	fep->stats.tx_bytes += skb->len;
+	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
+
+	/* Push the data cache so the CPM does not get stale memory
+	 * data.
+	 */
+	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
+			FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
+
+	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
+	 * it's the last BD of the frame, and to put the CRC on the end.
+	 */
+	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
+			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
+	bdp->cbd_sc = status;
+
+	/* Trigger transmission start */
+	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+
+	/* If this was the last BD in the ring, start at the beginning again. */
+	if (status & BD_ENET_TX_WRAP)
+		bdp = fep->tx_bd_base;
+	else
+		bdp++;
+
+	if (bdp == fep->dirty_tx) {
+		fep->tx_full = 1;
+		rtnetif_stop_queue(ndev);
+	}
+
+	fep->cur_tx = bdp;
+
+	rtdm_lock_put_irqrestore(&fep->hw_lock, context);
+
+	return NETDEV_TX_OK;
+}
+
+/* This function is called to start or restart the FEC during a link
+ * change.  This only happens when switching between half and full
+ * duplex.
+ */
+static void
+fec_restart(struct rtnet_device *ndev, int duplex)
+{
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
+	int i;
+	u32 temp_mac[2];
+	u32 rcntl = OPT_FRAME_SIZE | 0x04;
+	u32 ecntl = 0x2; /* ETHEREN */
+
+	/* Whack a reset.  We should wait for this. */
+	writel(1, fep->hwp + FEC_ECNTRL);
+	udelay(10);
+
+	/*
+	 * enet-mac reset will reset mac address registers too,
+	 * so need to reconfigure it.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+		memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+	}
+
+	/* Clear any outstanding interrupt. */
+	writel(0xffc00000, fep->hwp + FEC_IEVENT);
+
+	/* Reset all multicast.	*/
+	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+#ifndef CONFIG_M5272
+	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+#endif
+
+	/* Set maximum receive buffer size. */
+	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
+
+	/* Set receive and transmit descriptor base. */
+	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
+	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
+			fep->hwp + FEC_X_DES_START);
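+	/* (The Tx descriptor ring follows the Rx ring inside the same DMA block.) */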
+
+	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+	fep->cur_rx = fep->rx_bd_base;
+
+	/* Reset SKB transmit buffers. */
+	fep->skb_cur = fep->skb_dirty = 0;
+	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
+		if (fep->tx_skbuff[i]) {
+			dev_kfree_rtskb(fep->tx_skbuff[i]);
+			fep->tx_skbuff[i] = NULL;
+		}
+	}
+
+	/* Enable MII mode */
+	if (duplex) {
+		/* FD enable */
+		writel(0x04, fep->hwp + FEC_X_CNTRL);
+	} else {
+		/* No Rcv on Xmit */
+		rcntl |= 0x02;
+		writel(0x0, fep->hwp + FEC_X_CNTRL);
+	}
+
+	fep->full_duplex = duplex;
+
+	/* Set MII speed */
+	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+	/*
+	 * The phy interface and speed need to get configured
+	 * differently on enet-mac.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+		/* Enable flow control and length check */
+		rcntl |= 0x40000000 | 0x00000020;
+
+		/* RGMII, RMII or MII */
+		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
+			rcntl |= (1 << 6);
+		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+			rcntl |= (1 << 8);
+		else
+			rcntl &= ~(1 << 8);
+
+		/* 1G, 100M or 10M */
+		if (fep->phy_dev) {
+			if (fep->phy_dev->speed == SPEED_1000)
+				ecntl |= (1 << 5);
+			else if (fep->phy_dev->speed == SPEED_100)
+				rcntl &= ~(1 << 9);
+			else
+				rcntl |= (1 << 9);
+		}
+	} else {
+#ifdef FEC_MIIGSK_ENR
+		if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
+			u32 cfgr;
+			/* disable the gasket and wait */
+			writel(0, fep->hwp + FEC_MIIGSK_ENR);
+			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+				udelay(1);
+
+			/*
+			 * configure the gasket:
+			 *   RMII, 50 MHz, no loopback, no echo
+			 *   MII, 25 MHz, no loopback, no echo
+			 */
+			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
+			if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
+				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
+			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
+
+			/* re-enable the gasket */
+			writel(2, fep->hwp + FEC_MIIGSK_ENR);
+		}
+#endif
+	}
+	writel(rcntl, fep->hwp + FEC_R_CNTRL);
+
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+		/* enable ENET endian swap */
+		ecntl |= (1 << 8);
+		/* enable ENET store and forward mode */
+		writel(1 << 8, fep->hwp + FEC_X_WMRK);
+	}
+
+	/* And last, enable the transmit and receive processing */
+	writel(ecntl, fep->hwp + FEC_ECNTRL);
+	writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+
+	/* Enable interrupts we wish to service */
+	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+}
+
+static void
+fec_stop(struct rtnet_device *ndev)
+{
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
+	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
+
+	/* We cannot expect a graceful transmit stop without link !!! */
+	if (fep->link) {
+		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
+		udelay(10);
+		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
+			printk("fec_stop : Graceful transmit stop did not complete !\n");
+	}
+
+	/* Whack a reset.  We should wait for this. */
+	writel(1, fep->hwp + FEC_ECNTRL);
+	udelay(10);
+	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+
+	/* We have to keep ENET enabled to have MII interrupt stay working */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+		writel(2, fep->hwp + FEC_ECNTRL);
+		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
+	}
+}
+
+static void
+fec_enet_tx(struct rtnet_device *ndev)
+{
+	struct	fec_enet_private *fep;
+	struct bufdesc *bdp;
+	unsigned short status;
+	struct	rtskb	*skb;
+
+	fep = rtnetdev_priv(ndev);
+	rtdm_lock_get(&fep->hw_lock);
+	bdp = fep->dirty_tx;
+
+	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
+		if (bdp == fep->cur_tx && fep->tx_full == 0)
+			break;
+
+		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+				FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
+		bdp->cbd_bufaddr = 0;
+
+		skb = fep->tx_skbuff[fep->skb_dirty];
+		/* Check for errors. */
+		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+				   BD_ENET_TX_RL | BD_ENET_TX_UN |
+				   BD_ENET_TX_CSL)) {
+			fep->stats.tx_errors++;
+			if (status & BD_ENET_TX_HB)  /* No heartbeat */
+				fep->stats.tx_heartbeat_errors++;
+			if (status & BD_ENET_TX_LC)  /* Late collision */
+				fep->stats.tx_window_errors++;
+			if (status & BD_ENET_TX_RL)  /* Retrans limit */
+				fep->stats.tx_aborted_errors++;
+			if (status & BD_ENET_TX_UN)  /* Underrun */
+				fep->stats.tx_fifo_errors++;
+			if (status & BD_ENET_TX_CSL) /* Carrier lost */
+				fep->stats.tx_carrier_errors++;
+		} else {
+			fep->stats.tx_packets++;
+		}
+
+		if (status & BD_ENET_TX_READY)
+			printk("HEY! Enet xmit interrupt and TX_READY.\n");
+
+		/* Deferred means some collisions occurred during transmit,
+		 * but we eventually sent the packet OK.
+		 */
+		if (status & BD_ENET_TX_DEF)
+			fep->stats.collisions++;
+
+		/* Free the sk buffer associated with this last transmit */
+		dev_kfree_rtskb(skb); /* RTnet */
+		fep->tx_skbuff[fep->skb_dirty] = NULL;
+		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
+
+		/* Update pointer to next buffer descriptor to be transmitted */
+		if (status & BD_ENET_TX_WRAP)
+			bdp = fep->tx_bd_base;
+		else
+			bdp++;
+
+		/* Since we have freed up a buffer, the ring is no longer full
+		 */
+		if (fep->tx_full) {
+			fep->tx_full = 0;
+			if (rtnetif_queue_stopped(ndev))
+				rtnetif_wake_queue(ndev);
+		}
+	}
+	fep->dirty_tx = bdp;
+	rtdm_lock_put(&fep->hw_lock);
+}
+
+
+/* During a receive, cur_rx points to the current incoming buffer.
+ * As we walk the ring, if the next incoming buffer has not yet been
+ * handed to the system, we just set the empty indicator, effectively
+ * tossing the packet.
+ */
+static void
+fec_enet_rx(struct rtnet_device *ndev, int *packets, nanosecs_abs_t *time_stamp)
+{
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
+	struct bufdesc *bdp;
+	unsigned short status;
+	struct	rtskb	*skb;
+	ushort	pkt_len;
+	__u8 *data;
+
+#ifdef CONFIG_M532x
+	flush_cache_all();
+#endif
+	rtdm_lock_get(&fep->hw_lock);
+
+	/* First, grab all of the stats for the incoming packet.
+	 * These get messed up if we get called due to a busy condition.
+	 */
+	bdp = fep->cur_rx;
+
+	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+
+		/* Since we have allocated space to hold a complete frame,
+		 * the last indicator should be set.
+		 */
+		if ((status & BD_ENET_RX_LAST) == 0)
+			printk("FEC ENET: rcv is not +last\n");
+
+		if (!fep->opened)
+			goto rx_processing_done;
+
+		/* Check for errors. */
+		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+			fep->stats.rx_errors++;
+			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
+				/* Frame too long or too short. */
+				fep->stats.rx_length_errors++;
+			}
+			if (status & BD_ENET_RX_NO)	/* Frame alignment */
+				fep->stats.rx_frame_errors++;
+			if (status & BD_ENET_RX_CR)	/* CRC Error */
+				fep->stats.rx_crc_errors++;
+			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
+				fep->stats.rx_fifo_errors++;
+		}
+
+		/* Report late collisions as a frame error.
+		 * On this error, the BD is closed, but we don't know what we
+		 * have in the buffer.  So, just drop this frame on the floor.
+		 */
+		if (status & BD_ENET_RX_CL) {
+			fep->stats.rx_errors++;
+			fep->stats.rx_frame_errors++;
+			goto rx_processing_done;
+		}
+
+		/* Process the incoming frame. */
+		fep->stats.rx_packets++;
+		pkt_len = bdp->cbd_datlen;
+		fep->stats.rx_bytes += pkt_len;
+		data = (__u8*)__va(bdp->cbd_bufaddr);
+
+		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
+
+		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+			swap_buffer(data, pkt_len);
+
+		/* This does 16 byte alignment, exactly what we need.
+		 * The packet length includes FCS, but we don't want to
+		 * include that when passing upstream as it messes up
+		 * bridging applications.
+		 */
+		skb = rtnetdev_alloc_rtskb(ndev, pkt_len - 4 + NET_IP_ALIGN); /* RTnet */
+
+		if (unlikely(!skb)) {
+			printk("%s: Memory squeeze, dropping packet.\n",
+					ndev->name);
+			fep->stats.rx_dropped++;
+		} else {
+			rtskb_reserve(skb, NET_IP_ALIGN);
+			rtskb_put(skb, pkt_len - 4);	/* Make room */
+			memcpy(skb->data, data, pkt_len - 4);
+			skb->protocol = rt_eth_type_trans(skb, ndev);
+			skb->time_stamp = *time_stamp;
+			rtnetif_rx(skb);
+			(*packets)++; /* RTnet */
+		}
+
+		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
+				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
+rx_processing_done:
+		/* Clear the status flags for this buffer */
+		status &= ~BD_ENET_RX_STATS;
+
+		/* Mark the buffer empty */
+		status |= BD_ENET_RX_EMPTY;
+		bdp->cbd_sc = status;
+
+		/* Update BD pointer to next entry */
+		if (status & BD_ENET_RX_WRAP)
+			bdp = fep->rx_bd_base;
+		else
+			bdp++;
+		/* Doing this here will keep the FEC running while we process
+		 * incoming frames.  On a heavily loaded network, we should be
+		 * able to keep up at the expense of system resources.
+		 */
+		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+	}
+	fep->cur_rx = bdp;
+
+	rtdm_lock_put(&fep->hw_lock);
+}
+
+static int
+fec_enet_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *ndev =
+		rtdm_irq_get_arg(irq_handle, struct rtnet_device); /* RTnet */
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+	uint int_events;
+	irqreturn_t ret = RTDM_IRQ_NONE;
+	/* RTnet */
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+	int packets = 0;
+
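+	/* Drain pending events; writing them back to IEVENT acknowledges them. */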
+	do {
+		int_events = readl(fep->hwp + FEC_IEVENT);
+		writel(int_events, fep->hwp + FEC_IEVENT);
+
+		if (int_events & FEC_ENET_RXF) {
+			ret = RTDM_IRQ_HANDLED;
+			fec_enet_rx(ndev, &packets, &time_stamp);
+		}
+
+		/* Transmit OK, or non-fatal error. Update the buffer
+		 * descriptors. FEC handles all errors, we just discover
+		 * them as part of the transmit process.
+		 */
+		if (int_events & FEC_ENET_TXF) {
+			ret = RTDM_IRQ_HANDLED;
+			fec_enet_tx(ndev);
+		}
+
+		if (int_events & FEC_ENET_MII) {
+			ret = RTDM_IRQ_HANDLED;
+			rtdm_nrtsig_pend(&fep->mdio_done_sig);
+		}
+	} while (int_events);
+
+	if (packets > 0)
+		rt_mark_stack_mgr(ndev);
+
+	return ret;
+}
+
+
+
+/* ------------------------------------------------------------------------- */
+static void __inline__ fec_get_mac(struct rtnet_device *ndev)
+{
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
+	unsigned char *iap, tmpaddr[ETH_ALEN];
+
+	/*
+	 * try to get the MAC address in the following order:
+	 *
+	 * 1) module parameter via kernel command line, in the form
+	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
+	 */
+	iap = macaddr;
+
+#ifdef CONFIG_OF
+	/*
+	 * 2) from device tree data
+	 */
+	if (!is_valid_ether_addr(iap)) {
+		struct device_node *np = fep->pdev->dev.of_node;
+		if (np) {
+			const char *mac = of_get_mac_address(np);
+			if (mac)
+				iap = (unsigned char *) mac;
+		}
+	}
+#endif
+
+	/*
+	 * 3) from flash or fuse (via platform data)
+	 */
+	if (!is_valid_ether_addr(iap)) {
+#ifdef CONFIG_M5272
+		if (FEC_FLASHMAC)
+			iap = (unsigned char *)FEC_FLASHMAC;
+#else
+		if (pdata)
+			iap = (unsigned char *)&pdata->mac;
+#endif
+	}
+
+	/*
+	 * 4) FEC mac registers set by bootloader
+	 */
+	if (!is_valid_ether_addr(iap)) {
+		*((unsigned long *) &tmpaddr[0]) =
+			be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
+		*((unsigned short *) &tmpaddr[4]) =
+			be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+		iap = &tmpaddr[0];
+	}
+
+	memcpy(ndev->dev_addr, iap, ETH_ALEN);
+
+	/* Adjust MAC if using macaddr */
+	if (iap == macaddr)
+		 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Phy section
+ */
+static void fec_enet_mdio_done(rtdm_nrtsig_t *nrt_sig, void* data)
+{
+	struct fec_enet_private *fep = data;
+
+	complete(&fep->mdio_done);
+}
+
+static void fec_enet_adjust_link(struct net_device *netdev)
+{
+	struct fec_enet_netdev_priv *npriv = netdev_priv(netdev);
+	struct rtnet_device *ndev = npriv->rtdev;
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+	struct phy_device *phy_dev = fep->phy_dev;
+	unsigned long context;
+
+	int status_change = 0;
+
+	rtdm_lock_get_irqsave(&fep->hw_lock, context);
+
+	/* Prevent a state halted on mii error */
+	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
+		phy_dev->state = PHY_RESUMING;
+		goto spin_unlock;
+	}
+
+	/* Duplex link change */
+	if (phy_dev->link) {
+		if (fep->full_duplex != phy_dev->duplex) {
+			fec_restart(ndev, phy_dev->duplex);
+			/* prevent unnecessary second fec_restart() below */
+			fep->link = phy_dev->link;
+			status_change = 1;
+		}
+	}
+
+	/* Link on or off change */
+	if (phy_dev->link != fep->link) {
+		fep->link = phy_dev->link;
+		if (phy_dev->link)
+			fec_restart(ndev, phy_dev->duplex);
+		else
+			fec_stop(ndev);
+		status_change = 1;
+	}
+
+spin_unlock:
+	rtdm_lock_put_irqrestore(&fep->hw_lock, context);
+
+	if (status_change)
+		phy_print_status(phy_dev);
+}
+
+static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+	struct fec_enet_private *fep = bus->priv;
+	unsigned long time_left;
+
+	fep->mii_timeout = 0;
+	init_completion(&fep->mdio_done);
+
+	/* start a read op */
+	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
+
+	/* wait for end of transfer */
+	time_left = wait_for_completion_timeout(&fep->mdio_done,
+			usecs_to_jiffies(FEC_MII_TIMEOUT));
+	if (time_left == 0) {
+		fep->mii_timeout = 1;
+		printk(KERN_ERR "FEC: MDIO read timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	/* return value */
+	return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
+}
+
+static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+			   u16 value)
+{
+	struct fec_enet_private *fep = bus->priv;
+	unsigned long time_left;
+
+	fep->mii_timeout = 0;
+	init_completion(&fep->mdio_done);
+
+	/* start a write op */
+	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
+		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+		FEC_MMFR_TA | FEC_MMFR_DATA(value),
+		fep->hwp + FEC_MII_DATA);
+
+	/* wait for end of transfer */
+	time_left = wait_for_completion_timeout(&fep->mdio_done,
+			usecs_to_jiffies(FEC_MII_TIMEOUT));
+	if (time_left == 0) {
+		fep->mii_timeout = 1;
+		printk(KERN_ERR "FEC: MDIO write timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int fec_enet_mdio_reset(struct mii_bus *bus)
+{
+	return 0;
+}
+
+static int fec_enet_mii_probe(struct rtnet_device *ndev)
+{
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
+	struct phy_device *phy_dev = NULL;
+	char mdio_bus_id[MII_BUS_ID_SIZE];
+	char phy_name[MII_BUS_ID_SIZE + 3];
+	int phy_id;
+	int dev_id = fep->dev_id;
+
+	fep->phy_dev = NULL;
+
+	/* check for attached phy */
+	for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
+		if ((fep->mii_bus->phy_mask & (1 << phy_id)))
+			continue;
+		if (fep->mii_bus->phy_map[phy_id] == NULL)
+			continue;
+		if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
+			continue;
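+		/* skip the first dev_id usable PHYs so each FEC binds a distinct one */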
+		if (dev_id--)
+			continue;
+		strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
+		break;
+	}
+
+	if (phy_id >= PHY_MAX_ADDR) {
+		printk(KERN_INFO
+			"%s: no PHY, assuming direct connection to switch\n",
+			ndev->name);
+		strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
+		phy_id = 0;
+	}
+
+	snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id);
+	/* attach the mac to the phy using the dummy linux netdev */
+	phy_dev = phy_connect(fep->netdev, phy_name, &fec_enet_adjust_link, 0,
+			      fep->phy_interface);
+	if (IS_ERR(phy_dev)) {
+		printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
+		return PTR_ERR(phy_dev);
+	}
+
+	/* mask with MAC supported features */
+	if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT)
+		phy_dev->supported &= PHY_GBIT_FEATURES;
+	else
+		phy_dev->supported &= PHY_BASIC_FEATURES;
+
+	phy_dev->advertising = phy_dev->supported;
+
+	fep->phy_dev = phy_dev;
+	fep->link = 0;
+	fep->full_duplex = 0;
+
+	printk(KERN_INFO
+		"%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+		ndev->name,
+		fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
+		fep->phy_dev->irq);
+
+	return 0;
+}
+
+static int fec_enet_mii_init(struct platform_device *pdev)
+{
+	static struct mii_bus *fec0_mii_bus;
+	struct rtnet_device *ndev = platform_get_drvdata(pdev);
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
+	int err = -ENXIO, i;
+
+	/*
+	 * The dual fec interfaces are not equivalent with enet-mac.
+	 * Here are the differences:
+	 *
+	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
+	 *  - fec0 acts as the 1588 time master while fec1 is slave
+	 *  - external PHYs can only be configured by fec0
+	 *
+	 * That is to say, fec1 cannot work independently; it only works
+	 * when fec0 is working. The reason behind this design is that the
+	 * second interface is added primarily for Switch mode.
+	 *
+	 * Because of the last point above, both PHYs are attached to the
+	 * fec0 MDIO interface in the board design and need to be configured
+	 * through the fec0 mii_bus.
+	 */
+	if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
+		/* fec1 uses fec0 mii_bus */
+		if (mii_cnt && fec0_mii_bus) {
+			fep->mii_bus = fec0_mii_bus;
+			mii_cnt++;
+			return 0;
+		}
+		return -ENOENT;
+	}
+
+	fep->mii_timeout = 0;
+
+	/*
+	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
+	 *
+	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
+	 * for ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'.  The i.MX28
+	 * Reference Manual has an error here, which is corrected in the
+	 * i.MX6Q documentation.
+	 */
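+	/*
+	 * Worked example (assuming a 66 MHz AHB reference clock):
+	 * DIV_ROUND_UP(66000000, 5000000) = 14, decremented to 13 on
+	 * ENET-MAC, so MDC = 66 MHz / ((13 + 1) * 2) ~= 2.36 MHz, i.e.
+	 * just below the 2.5 MHz target.
+	 */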
+	fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+		fep->phy_speed--;
+	fep->phy_speed <<= 1;
+	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+	fep->mii_bus = mdiobus_alloc();
+	if (fep->mii_bus == NULL) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	fep->mii_bus->name = "fec_enet_mii_bus";
+	fep->mii_bus->read = fec_enet_mdio_read;
+	fep->mii_bus->write = fec_enet_mdio_write;
+	fep->mii_bus->reset = fec_enet_mdio_reset;
+	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		pdev->name, fep->dev_id + 1);
+	fep->mii_bus->priv = fep;
+	fep->mii_bus->parent = &pdev->dev;
+
+	fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+	if (!fep->mii_bus->irq) {
+		err = -ENOMEM;
+		goto err_out_free_mdiobus;
+	}
+
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		fep->mii_bus->irq[i] = PHY_POLL;
+
+	rtdm_nrtsig_init(&fep->mdio_done_sig, fec_enet_mdio_done, fep);
+
+	if (mdiobus_register(fep->mii_bus))
+		goto err_out_destroy_nrt;
+
+	mii_cnt++;
+
+	/* save fec0 mii_bus */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+		fec0_mii_bus = fep->mii_bus;
+
+	return 0;
+
+err_out_destroy_nrt:
+	rtdm_nrtsig_destroy(&fep->mdio_done_sig);
+	kfree(fep->mii_bus->irq);
+err_out_free_mdiobus:
+	mdiobus_free(fep->mii_bus);
+err_out:
+	return err;
+}
+
+static void fec_enet_mii_remove(struct fec_enet_private *fep)
+{
+	if (--mii_cnt == 0) {
+		mdiobus_unregister(fep->mii_bus);
+		kfree(fep->mii_bus->irq);
+		mdiobus_free(fep->mii_bus);
+	}
+	rtdm_nrtsig_destroy(&fep->mdio_done_sig);
+}
+
+static int
+fec_enet_ioctl(struct rtnet_device *ndev, unsigned int request, void *arg)
+{
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+	struct phy_device *phydev = fep->phy_dev;
+	struct ifreq *ifr = arg;
+	struct ethtool_value *value;
+	struct ethtool_cmd cmd;
+	int err = 0;
+
+	if (!rtnetif_running(ndev))
+		return -EINVAL;
+
+	if (!phydev)
+		return -ENODEV;
+
+	switch (request) {
+	case SIOCETHTOOL:
+		value = (struct ethtool_value *)ifr->ifr_data;
+		switch (value->cmd) {
+		case ETHTOOL_GLINK:
+			value->data = fep->link;
+			if (copy_to_user(&value->data, &fep->link,
+					 sizeof(value->data)))
+				err = -EFAULT;
+			break;
+		case ETHTOOL_GSET:
+			memset(&cmd, 0, sizeof(cmd));
+			cmd.cmd = ETHTOOL_GSET;
+			err = phy_ethtool_gset(phydev, &cmd);
+			if (err)
+				break;
+			if (copy_to_user(ifr->ifr_data, &cmd, sizeof(cmd)))
+				err = -EFAULT;
+			break;
+		case ETHTOOL_SSET:
+			if (copy_from_user(&cmd, ifr->ifr_data, sizeof(cmd)))
+				err = -EFAULT;
+			else
+				err = phy_ethtool_sset(phydev, &cmd);
+			break;
+		}
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static void fec_enet_free_buffers(struct rtnet_device *ndev)
+{
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+	int i;
+	struct rtskb *skb;
+	struct bufdesc	*bdp;
+
+	bdp = fep->rx_bd_base;
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		skb = fep->rx_skbuff[i];
+
+		if (bdp->cbd_bufaddr)
+			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+		if (skb)
+			dev_kfree_rtskb(skb); /* RTnet */
+		bdp++;
+	}
+
+	bdp = fep->tx_bd_base;
+	for (i = 0; i < TX_RING_SIZE; i++)
+		kfree(fep->tx_bounce[i]);
+}
+
+static int fec_enet_alloc_buffers(struct rtnet_device *ndev)
+{
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+	int i;
+	struct rtskb *skb;
+	struct bufdesc	*bdp;
+
+	bdp = fep->rx_bd_base;
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		skb = rtnetdev_alloc_rtskb(ndev, FEC_ENET_RX_FRSIZE); /* RTnet */
+		if (!skb) {
+			fec_enet_free_buffers(ndev);
+			return -ENOMEM;
+		}
+		fep->rx_skbuff[i] = skb;
+
+		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
+				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
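+		/* mark the descriptor empty so the FEC can receive into it */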
+		bdp->cbd_sc = BD_ENET_RX_EMPTY;
+		bdp++;
+	}
+
+	/* Set the last buffer to wrap. */
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	bdp = fep->tx_bd_base;
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
+
+		bdp->cbd_sc = 0;
+		bdp->cbd_bufaddr = 0;
+		bdp++;
+	}
+
+	/* Set the last buffer to wrap. */
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	return 0;
+}
+
+static int
+fec_enet_open(struct rtnet_device *ndev)
+{
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+	int ret;
+
+	/* I should reset the ring buffers here, but I don't yet know
+	 * a simple way to do that.
+	 */
+
+	ret = fec_enet_alloc_buffers(ndev);
+	if (ret)
+		return ret;
+
+	/* RTnet */
+	rt_stack_connect(ndev, &STACK_manager);
+
+	/* Probe and connect to PHY when open the interface */
+	ret = fec_enet_mii_probe(ndev);
+	if (ret) {
+		fec_enet_free_buffers(ndev);
+		return ret;
+	}
+	phy_start(fep->phy_dev);
+	rtnetif_carrier_on(ndev);
+	rtnetif_start_queue(ndev);
+	fep->opened = 1;
+	return 0;
+}
+
+static int
+fec_enet_close(struct rtnet_device *ndev)
+{
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+
+	/* Don't know what to do yet. */
+	fep->opened = 0;
+	rtnetif_stop_queue(ndev);
+	fec_stop(ndev);
+
+	if (fep->phy_dev) {
+		phy_stop(fep->phy_dev);
+		phy_disconnect(fep->phy_dev);
+	}
+
+	fec_enet_free_buffers(ndev);
+
+	/* RTnet */
+	rt_stack_disconnect(ndev);
+
+	return 0;
+}
+
+#ifdef CONFIG_XENO_DRIVERS_NET_MULTICAST
+/* Set or clear the multicast filter for this adaptor.
+ * Skeleton taken from sunlance driver.
+ * The CPM Ethernet implementation allows Multicast as well as individual
+ * MAC address filtering.  Some of the drivers check to make sure it is
+ * a group multicast address, and discard those that are not.  I guess I
+ * will do the same for now, but just remove the test if you want
+ * individual filtering as well (do the upper net layers want or support
+ * this kind of feature?).
+ */
+
+#define HASH_BITS	6		/* #bits in hash */
+#define CRC32_POLY	0xEDB88320
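+
+/*
+ * The 6-bit hash selects one bit in the 64-bit group hash: values 32..63
+ * land in GRP_HASH_TABLE_HIGH, values 0..31 in GRP_HASH_TABLE_LOW.
+ */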
+
+static void set_multicast_list(struct rtnet_device *ndev)
+{
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+	struct netdev_hw_addr *ha;
+	unsigned int i, bit, data, crc, tmp;
+	unsigned char hash;
+
+	if (ndev->flags & IFF_PROMISC) {
+		tmp = readl(fep->hwp + FEC_R_CNTRL);
+		tmp |= 0x8;
+		writel(tmp, fep->hwp + FEC_R_CNTRL);
+		return;
+	}
+
+	tmp = readl(fep->hwp + FEC_R_CNTRL);
+	tmp &= ~0x8;
+	writel(tmp, fep->hwp + FEC_R_CNTRL);
+
+	if (ndev->flags & IFF_ALLMULTI) {
+		/* Catch all multicast addresses, so set the
+		 * filter to all 1's
+		 */
+		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+
+		return;
+	}
+
+	/* Clear filter and add the addresses in hash register
+	 */
+	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+
+	rtnetdev_for_each_mc_addr(ha, ndev) {
+		/* calculate crc32 value of mac address */
+		crc = 0xffffffff;
+
+		for (i = 0; i < ndev->addr_len; i++) {
+			data = ha->addr[i];
+			for (bit = 0; bit < 8; bit++, data >>= 1) {
+				crc = (crc >> 1) ^
+				(((crc ^ data) & 1) ? CRC32_POLY : 0);
+			}
+		}
+
+		/* only the upper 6 bits (HASH_BITS) are used, selecting a
+		 * specific bit in the hash registers
+		 */
+		hash = (crc >> (32 - HASH_BITS)) & 0x3f;
+
+		if (hash > 31) {
+			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+			tmp |= 1 << (hash - 32);
+			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+		} else {
+			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+			tmp |= 1 << hash;
+			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+		}
+	}
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_MULTICAST */
+
+#ifdef ORIGINAL_CODE
+/* Set a MAC change in hardware. */
+static int
+fec_set_mac_address(struct rtnet_device *ndev, void *p)
+{
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
+	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+		(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
+		fep->hwp + FEC_ADDR_LOW);
+	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
+		fep->hwp + FEC_ADDR_HIGH);
+	return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * fec_poll_controller: FEC Poll controller function
+ * @dev: The FEC network adapter
+ *
+ * Polled functionality used by netconsole and others in non-interrupt mode
+ *
+ */
+void fec_poll_controller(struct rtnet_device *dev)
+{
+	int i;
+	struct fec_enet_private *fep = rtnetdev_priv(dev);
+
+	for (i = 0; i < FEC_IRQ_NUM; i++) {
+		if (fep->irq[i] > 0) {
+			disable_irq(fep->irq[i]);
+			fec_enet_interrupt(fep->irq[i], dev);
+			enable_irq(fep->irq[i]);
+		}
+	}
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static const struct rtnet_device_ops fec_netdev_ops = {
+	.ndo_open		= fec_enet_open,
+	.ndo_stop		= fec_enet_close,
+	.ndo_start_xmit		= fec_enet_start_xmit,
+	.ndo_set_rx_mode	= set_multicast_list,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_tx_timeout		= fec_timeout,
+	.ndo_set_mac_address	= fec_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= fec_poll_controller,
+#endif
+};
+#endif /* ORIGINAL_CODE */
+
+/* RTnet: get statistics */
+static struct net_device_stats *fec_get_stats(struct rtnet_device *ndev)
+{
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+	return &fep->stats;
+}
+
+/*
+ * XXX: We need to clean up on failure exits here.
+ */
+static int fec_enet_init(struct rtnet_device *ndev)
+{
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+	struct bufdesc *cbd_base;
+	struct bufdesc *bdp;
+	int i;
+
+	/* Allocate memory for buffer descriptors. */
+	cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
+			GFP_KERNEL);
+	if (!cbd_base) {
+		printk("FEC: allocate descriptor memory failed?\n");
+		return -ENOMEM;
+	}
+
+	rtdm_lock_init(&fep->hw_lock);
+
+	/* Get the Ethernet address */
+	fec_get_mac(ndev);
+
+	/* Set receive and transmit descriptor base. */
+	fep->rx_bd_base = cbd_base;
+	fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+
+	/* RTnet: specific entries in the device structure */
+	ndev->open = fec_enet_open;
+	ndev->stop = fec_enet_close;
+	ndev->hard_start_xmit = fec_enet_start_xmit;
+	ndev->get_stats = fec_get_stats;
+	ndev->do_ioctl = fec_enet_ioctl;
+#ifdef CONFIG_XENO_DRIVERS_NET_MULTICAST
+	ndev->set_multicast_list = &set_multicast_list;
+#endif
+
+	/* Initialize the receive buffer descriptors. */
+	bdp = fep->rx_bd_base;
+	for (i = 0; i < RX_RING_SIZE; i++) {
+
+		/* Initialize the BD for every fragment in the page. */
+		bdp->cbd_sc = 0;
+		bdp++;
+	}
+
+	/* Set the last buffer to wrap */
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	/* ...and the same for transmit */
+	bdp = fep->tx_bd_base;
+	for (i = 0; i < TX_RING_SIZE; i++) {
+
+		/* Initialize the BD for every fragment in the page. */
+		bdp->cbd_sc = 0;
+		bdp->cbd_bufaddr = 0;
+		bdp++;
+	}
+
+	/* Set the last buffer to wrap */
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	fec_restart(ndev, 0);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static int fec_get_phy_mode_dt(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+
+	if (np)
+		return of_get_phy_mode(np);
+
+	return -ENODEV;
+}
+
+static void fec_reset_phy(struct platform_device *pdev)
+{
+	int err, phy_reset;
+	struct device_node *np = pdev->dev.of_node;
+
+	if (!np)
+		return;
+
+	phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
+	err = gpio_request_one(phy_reset, GPIOF_OUT_INIT_LOW, "phy-reset");
+	if (err) {
+		pr_debug("FEC: failed to get gpio phy-reset: %d\n", err);
+		return;
+	}
+	msleep(1);
+	gpio_set_value(phy_reset, 1);
+}
+#else /* CONFIG_OF */
+static inline int fec_get_phy_mode_dt(struct platform_device *pdev)
+{
+	return -ENODEV;
+}
+
+static inline void fec_reset_phy(struct platform_device *pdev)
+{
+	/*
+	 * In case of platform probe, the reset has been done
+	 * by machine code.
+	 */
+}
+#endif /* CONFIG_OF */
+
+static int fec_probe(struct platform_device *pdev)
+{
+	struct fec_enet_netdev_priv *npriv;
+	struct fec_enet_private *fep;
+	struct fec_platform_data *pdata;
+	struct rtnet_device *ndev;
+	int i, irq, ret = 0;
+	struct resource *r;
+	const struct of_device_id *of_id;
+	static int dev_id;
+	struct pinctrl *pinctrl;
+
+	of_id = of_match_device(fec_dt_ids, &pdev->dev);
+	if (of_id)
+		pdev->id_entry = of_id->data;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r)
+		return -ENXIO;
+
+	r = request_mem_region(r->start, resource_size(r), pdev->name);
+	if (!r)
+		return -EBUSY;
+
+	/* Init network device */
+	ndev = rt_alloc_etherdev(sizeof(struct fec_enet_private),
+				rx_pool_size + TX_RING_SIZE);
+	if (!ndev) {
+		ret = -ENOMEM;
+		goto failed_alloc_etherdev;
+	}
+
+	/* RTnet */
+	rtdev_alloc_name(ndev, "rteth%d");
+	rt_rtdev_connect(ndev, &RTDEV_manager);
+	ndev->vers = RTDEV_VERS_2_0;
+	ndev->sysbind = &pdev->dev;
+
+	/* setup board info structure */
+	fep = rtnetdev_priv(ndev);
+	memset(fep, 0, sizeof(*fep));
+
+	/* RTnet: allocate dummy linux netdev structure for phy handling */
+	fep->netdev = alloc_etherdev(sizeof(struct fec_enet_netdev_priv));
+	if (!fep->netdev)
+		goto failed_alloc_netdev;
+	SET_NETDEV_DEV(fep->netdev, &pdev->dev);
+	npriv = netdev_priv(fep->netdev);
+	npriv->rtdev = ndev;
+
+	fep->hwp = ioremap(r->start, resource_size(r));
+	fep->pdev = pdev;
+	fep->dev_id = dev_id++;
+
+	if (!fep->hwp) {
+		ret = -ENOMEM;
+		goto failed_ioremap;
+	}
+
+	platform_set_drvdata(pdev, ndev);
+
+	ret = fec_get_phy_mode_dt(pdev);
+	if (ret < 0) {
+		pdata = pdev->dev.platform_data;
+		if (pdata)
+			fep->phy_interface = pdata->phy;
+		else
+			fep->phy_interface = PHY_INTERFACE_MODE_MII;
+	} else {
+		fep->phy_interface = ret;
+	}
+
+	fec_reset_phy(pdev);
+
+	for (i = 0; i < FEC_IRQ_NUM; i++) {
+		irq = platform_get_irq(pdev, i);
+		if (irq < 0) {
+			if (i)
+				break;
+			ret = irq;
+			goto failed_irq;
+		}
+		ret = rtdm_irq_request(&fep->irq_handle[i], irq,
+				       fec_enet_interrupt, 0, ndev->name, ndev);
+		if (ret) {
+			while (--i >= 0) {
+				irq = platform_get_irq(pdev, i);
+				rtdm_irq_free(&fep->irq_handle[i]);
+			}
+			goto failed_irq;
+		}
+	}
+
+	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(pinctrl)) {
+		ret = PTR_ERR(pinctrl);
+		goto failed_pin;
+	}
+
+	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(fep->clk_ipg)) {
+		ret = PTR_ERR(fep->clk_ipg);
+		goto failed_clk;
+	}
+
+	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+	if (IS_ERR(fep->clk_ahb)) {
+		ret = PTR_ERR(fep->clk_ahb);
+		goto failed_clk;
+	}
+
+	clk_prepare_enable(fep->clk_ahb);
+	clk_prepare_enable(fep->clk_ipg);
+
+	ret = fec_enet_init(ndev);
+	if (ret)
+		goto failed_init;
+
+	ret = fec_enet_mii_init(pdev);
+	if (ret)
+		goto failed_mii_init;
+
+	/* Carrier starts down, phylib will bring it up */
+	rtnetif_carrier_off(ndev);
+
+	/* RTnet: register the network interface */
+	ret = rt_register_rtnetdev(ndev);
+	if (ret)
+		goto failed_register;
+
+	return 0;
+
+failed_register:
+	fec_enet_mii_remove(fep);
+failed_mii_init:
+failed_init:
+	clk_disable_unprepare(fep->clk_ahb);
+	clk_disable_unprepare(fep->clk_ipg);
+failed_pin:
+failed_clk:
+	for (i = 0; i < FEC_IRQ_NUM; i++) {
+		irq = platform_get_irq(pdev, i);
+		if (irq > 0)
+			rtdm_irq_free(&fep->irq_handle[i]);
+	}
+failed_irq:
+	iounmap(fep->hwp);
+failed_ioremap:
+	free_netdev(fep->netdev);
+failed_alloc_netdev:
+	rtdev_free(ndev); /* RTnet */
+failed_alloc_etherdev:
+	release_mem_region(r->start, resource_size(r));
+
+	return ret;
+}
+
+static int fec_drv_remove(struct platform_device *pdev)
+{
+	struct rtnet_device *ndev = platform_get_drvdata(pdev);
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+	struct resource *r;
+	int i;
+
+	/* RTnet */
+	rt_unregister_rtnetdev(ndev);
+	rt_rtdev_disconnect(ndev);
+
+	fec_enet_mii_remove(fep);
+	for (i = 0; i < FEC_IRQ_NUM; i++) {
+		int irq = platform_get_irq(pdev, i);
+		if (irq > 0)
+			rtdm_irq_free(&fep->irq_handle[i]);
+	}
+
+	clk_disable_unprepare(fep->clk_ahb);
+	clk_disable_unprepare(fep->clk_ipg);
+	iounmap(fep->hwp);
+
+	/* RTnet */
+	free_netdev(fep->netdev);
+	rtdev_free(ndev);
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	BUG_ON(!r);
+	release_mem_region(r->start, resource_size(r));
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int
+fec_suspend(struct device *dev)
+{
+	struct rtnet_device *ndev = dev_get_drvdata(dev);
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+
+	if (rtnetif_running(ndev)) {
+		fec_stop(ndev);
+		rtnetif_device_detach(ndev);
+	}
+	clk_disable_unprepare(fep->clk_ahb);
+	clk_disable_unprepare(fep->clk_ipg);
+	return 0;
+}
+
+static int
+fec_resume(struct device *dev)
+{
+	struct rtnet_device *ndev = dev_get_drvdata(dev);
+	struct fec_enet_private *fep = rtnetdev_priv(ndev);
+
+	clk_prepare_enable(fep->clk_ahb);
+	clk_prepare_enable(fep->clk_ipg);
+	if (rtnetif_running(ndev)) {
+		fec_restart(ndev, fep->full_duplex);
+		rtnetif_device_attach(ndev);
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops fec_pm_ops = {
+	.suspend	= fec_suspend,
+	.resume		= fec_resume,
+	.freeze		= fec_suspend,
+	.thaw		= fec_resume,
+	.poweroff	= fec_suspend,
+	.restore	= fec_resume,
+};
+#endif
+
+static struct platform_driver fec_driver = {
+	.driver	= {
+		.name	= DRIVER_NAME,
+		.owner	= THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm	= &fec_pm_ops,
+#endif
+		.of_match_table = fec_dt_ids,
+	},
+	.id_table = fec_devtype,
+	.probe	= fec_probe,
+	.remove	= fec_drv_remove,
+};
+
+module_platform_driver(fec_driver);
+++ linux-patched/drivers/xenomai/net/drivers/rt_at91_ether.h	2022-03-21 12:58:29.521887674 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/pcnet32.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Ethernet driver for the Atmel AT91RM9200 (Thunder)
+ *
+ *  Copyright (C) SAN People (Pty) Ltd
+ *
+ * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
+ * Initial version by Rick Bronson.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef AT91_ETHERNET
+#define AT91_ETHERNET
+
+#include <rtdm/driver.h>
+#include <rtskb.h>
+
+/* Davicom 9161 PHY */
+#define MII_DM9161_ID	0x0181b880
+#define MII_DM9161A_ID	0x0181b8a0
+
+/* Davicom specific registers */
+#define MII_DSCR_REG	16
+#define MII_DSCSR_REG	17
+#define MII_DSINTR_REG	21
+
+/* Intel LXT971A PHY */
+#define MII_LXT971A_ID	0x001378E0
+
+/* Intel specific registers */
+#define MII_ISINTE_REG	18
+#define MII_ISINTS_REG	19
+#define MII_LEDCTRL_REG	20
+
+/* Realtek RTL8201 PHY */
+#define MII_RTL8201_ID	0x00008200
+
+/* Broadcom BCM5221 PHY */
+#define MII_BCM5221_ID	0x004061e0
+
+/* Broadcom specific registers */
+#define MII_BCMINTR_REG	26
+
+/* National Semiconductor DP83847 */
+#define MII_DP83847_ID	0x20005c30
+
+/* Altima AC101L PHY */
+#define MII_AC101L_ID	0x00225520
+
+/* Micrel KS8721 PHY */
+#define MII_KS8721_ID	0x00221610
+
+/* ........................................................................ */
+
+#define MAX_RBUFF_SZ	0x600		/* 1518 rounded up */
+#define MAX_RX_DESCR	9		/* max number of receive buffers */
+
+#define EMAC_DESC_DONE	0x00000001	/* bit set when DMA is done */
+#define EMAC_DESC_WRAP	0x00000002	/* bit for wrap */
+
+#define EMAC_BROADCAST	0x80000000	/* broadcast address */
+#define EMAC_MULTICAST	0x40000000	/* multicast address */
+#define EMAC_UNICAST	0x20000000	/* unicast address */
+
+struct rbf_t
+{
+	unsigned int addr;
+	unsigned long size;
+};
+
+struct recv_desc_bufs
+{
+	struct rbf_t descriptors[MAX_RX_DESCR];		/* must be on sizeof (rbf_t) boundary */
+	char recv_buf[MAX_RX_DESCR][MAX_RBUFF_SZ];	/* must be on long boundary */
+};
+
+struct at91_private
+{
+	struct net_device_stats stats;
+	struct mii_if_info mii;			/* ethtool support */
+	struct at91_eth_data board_data;	/* board-specific configuration */
+	struct clk *ether_clk;			/* clock */
+
+	/* PHY */
+	unsigned long phy_type;			/* type of PHY (PHY_ID) */
+	rtdm_lock_t lock;			/* lock for MDI interface */
+	short phy_media;			/* media interface type */
+	unsigned short phy_address;		/* 5-bit MDI address of PHY (0..31) */
+	struct timer_list check_timer;		/* Poll link status */
+
+	/* Transmit */
+	struct rtskb *skb;			/* holds skb until xmit interrupt completes */
+	dma_addr_t skb_physaddr;		/* phys addr from pci_map_single */
+	int skb_length;				/* saved skb length for pci_unmap_single */
+
+	/* Receive */
+	int rxBuffIndex;			/* index into receive descriptor list */
+	struct recv_desc_bufs *dlist;		/* descriptor list address */
+	struct recv_desc_bufs *dlist_phys;	/* descriptor list physical address */
+
+	/* RT Net */
+	rtdm_irq_t irq_handle;
+	rtdm_irq_t phy_irq_handle;
+};
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/pcnet32.c	2022-03-21 12:58:29.516887723 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/natsemi.c	1970-01-01 01:00:00.000000000 +0100
+/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
+/*
+ *	Copyright 1996-1999 Thomas Bogendoerfer
+ *
+ *	Derived from the lance driver written 1993,1994,1995 by Donald Becker.
+ *
+ *	Copyright 1993 United States Government as represented by the
+ *	Director, National Security Agency.
+ *
+ *	This software may be used and distributed according to the terms
+ *	of the GNU General Public License, incorporated herein by reference.
+ *
+ *	This driver is for PCnet32 and PCnetPCI based ethercards
+ */
+/**************************************************************************
+ *  23 Oct, 2000.
+ *  Fixed a few bugs, related to running the controller in 32bit mode.
+ *
+ *  Carsten Langgaard, carstenl@mips.com
+ *  Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
+ *
+ *  Ported to RTnet: September 2003, Jan Kiszka <Jan.Kiszka@web.de>
+ *************************************************************************/
+
+#define DRV_NAME "pcnet32-rt"
+#define DRV_VERSION "1.27a-RTnet-0.2"
+#define DRV_RELDATE "2003-09-24"
+#define PFX DRV_NAME ": "
+
+static const char *version =
+	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Jan.Kiszka@web.de\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+/*** RTnet ***/
+#include <rtnet_port.h>
+
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+#define DEFAULT_RX_POOL_SIZE 16
+
+static int cards[MAX_UNITS] = { [0 ...(MAX_UNITS - 1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+/*** RTnet ***/
+
+/*
+ * PCI device identifiers for "new style" Linux PCI Device Drivers
+ */
+static struct pci_device_id pcnet32_pci_tbl[] = {
+	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME, PCI_ANY_ID,
+	  PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID, 0,
+	  0, 0 },
+	{
+		0,
+	}
+};
+
+MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);
+
+static int cards_found = -1;
+static int pcnet32_have_pci;
+
+/*
+ * VLB I/O addresses
+ */
+static unsigned int pcnet32_portlist[] = { 0x300, 0x320, 0x340, 0x360, 0 };
+
+static int pcnet32_debug = 1;
+static int tx_start =
+	1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
+static int pcnet32vlb; /* check for VLB cards ? */
+
+static struct rtnet_device *pcnet32_dev; /*** RTnet ***/
+
+static int max_interrupt_work = 80;
+/*** RTnet ***
+static int rx_copybreak = 200;
+ *** RTnet ***/
+
+#define PCNET32_PORT_AUI 0x00
+#define PCNET32_PORT_10BT 0x01
+#define PCNET32_PORT_GPSI 0x02
+#define PCNET32_PORT_MII 0x03
+
+#define PCNET32_PORT_PORTSEL 0x03
+#define PCNET32_PORT_ASEL 0x04
+#define PCNET32_PORT_100 0x40
+#define PCNET32_PORT_FD 0x80
+
+#define PCNET32_DMA_MASK 0xffffffff
+
+/*
+ * table to translate option values from tulip
+ * to internal options
+ */
+static unsigned char options_mapping[] = {
+	PCNET32_PORT_ASEL, /*  0 Auto-select	  */
+	PCNET32_PORT_AUI, /*  1 BNC/AUI	  */
+	PCNET32_PORT_AUI, /*  2 AUI/BNC	  */
+	PCNET32_PORT_ASEL, /*  3 not supported	  */
+	PCNET32_PORT_10BT | PCNET32_PORT_FD, /*  4 10baseT-FD	  */
+	PCNET32_PORT_ASEL, /*  5 not supported	  */
+	PCNET32_PORT_ASEL, /*  6 not supported	  */
+	PCNET32_PORT_ASEL, /*  7 not supported	  */
+	PCNET32_PORT_ASEL, /*  8 not supported	  */
+	PCNET32_PORT_MII, /*  9 MII 10baseT	  */
+	PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD	  */
+	PCNET32_PORT_MII, /* 11 MII (autosel)	  */
+	PCNET32_PORT_10BT, /* 12 10BaseT	  */
+	PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx	  */
+	PCNET32_PORT_MII | PCNET32_PORT_100 |
+		PCNET32_PORT_FD, /* 14 MII 100BaseTx-FD */
+	PCNET32_PORT_ASEL /* 15 not supported	  */
+};
+
+static int options[MAX_UNITS];
+static int full_duplex[MAX_UNITS];
+
+/*
+ *				Theory of Operation
+ *
+ * This driver uses the same software structure as the normal lance
+ * driver, so look for a verbose description in lance.c. The difference
+ * from the normal lance driver is the use of the 32-bit mode of the
+ * PCnet32 and PCnetPCI chips. Because these are 32-bit chips, there is
+ * no 16MB limitation and we don't need bounce buffers.
+ */
+
+/*
+ * History:
+ * v0.01:  Initial version
+ *	   only tested on Alpha Noname Board
+ * v0.02:  changed IRQ handling for new interrupt scheme (dev_id)
+ *	   tested on a ASUS SP3G
+ * v0.10:  fixed an odd problem with the 79C974 in a Compaq Deskpro XL
+ *	   looks like the 974 doesn't like stopping and restarting in a
+ *	   short period of time; now we do a reinit of the lance; the
+ *	   bug was triggered by doing ifconfig eth0 <ip> broadcast <addr>
+ *	   and hangs the machine (thanks to Klaus Liedl for debugging)
+ * v0.12:  by suggestion from Donald Becker: Renamed driver to pcnet32,
+ *	   made it standalone (no need for lance.c)
+ * v0.13:  added additional PCI detecting for special PCI devices (Compaq)
+ * v0.14:  stripped down additional PCI probe (thanks to David C Niemi
+ *	   and sveneric@xs4all.nl for testing this on their Compaq boxes)
+ * v0.15:  added 79C965 (VLB) probe
+ *	   added interrupt sharing for PCI chips
+ * v0.16:  fixed set_multicast_list on Alpha machines
+ * v0.17:  removed hack from dev.c; now pcnet32 uses ethif_probe in Space.c
+ * v0.19:  changed setting of autoselect bit
+ * v0.20:  removed additional Compaq PCI probe; there is now a working one
+ *	   in arch/i386/bios32.c
+ * v0.21:  added endian conversion for ppc, from work by cort@cs.nmt.edu
+ * v0.22:  added printing of status to ring dump
+ * v0.23:  changed enet_statistics to net_devive_stats
+ * v0.90:  added multicast filter
+ *	   added module support
+ *	   changed irq probe to new style
+ *	   added PCnetFast chip id
+ *	   added fix for receive stalls with Intel saturn chipsets
+ *	   added in-place rx skbs like in the tulip driver
+ *	   minor cleanups
+ * v0.91:  added PCnetFast+ chip id
+ *	   back port to 2.0.x
+ * v1.00:  added some stuff from Donald Becker's 2.0.34 version
+ *	   added support for byte counters in net_dev_stats
+ * v1.01:  do ring dumps, only when debugging the driver
+ *	   increased the transmit timeout
+ * v1.02:  fixed memory leak in pcnet32_init_ring()
+ * v1.10:  workaround for stopped transmitter
+ *	   added port selection for modules
+ *	   detect special T1/E1 WAN card and setup port selection
+ * v1.11:  fixed wrong checking of Tx errors
+ * v1.20:  added check of return value kmalloc (cpeterso@cs.washington.edu)
+ *	   added save original kmalloc addr for freeing (mcr@solidum.com)
+ *	   added support for PCnetHome chip (joe@MIT.EDU)
+ *	   rewritten PCI card detection
+ *	   added dwio mode to get driver working on some PPC machines
+ * v1.21:  added mii selection and mii ioctl
+ * v1.22:  changed pci scanning code to make PPC people happy
+ *	   fixed switching to 32bit mode in pcnet32_open() (thanks
+ *	   to Michael Richard <mcr@solidum.com> for noticing this one)
+ *	   added sub vendor/device id matching (thanks again to
+ *	   Michael Richard <mcr@solidum.com>)
+ *	   added chip id for 79c973/975 (thanks to Zach Brown <zab@zabbo.net>)
+ * v1.23   fixed small bug, when manual selecting MII speed/duplex
+ * v1.24   Applied Thomas' patch to use TxStartPoint and thus decrease TxFIFO
+ *	   underflows.	Added tx_start_pt module parameter. Increased
+ *	   TX_RING_SIZE from 16 to 32.	Added #ifdef'd code to use DXSUFLO
+ *	   for FAST[+] chipsets. <kaf@fc.hp.com>
+ * v1.24ac Added SMP spinlocking - Alan Cox <alan@redhat.com>
+ * v1.25kf Added No Interrupt on successful Tx for some Tx's <kaf@fc.hp.com>
+ * v1.26   Converted to pci_alloc_consistent, Jamey Hicks / George France
+ *                                           <jamey@crl.dec.com>
+ * -	   Fixed a few bugs, related to running the controller in 32bit mode.
+ *	   23 Oct, 2000.  Carsten Langgaard, carstenl@mips.com
+ *	   Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
+ * v1.26p  Fix oops on rmmod+insmod; plug i/o resource leak - Paul Gortmaker
+ * v1.27   improved CSR/PROM address detection, lots of cleanups,
+ *	   new pcnet32vlb module option, HP-PARISC support,
+ *	   added module parameter descriptions,
+ *	   initial ethtool support - Helge Deller <deller@gmx.de>
+ * v1.27a  Sun Feb 10 2002 Go Taniguchi <go@turbolinux.co.jp>
+ *	   use alloc_etherdev and register_netdev
+ *	   fix pci probe not increment cards_found
+ *	   FD auto negotiate error workaround for xSeries250
+ *	   clean up and using new mii module
+ */
+
+/*
+ * Set the number of Tx and Rx buffers, using Log_2(# buffers).
+ * Reasonable default values are 4 Tx buffers, and 16 Rx buffers.
+ * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4).
+ */
+#ifndef PCNET32_LOG_TX_BUFFERS
+#define PCNET32_LOG_TX_BUFFERS 4
+#define PCNET32_LOG_RX_BUFFERS 3 /*** RTnet ***/
+#endif
+
+#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((PCNET32_LOG_TX_BUFFERS) << 12)
+
+#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define RX_RING_LEN_BITS ((PCNET32_LOG_RX_BUFFERS) << 4)
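+
+/* With the defaults above (log2 sizes 4 and 3) this yields 16 Tx and 8 Rx
+ * descriptors; the *_RING_LEN_BITS values encode these sizes for the init
+ * block's tlen_rlen field. */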
+
+#define PKT_BUF_SZ 1544
+
+/* Offsets from base I/O address. */
+#define PCNET32_WIO_RDP 0x10
+#define PCNET32_WIO_RAP 0x12
+#define PCNET32_WIO_RESET 0x14
+#define PCNET32_WIO_BDP 0x16
+
+#define PCNET32_DWIO_RDP 0x10
+#define PCNET32_DWIO_RAP 0x14
+#define PCNET32_DWIO_RESET 0x18
+#define PCNET32_DWIO_BDP 0x1C
+
+#define PCNET32_TOTAL_SIZE 0x20
+
+/* The PCNET32 Rx and Tx ring descriptors. */
+struct pcnet32_rx_head {
+	u32 base;
+	s16 buf_length;
+	s16 status;
+	u32 msg_length;
+	u32 reserved;
+};
+
+struct pcnet32_tx_head {
+	u32 base;
+	s16 length;
+	s16 status;
+	u32 misc;
+	u32 reserved;
+};
+
+/* The PCNET32 32-Bit initialization block, described in databook. */
+struct pcnet32_init_block {
+	u16 mode;
+	u16 tlen_rlen;
+	u8 phys_addr[6];
+	u16 reserved;
+	u32 filter[2];
+	/* Receive and transmit ring base, along with extra bits. */
+	u32 rx_ring;
+	u32 tx_ring;
+};
+
+/* PCnet32 access functions */
+struct pcnet32_access {
+	u16 (*read_csr)(unsigned long, int);
+	void (*write_csr)(unsigned long, int, u16);
+	u16 (*read_bcr)(unsigned long, int);
+	void (*write_bcr)(unsigned long, int, u16);
+	u16 (*read_rap)(unsigned long);
+	void (*write_rap)(unsigned long, u16);
+	void (*reset)(unsigned long);
+};
+
+/*
+ * The first three fields of pcnet32_private are read by the ethernet device
+ * so the structure should be allocated by pci_alloc_consistent().
+ */
+struct pcnet32_private {
+	/* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
+	struct pcnet32_rx_head rx_ring[RX_RING_SIZE];
+	struct pcnet32_tx_head tx_ring[TX_RING_SIZE];
+	struct pcnet32_init_block init_block;
+	dma_addr_t dma_addr; /* DMA address of beginning of this object,
+					   returned by pci_alloc_consistent */
+	struct pci_dev
+		*pci_dev; /* Pointer to the associated pci device structure */
+	const char *name;
+	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
+	/*** RTnet ***/
+	struct rtskb *tx_skbuff[TX_RING_SIZE];
+	struct rtskb *rx_skbuff[RX_RING_SIZE];
+	/*** RTnet ***/
+	dma_addr_t tx_dma_addr[TX_RING_SIZE];
+	dma_addr_t rx_dma_addr[RX_RING_SIZE];
+	struct pcnet32_access a;
+	rtdm_lock_t lock; /* Guard lock */
+	unsigned int cur_rx, cur_tx; /* The next free ring entry */
+	unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+	struct net_device_stats stats;
+	char tx_full;
+	int options;
+	int shared_irq : 1, /* shared irq possible */
+		ltint : 1, /* enable TxDone-intr inhibitor */
+		dxsuflo : 1, /* disable transmit stop on uflo */
+		mii : 1; /* mii port available */
+	struct rtnet_device *next; /*** RTnet ***/
+	struct mii_if_info mii_if;
+	rtdm_irq_t irq_handle;
+};
+
+static void pcnet32_probe_vlbus(void);
+static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
+static int pcnet32_probe1(unsigned long, unsigned int, int, struct pci_dev *);
+/*** RTnet ***/
+static int pcnet32_open(struct rtnet_device *);
+static int pcnet32_init_ring(struct rtnet_device *);
+static int pcnet32_start_xmit(struct rtskb *, struct rtnet_device *);
+static int pcnet32_rx(struct rtnet_device *, nanosecs_abs_t *time_stamp);
+//static void pcnet32_tx_timeout (struct net_device *dev);
+static int pcnet32_interrupt(rtdm_irq_t *irq_handle);
+static int pcnet32_close(struct rtnet_device *);
+static struct net_device_stats *pcnet32_get_stats(struct rtnet_device *);
+//static void pcnet32_set_multicast_list(struct net_device *);
+//static int  pcnet32_ioctl(struct net_device *, struct ifreq *, int);
+//static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
+//static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val);
+/*** RTnet ***/
+
+enum pci_flags_bit {
+	PCI_USES_IO = 1,
+	PCI_USES_MEM = 2,
+	PCI_USES_MASTER = 4,
+	PCI_ADDR0 = 0x10 << 0,
+	PCI_ADDR1 = 0x10 << 1,
+	PCI_ADDR2 = 0x10 << 2,
+	PCI_ADDR3 = 0x10 << 3,
+};
+
+static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
+{
+	outw(index, addr + PCNET32_WIO_RAP);
+	return inw(addr + PCNET32_WIO_RDP);
+}
+
+static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
+{
+	outw(index, addr + PCNET32_WIO_RAP);
+	outw(val, addr + PCNET32_WIO_RDP);
+}
+
+static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
+{
+	outw(index, addr + PCNET32_WIO_RAP);
+	return inw(addr + PCNET32_WIO_BDP);
+}
+
+static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
+{
+	outw(index, addr + PCNET32_WIO_RAP);
+	outw(val, addr + PCNET32_WIO_BDP);
+}
+
+static u16 pcnet32_wio_read_rap(unsigned long addr)
+{
+	return inw(addr + PCNET32_WIO_RAP);
+}
+
+static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
+{
+	outw(val, addr + PCNET32_WIO_RAP);
+}
+
+static void pcnet32_wio_reset(unsigned long addr)
+{
+	inw(addr + PCNET32_WIO_RESET);
+}
+
+static int pcnet32_wio_check(unsigned long addr)
+{
+	outw(88, addr + PCNET32_WIO_RAP);
+	return (inw(addr + PCNET32_WIO_RAP) == 88);
+}
+
+static struct pcnet32_access pcnet32_wio = {
+	read_csr: pcnet32_wio_read_csr,
+	write_csr: pcnet32_wio_write_csr,
+	read_bcr: pcnet32_wio_read_bcr,
+	write_bcr: pcnet32_wio_write_bcr,
+	read_rap: pcnet32_wio_read_rap,
+	write_rap: pcnet32_wio_write_rap,
+	reset: pcnet32_wio_reset
+};
+
+static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
+{
+	outl(index, addr + PCNET32_DWIO_RAP);
+	return (inl(addr + PCNET32_DWIO_RDP) & 0xffff);
+}
+
+static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
+{
+	outl(index, addr + PCNET32_DWIO_RAP);
+	outl(val, addr + PCNET32_DWIO_RDP);
+}
+
+static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
+{
+	outl(index, addr + PCNET32_DWIO_RAP);
+	return (inl(addr + PCNET32_DWIO_BDP) & 0xffff);
+}
+
+static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
+{
+	outl(index, addr + PCNET32_DWIO_RAP);
+	outl(val, addr + PCNET32_DWIO_BDP);
+}
+
+static u16 pcnet32_dwio_read_rap(unsigned long addr)
+{
+	return (inl(addr + PCNET32_DWIO_RAP) & 0xffff);
+}
+
+static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
+{
+	outl(val, addr + PCNET32_DWIO_RAP);
+}
+
+static void pcnet32_dwio_reset(unsigned long addr)
+{
+	inl(addr + PCNET32_DWIO_RESET);
+}
+
+static int pcnet32_dwio_check(unsigned long addr)
+{
+	outl(88, addr + PCNET32_DWIO_RAP);
+	return ((inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88);
+}
+
+static struct pcnet32_access pcnet32_dwio = {
+	read_csr: pcnet32_dwio_read_csr,
+	write_csr: pcnet32_dwio_write_csr,
+	read_bcr: pcnet32_dwio_read_bcr,
+	write_bcr: pcnet32_dwio_write_bcr,
+	read_rap: pcnet32_dwio_read_rap,
+	write_rap: pcnet32_dwio_write_rap,
+	reset: pcnet32_dwio_reset
+};
+
+/* only probes for non-PCI devices, the rest are handled by
+ * pci_register_driver via pcnet32_probe_pci */
+
+static void pcnet32_probe_vlbus(void)
+{
+	unsigned int *port, ioaddr;
+
+	/* search for PCnet32 VLB cards at known addresses */
+	for (port = pcnet32_portlist; (ioaddr = *port); port++) {
+		if (!request_region(ioaddr, PCNET32_TOTAL_SIZE,
+				    "pcnet32_probe_vlbus")) {
+			/* check if there is really a pcnet chip on that ioaddr */
+			if ((inb(ioaddr + 14) == 0x57) &&
+			    (inb(ioaddr + 15) == 0x57)) {
+				pcnet32_probe1(ioaddr, 0, 0, NULL);
+			} else {
+				release_region(ioaddr, PCNET32_TOTAL_SIZE);
+			}
+		}
+	}
+}
+
+static int pcnet32_probe_pci(struct pci_dev *pdev,
+			     const struct pci_device_id *ent)
+{
+	unsigned long ioaddr;
+	int err;
+
+	err = pci_enable_device(pdev);
+	if (err < 0) {
+		printk(KERN_ERR PFX "failed to enable device -- err=%d\n", err);
+		return err;
+	}
+	pci_set_master(pdev);
+
+	ioaddr = pci_resource_start(pdev, 0);
+	if (!ioaddr) {
+		printk(KERN_ERR PFX "card has no PCI IO resources, aborting\n");
+		return -ENODEV;
+	}
+
+	if (!dma_supported(&pdev->dev, PCNET32_DMA_MASK)) {
+		printk(KERN_ERR PFX
+		       "architecture does not support 32bit PCI busmaster DMA\n");
+		return -ENODEV;
+	}
+
+	return pcnet32_probe1(ioaddr, pdev->irq, 1, pdev);
+}
+
+/* pcnet32_probe1
+ *  Called from both pcnet32_probe_vlbus and pcnet32_probe_pci.
+ *  pdev will be NULL when called from pcnet32_probe_vlbus.
+ */
+static int pcnet32_probe1(unsigned long ioaddr, unsigned int irq_line,
+			  int shared, struct pci_dev *pdev)
+{
+	struct pcnet32_private *lp;
+	dma_addr_t lp_dma_addr;
+	int i, media;
+	int fdx, mii, fset, dxsuflo, ltint;
+	int chip_version;
+	char *chipname;
+	struct rtnet_device *dev; /*** RTnet ***/
+	struct pcnet32_access *a = NULL;
+	u8 promaddr[6];
+
+	// *** RTnet ***
+	cards_found++;
+	if (cards[cards_found] == 0)
+		return -ENODEV;
+	// *** RTnet ***
+
+	/* reset the chip */
+	pcnet32_wio_reset(ioaddr);
+
+	/* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
+	if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
+		a = &pcnet32_wio;
+	} else {
+		pcnet32_dwio_reset(ioaddr);
+		if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 &&
+		    pcnet32_dwio_check(ioaddr)) {
+			a = &pcnet32_dwio;
+		} else
+			return -ENODEV;
+	}
+
+	chip_version =
+		a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
+	if (pcnet32_debug > 2)
+		printk(KERN_INFO "  PCnet chip version is %#x.\n",
+		       chip_version);
+	if ((chip_version & 0xfff) != 0x003)
+		return -ENODEV;
+
+	/* initialize variables */
+	fdx = mii = fset = dxsuflo = ltint = 0;
+	chip_version = (chip_version >> 12) & 0xffff;
+
+	switch (chip_version) {
+	case 0x2420:
+		chipname = "PCnet/PCI 79C970"; /* PCI */
+		break;
+	case 0x2430:
+		if (shared)
+			chipname =
+				"PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */
+		else
+			chipname = "PCnet/32 79C965"; /* 486/VL bus */
+		break;
+	case 0x2621:
+		chipname = "PCnet/PCI II 79C970A"; /* PCI */
+		fdx = 1;
+		break;
+	case 0x2623:
+		chipname = "PCnet/FAST 79C971"; /* PCI */
+		fdx = 1;
+		mii = 1;
+		fset = 1;
+		ltint = 1;
+		break;
+	case 0x2624:
+		chipname = "PCnet/FAST+ 79C972"; /* PCI */
+		fdx = 1;
+		mii = 1;
+		fset = 1;
+		break;
+	case 0x2625:
+		chipname = "PCnet/FAST III 79C973"; /* PCI */
+		fdx = 1;
+		mii = 1;
+		break;
+	case 0x2626:
+		chipname = "PCnet/Home 79C978"; /* PCI */
+		fdx = 1;
+		/*
+		 * This is based on specs published at www.amd.com.  This section
+		 * assumes that a card with a 79C978 wants to go into 1Mb HomePNA
+		 * mode.  The 79C978 can also go into standard ethernet, and there
+		 * probably should be some sort of module option to select the
+		 * mode by which the card should operate.
+		 */
+		/* switch to home wiring mode */
+		media = a->read_bcr(ioaddr, 49);
+		if (pcnet32_debug > 2)
+			printk(KERN_DEBUG PFX "media reset to %#x.\n", media);
+		a->write_bcr(ioaddr, 49, media);
+		break;
+	case 0x2627:
+		chipname = "PCnet/FAST III 79C975"; /* PCI */
+		fdx = 1;
+		mii = 1;
+		break;
+	default:
+		printk(KERN_INFO PFX "PCnet version %#x, no PCnet32 chip.\n",
+		       chip_version);
+		return -ENODEV;
+	}
+
+	/*
+     *	On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
+     *	starting until the packet is loaded. Strike one for reliability, lose
+     *	one for latency - although on PCI this isn't a big loss. Older chips
+     *	have FIFOs smaller than a packet, so you can't do this.
+     */
+
+	if (fset) {
+		a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0800));
+		a->write_csr(ioaddr, 80,
+			     (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
+		dxsuflo = 1;
+		ltint = 1;
+	}
+
+	/*** RTnet ***/
+	dev = rt_alloc_etherdev(0, RX_RING_SIZE * 2 + TX_RING_SIZE);
+	if (dev == NULL)
+		return -ENOMEM;
+	rtdev_alloc_name(dev, "rteth%d");
+	rt_rtdev_connect(dev, &RTDEV_manager);
+	dev->vers = RTDEV_VERS_2_0;
+	dev->sysbind = &pdev->dev;
+	/*** RTnet ***/
+
+	printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
+
+	/* In most chips, after a chip reset, the ethernet address is read from the
+     * station address PROM at the base address and programmed into the
+     * "Physical Address Registers" CSR12-14.
+     * As a precautionary measure, we read the PROM values and complain if
+     * they disagree with the CSRs.  Either way, we use the CSR values, and
+     * double check that they are valid.
+     */
+	for (i = 0; i < 3; i++) {
+		unsigned int val;
+		val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
+		/* There may be endianness issues here. */
+		dev->dev_addr[2 * i] = val & 0x0ff;
+		dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
+	}
+
+	/* read PROM address and compare with CSR address */
+	for (i = 0; i < 6; i++)
+		promaddr[i] = inb(ioaddr + i);
+
+	if (memcmp(promaddr, dev->dev_addr, 6) ||
+	    !is_valid_ether_addr(dev->dev_addr)) {
+#ifndef __powerpc__
+		if (is_valid_ether_addr(promaddr)) {
+#else
+		if (!is_valid_ether_addr(dev->dev_addr) &&
+		    is_valid_ether_addr(promaddr)) {
+#endif
+			printk(" warning: CSR address invalid,\n");
+			printk(KERN_INFO "    using instead PROM address of");
+			memcpy(dev->dev_addr, promaddr, 6);
+		}
+	}
+
+	/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
+	if (!is_valid_ether_addr(dev->dev_addr))
+		memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
+
+	for (i = 0; i < 6; i++)
+		printk(" %2.2x", dev->dev_addr[i]);
+
+	if (((chip_version + 1) & 0xfffe) ==
+	    0x2624) { /* Version 0x2623 or 0x2624 */
+		i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
+		printk("\n" KERN_INFO "    tx_start_pt(0x%04x):", i);
+		switch (i >> 10) {
+		case 0:
+			printk("  20 bytes,");
+			break;
+		case 1:
+			printk("  64 bytes,");
+			break;
+		case 2:
+			printk(" 128 bytes,");
+			break;
+		case 3:
+			printk("~220 bytes,");
+			break;
+		}
+		i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
+		printk(" BCR18(%x):", i & 0xffff);
+		if (i & (1 << 5))
+			printk("BurstWrEn ");
+		if (i & (1 << 6))
+			printk("BurstRdEn ");
+		if (i & (1 << 7))
+			printk("DWordIO ");
+		if (i & (1 << 11))
+			printk("NoUFlow ");
+		i = a->read_bcr(ioaddr, 25);
+		printk("\n" KERN_INFO "    SRAMSIZE=0x%04x,", i << 8);
+		i = a->read_bcr(ioaddr, 26);
+		printk(" SRAM_BND=0x%04x,", i << 8);
+		i = a->read_bcr(ioaddr, 27);
+		if (i & (1 << 14))
+			printk("LowLatRx");
+	}
+
+	dev->base_addr = ioaddr;
+	if (request_region(ioaddr, PCNET32_TOTAL_SIZE, chipname) == NULL)
+		return -EBUSY;
+
+	/* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
+	if ((lp = pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) ==
+	    NULL) {
+		release_region(ioaddr, PCNET32_TOTAL_SIZE);
+		return -ENOMEM;
+	}
+
+	memset(lp, 0, sizeof(*lp));
+	lp->dma_addr = lp_dma_addr;
+	lp->pci_dev = pdev;
+
+	rtdm_lock_init(&lp->lock);
+
+	dev->priv = lp;
+	lp->name = chipname;
+	lp->shared_irq = shared;
+	lp->mii_if.full_duplex = fdx;
+	lp->dxsuflo = dxsuflo;
+	lp->ltint = ltint;
+	lp->mii = mii;
+	if ((cards_found >= MAX_UNITS) ||
+	    (options[cards_found] > (int)sizeof(options_mapping)))
+		lp->options = PCNET32_PORT_ASEL;
+	else
+		lp->options = options_mapping[options[cards_found]];
+	/*** RTnet ***
+    lp->mii_if.dev = dev;
+    lp->mii_if.mdio_read = mdio_read;
+    lp->mii_if.mdio_write = mdio_write;
+ *** RTnet ***/
+
+	if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
+	    ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
+		lp->options |= PCNET32_PORT_FD;
+
+	if (!a) {
+		printk(KERN_ERR PFX "No access methods\n");
+		pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
+		release_region(ioaddr, PCNET32_TOTAL_SIZE);
+		return -ENODEV;
+	}
+	lp->a = *a;
+
+	/* detect special T1/E1 WAN card by checking for MAC address */
+	if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 &&
+	    dev->dev_addr[2] == 0x75)
+		lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
+
+	lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
+	lp->init_block.tlen_rlen =
+		le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+	for (i = 0; i < 6; i++)
+		lp->init_block.phys_addr[i] = dev->dev_addr[i];
+	lp->init_block.filter[0] = 0x00000000;
+	lp->init_block.filter[1] = 0x00000000;
+	lp->init_block.rx_ring = (u32)le32_to_cpu(
+		lp->dma_addr + offsetof(struct pcnet32_private, rx_ring));
+	lp->init_block.tx_ring = (u32)le32_to_cpu(
+		lp->dma_addr + offsetof(struct pcnet32_private, tx_ring));
+
+	/* switch pcnet32 to 32bit mode */
+	a->write_bcr(ioaddr, 20, 2);
+
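+	/* CSR1/CSR2 hold the low/high 16 bits of the init block bus address */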
+	a->write_csr(
+		ioaddr, 1,
+		(lp->dma_addr + offsetof(struct pcnet32_private, init_block)) &
+			0xffff);
+	a->write_csr(
+		ioaddr, 2,
+		(lp->dma_addr + offsetof(struct pcnet32_private, init_block)) >>
+			16);
+
+	if (irq_line) {
+		dev->irq = irq_line;
+	}
+
+	if (dev->irq >= 2)
+		printk(" assigned IRQ %d.\n", dev->irq);
+	else {
+		unsigned long irq_mask = probe_irq_on();
+
+		/*
+		 * To auto-IRQ we enable the initialization-done and DMA error
+		 * interrupts. For ISA boards we get a DMA error, but VLB and PCI
+		 * boards will work.
+		 */
+		/* Trigger an initialization just for the interrupt. */
+		a->write_csr(ioaddr, 0, 0x41);
+		mdelay(1);
+
+		dev->irq = probe_irq_off(irq_mask);
+		if (dev->irq)
+			printk(", probed IRQ %d.\n", dev->irq);
+		else {
+			printk(", failed to detect IRQ line.\n");
+			pci_free_consistent(lp->pci_dev, sizeof(*lp), lp,
+					    lp->dma_addr);
+			release_region(ioaddr, PCNET32_TOTAL_SIZE);
+			return -ENODEV;
+		}
+	}
+
+	/* The PCNET32-specific entries in the device structure. */
+	dev->open = &pcnet32_open;
+	dev->hard_start_xmit = &pcnet32_start_xmit;
+	dev->stop = &pcnet32_close;
+	dev->get_stats = &pcnet32_get_stats;
+	/*** RTnet ***
+    dev->set_multicast_list = &pcnet32_set_multicast_list;
+    dev->do_ioctl = &pcnet32_ioctl;
+    dev->tx_timeout = pcnet32_tx_timeout;
+    dev->watchdog_timeo = (5*HZ);
+ *** RTnet ***/
+
+	lp->next = pcnet32_dev;
+	pcnet32_dev = dev;
+
+	/* Fill in the generic fields of the device structure. */
+	/*** RTnet ***/
+	if ((i = rt_register_rtnetdev(dev))) {
+		pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
+		release_region(ioaddr, PCNET32_TOTAL_SIZE);
+		rtdev_free(dev);
+		return i;
+	}
+	/*** RTnet ***/
+
+	printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name);
+	return 0;
+}
+
+static int pcnet32_open(struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct pcnet32_private *lp = dev->priv;
+	unsigned long ioaddr = dev->base_addr;
+	u16 val;
+	int i;
+
+	/*** RTnet ***/
+	if (dev->irq == 0)
+		return -EAGAIN;
+
+	rt_stack_connect(dev, &STACK_manager);
+
+	i = rtdm_irq_request(&lp->irq_handle, dev->irq, pcnet32_interrupt,
+			     RTDM_IRQTYPE_SHARED, "rt_pcnet32", dev);
+	if (i)
+		return i;
+	/*** RTnet ***/
+
+	/* Check for a valid station address */
+	if (!is_valid_ether_addr(dev->dev_addr))
+		return -EINVAL;
+
+	/* Reset the PCNET32 */
+	lp->a.reset(ioaddr);
+
+	/* switch pcnet32 to 32bit mode */
+	lp->a.write_bcr(ioaddr, 20, 2);
+
+	if (pcnet32_debug > 1)
+		printk(KERN_DEBUG
+		       "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
+		       dev->name, dev->irq,
+		       (u32)(lp->dma_addr +
+			     offsetof(struct pcnet32_private, tx_ring)),
+		       (u32)(lp->dma_addr +
+			     offsetof(struct pcnet32_private, rx_ring)),
+		       (u32)(lp->dma_addr +
+			     offsetof(struct pcnet32_private, init_block)));
+
+	/* set/reset autoselect bit */
+	val = lp->a.read_bcr(ioaddr, 2) & ~2;
+	if (lp->options & PCNET32_PORT_ASEL)
+		val |= 2;
+	lp->a.write_bcr(ioaddr, 2, val);
+
+	/* handle full duplex setting */
+	if (lp->mii_if.full_duplex) {
+		val = lp->a.read_bcr(ioaddr, 9) & ~3;
+		if (lp->options & PCNET32_PORT_FD) {
+			val |= 1;
+			if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
+				val |= 2;
+		} else if (lp->options & PCNET32_PORT_ASEL) {
+			/* workaround of xSeries250, turn on for 79C975 only */
+			i = ((lp->a.read_csr(ioaddr, 88) |
+			      (lp->a.read_csr(ioaddr, 89) << 16)) >>
+			     12) &
+			    0xffff;
+			if (i == 0x2627)
+				val |= 3;
+		}
+		lp->a.write_bcr(ioaddr, 9, val);
+	}
+
+	/* set/reset GPSI bit in test register */
+	val = lp->a.read_csr(ioaddr, 124) & ~0x10;
+	if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
+		val |= 0x10;
+	lp->a.write_csr(ioaddr, 124, val);
+
+	if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
+		val = lp->a.read_bcr(ioaddr, 32) &
+		      ~0x38; /* disable Auto Negotiation, set 10Mpbs, HD */
+		if (lp->options & PCNET32_PORT_FD)
+			val |= 0x10;
+		if (lp->options & PCNET32_PORT_100)
+			val |= 0x08;
+		lp->a.write_bcr(ioaddr, 32, val);
+	} else {
+		if (lp->options &
+		    PCNET32_PORT_ASEL) { /* enable auto negotiate, setup, disable fd */
+			val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
+			val |= 0x20;
+			lp->a.write_bcr(ioaddr, 32, val);
+		}
+	}
+
+#ifdef DO_DXSUFLO
+	if (lp->dxsuflo) { /* Disable transmit stop on underflow */
+		val = lp->a.read_csr(ioaddr, 3);
+		val |= 0x40;
+		lp->a.write_csr(ioaddr, 3, val);
+	}
+#endif
+
+	if (lp->ltint) { /* Enable TxDone-intr inhibitor */
+		val = lp->a.read_csr(ioaddr, 5);
+		val |= (1 << 14);
+		lp->a.write_csr(ioaddr, 5, val);
+	}
+
+	lp->init_block.mode =
+		le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
+	lp->init_block.filter[0] = 0x00000000;
+	lp->init_block.filter[1] = 0x00000000;
+	if (pcnet32_init_ring(dev))
+		return -ENOMEM;
+
+	/* Re-initialize the PCNET32, and start it when done. */
+	lp->a.write_csr(
+		ioaddr, 1,
+		(lp->dma_addr + offsetof(struct pcnet32_private, init_block)) &
+			0xffff);
+	lp->a.write_csr(
+		ioaddr, 2,
+		(lp->dma_addr + offsetof(struct pcnet32_private, init_block)) >>
+			16);
+
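+	/* CSR0 INIT (bit 0) starts chip initialization from the init block */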
+	lp->a.write_csr(ioaddr, 4, 0x0915);
+	lp->a.write_csr(ioaddr, 0, 0x0001);
+
+	rtnetif_start_queue(dev); /*** RTnet ***/
+
+	i = 0;
+	while (i++ < 100)
+		if (lp->a.read_csr(ioaddr, 0) & 0x0100)
+			break;
+	/*
+     * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+     * reports that doing so triggers a bug in the '974.
+     */
+	lp->a.write_csr(ioaddr, 0, 0x0042);
+
+	if (pcnet32_debug > 2)
+		printk(KERN_DEBUG
+		       "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
+		       dev->name, i,
+		       (u32)(lp->dma_addr +
+			     offsetof(struct pcnet32_private, init_block)),
+		       lp->a.read_csr(ioaddr, 0));
+
+	return 0; /* Always succeed */
+}
+
+/*
+ * The LANCE has been halted for one reason or another (busmaster memory
+ * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ * etc.).  Modern LANCE variants always reload their ring-buffer
+ * configuration when restarted, so we must reinitialize our ring
+ * context before restarting.  As part of this reinitialization,
+ * find all packets still on the Tx ring and pretend that they had been
+ * sent (in effect, drop the packets on the floor) - the higher-level
+ * protocols will time out and retransmit.  It'd be better to shuffle
+ * these skbs to a temp list and then actually re-Tx them after
+ * restarting the chip, but I'm too lazy to do so right now.  dplatt@3do.com
+ */
+
+/*** RTnet ***
+static void
+pcnet32_purge_tx_ring(struct net_device *dev)
+{
+    struct pcnet32_private *lp = dev->priv;
+    int i;
+
+    for (i = 0; i < TX_RING_SIZE; i++) {
+	if (lp->tx_skbuff[i]) {
+	    pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
+	    dev_kfree_skb(lp->tx_skbuff[i]);
+	    lp->tx_skbuff[i] = NULL;
+	    lp->tx_dma_addr[i] = 0;
+	}
+    }
+}
+ *** RTnet ***/
+
+/* Initialize the PCNET32 Rx and Tx rings. */
+static int pcnet32_init_ring(struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct pcnet32_private *lp = dev->priv;
+	int i;
+
+	lp->tx_full = 0;
+	lp->cur_rx = lp->cur_tx = 0;
+	lp->dirty_rx = lp->dirty_tx = 0;
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct rtskb *rx_skbuff = lp->rx_skbuff[i]; /*** RTnet ***/
+		if (rx_skbuff == NULL) {
+			if (!(rx_skbuff = lp->rx_skbuff[i] =
+				      rtnetdev_alloc_rtskb(
+					      dev,
+					      PKT_BUF_SZ))) { /*** RTnet ***/
+				/* there is not much we can do at this point */
+				printk(KERN_ERR
+				       "%s: pcnet32_init_ring rtnetdev_alloc_rtskb failed.\n",
+				       dev->name);
+				return -1;
+			}
+			rtskb_reserve(rx_skbuff, 2); /*** RTnet ***/
+		}
+		lp->rx_dma_addr[i] =
+			pci_map_single(lp->pci_dev, rx_skbuff->tail,
+				       rx_skbuff->len, PCI_DMA_FROMDEVICE);
+		lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]);
+		lp->rx_ring[i].buf_length = le16_to_cpu(-PKT_BUF_SZ);
+		lp->rx_ring[i].status = le16_to_cpu(0x8000);
+	}
+	/* The Tx buffer address is filled in as needed, but we do need to clear
+       the upper ownership bit. */
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		lp->tx_ring[i].base = 0;
+		lp->tx_ring[i].status = 0;
+		lp->tx_dma_addr[i] = 0;
+	}
+
+	lp->init_block.tlen_rlen =
+		le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+	for (i = 0; i < 6; i++)
+		lp->init_block.phys_addr[i] = dev->dev_addr[i];
+	lp->init_block.rx_ring = (u32)le32_to_cpu(
+		lp->dma_addr + offsetof(struct pcnet32_private, rx_ring));
+	lp->init_block.tx_ring = (u32)le32_to_cpu(
+		lp->dma_addr + offsetof(struct pcnet32_private, tx_ring));
+	return 0;
+}
+
+/*** RTnet ***/
+/*** RTnet ***/
+
+static int pcnet32_start_xmit(struct rtskb *skb,
+			      struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct pcnet32_private *lp = dev->priv;
+	unsigned long ioaddr = dev->base_addr;
+	u16 status;
+	int entry;
+	rtdm_lockctx_t context;
+
+	if (pcnet32_debug > 3) {
+		rtdm_printk(KERN_DEBUG
+			    "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
+			    dev->name, lp->a.read_csr(ioaddr, 0));
+	}
+
+	/*** RTnet ***/
+	rtdm_lock_get_irqsave(&lp->lock, context);
+	/*** RTnet ***/
+
+	/* Default status -- will not enable Successful-TxDone
+	 * interrupt when that option is available to us.
+	 */
+	status = 0x8300;
+	if ((lp->ltint) && ((lp->cur_tx - lp->dirty_tx == TX_RING_SIZE / 2) ||
+			    (lp->cur_tx - lp->dirty_tx >= TX_RING_SIZE - 2))) {
+		/* Enable Successful-TxDone interrupt if we have
+		 * 1/2 of, or nearly all of, our ring buffer Tx'd
+		 * but not yet cleaned up.  Thus, most of the time,
+		 * we will not enable Successful-TxDone interrupts.
+		 */
+		status = 0x9300;
+	}
+
+	/* Fill in a Tx ring entry */
+
+	/* Mask to ring buffer boundary. */
+	entry = lp->cur_tx & TX_RING_MOD_MASK;
+
+	/* Caution: the write order is important here, set the base address
+	   with the "ownership" bits last. */
+
+	lp->tx_ring[entry].length = le16_to_cpu(-skb->len);
+
+	lp->tx_ring[entry].misc = 0x00000000;
+
+	lp->tx_skbuff[entry] = skb;
+	lp->tx_dma_addr[entry] = pci_map_single(lp->pci_dev, skb->data,
+						skb->len, PCI_DMA_TODEVICE);
+	lp->tx_ring[entry].base = (u32)le32_to_cpu(lp->tx_dma_addr[entry]);
+
+	/*** RTnet ***/
+	/* get and patch time stamp just before the transmission */
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp =
+			cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+	/*** RTnet ***/
+
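+	/* make sure the rest of the descriptor is visible to the chip
+	 * before the status word hands over ownership below */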
+	wmb();
+	lp->tx_ring[entry].status = le16_to_cpu(status);
+
+	lp->cur_tx++;
+	lp->stats.tx_bytes += skb->len;
+
+	/* Trigger an immediate send poll. */
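+	/* CSR0: set IENA (0x0040) and TDMD (0x0008, transmit demand). */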
+	lp->a.write_csr(ioaddr, 0, 0x0048);
+
+	//dev->trans_start = jiffies; /*** RTnet ***/
+
+	if (lp->tx_ring[(entry + 1) & TX_RING_MOD_MASK].base == 0)
+		rtnetif_start_queue(dev); /*** RTnet ***/
+	else {
+		lp->tx_full = 1;
+		rtnetif_stop_queue(dev); /*** RTnet ***/
+	}
+	/*** RTnet ***/
+	rtdm_lock_put_irqrestore(&lp->lock, context);
+	/*** RTnet ***/
+	return 0;
+}
+
+/* The PCNET32 interrupt handler. */
+static int pcnet32_interrupt(rtdm_irq_t *irq_handle) /*** RTnet ***/
+{
+	nanosecs_abs_t time_stamp = rtdm_clock_read(); /*** RTnet ***/
+	struct rtnet_device *dev = rtdm_irq_get_arg(
+		irq_handle, struct rtnet_device); /*** RTnet ***/
+	struct pcnet32_private *lp;
+	unsigned long ioaddr;
+	u16 csr0, rap;
+	int boguscnt = max_interrupt_work;
+	int must_restart;
+	unsigned int old_packet_cnt; /*** RTnet ***/
+	int ret = RTDM_IRQ_NONE;
+
+	/*** RTnet ***
+    if (!dev) {
+	rtdm_printk (KERN_DEBUG "%s(): irq %d for unknown device\n",
+		__FUNCTION__, irq);
+	return;
+    }
+ *** RTnet ***/
+
+	ioaddr = dev->base_addr;
+	lp = dev->priv;
+	old_packet_cnt = lp->stats.rx_packets; /*** RTnet ***/
+
+	rtdm_lock_get(&lp->lock); /*** RTnet ***/
+
+	rap = lp->a.read_rap(ioaddr);
+	while ((csr0 = lp->a.read_csr(ioaddr, 0)) & 0x8600 && --boguscnt >= 0) {
+		/* Acknowledge all of the current interrupt sources ASAP. */
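+		/* The interrupt flags in CSR0 are write-one-to-clear; 0x004f
+		 * masks the INIT/STRT/STOP/TDMD and IENA control bits out of
+		 * the write-back. */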
+		lp->a.write_csr(ioaddr, 0, csr0 & ~0x004f);
+
+		ret = RTDM_IRQ_HANDLED;
+
+		must_restart = 0;
+
+		if (pcnet32_debug > 5)
+			rtdm_printk(
+				KERN_DEBUG
+				"%s: interrupt  csr0=%#2.2x new csr=%#2.2x.\n",
+				dev->name, csr0, lp->a.read_csr(ioaddr, 0));
+
+		if (csr0 & 0x0400) /* Rx interrupt */
+			pcnet32_rx(dev, &time_stamp);
+
+		if (csr0 & 0x0200) { /* Tx-done interrupt */
+			unsigned int dirty_tx = lp->dirty_tx;
+
+			while (dirty_tx < lp->cur_tx) {
+				int entry = dirty_tx & TX_RING_MOD_MASK;
+				int status = (short)le16_to_cpu(
+					lp->tx_ring[entry].status);
+
+				if (status < 0)
+					break; /* It still hasn't been Txed */
+
+				lp->tx_ring[entry].base = 0;
+
+				if (status & 0x4000) {
+					/* There was a major error, log it. */
+					int err_status = le32_to_cpu(
+						lp->tx_ring[entry].misc);
+					lp->stats.tx_errors++;
+					if (err_status & 0x04000000)
+						lp->stats.tx_aborted_errors++;
+					if (err_status & 0x08000000)
+						lp->stats.tx_carrier_errors++;
+					if (err_status & 0x10000000)
+						lp->stats.tx_window_errors++;
+#ifndef DO_DXSUFLO
+					if (err_status & 0x40000000) {
+						lp->stats.tx_fifo_errors++;
+						/* Ackk!  On FIFO errors the Tx unit is turned off! */
+						/* Remove this verbosity later! */
+						rtdm_printk(
+							KERN_ERR
+							"%s: Tx FIFO error! CSR0=%4.4x\n",
+							dev->name, csr0);
+						must_restart = 1;
+					}
+#else
+					if (err_status & 0x40000000) {
+						lp->stats.tx_fifo_errors++;
+						if (!lp->dxsuflo) { /* If controller doesn't recover ... */
+							/* Ackk!  On FIFO errors the Tx unit is turned off! */
+							/* Remove this verbosity later! */
+							rtdm_printk(
+								KERN_ERR
+								"%s: Tx FIFO error! CSR0=%4.4x\n",
+								dev->name,
+								csr0);
+							must_restart = 1;
+						}
+					}
+#endif
+				} else {
+					if (status & 0x1800)
+						lp->stats.collisions++;
+					lp->stats.tx_packets++;
+				}
+
+				/* We must free the original skb */
+				if (lp->tx_skbuff[entry]) {
+					pci_unmap_single(
+						lp->pci_dev,
+						lp->tx_dma_addr[entry],
+						lp->tx_skbuff[entry]->len,
+						PCI_DMA_TODEVICE);
+					dev_kfree_rtskb(
+						lp->tx_skbuff[entry]); /*** RTnet ***/
+					lp->tx_skbuff[entry] = 0;
+					lp->tx_dma_addr[entry] = 0;
+				}
+				dirty_tx++;
+			}
+
+			if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
+				rtdm_printk(
+					KERN_ERR
+					"%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+					dev->name, dirty_tx, lp->cur_tx,
+					lp->tx_full);
+				dirty_tx += TX_RING_SIZE;
+			}
+
+			if (lp->tx_full &&
+			    rtnetif_queue_stopped(dev) && /*** RTnet ***/
+			    dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
+				/* The ring is no longer full, clear tbusy. */
+				lp->tx_full = 0;
+				rtnetif_wake_queue(dev); /*** RTnet ***/
+			}
+			lp->dirty_tx = dirty_tx;
+		}
+
+		/* Log misc errors. */
+		if (csr0 & 0x4000)
+			lp->stats.tx_errors++; /* Tx babble. */
+		if (csr0 & 0x1000) {
+			/*
+			 * This happens when our receive ring is full. This shouldn't
+			 * be a problem as we will see normal rx interrupts for the frames
+			 * in the receive ring. But some PCI chipsets (I can reproduce
+			 * this on SP3G with the Intel Saturn chipset) sometimes have
+			 * problems and will fill up the receive ring with error
+			 * descriptors. In this situation we don't get a rx interrupt,
+			 * but a missed frame interrupt sooner or later. So we try to
+			 * clean up our receive ring here.
+			 */
+			pcnet32_rx(dev, &time_stamp);
+			lp->stats.rx_errors++; /* Missed a Rx frame. */
+		}
+		if (csr0 & 0x0800) {
+			rtdm_printk(
+				KERN_ERR
+				"%s: Bus master arbitration failure, status %4.4x.\n",
+				dev->name, csr0);
+			/* unlike for the lance, there is no restart needed */
+		}
+
+		/*** RTnet ***/
+		/*** RTnet ***/
+	}
+
+	/* Clear any other interrupt, and set interrupt enable. */
+	lp->a.write_csr(ioaddr, 0, 0x7940);
+	lp->a.write_rap(ioaddr, rap);
+
+	if (pcnet32_debug > 4)
+		rtdm_printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
+			    dev->name, lp->a.read_csr(ioaddr, 0));
+
+	/*** RTnet ***/
+	rtdm_lock_put(&lp->lock);
+
+	if (old_packet_cnt != lp->stats.rx_packets)
+		rt_mark_stack_mgr(dev);
+
+	return ret;
+	/*** RTnet ***/
+}
+
+static int pcnet32_rx(struct rtnet_device *dev,
+		      nanosecs_abs_t *time_stamp) /*** RTnet ***/
+{
+	struct pcnet32_private *lp = dev->priv;
+	int entry = lp->cur_rx & RX_RING_MOD_MASK;
+
+	/* If we own the next entry, it's a new packet. Send it up. */
+	while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
+		int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;
+
+		if (status != 0x03) { /* There was an error. */
+			/*
+			 * There is a tricky error noted by John Murphy,
+			 * <murf@perftech.com> to Russ Nelson: Even with full-sized
+			 * buffers it's possible for a jabber packet to use two
+			 * buffers, with only the last correctly noting the error.
+			 */
+			if (status &
+			    0x01) /* Only count a general error at the */
+				lp->stats.rx_errors++; /* end of a packet.*/
+			if (status & 0x20)
+				lp->stats.rx_frame_errors++;
+			if (status & 0x10)
+				lp->stats.rx_over_errors++;
+			if (status & 0x08)
+				lp->stats.rx_crc_errors++;
+			if (status & 0x04)
+				lp->stats.rx_fifo_errors++;
+			lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
+		} else {
+			/* Malloc up new buffer, compatible with net-2e. */
+			short pkt_len =
+				(le32_to_cpu(lp->rx_ring[entry].msg_length) &
+				 0xfff) -
+				4;
+			struct rtskb *skb; /*** RTnet ***/
+
+			if (pkt_len < 60) {
+				rtdm_printk(KERN_ERR "%s: Runt packet!\n",
+					    dev->name);
+				lp->stats.rx_errors++;
+			} else {
+				/*** RTnet ***/
+				/*int rx_in_place = 0;*/
+
+				/*if (pkt_len > rx_copybreak)*/ {
+					struct rtskb *newskb;
+
+					if ((newskb = rtnetdev_alloc_rtskb(
+						     dev, PKT_BUF_SZ))) {
+						rtskb_reserve(newskb, 2);
+						skb = lp->rx_skbuff[entry];
+						pci_unmap_single(
+							lp->pci_dev,
+							lp->rx_dma_addr[entry],
+							skb->len,
+							PCI_DMA_FROMDEVICE);
+						rtskb_put(skb, pkt_len);
+						lp->rx_skbuff[entry] = newskb;
+						lp->rx_dma_addr
+							[entry] = pci_map_single(
+							lp->pci_dev,
+							newskb->tail,
+							newskb->len,
+							PCI_DMA_FROMDEVICE);
+						lp->rx_ring[entry]
+							.base = le32_to_cpu(
+							lp->rx_dma_addr[entry]);
+						/*rx_in_place = 1;*/
+					} else
+						skb = NULL;
+				} /*else {
+		    skb = dev_alloc_skb(pkt_len+2);
+		}*/
+				/*** RTnet ***/
+
+				if (skb == NULL) {
+					int i;
+					rtdm_printk(
+						KERN_ERR
+						"%s: Memory squeeze, deferring packet.\n",
+						dev->name);
+					for (i = 0; i < RX_RING_SIZE; i++)
+						if ((short)le16_to_cpu(
+							    lp->rx_ring[(entry +
+									 i) &
+									RX_RING_MOD_MASK]
+								    .status) <
+						    0)
+							break;
+
+					if (i > RX_RING_SIZE - 2) {
+						lp->stats.rx_dropped++;
+						lp->rx_ring[entry].status |=
+							le16_to_cpu(0x8000);
+						lp->cur_rx++;
+					}
+					break;
+				}
+				/*** RTnet ***/
+				lp->stats.rx_bytes += skb->len;
+				skb->protocol = rt_eth_type_trans(skb, dev);
+				skb->time_stamp = *time_stamp;
+				rtnetif_rx(skb);
+				///dev->last_rx = jiffies;
+				/*** RTnet ***/
+				lp->stats.rx_packets++;
+			}
+		}
+		/*
+		 * The docs say that the buffer length isn't touched, but Andrew Boyd
+		 * of QNX reports that some revs of the 79C965 clear it.
+		 */
+		lp->rx_ring[entry].buf_length = le16_to_cpu(-PKT_BUF_SZ);
+		lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
+		entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+	}
+
+	return 0;
+}
+
+static int pcnet32_close(struct rtnet_device *dev) /*** RTnet ***/
+{
+	unsigned long ioaddr = dev->base_addr;
+	struct pcnet32_private *lp = dev->priv;
+	int i;
+
+	rtnetif_stop_queue(dev); /*** RTnet ***/
+
+	lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
+
+	if (pcnet32_debug > 1)
+		printk(KERN_DEBUG
+		       "%s: Shutting down ethercard, status was %2.2x.\n",
+		       dev->name, lp->a.read_csr(ioaddr, 0));
+
+	/* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
+	lp->a.write_csr(ioaddr, 0, 0x0004);
+
+	/*
+     * Switch back to 16bit mode to avoid problems with dumb
+     * DOS packet driver after a warm reboot
+     */
+	lp->a.write_bcr(ioaddr, 20, 4);
+
+	/*** RTnet ***/
+	if ((i = rtdm_irq_free(&lp->irq_handle)) < 0)
+		return i;
+
+	rt_stack_disconnect(dev);
+	/*** RTnet ***/
+
+	/* free all allocated skbuffs */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		lp->rx_ring[i].status = 0;
+		if (lp->rx_skbuff[i]) {
+			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
+					 lp->rx_skbuff[i]->len,
+					 PCI_DMA_FROMDEVICE);
+			dev_kfree_rtskb(lp->rx_skbuff[i]); /*** RTnet ***/
+		}
+		lp->rx_skbuff[i] = NULL;
+		lp->rx_dma_addr[i] = 0;
+	}
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		if (lp->tx_skbuff[i]) {
+			pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
+					 lp->tx_skbuff[i]->len,
+					 PCI_DMA_TODEVICE);
+			dev_kfree_rtskb(lp->tx_skbuff[i]); /*** RTnet ***/
+		}
+		lp->tx_skbuff[i] = NULL;
+		lp->tx_dma_addr[i] = 0;
+	}
+
+	return 0;
+}
+
+/*** RTnet ***/
+static struct net_device_stats *pcnet32_get_stats(struct rtnet_device *rtdev)
+{
+	struct pcnet32_private *lp = rtdev->priv;
+	unsigned long ioaddr = rtdev->base_addr;
+	rtdm_lockctx_t context;
+	u16 saved_addr;
+
+	rtdm_lock_get_irqsave(&lp->lock, context);
+	saved_addr = lp->a.read_rap(ioaddr);
+	lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
+	lp->a.write_rap(ioaddr, saved_addr);
+	rtdm_lock_put_irqrestore(&lp->lock, context);
+
+	return &lp->stats;
+}
+
+/*** RTnet ***/
+
+static struct pci_driver pcnet32_driver = {
+	.name = DRV_NAME,
+	.probe = pcnet32_probe_pci,
+	.id_table = pcnet32_pci_tbl,
+};
+
+/* An additional parameter that may be passed in... */
+static int local_debug = -1;
+static int tx_start_pt = -1;
+
+module_param_named(debug, local_debug, int, 0444);
+MODULE_PARM_DESC(debug, DRV_NAME " debug level (0-6)");
+module_param(max_interrupt_work, int, 0444);
+MODULE_PARM_DESC(max_interrupt_work,
+		 DRV_NAME " maximum events handled per interrupt");
+/*** RTnet ***
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM_DESC(rx_copybreak, DRV_NAME " copy breakpoint for copy-only-tiny-frames");
+ *** RTnet ***/
+module_param(tx_start_pt, int, 0444);
+MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
+module_param(pcnet32vlb, int, 0444);
+MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)");
+module_param_array(options, int, NULL, 0444);
+MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)");
+module_param_array(full_duplex, int, NULL, 0444);
+MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
+
+MODULE_AUTHOR("Jan Kiszka");
+MODULE_DESCRIPTION("RTnet Driver for PCnet32 and PCnetPCI based ethercards");
+MODULE_LICENSE("GPL");
+
+static int __init pcnet32_init_module(void)
+{
+	printk(KERN_INFO "%s", version);
+
+	if (local_debug > 0)
+		pcnet32_debug = local_debug;
+
+	if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
+		tx_start = tx_start_pt;
+
+	/* find the PCI devices */
+	if (!pci_register_driver(&pcnet32_driver))
+		pcnet32_have_pci = 1;
+
+	/* should we find any remaining VLbus devices ? */
+	if (pcnet32vlb)
+		pcnet32_probe_vlbus();
+
+	if (cards_found)
+		printk(KERN_INFO PFX "%d cards_found.\n", cards_found);
+
+	return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
+}
+
+static void __exit pcnet32_cleanup_module(void)
+{
+	struct rtnet_device *next_dev; /*** RTnet ***/
+
+	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+	while (pcnet32_dev) {
+		struct pcnet32_private *lp = pcnet32_dev->priv;
+		next_dev = lp->next;
+		/*** RTnet ***/
+		rt_unregister_rtnetdev(pcnet32_dev);
+		rt_rtdev_disconnect(pcnet32_dev);
+		/*** RTnet ***/
+		release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
+		pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
+		/*** RTnet ***/
+		rtdev_free(pcnet32_dev);
+		/*** RTnet ***/
+		pcnet32_dev = next_dev;
+	}
+
+	if (pcnet32_have_pci)
+		pci_unregister_driver(&pcnet32_driver);
+}
+
+module_init(pcnet32_init_module);
+module_exit(pcnet32_cleanup_module);
+++ linux-patched/drivers/xenomai/net/drivers/natsemi.c	2022-03-21 12:58:29.511887772 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000/e1000_hw.c	1970-01-01 01:00:00.000000000 +0100
+/* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */
+/*
+	Written/copyright 1999-2001 by Donald Becker.
+	Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
+	Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com)
+
+	This software may be used and distributed according to the terms of
+	the GNU General Public License (GPL), incorporated herein by reference.
+	Drivers based on or derived from this code fall under the GPL and must
+	retain the authorship, copyright and license notice.  This file is not
+	a complete program and may only be used when the entire operating
+	system is licensed under the GPL.  License for under other terms may be
+	available.  Contact the original author for details.
+
+	The original author may be reached as becker@scyld.com, or at
+	Scyld Computing Corporation
+	410 Severn Ave., Suite 210
+	Annapolis MD 21403
+
+	Support information and updates available at
+	http://www.scyld.com/network/netsemi.html
+
+
+	Linux kernel modifications:
+
+	Version 1.0.1:
+		- Spinlock fixes
+		- Bug fixes and better intr performance (Tjeerd)
+	Version 1.0.2:
+		- Now reads correct MAC address from eeprom
+	Version 1.0.3:
+		- Eliminate redundant priv->tx_full flag
+		- Call netif_start_queue from dev->tx_timeout
+		- wmb() in start_tx() to flush data
+		- Update Tx locking
+		- Clean up PCI enable (davej)
+	Version 1.0.4:
+		- Merge Donald Becker's natsemi.c version 1.07
+	Version 1.0.5:
+		- { fill me in }
+	Version 1.0.6:
+		* ethtool support (jgarzik)
+		* Proper initialization of the card (which sometimes
+		fails to occur and leaves the card in a non-functional
+		state). (uzi)
+
+		* Some documented register settings to optimize some
+		of the 100Mbit autodetection circuitry in rev C cards. (uzi)
+
+		* Polling of the PHY intr for stuff like link state
+		change and auto-negotiation to finally work properly. (uzi)
+
+		* One-liner removal of a duplicate declaration of
+		netdev_error(). (uzi)
+
+	Version 1.0.7: (Manfred Spraul)
+		* pci dma
+		* SMP locking update
+		* full reset added into tx_timeout
+		* correct multicast hash generation (both big and little endian)
+			[copied from a natsemi driver version
+			 from Myrio Corporation, Greg Smith]
+		* suspend/resume
+
+	version 1.0.8 (Tim Hockin <thockin@sun.com>)
+		* ETHTOOL_* support
+		* Wake on lan support (Erik Gilling)
+		* MXDMA fixes for serverworks
+		* EEPROM reload
+
+	version 1.0.9 (Manfred Spraul)
+		* Main change: fix lack of synchronization of
+		netif_close/netif_suspend against a last interrupt
+		or packet.
+		* do not enable superfluous interrupts (e.g. the
+		driver relies on TxDone - TxIntr not needed)
+		* wait until the hardware has really stopped in close
+		and suspend.
+		* workaround for the (at least) gcc-2.95.1 compiler
+		problem. Also simplifies the code a bit.
+		* disable_irq() in tx_timeout - needed to protect
+		against rx interrupts.
+		* stop the nic before switching into silent rx mode
+		for wol (required according to the docs).
+
+	version 1.0.10:
+		* use long for ee_addr (various)
+		* print pointers properly (DaveM)
+		* include asm/irq.h (?)
+
+	version 1.0.11:
+		* check and reset if PHY errors appear (Adrian Sun)
+		* WoL cleanup (Tim Hockin)
+		* Magic number cleanup (Tim Hockin)
+		* Don't reload EEPROM on every reset (Tim Hockin)
+		* Save and restore EEPROM state across reset (Tim Hockin)
+		* MDIO Cleanup (Tim Hockin)
+		* Reformat register offsets/bits (jgarzik)
+
+	version 1.0.12:
+		* ETHTOOL_* further support (Tim Hockin)
+
+	version 1.0.13:
+		* ETHTOOL_[G]EEPROM support (Tim Hockin)
+
+	version 1.0.13:
+		* crc cleanup (Matt Domsch <Matt_Domsch@dell.com>)
+
+	version 1.0.14:
+		* Cleanup some messages and autoneg in ethtool (Tim Hockin)
+
+	version 1.0.15:
+		* Get rid of cable_magic flag
+		* use new (National provided) solution for cable magic issue
+
+	version 1.0.16:
+		* call netdev_rx() for RxErrors (Manfred Spraul)
+		* formatting and cleanups
+		* change options and full_duplex arrays to be zero
+		  initialized
+		* enable only the WoL and PHY interrupts in wol mode
+
+	version 1.0.17:
+		* only do cable_magic on 83815 and early 83816 (Tim Hockin)
+		* create a function for rx refill (Manfred Spraul)
+		* combine drain_ring and init_ring (Manfred Spraul)
+		* oom handling (Manfred Spraul)
+		* hands_off instead of playing with netif_device_{de,a}ttach
+		  (Manfred Spraul)
+		* be sure to write the MAC back to the chip (Manfred Spraul)
+		* lengthen EEPROM timeout, and always warn about timeouts
+		  (Manfred Spraul)
+		* comments update (Manfred)
+		* do the right thing on a phy-reset (Manfred and Tim)
+
+	TODO:
+	* big endian support with CFG:BEM instead of cpu_to_le32
+	* support for an external PHY
+	* NAPI
+
+	Ported to RTNET: December 2003, Erik Buit <e.buit@student.utwente.nl>
+*/
+
+#if !defined(__OPTIMIZE__)
+#warning  You must compile this file with the correct options!
+#warning  See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
+#include <linux/mii.h>
+#include <linux/uaccess.h>
+#include <asm/processor.h>	/* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+/*** RTnet ***/
+#include <rtnet_port.h>
+
+#define MAX_UNITS 8		/* More are supported, limit only on options */
+#define DEFAULT_RX_POOL_SIZE    16
+
+static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+/*** RTnet ***/
+
+#define DRV_NAME	"natsemi-rt"
+#define DRV_VERSION	"1.07+LK1.0.17-RTnet-0.2"
+#define DRV_RELDATE	"Dec 16, 2003"
+
+/* Updated to recommendations in pci-skeleton v2.03. */
+
+/* The user-configurable values.
+   These may be modified when a driver module is loaded.*/
+
+#define NATSEMI_DEF_MSG		(NETIF_MSG_DRV		| \
+				 NETIF_MSG_LINK		| \
+				 NETIF_MSG_WOL		| \
+				 NETIF_MSG_RX_ERR	| \
+				 NETIF_MSG_TX_ERR)
+static int local_debug = -1;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+static int mtu;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+   Setting to > 1518 effectively disables this feature. */
+/*** RTnet ***
+static int rx_copybreak;
+ *** RTnet ***/
+
+/* Used to pass the media type, etc.
+   Both 'options[]' and 'full_duplex[]' should exist for driver
+   interoperability.
+   The media type is usually passed in 'options[]'.
+*/
+static int options[MAX_UNITS];
+static int full_duplex[MAX_UNITS];
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+   Making the Tx ring too large decreases the effectiveness of channel
+   bonding and packet priority.
+   There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE	16
+#define TX_QUEUE_LEN	10 /* Limit ring entries actually used, min 4. */
+#define RX_RING_SIZE	8 /*** RTnet ***/
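+
+/* A minimal sketch of the index arithmetic the comment above relies on:
+ * with a power-of-two ring size the wrap of a free-running counter
+ * reduces to a bit mask, e.g.
+ *
+ *	entry = np->cur_tx % TX_RING_SIZE;		(as written in the code)
+ *	entry = np->cur_tx & (TX_RING_SIZE - 1);	(the mask the compiler emits)
+ */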
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (2*HZ)
+
+#define NATSEMI_HW_TIMEOUT	400
+#define NATSEMI_TIMER_FREQ	3*HZ
+#define NATSEMI_PG0_NREGS	64
+#define NATSEMI_RFDR_NREGS	8
+#define NATSEMI_PG1_NREGS	4
+#define NATSEMI_NREGS		(NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
+				 NATSEMI_PG1_NREGS)
+#define NATSEMI_REGS_VER	1 /* v1 added RFDR registers */
+#define NATSEMI_REGS_SIZE	(NATSEMI_NREGS * sizeof(u32))
+#define NATSEMI_EEPROM_SIZE	24 /* 12 16-bit values */
+
+#define PKT_BUF_SZ		1536 /* Size of each temporary Rx buffer. */
+
+/* These identify the driver base version and may not be removed. */
+static char version[] =
+  KERN_INFO DRV_NAME " dp8381x driver, version "
+      DRV_VERSION ", " DRV_RELDATE "\n"
+  KERN_INFO "  originally by Donald Becker <becker@scyld.com>\n"
+  KERN_INFO "  http://www.scyld.com/network/natsemi.html\n"
+  KERN_INFO "  2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n"
+  KERN_INFO "  RTnet port by Erik Buit\n";
+
+MODULE_AUTHOR("Erik Buit");
+MODULE_DESCRIPTION("RTnet National Semiconductor DP8381x series PCI Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(max_interrupt_work, int, 0444);
+module_param(mtu, int, 0444);
+module_param_named(debug, local_debug, int, 0444);
+/*** RTnet ***
+MODULE_PARM(rx_copybreak, "i");
+ *** RTnet ***/
+module_param_array(options, int, NULL, 0444);
+module_param_array(full_duplex, int, NULL, 0444);
+MODULE_PARM_DESC(max_interrupt_work,
+	"DP8381x maximum events handled per interrupt");
+MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
+MODULE_PARM_DESC(debug, "DP8381x default debug level");
+/*** RTnet ***
+MODULE_PARM_DESC(rx_copybreak,
+	"DP8381x copy breakpoint for copy-only-tiny-frames");
+ *** RTnet ***/
+MODULE_PARM_DESC(options, "DP8381x: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
+
+/*
+				Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
+It also works with other chips in the DP83810 series.
+
+II. Board-specific settings
+
+This driver requires the PCI interrupt line to be valid.
+It honors the EEPROM-set values.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
+The NatSemi design uses a 'next descriptor' pointer that the driver forms
+into a list.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack.  Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames.  New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets.  When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine.  Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that unaligned buffers are not permitted
+by the hardware.  Thus the IP header at offset 14 in an ethernet frame isn't
+longword aligned for further processing.  When copying, frames are put into the
+skbuff at an offset of "+2", 16-byte aligning the IP header.
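+
+A minimal sketch of the copybreak decision described above (names are
+illustrative; note that the RTnet port below always receives in place and
+does not use rx_copybreak):
+
+	if (pkt_len < rx_copybreak) {
+		skb = dev_alloc_skb(pkt_len + 2);
+		skb_reserve(skb, 2);			(16-byte align the IP header)
+		memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
+	} else {
+		skb = rx_skbuff[entry];			(pass the Rx buffer itself up)
+		skb_put(skb, pkt_len);
+		rx_skbuff[entry] = NULL;		(refilled by a later pass)
+	}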
+
+IIId. Synchronization
+
+Most operations are synchronized on the np->lock irq spinlock, except the
+performance critical codepaths:
+
+The rx process only runs in the interrupt handler. Access from outside
+the interrupt handler is only permitted after disable_irq().
+
+The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
+is set, then access is permitted under spin_lock_irq(&np->lock).
+
+Thus configuration functions that want to access everything must call
+	disable_irq(dev->irq);
+	spin_lock_bh(dev->xmit_lock);
+	spin_lock_irq(&np->lock);
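+
+and presumably release them again in the reverse order:
+	spin_unlock_irq(&np->lock);
+	spin_unlock_bh(dev->xmit_lock);
+	enable_irq(dev->irq);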
+
+IV. Notes
+
+NatSemi PCI network controllers are very uncommon.
+
+IVb. References
+
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+Datasheet is available from:
+http://www.national.com/pf/DP/DP83815.html
+
+IVc. Errata
+
+None characterised.
+*/
+
+
+
+enum pcistuff {
+	PCI_USES_IO = 0x01,
+	PCI_USES_MEM = 0x02,
+	PCI_USES_MASTER = 0x04,
+	PCI_ADDR0 = 0x08,
+	PCI_ADDR1 = 0x10,
+};
+
+/* MMIO operations required */
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+
+
+/* array of board data directly indexed by pci_tbl[x].driver_data */
+static struct {
+	const char *name;
+	unsigned long flags;
+} natsemi_pci_info[] = {
+	{ "NatSemi DP8381[56]", PCI_IOTYPE },
+};
+
+static struct pci_device_id natsemi_pci_tbl[] = {
+	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_83815, PCI_ANY_ID, PCI_ANY_ID, },
+	{ 0, },
+};
+MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
+
+/* Offsets to the device registers.
+   Unlike software-only systems, device drivers interact with complex hardware.
+   It's not useful to define symbolic names for every register bit in the
+   device.
+*/
+enum register_offsets {
+	ChipCmd			= 0x00,
+	ChipConfig		= 0x04,
+	EECtrl			= 0x08,
+	PCIBusCfg		= 0x0C,
+	IntrStatus		= 0x10,
+	IntrMask		= 0x14,
+	IntrEnable		= 0x18,
+	IntrHoldoff		= 0x1C, /* DP83816 only */
+	TxRingPtr		= 0x20,
+	TxConfig		= 0x24,
+	RxRingPtr		= 0x30,
+	RxConfig		= 0x34,
+	ClkRun			= 0x3C,
+	WOLCmd			= 0x40,
+	PauseCmd		= 0x44,
+	RxFilterAddr		= 0x48,
+	RxFilterData		= 0x4C,
+	BootRomAddr		= 0x50,
+	BootRomData		= 0x54,
+	SiliconRev		= 0x58,
+	StatsCtrl		= 0x5C,
+	StatsData		= 0x60,
+	RxPktErrs		= 0x60,
+	RxMissed		= 0x68,
+	RxCRCErrs		= 0x64,
+	BasicControl		= 0x80,
+	BasicStatus		= 0x84,
+	AnegAdv			= 0x90,
+	AnegPeer		= 0x94,
+	PhyStatus		= 0xC0,
+	MIntrCtrl		= 0xC4,
+	MIntrStatus		= 0xC8,
+	PhyCtrl			= 0xE4,
+
+	/* These are from the spec, around page 78... on a separate table.
+	 * The meaning of these registers depend on the value of PGSEL. */
+	PGSEL			= 0xCC,
+	PMDCSR			= 0xE4,
+	TSTDAT			= 0xFC,
+	DSPCFG			= 0xF4,
+	SDCFG			= 0xF8
+};
+/* the values for the 'magic' registers above (PGSEL=1) */
+#define PMDCSR_VAL	0x189c	/* enable preferred adaptation circuitry */
+#define TSTDAT_VAL	0x0
+#define DSPCFG_VAL	0x5040
+#define SDCFG_VAL	0x008c	/* set voltage thresholds for Signal Detect */
+#define DSPCFG_LOCK	0x20	/* coefficient lock bit in DSPCFG */
+#define TSTDAT_FIXED	0xe8	/* magic number for bad coefficients */
+
+/* misc PCI space registers */
+enum pci_register_offsets {
+	PCIPM			= 0x44,
+};
+
+enum ChipCmd_bits {
+	ChipReset		= 0x100,
+	RxReset			= 0x20,
+	TxReset			= 0x10,
+	RxOff			= 0x08,
+	RxOn			= 0x04,
+	TxOff			= 0x02,
+	TxOn			= 0x01,
+};
+
+enum ChipConfig_bits {
+	CfgPhyDis		= 0x200,
+	CfgPhyRst		= 0x400,
+	CfgExtPhy		= 0x1000,
+	CfgAnegEnable		= 0x2000,
+	CfgAneg100		= 0x4000,
+	CfgAnegFull		= 0x8000,
+	CfgAnegDone		= 0x8000000,
+	CfgFullDuplex		= 0x20000000,
+	CfgSpeed100		= 0x40000000,
+	CfgLink			= 0x80000000,
+};
+
+enum EECtrl_bits {
+	EE_ShiftClk		= 0x04,
+	EE_DataIn		= 0x01,
+	EE_ChipSelect		= 0x08,
+	EE_DataOut		= 0x02,
+};
+
+enum PCIBusCfg_bits {
+	EepromReload		= 0x4,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum IntrStatus_bits {
+	IntrRxDone		= 0x0001,
+	IntrRxIntr		= 0x0002,
+	IntrRxErr		= 0x0004,
+	IntrRxEarly		= 0x0008,
+	IntrRxIdle		= 0x0010,
+	IntrRxOverrun		= 0x0020,
+	IntrTxDone		= 0x0040,
+	IntrTxIntr		= 0x0080,
+	IntrTxErr		= 0x0100,
+	IntrTxIdle		= 0x0200,
+	IntrTxUnderrun		= 0x0400,
+	StatsMax		= 0x0800,
+	SWInt			= 0x1000,
+	WOLPkt			= 0x2000,
+	LinkChange		= 0x4000,
+	IntrHighBits		= 0x8000,
+	RxStatusFIFOOver	= 0x10000,
+	IntrPCIErr		= 0xf00000,
+	RxResetDone		= 0x1000000,
+	TxResetDone		= 0x2000000,
+	IntrAbnormalSummary	= 0xCD20,
+};
+
+/*
+ * Default Interrupts:
+ * Rx OK, Rx Packet Error, Rx Overrun,
+ * Tx OK, Tx Packet Error, Tx Underrun,
+ * MIB Service, Phy Interrupt, High Bits,
+ * Rx Status FIFO overrun,
+ * Received Target Abort, Received Master Abort,
+ * Signalled System Error, Received Parity Error
+ */
+#define DEFAULT_INTR 0x00f1cd65
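+
+/*
+ * The mask above appears to decompose into the IntrStatus_bits defined
+ * earlier (the bitwise OR below equals 0x00f1cd65):
+ *
+ *	IntrRxDone | IntrRxErr | IntrRxOverrun | IntrTxDone |
+ *	IntrTxErr | IntrTxUnderrun | StatsMax | LinkChange |
+ *	IntrHighBits | RxStatusFIFOOver | IntrPCIErr
+ */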
+
+enum TxConfig_bits {
+	TxDrthMask		= 0x3f,
+	TxFlthMask		= 0x3f00,
+	TxMxdmaMask		= 0x700000,
+	TxMxdma_512		= 0x0,
+	TxMxdma_4		= 0x100000,
+	TxMxdma_8		= 0x200000,
+	TxMxdma_16		= 0x300000,
+	TxMxdma_32		= 0x400000,
+	TxMxdma_64		= 0x500000,
+	TxMxdma_128		= 0x600000,
+	TxMxdma_256		= 0x700000,
+	TxCollRetry		= 0x800000,
+	TxAutoPad		= 0x10000000,
+	TxMacLoop		= 0x20000000,
+	TxHeartIgn		= 0x40000000,
+	TxCarrierIgn		= 0x80000000
+};
+
+enum RxConfig_bits {
+	RxDrthMask		= 0x3e,
+	RxMxdmaMask		= 0x700000,
+	RxMxdma_512		= 0x0,
+	RxMxdma_4		= 0x100000,
+	RxMxdma_8		= 0x200000,
+	RxMxdma_16		= 0x300000,
+	RxMxdma_32		= 0x400000,
+	RxMxdma_64		= 0x500000,
+	RxMxdma_128		= 0x600000,
+	RxMxdma_256		= 0x700000,
+	RxAcceptLong		= 0x8000000,
+	RxAcceptTx		= 0x10000000,
+	RxAcceptRunt		= 0x40000000,
+	RxAcceptErr		= 0x80000000
+};
+
+enum ClkRun_bits {
+	PMEEnable		= 0x100,
+	PMEStatus		= 0x8000,
+};
+
+enum WolCmd_bits {
+	WakePhy			= 0x1,
+	WakeUnicast		= 0x2,
+	WakeMulticast		= 0x4,
+	WakeBroadcast		= 0x8,
+	WakeArp			= 0x10,
+	WakePMatch0		= 0x20,
+	WakePMatch1		= 0x40,
+	WakePMatch2		= 0x80,
+	WakePMatch3		= 0x100,
+	WakeMagic		= 0x200,
+	WakeMagicSecure		= 0x400,
+	SecureHack		= 0x100000,
+	WokePhy			= 0x400000,
+	WokeUnicast		= 0x800000,
+	WokeMulticast		= 0x1000000,
+	WokeBroadcast		= 0x2000000,
+	WokeArp			= 0x4000000,
+	WokePMatch0		= 0x8000000,
+	WokePMatch1		= 0x10000000,
+	WokePMatch2		= 0x20000000,
+	WokePMatch3		= 0x40000000,
+	WokeMagic		= 0x80000000,
+	WakeOptsSummary		= 0x7ff
+};
+
+enum RxFilterAddr_bits {
+	RFCRAddressMask		= 0x3ff,
+	AcceptMulticast		= 0x00200000,
+	AcceptMyPhys		= 0x08000000,
+	AcceptAllPhys		= 0x10000000,
+	AcceptAllMulticast	= 0x20000000,
+	AcceptBroadcast		= 0x40000000,
+	RxFilterEnable		= 0x80000000
+};
+
+enum StatsCtrl_bits {
+	StatsWarn		= 0x1,
+	StatsFreeze		= 0x2,
+	StatsClear		= 0x4,
+	StatsStrobe		= 0x8,
+};
+
+enum MIntrCtrl_bits {
+	MICRIntEn		= 0x2,
+};
+
+enum PhyCtrl_bits {
+	PhyAddrMask		= 0xf,
+};
+
+/* values we might find in the silicon revision register */
+#define SRR_DP83815_C	0x0302
+#define SRR_DP83815_D	0x0403
+#define SRR_DP83816_A4	0x0504
+#define SRR_DP83816_A5	0x0505
+
+/* The Rx and Tx buffer descriptors. */
+/* Note that using only 32 bit fields simplifies conversion to big-endian
+   architectures. */
+struct netdev_desc {
+	u32 next_desc;
+	s32 cmd_status;
+	u32 addr;
+	u32 software_use;
+};
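+
+/*
+ * A minimal sketch of how such descriptors are typically chained into a
+ * ring through next_desc (physical addresses; the driver's actual setup
+ * lives in alloc_ring()/init_ring() below):
+ *
+ *	for (i = 0; i < RX_RING_SIZE; i++)
+ *		rx_ring[i].next_desc = cpu_to_le32(ring_dma +
+ *			sizeof(struct netdev_desc) * ((i + 1) % RX_RING_SIZE));
+ */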
+
+/* Bits in network_desc.status */
+enum desc_status_bits {
+	DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
+	DescNoCRC=0x10000000, DescPktOK=0x08000000,
+	DescSizeMask=0xfff,
+
+	DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
+	DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
+	DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
+	DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,
+
+	DescRxAbort=0x04000000, DescRxOver=0x02000000,
+	DescRxDest=0x01800000, DescRxLong=0x00400000,
+	DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
+	DescRxCRC=0x00080000, DescRxAlign=0x00040000,
+	DescRxLoop=0x00020000, DesRxColl=0x00010000,
+};
+
+struct netdev_private {
+	/* Descriptor rings first for alignment */
+	dma_addr_t ring_dma;
+	struct netdev_desc *rx_ring;
+	struct netdev_desc *tx_ring;
+	/* The addresses of receive-in-place skbuffs */
+	struct rtskb *rx_skbuff[RX_RING_SIZE]; /*** RTnet ***/
+	dma_addr_t rx_dma[RX_RING_SIZE];
+	/* address of a sent-in-place packet/buffer, for later free() */
+	struct rtskb *tx_skbuff[TX_RING_SIZE]; /*** RTnet ***/
+	dma_addr_t tx_dma[TX_RING_SIZE];
+	struct net_device_stats stats;
+	/* Media monitoring timer */
+	struct timer_list timer;
+	/* Frequently used values: keep some adjacent for cache effect */
+	struct pci_dev *pci_dev;
+	struct netdev_desc *rx_head_desc;
+	/* Producer/consumer ring indices */
+	unsigned int cur_rx, dirty_rx;
+	unsigned int cur_tx, dirty_tx;
+	/* Based on MTU+slack. */
+	unsigned int rx_buf_sz;
+	int oom;
+	/* Do not touch the nic registers */
+	int hands_off;
+	/* These values keep track of the transceiver/media in use */
+	unsigned int full_duplex;
+	/* Rx filter */
+	u32 cur_rx_mode;
+	u32 rx_filter[16];
+	/* FIFO and PCI burst thresholds */
+	u32 tx_config, rx_config;
+	/* original contents of ClkRun register */
+	u32 SavedClkRun;
+	/* silicon revision */
+	u32 srr;
+	/* expected DSPCFG value */
+	u16 dspcfg;
+	/* MII transceiver section */
+	u16 advertising;
+	unsigned int iosize;
+	rtdm_lock_t lock;
+	u32 msg_enable;
+
+	rtdm_irq_t irq_handle;
+};
+
+static int eeprom_read(long ioaddr, int location);
+static int mdio_read(struct rtnet_device *dev, int phy_id, int reg);
+/*static void mdio_write(struct rtnet_device *dev, int phy_id, int reg, u16 data);*/
+static void natsemi_reset(struct rtnet_device *dev);
+static void natsemi_reload_eeprom(struct rtnet_device *dev);
+static void natsemi_stop_rxtx(struct rtnet_device *dev);
+static int netdev_open(struct rtnet_device *dev);
+static void do_cable_magic(struct rtnet_device *dev);
+static void undo_cable_magic(struct rtnet_device *dev);
+static void check_link(struct rtnet_device *dev);
+/*static void netdev_timer(unsigned long data);*/
+static void dump_ring(struct rtnet_device *dev);
+/*static void tx_timeout(struct rtnet_device *dev);*/
+static int alloc_ring(struct rtnet_device *dev);
+static void refill_rx(struct rtnet_device *dev);
+static void init_ring(struct rtnet_device *dev);
+static void drain_tx(struct rtnet_device *dev);
+static void drain_ring(struct rtnet_device *dev);
+static void free_ring(struct rtnet_device *dev);
+/*static void reinit_ring(struct rtnet_device *dev);*/
+static void init_registers(struct rtnet_device *dev);
+static int start_tx(struct rtskb *skb, struct rtnet_device *dev);
+static int intr_handler(rtdm_irq_t *irq_handle);
+static void netdev_error(struct rtnet_device *dev, int intr_status);
+static void netdev_rx(struct rtnet_device *dev, nanosecs_abs_t *time_stamp);
+static void netdev_tx_done(struct rtnet_device *dev);
+static void __set_rx_mode(struct rtnet_device *dev);
+/*static void set_rx_mode(struct rtnet_device *dev);*/
+static void __get_stats(struct rtnet_device *rtdev);
+static struct net_device_stats *get_stats(struct rtnet_device *dev);
+/*static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_set_wol(struct rtnet_device *dev, u32 newval);
+static int netdev_get_wol(struct rtnet_device *dev, u32 *supported, u32 *cur);
+static int netdev_set_sopass(struct rtnet_device *dev, u8 *newval);
+static int netdev_get_sopass(struct rtnet_device *dev, u8 *data);
+static int netdev_get_ecmd(struct rtnet_device *dev, struct ethtool_cmd *ecmd);
+static int netdev_set_ecmd(struct rtnet_device *dev, struct ethtool_cmd *ecmd);
+static void enable_wol_mode(struct rtnet_device *dev, int enable_intr);*/
+static int netdev_close(struct rtnet_device *dev);
+/*static int netdev_get_regs(struct rtnet_device *dev, u8 *buf);
+static int netdev_get_eeprom(struct rtnet_device *dev, u8 *buf);*/
+
+
+static int natsemi_probe1 (struct pci_dev *pdev,
+	const struct pci_device_id *ent)
+{
+	struct rtnet_device *dev; /*** RTnet ***/
+	struct netdev_private *np;
+	int i, option, irq, chip_idx = ent->driver_data;
+	static int find_cnt = -1;
+	unsigned long ioaddr, iosize;
+	const int pcibar = 1; /* PCI base address register */
+	int prev_eedata;
+	u32 tmp;
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+	static int printed_version;
+	if (!printed_version++)
+		rtdm_printk(version);
+#endif
+
+	i = pci_enable_device(pdev);
+	if (i) return i;
+
+	/* natsemi has a non-standard PM control register
+	 * in PCI config space.  Some boards apparently need
+	 * to be brought to D0 in this manner.
+	 */
+	pci_read_config_dword(pdev, PCIPM, &tmp);
+	if (tmp & PCI_PM_CTRL_STATE_MASK) {
+		/* D0 state, disable PME assertion */
+		u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
+		pci_write_config_dword(pdev, PCIPM, newtmp);
+	}
+
+	find_cnt++;
+	ioaddr = pci_resource_start(pdev, pcibar);
+	iosize = pci_resource_len(pdev, pcibar);
+	irq = pdev->irq;
+
+/*** RTnet ***/
+	if (cards[find_cnt] == 0)
+		goto err_out;
+/*** RTnet ***/
+
+	if (natsemi_pci_info[chip_idx].flags & PCI_USES_MASTER)
+		pci_set_master(pdev);
+
+/*** RTnet ***/
+	dev = rt_alloc_etherdev(sizeof(struct netdev_private),
+				RX_RING_SIZE * 2 + TX_RING_SIZE);
+	if (dev == NULL) {
+		rtdm_printk(KERN_ERR "init_ethernet failed for card #%d\n", find_cnt);
+		goto err_out;
+	}
+	rtdev_alloc_name(dev, "rteth%d");
+	rt_rtdev_connect(dev, &RTDEV_manager);
+	dev->vers = RTDEV_VERS_2_0;
+	dev->sysbind = &pdev->dev;
+/*** RTnet ***/
+
+	i = pci_request_regions(pdev, dev->name);
+	if (i) {
+/*** RTnet ***/
+		rt_rtdev_disconnect(dev);
+		rtdev_free(dev);
+/*** RTnet ***/
+		return i;
+	}
+
+	{
+		void *mmio = ioremap (ioaddr, iosize);
+		if (!mmio) {
+			pci_release_regions(pdev);
+/*** RTnet ***/
+			rt_rtdev_disconnect(dev);
+			rtdev_free(dev);
+/*** RTnet ***/
+			return -ENOMEM;
+		}
+		ioaddr = (unsigned long) mmio;
+	}
+
+	/* Work around the dropped serial bit. */
+	prev_eedata = eeprom_read(ioaddr, 6);
+	for (i = 0; i < 3; i++) {
+		int eedata = eeprom_read(ioaddr, i + 7);
+		dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
+		dev->dev_addr[i*2+1] = eedata >> 7;
+		prev_eedata = eedata;
+	}
+
+	dev->base_addr = ioaddr;
+	dev->irq = irq;
+
+	np = dev->priv;
+
+	np->pci_dev = pdev;
+	pci_set_drvdata(pdev, dev);
+	np->iosize = iosize;
+	rtdm_lock_init(&np->lock);
+	np->msg_enable = (local_debug >= 0) ? (1<<local_debug)-1 : NATSEMI_DEF_MSG;
+	np->hands_off = 0;
+
+	/* Reset the chip to erase previous misconfiguration. */
+	natsemi_reload_eeprom(dev);
+	natsemi_reset(dev);
+
+	option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
+	if (dev->mem_start)
+		option = dev->mem_start;
+
+	/* The lower four bits are the media type. */
+	if (option) {
+		if (option & 0x200)
+			np->full_duplex = 1;
+		if (option & 15)
+			rtdm_printk(KERN_INFO
+				"%s: ignoring user supplied media type %d",
+				dev->name, option & 15);
+	}
+	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt])
+		np->full_duplex = 1;
+
+	/* The chip-specific entries in the device structure. */
+	dev->open = &netdev_open;
+	dev->hard_start_xmit = &start_tx;
+	dev->stop = &netdev_close;
+	dev->get_stats = &get_stats;
+/*** RTnet ***
+	dev->set_multicast_list = &set_rx_mode;
+	dev->do_ioctl = &netdev_ioctl;
+	dev->tx_timeout = &tx_timeout;
+	dev->watchdog_timeo = TX_TIMEOUT;
+  *** RTnet ***/
+
+	if (mtu)
+		dev->mtu = mtu;
+
+/*** RTnet ***/
+	i = rt_register_rtnetdev(dev);
+	if (i) {
+		goto err_out_unmap;
+	}
+/*** RTnet ***/
+
+	rtnetif_carrier_off(dev);
+
+	if (netif_msg_drv(np)) {
+		rtdm_printk(KERN_INFO "%s: %s at %#08lx, ",
+			dev->name, natsemi_pci_info[chip_idx].name, ioaddr);
+		for (i = 0; i < ETH_ALEN-1; i++)
+				rtdm_printk("%02x:", dev->dev_addr[i]);
+		rtdm_printk("%02x, IRQ %d.\n", dev->dev_addr[i], irq);
+	}
+
+	np->advertising = mdio_read(dev, 1, MII_ADVERTISE);
+	if ((readl((void *)(ioaddr + ChipConfig)) & 0xe000) != 0xe000
+	 && netif_msg_probe(np)) {
+		u32 chip_config = readl((void *)(ioaddr + ChipConfig));
+		rtdm_printk(KERN_INFO "%s: Transceiver default autonegotiation %s "
+			"10%s %s duplex.\n",
+			dev->name,
+			chip_config & CfgAnegEnable ?
+			  "enabled, advertise" : "disabled, force",
+			chip_config & CfgAneg100 ? "0" : "",
+			chip_config & CfgAnegFull ? "full" : "half");
+	}
+	if (netif_msg_probe(np))
+		rtdm_printk(KERN_INFO
+			"%s: Transceiver status %#04x advertising %#04x.\n",
+			dev->name, mdio_read(dev, 1, MII_BMSR),
+			np->advertising);
+
+	/* save the silicon revision for later querying */
+	np->srr = readl((void *)(ioaddr + SiliconRev));
+	if (netif_msg_hw(np))
+		rtdm_printk(KERN_INFO "%s: silicon revision %#04x.\n",
+				dev->name, np->srr);
+
+
+	return 0;
+
+err_out_unmap:
+#ifdef USE_MEM
+	iounmap((void *)ioaddr);
+err_out_free_res:
+#endif
+	pci_release_regions(pdev);
+/*err_out_free_netdev:*/
+/*** RTnet ***/
+	rt_rtdev_disconnect(dev);
+	rtdev_free(dev);
+/*** RTnet ***/
+err_out:
+	return -ENODEV;
+
+}
+
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
+   The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */
+
+/* Delay between EEPROM clock transitions.
+   No extra delay is needed with 33MHz PCI, but future 66MHz access may need
+   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
+   made udelay() unreliable.
+   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
+   deprecated.
+*/
+#define eeprom_delay(ee_addr)	readl((void *)(ee_addr))
+
+#define EE_Write0 (EE_ChipSelect)
+#define EE_Write1 (EE_ChipSelect | EE_DataIn)
+
+/* The EEPROM commands include the always-set leading bit. */
+enum EEPROM_Cmds {
+	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
+};
+
+static int eeprom_read(long addr, int location)
+{
+	int i;
+	int retval = 0;
+	long ee_addr = addr + EECtrl;
+	int read_cmd = location | EE_ReadCmd;
+	writel(EE_Write0, (void *)ee_addr);
+
+	/* Shift the read command bits out. */
+	for (i = 10; i >= 0; i--) {
+		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
+		writel(dataval, (void *)ee_addr);
+		eeprom_delay(ee_addr);
+		writel(dataval | EE_ShiftClk, (void *)ee_addr);
+		eeprom_delay(ee_addr);
+	}
+	writel(EE_ChipSelect, (void *)ee_addr);
+	eeprom_delay(ee_addr);
+
+	for (i = 0; i < 16; i++) {
+		writel(EE_ChipSelect | EE_ShiftClk, (void *)ee_addr);
+		eeprom_delay(ee_addr);
+		retval |= (readl((void *)ee_addr) & EE_DataOut) ? 1 << i : 0;
+		writel(EE_ChipSelect, (void *)ee_addr);
+		eeprom_delay(ee_addr);
+	}
+
+	/* Terminate the EEPROM access. */
+	writel(EE_Write0, (void *)ee_addr);
+	writel(0, (void *)ee_addr);
+	return retval;
+}
+
+/* MII transceiver control section.
+ * The 83815 series has an internal transceiver, and we present the
+ * management registers as if they were MII connected. */
+
+static int mdio_read(struct rtnet_device *dev, int phy_id, int reg)
+{
+	if (phy_id == 1 && reg < 32)
+		return readl((void *)(dev->base_addr+BasicControl+(reg<<2)))&0xffff;
+	else
+		return 0xffff;
+}
+/*** RTnet
+static void mdio_write(struct rtnet_device *dev, int phy_id, int reg, u16 data)
+{
+	struct netdev_private *np = dev->priv;
+	if (phy_id == 1 && reg < 32) {
+		writew(data, dev->base_addr+BasicControl+(reg<<2));
+		switch (reg) {
+			case MII_ADVERTISE: np->advertising = data; break;
+		}
+	}
+}
+RTnet ***/
+/* CFG bits [13:16] [18:23] */
+#define CFG_RESET_SAVE 0xfde000
+/* WCSR bits [0:4] [9:10] */
+#define WCSR_RESET_SAVE 0x61f
+/* RFCR bits [20] [22] [27:31] */
+#define RFCR_RESET_SAVE 0xf8500000
+
+static void natsemi_reset(struct rtnet_device *dev)
+{
+	int i;
+	u32 cfg;
+	u32 wcsr;
+	u32 rfcr;
+	u16 pmatch[3];
+	u16 sopass[3];
+	struct netdev_private *np = dev->priv;
+
+	/*
+	 * Resetting the chip causes some registers to be lost.
+	 * Natsemi suggests NOT reloading the EEPROM while live, so instead
+	 * we save the state that would have been loaded from EEPROM
+	 * on a normal power-up (see the spec EEPROM map).  This assumes
+	 * whoever calls this will follow up with init_registers() eventually.
+	 */
+
+	/* CFG */
+	cfg = readl((void *)(dev->base_addr + ChipConfig)) & CFG_RESET_SAVE;
+	/* WCSR */
+	wcsr = readl((void *)(dev->base_addr + WOLCmd)) & WCSR_RESET_SAVE;
+	/* RFCR */
+	rfcr = readl((void *)(dev->base_addr + RxFilterAddr)) & RFCR_RESET_SAVE;
+	/* PMATCH */
+	for (i = 0; i < 3; i++) {
+		writel(i*2, (void *)(dev->base_addr + RxFilterAddr));
+		pmatch[i] = readw((void *)(dev->base_addr + RxFilterData));
+	}
+	/* SOPAS */
+	for (i = 0; i < 3; i++) {
+		writel(0xa+(i*2), (void *)(dev->base_addr + RxFilterAddr));
+		sopass[i] = readw((void *)(dev->base_addr + RxFilterData));
+	}
+
+	/* now whack the chip */
+	writel(ChipReset, (void *)(dev->base_addr + ChipCmd));
+	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+		if (!(readl((void *)(dev->base_addr + ChipCmd)) & ChipReset))
+			break;
+		udelay(5);
+	}
+	if (i==NATSEMI_HW_TIMEOUT) {
+		rtdm_printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
+			dev->name, i*5);
+	} else if (netif_msg_hw(np)) {
+		rtdm_printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
+			dev->name, i*5);
+	}
+
+	/* restore CFG */
+	cfg |= readl((void *)(dev->base_addr + ChipConfig)) & ~CFG_RESET_SAVE;
+	writel(cfg, (void *)(dev->base_addr + ChipConfig));
+	/* restore WCSR */
+	wcsr |= readl((void *)(dev->base_addr + WOLCmd)) & ~WCSR_RESET_SAVE;
+	writel(wcsr, (void *)(dev->base_addr + WOLCmd));
+	/* read RFCR */
+	rfcr |= readl((void *)(dev->base_addr + RxFilterAddr)) & ~RFCR_RESET_SAVE;
+	/* restore PMATCH */
+	for (i = 0; i < 3; i++) {
+		writel(i*2, (void *)(dev->base_addr + RxFilterAddr));
+		writew(pmatch[i], (void *)(dev->base_addr + RxFilterData));
+	}
+	for (i = 0; i < 3; i++) {
+		writel(0xa+(i*2), (void *)(dev->base_addr + RxFilterAddr));
+		writew(sopass[i], (void *)(dev->base_addr + RxFilterData));
+	}
+	/* restore RFCR */
+	writel(rfcr, (void *)(dev->base_addr + RxFilterAddr));
+}
+
+static void natsemi_reload_eeprom(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+	int i;
+
+	writel(EepromReload, (void *)(dev->base_addr + PCIBusCfg));
+	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+		udelay(50);
+		if (!(readl((void *)(dev->base_addr + PCIBusCfg)) & EepromReload))
+			break;
+	}
+	if (i==NATSEMI_HW_TIMEOUT) {
+		rtdm_printk(KERN_WARNING "%s: EEPROM did not reload in %d usec.\n",
+			dev->name, i*50);
+	} else if (netif_msg_hw(np)) {
+		rtdm_printk(KERN_DEBUG "%s: EEPROM reloaded in %d usec.\n",
+			dev->name, i*50);
+	}
+}
+
+static void natsemi_stop_rxtx(struct rtnet_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = dev->priv;
+	int i;
+
+	writel(RxOff | TxOff, (void *)(ioaddr + ChipCmd));
+	for(i=0;i< NATSEMI_HW_TIMEOUT;i++) {
+		if ((readl((void *)(ioaddr + ChipCmd)) & (TxOn|RxOn)) == 0)
+			break;
+		udelay(5);
+	}
+	if (i==NATSEMI_HW_TIMEOUT) {
+		rtdm_printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
+			dev->name, i*5);
+	} else if (netif_msg_hw(np)) {
+		rtdm_printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
+			dev->name, i*5);
+	}
+}
+
+static int netdev_open(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+	long ioaddr = dev->base_addr;
+	int i;
+
+	/* Reset the chip, just in case. */
+	natsemi_reset(dev);
+
+/*** RTnet ***/
+	rt_stack_connect(dev, &STACK_manager);
+	i = rtdm_irq_request(&np->irq_handle, dev->irq, intr_handler,
+			     RTDM_IRQTYPE_SHARED, "rt_natsemi", dev);
+/*** RTnet ***/
+/*	i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);*/
+	if (i) {
+		return i;
+	}
+
+	if (netif_msg_ifup(np))
+		rtdm_printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+			dev->name, dev->irq);
+	i = alloc_ring(dev);
+	if (i < 0) {
+		rtdm_irq_free(&np->irq_handle);
+		return i;
+	}
+	init_ring(dev);
+	init_registers(dev);
+	/* now set the MAC address according to dev->dev_addr */
+	for (i = 0; i < 3; i++) {
+		u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];
+
+		writel(i*2, (void *)(ioaddr + RxFilterAddr));
+		writew(mac, (void *)(ioaddr + RxFilterData));
+	}
+	writel(np->cur_rx_mode, (void *)(ioaddr + RxFilterAddr));
+
+	rtnetif_start_queue(dev); /*** RTnet ***/
+
+	if (netif_msg_ifup(np))
+		rtdm_printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
+			dev->name, (int)readl((void *)(ioaddr + ChipCmd)));
+
+/*** RTnet ***/
+	/* Set the timer to check for link beat. */
+/*** RTnet ***/
+
+	return 0;
+}
+
+static void do_cable_magic(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+
+	if (np->srr >= SRR_DP83816_A5)
+		return;
+
+	/*
+	 * 100 MBit links with short cables can trip an issue with the chip.
+	 * The problem manifests as lots of CRC errors and/or flickering
+	 * activity LED while idle.  This process is based on instructions
+	 * from engineers at National.
+	 */
+	if (readl((void *)(dev->base_addr + ChipConfig)) & CfgSpeed100) {
+		u16 data;
+
+		writew(1, (void *)(dev->base_addr + PGSEL));
+		/*
+		 * coefficient visibility should already be enabled via
+		 * DSPCFG | 0x1000
+		 */
+		data = readw((void *)(dev->base_addr + TSTDAT)) & 0xff;
+		/*
+		 * the value must be negative, and within certain values
+		 * (these values all come from National)
+		 */
+		if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
+			struct netdev_private *np = dev->priv;
+
+			/* the bug has been triggered - fix the coefficient */
+			writew(TSTDAT_FIXED, (void *)(dev->base_addr + TSTDAT));
+			/* lock the value */
+			data = readw((void *)(dev->base_addr + DSPCFG));
+			np->dspcfg = data | DSPCFG_LOCK;
+			writew(np->dspcfg, (void *)(dev->base_addr + DSPCFG));
+		}
+		writew(0, (void *)(dev->base_addr + PGSEL));
+	}
+}
+
+static void undo_cable_magic(struct rtnet_device *dev)
+{
+	u16 data;
+	struct netdev_private *np = dev->priv;
+
+	if (np->srr >= SRR_DP83816_A5)
+		return;
+
+	writew(1, (void *)(dev->base_addr + PGSEL));
+	/* make sure the lock bit is clear */
+	data = readw((void *)(dev->base_addr + DSPCFG));
+	np->dspcfg = data & ~DSPCFG_LOCK;
+	writew(np->dspcfg, (void *)(dev->base_addr + DSPCFG));
+	writew(0, (void *)(dev->base_addr + PGSEL));
+}
+
+static void check_link(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+	long ioaddr = dev->base_addr;
+	int duplex;
+	int chipcfg = readl((void *)(ioaddr + ChipConfig));
+
+	if (!(chipcfg & CfgLink)) {
+		if (rtnetif_carrier_ok(dev)) {
+			if (netif_msg_link(np))
+				rtdm_printk(KERN_NOTICE "%s: link down.\n",
+					dev->name);
+			rtnetif_carrier_off(dev);
+			undo_cable_magic(dev);
+		}
+		return;
+	}
+	if (!rtnetif_carrier_ok(dev)) {
+		if (netif_msg_link(np))
+			rtdm_printk(KERN_NOTICE "%s: link up.\n", dev->name);
+		rtnetif_carrier_on(dev);
+		do_cable_magic(dev);
+	}
+
+	duplex = np->full_duplex || (chipcfg & CfgFullDuplex ? 1 : 0);
+
+	/* if duplex is set then bit 28 must be set, too */
+	if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
+		if (netif_msg_link(np))
+			rtdm_printk(KERN_INFO
+				"%s: Setting %s-duplex based on negotiated "
+				"link capability.\n", dev->name,
+				duplex ? "full" : "half");
+		if (duplex) {
+			np->rx_config |= RxAcceptTx;
+			np->tx_config |= TxCarrierIgn | TxHeartIgn;
+		} else {
+			np->rx_config &= ~RxAcceptTx;
+			np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
+		}
+		writel(np->tx_config, (void *)(ioaddr + TxConfig));
+		writel(np->rx_config, (void *)(ioaddr + RxConfig));
+	}
+}
+
+static void init_registers(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+	long ioaddr = dev->base_addr;
+	int i;
+
+	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+		if (readl((void *)(dev->base_addr + ChipConfig)) & CfgAnegDone)
+			break;
+		udelay(10);
+	}
+	if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
+		rtdm_printk(KERN_INFO
+			"%s: autonegotiation did not complete in %d usec.\n",
+			dev->name, i*10);
+	}
+
+	/* On page 78 of the spec, they recommend some settings for "optimum
+	   performance" to be done in sequence.  These settings optimize some
+	   of the 100Mbit autodetection circuitry.  They say we only want to
+	   do this for rev C of the chip, but engineers at NSC (Bradley
+	   Kennedy) recommends always setting them.  If you don't, you get
+	   errors on some autonegotiations that make the device unusable.
+	*/
+	writew(1, (void *)(ioaddr + PGSEL));
+	writew(PMDCSR_VAL, (void *)(ioaddr + PMDCSR));
+	writew(TSTDAT_VAL, (void *)(ioaddr + TSTDAT));
+	writew(DSPCFG_VAL, (void *)(ioaddr + DSPCFG));
+	writew(SDCFG_VAL, (void *)(ioaddr + SDCFG));
+	writew(0, (void *)(ioaddr + PGSEL));
+	np->dspcfg = DSPCFG_VAL;
+
+	/* Enable PHY Specific event based interrupts.  Link state change
+	   and Auto-Negotiation Completion are among the affected.
+	   Read the intr status to clear it (needed for wake events).
+	*/
+	readw((void *)(ioaddr + MIntrStatus));
+	writew(MICRIntEn, (void *)(ioaddr + MIntrCtrl));
+
+	/* clear any interrupts that are pending, such as wake events */
+	readl((void *)(ioaddr + IntrStatus));
+
+	writel(np->ring_dma, (void *)(ioaddr + RxRingPtr));
+	writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
+		(void *)(ioaddr + TxRingPtr));
+
+	/* Initialize other registers.
+	 * Configure the PCI bus bursts and FIFO thresholds.
+	 * Configure for standard, in-spec Ethernet.
+	 * Start with half-duplex. check_link will update
+	 * to the correct settings.
+	 */
+
+	/* DRTH: 2: start tx if 64 bytes are in the fifo
+	 * FLTH: 0x10: refill with next packet if 512 bytes are free
+	 * MXDMA: 0: up to 256 byte bursts.
+	 *	MXDMA must be <= FLTH
+	 * ECRETRY=1
+	 * ATP=1
+	 */
+	np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 | (0x1002);
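+	/* 0x1002 encodes FLTH = 0x10 (bits 8-13) and DRTH = 2 (bits 0-5),
+	 * matching the fill/drain thresholds described above. */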
+	writel(np->tx_config, (void *)(ioaddr + TxConfig));
+
+	/* DRTH 0x10: start copying to memory if 128 bytes are in the fifo
+	 * MXDMA 0: up to 256 byte bursts
+	 */
+	np->rx_config = RxMxdma_256 | 0x20;
+	writel(np->rx_config, (void *)(ioaddr + RxConfig));
+
+	/* Disable PME:
+	 * The PME bit is initialized from the EEPROM contents.
+	 * PCI cards probably have PME disabled, but motherboard
+	 * implementations may have PME set to enable WakeOnLan.
+	 * With PME set the chip will scan incoming packets but
+	 * nothing will be written to memory. */
+	np->SavedClkRun = readl((void *)(ioaddr + ClkRun));
+	writel(np->SavedClkRun & ~PMEEnable, (void *)(ioaddr + ClkRun));
+	if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
+		rtdm_printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
+			dev->name, readl((void *)(ioaddr + WOLCmd)));
+	}
+
+	check_link(dev);
+	__set_rx_mode(dev);
+
+	/* Enable interrupts by setting the interrupt mask. */
+	writel(DEFAULT_INTR, (void *)(ioaddr + IntrMask));
+	writel(1, (void *)(ioaddr + IntrEnable));
+
+	writel(RxOn | TxOn, (void *)(ioaddr + ChipCmd));
+	writel(StatsClear, (void *)(ioaddr + StatsCtrl)); /* Clear Stats */
+}
+
+/*
+ * netdev_timer:
+ * Purpose:
+ * 1) check for link changes. Usually they are handled by the MII interrupt
+ *    but it doesn't hurt to check twice.
+ * 2) check for sudden death of the NIC:
+ *    It seems that a reference set for this chip went out with incorrect info,
+ *    and there exist boards that aren't quite right.  An unexpected voltage
+ *    drop can cause the PHY to get itself in a weird state (basically reset).
+ *    NOTE: this only seems to affect revC chips.
+ * 3) check for death of the RX path due to OOM
+ */
+/*** RTnet ***/
+/*** RTnet ***/
+
+static void dump_ring(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+
+	if (netif_msg_pktdata(np)) {
+		int i;
+		rtdm_printk(KERN_DEBUG "  Tx ring at %p:\n", np->tx_ring);
+		for (i = 0; i < TX_RING_SIZE; i++) {
+			rtdm_printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
+				i, np->tx_ring[i].next_desc,
+				np->tx_ring[i].cmd_status,
+				np->tx_ring[i].addr);
+		}
+		rtdm_printk(KERN_DEBUG "  Rx ring %p:\n", np->rx_ring);
+		for (i = 0; i < RX_RING_SIZE; i++) {
+			rtdm_printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
+				i, np->rx_ring[i].next_desc,
+				np->rx_ring[i].cmd_status,
+				np->rx_ring[i].addr);
+		}
+	}
+}
+
+/*** RTnet ***/
+/*** RTnet ***/
+
+static int alloc_ring(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
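+	/* Both rings share one DMA-consistent allocation: the RX
+	 * descriptors come first, the TX descriptors directly behind
+	 * them (see tx_ring below and TxRingPtr in init_registers()). */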
+	np->rx_ring = pci_alloc_consistent(np->pci_dev,
+		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
+		&np->ring_dma);
+	if (!np->rx_ring)
+		return -ENOMEM;
+	np->tx_ring = &np->rx_ring[RX_RING_SIZE];
+	return 0;
+}
+
+static void refill_rx(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+
+	/* Refill the Rx ring buffers. */
+	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+		struct rtskb *skb;
+		int entry = np->dirty_rx % RX_RING_SIZE;
+		if (np->rx_skbuff[entry] == NULL) {
+			skb = rtnetdev_alloc_rtskb(dev, np->rx_buf_sz);
+			np->rx_skbuff[entry] = skb;
+			if (skb == NULL)
+				break; /* Better luck next round. */
+			np->rx_dma[entry] = pci_map_single(np->pci_dev,
+				skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
+		}
+		np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
+	}
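+	/* If not a single descriptor could be refilled, the whole ring is
+	 * out of buffers; note the OOM state so netdev_rx() will not
+	 * restart the Rx engine. */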
+	if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
+		if (netif_msg_rx_err(np))
+			rtdm_printk(KERN_WARNING "%s: going OOM.\n", dev->name);
+		np->oom = 1;
+	}
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+	int i;
+
+	/* 1) TX ring */
+	np->dirty_tx = np->cur_tx = 0;
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		np->tx_skbuff[i] = NULL;
+		np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
+			+sizeof(struct netdev_desc)
+			*((i+1)%TX_RING_SIZE+RX_RING_SIZE));
+		np->tx_ring[i].cmd_status = 0;
+	}
+
+	/* 2) RX ring */
+	np->dirty_rx = 0;
+	np->cur_rx = RX_RING_SIZE;
+	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+	np->oom = 0;
+	np->rx_head_desc = &np->rx_ring[0];
+
+	/* Please be careful before changing this loop - at least gcc-2.95.1
+	 * miscompiles it otherwise.
+	 */
+	/* Initialize all Rx descriptors. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
+				+sizeof(struct netdev_desc)
+				*((i+1)%RX_RING_SIZE));
+		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
+		np->rx_skbuff[i] = NULL;
+	}
+	refill_rx(dev);
+	dump_ring(dev);
+}
+
+static void drain_tx(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+	int i;
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		if (np->tx_skbuff[i]) {
+			pci_unmap_single(np->pci_dev,
+				np->tx_dma[i], np->tx_skbuff[i]->len,
+				PCI_DMA_TODEVICE);
+			dev_kfree_rtskb(np->tx_skbuff[i]);
+			np->stats.tx_dropped++;
+		}
+		np->tx_skbuff[i] = NULL;
+	}
+}
+
+static void drain_ring(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+	int i;
+
+	/* Free all the skbuffs in the Rx queue. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].cmd_status = 0;
+		np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
+		if (np->rx_skbuff[i]) {
+			pci_unmap_single(np->pci_dev,
+				np->rx_dma[i], np->rx_skbuff[i]->len,
+				PCI_DMA_FROMDEVICE);
+			dev_kfree_rtskb(np->rx_skbuff[i]);
+		}
+		np->rx_skbuff[i] = NULL;
+	}
+	drain_tx(dev);
+}
+
+static void free_ring(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+	pci_free_consistent(np->pci_dev,
+		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
+		np->rx_ring, np->ring_dma);
+}
+
+static int start_tx(struct rtskb *skb, struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	unsigned entry;
+/*** RTnet ***/
+	rtdm_lockctx_t context;
+/*** RTnet ***/
+
+	/* Note: Ordering is important here, set the field with the
+	   "ownership" bit last, and only then increment cur_tx. */
+
+	/* Calculate the next Tx descriptor entry. */
+	entry = np->cur_tx % TX_RING_SIZE;
+
+	np->tx_skbuff[entry] = skb;
+	np->tx_dma[entry] = pci_map_single(np->pci_dev,
+				skb->data,skb->len, PCI_DMA_TODEVICE);
+
+	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);
+
+/*	spin_lock_irq(&np->lock);*/
+/*** RTnet ***/
+	rtdm_lock_get_irqsave(&np->lock, context);
+/*** RTnet ***/
+
+	if (!np->hands_off) {
+		/* get and patch time stamp just before the transmission */
+		if (skb->xmit_stamp)
+			*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() +
+				*skb->xmit_stamp);
+		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
+		/* StrongARM: Explicitly cache flush np->tx_ring and
+		 * skb->data,skb->len. */
+		wmb();
+		np->cur_tx++;
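+		/* Ring nearly full: reclaim completed descriptors and, if
+		 * it is still full afterwards, stop the transmit queue. */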
+		if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+			netdev_tx_done(dev);
+			if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
+				rtnetif_stop_queue(dev);
+		}
+		/* Wake the potentially-idle transmit channel. */
+		writel(TxOn, (void *)(dev->base_addr + ChipCmd));
+	} else {
+		dev_kfree_rtskb(skb); /*** RTnet ***/
+		np->stats.tx_dropped++;
+	}
+
+/*	spin_unlock_irq(&np->lock);*/
+/*** RTnet ***/
+	rtdm_lock_put_irqrestore(&np->lock, context);
+/*** RTnet ***/
+
+/*	dev->trans_start = jiffies;*/
+
+	if (netif_msg_tx_queued(np)) {
+		rtdm_printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+			dev->name, np->cur_tx, entry);
+	}
+	return 0;
+}
+
+static void netdev_tx_done(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+
+	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+		int entry = np->dirty_tx % TX_RING_SIZE;
+		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
+			break;
+		if (netif_msg_tx_done(np))
+			rtdm_printk(KERN_DEBUG
+				"%s: tx frame #%d finished, status %#08x.\n",
+					dev->name, np->dirty_tx,
+					le32_to_cpu(np->tx_ring[entry].cmd_status));
+		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
+			np->stats.tx_packets++;
+			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+		} else { /* Various Tx errors */
+			int tx_status =
+				le32_to_cpu(np->tx_ring[entry].cmd_status);
+			if (tx_status & (DescTxAbort|DescTxExcColl))
+				np->stats.tx_aborted_errors++;
+			if (tx_status & DescTxFIFO)
+				np->stats.tx_fifo_errors++;
+			if (tx_status & DescTxCarrier)
+				np->stats.tx_carrier_errors++;
+			if (tx_status & DescTxOOWCol)
+				np->stats.tx_window_errors++;
+			np->stats.tx_errors++;
+		}
+		pci_unmap_single(np->pci_dev,np->tx_dma[entry],
+					np->tx_skbuff[entry]->len,
+					PCI_DMA_TODEVICE);
+		/* Free the original skb. */
+		dev_kfree_rtskb(np->tx_skbuff[entry]); /*** RTnet ***/
+/*		dev_kfree_skb_irq(np->tx_skbuff[entry]);*/
+		np->tx_skbuff[entry] = NULL;
+	}
+	if (rtnetif_queue_stopped(dev)
+		&& np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+		/* The ring is no longer full, wake queue. */
+		rtnetif_wake_queue(dev);
+	}
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread. */
+static int intr_handler(rtdm_irq_t *irq_handle)
+{
+	nanosecs_abs_t time_stamp = rtdm_clock_read(); /*** RTnet ***/
+	struct rtnet_device *dev =
+	    rtdm_irq_get_arg(irq_handle, struct rtnet_device); /*** RTnet ***/
+	struct netdev_private *np = dev->priv;
+	unsigned int old_packet_cnt = np->stats.rx_packets; /*** RTnet ***/
+	long ioaddr = dev->base_addr;
+	int boguscnt = max_interrupt_work;
+	int ret = RTDM_IRQ_NONE;
+
+	if (np->hands_off)
+		return ret;
+	do {
+		/* Reading automatically acknowledges all int sources. */
+		u32 intr_status = readl((void *)(ioaddr + IntrStatus));
+
+		if (netif_msg_intr(np))
+			rtdm_printk(KERN_DEBUG
+				"%s: Interrupt, status %#08x, mask %#08x.\n",
+				dev->name, intr_status,
+				readl((void *)(ioaddr + IntrMask)));
+
+		if (intr_status == 0)
+			break;
+
+		ret = RTDM_IRQ_HANDLED;
+
+		if (intr_status &
+		   (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
+		    IntrRxErr | IntrRxOverrun)) {
+			netdev_rx(dev, &time_stamp);
+		}
+
+		if (intr_status &
+		   (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
+			rtdm_lock_get(&np->lock);
+			netdev_tx_done(dev);
+			rtdm_lock_put(&np->lock);
+		}
+
+		/* Abnormal error summary/uncommon events handlers. */
+		if (intr_status & IntrAbnormalSummary)
+			netdev_error(dev, intr_status);
+
+		if (--boguscnt < 0) {
+			if (netif_msg_intr(np))
+				rtdm_printk(KERN_WARNING
+					"%s: Too much work at interrupt, "
+					"status=%#08x.\n",
+					dev->name, intr_status);
+			break;
+		}
+	} while (1);
+
+	if (netif_msg_intr(np))
+		rtdm_printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name);
+
+/*** RTnet ***/
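+	/* Packets were received above: signal the RTnet stack manager so
+	 * the protocol layers can process them. */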
+	if (old_packet_cnt != np->stats.rx_packets)
+		rt_mark_stack_mgr(dev);
+	return ret;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+   for clarity and better register allocation. */
+static void netdev_rx(struct rtnet_device *dev, nanosecs_abs_t *time_stamp)
+{
+	struct netdev_private *np = dev->priv;
+	int entry = np->cur_rx % RX_RING_SIZE;
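+	/* Process at most the number of descriptors currently owned by the chip. */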
+	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+	s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
+
+	/* If the driver owns the next entry it's a new packet. Send it up. */
+	while (desc_status < 0) { /* e.g. & DescOwn */
+		if (netif_msg_rx_status(np))
+			rtdm_printk(KERN_DEBUG
+				"  netdev_rx() entry %d status was %#08x.\n",
+				entry, desc_status);
+		if (--boguscnt < 0)
+			break;
+		if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
+			if (desc_status & DescMore) {
+				if (netif_msg_rx_err(np))
+					rtdm_printk(KERN_WARNING
+						"%s: Oversized(?) Ethernet "
+						"frame spanned multiple "
+						"buffers, entry %#08x "
+						"status %#08x.\n", dev->name,
+						np->cur_rx, desc_status);
+				np->stats.rx_length_errors++;
+			} else {
+				/* There was an error. */
+				np->stats.rx_errors++;
+				if (desc_status & (DescRxAbort|DescRxOver))
+					np->stats.rx_over_errors++;
+				if (desc_status & (DescRxLong|DescRxRunt))
+					np->stats.rx_length_errors++;
+				if (desc_status & (DescRxInvalid|DescRxAlign))
+					np->stats.rx_frame_errors++;
+				if (desc_status & DescRxCRC)
+					np->stats.rx_crc_errors++;
+			}
+		} else {
+			struct rtskb *skb;
+			/* Omit CRC size. */
+			int pkt_len = (desc_status & DescSizeMask) - 4;
+			/* Check if the packet is long enough to accept
+			 * without copying to a minimally-sized skbuff. */
+/*** RTnet ***/
+			{
+				skb = np->rx_skbuff[entry];
+				pci_unmap_single(np->pci_dev, np->rx_dma[entry],
+					np->rx_skbuff[entry]->len,
+					PCI_DMA_FROMDEVICE);
+				rtskb_put(skb, pkt_len);
+				np->rx_skbuff[entry] = NULL;
+			}
+/*** RTnet ***/
+			skb->protocol = rt_eth_type_trans(skb, dev);
+			skb->time_stamp = *time_stamp;
+			rtnetif_rx(skb);
+			/*dev->last_rx = jiffies;*/
+/*** RTnet ***/
+			np->stats.rx_packets++;
+			np->stats.rx_bytes += pkt_len;
+		}
+		entry = (++np->cur_rx) % RX_RING_SIZE;
+		np->rx_head_desc = &np->rx_ring[entry];
+		desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
+	}
+	refill_rx(dev);
+
+	/* Restart Rx engine if stopped. */
+	if (np->oom)
+		;
+/*		mod_timer(&np->timer, jiffies + 1);*/
+	else
+		writel(RxOn, (void *)(dev->base_addr + ChipCmd));
+}
+
+static void netdev_error(struct rtnet_device *dev, int intr_status)
+{
+	struct netdev_private *np = dev->priv;
+	long ioaddr = dev->base_addr;
+
+	rtdm_lock_get(&np->lock);
+	if (intr_status & LinkChange) {
+		u16 adv = mdio_read(dev, 1, MII_ADVERTISE);
+		u16 lpa = mdio_read(dev, 1, MII_LPA);
+		if (mdio_read(dev, 1, MII_BMCR) & BMCR_ANENABLE
+		 && netif_msg_link(np)) {
+			rtdm_printk(KERN_INFO
+				"%s: Autonegotiation advertising"
+				" %#04x  partner %#04x.\n", dev->name,
+				adv, lpa);
+		}
+
+		/* read MII int status to clear the flag */
+		readw((void *)(ioaddr + MIntrStatus));
+		check_link(dev);
+	}
+	if (intr_status & StatsMax) {
+		__get_stats(dev);
+	}
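+	/* Tx FIFO underrun: raise the transmit drain threshold so more data
+	 * is buffered before the chip starts transmitting. */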
+	if (intr_status & IntrTxUnderrun) {
+		if ((np->tx_config & TxDrthMask) < 62)
+			np->tx_config += 2;
+		if (netif_msg_tx_err(np))
+			rtdm_printk(KERN_NOTICE
+				"%s: increased Tx threshold, txcfg %#08x.\n",
+				dev->name, np->tx_config);
+		writel(np->tx_config, (void *)(ioaddr + TxConfig));
+	}
+	if (intr_status & WOLPkt && netif_msg_wol(np)) {
+		int wol_status = readl((void *)(ioaddr + WOLCmd));
+		rtdm_printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
+			dev->name, wol_status);
+	}
+	if (intr_status & RxStatusFIFOOver) {
+		if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
+			rtdm_printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
+				dev->name);
+		}
+		np->stats.rx_fifo_errors++;
+	}
+	/* Hmmmmm, it's not clear how to recover from PCI faults. */
+	if (intr_status & IntrPCIErr) {
+		rtdm_printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
+			intr_status & IntrPCIErr);
+		np->stats.tx_fifo_errors++;
+		np->stats.rx_fifo_errors++;
+	}
+	rtdm_lock_put(&np->lock);
+}
+
+static void __get_stats(struct rtnet_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = dev->priv;
+
+	/* The chip only needs to report frames it silently dropped. */
+	np->stats.rx_crc_errors	+= readl((void *)(ioaddr + RxCRCErrs));
+	np->stats.rx_missed_errors += readl((void *)(ioaddr + RxMissed));
+}
+
+static struct net_device_stats *get_stats(struct rtnet_device *rtdev)
+{
+	struct netdev_private *np = rtdev->priv;
+	rtdm_lockctx_t context;
+
+	/* The chip only needs to report frames it silently dropped. */
+	rtdm_lock_get_irqsave(&np->lock, context);
+	if (rtnetif_running(rtdev) && !np->hands_off)
+		__get_stats(rtdev);
+	rtdm_lock_put_irqrestore(&np->lock, context);
+
+	return &np->stats;
+}
+
+#define HASH_TABLE	0x200
+static void __set_rx_mode(struct rtnet_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = dev->priv;
+	u8 mc_filter[64]; /* Multicast hash filter */
+	u32 rx_mode;
+
+	if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+		/* Unconditionally log net taps. */
+		rtdm_printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
+			dev->name);
+		rx_mode = RxFilterEnable | AcceptBroadcast
+			| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
+	} else if (dev->flags & IFF_ALLMULTI) {
+		rx_mode = RxFilterEnable | AcceptBroadcast
+			| AcceptAllMulticast | AcceptMyPhys;
+	} else {
+		int i;
+
+		memset(mc_filter, 0, sizeof(mc_filter));
+		rx_mode = RxFilterEnable | AcceptBroadcast
+			| AcceptMulticast | AcceptMyPhys;
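+		/* No multicast addresses are programmed here, so the hash
+		 * filter is simply written back cleared. */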
+		for (i = 0; i < 64; i += 2) {
+			writew(HASH_TABLE + i, (void *)(ioaddr + RxFilterAddr));
+			writew((mc_filter[i+1]<<8) + mc_filter[i],
+				(void *)(ioaddr + RxFilterData));
+		}
+	}
+	writel(rx_mode, (void *)(ioaddr + RxFilterAddr));
+	np->cur_rx_mode = rx_mode;
+}
+/*** RTnet
+static void set_rx_mode(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+	spin_lock_irq(&np->lock);
+	if (!np->hands_off)
+		__set_rx_mode(dev);
+	spin_unlock_irq(&np->lock);
+}
+RTnet ***/
+/*** RTnet ***/
+/*** RTnet ***/
+
+static void enable_wol_mode(struct rtnet_device *dev, int enable_intr)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = dev->priv;
+
+	if (netif_msg_wol(np))
+		rtdm_printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
+			dev->name);
+
+	/* For WOL we must restart the rx process in silent mode.
+	 * Write NULL to the RxRingPtr. Only possible if
+	 * rx process is stopped
+	 */
+	writel(0, (void *)(ioaddr + RxRingPtr));
+
+	/* read WoL status to clear */
+	readl((void *)(ioaddr + WOLCmd));
+
+	/* PME on, clear status */
+	writel(np->SavedClkRun | PMEEnable | PMEStatus, (void *)(ioaddr + ClkRun));
+
+	/* and restart the rx process */
+	writel(RxOn, (void *)(ioaddr + ChipCmd));
+
+	if (enable_intr) {
+		/* enable the WOL interrupt.
+		 * Could be used to send a netlink message.
+		 */
+		writel(WOLPkt | LinkChange, (void *)(ioaddr + IntrMask));
+		writel(1, (void *)(ioaddr + IntrEnable));
+	}
+}
+
+static int netdev_close(struct rtnet_device *dev)
+{
+	int i;
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = dev->priv;
+
+	if (netif_msg_ifdown(np))
+		rtdm_printk(KERN_DEBUG
+			"%s: Shutting down ethercard, status was %#04x.\n",
+			dev->name, (int)readl((void *)(ioaddr + ChipCmd)));
+	if (netif_msg_pktdata(np))
+		rtdm_printk(KERN_DEBUG
+			"%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
+			dev->name, np->cur_tx, np->dirty_tx,
+			np->cur_rx, np->dirty_rx);
+
+	/*
+	 * FIXME: what if someone tries to close a device
+	 * that is suspended?
+	 * Should we reenable the nic to switch to
+	 * the final WOL settings?
+	 */
+/*** RTnet ***
+	del_timer_sync(&np->timer);
+ *** RTnet ***/
+/*	disable_irq(dev->irq);*/
+	rtdm_irq_disable(&np->irq_handle);
+	rtdm_lock_get(&np->lock);
+	/* Disable interrupts, and flush posted writes */
+	writel(0, (void *)(ioaddr + IntrEnable));
+	readl((void *)(ioaddr + IntrEnable));
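+	/* Tell all other code paths to keep their hands off the hardware. */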
+	np->hands_off = 1;
+	rtdm_lock_put(&np->lock);
+
+/*** RTnet ***/
+	if ((i = rtdm_irq_free(&np->irq_handle)) < 0)
+		return i;
+
+	rt_stack_disconnect(dev);
+/*** RTnet ***/
+
+/*	enable_irq(dev->irq);*/
+
+/*	free_irq(dev->irq, dev);*/
+
+	/* Interrupt disabled, interrupt handler released,
+	 * queue stopped, timer deleted, rtnl_lock held
+	 * All async codepaths that access the driver are disabled.
+	 */
+	rtdm_lock_get(&np->lock);
+	np->hands_off = 0;
+	readl((void *)(ioaddr + IntrMask));
+	readw((void *)(ioaddr + MIntrStatus));
+
+	/* Freeze Stats */
+	writel(StatsFreeze, (void *)(ioaddr + StatsCtrl));
+
+	/* Stop the chip's Tx and Rx processes. */
+	natsemi_stop_rxtx(dev);
+
+	__get_stats(dev);
+	rtdm_lock_put(&np->lock);
+
+	/* clear the carrier last - an interrupt could reenable it otherwise */
+	rtnetif_carrier_off(dev);
+	rtnetif_stop_queue(dev);
+
+	dump_ring(dev);
+	drain_ring(dev);
+	free_ring(dev);
+
+	{
+		u32 wol = readl((void *)(ioaddr + WOLCmd)) & WakeOptsSummary;
+		if (wol) {
+			/* restart the NIC in WOL mode.
+			 * The nic must be stopped for this.
+			 */
+			enable_wol_mode(dev, 0);
+		} else {
+			/* Restore PME enable bit unmolested */
+			writel(np->SavedClkRun, (void *)(ioaddr + ClkRun));
+		}
+	}
+
+	return 0;
+}
+
+
+static void natsemi_remove1 (struct pci_dev *pdev)
+{
+
+ /*** RTnet ***/
+	struct rtnet_device *dev = pci_get_drvdata(pdev);
+
+	rt_unregister_rtnetdev(dev);
+	rt_rtdev_disconnect(dev);
+/*** RTnet ***/
+
+	pci_release_regions (pdev);
+	iounmap ((char *) dev->base_addr);
+	rtdev_free(dev); /*** RTnet ***/
+	pci_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+
+/*
+ * The ns83815 chip doesn't have explicit RxStop bits.
+ * Kicking the Rx or Tx process for a new packet reenables the Rx process
+ * of the nic, thus this function must be very careful:
+ *
+ * suspend/resume synchronization:
+ * entry points:
+ *   netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler,
+ *   start_tx, tx_timeout
+ *
+ * No function accesses the hardware without checking np->hands_off.
+ *	the check occurs under spin_lock_irq(&np->lock);
+ * exceptions:
+ *	* netdev_ioctl: noncritical access.
+ *	* netdev_open: cannot happen due to the device_detach
+ *	* netdev_close: doesn't hurt.
+ *	* netdev_timer: timer stopped by natsemi_suspend.
+ *	* intr_handler: doesn't acquire the spinlock. suspend calls
+ *		disable_irq() to enforce synchronization.
+ *
+ * Interrupts must be disabled, otherwise hands_off can cause irq storms.
+ */
+
+#endif /* CONFIG_PM */
+
+static struct pci_driver natsemi_driver = {
+	.name		= DRV_NAME,
+	.id_table	= natsemi_pci_tbl,
+	.probe		= natsemi_probe1,
+	.remove		= natsemi_remove1,
+/*#ifdef CONFIG_PM*/
+};
+
+static int __init natsemi_init_mod (void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+	rtdm_printk(version);
+#endif
+
+	return pci_register_driver (&natsemi_driver);
+}
+
+static void __exit natsemi_exit_mod (void)
+{
+	pci_unregister_driver (&natsemi_driver);
+}
+
+module_init(natsemi_init_mod);
+module_exit(natsemi_exit_mod);
+++ linux-patched/drivers/xenomai/net/drivers/e1000/e1000_hw.c	2022-03-21 12:58:29.505887830 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
+  
+  This program is free software; you can redistribute it and/or modify it 
+  under the terms of the GNU General Public License as published by the Free 
+  Software Foundation; either version 2 of the License, or (at your option) 
+  any later version.
+  
+  This program is distributed in the hope that it will be useful, but WITHOUT 
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
+  more details.
+  
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59 
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+  
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+  
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_hw.c
+ * Shared functions for accessing and configuring the MAC
+ */
+
+
+#include "e1000_hw.h"
+
+static int32_t e1000_set_phy_type(struct e1000_hw *hw);
+static void e1000_phy_init_script(struct e1000_hw *hw);
+static int32_t e1000_setup_copper_link(struct e1000_hw *hw);
+static int32_t e1000_setup_fiber_serdes_link(struct e1000_hw *hw);
+static int32_t e1000_adjust_serdes_amplitude(struct e1000_hw *hw);
+static int32_t e1000_phy_force_speed_duplex(struct e1000_hw *hw);
+static int32_t e1000_config_mac_to_phy(struct e1000_hw *hw);
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, uint32_t *ctrl);
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, uint32_t *ctrl);
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, uint32_t data,
+                                     uint16_t count);
+static uint16_t e1000_shift_in_mdi_bits(struct e1000_hw *hw);
+static int32_t e1000_phy_reset_dsp(struct e1000_hw *hw);
+static int32_t e1000_write_eeprom_spi(struct e1000_hw *hw, uint16_t offset,
+                                      uint16_t words, uint16_t *data);
+static int32_t e1000_write_eeprom_microwire(struct e1000_hw *hw,
+                                            uint16_t offset, uint16_t words,
+                                            uint16_t *data);
+static int32_t e1000_spi_eeprom_ready(struct e1000_hw *hw);
+static void e1000_raise_ee_clk(struct e1000_hw *hw, uint32_t *eecd);
+static void e1000_lower_ee_clk(struct e1000_hw *hw, uint32_t *eecd);
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, uint16_t data,
+                                    uint16_t count);
+static int32_t e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
+                                      uint16_t phy_data);
+static int32_t e1000_read_phy_reg_ex(struct e1000_hw *hw,uint32_t reg_addr,
+                                     uint16_t *phy_data);
+static uint16_t e1000_shift_in_ee_bits(struct e1000_hw *hw, uint16_t count);
+static int32_t e1000_acquire_eeprom(struct e1000_hw *hw);
+static void e1000_release_eeprom(struct e1000_hw *hw);
+static void e1000_standby_eeprom(struct e1000_hw *hw);
+static int32_t e1000_set_vco_speed(struct e1000_hw *hw);
+static int32_t e1000_polarity_reversal_workaround(struct e1000_hw *hw);
+static int32_t e1000_set_phy_mode(struct e1000_hw *hw);
+static int32_t e1000_host_if_read_cookie(struct e1000_hw *hw, uint8_t *buffer);
+static uint8_t e1000_calculate_mng_checksum(char *buffer, uint32_t length);
+static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw,
+                                               uint16_t duplex);
+static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
+
+/* IGP cable length table */
+static const
+uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
+    { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+      5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
+      25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
+      40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60,
+      60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90,
+      90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+      100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
+      110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120};
+
+static const
+uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
+    { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+      0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+      6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+      21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+      40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+      60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+      83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+      104, 109, 114, 118, 121, 124};
+
+
+/******************************************************************************
+ * Set the phy type member in the hw struct.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_set_phy_type(struct e1000_hw *hw)
+{
+    DEBUGFUNC("e1000_set_phy_type");
+
+    if (hw->mac_type == e1000_undefined)
+        return -E1000_ERR_PHY_TYPE;
+
+    switch (hw->phy_id) {
+    case M88E1000_E_PHY_ID:
+    case M88E1000_I_PHY_ID:
+    case M88E1011_I_PHY_ID:
+    case M88E1111_I_PHY_ID:
+        hw->phy_type = e1000_phy_m88;
+        break;
+    case IGP01E1000_I_PHY_ID:
+        if (hw->mac_type == e1000_82541 ||
+            hw->mac_type == e1000_82541_rev_2 ||
+            hw->mac_type == e1000_82547 ||
+            hw->mac_type == e1000_82547_rev_2) {
+            hw->phy_type = e1000_phy_igp;
+            break;
+        }
+        fallthrough;
+    case IGP03E1000_E_PHY_ID:
+        hw->phy_type = e1000_phy_igp_3;
+        break;
+    case IFE_E_PHY_ID:
+    case IFE_PLUS_E_PHY_ID:
+    case IFE_C_E_PHY_ID:
+        hw->phy_type = e1000_phy_ife;
+        break;
+    case GG82563_E_PHY_ID:
+        if (hw->mac_type == e1000_80003es2lan) {
+            hw->phy_type = e1000_phy_gg82563;
+            break;
+        }
+        fallthrough;
+    default:
+        /* Should never have loaded on this device */
+        hw->phy_type = e1000_phy_undefined;
+        return -E1000_ERR_PHY_TYPE;
+    }
+
+    return E1000_SUCCESS;
+}
+
+
+/******************************************************************************
+ * IGP phy init script - initializes the GbE PHY
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+e1000_phy_init_script(struct e1000_hw *hw)
+{
+    uint32_t ret_val;
+    uint16_t phy_saved_data;
+
+    DEBUGFUNC("e1000_phy_init_script");
+
+    if (hw->phy_init_script) {
+        msec_delay(20);
+
+        /* Save off the current value of register 0x2F5B to be restored at
+         * the end of this routine. */
+        ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+        /* Disable the PHY transmitter */
+        e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+        msec_delay(20);
+
+        e1000_write_phy_reg(hw,0x0000,0x0140);
+
+        msec_delay(5);
+
+        switch (hw->mac_type) {
+        case e1000_82541:
+        case e1000_82547:
+            e1000_write_phy_reg(hw, 0x1F95, 0x0001);
+
+            e1000_write_phy_reg(hw, 0x1F71, 0xBD21);
+
+            e1000_write_phy_reg(hw, 0x1F79, 0x0018);
+
+            e1000_write_phy_reg(hw, 0x1F30, 0x1600);
+
+            e1000_write_phy_reg(hw, 0x1F31, 0x0014);
+
+            e1000_write_phy_reg(hw, 0x1F32, 0x161C);
+
+            e1000_write_phy_reg(hw, 0x1F94, 0x0003);
+
+            e1000_write_phy_reg(hw, 0x1F96, 0x003F);
+
+            e1000_write_phy_reg(hw, 0x2010, 0x0008);
+            break;
+
+        case e1000_82541_rev_2:
+        case e1000_82547_rev_2:
+            e1000_write_phy_reg(hw, 0x1F73, 0x0099);
+            break;
+        default:
+            break;
+        }
+
+        e1000_write_phy_reg(hw, 0x0000, 0x3300);
+
+        msec_delay(20);
+
+        /* Now enable the transmitter */
+        e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+        if (hw->mac_type == e1000_82547) {
+            uint16_t fused, fine, coarse;
+
+            /* Move to analog registers page */
+            e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused);
+
+            if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
+                e1000_read_phy_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, &fused);
+
+                fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
+                coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
+
+                if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
+                    coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
+                    fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
+                } else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+                    fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
+
+                fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
+                        (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
+                        (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK);
+
+                e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_CONTROL, fused);
+                e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_BYPASS,
+                                    IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
+            }
+        }
+    }
+}
+
+/******************************************************************************
+ * Set the mac type member in the hw struct.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_set_mac_type(struct e1000_hw *hw)
+{
+    DEBUGFUNC("e1000_set_mac_type");
+
+    switch (hw->device_id) {
+    case E1000_DEV_ID_82542:
+        switch (hw->revision_id) {
+        case E1000_82542_2_0_REV_ID:
+            hw->mac_type = e1000_82542_rev2_0;
+            break;
+        case E1000_82542_2_1_REV_ID:
+            hw->mac_type = e1000_82542_rev2_1;
+            break;
+        default:
+            /* Invalid 82542 revision ID */
+            return -E1000_ERR_MAC_TYPE;
+        }
+        break;
+    case E1000_DEV_ID_82543GC_FIBER:
+    case E1000_DEV_ID_82543GC_COPPER:
+        hw->mac_type = e1000_82543;
+        break;
+    case E1000_DEV_ID_82544EI_COPPER:
+    case E1000_DEV_ID_82544EI_FIBER:
+    case E1000_DEV_ID_82544GC_COPPER:
+    case E1000_DEV_ID_82544GC_LOM:
+        hw->mac_type = e1000_82544;
+        break;
+    case E1000_DEV_ID_82540EM:
+    case E1000_DEV_ID_82540EM_LOM:
+    case E1000_DEV_ID_82540EP:
+    case E1000_DEV_ID_82540EP_LOM:
+    case E1000_DEV_ID_82540EP_LP:
+        hw->mac_type = e1000_82540;
+        break;
+    case E1000_DEV_ID_82545EM_COPPER:
+    case E1000_DEV_ID_82545EM_FIBER:
+        hw->mac_type = e1000_82545;
+        break;
+    case E1000_DEV_ID_82545GM_COPPER:
+    case E1000_DEV_ID_82545GM_FIBER:
+    case E1000_DEV_ID_82545GM_SERDES:
+        hw->mac_type = e1000_82545_rev_3;
+        break;
+    case E1000_DEV_ID_82546EB_COPPER:
+    case E1000_DEV_ID_82546EB_FIBER:
+    case E1000_DEV_ID_82546EB_QUAD_COPPER:
+        hw->mac_type = e1000_82546;
+        break;
+    case E1000_DEV_ID_82546GB_COPPER:
+    case E1000_DEV_ID_82546GB_FIBER:
+    case E1000_DEV_ID_82546GB_SERDES:
+    case E1000_DEV_ID_82546GB_PCIE:
+    case E1000_DEV_ID_82546GB_QUAD_COPPER:
+    case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+        hw->mac_type = e1000_82546_rev_3;
+        break;
+    case E1000_DEV_ID_82541EI:
+    case E1000_DEV_ID_82541EI_MOBILE:
+    case E1000_DEV_ID_82541ER_LOM:
+        hw->mac_type = e1000_82541;
+        break;
+    case E1000_DEV_ID_82541ER:
+    case E1000_DEV_ID_82541GI:
+    case E1000_DEV_ID_82541GI_LF:
+    case E1000_DEV_ID_82541GI_MOBILE:
+        hw->mac_type = e1000_82541_rev_2;
+        break;
+    case E1000_DEV_ID_82547EI:
+    case E1000_DEV_ID_82547EI_MOBILE:
+        hw->mac_type = e1000_82547;
+        break;
+    case E1000_DEV_ID_82547GI:
+        hw->mac_type = e1000_82547_rev_2;
+        break;
+    case E1000_DEV_ID_82571EB_COPPER:
+    case E1000_DEV_ID_82571EB_FIBER:
+    case E1000_DEV_ID_82571EB_SERDES:
+    case E1000_DEV_ID_82571EB_QUAD_COPPER:
+    case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
+            hw->mac_type = e1000_82571;
+        break;
+    case E1000_DEV_ID_82572EI_COPPER:
+    case E1000_DEV_ID_82572EI_FIBER:
+    case E1000_DEV_ID_82572EI_SERDES:
+    case E1000_DEV_ID_82572EI:
+        hw->mac_type = e1000_82572;
+        break;
+    case E1000_DEV_ID_82573E:
+    case E1000_DEV_ID_82573E_IAMT:
+    case E1000_DEV_ID_82573L:
+        hw->mac_type = e1000_82573;
+        break;
+    case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
+    case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
+    case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
+    case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+        hw->mac_type = e1000_80003es2lan;
+        break;
+    case E1000_DEV_ID_ICH8_IGP_M_AMT:
+    case E1000_DEV_ID_ICH8_IGP_AMT:
+    case E1000_DEV_ID_ICH8_IGP_C:
+    case E1000_DEV_ID_ICH8_IFE:
+    case E1000_DEV_ID_ICH8_IFE_GT:
+    case E1000_DEV_ID_ICH8_IFE_G:
+    case E1000_DEV_ID_ICH8_IGP_M:
+        hw->mac_type = e1000_ich8lan;
+        break;
+    default:
+        /* Should never have loaded on this device */
+        return -E1000_ERR_MAC_TYPE;
+    }
+
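+    /* Note which optional firmware/semaphore features this MAC provides. */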
+    switch (hw->mac_type) {
+    case e1000_ich8lan:
+        hw->swfwhw_semaphore_present = TRUE;
+        hw->asf_firmware_present = TRUE;
+        break;
+    case e1000_80003es2lan:
+        hw->swfw_sync_present = TRUE;
+        fallthrough;
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_82573:
+        hw->eeprom_semaphore_present = TRUE;
+        fallthrough;
+    case e1000_82541:
+    case e1000_82547:
+    case e1000_82541_rev_2:
+    case e1000_82547_rev_2:
+        hw->asf_firmware_present = TRUE;
+        break;
+    default:
+        break;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ * Set media type and TBI compatibility.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * **************************************************************************/
+void
+e1000_set_media_type(struct e1000_hw *hw)
+{
+    uint32_t status;
+
+    DEBUGFUNC("e1000_set_media_type");
+
+    if (hw->mac_type != e1000_82543) {
+        /* tbi_compatibility is only valid on 82543 */
+        hw->tbi_compatibility_en = FALSE;
+    }
+
+    switch (hw->device_id) {
+    case E1000_DEV_ID_82545GM_SERDES:
+    case E1000_DEV_ID_82546GB_SERDES:
+    case E1000_DEV_ID_82571EB_SERDES:
+    case E1000_DEV_ID_82572EI_SERDES:
+    case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+        hw->media_type = e1000_media_type_internal_serdes;
+        break;
+    default:
+        switch (hw->mac_type) {
+        case e1000_82542_rev2_0:
+        case e1000_82542_rev2_1:
+            hw->media_type = e1000_media_type_fiber;
+            break;
+        case e1000_ich8lan:
+        case e1000_82573:
+            /* The STATUS_TBIMODE bit is reserved or reused for this
+             * device.
+             */
+            hw->media_type = e1000_media_type_copper;
+            break;
+        default:
+            status = E1000_READ_REG(hw, STATUS);
+            if (status & E1000_STATUS_TBIMODE) {
+                hw->media_type = e1000_media_type_fiber;
+                /* tbi_compatibility not valid on fiber */
+                hw->tbi_compatibility_en = FALSE;
+            } else {
+                hw->media_type = e1000_media_type_copper;
+            }
+            break;
+        }
+    }
+}
+
+/******************************************************************************
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_reset_hw(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+    uint32_t ctrl_ext;
+    uint32_t icr;
+    uint32_t manc;
+    uint32_t led_ctrl;
+    uint32_t timeout;
+    uint32_t extcnf_ctrl;
+    int32_t ret_val;
+
+    DEBUGFUNC("e1000_reset_hw");
+
+    /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
+    if (hw->mac_type == e1000_82542_rev2_0) {
+        DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+        e1000_pci_clear_mwi(hw);
+    }
+
+    if (hw->bus_type == e1000_bus_type_pci_express) {
+        /* Prevent the PCI-E bus from sticking if there is no TLP connection
+         * on the last TLP read/write transaction when MAC is reset.
+         */
+        if (e1000_disable_pciex_master(hw) != E1000_SUCCESS) {
+            DEBUGOUT("PCI-E Master disable polling has failed.\n");
+        }
+    }
+
+    /* Clear interrupt mask to stop board from generating interrupts */
+    DEBUGOUT("Masking off all interrupts\n");
+    E1000_WRITE_REG(hw, IMC, 0xffffffff);
+
+    /* Disable the Transmit and Receive units.  Then delay to allow
+     * any pending transactions to complete before we hit the MAC with
+     * the global reset.
+     */
+    E1000_WRITE_REG(hw, RCTL, 0);
+    E1000_WRITE_REG(hw, TCTL, E1000_TCTL_PSP);
+    E1000_WRITE_FLUSH(hw);
+
+    /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */
+    hw->tbi_compatibility_on = FALSE;
+
+    /* Delay to allow any outstanding PCI transactions to complete before
+     * resetting the device
+     */
+    msec_delay(10);
+
+    ctrl = E1000_READ_REG(hw, CTRL);
+
+    /* Must reset the PHY before resetting the MAC */
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+        E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST));
+        msec_delay(5);
+    }
+
+    /* Must acquire the MDIO ownership before MAC reset.
+     * Ownership defaults to firmware after a reset. */
+    if (hw->mac_type == e1000_82573) {
+        timeout = 10;
+
+        extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+        extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+        do {
+            E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
+            extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+
+            if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+                break;
+            else
+                extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+            msec_delay(2);
+            timeout--;
+        } while (timeout);
+    }
+
+    /* Workaround for ICH8 bit corruption issue in FIFO memory */
+    if (hw->mac_type == e1000_ich8lan) {
+        /* Set Tx and Rx buffer allocation to 8k apiece. */
+        E1000_WRITE_REG(hw, PBA, E1000_PBA_8K);
+        /* Set Packet Buffer Size to 16k. */
+        E1000_WRITE_REG(hw, PBS, E1000_PBS_16K);
+    }
+
+    /* Issue a global reset to the MAC.  This will reset the chip's
+     * transmit, receive, DMA, and link units.  It will not affect
+     * the current PCI configuration.  The global reset bit is self-
+     * clearing, and should clear within a microsecond.
+     */
+    DEBUGOUT("Issuing a global reset to MAC\n");
+
+    switch (hw->mac_type) {
+        case e1000_82544:
+        case e1000_82540:
+        case e1000_82545:
+        case e1000_82546:
+        case e1000_82541:
+        case e1000_82541_rev_2:
+            /* These controllers can't ack the 64-bit write when issuing the
+             * reset, so use IO-mapping as a workaround to issue the reset */
+            E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
+            break;
+        case e1000_82545_rev_3:
+        case e1000_82546_rev_3:
+            /* Reset is performed on a shadow of the control register */
+            E1000_WRITE_REG(hw, CTRL_DUP, (ctrl | E1000_CTRL_RST));
+            break;
+        case e1000_ich8lan:
+            if (!hw->phy_reset_disable &&
+                e1000_check_phy_reset_block(hw) == E1000_SUCCESS) {
+                /* e1000_ich8lan PHY HW reset requires MAC CORE reset
+                 * at the same time to make sure the interface between
+                 * MAC and the external PHY is reset.
+                 */
+                ctrl |= E1000_CTRL_PHY_RST;
+            }
+
+            e1000_get_software_flag(hw);
+            E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST));
+            msec_delay(5);
+            break;
+        default:
+            E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST));
+            break;
+    }
+
+    /* After MAC reset, force reload of EEPROM to restore power-on settings to
+     * device.  Later controllers reload the EEPROM automatically, so just wait
+     * for reload to complete.
+     */
+    switch (hw->mac_type) {
+        case e1000_82542_rev2_0:
+        case e1000_82542_rev2_1:
+        case e1000_82543:
+        case e1000_82544:
+            /* Wait for reset to complete */
+            usec_delay(10);
+            ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+            ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+            E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+            E1000_WRITE_FLUSH(hw);
+            /* Wait for EEPROM reload */
+            msec_delay(2);
+            break;
+        case e1000_82541:
+        case e1000_82541_rev_2:
+        case e1000_82547:
+        case e1000_82547_rev_2:
+            /* Wait for EEPROM reload */
+            msec_delay(20);
+            break;
+        case e1000_82573:
+            if (e1000_is_onboard_nvm_eeprom(hw) == FALSE) {
+                usec_delay(10);
+                ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+                ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+                E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+                E1000_WRITE_FLUSH(hw);
+            }
+            fallthrough;
+        case e1000_82571:
+        case e1000_82572:
+        case e1000_ich8lan:
+        case e1000_80003es2lan:
+            ret_val = e1000_get_auto_rd_done(hw);
+            if (ret_val)
+                /* We don't want to continue accessing MAC registers. */
+                return ret_val;
+            break;
+        default:
+            /* Wait for EEPROM reload (it happens automatically) */
+            msec_delay(5);
+            break;
+    }
+
+    /* Disable HW ARPs */
+    manc = E1000_READ_REG(hw, MANC);
+    manc &= ~(E1000_MANC_ARP_EN | E1000_MANC_ARP_RES_EN);
+    E1000_WRITE_REG(hw, MANC, manc);
+
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+        e1000_phy_init_script(hw);
+
+        /* Configure activity LED after PHY reset */
+        led_ctrl = E1000_READ_REG(hw, LEDCTL);
+        led_ctrl &= IGP_ACTIVITY_LED_MASK;
+        led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+        E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
+    }
+
+    /* Clear interrupt mask to stop board from generating interrupts */
+    DEBUGOUT("Masking off all interrupts\n");
+    E1000_WRITE_REG(hw, IMC, 0xffffffff);
+
+    /* Clear any pending interrupt events. */
+    icr = E1000_READ_REG(hw, ICR);
+
+    /* If MWI was previously enabled, reenable it. */
+    if (hw->mac_type == e1000_82542_rev2_0) {
+        if (hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+            e1000_pci_set_mwi(hw);
+    }
+
+    if (hw->mac_type == e1000_ich8lan) {
+        uint32_t kab = E1000_READ_REG(hw, KABGTXD);
+        kab |= E1000_KABGTXD_BGSQLBIAS;
+        E1000_WRITE_REG(hw, KABGTXD, kab);
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Performs basic configuration of the adapter.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Assumes that the controller has previously been reset and is in a
+ * post-reset uninitialized state. Initializes the receive address registers,
+ * multicast table, and VLAN filter table. Calls routines to setup link
+ * configuration and flow control settings. Clears all on-chip counters. Leaves
+ * the transmit and receive units disabled and uninitialized.
+ *****************************************************************************/
+int32_t
+e1000_init_hw(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+    uint32_t i;
+    int32_t ret_val;
+    uint16_t pcix_cmd_word;
+    uint16_t pcix_stat_hi_word;
+    uint16_t cmd_mmrbc;
+    uint16_t stat_mmrbc;
+    uint32_t mta_size;
+    uint32_t reg_data;
+    uint32_t ctrl_ext;
+
+    DEBUGFUNC("e1000_init_hw");
+
+    /* Initialize Identification LED */
+    ret_val = e1000_id_led_init(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Initializing Identification LED\n");
+        return ret_val;
+    }
+
+    /* Set the media type and TBI compatibility */
+    e1000_set_media_type(hw);
+
+    /* Disabling VLAN filtering. */
+    DEBUGOUT("Initializing the IEEE VLAN\n");
+    /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */
+    if (hw->mac_type != e1000_ich8lan) {
+        if (hw->mac_type < e1000_82545_rev_3)
+            E1000_WRITE_REG(hw, VET, 0);
+        e1000_clear_vfta(hw);
+    }
+
+    /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
+    if (hw->mac_type == e1000_82542_rev2_0) {
+        DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+        e1000_pci_clear_mwi(hw);
+        E1000_WRITE_REG(hw, RCTL, E1000_RCTL_RST);
+        E1000_WRITE_FLUSH(hw);
+        msec_delay(5);
+    }
+
+    /* Setup the receive address. This involves initializing all of the Receive
+     * Address Registers (RARs 0 - 15).
+     */
+    e1000_init_rx_addrs(hw);
+
+    /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
+    if (hw->mac_type == e1000_82542_rev2_0) {
+        E1000_WRITE_REG(hw, RCTL, 0);
+        E1000_WRITE_FLUSH(hw);
+        msec_delay(1);
+        if (hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+            e1000_pci_set_mwi(hw);
+    }
+
+    /* Zero out the Multicast HASH table */
+    DEBUGOUT("Zeroing the MTA\n");
+    mta_size = E1000_MC_TBL_SIZE;
+    if (hw->mac_type == e1000_ich8lan)
+        mta_size = E1000_MC_TBL_SIZE_ICH8LAN;
+    for (i = 0; i < mta_size; i++) {
+        E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+        /* use write flush to prevent Memory Write Block (MWB) from
+         * occurring when accessing our register space */
+        E1000_WRITE_FLUSH(hw);
+    }
+
+    /* Set the PCI priority bit correctly in the CTRL register.  This
+     * determines if the adapter gives priority to receives, or if it
+     * gives equal priority to transmits and receives.  Valid only on
+     * 82542 and 82543 silicon.
+     */
+    if (hw->dma_fairness && hw->mac_type <= e1000_82543) {
+        ctrl = E1000_READ_REG(hw, CTRL);
+        E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PRIOR);
+    }
+
+    switch (hw->mac_type) {
+    case e1000_82545_rev_3:
+    case e1000_82546_rev_3:
+        break;
+    default:
+        /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
+        if (hw->bus_type == e1000_bus_type_pcix) {
+            e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd_word);
+            e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI,
+                &pcix_stat_hi_word);
+            cmd_mmrbc = (pcix_cmd_word & PCIX_COMMAND_MMRBC_MASK) >>
+                PCIX_COMMAND_MMRBC_SHIFT;
+            stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >>
+                PCIX_STATUS_HI_MMRBC_SHIFT;
+            if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
+                stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K;
+            if (cmd_mmrbc > stat_mmrbc) {
+                pcix_cmd_word &= ~PCIX_COMMAND_MMRBC_MASK;
+                pcix_cmd_word |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT;
+                e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER,
+                    &pcix_cmd_word);
+            }
+        }
+        break;
+    }
+
+    /* More time needed for PHY to initialize */
+    if (hw->mac_type == e1000_ich8lan)
+        msec_delay(15);
+
+    /* Call a subroutine to configure the link and setup flow control. */
+    ret_val = e1000_setup_link(hw);
+
+    /* Set the transmit descriptor write-back policy */
+    if (hw->mac_type > e1000_82544) {
+        ctrl = E1000_READ_REG(hw, TXDCTL);
+        ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
+        switch (hw->mac_type) {
+        default:
+            break;
+        case e1000_82571:
+        case e1000_82572:
+        case e1000_82573:
+        case e1000_ich8lan:
+        case e1000_80003es2lan:
+            ctrl |= E1000_TXDCTL_COUNT_DESC;
+            break;
+        }
+        E1000_WRITE_REG(hw, TXDCTL, ctrl);
+    }
+
+    if (hw->mac_type == e1000_82573) {
+        e1000_enable_tx_pkt_filtering(hw);
+    }
+
+    switch (hw->mac_type) {
+    default:
+        break;
+    case e1000_80003es2lan:
+        /* Enable retransmit on late collisions */
+        reg_data = E1000_READ_REG(hw, TCTL);
+        reg_data |= E1000_TCTL_RTLC;
+        E1000_WRITE_REG(hw, TCTL, reg_data);
+
+        /* Configure Gigabit Carry Extend Padding */
+        reg_data = E1000_READ_REG(hw, TCTL_EXT);
+        reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
+        reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
+        E1000_WRITE_REG(hw, TCTL_EXT, reg_data);
+
+        /* Configure Transmit Inter-Packet Gap */
+        reg_data = E1000_READ_REG(hw, TIPG);
+        reg_data &= ~E1000_TIPG_IPGT_MASK;
+        reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
+        E1000_WRITE_REG(hw, TIPG, reg_data);
+
+        reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001);
+        reg_data &= ~0x00100000;
+        E1000_WRITE_REG_ARRAY(hw, FFLT, 0x0001, reg_data);
+        fallthrough;
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_ich8lan:
+        ctrl = E1000_READ_REG(hw, TXDCTL1);
+        ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
+        if (hw->mac_type >= e1000_82571)
+            ctrl |= E1000_TXDCTL_COUNT_DESC;
+        E1000_WRITE_REG(hw, TXDCTL1, ctrl);
+        break;
+    }
+
+
+
+    if (hw->mac_type == e1000_82573) {
+        uint32_t gcr = E1000_READ_REG(hw, GCR);
+        gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
+        E1000_WRITE_REG(hw, GCR, gcr);
+    }
+
+    /* Clear all of the statistics registers (clear on read).  It is
+     * important that we do this after we have tried to establish link
+     * because the symbol error count will increment wildly if there
+     * is no link.
+     */
+    e1000_clear_hw_cntrs(hw);
+
+    /* ICH8/Nahum No-snoop bits are opposite polarity.
+     * Set to snoop by default after reset. */
+    if (hw->mac_type == e1000_ich8lan)
+        e1000_set_pci_ex_no_snoop(hw, PCI_EX_82566_SNOOP_ALL);
+
+    if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
+        hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
+        ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+        /* Relaxed ordering must be disabled to avoid a parity
+         * error crash in a PCI slot. */
+        ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+        E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+    }
+
+    return ret_val;
+}
+
+/******************************************************************************
+ * Adjust SERDES output amplitude based on EEPROM setting.
+ *
+ * hw - Struct containing variables accessed by shared code.
+ *****************************************************************************/
+static int32_t
+e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
+{
+    uint16_t eeprom_data;
+    int32_t  ret_val;
+
+    DEBUGFUNC("e1000_adjust_serdes_amplitude");
+
+    if (hw->media_type != e1000_media_type_internal_serdes)
+        return E1000_SUCCESS;
+
+    switch (hw->mac_type) {
+    case e1000_82545_rev_3:
+    case e1000_82546_rev_3:
+        break;
+    default:
+        return E1000_SUCCESS;
+    }
+
+    ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, &eeprom_data);
+    if (ret_val) {
+        return ret_val;
+    }
+
+    if (eeprom_data != EEPROM_RESERVED_WORD) {
+        /* Adjust SERDES output amplitude only. */
+        eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Configures flow control and link settings.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Determines which flow control settings to use. Calls the appropriate media-
+ * specific link configuration function. Configures the flow control settings.
+ * Assuming the adapter has a valid link partner, a valid link should be
+ * established. Assumes the hardware has previously been reset and the
+ * transmitter and receiver are not enabled.
+ *****************************************************************************/
+int32_t
+e1000_setup_link(struct e1000_hw *hw)
+{
+    uint32_t ctrl_ext;
+    int32_t ret_val;
+    uint16_t eeprom_data;
+
+    DEBUGFUNC("e1000_setup_link");
+
+    /* In the case of the phy reset being blocked, we already have a link.
+     * We do not have to set it up again. */
+    if (e1000_check_phy_reset_block(hw))
+        return E1000_SUCCESS;
+
+    /* Read and store word 0x0F of the EEPROM. This word contains bits
+     * that determine the hardware's default PAUSE (flow control) mode,
+     * a bit that determines whether the HW defaults to enabling or
+     * disabling auto-negotiation, and the direction of the
+     * SW defined pins. If there is no SW override of the flow
+     * control setting, then the variable hw->fc will
+     * be initialized based on a value in the EEPROM.
+     */
+    if (hw->fc == e1000_fc_default) {
+        switch (hw->mac_type) {
+        case e1000_ich8lan:
+        case e1000_82573:
+            hw->fc = e1000_fc_full;
+            break;
+        default:
+            ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+                                        1, &eeprom_data);
+            if (ret_val) {
+                DEBUGOUT("EEPROM Read Error\n");
+                return -E1000_ERR_EEPROM;
+            }
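+            /* Decode the PAUSE bits from EEPROM word 0x0F: no bits set
+             * means no flow control, ASM_DIR alone selects Tx-only PAUSE,
+             * and any other combination defaults to full flow control. */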
+            if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
+                hw->fc = e1000_fc_none;
+            else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
+                    EEPROM_WORD0F_ASM_DIR)
+                hw->fc = e1000_fc_tx_pause;
+            else
+                hw->fc = e1000_fc_full;
+            break;
+        }
+    }
+
+    /* We want to save off the original Flow Control configuration just
+     * in case we get disconnected and then reconnected into a different
+     * hub or switch with different Flow Control capabilities.
+     */
+    if (hw->mac_type == e1000_82542_rev2_0)
+        hw->fc &= (~e1000_fc_tx_pause);
+
+    if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
+        hw->fc &= (~e1000_fc_rx_pause);
+
+    hw->original_fc = hw->fc;
+
+    DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc);
+
+    /* Take the 4 bits from EEPROM word 0x0F that determine the initial
+     * polarity value for the SW controlled pins, and setup the
+     * Extended Device Control reg with that info.
+     * This is needed because one of the SW controlled pins is used for
+     * signal detection.  So this should be done before e1000_setup_pcs_link()
+     * or e1000_phy_setup() is called.
+     */
+    if (hw->mac_type == e1000_82543) {
+        ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+                                    1, &eeprom_data);
+        if (ret_val) {
+            DEBUGOUT("EEPROM Read Error\n");
+            return -E1000_ERR_EEPROM;
+        }
+        ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
+                    SWDPIO__EXT_SHIFT);
+        E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+    }
+
+    /* Call the necessary subroutine to configure the link. */
+    ret_val = (hw->media_type == e1000_media_type_copper) ?
+              e1000_setup_copper_link(hw) :
+              e1000_setup_fiber_serdes_link(hw);
+
+    /* Initialize the flow control address, type, and PAUSE timer
+     * registers to their default values.  This is done even if flow
+     * control is disabled, because it does not hurt anything to
+     * initialize these registers.
+     */
+    DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
+
+    /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */
+    if (hw->mac_type != e1000_ich8lan) {
+        E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE);
+        E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+        E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW);
+    }
+
+    E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time);
+
+    /* Set the flow control receive threshold registers.  Normally,
+     * these registers will be set to a default threshold that may be
+     * adjusted later by the driver's runtime code.  However, if the
+     * ability to transmit pause frames is not enabled, then these
+     * registers will be set to 0.
+     */
+    if (!(hw->fc & e1000_fc_tx_pause)) {
+        E1000_WRITE_REG(hw, FCRTL, 0);
+        E1000_WRITE_REG(hw, FCRTH, 0);
+    } else {
+        /* We need to set up the Receive Threshold high and low water marks
+         * as well as (optionally) enabling the transmission of XON frames.
+         */
+        if (hw->fc_send_xon) {
+            E1000_WRITE_REG(hw, FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
+            E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water);
+        } else {
+            E1000_WRITE_REG(hw, FCRTL, hw->fc_low_water);
+            E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water);
+        }
+    }
+    return ret_val;
+}
+
+/******************************************************************************
+ * Sets up link for a fiber based or serdes based adapter
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Manipulates Physical Coding Sublayer functions in order to configure
+ * link. Assumes the hardware has been previously reset and the transmitter
+ * and receiver are not enabled.
+ *****************************************************************************/
+static int32_t
+e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+    uint32_t status;
+    uint32_t txcw = 0;
+    uint32_t i;
+    uint32_t signal = 0;
+    int32_t ret_val;
+
+    DEBUGFUNC("e1000_setup_fiber_serdes_link");
+
+    /* On 82571 and 82572 Fiber connections, SerDes loopback mode persists
+     * until explicitly turned off or a power cycle is performed.  A read to
+     * the register does not indicate its status.  Therefore, we ensure
+     * loopback mode is disabled during initialization.
+     */
+    if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572)
+        E1000_WRITE_REG(hw, SCTL, E1000_DISABLE_SERDES_LOOPBACK);
+
+    /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
+     * set when the optics detect a signal. On older adapters, it will be
+     * cleared when there is a signal.  This applies to fiber media only.
+     * If we're on serdes media, adjust the output amplitude to value set in
+     * the EEPROM.
+     */
+    ctrl = E1000_READ_REG(hw, CTRL);
+    if (hw->media_type == e1000_media_type_fiber)
+        signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+
+    ret_val = e1000_adjust_serdes_amplitude(hw);
+    if (ret_val)
+        return ret_val;
+
+    /* Take the link out of reset */
+    ctrl &= ~(E1000_CTRL_LRST);
+
+    /* Adjust VCO speed to improve BER performance */
+    ret_val = e1000_set_vco_speed(hw);
+    if (ret_val)
+        return ret_val;
+
+    e1000_config_collision_dist(hw);
+
+    /* Check for a software override of the flow control settings, and setup
+     * the device accordingly.  If auto-negotiation is enabled, then software
+     * will have to set the "PAUSE" bits to the correct value in the Transmit
+     * Config Word Register (TXCW) and re-start auto-negotiation.  However, if
+     * auto-negotiation is disabled, then software will have to manually
+     * configure the two flow control enable bits in the CTRL register.
+     *
+     * The possible values of the "fc" parameter are:
+     *      0:  Flow control is completely disabled
+     *      1:  Rx flow control is enabled (we can receive pause frames, but
+     *          not send pause frames).
+     *      2:  Tx flow control is enabled (we can send pause frames but we do
+     *          not support receiving pause frames).
+     *      3:  Both Rx and TX flow control (symmetric) are enabled.
+     */
+    switch (hw->fc) {
+    case e1000_fc_none:
+        /* Flow control is completely disabled by a software over-ride. */
+        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+        break;
+    case e1000_fc_rx_pause:
+        /* RX Flow control is enabled and TX Flow control is disabled by a
+         * software over-ride. Since there really isn't a way to advertise
+         * that we are capable of RX Pause ONLY, we will advertise that we
+         * support both symmetric and asymmetric RX PAUSE. Later, we will
+         * disable the adapter's ability to send PAUSE frames.
+         */
+        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+        break;
+    case e1000_fc_tx_pause:
+        /* TX Flow control is enabled, and RX Flow control is disabled, by a
+         * software over-ride.
+         */
+        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+        break;
+    case e1000_fc_full:
+        /* Flow control (both RX and TX) is enabled by a software over-ride. */
+        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+        break;
+    default:
+        DEBUGOUT("Flow control param set incorrectly\n");
+        return -E1000_ERR_CONFIG;
+        break;
+    }
+
+    /* Since auto-negotiation is enabled, take the link out of reset (the link
+     * will be in reset, because we previously reset the chip). This will
+     * restart auto-negotiation.  If auto-negotiation is successful then the
+     * link-up status bit will be set and the flow control enable bits (RFCE
+     * and TFCE) will be set according to their negotiated value.
+     */
+    DEBUGOUT("Auto-negotiation enabled\n");
+
+    E1000_WRITE_REG(hw, TXCW, txcw);
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+    E1000_WRITE_FLUSH(hw);
+
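+    /* Keep a copy of the TXCW value in the shared data structure so other
+     * routines can refer to it, or re-apply it, later on. */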
+    hw->txcw = txcw;
+    msec_delay(1);
+
+    /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
+     * indication in the Device Status Register.  Time-out if a link isn't
+     * seen in 500 milliseconds (Auto-negotiation should complete in
+     * less than 500 milliseconds even if the other end is doing it in SW).
+     * For internal serdes, we just assume a signal is present, then poll.
+     */
+    if (hw->media_type == e1000_media_type_internal_serdes ||
+       (E1000_READ_REG(hw, CTRL) & E1000_CTRL_SWDPIN1) == signal) {
+        DEBUGOUT("Looking for Link\n");
+        for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
+            msec_delay(10);
+            status = E1000_READ_REG(hw, STATUS);
+            if (status & E1000_STATUS_LU) break;
+        }
+        if (i == (LINK_UP_TIMEOUT / 10)) {
+            DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+            hw->autoneg_failed = 1;
+            /* AutoNeg failed to achieve a link, so we'll call
+             * e1000_check_for_link. This routine will force the link up if
+             * we detect a signal. This will allow us to communicate with
+             * non-autonegotiating link partners.
+             */
+            ret_val = e1000_check_for_link(hw);
+            if (ret_val) {
+                DEBUGOUT("Error while checking for link\n");
+                return ret_val;
+            }
+            hw->autoneg_failed = 0;
+        } else {
+            hw->autoneg_failed = 0;
+            DEBUGOUT("Valid Link Found\n");
+        }
+    } else {
+        DEBUGOUT("No Signal Detected\n");
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Make sure we have a valid PHY and change PHY mode before link setup.
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static int32_t
+e1000_copper_link_preconfig(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_copper_link_preconfig");
+
+    ctrl = E1000_READ_REG(hw, CTRL);
+    /* With 82543, we need to force speed and duplex on the MAC equal to what
+     * the PHY speed and duplex configuration is. In addition, we need to
+     * perform a hardware reset on the PHY to take it out of reset.
+     */
+    if (hw->mac_type > e1000_82543) {
+        ctrl |= E1000_CTRL_SLU;
+        ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+        E1000_WRITE_REG(hw, CTRL, ctrl);
+    } else {
+        ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
+        E1000_WRITE_REG(hw, CTRL, ctrl);
+        ret_val = e1000_phy_hw_reset(hw);
+        if (ret_val)
+            return ret_val;
+    }
+
+    /* Make sure we have a valid PHY */
+    ret_val = e1000_detect_gig_phy(hw);
+    if (ret_val) {
+        DEBUGOUT("Error, did not detect valid phy.\n");
+        return ret_val;
+    }
+    DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
+
+    /* Set PHY to class A mode (if necessary) */
+    ret_val = e1000_set_phy_mode(hw);
+    if (ret_val)
+        return ret_val;
+
+    if ((hw->mac_type == e1000_82545_rev_3) ||
+       (hw->mac_type == e1000_82546_rev_3)) {
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
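+        /* Set bit 3 (0x0008) of the M88E1000 PHY Specific Control register;
+         * the bit is applied as a raw mask here, without a symbolic name. */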
+        phy_data |= 0x00000008;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+    }
+
+    if (hw->mac_type <= e1000_82543 ||
+        hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
+        hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
+        hw->phy_reset_disable = FALSE;
+
+    return E1000_SUCCESS;
+}
+
+
+/********************************************************************
+* Copper link setup for e1000_phy_igp series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static int32_t
+e1000_copper_link_igp_setup(struct e1000_hw *hw)
+{
+    uint32_t led_ctrl;
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_copper_link_igp_setup");
+
+    if (hw->phy_reset_disable)
+        return E1000_SUCCESS;
+
+    ret_val = e1000_phy_reset(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Resetting the PHY\n");
+        return ret_val;
+    }
+
+    /* Wait 15ms for MAC to configure PHY from eeprom settings */
+    msec_delay(15);
+    if (hw->mac_type != e1000_ich8lan) {
+        /* Configure activity LED after PHY reset */
+        led_ctrl = E1000_READ_REG(hw, LEDCTL);
+        led_ctrl &= IGP_ACTIVITY_LED_MASK;
+        led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+        E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
+    }
+
+    /* disable lplu d3 during driver init */
+    ret_val = e1000_set_d3_lplu_state(hw, FALSE);
+    if (ret_val) {
+        DEBUGOUT("Error Disabling LPLU D3\n");
+        return ret_val;
+    }
+
+    /* disable lplu d0 during driver init */
+    ret_val = e1000_set_d0_lplu_state(hw, FALSE);
+    if (ret_val) {
+        DEBUGOUT("Error Disabling LPLU D0\n");
+        return ret_val;
+    }
+    /* Configure mdi-mdix settings */
+    ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+        hw->dsp_config_state = e1000_dsp_config_disabled;
+        /* Force MDI for earlier revs of the IGP PHY */
+        phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX | IGP01E1000_PSCR_FORCE_MDI_MDIX);
+        hw->mdix = 1;
+
+    } else {
+        hw->dsp_config_state = e1000_dsp_config_enabled;
+        phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+        switch (hw->mdix) {
+        case 1:
+            phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+            break;
+        case 2:
+            phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+            break;
+        case 0:
+        default:
+            phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
+            break;
+        }
+    }
+    ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    /* set auto-master slave resolution settings */
+    if (hw->autoneg) {
+        e1000_ms_type phy_ms_setting = hw->master_slave;
+
+        if (hw->ffe_config_state == e1000_ffe_config_active)
+            hw->ffe_config_state = e1000_ffe_config_enabled;
+
+        if (hw->dsp_config_state == e1000_dsp_config_activated)
+            hw->dsp_config_state = e1000_dsp_config_enabled;
+
+        /* When the auto-negotiation advertisement is 1000Mbps only, we
+         * should disable SmartSpeed and enable Auto MasterSlave
+         * resolution as hardware default. */
+        if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+            /* Disable SmartSpeed */
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+            phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+            /* Set auto Master/Slave resolution process */
+            ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+            if (ret_val)
+                return ret_val;
+            phy_data &= ~CR_1000T_MS_ENABLE;
+            ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+        ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* load defaults for future use */
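+        /* CR_1000T_MS_ENABLE set means manual master/slave selection is in
+         * effect and CR_1000T_MS_VALUE then picks master vs. slave; with the
+         * enable bit clear, master/slave is resolved automatically. */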
+        hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
+                                        ((phy_data & CR_1000T_MS_VALUE) ?
+                                         e1000_ms_force_master :
+                                         e1000_ms_force_slave) :
+                                         e1000_ms_auto;
+
+        switch (phy_ms_setting) {
+        case e1000_ms_force_master:
+            phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+            break;
+        case e1000_ms_force_slave:
+            phy_data |= CR_1000T_MS_ENABLE;
+            phy_data &= ~(CR_1000T_MS_VALUE);
+            break;
+        case e1000_ms_auto:
+            phy_data &= ~CR_1000T_MS_ENABLE;
+        default:
+            break;
+        }
+        ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Copper link setup for e1000_phy_gg82563 series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static int32_t
+e1000_copper_link_ggp_setup(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t phy_data;
+    uint32_t reg_data;
+
+    DEBUGFUNC("e1000_copper_link_ggp_setup");
+
+    if (!hw->phy_reset_disable) {
+
+        /* Enable CRS on TX for half-duplex operation. */
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+        /* Use a 25MHz TX clock for both the link-down and 1000Base-T states */
+        phy_data |= GG82563_MSCR_TX_CLK_1000MBPS_25MHZ;
+
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+                                      phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* Options:
+         *   MDI/MDI-X = 0 (default)
+         *   0 - Auto for all speeds
+         *   1 - MDI mode
+         *   2 - MDI-X mode
+         *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+         */
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
+
+        switch (hw->mdix) {
+        case 1:
+            phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
+            break;
+        case 2:
+            phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
+            break;
+        case 0:
+        default:
+            phy_data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
+            break;
+        }
+
+        /* Options:
+         *   disable_polarity_correction = 0 (default)
+         *       Automatic Correction for Reversed Cable Polarity
+         *   0 - Disabled
+         *   1 - Enabled
+         */
+        phy_data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+        if (hw->disable_polarity_correction == 1)
+            phy_data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data);
+
+        if (ret_val)
+            return ret_val;
+
+        /* SW Reset the PHY so all changes take effect */
+        ret_val = e1000_phy_reset(hw);
+        if (ret_val) {
+            DEBUGOUT("Error Resetting the PHY\n");
+            return ret_val;
+        }
+    } /* phy_reset_disable */
+
+    if (hw->mac_type == e1000_80003es2lan) {
+        /* Bypass RX and TX FIFOs */
+        ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL,
+                                       E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
+                                       E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, phy_data);
+
+        if (ret_val)
+            return ret_val;
+
+        reg_data = E1000_READ_REG(hw, CTRL_EXT);
+        reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
+        E1000_WRITE_REG(hw, CTRL_EXT, reg_data);
+
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
+                                          &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* Do not init these registers when the HW is in IAMT mode, since the
+         * firmware will have already initialized them.  We only initialize
+         * them if the HW is not in IAMT mode.
+         */
+        if (e1000_check_mng_mode(hw) == FALSE) {
+            /* Enable Electrical Idle on the PHY */
+            phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+            ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+
+            ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+            ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+                                          phy_data);
+
+            if (ret_val)
+                return ret_val;
+        }
+
+        /* Workaround: Disable padding in Kumeran interface in the MAC
+         * and in the PHY to avoid CRC errors.
+         */
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_INBAND_CTRL,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        phy_data |= GG82563_ICR_DIS_PADDING;
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_INBAND_CTRL,
+                                      phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Copper link setup for e1000_phy_m88 series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static int32_t
+e1000_copper_link_mgp_setup(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_copper_link_mgp_setup");
+
+    if (hw->phy_reset_disable)
+        return E1000_SUCCESS;
+
+    /* Enable CRS on TX. This must be set for half-duplex operation. */
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+    /* Options:
+     *   MDI/MDI-X = 0 (default)
+     *   0 - Auto for all speeds
+     *   1 - MDI mode
+     *   2 - MDI-X mode
+     *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+     */
+    phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+    switch (hw->mdix) {
+    case 1:
+        phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+        break;
+    case 2:
+        phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+        break;
+    case 3:
+        phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+        break;
+    case 0:
+    default:
+        phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+        break;
+    }
+
+    /* Options:
+     *   disable_polarity_correction = 0 (default)
+     *       Automatic Correction for Reversed Cable Polarity
+     *   0 - Disabled
+     *   1 - Enabled
+     */
+    phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+    if (hw->disable_polarity_correction == 1)
+        phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    if (hw->phy_revision < M88E1011_I_REV_4) {
+        /* Force TX_CLK in the Extended PHY Specific Control Register
+         * to 25MHz clock.
+         */
+        ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+        if ((hw->phy_revision == E1000_REVISION_2) &&
+            (hw->phy_id == M88E1111_I_PHY_ID)) {
+            /* Vidalia Phy, set the downshift counter to 5x */
+            phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK);
+            phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+            ret_val = e1000_write_phy_reg(hw,
+                                        M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+            if (ret_val)
+                return ret_val;
+        } else {
+            /* Configure Master and Slave downshift values */
+            phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+                              M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+            phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+                             M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+            ret_val = e1000_write_phy_reg(hw,
+                                        M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+            if (ret_val)
+               return ret_val;
+        }
+    }
+
+    /* SW Reset the PHY so all changes take effect */
+    ret_val = e1000_phy_reset(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Resetting the PHY\n");
+        return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Setup auto-negotiation and flow control advertisements,
+* and then perform auto-negotiation.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static int32_t
+e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_copper_link_autoneg");
+
+    /* Perform some bounds checking on the hw->autoneg_advertised
+     * parameter.  If this variable is zero, then set it to the default.
+     */
+    hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+    /* If autoneg_advertised is zero, we assume it was not defaulted
+     * by the calling code so we set to advertise full capability.
+     */
+    if (hw->autoneg_advertised == 0)
+        hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+    /* IFE phy only supports 10/100 */
+    if (hw->phy_type == e1000_phy_ife)
+        hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
+
+    DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+    ret_val = e1000_phy_setup_autoneg(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Setting up Auto-Negotiation\n");
+        return ret_val;
+    }
+    DEBUGOUT("Restarting Auto-Neg\n");
+
+    /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+     * the Auto Neg Restart bit in the PHY control register.
+     */
+    ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+    ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    /* Does the user want to wait for Auto-Neg to complete here, or
+     * check at a later time (for example, from a callback routine)?
+     */
+    if (hw->wait_autoneg_complete) {
+        ret_val = e1000_wait_autoneg(hw);
+        if (ret_val) {
+            DEBUGOUT("Error while waiting for autoneg to complete\n");
+            return ret_val;
+        }
+    }
+
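+    /* Flag the link status as needing to be re-read from the PHY on the
+     * next link check. */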
+    hw->get_link_status = TRUE;
+
+    return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Copper link setup for e1000_phy_ife (Fast Ethernet PHY) series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static int32_t
+e1000_copper_link_ife_setup(struct e1000_hw *hw)
+{
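+    /* No PHY-specific register setup is done for the IFE (10/100) PHY;
+     * only the phy_reset_disable flag is honored here. */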
+    if (hw->phy_reset_disable)
+        return E1000_SUCCESS;
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Config the MAC and the PHY after link is up.
+*   1) Set up the MAC to the current PHY speed/duplex if we are on 82543.
+*      If we are on newer silicon, we only need to configure collision
+*      distance in the Transmit Control Register.
+*   2) Set up flow control on the MAC to that established with
+*      the link partner.
+*   3) Config DSP to improve Gigabit link quality for some PHY revisions.
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static int32_t
+e1000_copper_link_postconfig(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    DEBUGFUNC("e1000_copper_link_postconfig");
+
+    if (hw->mac_type >= e1000_82544) {
+        e1000_config_collision_dist(hw);
+    } else {
+        ret_val = e1000_config_mac_to_phy(hw);
+        if (ret_val) {
+            DEBUGOUT("Error configuring MAC to PHY settings\n");
+            return ret_val;
+        }
+    }
+    ret_val = e1000_config_fc_after_link_up(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Configuring Flow Control\n");
+        return ret_val;
+    }
+
+    /* Config DSP to improve Giga link quality */
+    if (hw->phy_type == e1000_phy_igp) {
+        ret_val = e1000_config_dsp_after_link_change(hw, TRUE);
+        if (ret_val) {
+            DEBUGOUT("Error Configuring DSP after link up\n");
+            return ret_val;
+        }
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Detects which PHY is present and sets up the speed and duplex
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static int32_t
+e1000_setup_copper_link(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t i;
+    uint16_t phy_data;
+    uint16_t reg_data;
+
+    DEBUGFUNC("e1000_setup_copper_link");
+
+    switch (hw->mac_type) {
+    case e1000_80003es2lan:
+    case e1000_ich8lan:
+        /* Set the mac to wait the maximum time between each
+         * iteration and increase the max iterations when
+         * polling the phy; this fixes erroneous timeouts at 10Mbps. */
+        ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
+        if (ret_val)
+            return ret_val;
+        reg_data |= 0x3F;
+        ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
+        if (ret_val)
+            return ret_val;
+    default:
+        break;
+    }
+
+    /* Check if it is a valid PHY and set PHY mode if necessary. */
+    ret_val = e1000_copper_link_preconfig(hw);
+    if (ret_val)
+        return ret_val;
+
+    switch (hw->mac_type) {
+    case e1000_80003es2lan:
+        /* Kumeran registers are write-only */
+        reg_data = E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT;
+        reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING;
+        ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL,
+                                       reg_data);
+        if (ret_val)
+            return ret_val;
+        break;
+    default:
+        break;
+    }
+
+    if (hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2) {
+        ret_val = e1000_copper_link_igp_setup(hw);
+        if (ret_val)
+            return ret_val;
+    } else if (hw->phy_type == e1000_phy_m88) {
+        ret_val = e1000_copper_link_mgp_setup(hw);
+        if (ret_val)
+            return ret_val;
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        ret_val = e1000_copper_link_ggp_setup(hw);
+        if (ret_val)
+            return ret_val;
+    } else if (hw->phy_type == e1000_phy_ife) {
+        ret_val = e1000_copper_link_ife_setup(hw);
+        if (ret_val)
+            return ret_val;
+    }
+
+    if (hw->autoneg) {
+        /* Setup autoneg and flow control advertisement
+         * and perform autonegotiation */
+        ret_val = e1000_copper_link_autoneg(hw);
+        if (ret_val)
+            return ret_val;
+    } else {
+        /* PHY will be set to 10H, 10F, 100H, or 100F
+         * depending on value from forced_speed_duplex. */
+        DEBUGOUT("Forcing speed and duplex\n");
+        ret_val = e1000_phy_force_speed_duplex(hw);
+        if (ret_val) {
+            DEBUGOUT("Error Forcing Speed and Duplex\n");
+            return ret_val;
+        }
+    }
+
+    /* Check link status. Wait up to 100 microseconds for link to become
+     * valid.
+     */
+    for (i = 0; i < 10; i++) {
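+        /* Read the status register twice; the link status bit is latched
+         * ("sticky"), so the first read can return stale information. */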
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        if (phy_data & MII_SR_LINK_STATUS) {
+            /* Config the MAC and PHY after link is up */
+            ret_val = e1000_copper_link_postconfig(hw);
+            if (ret_val)
+                return ret_val;
+
+            DEBUGOUT("Valid link established!!!\n");
+            return E1000_SUCCESS;
+        }
+        usec_delay(10);
+    }
+
+    DEBUGOUT("Unable to establish link!!!\n");
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Configure the MAC-to-PHY interface for 10/100Mbps
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static int32_t
+e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, uint16_t duplex)
+{
+    int32_t ret_val = E1000_SUCCESS;
+    uint32_t tipg;
+    uint16_t reg_data;
+
+    DEBUGFUNC("e1000_configure_kmrn_for_10_100");
+
+    reg_data = E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT;
+    ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL,
+                                   reg_data);
+    if (ret_val)
+        return ret_val;
+
+    /* Configure Transmit Inter-Packet Gap */
+    tipg = E1000_READ_REG(hw, TIPG);
+    tipg &= ~E1000_TIPG_IPGT_MASK;
+    tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100;
+    E1000_WRITE_REG(hw, TIPG, tipg);
+
+    ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+
+    if (ret_val)
+        return ret_val;
+
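+    /* Pass false carrier indications only when running half duplex. */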
+    if (duplex == HALF_DUPLEX)
+        reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+    else
+        reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+    ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+    return ret_val;
+}
+
+static int32_t
+e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
+{
+    int32_t ret_val = E1000_SUCCESS;
+    uint16_t reg_data;
+    uint32_t tipg;
+
+    DEBUGFUNC("e1000_configure_kmrn_for_1000");
+
+    reg_data = E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT;
+    ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL,
+                                   reg_data);
+    if (ret_val)
+        return ret_val;
+
+    /* Configure Transmit Inter-Packet Gap */
+    tipg = E1000_READ_REG(hw, TIPG);
+    tipg &= ~E1000_TIPG_IPGT_MASK;
+    tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
+    E1000_WRITE_REG(hw, TIPG, tipg);
+
+    ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+
+    if (ret_val)
+        return ret_val;
+
+    reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+    ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+    return ret_val;
+}
+
+/******************************************************************************
+* Configures PHY autoneg and flow control advertisement settings
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+int32_t
+e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t mii_autoneg_adv_reg;
+    uint16_t mii_1000t_ctrl_reg;
+
+    DEBUGFUNC("e1000_phy_setup_autoneg");
+
+    /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+    ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+    if (ret_val)
+        return ret_val;
+
+    if (hw->phy_type != e1000_phy_ife) {
+        /* Read the MII 1000Base-T Control Register (Address 9). */
+        ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+        if (ret_val)
+            return ret_val;
+    } else
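+        /* The IFE PHY is 10/100-only, so treat its 1000Base-T Control
+         * register contents as zero rather than reading them. */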
+        mii_1000t_ctrl_reg = 0;
+
+    /* Need to parse both autoneg_advertised and fc and set up
+     * the appropriate PHY registers.  First we will parse for
+     * autoneg_advertised software override.  Since we can advertise
+     * a plethora of combinations, we need to check each bit
+     * individually.
+     */
+
+    /* First we clear all the 10/100 mb speed bits in the Auto-Neg
+     * Advertisement Register (Address 4) and the 1000 mb speed bits in
+     * the 1000Base-T Control Register (Address 9).
+     */
+    mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
+    mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
+
+    DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
+
+    /* Do we want to advertise 10 Mb Half Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
+        DEBUGOUT("Advertise 10mb Half duplex\n");
+        mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+    }
+
+    /* Do we want to advertise 10 Mb Full Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
+        DEBUGOUT("Advertise 10mb Full duplex\n");
+        mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+    }
+
+    /* Do we want to advertise 100 Mb Half Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
+        DEBUGOUT("Advertise 100mb Half duplex\n");
+        mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+    }
+
+    /* Do we want to advertise 100 Mb Full Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
+        DEBUGOUT("Advertise 100mb Full duplex\n");
+        mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+    }
+
+    /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+    if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
+        DEBUGOUT("Advertise 1000mb Half duplex requested, request denied!\n");
+    }
+
+    /* Do we want to advertise 1000 Mb Full Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
+        DEBUGOUT("Advertise 1000mb Full duplex\n");
+        mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+        if (hw->phy_type == e1000_phy_ife) {
+            DEBUGOUT("e1000_phy_ife is a 10/100 PHY. Gigabit speed is not supported.\n");
+        }
+    }
+
+    /* Check for a software override of the flow control settings, and
+     * setup the PHY advertisement registers accordingly.  If
+     * auto-negotiation is enabled, then software will have to set the
+     * "PAUSE" bits to the correct value in the Auto-Negotiation
+     * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
+     *
+     * The possible values of the "fc" parameter are:
+     *      0:  Flow control is completely disabled
+     *      1:  Rx flow control is enabled (we can receive pause frames
+     *          but not send pause frames).
+     *      2:  Tx flow control is enabled (we can send pause frames
+     *          but we do not support receiving pause frames).
+     *      3:  Both Rx and TX flow control (symmetric) are enabled.
+     *  other:  No software override.  The flow control configuration
+     *          in the EEPROM is used.
+     */
+    switch (hw->fc) {
+    case e1000_fc_none: /* 0 */
+        /* Flow control (RX & TX) is completely disabled by a
+         * software over-ride.
+         */
+        mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+        break;
+    case e1000_fc_rx_pause: /* 1 */
+        /* RX Flow control is enabled, and TX Flow control is
+         * disabled, by a software over-ride.
+         */
+        /* Since there really isn't a way to advertise that we are
+         * capable of RX Pause ONLY, we will advertise that we
+         * support both symmetric and asymmetric RX PAUSE.  Later
+         * (in e1000_config_fc_after_link_up) we will disable the
+         * hw's ability to send PAUSE frames.
+         */
+        mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+        break;
+    case e1000_fc_tx_pause: /* 2 */
+        /* TX Flow control is enabled, and RX Flow control is
+         * disabled, by a software over-ride.
+         */
+        mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+        mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+        break;
+    case e1000_fc_full: /* 3 */
+        /* Flow control (both RX and TX) is enabled by a software
+         * over-ride.
+         */
+        mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+        break;
+    default:
+        DEBUGOUT("Flow control param set incorrectly\n");
+        return -E1000_ERR_CONFIG;
+    }
+
+    ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+    if (ret_val)
+        return ret_val;
+
+    DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+    if (hw->phy_type != e1000_phy_ife) {
+        ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Force PHY speed and duplex settings to hw->forced_speed_duplex
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static int32_t
+e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+    int32_t ret_val;
+    uint16_t mii_ctrl_reg;
+    uint16_t mii_status_reg;
+    uint16_t phy_data;
+    uint16_t i;
+
+    DEBUGFUNC("e1000_phy_force_speed_duplex");
+
+    /* Turn off Flow control if we are forcing speed and duplex. */
+    hw->fc = e1000_fc_none;
+
+    DEBUGOUT1("hw->fc = %d\n", hw->fc);
+
+    /* Read the Device Control Register. */
+    ctrl = E1000_READ_REG(hw, CTRL);
+
+    /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */
+    ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+    ctrl &= ~(DEVICE_SPEED_MASK);
+
+    /* Clear the Auto Speed Detect Enable bit. */
+    ctrl &= ~E1000_CTRL_ASDE;
+
+    /* Read the MII Control Register. */
+    ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
+    if (ret_val)
+        return ret_val;
+
+    /* We need to disable autoneg in order to force link and duplex. */
+
+    mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
+
+    /* Are we forcing Full or Half Duplex? */
+    if (hw->forced_speed_duplex == e1000_100_full ||
+        hw->forced_speed_duplex == e1000_10_full) {
+        /* We want to force full duplex so we SET the full duplex bits in the
+         * Device and MII Control Registers.
+         */
+        ctrl |= E1000_CTRL_FD;
+        mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
+        DEBUGOUT("Full Duplex\n");
+    } else {
+        /* We want to force half duplex so we CLEAR the full duplex bits in
+         * the Device and MII Control Registers.
+         */
+        ctrl &= ~E1000_CTRL_FD;
+        mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
+        DEBUGOUT("Half Duplex\n");
+    }
+
+    /* Are we forcing 100Mbps??? */
+    if (hw->forced_speed_duplex == e1000_100_full ||
+       hw->forced_speed_duplex == e1000_100_half) {
+        /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
+        ctrl |= E1000_CTRL_SPD_100;
+        mii_ctrl_reg |= MII_CR_SPEED_100;
+        mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+        DEBUGOUT("Forcing 100mb ");
+    } else {
+        /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
+        ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+        mii_ctrl_reg |= MII_CR_SPEED_10;
+        mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+        DEBUGOUT("Forcing 10mb ");
+    }
+
+    e1000_config_collision_dist(hw);
+
+    /* Write the configured values back to the Device Control Reg. */
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+
+    if ((hw->phy_type == e1000_phy_m88) ||
+        (hw->phy_type == e1000_phy_gg82563)) {
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+         * forced whenever speed or duplex are forced.
+         */
+        phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+
+        DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
+
+        /* Need to reset the PHY or these changes will be ignored */
+        mii_ctrl_reg |= MII_CR_RESET;
+    /* Disable MDI-X support for 10/100 */
+    } else if (hw->phy_type == e1000_phy_ife) {
+        ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~IFE_PMC_AUTO_MDIX;
+        phy_data &= ~IFE_PMC_FORCE_MDIX;
+
+        ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data);
+        if (ret_val)
+            return ret_val;
+    } else {
+        /* Clear Auto-Crossover to force MDI manually.  IGP requires MDI
+         * forced whenever speed or duplex are forced.
+         */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+        phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    /* Write back the modified PHY MII control register. */
+    ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
+    if (ret_val)
+        return ret_val;
+
+    usec_delay(1);
+
+    /* The wait_autoneg_complete flag may be a little misleading here.
+     * Since we are forcing speed and duplex, Auto-Neg is not enabled.
+     * But we do want to delay for a period while forcing only so we
+     * don't generate false No Link messages.  So we will wait here
+     * only if the user has set wait_autoneg_complete to 1, which is
+     * the default.
+     */
+    if (hw->wait_autoneg_complete) {
+        /* We will wait for autoneg to complete. */
+        DEBUGOUT("Waiting for forced speed/duplex link.\n");
+        mii_status_reg = 0;
+
+        /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+        for (i = PHY_FORCE_TIME; i > 0; i--) {
+            /* Read the MII Status Register and wait for Auto-Neg Complete bit
+             * to be set.
+             */
+            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+            if (ret_val)
+                return ret_val;
+
+            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+            if (ret_val)
+                return ret_val;
+
+            if (mii_status_reg & MII_SR_LINK_STATUS) break;
+            msec_delay(100);
+        }
+        if ((i == 0) &&
+           ((hw->phy_type == e1000_phy_m88) ||
+            (hw->phy_type == e1000_phy_gg82563))) {
+            /* We didn't get link.  Reset the DSP and wait again for link. */
+            ret_val = e1000_phy_reset_dsp(hw);
+            if (ret_val) {
+                DEBUGOUT("Error Resetting PHY DSP\n");
+                return ret_val;
+            }
+        }
+        /* This loop will early-out if the link condition has been met.  */
+        for (i = PHY_FORCE_TIME; i > 0; i--) {
+            if (mii_status_reg & MII_SR_LINK_STATUS) break;
+            msec_delay(100);
+            /* Read the MII Status Register and wait for Auto-Neg Complete bit
+             * to be set.
+             */
+            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+            if (ret_val)
+                return ret_val;
+
+            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+            if (ret_val)
+                return ret_val;
+        }
+    }
+
+    if (hw->phy_type == e1000_phy_m88) {
+        /* Because we reset the PHY above, we need to re-force TX_CLK in the
+         * Extended PHY Specific Control Register to 25MHz clock.  This value
+         * defaults back to a 2.5MHz clock when the PHY is reset.
+         */
+        ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= M88E1000_EPSCR_TX_CLK_25;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* In addition, because of the s/w reset above, we need to enable CRS on
+         * TX.  This must be set for both full and half duplex operation.
+         */
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+
+        if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+            (!hw->autoneg) && (hw->forced_speed_duplex == e1000_10_full ||
+             hw->forced_speed_duplex == e1000_10_half)) {
+            ret_val = e1000_polarity_reversal_workaround(hw);
+            if (ret_val)
+                return ret_val;
+        }
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        /* The TX_CLK of the Extended PHY Specific Control Register defaults
+         * to 2.5MHz on a reset.  We need to re-force it back to 25MHz, if
+         * we're not in a forced 10Mbps configuration. */
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
+        if ((hw->forced_speed_duplex == e1000_10_full) ||
+            (hw->forced_speed_duplex == e1000_10_half))
+            phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ;
+        else
+            phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25MHZ;
+
+        /* Also due to the reset, we need to enable CRS on Tx. */
+        phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Sets the collision distance in the Transmit Control register
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Link should have been established previously. Reads the speed and duplex
+* information from the Device Status register.
+******************************************************************************/
+void
+e1000_config_collision_dist(struct e1000_hw *hw)
+{
+    uint32_t tctl, coll_dist;
+
+    DEBUGFUNC("e1000_config_collision_dist");
+
+    if (hw->mac_type < e1000_82543)
+        coll_dist = E1000_COLLISION_DISTANCE_82542;
+    else
+        coll_dist = E1000_COLLISION_DISTANCE;
+
+    tctl = E1000_READ_REG(hw, TCTL);
+
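+    /* Replace the collision distance (COLD) field in TCTL with the value
+     * selected above. */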
+    tctl &= ~E1000_TCTL_COLD;
+    tctl |= coll_dist << E1000_COLD_SHIFT;
+
+    E1000_WRITE_REG(hw, TCTL, tctl);
+    E1000_WRITE_FLUSH(hw);
+}
+
+/******************************************************************************
+* Sets MAC speed and duplex settings to reflect those negotiated by the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Reads the PHY Specific Status register to determine the negotiated speed
+* and duplex, then forces the MAC configuration to match.
+******************************************************************************/
+static int32_t
+e1000_config_mac_to_phy(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_config_mac_to_phy");
+
+    /* On 82544 or newer MACs, Auto Speed Detection takes care of
+     * MAC speed/duplex configuration. */
+    if (hw->mac_type >= e1000_82544)
+        return E1000_SUCCESS;
+
+    /* Read the Device Control Register and set the bits to Force Speed
+     * and Duplex.
+     */
+    ctrl = E1000_READ_REG(hw, CTRL);
+    ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+    ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
+
+    /* Set up duplex in the Device Control and Transmit Control
+     * registers depending on negotiated values.
+     */
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    if (phy_data & M88E1000_PSSR_DPLX)
+        ctrl |= E1000_CTRL_FD;
+    else
+        ctrl &= ~E1000_CTRL_FD;
+
+    e1000_config_collision_dist(hw);
+
+    /* Set up speed in the Device Control register depending on
+     * negotiated values.
+     */
+    if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+        ctrl |= E1000_CTRL_SPD_1000;
+    else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
+        ctrl |= E1000_CTRL_SPD_100;
+
+    /* Write the configured values back to the Device Control Reg. */
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Forces the MAC's flow control settings.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sets the TFCE and RFCE bits in the device control register to reflect
+ * the adapter settings. TFCE and RFCE need to be explicitly set by
+ * software when a Copper PHY is used because autonegotiation is managed
+ * by the PHY rather than the MAC. Software must also configure these
+ * bits when link is forced on a fiber connection.
+ *****************************************************************************/
+int32_t
+e1000_force_mac_fc(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+
+    DEBUGFUNC("e1000_force_mac_fc");
+
+    /* Get the current configuration of the Device Control Register */
+    ctrl = E1000_READ_REG(hw, CTRL);
+
+    /* Because we didn't get link via the internal auto-negotiation
+     * mechanism (we either forced link or we got link via PHY
+     * auto-neg), we have to manually enable/disable transmit and
+     * receive flow control.
+     *
+     * The "Case" statement below enables/disables flow control
+     * according to the "hw->fc" parameter.
+     *
+     * The possible values of the "fc" parameter are:
+     *      0:  Flow control is completely disabled
+     *      1:  Rx flow control is enabled (we can receive pause
+     *          frames but not send pause frames).
+     *      2:  Tx flow control is enabled (we can send pause frames
+     *          but we do not receive pause frames).
+     *      3:  Both Rx and TX flow control (symmetric) is enabled.
+     *  other:  No other values should be possible at this point.
+     */
+
+    switch (hw->fc) {
+    case e1000_fc_none:
+        ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+        break;
+    case e1000_fc_rx_pause:
+        ctrl &= (~E1000_CTRL_TFCE);
+        ctrl |= E1000_CTRL_RFCE;
+        break;
+    case e1000_fc_tx_pause:
+        ctrl &= (~E1000_CTRL_RFCE);
+        ctrl |= E1000_CTRL_TFCE;
+        break;
+    case e1000_fc_full:
+        ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+        break;
+    default:
+        DEBUGOUT("Flow control param set incorrectly\n");
+        return -E1000_ERR_CONFIG;
+    }
+
+    /* Disable TX Flow Control for 82542 (rev 2.0) */
+    if (hw->mac_type == e1000_82542_rev2_0)
+        ctrl &= (~E1000_CTRL_TFCE);
+
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Configures flow control settings after link is established
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Should be called immediately after a valid link has been established.
+ * Forces MAC flow control settings if link was forced. When in MII/GMII mode
+ * and autonegotiation is enabled, the MAC flow control settings will be set
+ * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
+ * and RFCE bits will be automatically set to the negotiated flow control mode.
+ *****************************************************************************/
+int32_t
+e1000_config_fc_after_link_up(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t mii_status_reg;
+    uint16_t mii_nway_adv_reg;
+    uint16_t mii_nway_lp_ability_reg;
+    uint16_t speed;
+    uint16_t duplex;
+
+    DEBUGFUNC("e1000_config_fc_after_link_up");
+
+    /* Check for the case where we have fiber media and auto-neg failed
+     * so we had to force link.  In this case, we need to force the
+     * configuration of the MAC to match the "fc" parameter.
+     */
+    if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) ||
+        ((hw->media_type == e1000_media_type_internal_serdes) &&
+         (hw->autoneg_failed)) ||
+        ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) {
+        ret_val = e1000_force_mac_fc(hw);
+        if (ret_val) {
+            DEBUGOUT("Error forcing flow control settings\n");
+            return ret_val;
+        }
+    }
+
+    /* Check for the case where we have copper media and auto-neg is
+     * enabled.  In this case, we need to check and see if Auto-Neg
+     * has completed, and if so, how the PHY and link partner has
+     * flow control configured.
+     */
+    if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+        /* Read the MII Status Register and check to see if AutoNeg
+         * has completed.  We read this twice because this reg has
+         * some "sticky" (latched) bits.
+         */
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
+            /* The AutoNeg process has completed, so we now need to
+             * read both the Auto Negotiation Advertisement Register
+             * (Address 4) and the Auto-Negotiation Base Page Ability
+             * Register (Address 5) to determine how flow control was
+             * negotiated.
+             */
+            ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
+                                         &mii_nway_adv_reg);
+            if (ret_val)
+                return ret_val;
+            ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
+                                         &mii_nway_lp_ability_reg);
+            if (ret_val)
+                return ret_val;
+
+            /* Two bits in the Auto Negotiation Advertisement Register
+             * (Address 4) and two bits in the Auto Negotiation Base
+             * Page Ability Register (Address 5) determine flow control
+             * for both the PHY and the link partner.  The following
+             * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+             * 1999, describes these PAUSE resolution bits and how flow
+             * control is determined based upon these settings.
+             * NOTE:  DC = Don't Care
+             *
+             *   LOCAL DEVICE  |   LINK PARTNER
+             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+             *-------|---------|-------|---------|--------------------
+             *   0   |    0    |  DC   |   DC    | e1000_fc_none
+             *   0   |    1    |   0   |   DC    | e1000_fc_none
+             *   0   |    1    |   1   |    0    | e1000_fc_none
+             *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+             *   1   |    0    |   0   |   DC    | e1000_fc_none
+             *   1   |   DC    |   1   |   DC    | e1000_fc_full
+             *   1   |    1    |   0   |    0    | e1000_fc_none
+             *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+             *
+             */
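+            /* As a worked example of the table above: if we advertise
+             * PAUSE=1 and ASM_DIR=1 while the link partner advertises
+             * PAUSE=0 and ASM_DIR=1, the last row applies and we resolve
+             * to e1000_fc_rx_pause, which is what the chain of checks
+             * below computes.
+             */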
+            /* Are both PAUSE bits set to 1?  If so, this implies
+             * Symmetric Flow Control is enabled at both ends.  The
+             * ASM_DIR bits are irrelevant per the spec.
+             *
+             * For Symmetric Flow Control:
+             *
+             *   LOCAL DEVICE  |   LINK PARTNER
+             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+             *-------|---------|-------|---------|--------------------
+             *   1   |   DC    |   1   |   DC    | e1000_fc_full
+             *
+             */
+            if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+                /* Now we need to check if the user selected Rx-only
+                 * PAUSE frames.  In this case, we had to advertise
+                 * FULL flow control because we could not advertise Rx
+                 * ONLY. Hence, we must now check to see if we need to
+                 * turn OFF the TRANSMISSION of PAUSE frames.
+                 */
+                if (hw->original_fc == e1000_fc_full) {
+                    hw->fc = e1000_fc_full;
+                    DEBUGOUT("Flow Control = FULL.\n");
+                } else {
+                    hw->fc = e1000_fc_rx_pause;
+                    DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+                }
+            }
+            /* For receiving PAUSE frames ONLY.
+             *
+             *   LOCAL DEVICE  |   LINK PARTNER
+             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+             *-------|---------|-------|---------|--------------------
+             *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+             *
+             */
+            else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                     (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+                     (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+                     (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+                hw->fc = e1000_fc_tx_pause;
+                DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
+            }
+            /* For transmitting PAUSE frames ONLY.
+             *
+             *   LOCAL DEVICE  |   LINK PARTNER
+             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+             *-------|---------|-------|---------|--------------------
+             *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+             *
+             */
+            else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                     (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+                     !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+                     (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+                hw->fc = e1000_fc_rx_pause;
+                DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+            }
+            /* Per the IEEE spec, at this point flow control should be
+             * disabled.  However, we want to consider that we could
+             * be connected to a legacy switch that doesn't advertise
+             * desired flow control, but can be forced on the link
+             * partner.  So if we advertised no flow control, that is
+             * what we will resolve to.  If we advertised some kind of
+             * receive capability (Rx Pause Only or Full Flow Control)
+             * and the link partner advertised none, we will configure
+             * ourselves to enable Rx Flow Control only.  We can do
+             * this safely for two reasons:  If the link partner really
+             * didn't want flow control enabled, and we enable Rx, no
+             * harm done since we won't be receiving any PAUSE frames
+             * anyway.  If the intent on the link partner was to have
+             * flow control enabled, then by us enabling RX only, we
+             * can at least receive pause frames and process them.
+             * This is a good idea because, as a predominantly server NIC,
+             * we will more often be asked to delay transmission of packets
+             * than to ask our link partner to pause transmission of frames.
+             */
+            else if ((hw->original_fc == e1000_fc_none ||
+                      hw->original_fc == e1000_fc_tx_pause) ||
+                      hw->fc_strict_ieee) {
+                hw->fc = e1000_fc_none;
+                DEBUGOUT("Flow Control = NONE.\n");
+            } else {
+                hw->fc = e1000_fc_rx_pause;
+                DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+            }
+
+            /* Now we need to do one last check...  If we auto-
+             * negotiated to HALF DUPLEX, flow control should not be
+             * enabled per IEEE 802.3 spec.
+             */
+            ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+            if (ret_val) {
+                DEBUGOUT("Error getting link speed and duplex\n");
+                return ret_val;
+            }
+
+            if (duplex == HALF_DUPLEX)
+                hw->fc = e1000_fc_none;
+
+            /* Now we call a subroutine to actually force the MAC
+             * controller to use the correct flow control settings.
+             */
+            ret_val = e1000_force_mac_fc(hw);
+            if (ret_val) {
+                DEBUGOUT("Error forcing flow control settings\n");
+                return ret_val;
+            }
+        } else {
+            DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
+        }
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Checks to see if the link status of the hardware has changed.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Called by any function that needs to check the link status of the adapter.
+ *****************************************************************************/
+int32_t
+e1000_check_for_link(struct e1000_hw *hw)
+{
+    uint32_t rxcw = 0;
+    uint32_t ctrl;
+    uint32_t status;
+    uint32_t rctl;
+    uint32_t icr;
+    uint32_t signal = 0;
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_check_for_link");
+
+    ctrl = E1000_READ_REG(hw, CTRL);
+    status = E1000_READ_REG(hw, STATUS);
+
+    /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
+     * set when the optics detect a signal. On older adapters, it will be
+     * cleared when there is a signal.  This applies to fiber media only.
+     */
+    if ((hw->media_type == e1000_media_type_fiber) ||
+        (hw->media_type == e1000_media_type_internal_serdes)) {
+        rxcw = E1000_READ_REG(hw, RXCW);
+
+        if (hw->media_type == e1000_media_type_fiber) {
+            signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+            if (status & E1000_STATUS_LU)
+                hw->get_link_status = FALSE;
+        }
+    }
+
+    /* If we have a copper PHY then we only want to go out to the PHY
+     * registers to see if Auto-Neg has completed and/or if our link
+     * status has changed.  The get_link_status flag will be set if we
+     * receive a Link Status Change interrupt or we have Rx Sequence
+     * Errors.
+     */
+    if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
+        /* First we want to see if the MII Status Register reports
+         * link.  If so, then we want to get the current speed/duplex
+         * of the PHY.
+         * Read the register twice since the link bit is sticky.
+         */
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        if (phy_data & MII_SR_LINK_STATUS) {
+            hw->get_link_status = FALSE;
+            /* Check if there was a downshift; this must be checked
+             * immediately after link-up. */
+            e1000_check_downshift(hw);
+
+            /* If we are on 82544 or 82543 silicon and speed/duplex
+             * are forced to 10H or 10F, then we will implement the polarity
+             * reversal workaround.  We disable interrupts first, and upon
+             * returning, restore the device's interrupt state to its previous
+             * value, except for the link status change interrupt, which will
+             * occur due to the execution of this workaround.
+             */
+
+            if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+                (!hw->autoneg) &&
+                (hw->forced_speed_duplex == e1000_10_full ||
+                 hw->forced_speed_duplex == e1000_10_half)) {
+                E1000_WRITE_REG(hw, IMC, 0xffffffff);
+                ret_val = e1000_polarity_reversal_workaround(hw);
+                icr = E1000_READ_REG(hw, ICR);
+                E1000_WRITE_REG(hw, ICS, (icr & ~E1000_ICS_LSC));
+                E1000_WRITE_REG(hw, IMS, IMS_ENABLE_MASK);
+            }
+
+        } else {
+            /* No link detected */
+            e1000_config_dsp_after_link_change(hw, FALSE);
+            return 0;
+        }
+
+        /* If we are forcing speed/duplex, then we simply return since
+         * we have already determined whether we have link or not.
+         */
+        if (!hw->autoneg) return -E1000_ERR_CONFIG;
+
+        /* optimize the dsp settings for the igp phy */
+        e1000_config_dsp_after_link_change(hw, TRUE);
+
+        /* We have an M88E1000 PHY and Auto-Neg is enabled.  If we
+         * have Si on board that is 82544 or newer, Auto
+         * Speed Detection takes care of MAC speed/duplex
+         * configuration.  So we only need to configure Collision
+         * Distance in the MAC.  Otherwise, we need to force
+         * speed/duplex on the MAC to the current PHY speed/duplex
+         * settings.
+         */
+        if (hw->mac_type >= e1000_82544)
+            e1000_config_collision_dist(hw);
+        else {
+            ret_val = e1000_config_mac_to_phy(hw);
+            if (ret_val) {
+                DEBUGOUT("Error configuring MAC to PHY settings\n");
+                return ret_val;
+            }
+        }
+
+        /* Configure Flow Control now that Auto-Neg has completed. First, we
+         * need to restore the desired flow control settings because we may
+         * have had to re-autoneg with a different link partner.
+         */
+        ret_val = e1000_config_fc_after_link_up(hw);
+        if (ret_val) {
+            DEBUGOUT("Error configuring flow control\n");
+            return ret_val;
+        }
+
+        /* At this point we know that we are on copper and we have
+         * auto-negotiated link.  These are conditions for checking the link
+         * partner capability register.  We use the link speed to determine if
+         * TBI compatibility needs to be turned on or off.  If the link is not
+         * at gigabit speed, then TBI compatibility is not needed.  If we are
+         * at gigabit speed, we turn on TBI compatibility.
+         */
+        if (hw->tbi_compatibility_en) {
+            uint16_t speed, duplex;
+            ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+            if (ret_val) {
+                DEBUGOUT("Error getting link speed and duplex\n");
+                return ret_val;
+            }
+            if (speed != SPEED_1000) {
+                /* If link speed is not set to gigabit speed, we do not need
+                 * to enable TBI compatibility.
+                 */
+                if (hw->tbi_compatibility_on) {
+                    /* If we previously were in the mode, turn it off. */
+                    rctl = E1000_READ_REG(hw, RCTL);
+                    rctl &= ~E1000_RCTL_SBP;
+                    E1000_WRITE_REG(hw, RCTL, rctl);
+                    hw->tbi_compatibility_on = FALSE;
+                }
+            } else {
+                /* If TBI compatibility was previously off, turn it on. For
+                 * compatibility with a TBI link partner, we will store bad
+                 * packets. Some frames have an additional byte on the end and
+                 * will look like CRC errors to the hardware.
+                 */
+                if (!hw->tbi_compatibility_on) {
+                    hw->tbi_compatibility_on = TRUE;
+                    rctl = E1000_READ_REG(hw, RCTL);
+                    rctl |= E1000_RCTL_SBP;
+                    E1000_WRITE_REG(hw, RCTL, rctl);
+                }
+            }
+        }
+    }
+    /* If we don't have link (auto-negotiation failed or link partner cannot
+     * auto-negotiate), the cable is plugged in (we have signal), and our
+     * link partner is not trying to auto-negotiate with us (we are receiving
+     * idles or data), we need to force link up. We also need to give
+     * auto-negotiation time to complete, in case the cable was just plugged
+     * in. The autoneg_failed flag does this.
+     */
+    else if ((((hw->media_type == e1000_media_type_fiber) &&
+              ((ctrl & E1000_CTRL_SWDPIN1) == signal)) ||
+              (hw->media_type == e1000_media_type_internal_serdes)) &&
+              (!(status & E1000_STATUS_LU)) &&
+              (!(rxcw & E1000_RXCW_C))) {
+        if (hw->autoneg_failed == 0) {
+            hw->autoneg_failed = 1;
+            return 0;
+        }
+        DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+        /* Disable auto-negotiation in the TXCW register */
+        E1000_WRITE_REG(hw, TXCW, (hw->txcw & ~E1000_TXCW_ANE));
+
+        /* Force link-up and also force full-duplex. */
+        ctrl = E1000_READ_REG(hw, CTRL);
+        ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+        E1000_WRITE_REG(hw, CTRL, ctrl);
+
+        /* Configure Flow Control after forcing link up. */
+        ret_val = e1000_config_fc_after_link_up(hw);
+        if (ret_val) {
+            DEBUGOUT("Error configuring flow control\n");
+            return ret_val;
+        }
+    }
+    /* If we are forcing link and we are receiving /C/ ordered sets, re-enable
+     * auto-negotiation in the TXCW register and disable forced link in the
+     * Device Control register in an attempt to auto-negotiate with our link
+     * partner.
+     */
+    else if (((hw->media_type == e1000_media_type_fiber) ||
+              (hw->media_type == e1000_media_type_internal_serdes)) &&
+              (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+        DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+        E1000_WRITE_REG(hw, TXCW, hw->txcw);
+        E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+        hw->serdes_link_down = FALSE;
+    }
+    /* If we force link for a non-auto-negotiating switch, check link status
+     * based on MAC synchronization for the internal serdes media type.
+     */
+    else if ((hw->media_type == e1000_media_type_internal_serdes) &&
+             !(E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
+        /* SYNCH bit and IV bit are sticky. */
+        usec_delay(10);
+        if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) {
+            if (!(rxcw & E1000_RXCW_IV)) {
+                hw->serdes_link_down = FALSE;
+                DEBUGOUT("SERDES: Link is up.\n");
+            }
+        } else {
+            hw->serdes_link_down = TRUE;
+            DEBUGOUT("SERDES: Link is down.\n");
+        }
+    }
+    if ((hw->media_type == e1000_media_type_internal_serdes) &&
+        (E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
+        hw->serdes_link_down = !(E1000_STATUS_LU & E1000_READ_REG(hw, STATUS));
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Detects the current speed and duplex settings of the hardware.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * speed - Speed of the connection
+ * duplex - Duplex setting of the connection
+ *****************************************************************************/
+int32_t
+e1000_get_speed_and_duplex(struct e1000_hw *hw,
+                           uint16_t *speed,
+                           uint16_t *duplex)
+{
+    uint32_t status;
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_get_speed_and_duplex");
+
+    if (hw->mac_type >= e1000_82543) {
+        status = E1000_READ_REG(hw, STATUS);
+        if (status & E1000_STATUS_SPEED_1000) {
+            *speed = SPEED_1000;
+            DEBUGOUT("1000 Mbs, ");
+        } else if (status & E1000_STATUS_SPEED_100) {
+            *speed = SPEED_100;
+            DEBUGOUT("100 Mbs, ");
+        } else {
+            *speed = SPEED_10;
+            DEBUGOUT("10 Mbs, ");
+        }
+
+        if (status & E1000_STATUS_FD) {
+            *duplex = FULL_DUPLEX;
+            DEBUGOUT("Full Duplex\n");
+        } else {
+            *duplex = HALF_DUPLEX;
+            DEBUGOUT(" Half Duplex\n");
+        }
+    } else {
+        DEBUGOUT("1000 Mbs, Full Duplex\n");
+        *speed = SPEED_1000;
+        *duplex = FULL_DUPLEX;
+    }
+
+    /* IGP01 PHY may advertise full duplex operation after speed downgrade even
+     * if it is operating at half duplex.  Here we set the duplex settings to
+     * match the duplex in the link partner's capabilities.
+     */
+    if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
+        ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
+            *duplex = HALF_DUPLEX;
+        else {
+            ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
+            if (ret_val)
+                return ret_val;
+            if ((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) ||
+               (*speed == SPEED_10 && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
+                *duplex = HALF_DUPLEX;
+        }
+    }
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (hw->media_type == e1000_media_type_copper)) {
+        if (*speed == SPEED_1000)
+            ret_val = e1000_configure_kmrn_for_1000(hw);
+        else
+            ret_val = e1000_configure_kmrn_for_10_100(hw, *duplex);
+        if (ret_val)
+            return ret_val;
+    }
+
+    if ((hw->phy_type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
+        ret_val = e1000_kumeran_lock_loss_workaround(hw);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Blocks until autoneg completes or times out (~4.5 seconds)
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+int32_t
+e1000_wait_autoneg(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t i;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_wait_autoneg");
+    DEBUGOUT("Waiting for Auto-Neg to complete.\n");
+
+    /* We will wait for autoneg to complete or 4.5 seconds to expire. */
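+    /* PHY_AUTO_NEG_TIME counts iterations of the 100 ms delay below, which
+     * is where the ~4.5 second bound comes from (i.e. the constant is
+     * presumably 45).
+     */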
+    for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
+        /* Read the MII Status Register and wait for Auto-Neg
+         * Complete bit to be set.
+         */
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+        if (phy_data & MII_SR_AUTONEG_COMPLETE) {
+            return E1000_SUCCESS;
+        }
+        msec_delay(100);
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Raises the Management Data Clock
+*
+* hw - Struct containing variables accessed by shared code
+* ctrl - Device control register's current value
+******************************************************************************/
+static void
+e1000_raise_mdi_clk(struct e1000_hw *hw,
+                    uint32_t *ctrl)
+{
+    /* Raise the clock input to the Management Data Clock (by setting the MDC
+     * bit), and then delay 10 microseconds.
+     */
+    E1000_WRITE_REG(hw, CTRL, (*ctrl | E1000_CTRL_MDC));
+    E1000_WRITE_FLUSH(hw);
+    usec_delay(10);
+}
+
+/******************************************************************************
+* Lowers the Management Data Clock
+*
+* hw - Struct containing variables accessed by shared code
+* ctrl - Device control register's current value
+******************************************************************************/
+static void
+e1000_lower_mdi_clk(struct e1000_hw *hw,
+                    uint32_t *ctrl)
+{
+    /* Lower the clock input to the Management Data Clock (by clearing the MDC
+     * bit), and then delay 10 microseconds.
+     */
+    E1000_WRITE_REG(hw, CTRL, (*ctrl & ~E1000_CTRL_MDC));
+    E1000_WRITE_FLUSH(hw);
+    usec_delay(10);
+}
+
+/******************************************************************************
+* Shifts data bits out to the PHY
+*
+* hw - Struct containing variables accessed by shared code
+* data - Data to send out to the PHY
+* count - Number of bits to shift out
+*
+* Bits are shifted out in MSB to LSB order.
+******************************************************************************/
+static void
+e1000_shift_out_mdi_bits(struct e1000_hw *hw,
+                         uint32_t data,
+                         uint16_t count)
+{
+    uint32_t ctrl;
+    uint32_t mask;
+
+    /* We need to shift "count" number of bits out to the PHY. So, the value
+     * in the "data" parameter will be shifted out to the PHY one bit at a
+     * time. In order to do this, "data" must be broken down into bits.
+     */
+    mask = 0x01;
+    mask <<= (count - 1);
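+    /* For example, with count == 14 the mask starts at bit 13, so bit 13 of
+     * "data" is driven onto MDIO first and bit 0 last, giving the MSB-to-LSB
+     * ordering noted above.
+     */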
+
+    ctrl = E1000_READ_REG(hw, CTRL);
+
+    /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
+    ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
+
+    while (mask) {
+        /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
+         * then raising and lowering the Management Data Clock. A "0" is
+         * shifted out to the PHY by setting the MDIO bit to "0" and then
+         * raising and lowering the clock.
+         */
+        if (data & mask)
+            ctrl |= E1000_CTRL_MDIO;
+        else
+            ctrl &= ~E1000_CTRL_MDIO;
+
+        E1000_WRITE_REG(hw, CTRL, ctrl);
+        E1000_WRITE_FLUSH(hw);
+
+        usec_delay(10);
+
+        e1000_raise_mdi_clk(hw, &ctrl);
+        e1000_lower_mdi_clk(hw, &ctrl);
+
+        mask = mask >> 1;
+    }
+}
+
+/******************************************************************************
+* Shifts data bits in from the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Bits are shifted in in MSB to LSB order.
+******************************************************************************/
+static uint16_t
+e1000_shift_in_mdi_bits(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+    uint16_t data = 0;
+    uint8_t i;
+
+    /* In order to read a register from the PHY, we need to shift in a total
+     * of 18 bits from the PHY. The first two bit (turnaround) times are used
+     * to avoid contention on the MDIO pin when a read operation is performed.
+     * These two bits are ignored by us and thrown away. Bits are "shifted in"
+     * by raising the input to the Management Data Clock (setting the MDC bit),
+     * and then reading the value of the MDIO bit.
+     */
+    ctrl = E1000_READ_REG(hw, CTRL);
+
+    /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
+    ctrl &= ~E1000_CTRL_MDIO_DIR;
+    ctrl &= ~E1000_CTRL_MDIO;
+
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+    E1000_WRITE_FLUSH(hw);
+
+    /* Raise and Lower the clock before reading in the data. This accounts for
+     * the turnaround bits. The first clock occurred when we clocked out the
+     * last bit of the Register Address.
+     */
+    e1000_raise_mdi_clk(hw, &ctrl);
+    e1000_lower_mdi_clk(hw, &ctrl);
+
+    for (data = 0, i = 0; i < 16; i++) {
+        data = data << 1;
+        e1000_raise_mdi_clk(hw, &ctrl);
+        ctrl = E1000_READ_REG(hw, CTRL);
+        /* Check to see if we shifted in a "1". */
+        if (ctrl & E1000_CTRL_MDIO)
+            data |= 1;
+        e1000_lower_mdi_clk(hw, &ctrl);
+    }
+
+    e1000_raise_mdi_clk(hw, &ctrl);
+    e1000_lower_mdi_clk(hw, &ctrl);
+
+    return data;
+}
+
+int32_t
+e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
+{
+    uint32_t swfw_sync = 0;
+    uint32_t swmask = mask;
+    uint32_t fwmask = mask << 16;
+    int32_t timeout = 200;
+
+    DEBUGFUNC("e1000_swfw_sync_acquire");
+
+    if (hw->swfwhw_semaphore_present)
+        return e1000_get_software_flag(hw);
+
+    if (!hw->swfw_sync_present)
+        return e1000_get_hw_eeprom_semaphore(hw);
+
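+    /* fwmask is simply swmask shifted left by 16, so the firmware ownership
+     * bits are taken to sit in the upper 16 bits of SW_FW_SYNC, mirroring the
+     * software bits in the lower 16; the loop below waits until neither side
+     * owns the requested resource before claiming it.
+     */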
+    while (timeout) {
+        if (e1000_get_hw_eeprom_semaphore(hw))
+            return -E1000_ERR_SWFW_SYNC;
+
+        swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC);
+        if (!(swfw_sync & (fwmask | swmask))) {
+            break;
+        }
+
+        /* Firmware is currently using the resource (fwmask), or another
+         * software thread is currently using the resource (swmask). */
+        e1000_put_hw_eeprom_semaphore(hw);
+        msec_delay_irq(5);
+        timeout--;
+    }
+
+    if (!timeout) {
+        DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+        return -E1000_ERR_SWFW_SYNC;
+    }
+
+    swfw_sync |= swmask;
+    E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync);
+
+    e1000_put_hw_eeprom_semaphore(hw);
+    return E1000_SUCCESS;
+}
+
+void
+e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask)
+{
+    uint32_t swfw_sync;
+    uint32_t swmask = mask;
+
+    DEBUGFUNC("e1000_swfw_sync_release");
+
+    if (hw->swfwhw_semaphore_present) {
+        e1000_release_software_flag(hw);
+        return;
+    }
+
+    if (!hw->swfw_sync_present) {
+        e1000_put_hw_eeprom_semaphore(hw);
+        return;
+    }
+
+    /* if (e1000_get_hw_eeprom_semaphore(hw))
+     *    return -E1000_ERR_SWFW_SYNC; */
+    while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS)
+        ; /* empty */
+
+    swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC);
+    swfw_sync &= ~swmask;
+    E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync);
+
+    e1000_put_hw_eeprom_semaphore(hw);
+}
+
+/*****************************************************************************
+* Reads the value from a PHY register.  If the register is on a specific
+* non-zero page, that page is selected first.
+*
+* hw - Struct containing variables accessed by shared code
+* reg_addr - address of the PHY register to read
+******************************************************************************/
+int32_t
+e1000_read_phy_reg(struct e1000_hw *hw,
+                   uint32_t reg_addr,
+                   uint16_t *phy_data)
+{
+    uint32_t ret_val;
+    uint16_t swfw;
+
+    DEBUGFUNC("e1000_read_phy_reg");
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
+        swfw = E1000_SWFW_PHY1_SM;
+    } else {
+        swfw = E1000_SWFW_PHY0_SM;
+    }
+    if (e1000_swfw_sync_acquire(hw, swfw))
+        return -E1000_ERR_SWFW_SYNC;
+
+    if ((hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2) &&
+       (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+        ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+                                         (uint16_t)reg_addr);
+        if (ret_val) {
+            e1000_swfw_sync_release(hw, swfw);
+            return ret_val;
+        }
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
+            (hw->mac_type == e1000_80003es2lan)) {
+            /* Select Configuration Page */
+            if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+                ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
+                          (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT));
+            } else {
+                /* Use Alternative Page Select register to access
+                 * registers 30 and 31
+                 */
+                ret_val = e1000_write_phy_reg_ex(hw,
+                                                 GG82563_PHY_PAGE_SELECT_ALT,
+                          (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT));
+            }
+
+            if (ret_val) {
+                e1000_swfw_sync_release(hw, swfw);
+                return ret_val;
+            }
+        }
+    }
+
+    ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+                                    phy_data);
+
+    e1000_swfw_sync_release(hw, swfw);
+    return ret_val;
+}
+
+int32_t
+e1000_read_phy_reg_ex(struct e1000_hw *hw,
+                      uint32_t reg_addr,
+                      uint16_t *phy_data)
+{
+    uint32_t i;
+    uint32_t mdic = 0;
+    const uint32_t phy_addr = 1;
+
+    DEBUGFUNC("e1000_read_phy_reg_ex");
+
+    if (reg_addr > MAX_PHY_REG_ADDRESS) {
+        DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+        return -E1000_ERR_PARAM;
+    }
+
+    if (hw->mac_type > e1000_82543) {
+        /* Set up Op-code, Phy Address, and register address in the MDI
+         * Control register.  The MAC will take care of interfacing with the
+         * PHY to retrieve the desired data.
+         */
+        mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+                (phy_addr << E1000_MDIC_PHY_SHIFT) |
+                (E1000_MDIC_OP_READ));
+
+        E1000_WRITE_REG(hw, MDIC, mdic);
+
+        /* Poll the ready bit to see if the MDI read completed */
+        for (i = 0; i < 64; i++) {
+            usec_delay(50);
+            mdic = E1000_READ_REG(hw, MDIC);
+            if (mdic & E1000_MDIC_READY) break;
+        }
+        if (!(mdic & E1000_MDIC_READY)) {
+            DEBUGOUT("MDI Read did not complete\n");
+            return -E1000_ERR_PHY;
+        }
+        if (mdic & E1000_MDIC_ERROR) {
+            DEBUGOUT("MDI Error\n");
+            return -E1000_ERR_PHY;
+        }
+        *phy_data = (uint16_t) mdic;
+    } else {
+        /* We must first send a preamble through the MDIO pin to signal the
+         * beginning of an MII instruction.  This is done by sending 32
+         * consecutive "1" bits.
+         */
+        e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+        /* Now combine the next few fields that are required for a read
+         * operation.  We use this method instead of calling the
+         * e1000_shift_out_mdi_bits routine five different times. The format of
+         * an MII read instruction consists of a shift out of 14 bits and is
+         * defined as follows:
+         *    <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
+         * followed by a shift in of 18 bits.  The first two bits shifted in
+         * are TurnAround bits used to avoid contention on the MDIO pin when a
+         * READ operation is performed.  These two bits are thrown away,
+         * followed by a shift in of 16 bits which contain the desired data.
+         */
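+        /* With the usual MII encodings (PHY_SOF = 01b, PHY_OP_READ = 10b),
+         * the 14 bits shifted out below are, from MSB to LSB:
+         *    <SOF:2><Op Code:2><Phy Addr:5><Reg Addr:5>
+         * so, for instance, reading register 2 of PHY address 1 shifts out
+         * 01 10 00001 00010.
+         */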
+        mdic = ((reg_addr) | (phy_addr << 5) |
+                (PHY_OP_READ << 10) | (PHY_SOF << 12));
+
+        e1000_shift_out_mdi_bits(hw, mdic, 14);
+
+        /* Now that we've shifted out the read command to the MII, we need to
+         * "shift in" the 16-bit value (18 bits in total, including the two
+         * turnaround bits) of the requested PHY register.
+         */
+        *phy_data = e1000_shift_in_mdi_bits(hw);
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Writes a value to a PHY register
+*
+* hw - Struct containing variables accessed by shared code
+* reg_addr - address of the PHY register to write
+* data - data to write to the PHY
+******************************************************************************/
+int32_t
+e1000_write_phy_reg(struct e1000_hw *hw,
+                    uint32_t reg_addr,
+                    uint16_t phy_data)
+{
+    uint32_t ret_val;
+    uint16_t swfw;
+
+    DEBUGFUNC("e1000_write_phy_reg");
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
+        swfw = E1000_SWFW_PHY1_SM;
+    } else {
+        swfw = E1000_SWFW_PHY0_SM;
+    }
+    if (e1000_swfw_sync_acquire(hw, swfw))
+        return -E1000_ERR_SWFW_SYNC;
+
+    if ((hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2) &&
+       (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+        ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+                                         (uint16_t)reg_addr);
+        if (ret_val) {
+            e1000_swfw_sync_release(hw, swfw);
+            return ret_val;
+        }
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
+            (hw->mac_type == e1000_80003es2lan)) {
+            /* Select Configuration Page */
+            if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+                ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
+                          (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT));
+            } else {
+                /* Use Alternative Page Select register to access
+                 * registers 30 and 31
+                 */
+                ret_val = e1000_write_phy_reg_ex(hw,
+                                                 GG82563_PHY_PAGE_SELECT_ALT,
+                          (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT));
+            }
+
+            if (ret_val) {
+                e1000_swfw_sync_release(hw, swfw);
+                return ret_val;
+            }
+        }
+    }
+
+    ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+                                     phy_data);
+
+    e1000_swfw_sync_release(hw, swfw);
+    return ret_val;
+}
+
+int32_t
+e1000_write_phy_reg_ex(struct e1000_hw *hw,
+                    uint32_t reg_addr,
+                    uint16_t phy_data)
+{
+    uint32_t i;
+    uint32_t mdic = 0;
+    const uint32_t phy_addr = 1;
+
+    DEBUGFUNC("e1000_write_phy_reg_ex");
+
+    if (reg_addr > MAX_PHY_REG_ADDRESS) {
+        DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+        return -E1000_ERR_PARAM;
+    }
+
+    if (hw->mac_type > e1000_82543) {
+        /* Set up Op-code, Phy Address, register address, and data intended
+         * for the PHY register in the MDI Control register.  The MAC will take
+         * care of interfacing with the PHY to send the desired data.
+         */
+        mdic = (((uint32_t) phy_data) |
+                (reg_addr << E1000_MDIC_REG_SHIFT) |
+                (phy_addr << E1000_MDIC_PHY_SHIFT) |
+                (E1000_MDIC_OP_WRITE));
+
+        E1000_WRITE_REG(hw, MDIC, mdic);
+
+        /* Poll the ready bit to see if the MDI write completed */
+        for (i = 0; i < 641; i++) {
+            usec_delay(5);
+            mdic = E1000_READ_REG(hw, MDIC);
+            if (mdic & E1000_MDIC_READY) break;
+        }
+        if (!(mdic & E1000_MDIC_READY)) {
+            DEBUGOUT("MDI Write did not complete\n");
+            return -E1000_ERR_PHY;
+        }
+    } else {
+        /* We'll need to use the SW defined pins to shift the write command
+         * out to the PHY. We first send a preamble to the PHY to signal the
+         * beginning of the MII instruction.  This is done by sending 32
+         * consecutive "1" bits.
+         */
+        e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+        /* Now combine the remaining required fields that will indicate a
+         * write operation. We use this method instead of calling the
+         * e1000_shift_out_mdi_bits routine for each field in the command. The
+         * format of a MII write instruction is as follows:
+         * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+         */
+        mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
+                (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+        mdic <<= 16;
+        mdic |= (uint32_t) phy_data;
+
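+        /* The 32 bits shifted out below are therefore, from MSB to LSB:
+         *    <SOF:2><Op Code:2><Phy Addr:5><Reg Addr:5><Turnaround:2><Data:16>
+         * matching the write frame format described above.
+         */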
+        e1000_shift_out_mdi_bits(hw, mdic, 32);
+    }
+
+    return E1000_SUCCESS;
+}
+
+int32_t
+e1000_read_kmrn_reg(struct e1000_hw *hw,
+                    uint32_t reg_addr,
+                    uint16_t *data)
+{
+    uint32_t reg_val;
+    uint16_t swfw;
+    DEBUGFUNC("e1000_read_kmrn_reg");
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
+        swfw = E1000_SWFW_PHY1_SM;
+    } else {
+        swfw = E1000_SWFW_PHY0_SM;
+    }
+    if (e1000_swfw_sync_acquire(hw, swfw))
+        return -E1000_ERR_SWFW_SYNC;
+
+    /* Write register address */
+    reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
+              E1000_KUMCTRLSTA_OFFSET) |
+              E1000_KUMCTRLSTA_REN;
+    E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val);
+    usec_delay(2);
+
+    /* Read the data returned */
+    reg_val = E1000_READ_REG(hw, KUMCTRLSTA);
+    *data = (uint16_t)reg_val;
+
+    e1000_swfw_sync_release(hw, swfw);
+    return E1000_SUCCESS;
+}
+
+int32_t
+e1000_write_kmrn_reg(struct e1000_hw *hw,
+                     uint32_t reg_addr,
+                     uint16_t data)
+{
+    uint32_t reg_val;
+    uint16_t swfw;
+    DEBUGFUNC("e1000_write_kmrn_reg");
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
+        swfw = E1000_SWFW_PHY1_SM;
+    } else {
+        swfw = E1000_SWFW_PHY0_SM;
+    }
+    if (e1000_swfw_sync_acquire(hw, swfw))
+        return -E1000_ERR_SWFW_SYNC;
+
+    reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
+              E1000_KUMCTRLSTA_OFFSET) | data;
+    E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val);
+    usec_delay(2);
+
+    e1000_swfw_sync_release(hw, swfw);
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Returns the PHY to the power-on reset state
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+int32_t
+e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+    uint32_t ctrl, ctrl_ext;
+    uint32_t led_ctrl;
+    int32_t ret_val;
+    uint16_t swfw;
+
+    DEBUGFUNC("e1000_phy_hw_reset");
+
+    /* In the case of the PHY reset being blocked, it's not an error; we
+     * simply return success without performing the reset. */
+    ret_val = e1000_check_phy_reset_block(hw);
+    if (ret_val)
+        return E1000_SUCCESS;
+
+    DEBUGOUT("Resetting Phy...\n");
+
+    if (hw->mac_type > e1000_82543) {
+        if ((hw->mac_type == e1000_80003es2lan) &&
+            (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
+            swfw = E1000_SWFW_PHY1_SM;
+        } else {
+            swfw = E1000_SWFW_PHY0_SM;
+        }
+        if (e1000_swfw_sync_acquire(hw, swfw)) {
+            e1000_release_software_semaphore(hw);
+            return -E1000_ERR_SWFW_SYNC;
+        }
+        /* Read the device control register and assert the E1000_CTRL_PHY_RST
+         * bit. Then, take it out of reset.
+         * For pre-e1000_82571 hardware, we delay for 10ms between the assert
+         * and deassert.  For e1000_82571 hardware and later, we instead delay
+         * for 50us between and 10ms after the deassertion.
+         */
+        ctrl = E1000_READ_REG(hw, CTRL);
+        E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST);
+        E1000_WRITE_FLUSH(hw);
+
+        if (hw->mac_type < e1000_82571)
+            msec_delay(10);
+        else
+            usec_delay(100);
+
+        E1000_WRITE_REG(hw, CTRL, ctrl);
+        E1000_WRITE_FLUSH(hw);
+
+        if (hw->mac_type >= e1000_82571)
+            msec_delay_irq(10);
+        e1000_swfw_sync_release(hw, swfw);
+    } else {
+        /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
+         * bit to put the PHY into reset. Then, take it out of reset.
+         */
+        ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+        ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
+        ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
+        E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+        E1000_WRITE_FLUSH(hw);
+        msec_delay(10);
+        ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
+        E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+        E1000_WRITE_FLUSH(hw);
+    }
+    usec_delay(150);
+
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+        /* Configure activity LED after PHY reset */
+        led_ctrl = E1000_READ_REG(hw, LEDCTL);
+        led_ctrl &= IGP_ACTIVITY_LED_MASK;
+        led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+        E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
+    }
+
+    /* Wait for FW to finish PHY configuration. */
+    ret_val = e1000_get_phy_cfg_done(hw);
+    e1000_release_software_semaphore(hw);
+
+    if ((hw->mac_type == e1000_ich8lan) &&
+        (hw->phy_type == e1000_phy_igp_3)) {
+        ret_val = e1000_init_lcd_from_nvm(hw);
+        if (ret_val)
+            return ret_val;
+    }
+    return ret_val;
+}
+
+/******************************************************************************
+* Resets the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Sets bit 15 of the MII Control register
+******************************************************************************/
+int32_t
+e1000_phy_reset(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_phy_reset");
+
+    /* In the case of the PHY reset being blocked, it's not an error; we
+     * simply return success without performing the reset. */
+    ret_val = e1000_check_phy_reset_block(hw);
+    if (ret_val)
+        return E1000_SUCCESS;
+
+    switch (hw->mac_type) {
+    case e1000_82541_rev_2:
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_ich8lan:
+        ret_val = e1000_phy_hw_reset(hw);
+        if (ret_val)
+            return ret_val;
+
+        break;
+    default:
+        ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= MII_CR_RESET;
+        ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+
+        usec_delay(1);
+        break;
+    }
+
+    if (hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2)
+        e1000_phy_init_script(hw);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Work-around for 82566 power-down on D3 entry:
+* 1) disable gigabit link
+* 2) write VR power-down enable
+* 3) read it back
+* 4) if successful continue; otherwise issue an LCD reset and repeat
+*
+* hw - struct containing variables accessed by shared code
+******************************************************************************/
+void
+e1000_phy_powerdown_workaround(struct e1000_hw *hw)
+{
+    int32_t reg;
+    uint16_t phy_data;
+    int32_t retry = 0;
+
+    DEBUGFUNC("e1000_phy_powerdown_workaround");
+
+    if (hw->phy_type != e1000_phy_igp_3)
+        return;
+
+    do {
+        /* Disable link */
+        reg = E1000_READ_REG(hw, PHY_CTRL);
+        E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
+                        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+
+        /* Write VR power-down enable */
+        e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
+        e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data |
+                            IGP3_VR_CTRL_MODE_SHUT);
+
+        /* Read it back and test */
+        e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
+        if ((phy_data & IGP3_VR_CTRL_MODE_SHUT) || retry)
+            break;
+
+        /* Issue PHY reset and repeat at most one more time */
+        reg = E1000_READ_REG(hw, CTRL);
+        E1000_WRITE_REG(hw, CTRL, reg | E1000_CTRL_PHY_RST);
+        retry++;
+    } while (retry);
+}
+
+/******************************************************************************
+* Work-around for 82566 Kumeran PCS lock loss:
+* On a link status change (i.e. PCI reset, speed change), when link is up and
+* the speed is gigabit:
+* 0) if the workaround has been disabled, do nothing
+* 1) wait 1ms for Kumeran link to come up
+* 2) check Kumeran Diagnostic register PCS lock loss bit
+* 3) if not set the link is locked (all is good), otherwise...
+* 4) reset the PHY
+* 5) repeat up to 10 times
+* Note: this is only called for IGP3 copper when speed is 1gb.
+*
+* hw - struct containing variables accessed by shared code
+******************************************************************************/
+int32_t
+e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    int32_t reg;
+    int32_t cnt;
+    uint16_t phy_data;
+
+    if (hw->kmrn_lock_loss_workaround_disabled)
+        return E1000_SUCCESS;
+
+    /* Make sure link is up before proceeding.  If not, just return.
+     * Attempting this while the link is negotiating fouls up link
+     * stability. */
+    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+
+    if (phy_data & MII_SR_LINK_STATUS) {
+        for (cnt = 0; cnt < 10; cnt++) {
+            /* read once to clear */
+            ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
+            if (ret_val)
+                return ret_val;
+            /* and again to get new status */
+            ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            /* check for PCS lock */
+            if (!(phy_data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
+                return E1000_SUCCESS;
+
+            /* Issue PHY reset */
+            e1000_phy_hw_reset(hw);
+            msec_delay_irq(5);
+        }
+        /* Disable GigE link negotiation */
+        reg = E1000_READ_REG(hw, PHY_CTRL);
+        E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
+                        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+
+        /* unable to acquire PCS lock */
+        return E1000_ERR_PHY;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Probes the expected PHY address for known PHY IDs
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+int32_t
+e1000_detect_gig_phy(struct e1000_hw *hw)
+{
+    int32_t phy_init_status, ret_val;
+    uint16_t phy_id_high, phy_id_low;
+    boolean_t match = FALSE;
+
+    DEBUGFUNC("e1000_detect_gig_phy");
+
+    /* The 82571 firmware may still be configuring the PHY.  In this
+     * case, we cannot access the PHY until the configuration is done.  So
+     * we explicitly set the PHY values. */
+    if (hw->mac_type == e1000_82571 ||
+        hw->mac_type == e1000_82572) {
+        hw->phy_id = IGP01E1000_I_PHY_ID;
+        hw->phy_type = e1000_phy_igp_2;
+        return E1000_SUCCESS;
+    }
+
+    /* ESB-2 PHY reads require e1000_phy_gg82563 to be set because of a
+     * workaround that forces PHY page 0 to be set, or the reads fail.  The
+     * rest of the code in this routine uses e1000_read_phy_reg to read the
+     * PHY ID.  So for ESB-2 we need to have this set so our reads won't fail.
+     * If the attached PHY is not an e1000_phy_gg82563, the routines below
+     * will figure this out as well. */
+    if (hw->mac_type == e1000_80003es2lan)
+        hw->phy_type = e1000_phy_gg82563;
+
+    /* Read the PHY ID Registers to identify which PHY is onboard. */
+    ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
+    if (ret_val)
+        return ret_val;
+
+    hw->phy_id = (uint32_t) (phy_id_high << 16);
+    usec_delay(20);
+    ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
+    if (ret_val)
+        return ret_val;
+
+    hw->phy_id |= (uint32_t) (phy_id_low & PHY_REVISION_MASK);
+    hw->phy_revision = (uint32_t) phy_id_low & ~PHY_REVISION_MASK;
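+    /* At this point phy_id holds PHY_ID1 in its upper 16 bits and PHY_ID2,
+     * with the revision bits masked off, in its lower 16 bits, while the
+     * revision field is kept separately in phy_revision; the switch below
+     * matches this composite ID against the PHYs known for each MAC type.
+     */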
+
+    switch (hw->mac_type) {
+    case e1000_82543:
+        if (hw->phy_id == M88E1000_E_PHY_ID) match = TRUE;
+        break;
+    case e1000_82544:
+        if (hw->phy_id == M88E1000_I_PHY_ID) match = TRUE;
+        break;
+    case e1000_82540:
+    case e1000_82545:
+    case e1000_82545_rev_3:
+    case e1000_82546:
+    case e1000_82546_rev_3:
+        if (hw->phy_id == M88E1011_I_PHY_ID) match = TRUE;
+        break;
+    case e1000_82541:
+    case e1000_82541_rev_2:
+    case e1000_82547:
+    case e1000_82547_rev_2:
+        if (hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE;
+        break;
+    case e1000_82573:
+        if (hw->phy_id == M88E1111_I_PHY_ID) match = TRUE;
+        break;
+    case e1000_80003es2lan:
+        if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE;
+        break;
+    case e1000_ich8lan:
+        if (hw->phy_id == IGP03E1000_E_PHY_ID) match = TRUE;
+        if (hw->phy_id == IFE_E_PHY_ID) match = TRUE;
+        if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = TRUE;
+        if (hw->phy_id == IFE_C_E_PHY_ID) match = TRUE;
+        break;
+    default:
+        DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
+        return -E1000_ERR_CONFIG;
+    }
+    phy_init_status = e1000_set_phy_type(hw);
+
+    if ((match) && (phy_init_status == E1000_SUCCESS)) {
+        DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
+        return E1000_SUCCESS;
+    }
+    DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
+    return -E1000_ERR_PHY;
+}
+
+/******************************************************************************
+* Resets the PHY's DSP
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static int32_t
+e1000_phy_reset_dsp(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    DEBUGFUNC("e1000_phy_reset_dsp");
+
+    do {
+        if (hw->phy_type != e1000_phy_gg82563) {
+            ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
+            if (ret_val) break;
+        }
+        ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
+        if (ret_val) break;
+        ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
+        if (ret_val) break;
+        ret_val = E1000_SUCCESS;
+    } while (0);
+
+    return ret_val;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for igp PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+int32_t
+e1000_phy_igp_get_info(struct e1000_hw *hw,
+                       struct e1000_phy_info *phy_info)
+{
+    int32_t ret_val;
+    uint16_t phy_data, polarity, min_length, max_length, average;
+
+    DEBUGFUNC("e1000_phy_igp_get_info");
+
+    /* The downshift status is checked only once, after link is established,
+     * and it is stored in the hw->speed_downgraded parameter. */
+    phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+
+    /* IGP01E1000 does not need to support extended 10Base-T distance. */
+    phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+    /* IGP01E1000 always corrects polarity reversal */
+    phy_info->polarity_correction = e1000_polarity_reversal_enabled;
+
+    /* Check polarity status */
+    ret_val = e1000_check_polarity(hw, &polarity);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->cable_polarity = polarity;
+
+    ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->mdix_mode = (phy_data & IGP01E1000_PSSR_MDIX) >>
+                          IGP01E1000_PSSR_MDIX_SHIFT;
+
+    if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+       IGP01E1000_PSSR_SPEED_1000MBPS) {
+        /* Local/Remote Receiver Information are only valid at 1000 Mbps */
+        ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+                             SR_1000T_LOCAL_RX_STATUS_SHIFT;
+        phy_info->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+                              SR_1000T_REMOTE_RX_STATUS_SHIFT;
+
+        /* Get cable length */
+        ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+        if (ret_val)
+            return ret_val;
+
+        /* Translate to old method */
+        average = (max_length + min_length) / 2;
+
+        if (average <= e1000_igp_cable_length_50)
+            phy_info->cable_length = e1000_cable_length_50;
+        else if (average <= e1000_igp_cable_length_80)
+            phy_info->cable_length = e1000_cable_length_50_80;
+        else if (average <= e1000_igp_cable_length_110)
+            phy_info->cable_length = e1000_cable_length_80_110;
+        else if (average <= e1000_igp_cable_length_140)
+            phy_info->cable_length = e1000_cable_length_110_140;
+        else
+            phy_info->cable_length = e1000_cable_length_140;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for ife PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+int32_t
+e1000_phy_ife_get_info(struct e1000_hw *hw,
+                       struct e1000_phy_info *phy_info)
+{
+    int32_t ret_val;
+    uint16_t phy_data, polarity;
+
+    DEBUGFUNC("e1000_phy_ife_get_info");
+
+    phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+    phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+    ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
+    if (ret_val)
+        return ret_val;
+    phy_info->polarity_correction =
+                        (phy_data & IFE_PSC_AUTO_POLARITY_DISABLE) >>
+                        IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT;
+
+    if (phy_info->polarity_correction == e1000_polarity_reversal_enabled) {
+        ret_val = e1000_check_polarity(hw, &polarity);
+        if (ret_val)
+            return ret_val;
+    } else {
+        /* Polarity is forced. */
+        polarity = (phy_data & IFE_PSC_FORCE_POLARITY) >>
+                       IFE_PSC_FORCE_POLARITY_SHIFT;
+    }
+    phy_info->cable_polarity = polarity;
+
+    ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->mdix_mode =
+                     (phy_data & (IFE_PMC_AUTO_MDIX | IFE_PMC_FORCE_MDIX)) >>
+                     IFE_PMC_MDIX_MODE_SHIFT;
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for m88 PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+int32_t
+e1000_phy_m88_get_info(struct e1000_hw *hw,
+                       struct e1000_phy_info *phy_info)
+{
+    int32_t ret_val;
+    uint16_t phy_data, polarity;
+
+    DEBUGFUNC("e1000_phy_m88_get_info");
+
+    /* The downshift status is checked only once, after link is established,
+     * and it is stored in the hw->speed_downgraded parameter. */
+    phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->extended_10bt_distance =
+        (phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
+        M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT;
+    phy_info->polarity_correction =
+        (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
+        M88E1000_PSCR_POLARITY_REVERSAL_SHIFT;
+
+    /* Check polarity status */
+    ret_val = e1000_check_polarity(hw, &polarity);
+    if (ret_val)
+        return ret_val;
+    phy_info->cable_polarity = polarity;
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->mdix_mode = (phy_data & M88E1000_PSSR_MDIX) >>
+                          M88E1000_PSSR_MDIX_SHIFT;
+
+    if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+        /* Cable Length Estimation and Local/Remote Receiver Information
+         * are only valid at 1000 Mbps.
+         */
+        if (hw->phy_type != e1000_phy_gg82563) {
+            phy_info->cable_length = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+                                      M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+        } else {
+            ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_info->cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH;
+        }
+
+        ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+                             SR_1000T_LOCAL_RX_STATUS_SHIFT;
+
+        phy_info->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+                              SR_1000T_REMOTE_RX_STATUS_SHIFT;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+int32_t
+e1000_phy_get_info(struct e1000_hw *hw,
+                   struct e1000_phy_info *phy_info)
+{
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_phy_get_info");
+
+    phy_info->cable_length = e1000_cable_length_undefined;
+    phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
+    phy_info->cable_polarity = e1000_rev_polarity_undefined;
+    phy_info->downshift = e1000_downshift_undefined;
+    phy_info->polarity_correction = e1000_polarity_reversal_undefined;
+    phy_info->mdix_mode = e1000_auto_x_mode_undefined;
+    phy_info->local_rx = e1000_1000t_rx_status_undefined;
+    phy_info->remote_rx = e1000_1000t_rx_status_undefined;
+
+    if (hw->media_type != e1000_media_type_copper) {
+        DEBUGOUT("PHY info is only valid for copper media\n");
+        return -E1000_ERR_CONFIG;
+    }
+
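+    /* The PHY status register latches link-down events, so it is read twice:
+     * the first read returns the latched value and the second reflects the
+     * current link state. */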
+    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
+        DEBUGOUT("PHY info is only valid if link is up\n");
+        return -E1000_ERR_CONFIG;
+    }
+
+    if (hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2)
+        return e1000_phy_igp_get_info(hw, phy_info);
+    else if (hw->phy_type == e1000_phy_ife)
+        return e1000_phy_ife_get_info(hw, phy_info);
+    else
+        return e1000_phy_m88_get_info(hw, phy_info);
+}
+
+int32_t
+e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+    DEBUGFUNC("e1000_validate_mdi_settings");
+
+    if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
+        DEBUGOUT("Invalid MDI setting detected\n");
+        hw->mdix = 1;
+        return -E1000_ERR_CONFIG;
+    }
+    return E1000_SUCCESS;
+}
+
+
+/******************************************************************************
+ * Sets up eeprom variables in the hw struct.  Must be called after mac_type
+ * is configured.  Additionally, if this is ICH8, the flash controller GbE
+ * registers must be mapped, or this will crash.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_init_eeprom_params(struct e1000_hw *hw)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    uint32_t eecd = E1000_READ_REG(hw, EECD);
+    int32_t ret_val = E1000_SUCCESS;
+    uint16_t eeprom_size;
+
+    DEBUGFUNC("e1000_init_eeprom_params");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+    case e1000_82544:
+        eeprom->type = e1000_eeprom_microwire;
+        eeprom->word_size = 64;
+        eeprom->opcode_bits = 3;
+        eeprom->address_bits = 6;
+        eeprom->delay_usec = 50;
+        eeprom->use_eerd = FALSE;
+        eeprom->use_eewr = FALSE;
+        break;
+    case e1000_82540:
+    case e1000_82545:
+    case e1000_82545_rev_3:
+    case e1000_82546:
+    case e1000_82546_rev_3:
+        eeprom->type = e1000_eeprom_microwire;
+        eeprom->opcode_bits = 3;
+        eeprom->delay_usec = 50;
+        if (eecd & E1000_EECD_SIZE) {
+            eeprom->word_size = 256;
+            eeprom->address_bits = 8;
+        } else {
+            eeprom->word_size = 64;
+            eeprom->address_bits = 6;
+        }
+        eeprom->use_eerd = FALSE;
+        eeprom->use_eewr = FALSE;
+        break;
+    case e1000_82541:
+    case e1000_82541_rev_2:
+    case e1000_82547:
+    case e1000_82547_rev_2:
+        if (eecd & E1000_EECD_TYPE) {
+            eeprom->type = e1000_eeprom_spi;
+            eeprom->opcode_bits = 8;
+            eeprom->delay_usec = 1;
+            if (eecd & E1000_EECD_ADDR_BITS) {
+                eeprom->page_size = 32;
+                eeprom->address_bits = 16;
+            } else {
+                eeprom->page_size = 8;
+                eeprom->address_bits = 8;
+            }
+        } else {
+            eeprom->type = e1000_eeprom_microwire;
+            eeprom->opcode_bits = 3;
+            eeprom->delay_usec = 50;
+            if (eecd & E1000_EECD_ADDR_BITS) {
+                eeprom->word_size = 256;
+                eeprom->address_bits = 8;
+            } else {
+                eeprom->word_size = 64;
+                eeprom->address_bits = 6;
+            }
+        }
+        eeprom->use_eerd = FALSE;
+        eeprom->use_eewr = FALSE;
+        break;
+    case e1000_82571:
+    case e1000_82572:
+        eeprom->type = e1000_eeprom_spi;
+        eeprom->opcode_bits = 8;
+        eeprom->delay_usec = 1;
+        if (eecd & E1000_EECD_ADDR_BITS) {
+            eeprom->page_size = 32;
+            eeprom->address_bits = 16;
+        } else {
+            eeprom->page_size = 8;
+            eeprom->address_bits = 8;
+        }
+        eeprom->use_eerd = FALSE;
+        eeprom->use_eewr = FALSE;
+        break;
+    case e1000_82573:
+        eeprom->type = e1000_eeprom_spi;
+        eeprom->opcode_bits = 8;
+        eeprom->delay_usec = 1;
+        if (eecd & E1000_EECD_ADDR_BITS) {
+            eeprom->page_size = 32;
+            eeprom->address_bits = 16;
+        } else {
+            eeprom->page_size = 8;
+            eeprom->address_bits = 8;
+        }
+        eeprom->use_eerd = TRUE;
+        eeprom->use_eewr = TRUE;
+        if (e1000_is_onboard_nvm_eeprom(hw) == FALSE) {
+            eeprom->type = e1000_eeprom_flash;
+            eeprom->word_size = 2048;
+
+            /* Ensure that the Autonomous FLASH update bit is cleared due to
+             * Flash update issue on parts which use a FLASH for NVM. */
+            eecd &= ~E1000_EECD_AUPDEN;
+            E1000_WRITE_REG(hw, EECD, eecd);
+        }
+        break;
+    case e1000_80003es2lan:
+        eeprom->type = e1000_eeprom_spi;
+        eeprom->opcode_bits = 8;
+        eeprom->delay_usec = 1;
+        if (eecd & E1000_EECD_ADDR_BITS) {
+            eeprom->page_size = 32;
+            eeprom->address_bits = 16;
+        } else {
+            eeprom->page_size = 8;
+            eeprom->address_bits = 8;
+        }
+        eeprom->use_eerd = TRUE;
+        eeprom->use_eewr = FALSE;
+        break;
+    case e1000_ich8lan:
+    {
+        int32_t  i = 0;
+        uint32_t flash_size = E1000_READ_ICH8_REG(hw, ICH8_FLASH_GFPREG);
+
+        eeprom->type = e1000_eeprom_ich8;
+        eeprom->use_eerd = FALSE;
+        eeprom->use_eewr = FALSE;
+        eeprom->word_size = E1000_SHADOW_RAM_WORDS;
+
+        /* Initialize the shadow RAM structure as unmodified, but don't load
+         * it from NVM so as to save time during driver init */
+        if (hw->eeprom_shadow_ram != NULL) {
+            for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+                hw->eeprom_shadow_ram[i].modified = FALSE;
+                hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
+            }
+        }
+
+        hw->flash_base_addr = (flash_size & ICH8_GFPREG_BASE_MASK) *
+                              ICH8_FLASH_SECTOR_SIZE;
+
+        hw->flash_bank_size = ((flash_size >> 16) & ICH8_GFPREG_BASE_MASK) + 1;
+        hw->flash_bank_size -= (flash_size & ICH8_GFPREG_BASE_MASK);
+        hw->flash_bank_size *= ICH8_FLASH_SECTOR_SIZE;
+        hw->flash_bank_size /= 2 * sizeof(uint16_t);
+
+        break;
+    }
+    default:
+        break;
+    }
+
+    if (eeprom->type == e1000_eeprom_spi) {
+        /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
+         * 32KB (incremented by powers of 2).
+         */
+        if (hw->mac_type <= e1000_82547_rev_2) {
+            /* Set to default value for initial eeprom read. */
+            eeprom->word_size = 64;
+            ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
+            if (ret_val)
+                return ret_val;
+            eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
+            /* 256B eeprom size was not supported in earlier hardware, so we
+             * bump eeprom_size up one to ensure that "1" (which maps to 256B)
+             * is never the result used in the shifting logic below. */
+            if (eeprom_size)
+                eeprom_size++;
+        } else {
+            eeprom_size = (uint16_t)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+                          E1000_EECD_SIZE_EX_SHIFT);
+        }
+
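+        /* word_size doubles with each increment of the encoded size computed
+         * above; an encoded size of 0 selects the smallest supported count. */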
+        eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
+    }
+    return ret_val;
+}
+
+/******************************************************************************
+ * Raises the EEPROM's clock input.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eecd - EECD's current value
+ *****************************************************************************/
+static void
+e1000_raise_ee_clk(struct e1000_hw *hw,
+                   uint32_t *eecd)
+{
+    /* Raise the clock input to the EEPROM (by setting the SK bit), and then
+     * wait <delay> microseconds.
+     */
+    *eecd = *eecd | E1000_EECD_SK;
+    E1000_WRITE_REG(hw, EECD, *eecd);
+    E1000_WRITE_FLUSH(hw);
+    usec_delay(hw->eeprom.delay_usec);
+}
+
+/******************************************************************************
+ * Lowers the EEPROM's clock input.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eecd - EECD's current value
+ *****************************************************************************/
+static void
+e1000_lower_ee_clk(struct e1000_hw *hw,
+                   uint32_t *eecd)
+{
+    /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
+     * wait <delay> microseconds.
+     */
+    *eecd = *eecd & ~E1000_EECD_SK;
+    E1000_WRITE_REG(hw, EECD, *eecd);
+    E1000_WRITE_FLUSH(hw);
+    usec_delay(hw->eeprom.delay_usec);
+}
+
+/******************************************************************************
+ * Shift data bits out to the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * data - data to send to the EEPROM
+ * count - number of bits to shift out
+ *****************************************************************************/
+static void
+e1000_shift_out_ee_bits(struct e1000_hw *hw,
+                        uint16_t data,
+                        uint16_t count)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    uint32_t eecd;
+    uint32_t mask;
+
+    /* We need to shift "count" bits out to the EEPROM. So, value in the
+     * "data" parameter will be shifted out to the EEPROM one bit at a time.
+     * In order to do this, "data" must be broken down into bits.
+     */
+    mask = 0x01 << (count - 1);
+    eecd = E1000_READ_REG(hw, EECD);
+    if (eeprom->type == e1000_eeprom_microwire) {
+        eecd &= ~E1000_EECD_DO;
+    } else if (eeprom->type == e1000_eeprom_spi) {
+        eecd |= E1000_EECD_DO;
+    }
+    do {
+        /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
+         * and then raising and then lowering the clock (the SK bit controls
+         * the clock input to the EEPROM).  A "0" is shifted out to the EEPROM
+         * by setting "DI" to "0" and then raising and then lowering the clock.
+         */
+        eecd &= ~E1000_EECD_DI;
+
+        if (data & mask)
+            eecd |= E1000_EECD_DI;
+
+        E1000_WRITE_REG(hw, EECD, eecd);
+        E1000_WRITE_FLUSH(hw);
+
+        usec_delay(eeprom->delay_usec);
+
+        e1000_raise_ee_clk(hw, &eecd);
+        e1000_lower_ee_clk(hw, &eecd);
+
+        mask = mask >> 1;
+
+    } while (mask);
+
+    /* We leave the "DI" bit set to "0" when we leave this routine. */
+    eecd &= ~E1000_EECD_DI;
+    E1000_WRITE_REG(hw, EECD, eecd);
+}
+
+/******************************************************************************
+ * Shift data bits in from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static uint16_t
+e1000_shift_in_ee_bits(struct e1000_hw *hw,
+                       uint16_t count)
+{
+    uint32_t eecd;
+    uint32_t i;
+    uint16_t data;
+
+    /* In order to read a register from the EEPROM, we need to shift 'count'
+     * bits in from the EEPROM. Bits are "shifted in" by raising the clock
+     * input to the EEPROM (setting the SK bit), and then reading the value of
+     * the "DO" bit.  During this "shifting in" process the "DI" bit should
+     * always be clear.
+     */
+
+    eecd = E1000_READ_REG(hw, EECD);
+
+    eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+    data = 0;
+
+    for (i = 0; i < count; i++) {
+        data = data << 1;
+        e1000_raise_ee_clk(hw, &eecd);
+
+        eecd = E1000_READ_REG(hw, EECD);
+
+        eecd &= ~(E1000_EECD_DI);
+        if (eecd & E1000_EECD_DO)
+            data |= 1;
+
+        e1000_lower_ee_clk(hw, &eecd);
+    }
+
+    return data;
+}
+
+/******************************************************************************
+ * Prepares EEPROM for access
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Acquires hardware/software control of the EEPROM and prepares the clock,
+ * data-in, and chip-select lines for the attached EEPROM type. This
+ * function should be called before issuing a command to the EEPROM.
+ *****************************************************************************/
+static int32_t
+e1000_acquire_eeprom(struct e1000_hw *hw)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    uint32_t eecd, i=0;
+
+    DEBUGFUNC("e1000_acquire_eeprom");
+
+    if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
+        return -E1000_ERR_SWFW_SYNC;
+    eecd = E1000_READ_REG(hw, EECD);
+
+    if (hw->mac_type != e1000_82573) {
+        /* Request EEPROM Access */
+        if (hw->mac_type > e1000_82544) {
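+            /* Set REQ and wait for the hardware to assert GNT; give up
+             * after E1000_EEPROM_GRANT_ATTEMPTS polls of 5 usec each. */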
+            eecd |= E1000_EECD_REQ;
+            E1000_WRITE_REG(hw, EECD, eecd);
+            eecd = E1000_READ_REG(hw, EECD);
+            while ((!(eecd & E1000_EECD_GNT)) &&
+                  (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
+                i++;
+                usec_delay(5);
+                eecd = E1000_READ_REG(hw, EECD);
+            }
+            if (!(eecd & E1000_EECD_GNT)) {
+                eecd &= ~E1000_EECD_REQ;
+                E1000_WRITE_REG(hw, EECD, eecd);
+                DEBUGOUT("Could not acquire EEPROM grant\n");
+                e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+                return -E1000_ERR_EEPROM;
+            }
+        }
+    }
+
+    /* Setup EEPROM for Read/Write */
+
+    if (eeprom->type == e1000_eeprom_microwire) {
+        /* Clear SK and DI */
+        eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+        E1000_WRITE_REG(hw, EECD, eecd);
+
+        /* Set CS */
+        eecd |= E1000_EECD_CS;
+        E1000_WRITE_REG(hw, EECD, eecd);
+    } else if (eeprom->type == e1000_eeprom_spi) {
+        /* Clear SK and CS */
+        eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+        E1000_WRITE_REG(hw, EECD, eecd);
+        usec_delay(1);
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Returns EEPROM to a "standby" state
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+e1000_standby_eeprom(struct e1000_hw *hw)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    uint32_t eecd;
+
+    eecd = E1000_READ_REG(hw, EECD);
+
+    if (eeprom->type == e1000_eeprom_microwire) {
+        eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+        E1000_WRITE_REG(hw, EECD, eecd);
+        E1000_WRITE_FLUSH(hw);
+        usec_delay(eeprom->delay_usec);
+
+        /* Clock high */
+        eecd |= E1000_EECD_SK;
+        E1000_WRITE_REG(hw, EECD, eecd);
+        E1000_WRITE_FLUSH(hw);
+        usec_delay(eeprom->delay_usec);
+
+        /* Select EEPROM */
+        eecd |= E1000_EECD_CS;
+        E1000_WRITE_REG(hw, EECD, eecd);
+        E1000_WRITE_FLUSH(hw);
+        usec_delay(eeprom->delay_usec);
+
+        /* Clock low */
+        eecd &= ~E1000_EECD_SK;
+        E1000_WRITE_REG(hw, EECD, eecd);
+        E1000_WRITE_FLUSH(hw);
+        usec_delay(eeprom->delay_usec);
+    } else if (eeprom->type == e1000_eeprom_spi) {
+        /* Toggle CS to flush commands */
+        eecd |= E1000_EECD_CS;
+        E1000_WRITE_REG(hw, EECD, eecd);
+        E1000_WRITE_FLUSH(hw);
+        usec_delay(eeprom->delay_usec);
+        eecd &= ~E1000_EECD_CS;
+        E1000_WRITE_REG(hw, EECD, eecd);
+        E1000_WRITE_FLUSH(hw);
+        usec_delay(eeprom->delay_usec);
+    }
+}
+
+/******************************************************************************
+ * Terminates a command by deasserting the EEPROM's chip select pin and
+ * releases hardware/software access to the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+e1000_release_eeprom(struct e1000_hw *hw)
+{
+    uint32_t eecd;
+
+    DEBUGFUNC("e1000_release_eeprom");
+
+    eecd = E1000_READ_REG(hw, EECD);
+
+    if (hw->eeprom.type == e1000_eeprom_spi) {
+        eecd |= E1000_EECD_CS;  /* Pull CS high */
+        eecd &= ~E1000_EECD_SK; /* Lower SCK */
+
+        E1000_WRITE_REG(hw, EECD, eecd);
+
+        usec_delay(hw->eeprom.delay_usec);
+    } else if (hw->eeprom.type == e1000_eeprom_microwire) {
+        /* cleanup eeprom */
+
+        /* CS on Microwire is active-high */
+        eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+
+        E1000_WRITE_REG(hw, EECD, eecd);
+
+        /* Rising edge of clock */
+        eecd |= E1000_EECD_SK;
+        E1000_WRITE_REG(hw, EECD, eecd);
+        E1000_WRITE_FLUSH(hw);
+        usec_delay(hw->eeprom.delay_usec);
+
+        /* Falling edge of clock */
+        eecd &= ~E1000_EECD_SK;
+        E1000_WRITE_REG(hw, EECD, eecd);
+        E1000_WRITE_FLUSH(hw);
+        usec_delay(hw->eeprom.delay_usec);
+    }
+
+    /* Stop requesting EEPROM access */
+    if (hw->mac_type > e1000_82544) {
+        eecd &= ~E1000_EECD_REQ;
+        E1000_WRITE_REG(hw, EECD, eecd);
+    }
+
+    e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+}
+
+/******************************************************************************
+ * Waits for the SPI EEPROM to become ready to accept a new command.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_spi_eeprom_ready(struct e1000_hw *hw)
+{
+    uint16_t retry_count = 0;
+    uint8_t spi_stat_reg;
+
+    DEBUGFUNC("e1000_spi_eeprom_ready");
+
+    /* Read "Status Register" repeatedly until the LSB is cleared.  The
+     * EEPROM will signal that the command has been completed by clearing
+     * bit 0 of the internal status register.  If it's not cleared within
+     * 5 milliseconds, then error out.
+     */
+    retry_count = 0;
+    do {
+        e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
+                                hw->eeprom.opcode_bits);
+        spi_stat_reg = (uint8_t)e1000_shift_in_ee_bits(hw, 8);
+        if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
+            break;
+
+        usec_delay(5);
+        retry_count += 5;
+
+        e1000_standby_eeprom(hw);
+    } while (retry_count < EEPROM_MAX_RETRY_SPI);
+
+    /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
+     * only 0-5mSec on 5V devices)
+     */
+    if (retry_count >= EEPROM_MAX_RETRY_SPI) {
+        DEBUGOUT("SPI EEPROM Status error\n");
+        return -E1000_ERR_EEPROM;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Reads one or more 16 bit words from the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of  word in the EEPROM to read
+ * data - word read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+int32_t
+e1000_read_eeprom(struct e1000_hw *hw,
+                  uint16_t offset,
+                  uint16_t words,
+                  uint16_t *data)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    uint32_t i = 0;
+    int32_t ret_val;
+
+    DEBUGFUNC("e1000_read_eeprom");
+
+    /* A check for invalid values:  offset too large, too many words, and not
+     * enough words.
+     */
+    if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+       (words == 0)) {
+        DEBUGOUT("\"words\" parameter out of bounds\n");
+        return -E1000_ERR_EEPROM;
+    }
+
+    /* FLASH reads without acquiring the semaphore are safe */
+    if (e1000_is_onboard_nvm_eeprom(hw) == TRUE &&
+        hw->eeprom.use_eerd == FALSE) {
+        switch (hw->mac_type) {
+        case e1000_80003es2lan:
+            break;
+        default:
+            /* Prepare the EEPROM for reading  */
+            if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+                return -E1000_ERR_EEPROM;
+            break;
+        }
+    }
+
+    if (eeprom->use_eerd == TRUE) {
+        ret_val = e1000_read_eeprom_eerd(hw, offset, words, data);
+        if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
+            (hw->mac_type != e1000_82573))
+            e1000_release_eeprom(hw);
+        return ret_val;
+    }
+
+    if (eeprom->type == e1000_eeprom_ich8)
+        return e1000_read_eeprom_ich8(hw, offset, words, data);
+
+    if (eeprom->type == e1000_eeprom_spi) {
+        uint16_t word_in;
+        uint8_t read_opcode = EEPROM_READ_OPCODE_SPI;
+
+        if (e1000_spi_eeprom_ready(hw)) {
+            e1000_release_eeprom(hw);
+            return -E1000_ERR_EEPROM;
+        }
+
+        e1000_standby_eeprom(hw);
+
+        /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+        if ((eeprom->address_bits == 8) && (offset >= 128))
+            read_opcode |= EEPROM_A8_OPCODE_SPI;
+
+        /* Send the READ command (opcode + addr)  */
+        e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
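+        /* SPI parts are byte addressed, so the word offset is doubled to
+         * form the byte address that follows the opcode. */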
+        e1000_shift_out_ee_bits(hw, (uint16_t)(offset*2), eeprom->address_bits);
+
+        /* Read the data.  The address of the eeprom internally increments with
+         * each byte (spi) being read, saving on the overhead of eeprom setup
+         * and tear-down.  The address counter will roll over if reading beyond
+         * the size of the eeprom, thus allowing the entire memory to be read
+         * starting from any offset. */
+        for (i = 0; i < words; i++) {
+            word_in = e1000_shift_in_ee_bits(hw, 16);
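+            /* Bits arrive most-significant first; swap the two bytes so the
+             * word matches the layout the callers expect. */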
+            data[i] = (word_in >> 8) | (word_in << 8);
+        }
+    } else if (eeprom->type == e1000_eeprom_microwire) {
+        for (i = 0; i < words; i++) {
+            /* Send the READ command (opcode + addr)  */
+            e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE,
+                                    eeprom->opcode_bits);
+            e1000_shift_out_ee_bits(hw, (uint16_t)(offset + i),
+                                    eeprom->address_bits);
+
+            /* Read the data.  For microwire, each word requires the overhead
+             * of eeprom setup and tear-down. */
+            data[i] = e1000_shift_in_ee_bits(hw, 16);
+            e1000_standby_eeprom(hw);
+        }
+    }
+
+    /* End this read operation */
+    e1000_release_eeprom(hw);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Reads 16 bit words from the EEPROM using the EERD register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of  word in the EEPROM to read
+ * data - word read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+int32_t
+e1000_read_eeprom_eerd(struct e1000_hw *hw,
+                  uint16_t offset,
+                  uint16_t words,
+                  uint16_t *data)
+{
+    uint32_t i, eerd = 0;
+    int32_t error = 0;
+
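+    /* For each word: program the address and START bit into EERD, poll for
+     * the DONE bit, then read the word back from the EERD data field. */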
+    for (i = 0; i < words; i++) {
+        eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) +
+                         E1000_EEPROM_RW_REG_START;
+
+        E1000_WRITE_REG(hw, EERD, eerd);
+        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ);
+
+        if (error) {
+            break;
+        }
+        data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA);
+
+    }
+
+    return error;
+}
+
+/******************************************************************************
+ * Writes 16 bit words to the EEPROM using the EEWR register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * data - pointer to array of 16 bit words to be written
+ * words - number of words to write
+ *****************************************************************************/
+int32_t
+e1000_write_eeprom_eewr(struct e1000_hw *hw,
+                   uint16_t offset,
+                   uint16_t words,
+                   uint16_t *data)
+{
+    uint32_t    register_value = 0;
+    uint32_t    i              = 0;
+    int32_t     error          = 0;
+
+    if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
+        return -E1000_ERR_SWFW_SYNC;
+
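+    /* For each word: wait for EEWR to go idle, program the data, address and
+     * START bit, then poll for the DONE bit before moving to the next word. */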
+    for (i = 0; i < words; i++) {
+        register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
+                         ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
+                         E1000_EEPROM_RW_REG_START;
+
+        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
+        if (error) {
+            break;
+        }
+
+        E1000_WRITE_REG(hw, EEWR, register_value);
+
+        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
+
+        if (error) {
+            break;
+        }
+    }
+
+    e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+    return error;
+}
+
+/******************************************************************************
+ * Polls the DONE bit of the EERD or EEWR register to determine when an
+ * EEPROM read or write has completed.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eerd - E1000_EEPROM_POLL_READ or E1000_EEPROM_POLL_WRITE
+ *****************************************************************************/
+int32_t
+e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
+{
+    uint32_t attempts = 100000;
+    uint32_t i, reg = 0;
+    int32_t done = E1000_ERR_EEPROM;
+
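+    /* Poll every 5 microseconds, giving a worst-case wait of roughly half a
+     * second (100000 * 5 usec) before reporting a timeout. */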
+    for (i = 0; i < attempts; i++) {
+        if (eerd == E1000_EEPROM_POLL_READ)
+            reg = E1000_READ_REG(hw, EERD);
+        else
+            reg = E1000_READ_REG(hw, EEWR);
+
+        if (reg & E1000_EEPROM_RW_REG_DONE) {
+            done = E1000_SUCCESS;
+            break;
+        }
+        usec_delay(5);
+    }
+
+    return done;
+}
+
+/***************************************************************************
+* Description:     Determines if the onboard NVM is FLASH or EEPROM.
+*
+* hw - Struct containing variables accessed by shared code
+****************************************************************************/
+boolean_t
+e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
+{
+    uint32_t eecd = 0;
+
+    DEBUGFUNC("e1000_is_onboard_nvm_eeprom");
+
+    if (hw->mac_type == e1000_ich8lan)
+        return FALSE;
+
+    if (hw->mac_type == e1000_82573) {
+        eecd = E1000_READ_REG(hw, EECD);
+
+        /* Isolate bits 15 & 16 */
+        eecd = ((eecd >> 15) & 0x03);
+
+        /* If both bits are set, device is Flash type */
+        if (eecd == 0x03) {
+            return FALSE;
+        }
+    }
+    return TRUE;
+}
+
+/******************************************************************************
+ * Verifies that the EEPROM has a valid checksum
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Reads the first 64 16 bit words of the EEPROM and sums the values read.
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * valid.
+ *****************************************************************************/
+int32_t
+e1000_validate_eeprom_checksum(struct e1000_hw *hw)
+{
+    uint16_t checksum = 0;
+    uint16_t i, eeprom_data;
+
+    DEBUGFUNC("e1000_validate_eeprom_checksum");
+
+    if ((hw->mac_type == e1000_82573) &&
+        (e1000_is_onboard_nvm_eeprom(hw) == FALSE)) {
+        /* Check bit 4 of word 10h.  If it is 0, firmware is done updating
+         * 10h-12h.  Checksum may need to be fixed. */
+        e1000_read_eeprom(hw, 0x10, 1, &eeprom_data);
+        if ((eeprom_data & 0x10) == 0) {
+            /* Read 0x23 and check bit 15.  This bit is a 1 when the checksum
+             * has already been fixed.  If the checksum is still wrong and this
+             * bit is a 1, we need to return bad checksum.  Otherwise, we need
+             * to set this bit to a 1 and update the checksum. */
+            e1000_read_eeprom(hw, 0x23, 1, &eeprom_data);
+            if ((eeprom_data & 0x8000) == 0) {
+                eeprom_data |= 0x8000;
+                e1000_write_eeprom(hw, 0x23, 1, &eeprom_data);
+                e1000_update_eeprom_checksum(hw);
+            }
+        }
+    }
+
+    if (hw->mac_type == e1000_ich8lan) {
+        /* Drivers must allocate the shadow ram structure for the
+         * EEPROM checksum to be updated.  Otherwise, this bit as well
+         * as the checksum must both be set correctly for this
+         * validation to pass.
+         */
+        e1000_read_eeprom(hw, 0x19, 1, &eeprom_data);
+        if ((eeprom_data & 0x40) == 0) {
+            eeprom_data |= 0x40;
+            e1000_write_eeprom(hw, 0x19, 1, &eeprom_data);
+            e1000_update_eeprom_checksum(hw);
+        }
+    }
+
+    for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+        if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+            DEBUGOUT("EEPROM Read Error\n");
+            return -E1000_ERR_EEPROM;
+        }
+        checksum += eeprom_data;
+    }
+
+    if (checksum == (uint16_t) EEPROM_SUM)
+        return E1000_SUCCESS;
+    else {
+        DEBUGOUT("EEPROM Checksum Invalid\n");
+        return -E1000_ERR_EEPROM;
+    }
+}
+
+/******************************************************************************
+ * Calculates the EEPROM checksum and writes it to the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
+ * Writes the difference to word offset 63 of the EEPROM.
+ *****************************************************************************/
+int32_t
+e1000_update_eeprom_checksum(struct e1000_hw *hw)
+{
+    uint32_t ctrl_ext;
+    uint16_t checksum = 0;
+    uint16_t i, eeprom_data;
+
+    DEBUGFUNC("e1000_update_eeprom_checksum");
+
+    for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
+        if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+            DEBUGOUT("EEPROM Read Error\n");
+            return -E1000_ERR_EEPROM;
+        }
+        checksum += eeprom_data;
+    }
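+    /* The checksum word is chosen so that the 16-bit sum of words 0 through
+     * EEPROM_CHECKSUM_REG equals EEPROM_SUM (0xBABA). */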
+    checksum = (uint16_t) EEPROM_SUM - checksum;
+    if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
+        DEBUGOUT("EEPROM Write Error\n");
+        return -E1000_ERR_EEPROM;
+    } else if (hw->eeprom.type == e1000_eeprom_flash) {
+        e1000_commit_shadow_ram(hw);
+    } else if (hw->eeprom.type == e1000_eeprom_ich8) {
+        e1000_commit_shadow_ram(hw);
+        /* Reload the EEPROM, or else modifications will not appear
+         * until after next adapter reset. */
+        ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+        ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+        E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+        msec_delay(10);
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Parent function for writing words to the different EEPROM types.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - 16 bit word to be written to the EEPROM
+ *
+ * If e1000_update_eeprom_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ *****************************************************************************/
+int32_t
+e1000_write_eeprom(struct e1000_hw *hw,
+                   uint16_t offset,
+                   uint16_t words,
+                   uint16_t *data)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    int32_t status = 0;
+
+    DEBUGFUNC("e1000_write_eeprom");
+
+    /* A check for invalid values:  offset too large, too many words, and not
+     * enough words.
+     */
+    if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+       (words == 0)) {
+        DEBUGOUT("\"words\" parameter out of bounds\n");
+        return -E1000_ERR_EEPROM;
+    }
+
+    /* 82573 writes only through eewr */
+    if (eeprom->use_eewr == TRUE)
+        return e1000_write_eeprom_eewr(hw, offset, words, data);
+
+    if (eeprom->type == e1000_eeprom_ich8)
+        return e1000_write_eeprom_ich8(hw, offset, words, data);
+
+    /* Prepare the EEPROM for writing  */
+    if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+        return -E1000_ERR_EEPROM;
+
+    if (eeprom->type == e1000_eeprom_microwire) {
+        status = e1000_write_eeprom_microwire(hw, offset, words, data);
+    } else {
+        status = e1000_write_eeprom_spi(hw, offset, words, data);
+        msec_delay(10);
+    }
+
+    /* Done with writing */
+    e1000_release_eeprom(hw);
+
+    return status;
+}
+
+/******************************************************************************
+ * Writes 16 bit words to a given offset in an SPI EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - pointer to array of 16 bit words to be written to the EEPROM
+ *
+ *****************************************************************************/
+int32_t
+e1000_write_eeprom_spi(struct e1000_hw *hw,
+                       uint16_t offset,
+                       uint16_t words,
+                       uint16_t *data)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    uint16_t widx = 0;
+
+    DEBUGFUNC("e1000_write_eeprom_spi");
+
+    while (widx < words) {
+        uint8_t write_opcode = EEPROM_WRITE_OPCODE_SPI;
+
+        if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM;
+
+        e1000_standby_eeprom(hw);
+
+        /*  Send the WRITE ENABLE command (8 bit opcode )  */
+        e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI,
+                                    eeprom->opcode_bits);
+
+        e1000_standby_eeprom(hw);
+
+        /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+        if ((eeprom->address_bits == 8) && (offset >= 128))
+            write_opcode |= EEPROM_A8_OPCODE_SPI;
+
+        /* Send the Write command (8-bit opcode + addr) */
+        e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
+
+        e1000_shift_out_ee_bits(hw, (uint16_t)((offset + widx)*2),
+                                eeprom->address_bits);
+
+        /* Send the data */
+
+        /* Loop to allow for up to whole page write (32 bytes) of eeprom */
+        while (widx < words) {
+            uint16_t word_out = data[widx];
+            word_out = (word_out >> 8) | (word_out << 8);
+            e1000_shift_out_ee_bits(hw, word_out, 16);
+            widx++;
+
+            /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
+             * operation, while the smaller eeproms are capable of an 8-byte
+             * PAGE WRITE operation.  Break the inner loop to pass new address
+             */
+            if ((((offset + widx)*2) % eeprom->page_size) == 0) {
+                e1000_standby_eeprom(hw);
+                break;
+            }
+        }
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Writes 16 bit words to a given offset in a Microwire EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - pointer to array of 16 bit words to be written to the EEPROM
+ *
+ *****************************************************************************/
+int32_t
+e1000_write_eeprom_microwire(struct e1000_hw *hw,
+                             uint16_t offset,
+                             uint16_t words,
+                             uint16_t *data)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    uint32_t eecd;
+    uint16_t words_written = 0;
+    uint16_t i = 0;
+
+    DEBUGFUNC("e1000_write_eeprom_microwire");
+
+    /* Send the write enable command to the EEPROM (3-bit opcode plus
+     * 6/8-bit dummy address beginning with 11).  It's less work to include
+     * the 11 of the dummy address as part of the opcode than it is to shift
+     * it over the correct number of bits for the address.  This puts the
+     * EEPROM into write/erase mode.
+     */
+    e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
+                            (uint16_t)(eeprom->opcode_bits + 2));
+
+    e1000_shift_out_ee_bits(hw, 0, (uint16_t)(eeprom->address_bits - 2));
+
+    /* Prepare the EEPROM */
+    e1000_standby_eeprom(hw);
+
+    while (words_written < words) {
+        /* Send the Write command (3-bit opcode + addr) */
+        e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
+                                eeprom->opcode_bits);
+
+        e1000_shift_out_ee_bits(hw, (uint16_t)(offset + words_written),
+                                eeprom->address_bits);
+
+        /* Send the data */
+        e1000_shift_out_ee_bits(hw, data[words_written], 16);
+
+        /* Toggle the CS line.  This in effect tells the EEPROM to execute
+         * the previous command.
+         */
+        e1000_standby_eeprom(hw);
+
+        /* Read DO repeatedly until it is high (equal to '1').  The EEPROM will
+         * signal that the command has been completed by raising the DO signal.
+         * If DO does not go high in 10 milliseconds, then error out.
+         */
+        for (i = 0; i < 200; i++) {
+            eecd = E1000_READ_REG(hw, EECD);
+            if (eecd & E1000_EECD_DO) break;
+            usec_delay(50);
+        }
+        if (i == 200) {
+            DEBUGOUT("EEPROM Write did not complete\n");
+            return -E1000_ERR_EEPROM;
+        }
+
+        /* Recover from write */
+        e1000_standby_eeprom(hw);
+
+        words_written++;
+    }
+
+    /* Send the write disable command to the EEPROM (3-bit opcode plus
+     * 6/8-bit dummy address beginning with 10).  It's less work to include
+     * the 10 of the dummy address as part of the opcode than it is to shift
+     * it over the correct number of bits for the address.  This takes the
+     * EEPROM out of write/erase mode.
+     */
+    e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
+                            (uint16_t)(eeprom->opcode_bits + 2));
+
+    e1000_shift_out_ee_bits(hw, 0, (uint16_t)(eeprom->address_bits - 2));
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Flushes the cached eeprom to NVM. This is done by saving the modified values
+ * from the eeprom cache and the unmodified values from the currently active bank
+ * to the new bank.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_commit_shadow_ram(struct e1000_hw *hw)
+{
+    uint32_t attempts = 100000;
+    uint32_t eecd = 0;
+    uint32_t flop = 0;
+    uint32_t i = 0;
+    int32_t error = E1000_SUCCESS;
+    uint32_t old_bank_offset = 0;
+    uint32_t new_bank_offset = 0;
+    uint32_t sector_retries = 0;
+    uint8_t low_byte = 0;
+    uint8_t high_byte = 0;
+    uint8_t temp_byte = 0;
+    boolean_t sector_write_failed = FALSE;
+
+    if (hw->mac_type == e1000_82573) {
+        /* The flop register will be used to determine if flash type is STM */
+        flop = E1000_READ_REG(hw, FLOP);
+        for (i=0; i < attempts; i++) {
+            eecd = E1000_READ_REG(hw, EECD);
+            if ((eecd & E1000_EECD_FLUPD) == 0) {
+                break;
+            }
+            usec_delay(5);
+        }
+
+        if (i == attempts) {
+            return -E1000_ERR_EEPROM;
+        }
+
+        /* If STM opcode located in bits 15:8 of flop, reset firmware */
+        if ((flop & 0xFF00) == E1000_STM_OPCODE) {
+            E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET);
+        }
+
+        /* Perform the flash update */
+        E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD);
+
+        for (i=0; i < attempts; i++) {
+            eecd = E1000_READ_REG(hw, EECD);
+            if ((eecd & E1000_EECD_FLUPD) == 0) {
+                break;
+            }
+            usec_delay(5);
+        }
+
+        if (i == attempts) {
+            return -E1000_ERR_EEPROM;
+        }
+    }
+
+    if (hw->mac_type == e1000_ich8lan && hw->eeprom_shadow_ram != NULL) {
+        /* We're writing to the opposite bank so if we're on bank 1,
+         * write to bank 0 etc.  We also need to erase the segment that
+         * is going to be written */
+        if (!(E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL)) {
+            new_bank_offset = hw->flash_bank_size * 2;
+            old_bank_offset = 0;
+            e1000_erase_ich8_4k_segment(hw, 1);
+        } else {
+            old_bank_offset = hw->flash_bank_size * 2;
+            new_bank_offset = 0;
+            e1000_erase_ich8_4k_segment(hw, 0);
+        }
+
+        do {
+            sector_write_failed = FALSE;
+            /* Loop for every byte in the shadow RAM,
+             * which is in units of words. */
+            for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+                /* Determine whether to write the value stored
+                 * in the other NVM bank or a modified value stored
+                 * in the shadow RAM */
+                if (hw->eeprom_shadow_ram[i].modified == TRUE) {
+                    low_byte = (uint8_t)hw->eeprom_shadow_ram[i].eeprom_word;
+                    e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset,
+                                         &temp_byte);
+                    usec_delay(100);
+                    error = e1000_verify_write_ich8_byte(hw,
+                                                 (i << 1) + new_bank_offset,
+                                                 low_byte);
+                    if (error != E1000_SUCCESS)
+                        sector_write_failed = TRUE;
+                    high_byte =
+                        (uint8_t)(hw->eeprom_shadow_ram[i].eeprom_word >> 8);
+                    e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
+                                         &temp_byte);
+                    usec_delay(100);
+                } else {
+                    e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset,
+                                         &low_byte);
+                    usec_delay(100);
+                    error = e1000_verify_write_ich8_byte(hw,
+                                 (i << 1) + new_bank_offset, low_byte);
+                    if (error != E1000_SUCCESS)
+                        sector_write_failed = TRUE;
+                    e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
+                                         &high_byte);
+                }
+
+                /* If the word is 0x13, then make sure the signature bits
+                 * (15:14) are 11b until the commit has completed.
+                 * This will allow us to write 10b which indicates the
+                 * signature is valid.  We want to do this after the write
+                 * has completed so that we don't mark the segment valid
+                 * while the write is still in progress */
+                if (i == E1000_ICH8_NVM_SIG_WORD)
+                    high_byte = E1000_ICH8_NVM_SIG_MASK | high_byte;
+
+                error = e1000_verify_write_ich8_byte(hw,
+                             (i << 1) + new_bank_offset + 1, high_byte);
+                if (error != E1000_SUCCESS)
+                    sector_write_failed = TRUE;
+
+                if (sector_write_failed == FALSE) {
+                    /* Clear the now not used entry in the cache */
+                    hw->eeprom_shadow_ram[i].modified = FALSE;
+                    hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
+                }
+            }
+
+            /* Don't bother writing the segment valid bits if sector
+             * programming failed. */
+            if (sector_write_failed == FALSE) {
+                /* Finally validate the new segment by setting bit 15:14
+                 * to 10b in word 0x13 , this can be done without an
+                 * erase as well since these bits are 11 to start with
+                 * and we need to change bit 14 to 0b */
+                e1000_read_ich8_byte(hw,
+                    E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
+                    &high_byte);
+                high_byte &= 0xBF;
+                error = e1000_verify_write_ich8_byte(hw,
+                            E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
+                            high_byte);
+                if (error != E1000_SUCCESS)
+                    sector_write_failed = TRUE;
+
+                /* And invalidate the previously valid segment by setting
+                 * its signature word (0x13) high_byte to 0b. This can be
+                 * done without an erase because flash erase sets all bits
+                 * to 1's. We can write 1's to 0's without an erase */
+                error = e1000_verify_write_ich8_byte(hw,
+                            E1000_ICH8_NVM_SIG_WORD * 2 + 1 + old_bank_offset,
+                            0);
+                if (error != E1000_SUCCESS)
+                    sector_write_failed = TRUE;
+            }
+        } while (++sector_retries < 10 && sector_write_failed == TRUE);
+    }
+
+    return error;
+}
+
+/******************************************************************************
+ * Reads the adapter's part number from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ * part_num - Adapter's part number
+ *****************************************************************************/
+int32_t
+e1000_read_part_num(struct e1000_hw *hw,
+                    uint32_t *part_num)
+{
+    uint16_t offset = EEPROM_PBA_BYTE_1;
+    uint16_t eeprom_data;
+
+    DEBUGFUNC("e1000_read_part_num");
+
+    /* Get word 0 from EEPROM */
+    if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
+        DEBUGOUT("EEPROM Read Error\n");
+        return -E1000_ERR_EEPROM;
+    }
+    /* Save word 0 in upper half of part_num */
+    *part_num = (uint32_t) (eeprom_data << 16);
+
+    /* Get word 1 from EEPROM */
+    if (e1000_read_eeprom(hw, ++offset, 1, &eeprom_data) < 0) {
+        DEBUGOUT("EEPROM Read Error\n");
+        return -E1000_ERR_EEPROM;
+    }
+    /* Save word 1 in lower half of part_num */
+    *part_num |= eeprom_data;
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the
+ * second function of dual function devices
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_read_mac_addr(struct e1000_hw * hw)
+{
+    uint16_t offset;
+    uint16_t eeprom_data, i;
+
+    DEBUGFUNC("e1000_read_mac_addr");
+
+    for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
+        offset = i >> 1;
+        if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
+            DEBUGOUT("EEPROM Read Error\n");
+            return -E1000_ERR_EEPROM;
+        }
+        hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF);
+        hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8);
+    }
+
+    switch (hw->mac_type) {
+    default:
+        break;
+    case e1000_82546:
+    case e1000_82546_rev_3:
+    case e1000_82571:
+    case e1000_80003es2lan:
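+        /* Dual-port parts share one EEPROM image; the second function
+         * derives its MAC address by flipping the low bit of the last octet. */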
+        if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
+            hw->perm_mac_addr[5] ^= 0x01;
+        break;
+    }
+
+    for (i = 0; i < NODE_ADDRESS_SIZE; i++)
+        hw->mac_addr[i] = hw->perm_mac_addr[i];
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Initializes receive address filters.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ *****************************************************************************/
+void
+e1000_init_rx_addrs(struct e1000_hw *hw)
+{
+    uint32_t i;
+    uint32_t rar_num;
+
+    DEBUGFUNC("e1000_init_rx_addrs");
+
+    /* Setup the receive address. */
+    DEBUGOUT("Programming MAC Address into RAR[0]\n");
+
+    e1000_rar_set(hw, hw->mac_addr, 0);
+
+    rar_num = E1000_RAR_ENTRIES;
+
+    /* Reserve a spot for the Locally Administered Address to work around
+     * an 82571 issue in which a reset on one port will reload the MAC on
+     * the other port. */
+    if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE))
+        rar_num -= 1;
+    if (hw->mac_type == e1000_ich8lan)
+        rar_num = E1000_RAR_ENTRIES_ICH8LAN;
+
+    /* Zero out the other 15 receive addresses. */
+    DEBUGOUT("Clearing RAR[1-15]\n");
+    for (i = 1; i < rar_num; i++) {
+        E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+        E1000_WRITE_FLUSH(hw);
+        E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+        E1000_WRITE_FLUSH(hw);
+    }
+}
+
+/******************************************************************************
+ * Updates the MAC's list of multicast addresses.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr_list - the list of new multicast addresses
+ * mc_addr_count - number of addresses
+ * pad - number of bytes between addresses in the list
+ * rar_used_count - offset where to start adding mc addresses into the RAR's
+ *
+ * The given list replaces any existing list. Clears the last 15 receive
+ * address registers and the multicast table. Uses receive address registers
+ * for the first 15 multicast addresses, and hashes the rest into the
+ * multicast table.
+ *****************************************************************************/
+void
+e1000_mc_addr_list_update(struct e1000_hw *hw,
+                          uint8_t *mc_addr_list,
+                          uint32_t mc_addr_count,
+                          uint32_t pad,
+                          uint32_t rar_used_count)
+{
+    uint32_t hash_value;
+    uint32_t i;
+    uint32_t num_rar_entry;
+    uint32_t num_mta_entry;
+
+    DEBUGFUNC("e1000_mc_addr_list_update");
+
+    /* Set the new number of MC addresses that we are being requested to use. */
+    hw->num_mc_addrs = mc_addr_count;
+
+    /* Clear RAR[1-15] */
+    DEBUGOUT(" Clearing RAR[1-15]\n");
+    num_rar_entry = E1000_RAR_ENTRIES;
+    if (hw->mac_type == e1000_ich8lan)
+        num_rar_entry = E1000_RAR_ENTRIES_ICH8LAN;
+    /* Reserve a spot for the Locally Administered Address to work around
+     * an 82571 issue in which a reset on one port will reload the MAC on
+     * the other port. */
+    if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE))
+        num_rar_entry -= 1;
+
+    for (i = rar_used_count; i < num_rar_entry; i++) {
+        E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+        E1000_WRITE_FLUSH(hw);
+        E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+        E1000_WRITE_FLUSH(hw);
+    }
+
+    /* Clear the MTA */
+    DEBUGOUT(" Clearing MTA\n");
+    num_mta_entry = E1000_NUM_MTA_REGISTERS;
+    if (hw->mac_type == e1000_ich8lan)
+        num_mta_entry = E1000_NUM_MTA_REGISTERS_ICH8LAN;
+    for (i = 0; i < num_mta_entry; i++) {
+        E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+        E1000_WRITE_FLUSH(hw);
+    }
+
+    /* Add the new addresses */
+    for (i = 0; i < mc_addr_count; i++) {
+        DEBUGOUT(" Adding the multicast addresses:\n");
+        DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
+                  mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad)],
+                  mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 1],
+                  mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 2],
+                  mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 3],
+                  mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 4],
+                  mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 5]);
+
+        hash_value = e1000_hash_mc_addr(hw,
+                                        mc_addr_list +
+                                        (i * (ETH_LENGTH_OF_ADDRESS + pad)));
+
+        DEBUGOUT1(" Hash value = 0x%03X\n", hash_value);
+
+        /* Place this multicast address in the RAR if there is room,
+         * else put it in the MTA.
+         */
+        if (rar_used_count < num_rar_entry) {
+            e1000_rar_set(hw,
+                          mc_addr_list + (i * (ETH_LENGTH_OF_ADDRESS + pad)),
+                          rar_used_count);
+            rar_used_count++;
+        } else {
+            e1000_mta_set(hw, hash_value);
+        }
+    }
+    DEBUGOUT("MC Update Complete\n");
+}
+
+/******************************************************************************
+ * Hashes an address to determine its location in the multicast table
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr - the multicast address to hash
+ *****************************************************************************/
+uint32_t
+e1000_hash_mc_addr(struct e1000_hw *hw,
+                   uint8_t *mc_addr)
+{
+    uint32_t hash_value = 0;
+
+    /* The portion of the address that is used for the hash table is
+     * determined by the mc_filter_type setting.
+     */
+    switch (hw->mc_filter_type) {
+    /* [0] [1] [2] [3] [4] [5]
+     * 01  AA  00  12  34  56
+     * LSB                 MSB
+     */
+    case 0:
+        if (hw->mac_type == e1000_ich8lan) {
+            /* [47:38] i.e. 0x158 for above example address */
+            hash_value = ((mc_addr[4] >> 6) | (((uint16_t) mc_addr[5]) << 2));
+        } else {
+            /* [47:36] i.e. 0x563 for above example address */
+            hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
+        }
+        break;
+    case 1:
+        if (hw->mac_type == e1000_ich8lan) {
+            /* [46:37] i.e. 0x2B1 for above example address */
+            hash_value = ((mc_addr[4] >> 5) | (((uint16_t) mc_addr[5]) << 3));
+        } else {
+            /* [46:35] i.e. 0xAC6 for above example address */
+            hash_value = ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5));
+        }
+        break;
+    case 2:
+        if (hw->mac_type == e1000_ich8lan) {
+            /*[45:36] i.e. 0x163 for above example address */
+            hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
+        } else {
+            /* [45:34] i.e. 0x5D8 for above example address */
+            hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
+        }
+        break;
+    case 3:
+        if (hw->mac_type == e1000_ich8lan) {
+            /* [43:34] i.e. 0x18D for above example address */
+            hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
+        } else {
+            /* [43:32] i.e. 0x634 for above example address */
+            hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8));
+        }
+        break;
+    }
+
+    hash_value &= 0xFFF;
+    if (hw->mac_type == e1000_ich8lan)
+        hash_value &= 0x3FF;
+
+    return hash_value;
+}
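+
+/*
+ * Illustrative sketch (not part of the driver): with the default filter type
+ * (case 0, non-ICH8), the hash is bits [47:36] of the destination address.
+ * For the example address 01:AA:00:12:34:56 used in the comments above,
+ * mc_addr[4] = 0x34 and mc_addr[5] = 0x56, so
+ *
+ *     hash_value = (0x34 >> 4) | (0x56 << 4) = 0x003 | 0x560 = 0x563
+ *
+ * which matches the value quoted in the case 0 comment.  A minimal
+ * standalone C version of that computation would be:
+ *
+ *     static uint32_t example_hash_type0(const uint8_t *mc_addr)
+ *     {
+ *         return (((uint32_t)mc_addr[4] >> 4) |
+ *                 ((uint32_t)mc_addr[5] << 4)) & 0xFFF;
+ *     }
+ */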
+
+/******************************************************************************
+ * Sets the bit in the multicast table corresponding to the hash value.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ *****************************************************************************/
+void
+e1000_mta_set(struct e1000_hw *hw,
+              uint32_t hash_value)
+{
+    uint32_t hash_bit, hash_reg;
+    uint32_t mta;
+    uint32_t temp;
+
+    /* The MTA is a register array of 128 32-bit registers.
+     * It is treated like an array of 4096 bits.  We want to set
+     * bit BitArray[hash_value]. So we figure out what register
+     * the bit is in, read it, OR in the new bit, then write
+     * back the new value.  The register is determined by the
+     * upper 7 bits of the hash value and the bit within that
+     * register is determined by the lower 5 bits of the value.
+     */
+    hash_reg = (hash_value >> 5) & 0x7F;
+    if (hw->mac_type == e1000_ich8lan)
+        hash_reg &= 0x1F;
+    hash_bit = hash_value & 0x1F;
+
+    mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg);
+
+    mta |= (1 << hash_bit);
+
+    /* If we are on an 82544 and we are trying to write an odd offset
+     * in the MTA, save off the previous entry before writing and
+     * restore the old value after writing.
+     */
+    if ((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
+        temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1));
+        E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+        E1000_WRITE_FLUSH(hw);
+        E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp);
+        E1000_WRITE_FLUSH(hw);
+    } else {
+        E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+        E1000_WRITE_FLUSH(hw);
+    }
+}
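+
+/*
+ * Illustrative sketch (not part of the driver): for the example hash value
+ * 0x563 derived above, the MTA location works out to
+ *
+ *     hash_reg = (0x563 >> 5) & 0x7F = 0x2B   (MTA register 43)
+ *     hash_bit =  0x563       & 0x1F = 0x03   (bit 3 of that register)
+ *
+ * so e1000_mta_set() ends up OR-ing (1 << 3) into MTA[0x2B].
+ */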
+
+/******************************************************************************
+ * Puts an ethernet address into a receive address register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * addr - Address to put into receive address register
+ * index - Receive address register to write
+ *****************************************************************************/
+void
+e1000_rar_set(struct e1000_hw *hw,
+              uint8_t *addr,
+              uint32_t index)
+{
+    uint32_t rar_low, rar_high;
+
+    /* HW expects these in little endian so we reverse the byte order
+     * from network order (big endian) to little endian
+     */
+    rar_low = ((uint32_t) addr[0] |
+               ((uint32_t) addr[1] << 8) |
+               ((uint32_t) addr[2] << 16) | ((uint32_t) addr[3] << 24));
+    rar_high = ((uint32_t) addr[4] | ((uint32_t) addr[5] << 8));
+
+    /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
+     * unit hang.
+     *
+     * Description:
+     * If there are any Rx frames queued up or otherwise present in the HW
+     * before RSS is enabled, and then we enable RSS, the HW Rx unit will
+     * hang.  To work around this issue, we have to disable receives and
+     * flush out all Rx frames before we enable RSS. To do so, we redirect
+     * all Rx traffic to manageability and then reset the HW.
+     * This flushes away Rx frames, and (since the redirection to
+     * manageability persists across resets) keeps new ones from coming in
+     * while we work.  Then, we clear the Address Valid (AV) bit for all MAC
+     * addresses and undo the re-direction to manageability.
+     * Now, frames are coming in again, but the MAC won't accept them, so
+     * far so good.  We now proceed to initialize RSS (if necessary) and
+     * configure the Rx unit.  Last, we re-enable the AV bits and continue
+     * on our merry way.
+     */
+    switch (hw->mac_type) {
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_80003es2lan:
+        if (hw->leave_av_bit_off == TRUE)
+            break;
+        fallthrough;
+    default:
+        /* Indicate to hardware the Address is Valid. */
+        rar_high |= E1000_RAH_AV;
+        break;
+    }
+
+    E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+    E1000_WRITE_FLUSH(hw);
+    E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+    E1000_WRITE_FLUSH(hw);
+}
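+
+/*
+ * Illustrative sketch (not part of the driver): for the example address
+ * 01:AA:00:12:34:56, the little-endian packing above yields
+ *
+ *     rar_low  = 0x01 | (0xAA << 8) | (0x00 << 16) | (0x12 << 24) = 0x1200AA01
+ *     rar_high = 0x34 | (0x56 << 8)                               = 0x00005634
+ *
+ * with E1000_RAH_AV OR-ed into rar_high whenever the entry is marked valid.
+ */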
+
+/******************************************************************************
+ * Writes a value to the specified offset in the VLAN filter table.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - Offset in VLAN filter table to write
+ * value - Value to write into VLAN filter table
+ *****************************************************************************/
+void
+e1000_write_vfta(struct e1000_hw *hw,
+                 uint32_t offset,
+                 uint32_t value)
+{
+    uint32_t temp;
+
+    if (hw->mac_type == e1000_ich8lan)
+        return;
+
+    if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
+        temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
+        E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+        E1000_WRITE_FLUSH(hw);
+        E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
+        E1000_WRITE_FLUSH(hw);
+    } else {
+        E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+        E1000_WRITE_FLUSH(hw);
+    }
+}
+
+/******************************************************************************
+ * Clears the VLAN filter table
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void
+e1000_clear_vfta(struct e1000_hw *hw)
+{
+    uint32_t offset;
+    uint32_t vfta_value = 0;
+    uint32_t vfta_offset = 0;
+    uint32_t vfta_bit_in_reg = 0;
+
+    if (hw->mac_type == e1000_ich8lan)
+        return;
+
+    if (hw->mac_type == e1000_82573) {
+        if (hw->mng_cookie.vlan_id != 0) {
+            /* The VFTA is a 4096-bit field in which each bit identifies a
+             * single VLAN ID.  The following operations determine which 32-bit
+             * entry (i.e. offset) into the array, and which bit within it,
+             * correspond to the manageability unit's VLAN ID. */
+            vfta_offset = (hw->mng_cookie.vlan_id >>
+                           E1000_VFTA_ENTRY_SHIFT) &
+                          E1000_VFTA_ENTRY_MASK;
+            vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
+                                    E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+        }
+    }
+    for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+        /* If the offset we want to clear is the same offset of the
+         * manageability VLAN ID, then clear all bits except that of the
+         * manageability unit */
+        vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+        E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
+        E1000_WRITE_FLUSH(hw);
+    }
+}
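+
+/*
+ * Illustrative sketch (not part of the driver), assuming the conventional
+ * layout of the 4096-bit VFTA as 128 32-bit entries (entry shift of 5, bit
+ * mask of 0x1F): for a manageability VLAN ID of 100 (0x064),
+ *
+ *     vfta_offset     = (100 >> 5) & entry_mask = 3
+ *     vfta_bit_in_reg = 1 << (100 & 0x1F)       = 1 << 4
+ *
+ * so the loop above clears every VFTA entry except VFTA[3], in which only
+ * bit 4 is left set.
+ */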
+
+int32_t
+e1000_id_led_init(struct e1000_hw * hw)
+{
+    uint32_t ledctl;
+    const uint32_t ledctl_mask = 0x000000FF;
+    const uint32_t ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+    const uint32_t ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+    uint16_t eeprom_data, i, temp;
+    const uint16_t led_mask = 0x0F;
+
+    DEBUGFUNC("e1000_id_led_init");
+
+    if (hw->mac_type < e1000_82540) {
+        /* Nothing to do */
+        return E1000_SUCCESS;
+    }
+
+    ledctl = E1000_READ_REG(hw, LEDCTL);
+    hw->ledctl_default = ledctl;
+    hw->ledctl_mode1 = hw->ledctl_default;
+    hw->ledctl_mode2 = hw->ledctl_default;
+
+    if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
+        DEBUGOUT("EEPROM Read Error\n");
+        return -E1000_ERR_EEPROM;
+    }
+
+    if ((hw->mac_type == e1000_82573) &&
+        (eeprom_data == ID_LED_RESERVED_82573))
+        eeprom_data = ID_LED_DEFAULT_82573;
+    else if ((eeprom_data == ID_LED_RESERVED_0000) ||
+            (eeprom_data == ID_LED_RESERVED_FFFF)) {
+        if (hw->mac_type == e1000_ich8lan)
+            eeprom_data = ID_LED_DEFAULT_ICH8LAN;
+        else
+            eeprom_data = ID_LED_DEFAULT;
+    }
+    for (i = 0; i < 4; i++) {
+        temp = (eeprom_data >> (i << 2)) & led_mask;
+        switch (temp) {
+        case ID_LED_ON1_DEF2:
+        case ID_LED_ON1_ON2:
+        case ID_LED_ON1_OFF2:
+            hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+            hw->ledctl_mode1 |= ledctl_on << (i << 3);
+            break;
+        case ID_LED_OFF1_DEF2:
+        case ID_LED_OFF1_ON2:
+        case ID_LED_OFF1_OFF2:
+            hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+            hw->ledctl_mode1 |= ledctl_off << (i << 3);
+            break;
+        default:
+            /* Do nothing */
+            break;
+        }
+        switch (temp) {
+        case ID_LED_DEF1_ON2:
+        case ID_LED_ON1_ON2:
+        case ID_LED_OFF1_ON2:
+            hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+            hw->ledctl_mode2 |= ledctl_on << (i << 3);
+            break;
+        case ID_LED_DEF1_OFF2:
+        case ID_LED_ON1_OFF2:
+        case ID_LED_OFF1_OFF2:
+            hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+            hw->ledctl_mode2 |= ledctl_off << (i << 3);
+            break;
+        default:
+            /* Do nothing */
+            break;
+        }
+    }
+    return E1000_SUCCESS;
+}
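+
+/*
+ * Illustrative note (not part of the driver): the EEPROM ID LED word holds
+ * one 4-bit selector per LED (i = 0..3).  For each LED the loop above patches
+ * the corresponding byte (shift i << 3) of ledctl_mode1 and ledctl_mode2; for
+ * example a selector of ID_LED_ON1_OFF2 forces that LED's byte to the "LED
+ * on" mode in ledctl_mode1 and to the "LED off" mode in ledctl_mode2, while
+ * selectors that request the default behavior leave the byte at the value
+ * read from LEDCTL.
+ */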
+
+/******************************************************************************
+ * Prepares the SW controllable LED for use and saves the current state of the LED.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_setup_led(struct e1000_hw *hw)
+{
+    uint32_t ledctl;
+    int32_t ret_val = E1000_SUCCESS;
+
+    DEBUGFUNC("e1000_setup_led");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+    case e1000_82544:
+        /* No setup necessary */
+        break;
+    case e1000_82541:
+    case e1000_82547:
+    case e1000_82541_rev_2:
+    case e1000_82547_rev_2:
+        /* Turn off PHY Smart Power Down (if enabled) */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
+                                     &hw->phy_spd_default);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+                                      (uint16_t)(hw->phy_spd_default &
+                                      ~IGP01E1000_GMII_SPD));
+        if (ret_val)
+            return ret_val;
+        fallthrough;
+    default:
+        if (hw->media_type == e1000_media_type_fiber) {
+            ledctl = E1000_READ_REG(hw, LEDCTL);
+            /* Save current LEDCTL settings */
+            hw->ledctl_default = ledctl;
+            /* Turn off LED0 */
+            ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+                        E1000_LEDCTL_LED0_BLINK |
+                        E1000_LEDCTL_LED0_MODE_MASK);
+            ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+                       E1000_LEDCTL_LED0_MODE_SHIFT);
+            E1000_WRITE_REG(hw, LEDCTL, ledctl);
+        } else if (hw->media_type == e1000_media_type_copper)
+            E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1);
+        break;
+    }
+
+    return E1000_SUCCESS;
+}
+
+
+/******************************************************************************
+ * Used on 82571 and later silicon that has LED blink bits.
+ * Callers must use their own timer and should have already called
+ * e1000_id_led_init().
+ * Call e1000_cleanup_led() to stop blinking.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_blink_led_start(struct e1000_hw *hw)
+{
+    int16_t  i;
+    uint32_t ledctl_blink = 0;
+
+    DEBUGFUNC("e1000_id_led_blink_on");
+
+    if (hw->mac_type < e1000_82571) {
+        /* Nothing to do */
+        return E1000_SUCCESS;
+    }
+    if (hw->media_type == e1000_media_type_fiber) {
+        /* always blink LED0 for PCI-E fiber */
+        ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+                     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+    } else {
+        /* set the blink bit for each LED that's "on" (0x0E) in ledctl_mode2 */
+        ledctl_blink = hw->ledctl_mode2;
+        for (i=0; i < 4; i++)
+            if (((hw->ledctl_mode2 >> (i * 8)) & 0xFF) ==
+                E1000_LEDCTL_MODE_LED_ON)
+                ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8));
+    }
+
+    E1000_WRITE_REG(hw, LEDCTL, ledctl_blink);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Restores the saved state of the SW controllable LED.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_cleanup_led(struct e1000_hw *hw)
+{
+    int32_t ret_val = E1000_SUCCESS;
+
+    DEBUGFUNC("e1000_cleanup_led");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+    case e1000_82544:
+        /* No cleanup necessary */
+        break;
+    case e1000_82541:
+    case e1000_82547:
+    case e1000_82541_rev_2:
+    case e1000_82547_rev_2:
+        /* Turn on PHY Smart Power Down (if previously enabled) */
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+                                      hw->phy_spd_default);
+        if (ret_val)
+            return ret_val;
+        fallthrough;
+    default:
+        if (hw->phy_type == e1000_phy_ife) {
+            e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
+            break;
+        }
+        /* Restore LEDCTL settings */
+        E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_default);
+        break;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Turns on the software controllable LED
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_led_on(struct e1000_hw *hw)
+{
+    uint32_t ctrl = E1000_READ_REG(hw, CTRL);
+
+    DEBUGFUNC("e1000_led_on");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+        /* Set SW Definable Pin 0 to turn on the LED */
+        ctrl |= E1000_CTRL_SWDPIN0;
+        ctrl |= E1000_CTRL_SWDPIO0;
+        break;
+    case e1000_82544:
+        if (hw->media_type == e1000_media_type_fiber) {
+            /* Set SW Definable Pin 0 to turn on the LED */
+            ctrl |= E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        } else {
+            /* Clear SW Definable Pin 0 to turn on the LED */
+            ctrl &= ~E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        }
+        break;
+    default:
+        if (hw->media_type == e1000_media_type_fiber) {
+            /* Clear SW Definable Pin 0 to turn on the LED */
+            ctrl &= ~E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        } else if (hw->phy_type == e1000_phy_ife) {
+            e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+        } else if (hw->media_type == e1000_media_type_copper) {
+            E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode2);
+            return E1000_SUCCESS;
+        }
+        break;
+    }
+
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Turns off the software controllable LED
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_led_off(struct e1000_hw *hw)
+{
+    uint32_t ctrl = E1000_READ_REG(hw, CTRL);
+
+    DEBUGFUNC("e1000_led_off");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+        /* Clear SW Definable Pin 0 to turn off the LED */
+        ctrl &= ~E1000_CTRL_SWDPIN0;
+        ctrl |= E1000_CTRL_SWDPIO0;
+        break;
+    case e1000_82544:
+        if (hw->media_type == e1000_media_type_fiber) {
+            /* Clear SW Definable Pin 0 to turn off the LED */
+            ctrl &= ~E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        } else {
+            /* Set SW Definable Pin 0 to turn off the LED */
+            ctrl |= E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        }
+        break;
+    default:
+        if (hw->media_type == e1000_media_type_fiber) {
+            /* Set SW Definable Pin 0 to turn off the LED */
+            ctrl |= E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        } else if (hw->phy_type == e1000_phy_ife) {
+            e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+        } else if (hw->media_type == e1000_media_type_copper) {
+            E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1);
+            return E1000_SUCCESS;
+        }
+        break;
+    }
+
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Clears all hardware statistics counters.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void
+e1000_clear_hw_cntrs(struct e1000_hw *hw)
+{
+    volatile uint32_t temp;
+
+    temp = E1000_READ_REG(hw, CRCERRS);
+    temp = E1000_READ_REG(hw, SYMERRS);
+    temp = E1000_READ_REG(hw, MPC);
+    temp = E1000_READ_REG(hw, SCC);
+    temp = E1000_READ_REG(hw, ECOL);
+    temp = E1000_READ_REG(hw, MCC);
+    temp = E1000_READ_REG(hw, LATECOL);
+    temp = E1000_READ_REG(hw, COLC);
+    temp = E1000_READ_REG(hw, DC);
+    temp = E1000_READ_REG(hw, SEC);
+    temp = E1000_READ_REG(hw, RLEC);
+    temp = E1000_READ_REG(hw, XONRXC);
+    temp = E1000_READ_REG(hw, XONTXC);
+    temp = E1000_READ_REG(hw, XOFFRXC);
+    temp = E1000_READ_REG(hw, XOFFTXC);
+    temp = E1000_READ_REG(hw, FCRUC);
+
+    if (hw->mac_type != e1000_ich8lan) {
+    temp = E1000_READ_REG(hw, PRC64);
+    temp = E1000_READ_REG(hw, PRC127);
+    temp = E1000_READ_REG(hw, PRC255);
+    temp = E1000_READ_REG(hw, PRC511);
+    temp = E1000_READ_REG(hw, PRC1023);
+    temp = E1000_READ_REG(hw, PRC1522);
+    }
+
+    temp = E1000_READ_REG(hw, GPRC);
+    temp = E1000_READ_REG(hw, BPRC);
+    temp = E1000_READ_REG(hw, MPRC);
+    temp = E1000_READ_REG(hw, GPTC);
+    temp = E1000_READ_REG(hw, GORCL);
+    temp = E1000_READ_REG(hw, GORCH);
+    temp = E1000_READ_REG(hw, GOTCL);
+    temp = E1000_READ_REG(hw, GOTCH);
+    temp = E1000_READ_REG(hw, RNBC);
+    temp = E1000_READ_REG(hw, RUC);
+    temp = E1000_READ_REG(hw, RFC);
+    temp = E1000_READ_REG(hw, ROC);
+    temp = E1000_READ_REG(hw, RJC);
+    temp = E1000_READ_REG(hw, TORL);
+    temp = E1000_READ_REG(hw, TORH);
+    temp = E1000_READ_REG(hw, TOTL);
+    temp = E1000_READ_REG(hw, TOTH);
+    temp = E1000_READ_REG(hw, TPR);
+    temp = E1000_READ_REG(hw, TPT);
+
+    if (hw->mac_type != e1000_ich8lan) {
+    temp = E1000_READ_REG(hw, PTC64);
+    temp = E1000_READ_REG(hw, PTC127);
+    temp = E1000_READ_REG(hw, PTC255);
+    temp = E1000_READ_REG(hw, PTC511);
+    temp = E1000_READ_REG(hw, PTC1023);
+    temp = E1000_READ_REG(hw, PTC1522);
+    }
+
+    temp = E1000_READ_REG(hw, MPTC);
+    temp = E1000_READ_REG(hw, BPTC);
+
+    if (hw->mac_type < e1000_82543) return;
+
+    temp = E1000_READ_REG(hw, ALGNERRC);
+    temp = E1000_READ_REG(hw, RXERRC);
+    temp = E1000_READ_REG(hw, TNCRS);
+    temp = E1000_READ_REG(hw, CEXTERR);
+    temp = E1000_READ_REG(hw, TSCTC);
+    temp = E1000_READ_REG(hw, TSCTFC);
+
+    if (hw->mac_type <= e1000_82544) return;
+
+    temp = E1000_READ_REG(hw, MGTPRC);
+    temp = E1000_READ_REG(hw, MGTPDC);
+    temp = E1000_READ_REG(hw, MGTPTC);
+
+    if (hw->mac_type <= e1000_82547_rev_2) return;
+
+    temp = E1000_READ_REG(hw, IAC);
+    temp = E1000_READ_REG(hw, ICRXOC);
+
+    if (hw->mac_type == e1000_ich8lan) return;
+
+    temp = E1000_READ_REG(hw, ICRXPTC);
+    temp = E1000_READ_REG(hw, ICRXATC);
+    temp = E1000_READ_REG(hw, ICTXPTC);
+    temp = E1000_READ_REG(hw, ICTXATC);
+    temp = E1000_READ_REG(hw, ICTXQEC);
+    temp = E1000_READ_REG(hw, ICTXQMTC);
+    temp = E1000_READ_REG(hw, ICRXDMTC);
+}
+
+/******************************************************************************
+ * Resets Adaptive IFS to its default state.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Call this after e1000_init_hw. You may override the IFS defaults by setting
+ * hw->ifs_params_forced to TRUE. However, you must initialize hw->
+ * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio
+ * before calling this function.
+ *****************************************************************************/
+void
+e1000_reset_adaptive(struct e1000_hw *hw)
+{
+    DEBUGFUNC("e1000_reset_adaptive");
+
+    if (hw->adaptive_ifs) {
+        if (!hw->ifs_params_forced) {
+            hw->current_ifs_val = 0;
+            hw->ifs_min_val = IFS_MIN;
+            hw->ifs_max_val = IFS_MAX;
+            hw->ifs_step_size = IFS_STEP;
+            hw->ifs_ratio = IFS_RATIO;
+        }
+        hw->in_ifs_mode = FALSE;
+        E1000_WRITE_REG(hw, AIT, 0);
+    } else {
+        DEBUGOUT("Not in Adaptive IFS mode!\n");
+    }
+}
+
+/******************************************************************************
+ * Called during the callback/watchdog routine to update IFS value based on
+ * the ratio of transmits to collisions.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * (The transmit and collision deltas are taken from hw->tx_packet_delta and
+ * hw->collision_delta, updated by the caller since the last callback.)
+ *****************************************************************************/
+void
+e1000_update_adaptive(struct e1000_hw *hw)
+{
+    DEBUGFUNC("e1000_update_adaptive");
+
+    if (hw->adaptive_ifs) {
+        if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
+            if (hw->tx_packet_delta > MIN_NUM_XMITS) {
+                hw->in_ifs_mode = TRUE;
+                if (hw->current_ifs_val < hw->ifs_max_val) {
+                    if (hw->current_ifs_val == 0)
+                        hw->current_ifs_val = hw->ifs_min_val;
+                    else
+                        hw->current_ifs_val += hw->ifs_step_size;
+                    E1000_WRITE_REG(hw, AIT, hw->current_ifs_val);
+                }
+            }
+        } else {
+            if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
+                hw->current_ifs_val = 0;
+                hw->in_ifs_mode = FALSE;
+                E1000_WRITE_REG(hw, AIT, 0);
+            }
+        }
+    } else {
+        DEBUGOUT("Not in Adaptive IFS mode!\n");
+    }
+}
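+
+/*
+ * Illustrative walk-through (not part of the driver), assuming typical
+ * defaults of ifs_ratio = 4 and MIN_NUM_XMITS = 1000: with, say, 2000
+ * transmits and 600 collisions since the last callback,
+ *
+ *     collision_delta * ifs_ratio = 600 * 4 = 2400 > tx_packet_delta = 2000
+ *
+ * and tx_packet_delta > MIN_NUM_XMITS, so the routine enters IFS mode and
+ * steps current_ifs_val up (ifs_min_val first, then +ifs_step_size per call,
+ * capped at ifs_max_val), writing each new value into the AIT register.
+ * When the collision ratio later falls and the transmit count drops to
+ * MIN_NUM_XMITS or below while in IFS mode, the value is reset to 0 and IFS
+ * mode is left.
+ */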
+
+/******************************************************************************
+ * Adjusts the statistics counters when a frame is accepted by TBI_ACCEPT
+ *
+ * hw - Struct containing variables accessed by shared code
+ * stats - Statistics counters to adjust
+ * frame_len - The length of the frame in question
+ * mac_addr - The Ethernet destination address of the frame in question
+ *****************************************************************************/
+void
+e1000_tbi_adjust_stats(struct e1000_hw *hw,
+                       struct e1000_hw_stats *stats,
+                       uint32_t frame_len,
+                       uint8_t *mac_addr)
+{
+    uint64_t carry_bit;
+
+    /* First adjust the frame length. */
+    frame_len--;
+    /* We need to adjust the statistics counters, since the hardware
+     * counters overcount this packet as a CRC error and undercount
+     * the packet as a good packet
+     */
+    /* This packet should not be counted as a CRC error.    */
+    stats->crcerrs--;
+    /* This packet does count as a Good Packet Received.    */
+    stats->gprc++;
+
+    /* Adjust the Good Octets received counters             */
+    carry_bit = 0x80000000 & stats->gorcl;
+    stats->gorcl += frame_len;
+    /* If the high bit of Gorcl (the low 32 bits of the Good Octets
+     * Received Count) was one before the addition,
+     * AND it is zero after, then we lost the carry out,
+     * need to add one to Gorch (Good Octets Received Count High).
+     * This could be simplified if all environments supported
+     * 64-bit integers.
+     */
+    if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
+        stats->gorch++;
+    /* Is this a broadcast or multicast?  Check broadcast first,
+     * since the test for a multicast frame will test positive on
+     * a broadcast frame.
+     */
+    if ((mac_addr[0] == (uint8_t) 0xff) && (mac_addr[1] == (uint8_t) 0xff))
+        /* Broadcast packet */
+        stats->bprc++;
+    else if (*mac_addr & 0x01)
+        /* Multicast packet */
+        stats->mprc++;
+
+    if (frame_len == hw->max_frame_size) {
+        /* In this case, the hardware has overcounted the number of
+         * oversize frames.
+         */
+        if (stats->roc > 0)
+            stats->roc--;
+    }
+
+    /* Adjust the bin counters when the extra byte put the frame in the
+     * wrong bin. Remember that the frame_len was adjusted above.
+     */
+    if (frame_len == 64) {
+        stats->prc64++;
+        stats->prc127--;
+    } else if (frame_len == 127) {
+        stats->prc127++;
+        stats->prc255--;
+    } else if (frame_len == 255) {
+        stats->prc255++;
+        stats->prc511--;
+    } else if (frame_len == 511) {
+        stats->prc511++;
+        stats->prc1023--;
+    } else if (frame_len == 1023) {
+        stats->prc1023++;
+        stats->prc1522--;
+    } else if (frame_len == 1522) {
+        stats->prc1522++;
+    }
+}
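+
+/*
+ * Illustrative sketch (not part of the driver): the manual carry handling for
+ * gorcl/gorch above is needed because the Good Octets Received counter is
+ * kept as two 32-bit halves.  With a 64-bit type the same update would reduce
+ * to something like
+ *
+ *     uint64_t gorc = ((uint64_t)stats->gorch << 32) | stats->gorcl;
+ *     gorc += frame_len;
+ *     stats->gorcl = (uint32_t)gorc;
+ *     stats->gorch = (uint32_t)(gorc >> 32);
+ *
+ * which is the simplification the in-function comment alludes to.
+ */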
+
+/******************************************************************************
+ * Gets the current PCI bus type, speed, and width of the hardware
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void
+e1000_get_bus_info(struct e1000_hw *hw)
+{
+    uint32_t status;
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+        hw->bus_type = e1000_bus_type_unknown;
+        hw->bus_speed = e1000_bus_speed_unknown;
+        hw->bus_width = e1000_bus_width_unknown;
+        break;
+    case e1000_82572:
+    case e1000_82573:
+        hw->bus_type = e1000_bus_type_pci_express;
+        hw->bus_speed = e1000_bus_speed_2500;
+        hw->bus_width = e1000_bus_width_pciex_1;
+        break;
+    case e1000_82571:
+    case e1000_ich8lan:
+    case e1000_80003es2lan:
+        hw->bus_type = e1000_bus_type_pci_express;
+        hw->bus_speed = e1000_bus_speed_2500;
+        hw->bus_width = e1000_bus_width_pciex_4;
+        break;
+    default:
+        status = E1000_READ_REG(hw, STATUS);
+        hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
+                       e1000_bus_type_pcix : e1000_bus_type_pci;
+
+        if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
+            hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
+                            e1000_bus_speed_66 : e1000_bus_speed_120;
+        } else if (hw->bus_type == e1000_bus_type_pci) {
+            hw->bus_speed = (status & E1000_STATUS_PCI66) ?
+                            e1000_bus_speed_66 : e1000_bus_speed_33;
+        } else {
+            switch (status & E1000_STATUS_PCIX_SPEED) {
+            case E1000_STATUS_PCIX_SPEED_66:
+                hw->bus_speed = e1000_bus_speed_66;
+                break;
+            case E1000_STATUS_PCIX_SPEED_100:
+                hw->bus_speed = e1000_bus_speed_100;
+                break;
+            case E1000_STATUS_PCIX_SPEED_133:
+                hw->bus_speed = e1000_bus_speed_133;
+                break;
+            default:
+                hw->bus_speed = e1000_bus_speed_reserved;
+                break;
+            }
+        }
+        hw->bus_width = (status & E1000_STATUS_BUS64) ?
+                        e1000_bus_width_64 : e1000_bus_width_32;
+        break;
+    }
+}
+/******************************************************************************
+ * Reads a value from one of the device's registers using port I/O (as opposed
+ * to memory-mapped I/O). Only 82544 and newer devices support port I/O.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset to read from
+ *****************************************************************************/
+uint32_t
+e1000_read_reg_io(struct e1000_hw *hw,
+                  uint32_t offset)
+{
+    unsigned long io_addr = hw->io_base;
+    unsigned long io_data = hw->io_base + 4;
+
+    e1000_io_write(hw, io_addr, offset);
+    return e1000_io_read(hw, io_data);
+}
+
+/******************************************************************************
+ * Writes a value to one of the device's registers using port I/O (as opposed
+ * to memory-mapped I/O). Only 82544 and newer devices support port I/O.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset to write to
+ * value - value to write
+ *****************************************************************************/
+void
+e1000_write_reg_io(struct e1000_hw *hw,
+                   uint32_t offset,
+                   uint32_t value)
+{
+    unsigned long io_addr = hw->io_base;
+    unsigned long io_data = hw->io_base + 4;
+
+    e1000_io_write(hw, io_addr, offset);
+    e1000_io_write(hw, io_data, value);
+}
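+
+/*
+ * Illustrative sketch (not part of the driver): the two helpers above follow
+ * the usual index/data port pattern - the register offset is first written to
+ * IOADDR (hw->io_base) and the value is then transferred through IODATA
+ * (hw->io_base + 4).  A hypothetical caller reading the STATUS register via
+ * port I/O instead of MMIO would look like
+ *
+ *     uint32_t status = e1000_read_reg_io(hw, E1000_STATUS);
+ *
+ * where E1000_STATUS is assumed to be the STATUS register's byte offset as
+ * defined elsewhere in the shared code.
+ */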
+
+
+/******************************************************************************
+ * Estimates the cable length.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * min_length - The estimated minimum length
+ * max_length - The estimated maximum length
+ *
+ * returns: - E1000_ERR_XXX
+ *            E1000_SUCCESS
+ *
+ * This function always returns a ranged length (minimum & maximum).
+ * For M88 PHYs, it maps the single value returned from the register onto a
+ * minimum and maximum range.
+ * For IGP PHYs, it calculates the range from the AGC registers.
+ *****************************************************************************/
+int32_t
+e1000_get_cable_length(struct e1000_hw *hw,
+                       uint16_t *min_length,
+                       uint16_t *max_length)
+{
+    int32_t ret_val;
+    uint16_t agc_value = 0;
+    uint16_t i, phy_data;
+    uint16_t cable_length;
+
+    DEBUGFUNC("e1000_get_cable_length");
+
+    *min_length = *max_length = 0;
+
+    /* Use old method for Phy older than IGP */
+    if (hw->phy_type == e1000_phy_m88) {
+
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+                       M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+
+        /* Convert the enum value to ranged values */
+        switch (cable_length) {
+        case e1000_cable_length_50:
+            *min_length = 0;
+            *max_length = e1000_igp_cable_length_50;
+            break;
+        case e1000_cable_length_50_80:
+            *min_length = e1000_igp_cable_length_50;
+            *max_length = e1000_igp_cable_length_80;
+            break;
+        case e1000_cable_length_80_110:
+            *min_length = e1000_igp_cable_length_80;
+            *max_length = e1000_igp_cable_length_110;
+            break;
+        case e1000_cable_length_110_140:
+            *min_length = e1000_igp_cable_length_110;
+            *max_length = e1000_igp_cable_length_140;
+            break;
+        case e1000_cable_length_140:
+            *min_length = e1000_igp_cable_length_140;
+            *max_length = e1000_igp_cable_length_170;
+            break;
+        default:
+            return -E1000_ERR_PHY;
+            break;
+        }
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH;
+
+        switch (cable_length) {
+        case e1000_gg_cable_length_60:
+            *min_length = 0;
+            *max_length = e1000_igp_cable_length_60;
+            break;
+        case e1000_gg_cable_length_60_115:
+            *min_length = e1000_igp_cable_length_60;
+            *max_length = e1000_igp_cable_length_115;
+            break;
+        case e1000_gg_cable_length_115_150:
+            *min_length = e1000_igp_cable_length_115;
+            *max_length = e1000_igp_cable_length_150;
+            break;
+        case e1000_gg_cable_length_150:
+            *min_length = e1000_igp_cable_length_150;
+            *max_length = e1000_igp_cable_length_180;
+            break;
+        default:
+            return -E1000_ERR_PHY;
+            break;
+        }
+    } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
+        uint16_t cur_agc_value;
+        uint16_t min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+        uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+                                                         {IGP01E1000_PHY_AGC_A,
+                                                          IGP01E1000_PHY_AGC_B,
+                                                          IGP01E1000_PHY_AGC_C,
+                                                          IGP01E1000_PHY_AGC_D};
+        /* Read the AGC registers for all channels */
+        for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+
+            ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
+
+            /* Value bound check. */
+            if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
+                (cur_agc_value == 0))
+                return -E1000_ERR_PHY;
+
+            agc_value += cur_agc_value;
+
+            /* Update minimal AGC value. */
+            if (min_agc_value > cur_agc_value)
+                min_agc_value = cur_agc_value;
+        }
+
+        /* Remove the minimal AGC result for length < 50m */
+        if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
+            agc_value -= min_agc_value;
+
+            /* Get the average length of the remaining 3 channels */
+            agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
+        } else {
+            /* Get the average length of all the 4 channels. */
+            agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
+        }
+
+        /* Set the range of the calculated length. */
+        *min_length = ((e1000_igp_cable_length_table[agc_value] -
+                       IGP01E1000_AGC_RANGE) > 0) ?
+                       (e1000_igp_cable_length_table[agc_value] -
+                       IGP01E1000_AGC_RANGE) : 0;
+        *max_length = e1000_igp_cable_length_table[agc_value] +
+                      IGP01E1000_AGC_RANGE;
+    } else if (hw->phy_type == e1000_phy_igp_2 ||
+               hw->phy_type == e1000_phy_igp_3) {
+        uint16_t cur_agc_index, max_agc_index = 0;
+        uint16_t min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
+        uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
+                                                         {IGP02E1000_PHY_AGC_A,
+                                                          IGP02E1000_PHY_AGC_B,
+                                                          IGP02E1000_PHY_AGC_C,
+                                                          IGP02E1000_PHY_AGC_D};
+        /* Read the AGC registers for all channels */
+        for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+            ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            /* Getting bits 15:9, which represent the combination of coarse and
+             * fine gain values.  The result is a number that can be put into
+             * the lookup table to obtain the approximate cable length. */
+            cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+                            IGP02E1000_AGC_LENGTH_MASK;
+
+            /* Array index bound check. */
+            if ((cur_agc_index >= IGP02E1000_AGC_LENGTH_TABLE_SIZE) ||
+                (cur_agc_index == 0))
+                return -E1000_ERR_PHY;
+
+            /* Remove min & max AGC values from calculation. */
+            if (e1000_igp_2_cable_length_table[min_agc_index] >
+                e1000_igp_2_cable_length_table[cur_agc_index])
+                min_agc_index = cur_agc_index;
+            if (e1000_igp_2_cable_length_table[max_agc_index] <
+                e1000_igp_2_cable_length_table[cur_agc_index])
+                max_agc_index = cur_agc_index;
+
+            agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+        }
+
+        agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+                      e1000_igp_2_cable_length_table[max_agc_index]);
+        agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+        /* Calculate cable length with the error range of +/- 10 meters. */
+        *min_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+                       (agc_value - IGP02E1000_AGC_RANGE) : 0;
+        *max_length = agc_value + IGP02E1000_AGC_RANGE;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Check the cable polarity
+ *
+ * hw - Struct containing variables accessed by shared code
+ * polarity - output parameter : 0 - Polarity is not reversed
+ *                               1 - Polarity is reversed.
+ *
+ * returns: - E1000_ERR_XXX
+ *            E1000_SUCCESS
+ *
+ * For PHYs older than IGP, this function simply reads the polarity bit in the
+ * Phy Status register.  For IGP phy's, this bit is valid only if link speed is
+ * 10 Mbps.  If the link speed is 100 Mbps there is no polarity so this bit will
+ * return 0.  If the link speed is 1000 Mbps the polarity status is in the
+ * IGP01E1000_PHY_PCS_INIT_REG.
+ *****************************************************************************/
+int32_t
+e1000_check_polarity(struct e1000_hw *hw,
+                     uint16_t *polarity)
+{
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_check_polarity");
+
+    if ((hw->phy_type == e1000_phy_m88) ||
+        (hw->phy_type == e1000_phy_gg82563)) {
+        /* return the Polarity bit in the Status register. */
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >>
+                    M88E1000_PSSR_REV_POLARITY_SHIFT;
+    } else if (hw->phy_type == e1000_phy_igp ||
+              hw->phy_type == e1000_phy_igp_3 ||
+              hw->phy_type == e1000_phy_igp_2) {
+        /* Read the Status register to check the speed */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
+         * find the polarity status */
+        if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+           IGP01E1000_PSSR_SPEED_1000MBPS) {
+
+            /* Read the GIG initialization PCS register (0x00B4) */
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            /* Check the polarity bits */
+            *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ? 1 : 0;
+        } else {
+            /* For 10 Mbps, read the polarity bit in the status register. (for
+             * 100 Mbps this bit is always 0) */
+            *polarity = phy_data & IGP01E1000_PSSR_POLARITY_REVERSED;
+        }
+    } else if (hw->phy_type == e1000_phy_ife) {
+        ret_val = e1000_read_phy_reg(hw, IFE_PHY_EXTENDED_STATUS_CONTROL,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        *polarity = (phy_data & IFE_PESC_POLARITY_REVERSED) >>
+                           IFE_PESC_POLARITY_REVERSED_SHIFT;
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Check if Downshift occurred
+ *
+ * hw - Struct containing variables accessed by shared code
+ * downshift - output parameter : 0 - No Downshift occurred.
+ *                                1 - Downshift occurred.
+ *
+ * returns: - E1000_ERR_XXX
+ *            E1000_SUCCESS
+ *
+ * For PHYs older than IGP, this function reads the Downshift bit in the Phy
+ * Specific Status register.  For IGP phy's, it reads the Downgrade bit in the
+ * Link Health register.  In IGP this bit is latched high, so the driver must
+ * read it immediately after link is established.
+ *****************************************************************************/
+int32_t
+e1000_check_downshift(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_check_downshift");
+
+    if (hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2) {
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
+    } else if ((hw->phy_type == e1000_phy_m88) ||
+               (hw->phy_type == e1000_phy_gg82563)) {
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
+                               M88E1000_PSSR_DOWNSHIFT_SHIFT;
+    } else if (hw->phy_type == e1000_phy_ife) {
+        /* e1000_phy_ife supports 10/100 speed only */
+        hw->speed_downgraded = FALSE;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ *
+ * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
+ * gigabit link is achieved to improve link quality.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_PHY if reading/writing the PHY fails
+ *            E1000_SUCCESS in any other case.
+ *
+ ****************************************************************************/
+
+int32_t
+e1000_config_dsp_after_link_change(struct e1000_hw *hw,
+                                   boolean_t link_up)
+{
+    int32_t ret_val;
+    uint16_t phy_data, phy_saved_data, speed, duplex, i;
+    uint16_t dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+                                        {IGP01E1000_PHY_AGC_PARAM_A,
+                                        IGP01E1000_PHY_AGC_PARAM_B,
+                                        IGP01E1000_PHY_AGC_PARAM_C,
+                                        IGP01E1000_PHY_AGC_PARAM_D};
+    uint16_t min_length, max_length;
+
+    DEBUGFUNC("e1000_config_dsp_after_link_change");
+
+    if (hw->phy_type != e1000_phy_igp)
+        return E1000_SUCCESS;
+
+    if (link_up) {
+        ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+        if (ret_val) {
+            DEBUGOUT("Error getting link speed and duplex\n");
+            return ret_val;
+        }
+
+        if (speed == SPEED_1000) {
+
+            ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+            if (ret_val)
+                return ret_val;
+
+            if ((hw->dsp_config_state == e1000_dsp_config_enabled) &&
+                min_length >= e1000_igp_cable_length_50) {
+
+                for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+                    ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i],
+                                                 &phy_data);
+                    if (ret_val)
+                        return ret_val;
+
+                    phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+
+                    ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i],
+                                                  phy_data);
+                    if (ret_val)
+                        return ret_val;
+                }
+                hw->dsp_config_state = e1000_dsp_config_activated;
+            }
+
+            if ((hw->ffe_config_state == e1000_ffe_config_enabled) &&
+               (min_length < e1000_igp_cable_length_50)) {
+
+                uint16_t ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
+                uint32_t idle_errs = 0;
+
+                /* clear previous idle error counts */
+                ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+                                             &phy_data);
+                if (ret_val)
+                    return ret_val;
+
+                for (i = 0; i < ffe_idle_err_timeout; i++) {
+                    usec_delay(1000);
+                    ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+                                                 &phy_data);
+                    if (ret_val)
+                        return ret_val;
+
+                    idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
+                    if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
+                        hw->ffe_config_state = e1000_ffe_config_active;
+
+                        ret_val = e1000_write_phy_reg(hw,
+                                    IGP01E1000_PHY_DSP_FFE,
+                                    IGP01E1000_PHY_DSP_FFE_CM_CP);
+                        if (ret_val)
+                            return ret_val;
+                        break;
+                    }
+
+                    if (idle_errs)
+                        ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_100;
+                }
+            }
+        }
+    } else {
+        if (hw->dsp_config_state == e1000_dsp_config_activated) {
+            /* Save off the current value of register 0x2F5B to be restored at
+             * the end of the routines. */
+            ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+            if (ret_val)
+                return ret_val;
+
+            /* Disable the PHY transmitter */
+            ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+            if (ret_val)
+                return ret_val;
+
+            msec_delay_irq(20);
+
+            ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                          IGP01E1000_IEEE_FORCE_GIGA);
+            if (ret_val)
+                return ret_val;
+            for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+                ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], &phy_data);
+                if (ret_val)
+                    return ret_val;
+
+                phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+                phy_data |=  IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
+
+                ret_val = e1000_write_phy_reg(hw,dsp_reg_array[i], phy_data);
+                if (ret_val)
+                    return ret_val;
+            }
+
+            ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                          IGP01E1000_IEEE_RESTART_AUTONEG);
+            if (ret_val)
+                return ret_val;
+
+            msec_delay_irq(20);
+
+            /* Now enable the transmitter */
+            ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+            if (ret_val)
+                return ret_val;
+
+            hw->dsp_config_state = e1000_dsp_config_enabled;
+        }
+
+        if (hw->ffe_config_state == e1000_ffe_config_active) {
+            /* Save off the current value of register 0x2F5B to be restored at
+             * the end of the routines. */
+            ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+            if (ret_val)
+                return ret_val;
+
+            /* Disable the PHY transmitter */
+            ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+            if (ret_val)
+                return ret_val;
+
+            msec_delay_irq(20);
+
+            ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                          IGP01E1000_IEEE_FORCE_GIGA);
+            if (ret_val)
+                return ret_val;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
+                                          IGP01E1000_PHY_DSP_FFE_DEFAULT);
+            if (ret_val)
+                return ret_val;
+
+            ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                          IGP01E1000_IEEE_RESTART_AUTONEG);
+            if (ret_val)
+                return ret_val;
+
+            msec_delay_irq(20);
+
+            /* Now enable the transmitter */
+            ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+            if (ret_val)
+                return ret_val;
+
+            hw->ffe_config_state = e1000_ffe_config_enabled;
+        }
+    }
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ * Set PHY to class A mode
+ * Assumes the following operations will follow to enable the new class mode.
+ *  1. Do a PHY soft reset
+ *  2. Restart auto-negotiation or force link.
+ *
+ * hw - Struct containing variables accessed by shared code
+ ****************************************************************************/
+static int32_t
+e1000_set_phy_mode(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t eeprom_data;
+
+    DEBUGFUNC("e1000_set_phy_mode");
+
+    if ((hw->mac_type == e1000_82545_rev_3) &&
+        (hw->media_type == e1000_media_type_copper)) {
+        ret_val = e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data);
+        if (ret_val) {
+            return ret_val;
+        }
+
+        if ((eeprom_data != EEPROM_RESERVED_WORD) &&
+            (eeprom_data & EEPROM_PHY_CLASS_A)) {
+            ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x000B);
+            if (ret_val)
+                return ret_val;
+            ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x8104);
+            if (ret_val)
+                return ret_val;
+
+            hw->phy_reset_disable = FALSE;
+        }
+    }
+
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ *
+ * This function sets the LPLU state according to the active flag.  When
+ * activating LPLU this function also disables SmartSpeed and vice versa.
+ * LPLU will not be activated unless the device autonegotiation advertisement
+ * meets standards of either 10, 10/100, or 10/100/1000 at all duplexes.
+ * hw: Struct containing variables accessed by shared code
+ * active - TRUE to enable LPLU, FALSE to disable LPLU.
+ *
+ * returns: - E1000_ERR_PHY if failing to read/write the PHY
+ *            E1000_SUCCESS in any other case.
+ *
+ ****************************************************************************/
+
+int32_t
+e1000_set_d3_lplu_state(struct e1000_hw *hw,
+                        boolean_t active)
+{
+    uint32_t phy_ctrl = 0;
+    int32_t ret_val;
+    uint16_t phy_data;
+    DEBUGFUNC("e1000_set_d3_lplu_state");
+
+    if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2
+        && hw->phy_type != e1000_phy_igp_3)
+        return E1000_SUCCESS;
+
+    /* During driver activity LPLU should not be used or it will attain link
+     * from the lowest speeds starting from 10Mbps. The capability is used for
+     * Dx transitions and states */
+    if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) {
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
+        if (ret_val)
+            return ret_val;
+    } else if (hw->mac_type == e1000_ich8lan) {
+        /* MAC writes into PHY register based on the state transition
+         * and start auto-negotiation. SW driver can overwrite the settings
+         * in CSR PHY power control E1000_PHY_CTRL register. */
+        phy_ctrl = E1000_READ_REG(hw, PHY_CTRL);
+    } else {
+        ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    if (!active) {
+        if (hw->mac_type == e1000_82541_rev_2 ||
+            hw->mac_type == e1000_82547_rev_2) {
+            phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
+            if (ret_val)
+                return ret_val;
+        } else {
+            if (hw->mac_type == e1000_ich8lan) {
+                phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+                E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+            } else {
+                phy_data &= ~IGP02E1000_PM_D3_LPLU;
+                ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+                                              phy_data);
+                if (ret_val)
+                    return ret_val;
+            }
+        }
+
+        /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used during
+         * Dx states where the power conservation is most important.  During
+         * driver activity we should enable SmartSpeed, so performance is
+         * maintained. */
+        if (hw->smart_speed == e1000_smart_speed_on) {
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+        } else if (hw->smart_speed == e1000_smart_speed_off) {
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+    } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
+               (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
+               (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
+
+        if (hw->mac_type == e1000_82541_rev_2 ||
+            hw->mac_type == e1000_82547_rev_2) {
+            phy_data |= IGP01E1000_GMII_FLEX_SPD;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
+            if (ret_val)
+                return ret_val;
+        } else {
+            if (hw->mac_type == e1000_ich8lan) {
+                phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
+                E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+            } else {
+                phy_data |= IGP02E1000_PM_D3_LPLU;
+                ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+                                              phy_data);
+                if (ret_val)
+                    return ret_val;
+            }
+        }
+
+        /* When LPLU is enabled we should disable SmartSpeed */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
+        if (ret_val)
+            return ret_val;
+
+    }
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ *
+ * This function sets the LPLU D0 state according to the active flag.  When
+ * activating LPLU this function also disables SmartSpeed and vice versa.
+ * LPLU will not be activated unless the device autonegotiation advertisement
+ * meets standards of either 10, 10/100, or 10/100/1000 at all duplexes.
+ * hw: Struct containing variables accessed by shared code
+ * active - TRUE to enable LPLU, FALSE to disable LPLU.
+ *
+ * returns: - E1000_ERR_PHY if failing to read/write the PHY
+ *            E1000_SUCCESS in any other case.
+ *
+ ****************************************************************************/
+
+int32_t
+e1000_set_d0_lplu_state(struct e1000_hw *hw,
+                        boolean_t active)
+{
+    uint32_t phy_ctrl = 0;
+    int32_t ret_val;
+    uint16_t phy_data;
+    DEBUGFUNC("e1000_set_d0_lplu_state");
+
+    if (hw->mac_type <= e1000_82547_rev_2)
+        return E1000_SUCCESS;
+
+    if (hw->mac_type == e1000_ich8lan) {
+        phy_ctrl = E1000_READ_REG(hw, PHY_CTRL);
+    } else {
+        ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    if (!active) {
+        if (hw->mac_type == e1000_ich8lan) {
+            phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
+            E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+        } else {
+            phy_data &= ~IGP02E1000_PM_D0_LPLU;
+            ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+        /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used during
+         * Dx states where the power conservation is most important.  During
+         * driver activity we should enable SmartSpeed, so performance is
+         * maintained. */
+        if (hw->smart_speed == e1000_smart_speed_on) {
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+        } else if (hw->smart_speed == e1000_smart_speed_off) {
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+
+    } else {
+
+        if (hw->mac_type == e1000_ich8lan) {
+            phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
+            E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+        } else {
+            phy_data |= IGP02E1000_PM_D0_LPLU;
+            ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+        /* When LPLU is enabled we should disable SmartSpeed */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
+        if (ret_val)
+            return ret_val;
+
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Change VCO speed register to improve Bit Error Rate performance of SERDES.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static int32_t
+e1000_set_vco_speed(struct e1000_hw *hw)
+{
+    int32_t  ret_val;
+    uint16_t default_page = 0;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_set_vco_speed");
+
+    switch (hw->mac_type) {
+    case e1000_82545_rev_3:
+    case e1000_82546_rev_3:
+       break;
+    default:
+        return E1000_SUCCESS;
+    }
+
+    /* Set PHY register 30, page 5, bit 8 to 0 */
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    /* Set PHY register 30, page 4, bit 11 to 1 */
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data |= M88E1000_PHY_VCO_REG_BIT11;
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
+    if (ret_val)
+        return ret_val;
+
+    return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function reads the cookie from ARC ram.
+ *
+ * returns: - E1000_SUCCESS.
+ ****************************************************************************/
+int32_t
+e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer)
+{
+    uint8_t i;
+    uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET;
+    uint8_t length = E1000_MNG_DHCP_COOKIE_LENGTH;
+
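+    /* Convert the cookie length and offset from bytes to DWORD units for the
+     * HOST_IF register array accesses below. */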
+    length = (length >> 2);
+    offset = (offset >> 2);
+
+    for (i = 0; i < length; i++) {
+        *((uint32_t *) buffer + i) =
+            E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i);
+    }
+    return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function checks whether the host interface is enabled for command
+ * operation and whether the previous command has completed.  It busy-waits
+ * if the previous command has not yet completed.
+ *
+ * returns: - E1000_ERR_HOST_INTERFACE_COMMAND if the interface is not ready
+ *            or the wait times out
+ *          - E1000_SUCCESS for success.
+ ****************************************************************************/
+int32_t
+e1000_mng_enable_host_if(struct e1000_hw * hw)
+{
+    uint32_t hicr;
+    uint8_t i;
+
+    /* Check that the host interface is enabled. */
+    hicr = E1000_READ_REG(hw, HICR);
+    if ((hicr & E1000_HICR_EN) == 0) {
+        DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+        return -E1000_ERR_HOST_INTERFACE_COMMAND;
+    }
+    /* check the previous command is completed */
+    for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+        hicr = E1000_READ_REG(hw, HICR);
+        if (!(hicr & E1000_HICR_C))
+            break;
+        msec_delay_irq(1);
+    }
+
+    if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+        DEBUGOUT("Previous command timed out.\n");
+        return -E1000_ERR_HOST_INTERFACE_COMMAND;
+    }
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ * This function writes the buffer content at the given offset on the host
+ * interface.  It handles alignment so the writes are done in the most
+ * efficient way, and accumulates the byte sum of the buffer in the *sum
+ * parameter.
+ *
+ * returns  - E1000_SUCCESS for success.
+ ****************************************************************************/
+int32_t
+e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
+                        uint16_t length, uint16_t offset, uint8_t *sum)
+{
+    uint8_t *tmp;
+    uint8_t *bufptr = buffer;
+    uint32_t data = 0;
+    uint16_t remaining, i, j, prev_bytes;
+
+    /* sum accumulates the plain byte sum of the data; it is not a checksum */
+
+    if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
+        return -E1000_ERR_PARAM;
+    }
+
+    tmp = (uint8_t *)&data;
+    prev_bytes = offset & 0x3;
+    offset &= 0xFFFC;
+    offset >>= 2;
+
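+    /* If the offset is not DWORD aligned, read-modify-write the first DWORD
+     * so the bytes below the offset are preserved. */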
+    if (prev_bytes) {
+        data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset);
+        for (j = prev_bytes; j < sizeof(uint32_t); j++) {
+            *(tmp + j) = *bufptr++;
+            *sum += *(tmp + j);
+        }
+        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset, data);
+        length -= j - prev_bytes;
+        offset++;
+    }
+
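+    /* Split the rest of the buffer into whole DWORDs plus a trailing
+     * partial DWORD. */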
+    remaining = length & 0x3;
+    length -= remaining;
+
+    /* Calculate length in DWORDs */
+    length >>= 2;
+
+    /* The device driver writes the relevant command block into the
+     * ram area. */
+    for (i = 0; i < length; i++) {
+        for (j = 0; j < sizeof(uint32_t); j++) {
+            *(tmp + j) = *bufptr++;
+            *sum += *(tmp + j);
+        }
+
+        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
+    }
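+    /* Write the trailing partial DWORD, zero-padding the bytes beyond the
+     * end of the buffer. */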
+    if (remaining) {
+        for (j = 0; j < sizeof(uint32_t); j++) {
+            if (j < remaining)
+                *(tmp + j) = *bufptr++;
+            else
+                *(tmp + j) = 0;
+
+            *sum += *(tmp + j);
+        }
+        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
+    }
+
+    return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function writes the command header after calculating its checksum.
+ *
+ * returns  - E1000_SUCCESS for success.
+ ****************************************************************************/
+int32_t
+e1000_mng_write_cmd_header(struct e1000_hw * hw,
+                           struct e1000_host_mng_command_header * hdr)
+{
+    uint16_t i;
+    uint8_t sum;
+    uint8_t *buffer;
+
+    /* Write the whole command header structure which includes sum of
+     * the buffer */
+
+    uint16_t length = sizeof(struct e1000_host_mng_command_header);
+
+    sum = hdr->checksum;
+    hdr->checksum = 0;
+
+    buffer = (uint8_t *) hdr;
+    i = length;
+    while (i--)
+        sum += buffer[i];
+
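+    /* Store the two's complement of the byte sum so that the header bytes,
+     * including the checksum, sum to zero modulo 256. */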
+    hdr->checksum = 0 - sum;
+
+    length >>= 2;
+    /* The device driver writes the relevant command block into the ram area. */
+    for (i = 0; i < length; i++) {
+        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i));
+        E1000_WRITE_FLUSH(hw);
+    }
+
+    return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function indicates to ARC that a new command is pending which completes
+ * one write operation by the driver.
+ *
+ * returns  - E1000_SUCCESS for success.
+ ****************************************************************************/
+int32_t
+e1000_mng_write_commit(struct e1000_hw * hw)
+{
+    uint32_t hicr;
+
+    hicr = E1000_READ_REG(hw, HICR);
+    /* Setting this bit tells the ARC that a new command is pending. */
+    E1000_WRITE_REG(hw, HICR, hicr | E1000_HICR_C);
+
+    return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function checks the mode of the firmware.
+ *
+ * returns  - TRUE when the firmware is in IAMT mode, FALSE otherwise.
+ ****************************************************************************/
+boolean_t
+e1000_check_mng_mode(struct e1000_hw *hw)
+{
+    uint32_t fwsm;
+
+    fwsm = E1000_READ_REG(hw, FWSM);
+
+    if (hw->mac_type == e1000_ich8lan) {
+        if ((fwsm & E1000_FWSM_MODE_MASK) ==
+            (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
+            return TRUE;
+    } else if ((fwsm & E1000_FWSM_MODE_MASK) ==
+               (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
+        return TRUE;
+
+    return FALSE;
+}
+
+
+/*****************************************************************************
+ * This function writes the DHCP information to the host interface.
+ ****************************************************************************/
+int32_t
+e1000_mng_write_dhcp_info(struct e1000_hw * hw, uint8_t *buffer,
+			  uint16_t length)
+{
+    int32_t ret_val;
+    struct e1000_host_mng_command_header hdr;
+
+    hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+    hdr.command_length = length;
+    hdr.reserved1 = 0;
+    hdr.reserved2 = 0;
+    hdr.checksum = 0;
+
+    ret_val = e1000_mng_enable_host_if(hw);
+    if (ret_val == E1000_SUCCESS) {
+        ret_val = e1000_mng_host_if_write(hw, buffer, length, sizeof(hdr),
+                                          &(hdr.checksum));
+        if (ret_val == E1000_SUCCESS) {
+            ret_val = e1000_mng_write_cmd_header(hw, &hdr);
+            if (ret_val == E1000_SUCCESS)
+                ret_val = e1000_mng_write_commit(hw);
+        }
+    }
+    return ret_val;
+}
+
+
+/*****************************************************************************
+ * This function calculates the checksum.
+ *
+ * returns  - checksum of buffer contents.
+ ****************************************************************************/
+uint8_t
+e1000_calculate_mng_checksum(char *buffer, uint32_t length)
+{
+    uint8_t sum = 0;
+    uint32_t i;
+
+    if (!buffer)
+        return 0;
+
+    for (i = 0; i < length; i++)
+        sum += buffer[i];
+
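+    /* Return the two's complement of the sum so that the buffer bytes plus
+     * the checksum sum to zero modulo 256. */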
+    return (uint8_t) (0 - sum);
+}
+
+/*****************************************************************************
+ * This function checks whether TX packet filtering needs to be enabled or not.
+ *
+ * returns  - TRUE if packet filtering is needed, FALSE otherwise.
+ ****************************************************************************/
+boolean_t
+e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+    /* called in init as well as watchdog timer functions */
+
+    int32_t ret_val, checksum;
+    boolean_t tx_filter = FALSE;
+    struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie);
+    uint8_t *buffer = (uint8_t *) &(hw->mng_cookie);
+
+    if (e1000_check_mng_mode(hw)) {
+        ret_val = e1000_mng_enable_host_if(hw);
+        if (ret_val == E1000_SUCCESS) {
+            ret_val = e1000_host_if_read_cookie(hw, buffer);
+            if (ret_val == E1000_SUCCESS) {
+                checksum = hdr->checksum;
+                hdr->checksum = 0;
+                if ((hdr->signature == E1000_IAMT_SIGNATURE) &&
+                    checksum == e1000_calculate_mng_checksum((char *)buffer,
+                                               E1000_MNG_DHCP_COOKIE_LENGTH)) {
+                    if (hdr->status &
+                        E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT)
+                        tx_filter = TRUE;
+                } else
+                    tx_filter = TRUE;
+            } else
+                tx_filter = TRUE;
+        }
+    }
+
+    hw->tx_pkt_filtering = tx_filter;
+    return tx_filter;
+}
+
+/******************************************************************************
+ * Verifies whether the hardware needs to allow ARPs to be processed by the host
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * returns: - TRUE/FALSE
+ *
+ *****************************************************************************/
+uint32_t
+e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+    uint32_t manc;
+    uint32_t fwsm, factps;
+
+    if (hw->asf_firmware_present) {
+        manc = E1000_READ_REG(hw, MANC);
+
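+        /* Pass-through requires the firmware to receive TCO packets and to
+         * have MAC address filtering enabled. */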
+        if (!(manc & E1000_MANC_RCV_TCO_EN) ||
+            !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+            return FALSE;
+        if (e1000_arc_subsystem_valid(hw) == TRUE) {
+            fwsm = E1000_READ_REG(hw, FWSM);
+            factps = E1000_READ_REG(hw, FACTPS);
+
+            if (((fwsm & E1000_FWSM_MODE_MASK) ==
+                (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)) &&
+                (factps & E1000_FACTPS_MNGCG))
+                return TRUE;
+        } else
+            if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
+                return TRUE;
+    }
+    return FALSE;
+}
+
+static int32_t
+e1000_polarity_reversal_workaround(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t mii_status_reg;
+    uint16_t i;
+
+    /* Polarity reversal workaround for forced 10F/10H links. */
+
+    /* Disable the transmitter on the PHY */
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+    if (ret_val)
+        return ret_val;
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+    if (ret_val)
+        return ret_val;
+
+    /* This loop will early-out if the NO link condition has been met. */
+    for (i = PHY_FORCE_TIME; i > 0; i--) {
+        /* Read the MII Status Register and wait for Link Status bit
+         * to be clear.
+         */
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break;
+        msec_delay_irq(100);
+    }
+
+    /* Recommended delay time after link has been lost */
+    msec_delay_irq(1000);
+
+    /* Now we will re-enable the transmitter on the PHY */
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+    if (ret_val)
+        return ret_val;
+    msec_delay_irq(50);
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
+    if (ret_val)
+        return ret_val;
+    msec_delay_irq(50);
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
+    if (ret_val)
+        return ret_val;
+    msec_delay_irq(50);
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+    if (ret_val)
+        return ret_val;
+
+    /* This loop will early-out if the link condition has been met. */
+    for (i = PHY_FORCE_TIME; i > 0; i--) {
+        /* Read the MII Status Register and wait for Link Status bit
+         * to be set.
+         */
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        if (mii_status_reg & MII_SR_LINK_STATUS) break;
+        msec_delay_irq(100);
+    }
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Disables PCI-Express master access.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - none.
+ *
+ ***************************************************************************/
+void
+e1000_set_pci_express_master_disable(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+
+    DEBUGFUNC("e1000_set_pci_express_master_disable");
+
+    if (hw->bus_type != e1000_bus_type_pci_express)
+        return;
+
+    ctrl = E1000_READ_REG(hw, CTRL);
+    ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+}
+
+/***************************************************************************
+ *
+ * Enables PCI-Express master access.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - none.
+ *
+ ***************************************************************************/
+void
+e1000_enable_pciex_master(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+
+    DEBUGFUNC("e1000_enable_pciex_master");
+
+    if (hw->bus_type != e1000_bus_type_pci_express)
+        return;
+
+    ctrl = E1000_READ_REG(hw, CTRL);
+    ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE;
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+}
+
+/*******************************************************************************
+ *
+ * Disables PCI-Express master access and verifies there are no pending requests
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_MASTER_REQUESTS_PENDING if master disable bit hasn't
+ *            caused the master requests to be disabled.
+ *            E1000_SUCCESS if master requests are disabled.
+ *
+ ******************************************************************************/
+int32_t
+e1000_disable_pciex_master(struct e1000_hw *hw)
+{
+    int32_t timeout = MASTER_DISABLE_TIMEOUT;   /* 80ms */
+
+    DEBUGFUNC("e1000_disable_pciex_master");
+
+    if (hw->bus_type != e1000_bus_type_pci_express)
+        return E1000_SUCCESS;
+
+    e1000_set_pci_express_master_disable(hw);
+
+    while (timeout) {
+        if (!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
+            break;
+        else
+            usec_delay(100);
+        timeout--;
+    }
+
+    if (!timeout) {
+        DEBUGOUT("Master requests are pending.\n");
+        return -E1000_ERR_MASTER_REQUESTS_PENDING;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/*******************************************************************************
+ *
+ * Check for EEPROM Auto Read bit done.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_RESET if the MAC fails to reset
+ *            E1000_SUCCESS in any other case.
+ *
+ ******************************************************************************/
+int32_t
+e1000_get_auto_rd_done(struct e1000_hw *hw)
+{
+    int32_t timeout = AUTO_READ_DONE_TIMEOUT;
+
+    DEBUGFUNC("e1000_get_auto_rd_done");
+
+    switch (hw->mac_type) {
+    default:
+        msec_delay(5);
+        break;
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_82573:
+    case e1000_80003es2lan:
+    case e1000_ich8lan:
+        while (timeout) {
+            if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD)
+                break;
+            else msec_delay(1);
+            timeout--;
+        }
+
+        if (!timeout) {
+            DEBUGOUT("Auto read by HW from EEPROM has not completed.\n");
+            return -E1000_ERR_RESET;
+        }
+        break;
+    }
+
+    /* PHY configuration from the NVM starts only after EECD_AUTO_RD goes high.
+     * Need to wait for PHY configuration completion before accessing NVM
+     * and PHY. */
+    if (hw->mac_type == e1000_82573)
+        msec_delay(25);
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ * Checks if the PHY configuration is done
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_RESET if the MAC fails to reset
+ *            E1000_SUCCESS in any other case.
+ *
+ ***************************************************************************/
+int32_t
+e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+    int32_t timeout = PHY_CFG_TIMEOUT;
+    uint32_t cfg_mask = E1000_EEPROM_CFG_DONE;
+
+    DEBUGFUNC("e1000_get_phy_cfg_done");
+
+    switch (hw->mac_type) {
+    default:
+        msec_delay_irq(10);
+        break;
+    case e1000_80003es2lan:
+        /* Separate *_CFG_DONE_* bit for each port */
+        if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
+            cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1;
+        fallthrough;
+    case e1000_82571:
+    case e1000_82572:
+        while (timeout) {
+            if (E1000_READ_REG(hw, EEMNGCTL) & cfg_mask)
+                break;
+            else
+                msec_delay(1);
+            timeout--;
+        }
+
+        if (!timeout) {
+            DEBUGOUT("MNG configuration cycle has not completed.\n");
+            return -E1000_ERR_RESET;
+        }
+        break;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Uses the combination of the SMBI and SWESMBI semaphore bits when resetting
+ * the adapter or accessing the EEPROM.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_EEPROM if the EEPROM cannot be accessed.
+ *            E1000_SUCCESS in any other case.
+ *
+ ***************************************************************************/
+int32_t
+e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
+{
+    int32_t timeout;
+    uint32_t swsm;
+
+    DEBUGFUNC("e1000_get_hw_eeprom_semaphore");
+
+    if (!hw->eeprom_semaphore_present)
+        return E1000_SUCCESS;
+
+    if (hw->mac_type == e1000_80003es2lan) {
+        /* Get the SW semaphore. */
+        if (e1000_get_software_semaphore(hw) != E1000_SUCCESS)
+            return -E1000_ERR_EEPROM;
+    }
+
+    /* Get the FW semaphore. */
+    timeout = hw->eeprom.word_size + 1;
+    while (timeout) {
+        swsm = E1000_READ_REG(hw, SWSM);
+        swsm |= E1000_SWSM_SWESMBI;
+        E1000_WRITE_REG(hw, SWSM, swsm);
+        /* if we managed to set the bit we got the semaphore. */
+        swsm = E1000_READ_REG(hw, SWSM);
+        if (swsm & E1000_SWSM_SWESMBI)
+            break;
+
+        usec_delay(50);
+        timeout--;
+    }
+
+    if (!timeout) {
+        /* Release semaphores */
+        e1000_put_hw_eeprom_semaphore(hw);
+        DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n");
+        return -E1000_ERR_EEPROM;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ * This function clears HW semaphore bits.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - None.
+ *
+ ***************************************************************************/
+void
+e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
+{
+    uint32_t swsm;
+
+    DEBUGFUNC("e1000_put_hw_eeprom_semaphore");
+
+    if (!hw->eeprom_semaphore_present)
+        return;
+
+    swsm = E1000_READ_REG(hw, SWSM);
+    if (hw->mac_type == e1000_80003es2lan) {
+        /* Release both semaphores. */
+        swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+    } else
+        swsm &= ~(E1000_SWSM_SWESMBI);
+    E1000_WRITE_REG(hw, SWSM, swsm);
+}
+
+/***************************************************************************
+ *
+ * Obtains the software semaphore bit (SMBI) before resetting the PHY.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_RESET if the semaphore cannot be obtained.
+ *            E1000_SUCCESS in any other case.
+ *
+ ***************************************************************************/
+int32_t
+e1000_get_software_semaphore(struct e1000_hw *hw)
+{
+    int32_t timeout = hw->eeprom.word_size + 1;
+    uint32_t swsm;
+
+    DEBUGFUNC("e1000_get_software_semaphore");
+
+    if (hw->mac_type != e1000_80003es2lan)
+        return E1000_SUCCESS;
+
+    while (timeout) {
+        swsm = E1000_READ_REG(hw, SWSM);
+        /* If SMBI bit cleared, it is now set and we hold the semaphore */
+        if (!(swsm & E1000_SWSM_SMBI))
+            break;
+        msec_delay_irq(1);
+        timeout--;
+    }
+
+    if (!timeout) {
+        DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+        return -E1000_ERR_RESET;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Release semaphore bit (SMBI).
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+void
+e1000_release_software_semaphore(struct e1000_hw *hw)
+{
+    uint32_t swsm;
+
+    DEBUGFUNC("e1000_release_software_semaphore");
+
+    if (hw->mac_type != e1000_80003es2lan)
+        return;
+
+    swsm = E1000_READ_REG(hw, SWSM);
+    /* Release the SW semaphores.*/
+    swsm &= ~E1000_SWSM_SMBI;
+    E1000_WRITE_REG(hw, SWSM, swsm);
+}
+
+/******************************************************************************
+ * Checks if PHY reset is blocked due to SOL/IDER session, for example.
+ * Returning E1000_BLK_PHY_RESET isn't necessarily an error.  But it's up to
+ * the caller to figure out how to deal with it.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_BLK_PHY_RESET
+ *            E1000_SUCCESS
+ *
+ *****************************************************************************/
+int32_t
+e1000_check_phy_reset_block(struct e1000_hw *hw)
+{
+    uint32_t manc = 0;
+    uint32_t fwsm = 0;
+
+    if (hw->mac_type == e1000_ich8lan) {
+        fwsm = E1000_READ_REG(hw, FWSM);
+        return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS
+                                            : E1000_BLK_PHY_RESET;
+    }
+
+    if (hw->mac_type > e1000_82547_rev_2)
+        manc = E1000_READ_REG(hw, MANC);
+    return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+	    E1000_BLK_PHY_RESET : E1000_SUCCESS;
+}
+
+uint8_t
+e1000_arc_subsystem_valid(struct e1000_hw *hw)
+{
+    uint32_t fwsm;
+
+    /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC
+     * may not be provided a DMA clock when no manageability features are
+     * enabled.  We do not want to perform any reads/writes to these registers
+     * if this is the case.  We read FWSM to determine the manageability mode.
+     */
+    switch (hw->mac_type) {
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_82573:
+    case e1000_80003es2lan:
+        fwsm = E1000_READ_REG(hw, FWSM);
+        if ((fwsm & E1000_FWSM_MODE_MASK) != 0)
+            return TRUE;
+        break;
+    case e1000_ich8lan:
+        return TRUE;
+    default:
+        break;
+    }
+    return FALSE;
+}
+
+
+/******************************************************************************
+ * Configure PCI-Ex no-snoop
+ *
+ * hw - Struct containing variables accessed by shared code.
+ * no_snoop - Bitmap of no-snoop events.
+ *
+ * returns: E1000_SUCCESS
+ *
+ *****************************************************************************/
+int32_t
+e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
+{
+    uint32_t gcr_reg = 0;
+
+    DEBUGFUNC("e1000_set_pci_ex_no_snoop");
+
+    if (hw->bus_type == e1000_bus_type_unknown)
+        e1000_get_bus_info(hw);
+
+    if (hw->bus_type != e1000_bus_type_pci_express)
+        return E1000_SUCCESS;
+
+    if (no_snoop) {
+        gcr_reg = E1000_READ_REG(hw, GCR);
+        gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL);
+        gcr_reg |= no_snoop;
+        E1000_WRITE_REG(hw, GCR, gcr_reg);
+    }
+    if (hw->mac_type == e1000_ich8lan) {
+        uint32_t ctrl_ext;
+
+        E1000_WRITE_REG(hw, GCR, PCI_EX_82566_SNOOP_ALL);
+
+        ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+        ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+        E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+    }
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Get software semaphore FLAG bit (SWFLAG).
+ * SWFLAG is used to synchronize access to all shared resources between
+ * SW, FW and HW.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+int32_t
+e1000_get_software_flag(struct e1000_hw *hw)
+{
+    int32_t timeout = PHY_CFG_TIMEOUT;
+    uint32_t extcnf_ctrl;
+
+    DEBUGFUNC("e1000_get_software_flag");
+
+    if (hw->mac_type == e1000_ich8lan) {
+        while (timeout) {
+            extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+            extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+            E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
+
+            extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+            if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+                break;
+            msec_delay_irq(1);
+            timeout--;
+        }
+
+        if (!timeout) {
+            DEBUGOUT("FW or HW locks the resource too long.\n");
+            return -E1000_ERR_CONFIG;
+        }
+    }
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Release software semaphore FLAG bit (SWFLAG).
+ * SWFLAG is used to synchronize access to all shared resources between
+ * SW, FW and HW.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+void
+e1000_release_software_flag(struct e1000_hw *hw)
+{
+    uint32_t extcnf_ctrl;
+
+    DEBUGFUNC("e1000_release_software_flag");
+
+    if (hw->mac_type == e1000_ich8lan) {
+        extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+        extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+        E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
+    }
+
+    return;
+}
+
+/***************************************************************************
+ *
+ * Disables dynamic power-down mode in the IFE PHY.
+ * It can be used to work around the band-gap problem.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+int32_t
+e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw)
+{
+    uint16_t phy_data;
+    int32_t ret_val = E1000_SUCCESS;
+
+    DEBUGFUNC("e1000_ife_disable_dynamic_power_down");
+
+    if (hw->phy_type == e1000_phy_ife) {
+        ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |=  IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN;
+        ret_val = e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, phy_data);
+    }
+
+    return ret_val;
+}
+
+/***************************************************************************
+ *
+ * Enables dynamic power-down mode in the IFE PHY.
+ * It can be used to work around the band-gap problem.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+int32_t
+e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw)
+{
+    uint16_t phy_data;
+    int32_t ret_val = E1000_SUCCESS;
+
+    DEBUGFUNC("e1000_ife_enable_dynamic_power_down");
+
+    if (hw->phy_type == e1000_phy_ife) {
+        ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &=  ~IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN;
+        ret_val = e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, phy_data);
+    }
+
+    return ret_val;
+}
+
+/******************************************************************************
+ * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
+ * register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to read
+ * data - word read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+int32_t
+e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
+                       uint16_t *data)
+{
+    int32_t  error = E1000_SUCCESS;
+    uint32_t flash_bank = 0;
+    uint32_t act_offset = 0;
+    uint32_t bank_offset = 0;
+    uint16_t word = 0;
+    uint16_t i = 0;
+
+    /* We need to know which is the valid flash bank.  In the event
+     * that we didn't allocate eeprom_shadow_ram, we may not be
+     * managing flash_bank.  So it cannot be trusted and needs
+     * to be updated with each read.
+     */
+    /* Value of bit 22 corresponds to the flash bank we're on. */
+    flash_bank = (E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL) ? 1 : 0;
+
+    /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
+    bank_offset = flash_bank * (hw->flash_bank_size * 2);
+
+    error = e1000_get_software_flag(hw);
+    if (error != E1000_SUCCESS)
+        return error;
+
+    for (i = 0; i < words; i++) {
+        if (hw->eeprom_shadow_ram != NULL &&
+            hw->eeprom_shadow_ram[offset+i].modified == TRUE) {
+            data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word;
+        } else {
+            /* The NVM part needs a byte offset, hence * 2 */
+            act_offset = bank_offset + ((offset + i) * 2);
+            error = e1000_read_ich8_word(hw, act_offset, &word);
+            if (error != E1000_SUCCESS)
+                break;
+            data[i] = word;
+        }
+    }
+
+    e1000_release_software_flag(hw);
+
+    return error;
+}
+
+/******************************************************************************
+ * Writes a 16 bit word or words to the EEPROM using the ICH8's flash access
+ * register.  Actually, writes are written to the shadow ram cache in the hw
+ * structure hw->e1000_shadow_ram.  e1000_commit_shadow_ram flushes this to
+ * the NVM, which occurs when the NVM checksum is updated.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to write
+ * words - number of words to write
+ * data - words to write to the EEPROM
+ *****************************************************************************/
+int32_t
+e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
+                        uint16_t *data)
+{
+    uint32_t i = 0;
+    int32_t error = E1000_SUCCESS;
+
+    error = e1000_get_software_flag(hw);
+    if (error != E1000_SUCCESS)
+        return error;
+
+    /* A driver can write to the NVM only if it has eeprom_shadow_ram
+     * allocated.  Subsequent reads to the modified words are read from
+     * this cached structure as well.  Writes will only go into this
+     * cached structure unless it's followed by a call to
+     * e1000_update_eeprom_checksum() where it will commit the changes
+     * and clear the "modified" field.
+     */
+    if (hw->eeprom_shadow_ram != NULL) {
+        for (i = 0; i < words; i++) {
+            if ((offset + i) < E1000_SHADOW_RAM_WORDS) {
+                hw->eeprom_shadow_ram[offset+i].modified = TRUE;
+                hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i];
+            } else {
+                error = -E1000_ERR_EEPROM;
+                break;
+            }
+        }
+    } else {
+        /* Drivers have the option to not allocate eeprom_shadow_ram as long
+         * as they don't perform any NVM writes.  An attempt to do so
+         * will result in this error.
+         */
+        error = -E1000_ERR_EEPROM;
+    }
+
+    e1000_release_software_flag(hw);
+
+    return error;
+}
+
+/******************************************************************************
+ * This function does initial flash setup so that a new read/write/erase cycle
+ * can be started.
+ *
+ * hw - The pointer to the hw structure
+ ****************************************************************************/
+int32_t
+e1000_ich8_cycle_init(struct e1000_hw *hw)
+{
+    union ich8_hws_flash_status hsfsts;
+    int32_t error = E1000_ERR_EEPROM;
+    int32_t i     = 0;
+
+    DEBUGFUNC("e1000_ich8_cycle_init");
+
+    hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+
+    /* Check the Flash Descriptor Valid bit in the HW status register */
+    if (hsfsts.hsf_status.fldesvalid == 0) {
+        DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.");
+        return error;
+    }
+
+    /* Clear FCERR in Hw status by writing 1 */
+    /* Clear DAEL in Hw status by writing a 1 */
+    hsfsts.hsf_status.flcerr = 1;
+    hsfsts.hsf_status.dael = 1;
+
+    E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+
+    /* Either we should have a hardware SPI cycle-in-progress bit to check
+     * against in order to start a new cycle, or the FDONE bit should be
+     * changed in the hardware so that it is 1 after hardware reset, which can
+     * then be used as an indication of whether a cycle is in progress or has
+     * been completed.  We should also have some software semaphore mechanism
+     * to guard FDONE or the cycle-in-progress bit so that accesses by two
+     * threads are serialized, or some way to keep two threads from starting
+     * a cycle at the same time. */
+
+    if (hsfsts.hsf_status.flcinprog == 0) {
+        /* There is no cycle running at present, so we can start a cycle */
+        /* Begin by setting Flash Cycle Done. */
+        hsfsts.hsf_status.flcdone = 1;
+        E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+        error = E1000_SUCCESS;
+    } else {
+        /* otherwise poll for some time so the current cycle has a chance
+         * to end before giving up. */
+        for (i = 0; i < ICH8_FLASH_COMMAND_TIMEOUT; i++) {
+            hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+            if (hsfsts.hsf_status.flcinprog == 0) {
+                error = E1000_SUCCESS;
+                break;
+            }
+            usec_delay(1);
+        }
+        if (error == E1000_SUCCESS) {
+            /* The previous cycle completed in time; now set the
+             * Flash Cycle Done bit. */
+            hsfsts.hsf_status.flcdone = 1;
+            E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+        } else {
+            DEBUGOUT("Flash controller busy, cannot get access");
+        }
+    }
+    return error;
+}
+
+/******************************************************************************
+ * This function starts a flash cycle and waits for its completion
+ *
+ * hw - The pointer to the hw structure
+ ****************************************************************************/
+int32_t
+e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
+{
+    union ich8_hws_flash_ctrl hsflctl;
+    union ich8_hws_flash_status hsfsts;
+    int32_t error = E1000_ERR_EEPROM;
+    uint32_t i = 0;
+
+    /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
+    hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+    hsflctl.hsf_ctrl.flcgo = 1;
+    E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+
+    /* wait till FDONE bit is set to 1 */
+    do {
+        hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+        if (hsfsts.hsf_status.flcdone == 1)
+            break;
+        usec_delay(1);
+        i++;
+    } while (i < timeout);
+    if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) {
+        error = E1000_SUCCESS;
+    }
+    return error;
+}
+
+/******************************************************************************
+ * Reads a byte or word from the NVM using the ICH8 flash access registers.
+ *
+ * hw - The pointer to the hw structure
+ * index - The index of the byte or word to read.
+ * size - Size of data to read, 1=byte 2=word
+ * data - Pointer to the word to store the value read.
+ *****************************************************************************/
+int32_t
+e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
+                     uint32_t size, uint16_t* data)
+{
+    union ich8_hws_flash_status hsfsts;
+    union ich8_hws_flash_ctrl hsflctl;
+    uint32_t flash_linear_address;
+    uint32_t flash_data = 0;
+    int32_t error = -E1000_ERR_EEPROM;
+    int32_t count = 0;
+
+    DEBUGFUNC("e1000_read_ich8_data");
+
+    if (size < 1  || size > 2 || data == 0x0 ||
+        index > ICH8_FLASH_LINEAR_ADDR_MASK)
+        return error;
+
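+    /* Translate the NVM index into a linear address within this device's
+     * flash region. */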
+    flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) +
+                           hw->flash_base_addr;
+
+    do {
+        usec_delay(1);
+        /* Steps */
+        error = e1000_ich8_cycle_init(hw);
+        if (error != E1000_SUCCESS)
+            break;
+
+        hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+        /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+        hsflctl.hsf_ctrl.fldbcount = size - 1;
+        hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_READ;
+        E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+
+        /* Write the last 24 bits of index into Flash Linear address field in
+         * Flash Address */
+        /* TODO: TBD maybe check the index against the size of flash */
+
+        E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+
+        error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT);
+
+        /* If FCERR is set to 1, clear it and retry the whole sequence a few
+         * more times; otherwise read in the Flash Data0 register, least
+         * significant byte first. */
+        if (error == E1000_SUCCESS) {
+            flash_data = E1000_READ_ICH8_REG(hw, ICH8_FLASH_FDATA0);
+            if (size == 1) {
+                *data = (uint8_t)(flash_data & 0x000000FF);
+            } else if (size == 2) {
+                *data = (uint16_t)(flash_data & 0x0000FFFF);
+            }
+            break;
+        } else {
+            /* If we've gotten here, then things are probably completely hosed,
+             * but if the error condition is detected, it won't hurt to give
+             * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times.
+             */
+            hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+            if (hsfsts.hsf_status.flcerr == 1) {
+                /* Repeat for some time before giving up. */
+                continue;
+            } else if (hsfsts.hsf_status.flcdone == 0) {
+                DEBUGOUT("Timeout error - flash cycle did not complete.");
+                break;
+            }
+        }
+    } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT);
+
+    return error;
+}
+
+/******************************************************************************
+ * Writes one or two bytes to the NVM using the ICH8 flash access registers.
+ *
+ * hw - The pointer to the hw structure
+ * index - The index of the byte/word to write.
+ * size - Size of data to write, 1=byte 2=word
+ * data - The byte(s) to write to the NVM.
+ *****************************************************************************/
+int32_t
+e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
+                      uint16_t data)
+{
+    union ich8_hws_flash_status hsfsts;
+    union ich8_hws_flash_ctrl hsflctl;
+    uint32_t flash_linear_address;
+    uint32_t flash_data = 0;
+    int32_t error = -E1000_ERR_EEPROM;
+    int32_t count = 0;
+
+    DEBUGFUNC("e1000_write_ich8_data");
+
+    if (size < 1  || size > 2 || data > size * 0xff ||
+        index > ICH8_FLASH_LINEAR_ADDR_MASK)
+        return error;
+
+    flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) +
+                           hw->flash_base_addr;
+
+    do {
+        usec_delay(1);
+        /* Steps */
+        error = e1000_ich8_cycle_init(hw);
+        if (error != E1000_SUCCESS)
+            break;
+
+        hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+        /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+        hsflctl.hsf_ctrl.fldbcount = size - 1;
+        hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_WRITE;
+        E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+
+        /* Write the last 24 bits of index into Flash Linear address field in
+         * Flash Address */
+        E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+
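+        /* Place the data in the low bytes of the 32-bit flash data value;
+         * a single-byte write uses only the low byte. */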
+        if (size == 1)
+            flash_data = (uint32_t)data & 0x00FF;
+        else
+            flash_data = (uint32_t)data;
+
+        E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FDATA0, flash_data);
+
+        /* If FCERR is set to 1, clear it and try the whole sequence a few
+         * more times, else we are done. */
+        error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT);
+        if (error == E1000_SUCCESS) {
+            break;
+        } else {
+            /* If we're here, then things are most likely completely hosed,
+             * but if the error condition is detected, it won't hurt to give
+             * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times.
+             */
+            hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+            if (hsfsts.hsf_status.flcerr == 1) {
+                /* Repeat for some time before giving up. */
+                continue;
+            } else if (hsfsts.hsf_status.flcdone == 0) {
+                DEBUGOUT("Timeout error - flash cycle did not complete.");
+                break;
+            }
+        }
+    } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT);
+
+    return error;
+}
+
+/******************************************************************************
+ * Reads a single byte from the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to read.
+ * data - Pointer to a byte to store the value read.
+ *****************************************************************************/
+int32_t
+e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data)
+{
+    int32_t status = E1000_SUCCESS;
+    uint16_t word = 0;
+
+    status = e1000_read_ich8_data(hw, index, 1, &word);
+    if (status == E1000_SUCCESS) {
+        *data = (uint8_t)word;
+    }
+
+    return status;
+}
+
+/******************************************************************************
+ * Writes a single byte to the NVM using the ICH8 flash access registers.
+ * Performs verification by reading back the value and then going through
+ * a retry algorithm before giving up.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to write.
+ * byte - The byte to write to the NVM.
+ *****************************************************************************/
+int32_t
+e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte)
+{
+    int32_t error = E1000_SUCCESS;
+    int32_t program_retries;
+    uint8_t temp_byte = 0;
+
+    e1000_write_ich8_byte(hw, index, byte);
+    usec_delay(100);
+
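+    /* Read the byte back and retry the write up to 100 times until it
+     * verifies. */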
+    for (program_retries = 0; program_retries < 100; program_retries++) {
+        e1000_read_ich8_byte(hw, index, &temp_byte);
+        if (temp_byte == byte)
+            break;
+        usec_delay(10);
+        e1000_write_ich8_byte(hw, index, byte);
+        usec_delay(100);
+    }
+    if (program_retries == 100)
+        error = E1000_ERR_EEPROM;
+
+    return error;
+}
+
+/******************************************************************************
+ * Writes a single byte to the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to write.
+ * data - The byte to write to the NVM.
+ *****************************************************************************/
+int32_t
+e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data)
+{
+    int32_t status = E1000_SUCCESS;
+    uint16_t word = (uint16_t)data;
+
+    status = e1000_write_ich8_data(hw, index, 1, word);
+
+    return status;
+}
+
+/******************************************************************************
+ * Reads a word from the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The starting byte index of the word to read.
+ * data - Pointer to a word to store the value read.
+ *****************************************************************************/
+int32_t
+e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data)
+{
+    int32_t status = E1000_SUCCESS;
+    status = e1000_read_ich8_data(hw, index, 2, data);
+    return status;
+}
+
+/******************************************************************************
+ * Writes a word to the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The starting byte index of the word to write.
+ * data - The word to write to the NVM.
+ *****************************************************************************/
+int32_t
+e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data)
+{
+    int32_t status = E1000_SUCCESS;
+    status = e1000_write_ich8_data(hw, index, 2, data);
+    return status;
+}
+
+/******************************************************************************
+ * Erases the bank specified. Each bank is a 4k block. Segments are 0 based.
+ * Segment N starts at byte offset 4096 * N from hw->flash_base_addr.
+ *
+ * hw - pointer to e1000_hw structure
+ * segment - 0 for first segment, 1 for second segment, etc.
+ *****************************************************************************/
+int32_t
+e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment)
+{
+    union ich8_hws_flash_status hsfsts;
+    union ich8_hws_flash_ctrl hsflctl;
+    uint32_t flash_linear_address;
+    int32_t  count = 0;
+    int32_t  error = E1000_ERR_EEPROM;
+    int32_t  iteration, seg_size;
+    int32_t  sector_size;
+    int32_t  j = 0;
+    int32_t  error_flag = 0;
+
+    hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+
+    /* Determine HW Sector size: Read BERASE bits of Hw flash Status register */
+    /* 00: The Hw sector is 256 bytes, hence we need to erase 16
+     *     consecutive sectors.  The start index for the nth Hw sector can be
+     *     calculated as = segment * 4096 + n * 256
+     * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
+     *     The start index for the nth Hw sector can be calculated
+     *     as = segment * 4096
+     * 10: Error condition
+     * 11: The Hw sector is 64K bytes; a single erase cycle is issued
+     *     per segment. */
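+    /* Worked example for the common case: with 4K hardware sectors
+     * (berasesz == 01) a request for segment 2 issues one erase cycle
+     * at linear offset 2 * 4096 = 0x2000 from hw->flash_base_addr. */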
+    if (hsfsts.hsf_status.berasesz == 0x0) {
+        /* Hw sector size 256 */
+        sector_size = seg_size = ICH8_FLASH_SEG_SIZE_256;
+        iteration = ICH8_FLASH_SECTOR_SIZE / ICH8_FLASH_SEG_SIZE_256;
+    } else if (hsfsts.hsf_status.berasesz == 0x1) {
+        sector_size = seg_size = ICH8_FLASH_SEG_SIZE_4K;
+        iteration = 1;
+    } else if (hsfsts.hsf_status.berasesz == 0x3) {
+        sector_size = seg_size = ICH8_FLASH_SEG_SIZE_64K;
+        iteration = 1;
+    } else {
+        return error;
+    }
+
+    for (j = 0; j < iteration ; j++) {
+        do {
+            count++;
+            /* Steps */
+            error = e1000_ich8_cycle_init(hw);
+            if (error != E1000_SUCCESS) {
+                error_flag = 1;
+                break;
+            }
+
+            /* Write a value 11 (block Erase) in Flash Cycle field in Hw flash
+             * Control */
+            hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+            hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_ERASE;
+            E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+
+            /* Write the last 24 bits of an index within the block into Flash
+             * Linear address field in Flash Address.  This probably needs to
+             * be calculated here based off the on-chip segment size and the
+             * software segment size assumed (4K) */
+            /* TBD */
+            flash_linear_address = segment * sector_size + j * seg_size;
+            flash_linear_address &= ICH8_FLASH_LINEAR_ADDR_MASK;
+            flash_linear_address += hw->flash_base_addr;
+
+            E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+
+            error = e1000_ich8_flash_cycle(hw, 1000000);
+            /* Check if FCERR is set to 1.  If 1, clear it and try the whole
+             * sequence a few more times else Done */
+            if (error == E1000_SUCCESS) {
+                break;
+            } else {
+                hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+                if (hsfsts.hsf_status.flcerr == 1) {
+                    /* repeat for some time before giving up */
+                    continue;
+                } else if (hsfsts.hsf_status.flcdone == 0) {
+                    error_flag = 1;
+                    break;
+                }
+            }
+        } while ((count < ICH8_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
+        if (error_flag == 1)
+            break;
+    }
+    if (error_flag != 1)
+        error = E1000_SUCCESS;
+    return error;
+}
+
+/******************************************************************************
+ *
+ * Reverse duplex setting without breaking the link.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ *****************************************************************************/
+int32_t
+e1000_duplex_reversal(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    if (hw->phy_type != e1000_phy_igp_3)
+        return E1000_SUCCESS;
+
+    ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
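+    /* Toggle the duplex bit that was just read back from PHY_CTRL. */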
+    phy_data ^= MII_CR_FULL_DUPLEX;
+
+    ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_read_phy_reg(hw, IGP3E1000_PHY_MISC_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
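+    /* Mark the duplex setting as manually forced in the misc control register. */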
+    phy_data |= IGP3_PHY_MISC_DUPLEX_MANUAL_SET;
+    ret_val = e1000_write_phy_reg(hw, IGP3E1000_PHY_MISC_CTRL, phy_data);
+
+    return ret_val;
+}
+
+int32_t
+e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
+                                      uint32_t cnf_base_addr, uint32_t cnf_size)
+{
+    uint32_t ret_val = E1000_SUCCESS;
+    uint16_t word_addr, reg_data, reg_addr;
+    uint16_t i;
+
+    /* cnf_base_addr is a dword offset; convert it to a word offset */
+    word_addr = (uint16_t)(cnf_base_addr << 1);
+
+    /* cnf_size is the region length in dwords */
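+    /* Each dword entry holds a data word followed by the PHY register address
+     * it is written to. */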
+    for (i = 0; i < cnf_size; i++) {
+        ret_val = e1000_read_eeprom(hw, (word_addr + i*2), 1, &reg_data);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_eeprom(hw, (word_addr + i*2 + 1), 1, &reg_addr);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_get_software_flag(hw);
+        if (ret_val != E1000_SUCCESS)
+            return ret_val;
+
+        ret_val = e1000_write_phy_reg_ex(hw, (uint32_t)reg_addr, reg_data);
+
+        e1000_release_software_flag(hw);
+    }
+
+    return ret_val;
+}
+
+
+int32_t
+e1000_init_lcd_from_nvm(struct e1000_hw *hw)
+{
+    uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop;
+
+    if (hw->phy_type != e1000_phy_igp_3)
+          return E1000_SUCCESS;
+
+    /* Check if SW needs to configure the PHY */
+    reg_data = E1000_READ_REG(hw, FEXTNVM);
+    if (!(reg_data & FEXTNVM_SW_CONFIG))
+        return E1000_SUCCESS;
+
+    /* Wait for basic configuration to complete before proceeding */
+    loop = 0;
+    do {
+        reg_data = E1000_READ_REG(hw, STATUS) & E1000_STATUS_LAN_INIT_DONE;
+        usec_delay(100);
+        loop++;
+    } while ((!reg_data) && (loop < 50));
+
+    /* Clear the Init Done bit for the next init event */
+    reg_data = E1000_READ_REG(hw, STATUS);
+    reg_data &= ~E1000_STATUS_LAN_INIT_DONE;
+    E1000_WRITE_REG(hw, STATUS, reg_data);
+
+    /* Make sure HW does not configure LCD from PHY extended configuration
+       before SW configuration */
+    reg_data = E1000_READ_REG(hw, EXTCNF_CTRL);
+    if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) {
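+        /* The extended config length and pointer fields live in the upper
+         * halves of EXTCNF_SIZE and EXTCNF_CTRL, hence the shifts below. */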
+        reg_data = E1000_READ_REG(hw, EXTCNF_SIZE);
+        cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH;
+        cnf_size >>= 16;
+        if (cnf_size) {
+            reg_data = E1000_READ_REG(hw, EXTCNF_CTRL);
+            cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER;
+            /* cnf_base_addr is in DWORD */
+            cnf_base_addr >>= 16;
+
+            /* Configure LCD from extended configuration region. */
+            ret_val = e1000_init_lcd_from_nvm_config_region(hw, cnf_base_addr,
+                                                            cnf_size);
+            if (ret_val)
+                return ret_val;
+        }
+    }
+
+    return E1000_SUCCESS;
+}
+
+
+
+++ linux-patched/drivers/xenomai/net/drivers/e1000/Makefile	2022-03-21 12:58:29.500887879 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000/e1000_param.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_E1000) += rt_e1000.o
+
+rt_e1000-y := \
+	e1000_hw.o \
+	e1000_main.o \
+	e1000_param.o
+++ linux-patched/drivers/xenomai/net/drivers/e1000/e1000_param.c	2022-03-21 12:58:29.492887957 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000/e1000_hw.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
+  
+  This program is free software; you can redistribute it and/or modify it 
+  under the terms of the GNU General Public License as published by the Free 
+  Software Foundation; either version 2 of the License, or (at your option) 
+  any later version.
+  
+  This program is distributed in the hope that it will be useful, but WITHOUT 
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
+  more details.
+  
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59 
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+  
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+  
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define E1000_MAX_NIC 32
+
+#define OPTION_UNSET   -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED  1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
+#ifndef module_param_array
+/* Module Parameters are always initialized to -1, so that the driver
+ * can tell the difference between no user-specified value and the
+ * user asking for the default value.
+ * The true default values are loaded when e1000_check_options is called.
+ *
+ * This is a GCC extension to ANSI C.
+ * See the item "Labeled Elements in Initializers" in the section
+ * "Extensions to the C Language Family" of the GCC documentation.
+ */
+
+#define E1000_PARAM(X, desc) \
+	static const int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
+	MODULE_PARM(X, "1-" __MODULE_STRING(E1000_MAX_NIC) "i"); \
+	MODULE_PARM_DESC(X, desc);
+#else
+#define E1000_PARAM(X, desc) \
+	static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
+	static int num_##X = 0; \
+	module_param_array_named(X, X, int, &num_##X, 0); \
+	MODULE_PARM_DESC(X, desc);
+#endif
+
+/* Transmit Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+
+E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
+
+/* Receive Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+
+E1000_PARAM(RxDescriptors, "Number of receive descriptors");
+
+/* User Specified Speed Override
+ *
+ * Valid Range: 0, 10, 100, 1000
+ *  - 0    - auto-negotiate at all supported speeds
+ *  - 10   - only link at 10 Mbps
+ *  - 100  - only link at 100 Mbps
+ *  - 1000 - only link at 1000 Mbps
+ *
+ * Default Value: 0
+ */
+
+E1000_PARAM(Speed, "Speed setting");
+
+/* User Specified Duplex Override
+ *
+ * Valid Range: 0-2
+ *  - 0 - auto-negotiate for duplex
+ *  - 1 - only link at half duplex
+ *  - 2 - only link at full duplex
+ *
+ * Default Value: 0
+ */
+
+E1000_PARAM(Duplex, "Duplex setting");
+
+/* Auto-negotiation Advertisement Override
+ *
+ * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber)
+ *
+ * The AutoNeg value is a bit mask describing which speed and duplex
+ * combinations should be advertised during auto-negotiation.
+ * The supported speed and duplex modes are listed below
+ *
+ * Bit           7     6     5      4      3     2     1      0
+ * Speed (Mbps)  N/A   N/A   1000   N/A    100   100   10     10
+ * Duplex                    Full          Full  Half  Full   Half
+ *
+ * Default Value: 0x2F (copper); 0x20 (fiber)
+ */
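+/* For example, the copper default of 0x2F sets bits 0-3 and bit 5, i.e. it
+ * advertises 10/Half, 10/Full, 100/Half, 100/Full and 1000/Full.
+ */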
+
+E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
+
+/* User Specified Flow Control Override
+ *
+ * Valid Range: 0-3
+ *  - 0 - No Flow Control
+ *  - 1 - Rx only, respond to PAUSE frames but do not generate them
+ *  - 2 - Tx only, generate PAUSE frames but ignore them on receive
+ *  - 3 - Full Flow Control Support
+ *
+ * Default Value: Read flow control settings from the EEPROM
+ */
+
+E1000_PARAM(FlowControl, "Flow Control setting");
+
+/* XsumRX - Receive Checksum Offload Enable/Disable
+ *
+ * Valid Range: 0, 1
+ *  - 0 - disables all checksum offload
+ *  - 1 - enables receive IP/TCP/UDP checksum offload
+ *        on 82543 and newer NICs
+ *
+ * Default Value: 1
+ */
+
+E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
+
+/* Transmit Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 0 for rtnet
+ */
+
+E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+
+/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 0
+ */
+
+E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
+
+/* Receive Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 0
+ */
+
+E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+
+/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 0 for rtnet
+ */
+
+E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+
+/* Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-100000 (0=off, 1=dynamic)
+ *
+ * Default Value: 0 for rtnet
+ */
+
+E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+
+/* Enable Smart Power Down of the PHY
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled)
+ */
+
+E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
+
+/* Enable Kumeran Lock Loss workaround
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+
+E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
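+
+/* Hypothetical usage example: the parameters above are per-adapter arrays,
+ * so loading the module built from this directory (rt_e1000, see the
+ * Makefile) with
+ *
+ *     modprobe rt_e1000 Speed=100,0 Duplex=2,0
+ *
+ * would force the first NIC to 100 Mbps full duplex and leave the second
+ * one autonegotiating.
+ */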
+
+#define AUTONEG_ADV_DEFAULT  0x2F
+#define AUTONEG_ADV_MASK     0x2F
+#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
+
+#define DEFAULT_RDTR                   0
+#define MAX_RXDELAY               0xFFFF
+#define MIN_RXDELAY                    0
+
+#define DEFAULT_RADV                   0
+#define MAX_RXABSDELAY            0xFFFF
+#define MIN_RXABSDELAY                 0
+
+#define DEFAULT_TIDV                   0 
+#define MAX_TXDELAY               0xFFFF
+#define MIN_TXDELAY                    0
+
+#define DEFAULT_TADV                   0
+#define MAX_TXABSDELAY            0xFFFF
+#define MIN_TXABSDELAY                 0
+
+#define DEFAULT_ITR                    0
+#define MAX_ITR                   100000
+#define MIN_ITR                      100
+
+struct e1000_option {
+	enum { enable_option, range_option, list_option } type;
+	char *name;
+	char *err;
+	int  def;
+	union {
+		struct { /* range_option info */
+			int min;
+			int max;
+		} r;
+		struct { /* list_option info */
+			int nr;
+			struct e1000_opt_list { int i; char *str; } *p;
+		} l;
+	} arg;
+};
+
+static int e1000_validate_option(int *value, struct e1000_option *opt,
+		struct e1000_adapter *adapter)
+{
+	if (*value == OPTION_UNSET) {
+		*value = opt->def;
+		return 0;
+	}
+
+	switch (opt->type) {
+	case enable_option:
+		switch (*value) {
+		case OPTION_ENABLED:
+			DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
+			return 0;
+		case OPTION_DISABLED:
+			DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
+			return 0;
+		}
+		break;
+	case range_option:
+		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+			DPRINTK(PROBE, INFO,
+					"%s set to %i\n", opt->name, *value);
+			return 0;
+		}
+		break;
+	case list_option: {
+		int i;
+		struct e1000_opt_list *ent;
+
+		for (i = 0; i < opt->arg.l.nr; i++) {
+			ent = &opt->arg.l.p[i];
+			if (*value == ent->i) {
+				if (ent->str[0] != '\0')
+					DPRINTK(PROBE, INFO, "%s\n", ent->str);
+				return 0;
+			}
+		}
+	}
+		break;
+	default:
+		BUG();
+	}
+
+	DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n",
+	       opt->name, *value, opt->err);
+	*value = opt->def;
+	return -1;
+}
+
+static void e1000_check_fiber_options(struct e1000_adapter *adapter);
+static void e1000_check_copper_options(struct e1000_adapter *adapter);
+
+/**
+ * e1000_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input.  If an invalid value is given, or if no user specified
+ * value exists, a default value is used.  The final value is stored
+ * in a variable in the adapter structure.
+ **/
+
+void e1000_check_options(struct e1000_adapter *adapter)
+{
+	int bd = adapter->bd_number;
+	if (bd >= E1000_MAX_NIC) {
+		DPRINTK(PROBE, NOTICE,
+		       "Warning: no configuration for board #%i\n", bd);
+		DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
+#ifndef module_param_array
+		bd = E1000_MAX_NIC;
+#endif
+	}
+
+	{ /* Transmit Descriptor Count */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Transmit Descriptors",
+			.err  = "using default of "
+				__MODULE_STRING(E1000_DEFAULT_TXD),
+			.def  = E1000_DEFAULT_TXD,
+			.arg  = { .r = { .min = E1000_MIN_TXD }}
+		};
+		struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+		int i;
+		e1000_mac_type mac_type = adapter->hw.mac_type;
+		opt.arg.r.max = mac_type < e1000_82544 ?
+			E1000_MAX_TXD : E1000_MAX_82544_TXD;
+
+#ifdef module_param_array
+		if (num_TxDescriptors > bd) {
+#endif
+			tx_ring->count = TxDescriptors[bd];
+			e1000_validate_option(&tx_ring->count, &opt, adapter);
+			E1000_ROUNDUP(tx_ring->count,
+						REQ_TX_DESCRIPTOR_MULTIPLE);
+#ifdef module_param_array
+		} else {
+			tx_ring->count = opt.def;
+		}
+#endif
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			tx_ring[i].count = tx_ring->count;
+	}
+	{ /* Receive Descriptor Count */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Receive Descriptors",
+			.err  = "using default of "
+				__MODULE_STRING(E1000_DEFAULT_RXD),
+			.def  = E1000_DEFAULT_RXD,
+			.arg  = { .r = { .min = E1000_MIN_RXD }}
+		};
+		struct e1000_rx_ring *rx_ring = adapter->rx_ring;
+		int i;
+		e1000_mac_type mac_type = adapter->hw.mac_type;
+		opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
+			E1000_MAX_82544_RXD;
+
+#ifdef module_param_array
+		if (num_RxDescriptors > bd) {
+#endif
+			rx_ring->count = RxDescriptors[bd];
+			e1000_validate_option(&rx_ring->count, &opt, adapter);
+			E1000_ROUNDUP(rx_ring->count,
+						REQ_RX_DESCRIPTOR_MULTIPLE);
+#ifdef module_param_array
+		} else {
+			rx_ring->count = opt.def;
+		}
+#endif
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			rx_ring[i].count = rx_ring->count;
+	}
+	{ /* Checksum Offload Enable/Disable */
+		struct e1000_option opt = {
+			.type = enable_option,
+			.name = "Checksum Offload",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+#ifdef module_param_array
+		if (num_XsumRX > bd) {
+#endif
+			int rx_csum = XsumRX[bd];
+			e1000_validate_option(&rx_csum, &opt, adapter);
+			adapter->rx_csum = rx_csum;
+#ifdef module_param_array
+		} else {
+			adapter->rx_csum = opt.def;
+		}
+#endif
+	}
+	{ /* Flow Control */
+
+		struct e1000_opt_list fc_list[] =
+			{{ e1000_fc_none,    "Flow Control Disabled" },
+			 { e1000_fc_rx_pause,"Flow Control Receive Only" },
+			 { e1000_fc_tx_pause,"Flow Control Transmit Only" },
+			 { e1000_fc_full,    "Flow Control Enabled" },
+			 { e1000_fc_default, "Flow Control Hardware Default" }};
+
+		struct e1000_option opt = {
+			.type = list_option,
+			.name = "Flow Control",
+			.err  = "reading default settings from EEPROM",
+			.def  = e1000_fc_default,
+			.arg  = { .l = { .nr = ARRAY_SIZE(fc_list),
+					 .p = fc_list }}
+		};
+
+#ifdef module_param_array
+		if (num_FlowControl > bd) {
+#endif
+			int fc = FlowControl[bd];
+			e1000_validate_option(&fc, &opt, adapter);
+			adapter->hw.fc = adapter->hw.original_fc = fc;
+#ifdef module_param_array
+		} else {
+			adapter->hw.fc = adapter->hw.original_fc = opt.def;
+		}
+#endif
+	}
+	{ /* Transmit Interrupt Delay */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Transmit Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_TIDV),
+			.def  = DEFAULT_TIDV,
+			.arg  = { .r = { .min = MIN_TXDELAY,
+					 .max = MAX_TXDELAY }}
+		};
+
+#ifdef module_param_array
+		if (num_TxIntDelay > bd) {
+#endif
+			adapter->tx_int_delay = TxIntDelay[bd];
+			e1000_validate_option(&adapter->tx_int_delay, &opt,
+								adapter);
+#ifdef module_param_array
+		} else {
+			adapter->tx_int_delay = opt.def;
+		}
+#endif
+	}
+	{ /* Transmit Absolute Interrupt Delay */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Transmit Absolute Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_TADV),
+			.def  = DEFAULT_TADV,
+			.arg  = { .r = { .min = MIN_TXABSDELAY,
+					 .max = MAX_TXABSDELAY }}
+		};
+
+#ifdef module_param_array
+		if (num_TxAbsIntDelay > bd) {
+#endif
+			adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+			e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+								adapter);
+#ifdef module_param_array
+		} else {
+			adapter->tx_abs_int_delay = opt.def;
+		}
+#endif
+	}
+	{ /* Receive Interrupt Delay */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Receive Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_RDTR),
+			.def  = DEFAULT_RDTR,
+			.arg  = { .r = { .min = MIN_RXDELAY,
+					 .max = MAX_RXDELAY }}
+		};
+
+#ifdef module_param_array
+		if (num_RxIntDelay > bd) {
+#endif
+			adapter->rx_int_delay = RxIntDelay[bd];
+			e1000_validate_option(&adapter->rx_int_delay, &opt,
+								adapter);
+#ifdef module_param_array
+		} else {
+			adapter->rx_int_delay = opt.def;
+		}
+#endif
+	}
+	{ /* Receive Absolute Interrupt Delay */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Receive Absolute Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_RADV),
+			.def  = DEFAULT_RADV,
+			.arg  = { .r = { .min = MIN_RXABSDELAY,
+					 .max = MAX_RXABSDELAY }}
+		};
+
+#ifdef module_param_array
+		if (num_RxAbsIntDelay > bd) {
+#endif
+			adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+			e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+								adapter);
+#ifdef module_param_array
+		} else {
+			adapter->rx_abs_int_delay = opt.def;
+		}
+#endif
+	}
+	{ /* Interrupt Throttling Rate */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Interrupt Throttling Rate (ints/sec)",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_ITR),
+			.def  = DEFAULT_ITR,
+			.arg  = { .r = { .min = MIN_ITR,
+					 .max = MAX_ITR }}
+		};
+
+#ifdef module_param_array
+		if (num_InterruptThrottleRate > bd) {
+#endif
+			adapter->itr = InterruptThrottleRate[bd];
+			switch (adapter->itr) {
+			case 0:
+				DPRINTK(PROBE, INFO, "%s turned off\n",
+					opt.name);
+				break;
+			case 1:
+				DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
+					opt.name);
+				break;
+			default:
+				e1000_validate_option(&adapter->itr, &opt,
+					adapter);
+				break;
+			}
+#ifdef module_param_array
+		} else {
+			adapter->itr = opt.def;
+		}
+#endif
+	}
+	{ /* Smart Power Down */
+		struct e1000_option opt = {
+			.type = enable_option,
+			.name = "PHY Smart Power Down",
+			.err  = "defaulting to Disabled",
+			.def  = OPTION_DISABLED
+		};
+
+#ifdef module_param_array
+		if (num_SmartPowerDownEnable > bd) {
+#endif
+			int spd = SmartPowerDownEnable[bd];
+			e1000_validate_option(&spd, &opt, adapter);
+			adapter->smart_power_down = spd;
+#ifdef module_param_array
+		} else {
+			adapter->smart_power_down = opt.def;
+		}
+#endif
+	}
+	{ /* Kumeran Lock Loss Workaround */
+		struct e1000_option opt = {
+			.type = enable_option,
+			.name = "Kumeran Lock Loss Workaround",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+#ifdef module_param_array
+		if (num_KumeranLockLoss > bd) {
+#endif
+			int kmrn_lock_loss = KumeranLockLoss[bd];
+			e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
+			adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss;
+#ifdef module_param_array
+		} else {
+			adapter->hw.kmrn_lock_loss_workaround_disabled = !opt.def;
+		}
+#endif
+	}
+
+	switch (adapter->hw.media_type) {
+	case e1000_media_type_fiber:
+	case e1000_media_type_internal_serdes:
+		e1000_check_fiber_options(adapter);
+		break;
+	case e1000_media_type_copper:
+		e1000_check_copper_options(adapter);
+		break;
+	default:
+		BUG();
+	}
+}
+
+/**
+ * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on fiber adapters
+ **/
+
+static void e1000_check_fiber_options(struct e1000_adapter *adapter)
+{
+	int bd = adapter->bd_number;
+#ifndef module_param_array
+	bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
+	if ((Speed[bd] != OPTION_UNSET)) {
+#else
+	if (num_Speed > bd) {
+#endif
+		DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
+		       "parameter ignored\n");
+	}
+
+#ifndef module_param_array
+	if ((Duplex[bd] != OPTION_UNSET)) {
+#else
+	if (num_Duplex > bd) {
+#endif
+		DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
+		       "parameter ignored\n");
+	}
+
+#ifndef module_param_array
+	if ((AutoNeg[bd] != OPTION_UNSET) && (AutoNeg[bd] != 0x20)) {
+#else
+	if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
+#endif
+		DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
+				 "not valid for fiber adapters, "
+				 "parameter ignored\n");
+	}
+}
+
+/**
+ * e1000_check_copper_options - Range Checking for Link Options, Copper Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on copper adapters
+ **/
+
+static void e1000_check_copper_options(struct e1000_adapter *adapter)
+{
+	int speed, dplx, an;
+	int bd = adapter->bd_number;
+#ifndef module_param_array
+	bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
+#endif
+
+	{ /* Speed */
+		struct e1000_opt_list speed_list[] = {{          0, "" },
+						      {   SPEED_10, "" },
+						      {  SPEED_100, "" },
+						      { SPEED_1000, "" }};
+
+		struct e1000_option opt = {
+			.type = list_option,
+			.name = "Speed",
+			.err  = "parameter ignored",
+			.def  = 0,
+			.arg  = { .l = { .nr = ARRAY_SIZE(speed_list),
+					 .p = speed_list }}
+		};
+
+#ifdef module_param_array
+		if (num_Speed > bd) {
+#endif
+			speed = Speed[bd];
+			e1000_validate_option(&speed, &opt, adapter);
+#ifdef module_param_array
+		} else {
+			speed = opt.def;
+		}
+#endif
+	}
+	{ /* Duplex */
+		struct e1000_opt_list dplx_list[] = {{           0, "" },
+						     { HALF_DUPLEX, "" },
+						     { FULL_DUPLEX, "" }};
+
+		struct e1000_option opt = {
+			.type = list_option,
+			.name = "Duplex",
+			.err  = "parameter ignored",
+			.def  = 0,
+			.arg  = { .l = { .nr = ARRAY_SIZE(dplx_list),
+					 .p = dplx_list }}
+		};
+
+		if (e1000_check_phy_reset_block(&adapter->hw)) {
+			DPRINTK(PROBE, INFO,
+				"Link active due to SoL/IDER Session. "
+			        "Speed/Duplex/AutoNeg parameter ignored.\n");
+			return;
+		}
+#ifdef module_param_array
+		if (num_Duplex > bd) {
+#endif
+			dplx = Duplex[bd];
+			e1000_validate_option(&dplx, &opt, adapter);
+#ifdef module_param_array
+		} else {
+			dplx = opt.def;
+		}
+#endif
+	}
+
+#ifdef module_param_array
+	if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
+#else
+	if (AutoNeg[bd] != OPTION_UNSET && (speed != 0 || dplx != 0)) {
+#endif
+		DPRINTK(PROBE, INFO,
+		       "AutoNeg specified along with Speed or Duplex, "
+		       "parameter ignored\n");
+		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+	} else { /* Autoneg */
+		struct e1000_opt_list an_list[] =
+			#define AA "AutoNeg advertising "
+			{{ 0x01, AA "10/HD" },
+			 { 0x02, AA "10/FD" },
+			 { 0x03, AA "10/FD, 10/HD" },
+			 { 0x04, AA "100/HD" },
+			 { 0x05, AA "100/HD, 10/HD" },
+			 { 0x06, AA "100/HD, 10/FD" },
+			 { 0x07, AA "100/HD, 10/FD, 10/HD" },
+			 { 0x08, AA "100/FD" },
+			 { 0x09, AA "100/FD, 10/HD" },
+			 { 0x0a, AA "100/FD, 10/FD" },
+			 { 0x0b, AA "100/FD, 10/FD, 10/HD" },
+			 { 0x0c, AA "100/FD, 100/HD" },
+			 { 0x0d, AA "100/FD, 100/HD, 10/HD" },
+			 { 0x0e, AA "100/FD, 100/HD, 10/FD" },
+			 { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
+			 { 0x20, AA "1000/FD" },
+			 { 0x21, AA "1000/FD, 10/HD" },
+			 { 0x22, AA "1000/FD, 10/FD" },
+			 { 0x23, AA "1000/FD, 10/FD, 10/HD" },
+			 { 0x24, AA "1000/FD, 100/HD" },
+			 { 0x25, AA "1000/FD, 100/HD, 10/HD" },
+			 { 0x26, AA "1000/FD, 100/HD, 10/FD" },
+			 { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
+			 { 0x28, AA "1000/FD, 100/FD" },
+			 { 0x29, AA "1000/FD, 100/FD, 10/HD" },
+			 { 0x2a, AA "1000/FD, 100/FD, 10/FD" },
+			 { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
+			 { 0x2c, AA "1000/FD, 100/FD, 100/HD" },
+			 { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
+			 { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
+			 { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
+
+		struct e1000_option opt = {
+			.type = list_option,
+			.name = "AutoNeg",
+			.err  = "parameter ignored",
+			.def  = AUTONEG_ADV_DEFAULT,
+			.arg  = { .l = { .nr = ARRAY_SIZE(an_list),
+					 .p = an_list }}
+		};
+
+#ifdef module_param_array
+		if (num_AutoNeg > bd) {
+#endif
+			an = AutoNeg[bd];
+			e1000_validate_option(&an, &opt, adapter);
+#ifdef module_param_array
+		} else {
+			an = opt.def;
+		}
+#endif
+		adapter->hw.autoneg_advertised = an;
+	}
+
+	switch (speed + dplx) {
+	case 0:
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+#ifdef module_param_array
+		if ((num_Speed > bd) && (speed != 0 || dplx != 0))
+#else
+		if (Speed[bd] != OPTION_UNSET || Duplex[bd] != OPTION_UNSET)
+#endif
+			DPRINTK(PROBE, INFO,
+			       "Speed and duplex autonegotiation enabled\n");
+		break;
+	case HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"Half Duplex only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
+		                                 ADVERTISE_100_HALF;
+		break;
+	case FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"Full Duplex only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
+		                                 ADVERTISE_100_FULL |
+		                                 ADVERTISE_1000_FULL;
+		break;
+	case SPEED_10:
+		DPRINTK(PROBE, INFO, "10 Mbps Speed specified "
+			"without Duplex\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
+		                                 ADVERTISE_10_FULL;
+		break;
+	case SPEED_10 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Half Duplex\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 0;
+		adapter->hw.forced_speed_duplex = e1000_10_half;
+		adapter->hw.autoneg_advertised = 0;
+		break;
+	case SPEED_10 + FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 0;
+		adapter->hw.forced_speed_duplex = e1000_10_full;
+		adapter->hw.autoneg_advertised = 0;
+		break;
+	case SPEED_100:
+		DPRINTK(PROBE, INFO, "100 Mbps Speed specified "
+			"without Duplex\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"100 Mbps only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
+		                                 ADVERTISE_100_FULL;
+		break;
+	case SPEED_100 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 0;
+		adapter->hw.forced_speed_duplex = e1000_100_half;
+		adapter->hw.autoneg_advertised = 0;
+		break;
+	case SPEED_100 + FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 0;
+		adapter->hw.forced_speed_duplex = e1000_100_full;
+		adapter->hw.autoneg_advertised = 0;
+		break;
+	case SPEED_1000:
+		DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
+			"Duplex\n");
+		DPRINTK(PROBE, INFO,
+			"Using Autonegotiation at 1000 Mbps "
+			"Full Duplex only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	case SPEED_1000 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO,
+			"Half Duplex is not supported at 1000 Mbps\n");
+		DPRINTK(PROBE, INFO,
+			"Using Autonegotiation at 1000 Mbps "
+			"Full Duplex only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	case SPEED_1000 + FULL_DUPLEX:
+		DPRINTK(PROBE, INFO,
+		       "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	default:
+		BUG();
+	}
+
+	/* Speed, AutoNeg and MDI/MDI-X must all play nice */
+	if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
+		DPRINTK(PROBE, INFO,
+			"Speed, AutoNeg and MDI-X specifications are "
+			"incompatible. Setting MDI-X to a compatible value.\n");
+	}
+}
+
+++ linux-patched/drivers/xenomai/net/drivers/e1000/e1000_hw.h	2022-03-21 12:58:29.487888006 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000/kcompat.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+  
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
+  
+  This program is free software; you can redistribute it and/or modify it 
+  under the terms of the GNU General Public License as published by the Free 
+  Software Foundation; either version 2 of the License, or (at your option) 
+  any later version.
+  
+  This program is distributed in the hope that it will be useful, but WITHOUT 
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
+  more details.
+  
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59 
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+  
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+  
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_hw.h
+ * Structures, enums, and macros for the MAC
+ */
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include "e1000_osdep.h"
+
+
+/* Forward declarations of structures used by the shared code */
+struct e1000_hw;
+struct e1000_hw_stats;
+
+/* Enumerated types specific to the e1000 hardware */
+/* Media Access Controllers */
+typedef enum {
+    e1000_undefined = 0,
+    e1000_82542_rev2_0,
+    e1000_82542_rev2_1,
+    e1000_82543,
+    e1000_82544,
+    e1000_82540,
+    e1000_82545,
+    e1000_82545_rev_3,
+    e1000_82546,
+    e1000_82546_rev_3,
+    e1000_82541,
+    e1000_82541_rev_2,
+    e1000_82547,
+    e1000_82547_rev_2,
+    e1000_82571,
+    e1000_82572,
+    e1000_82573,
+    e1000_80003es2lan,
+    e1000_ich8lan,
+    e1000_num_macs
+} e1000_mac_type;
+
+typedef enum {
+    e1000_eeprom_uninitialized = 0,
+    e1000_eeprom_spi,
+    e1000_eeprom_microwire,
+    e1000_eeprom_flash,
+    e1000_eeprom_ich8,
+    e1000_eeprom_none, /* No NVM support */
+    e1000_num_eeprom_types
+} e1000_eeprom_type;
+
+/* Media Types */
+typedef enum {
+    e1000_media_type_copper = 0,
+    e1000_media_type_fiber = 1,
+    e1000_media_type_internal_serdes = 2,
+    e1000_num_media_types
+} e1000_media_type;
+
+typedef enum {
+    e1000_10_half = 0,
+    e1000_10_full = 1,
+    e1000_100_half = 2,
+    e1000_100_full = 3
+} e1000_speed_duplex_type;
+
+/* Flow Control Settings */
+typedef enum {
+    e1000_fc_none = 0,
+    e1000_fc_rx_pause = 1,
+    e1000_fc_tx_pause = 2,
+    e1000_fc_full = 3,
+    e1000_fc_default = 0xFF
+} e1000_fc_type;
+
+struct e1000_shadow_ram {
+    uint16_t    eeprom_word;
+    boolean_t   modified;
+};
+
+/* PCI bus types */
+typedef enum {
+    e1000_bus_type_unknown = 0,
+    e1000_bus_type_pci,
+    e1000_bus_type_pcix,
+    e1000_bus_type_pci_express,
+    e1000_bus_type_reserved
+} e1000_bus_type;
+
+/* PCI bus speeds */
+typedef enum {
+    e1000_bus_speed_unknown = 0,
+    e1000_bus_speed_33,
+    e1000_bus_speed_66,
+    e1000_bus_speed_100,
+    e1000_bus_speed_120,
+    e1000_bus_speed_133,
+    e1000_bus_speed_2500,
+    e1000_bus_speed_reserved
+} e1000_bus_speed;
+
+/* PCI bus widths */
+typedef enum {
+    e1000_bus_width_unknown = 0,
+    e1000_bus_width_32,
+    e1000_bus_width_64,
+    e1000_bus_width_pciex_1,
+    e1000_bus_width_pciex_2,
+    e1000_bus_width_pciex_4,
+    e1000_bus_width_reserved
+} e1000_bus_width;
+
+/* PHY status info structure and supporting enums */
+typedef enum {
+    e1000_cable_length_50 = 0,
+    e1000_cable_length_50_80,
+    e1000_cable_length_80_110,
+    e1000_cable_length_110_140,
+    e1000_cable_length_140,
+    e1000_cable_length_undefined = 0xFF
+} e1000_cable_length;
+
+typedef enum {
+    e1000_gg_cable_length_60 = 0,
+    e1000_gg_cable_length_60_115 = 1,
+    e1000_gg_cable_length_115_150 = 2,
+    e1000_gg_cable_length_150 = 4
+} e1000_gg_cable_length;
+
+typedef enum {
+    e1000_igp_cable_length_10  = 10,
+    e1000_igp_cable_length_20  = 20,
+    e1000_igp_cable_length_30  = 30,
+    e1000_igp_cable_length_40  = 40,
+    e1000_igp_cable_length_50  = 50,
+    e1000_igp_cable_length_60  = 60,
+    e1000_igp_cable_length_70  = 70,
+    e1000_igp_cable_length_80  = 80,
+    e1000_igp_cable_length_90  = 90,
+    e1000_igp_cable_length_100 = 100,
+    e1000_igp_cable_length_110 = 110,
+    e1000_igp_cable_length_115 = 115,
+    e1000_igp_cable_length_120 = 120,
+    e1000_igp_cable_length_130 = 130,
+    e1000_igp_cable_length_140 = 140,
+    e1000_igp_cable_length_150 = 150,
+    e1000_igp_cable_length_160 = 160,
+    e1000_igp_cable_length_170 = 170,
+    e1000_igp_cable_length_180 = 180
+} e1000_igp_cable_length;
+
+typedef enum {
+    e1000_10bt_ext_dist_enable_normal = 0,
+    e1000_10bt_ext_dist_enable_lower,
+    e1000_10bt_ext_dist_enable_undefined = 0xFF
+} e1000_10bt_ext_dist_enable;
+
+typedef enum {
+    e1000_rev_polarity_normal = 0,
+    e1000_rev_polarity_reversed,
+    e1000_rev_polarity_undefined = 0xFF
+} e1000_rev_polarity;
+
+typedef enum {
+    e1000_downshift_normal = 0,
+    e1000_downshift_activated,
+    e1000_downshift_undefined = 0xFF
+} e1000_downshift;
+
+typedef enum {
+    e1000_smart_speed_default = 0,
+    e1000_smart_speed_on,
+    e1000_smart_speed_off
+} e1000_smart_speed;
+
+typedef enum {
+    e1000_polarity_reversal_enabled = 0,
+    e1000_polarity_reversal_disabled,
+    e1000_polarity_reversal_undefined = 0xFF
+} e1000_polarity_reversal;
+
+typedef enum {
+    e1000_auto_x_mode_manual_mdi = 0,
+    e1000_auto_x_mode_manual_mdix,
+    e1000_auto_x_mode_auto1,
+    e1000_auto_x_mode_auto2,
+    e1000_auto_x_mode_undefined = 0xFF
+} e1000_auto_x_mode;
+
+typedef enum {
+    e1000_1000t_rx_status_not_ok = 0,
+    e1000_1000t_rx_status_ok,
+    e1000_1000t_rx_status_undefined = 0xFF
+} e1000_1000t_rx_status;
+
+typedef enum {
+    e1000_phy_m88 = 0,
+    e1000_phy_igp,
+    e1000_phy_igp_2,
+    e1000_phy_gg82563,
+    e1000_phy_igp_3,
+    e1000_phy_ife,
+    e1000_phy_undefined = 0xFF
+} e1000_phy_type;
+
+typedef enum {
+    e1000_ms_hw_default = 0,
+    e1000_ms_force_master,
+    e1000_ms_force_slave,
+    e1000_ms_auto
+} e1000_ms_type;
+
+typedef enum {
+    e1000_ffe_config_enabled = 0,
+    e1000_ffe_config_active,
+    e1000_ffe_config_blocked
+} e1000_ffe_config;
+
+typedef enum {
+    e1000_dsp_config_disabled = 0,
+    e1000_dsp_config_enabled,
+    e1000_dsp_config_activated,
+    e1000_dsp_config_undefined = 0xFF
+} e1000_dsp_config;
+
+struct e1000_phy_info {
+    e1000_cable_length cable_length;
+    e1000_10bt_ext_dist_enable extended_10bt_distance;
+    e1000_rev_polarity cable_polarity;
+    e1000_downshift downshift;
+    e1000_polarity_reversal polarity_correction;
+    e1000_auto_x_mode mdix_mode;
+    e1000_1000t_rx_status local_rx;
+    e1000_1000t_rx_status remote_rx;
+};
+
+struct e1000_phy_stats {
+    uint32_t idle_errors;
+    uint32_t receive_errors;
+};
+
+struct e1000_eeprom_info {
+    e1000_eeprom_type type;
+    uint16_t word_size;
+    uint16_t opcode_bits;
+    uint16_t address_bits;
+    uint16_t delay_usec;
+    uint16_t page_size;
+    boolean_t use_eerd;
+    boolean_t use_eewr;
+};
+
+/* Flex ASF Information */
+#define E1000_HOST_IF_MAX_SIZE  2048
+
+typedef enum {
+    e1000_byte_align = 0,
+    e1000_word_align = 1,
+    e1000_dword_align = 2
+} e1000_align_type;
+
+
+
+/* Error Codes */
+#define E1000_SUCCESS      0
+#define E1000_ERR_EEPROM   1
+#define E1000_ERR_PHY      2
+#define E1000_ERR_CONFIG   3
+#define E1000_ERR_PARAM    4
+#define E1000_ERR_MAC_TYPE 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET   9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET   12
+#define E1000_ERR_SWFW_SYNC 13
+
+/* Function prototypes */
+/* Initialization */
+int32_t e1000_reset_hw(struct e1000_hw *hw);
+int32_t e1000_init_hw(struct e1000_hw *hw);
+int32_t e1000_id_led_init(struct e1000_hw * hw);
+int32_t e1000_set_mac_type(struct e1000_hw *hw);
+void e1000_set_media_type(struct e1000_hw *hw);
+
+/* Link Configuration */
+int32_t e1000_setup_link(struct e1000_hw *hw);
+int32_t e1000_phy_setup_autoneg(struct e1000_hw *hw);
+void e1000_config_collision_dist(struct e1000_hw *hw);
+int32_t e1000_config_fc_after_link_up(struct e1000_hw *hw);
+int32_t e1000_check_for_link(struct e1000_hw *hw);
+int32_t e1000_get_speed_and_duplex(struct e1000_hw *hw, uint16_t * speed, uint16_t * duplex);
+int32_t e1000_wait_autoneg(struct e1000_hw *hw);
+int32_t e1000_force_mac_fc(struct e1000_hw *hw);
+
+/* PHY */
+int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy_data);
+int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
+int32_t e1000_phy_hw_reset(struct e1000_hw *hw);
+int32_t e1000_phy_reset(struct e1000_hw *hw);
+void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
+int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
+int32_t e1000_duplex_reversal(struct e1000_hw *hw);
+int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32_t cnf_base_addr, uint32_t cnf_size);
+int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw);
+int32_t e1000_detect_gig_phy(struct e1000_hw *hw);
+int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+int32_t e1000_phy_m88_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+int32_t e1000_phy_igp_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+int32_t e1000_get_cable_length(struct e1000_hw *hw, uint16_t *min_length, uint16_t *max_length);
+int32_t e1000_check_polarity(struct e1000_hw *hw, uint16_t *polarity);
+int32_t e1000_check_downshift(struct e1000_hw *hw);
+int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);
+int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data);
+int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
+
+/* EEPROM Functions */
+int32_t e1000_init_eeprom_params(struct e1000_hw *hw);
+boolean_t e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
+int32_t e1000_read_eeprom_eerd(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
+int32_t e1000_write_eeprom_eewr(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
+int32_t e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
+
+/* MNG HOST IF functions */
+uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD   64
+#define E1000_HI_MAX_MNG_DATA_LENGTH    0x6F8   /* Host Interface data length */
+
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT  10      /* Time in ms to process MNG command */
+#define E1000_MNG_DHCP_COOKIE_OFFSET    0x6F0   /* Cookie offset */
+#define E1000_MNG_DHCP_COOKIE_LENGTH    0x10    /* Cookie length */
+#define E1000_MNG_IAMT_MODE             0x3
+#define E1000_MNG_ICH_IAMT_MODE         0x2
+#define E1000_IAMT_SIGNATURE            0x544D4149 /* Intel(R) Active Management Technology signature */
+
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT    0x2 /* VLAN support enabled */
+#define E1000_VFTA_ENTRY_SHIFT                       0x5
+#define E1000_VFTA_ENTRY_MASK                        0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK              0x1F
+
+struct e1000_host_mng_command_header {
+    uint8_t command_id;
+    uint8_t checksum;
+    uint16_t reserved1;
+    uint16_t reserved2;
+    uint16_t command_length;
+};
+
+struct e1000_host_mng_command_info {
+    struct e1000_host_mng_command_header command_header;  /* Command/command-result header (8 bytes) */
+    uint8_t command_data[E1000_HI_MAX_MNG_DATA_LENGTH];   /* Command data can be 0..0x658 bytes long */
+};
+#ifdef E1000_BIG_ENDIAN
+struct e1000_host_mng_dhcp_cookie{
+    uint32_t signature;
+    uint16_t vlan_id;
+    uint8_t reserved0;
+    uint8_t status;
+    uint32_t reserved1;
+    uint8_t checksum;
+    uint8_t reserved3;
+    uint16_t reserved2;
+};
+#else
+struct e1000_host_mng_dhcp_cookie{
+    uint32_t signature;
+    uint8_t status;
+    uint8_t reserved0;
+    uint16_t vlan_id;
+    uint32_t reserved1;
+    uint16_t reserved2;
+    uint8_t reserved3;
+    uint8_t checksum;
+};
+#endif
+
+int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
+                                  uint16_t length);
+boolean_t e1000_check_mng_mode(struct e1000_hw *hw);
+boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
+int32_t e1000_mng_enable_host_if(struct e1000_hw *hw);
+int32_t e1000_mng_host_if_write(struct e1000_hw *hw, uint8_t *buffer,
+                            uint16_t length, uint16_t offset, uint8_t *sum);
+int32_t e1000_mng_write_cmd_header(struct e1000_hw* hw,
+                                   struct e1000_host_mng_command_header* hdr);
+
+int32_t e1000_mng_write_commit(struct e1000_hw *hw);
+
+int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
+int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw);
+int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw);
+int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
+int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num);
+int32_t e1000_read_mac_addr(struct e1000_hw * hw);
+int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
+void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
+void e1000_release_software_flag(struct e1000_hw *hw);
+int32_t e1000_get_software_flag(struct e1000_hw *hw);
+
+/* Filters (multicast, vlan, receive) */
+void e1000_init_rx_addrs(struct e1000_hw *hw);
+void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count);
+uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr);
+void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value);
+void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index);
+void e1000_write_vfta(struct e1000_hw *hw, uint32_t offset, uint32_t value);
+void e1000_clear_vfta(struct e1000_hw *hw);
+
+/* LED functions */
+int32_t e1000_setup_led(struct e1000_hw *hw);
+int32_t e1000_cleanup_led(struct e1000_hw *hw);
+int32_t e1000_led_on(struct e1000_hw *hw);
+int32_t e1000_led_off(struct e1000_hw *hw);
+int32_t e1000_blink_led_start(struct e1000_hw *hw);
+
+/* Adaptive IFS Functions */
+
+/* Everything else */
+void e1000_clear_hw_cntrs(struct e1000_hw *hw);
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, uint32_t frame_len, uint8_t * mac_addr);
+void e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_pci_set_mwi(struct e1000_hw *hw);
+void e1000_pci_clear_mwi(struct e1000_hw *hw);
+void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
+void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
+/* Port I/O is only supported on 82544 and newer */
+uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port);
+uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset);
+void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
+void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, uint32_t value);
+int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw, boolean_t link_up);
+int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
+int32_t e1000_set_d0_lplu_state(struct e1000_hw *hw, boolean_t active);
+void e1000_set_pci_express_master_disable(struct e1000_hw *hw);
+void e1000_enable_pciex_master(struct e1000_hw *hw);
+int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
+int32_t e1000_get_auto_rd_done(struct e1000_hw *hw);
+int32_t e1000_get_phy_cfg_done(struct e1000_hw *hw);
+int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
+void e1000_release_software_semaphore(struct e1000_hw *hw);
+int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
+int32_t e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
+void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw);
+int32_t e1000_commit_shadow_ram(struct e1000_hw *hw);
+uint8_t e1000_arc_subsystem_valid(struct e1000_hw *hw);
+int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop);
+
+int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index,
+                             uint8_t *data);
+int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
+                                     uint8_t byte);
+int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
+                              uint8_t byte);
+int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index,
+                             uint16_t *data);
+int32_t e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index,
+                              uint16_t word);
+int32_t e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
+                             uint32_t size, uint16_t *data);
+int32_t e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index,
+                              uint32_t size, uint16_t data);
+int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
+                               uint16_t words, uint16_t *data);
+int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
+                                uint16_t words, uint16_t *data);
+int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment);
+int32_t e1000_ich8_cycle_init(struct e1000_hw *hw);
+int32_t e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout);
+int32_t e1000_phy_ife_get_info(struct e1000_hw *hw,
+                               struct e1000_phy_info *phy_info);
+int32_t e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw);
+int32_t e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw);
+
+#define E1000_READ_REG_IO(a, reg) \
+    e1000_read_reg_io((a), E1000_##reg)
+#define E1000_WRITE_REG_IO(a, reg, val) \
+    e1000_write_reg_io((a), E1000_##reg, val)
+
+/* PCI Device IDs */
+#define E1000_DEV_ID_82542               0x1000
+#define E1000_DEV_ID_82543GC_FIBER       0x1001
+#define E1000_DEV_ID_82543GC_COPPER      0x1004
+#define E1000_DEV_ID_82544EI_COPPER      0x1008
+#define E1000_DEV_ID_82544EI_FIBER       0x1009
+#define E1000_DEV_ID_82544GC_COPPER      0x100C
+#define E1000_DEV_ID_82544GC_LOM         0x100D
+#define E1000_DEV_ID_82540EM             0x100E
+#define E1000_DEV_ID_82540EM_LOM         0x1015
+#define E1000_DEV_ID_82540EP_LOM         0x1016
+#define E1000_DEV_ID_82540EP             0x1017
+#define E1000_DEV_ID_82540EP_LP          0x101E
+#define E1000_DEV_ID_82545EM_COPPER      0x100F
+#define E1000_DEV_ID_82545EM_FIBER       0x1011
+#define E1000_DEV_ID_82545GM_COPPER      0x1026
+#define E1000_DEV_ID_82545GM_FIBER       0x1027
+#define E1000_DEV_ID_82545GM_SERDES      0x1028
+#define E1000_DEV_ID_82546EB_COPPER      0x1010
+#define E1000_DEV_ID_82546EB_FIBER       0x1012
+#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
+#define E1000_DEV_ID_82541EI             0x1013
+#define E1000_DEV_ID_82541EI_MOBILE      0x1018
+#define E1000_DEV_ID_82541ER_LOM         0x1014
+#define E1000_DEV_ID_82541ER             0x1078
+#define E1000_DEV_ID_82547GI             0x1075
+#define E1000_DEV_ID_82541GI             0x1076
+#define E1000_DEV_ID_82541GI_MOBILE      0x1077
+#define E1000_DEV_ID_82541GI_LF          0x107C
+#define E1000_DEV_ID_82546GB_COPPER      0x1079
+#define E1000_DEV_ID_82546GB_FIBER       0x107A
+#define E1000_DEV_ID_82546GB_SERDES      0x107B
+#define E1000_DEV_ID_82546GB_PCIE        0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
+#define E1000_DEV_ID_82547EI             0x1019
+#define E1000_DEV_ID_82547EI_MOBILE      0x101A
+#define E1000_DEV_ID_82571EB_COPPER      0x105E
+#define E1000_DEV_ID_82571EB_FIBER       0x105F
+#define E1000_DEV_ID_82571EB_SERDES      0x1060
+#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE  0x10BC
+#define E1000_DEV_ID_82572EI_COPPER      0x107D
+#define E1000_DEV_ID_82572EI_FIBER       0x107E
+#define E1000_DEV_ID_82572EI_SERDES      0x107F
+#define E1000_DEV_ID_82572EI             0x10B9
+#define E1000_DEV_ID_82573E              0x108B
+#define E1000_DEV_ID_82573E_IAMT         0x108C
+#define E1000_DEV_ID_82573L              0x109A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT     0x1096
+#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT     0x1098
+#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT     0x10BA
+#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT     0x10BB
+
+#define E1000_DEV_ID_ICH8_IGP_M_AMT      0x1049
+#define E1000_DEV_ID_ICH8_IGP_AMT        0x104A
+#define E1000_DEV_ID_ICH8_IGP_C          0x104B
+#define E1000_DEV_ID_ICH8_IFE            0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT         0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G          0x10C5
+#define E1000_DEV_ID_ICH8_IGP_M          0x104D
+
+
+#define NODE_ADDRESS_SIZE 6
+#define ETH_LENGTH_OF_ADDRESS 6
+
+/* MAC decode size is 128K - This is the size of BAR0 */
+#define MAC_DECODE_SIZE (128 * 1024)
+
+#define E1000_82542_2_0_REV_ID 2
+#define E1000_82542_2_1_REV_ID 3
+#define E1000_REVISION_0       0
+#define E1000_REVISION_1       1
+#define E1000_REVISION_2       2
+#define E1000_REVISION_3       3
+
+#define SPEED_10    10
+#define SPEED_100   100
+#define SPEED_1000  1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+/* The sizes (in bytes) of an Ethernet packet */
+#define ENET_HEADER_SIZE             14
+#define MAXIMUM_ETHERNET_FRAME_SIZE  1518 /* With FCS */
+#define MINIMUM_ETHERNET_FRAME_SIZE  64   /* With FCS */
+#define ETHERNET_FCS_SIZE            4
+#define MAXIMUM_ETHERNET_PACKET_SIZE \
+    (MAXIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
+#define MINIMUM_ETHERNET_PACKET_SIZE \
+    (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
+#define CRC_LENGTH                   ETHERNET_FCS_SIZE
+#define MAX_JUMBO_FRAME_SIZE         0x3F00
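+
+/*
+ * Worked example: MAXIMUM_ETHERNET_PACKET_SIZE is 1518 - 4 = 1514 bytes and
+ * MINIMUM_ETHERNET_PACKET_SIZE is 64 - 4 = 60 bytes, i.e. the frame sizes
+ * above with the trailing FCS stripped.
+ */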
+
+
+/* 802.1q VLAN Packet Sizes */
+#define VLAN_TAG_SIZE  4     /* 802.3ac tag (not DMAed) */
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.3ac packet */
+#define ETHERNET_IP_TYPE        0x0800  /* IP packets */
+#define ETHERNET_ARP_TYPE       0x0806  /* Address Resolution Protocol (ARP) */
+
+/* Packet Header defines */
+#define IP_PROTOCOL_TCP    6
+#define IP_PROTOCOL_UDP    0x11
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ */
+#define POLL_IMS_ENABLE_MASK ( \
+    E1000_IMS_RXDMT0 |         \
+    E1000_IMS_RXSEQ)
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXT0   = Receiver Timer Interrupt (ring 0)
+ *   o TXDW   = Transmit Descriptor Written Back
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ *   o LSC    = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+    E1000_IMS_RXT0   |    \
+    E1000_IMS_TXDW   |    \
+    E1000_IMS_RXDMT0 |    \
+    E1000_IMS_RXSEQ  |    \
+    E1000_IMS_LSC)
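+
+/*
+ * Illustrative note (not part of the original driver code): a driver
+ * typically unmasks these causes by writing the mask to the Interrupt Mask
+ * Set register and masks them again through the Interrupt Mask Clear
+ * register, where hw_addr is the mapped BAR0 base:
+ *
+ *     writel(IMS_ENABLE_MASK, hw_addr + E1000_IMS);
+ *     writel(IMS_ENABLE_MASK, hw_addr + E1000_IMC);
+ */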
+
+/* Additional interrupts need to be handled for e1000_ich8lan:
+ *   o DSW    = The FW changed the status of the DISSW bit in FWSM
+ *   o PHYINT = The LAN connected device generates an interrupt
+ *   o EPRST  = Manageability reset event
+ */
+#define IMS_ICH8LAN_ENABLE_MASK (\
+    E1000_IMS_DSW   | \
+    E1000_IMS_PHYINT | \
+    E1000_IMS_EPRST)
+
+/* Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor. We
+ * reserve one of these spots for our directed address, allowing us room for
+ * E1000_RAR_ENTRIES - 1 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES 15
+#define E1000_RAR_ENTRIES_ICH8LAN  7
+
+#define MIN_NUMBER_OF_DESCRIPTORS 8
+#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8
+
+/* Receive Descriptor */
+struct e1000_rx_desc {
+    uint64_t buffer_addr; /* Address of the descriptor's data buffer */
+    uint16_t length;     /* Length of data DMAed into data buffer */
+    uint16_t csum;       /* Packet checksum */
+    uint8_t status;      /* Descriptor status */
+    uint8_t errors;      /* Descriptor Errors */
+    uint16_t special;
+};
+
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+    struct {
+        uint64_t buffer_addr;
+        uint64_t reserved;
+    } read;
+    struct {
+        struct {
+            uint32_t mrq;              /* Multiple Rx Queues */
+            union {
+                uint32_t rss;          /* RSS Hash */
+                struct {
+                    uint16_t ip_id;    /* IP id */
+                    uint16_t csum;     /* Packet Checksum */
+                } csum_ip;
+            } hi_dword;
+        } lower;
+        struct {
+            uint32_t status_error;     /* ext status/error */
+            uint16_t length;
+            uint16_t vlan;             /* VLAN tag */
+        } upper;
+    } wb;  /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+    struct {
+        /* one buffer for protocol header(s), three data buffers */
+        uint64_t buffer_addr[MAX_PS_BUFFERS];
+    } read;
+    struct {
+        struct {
+            uint32_t mrq;              /* Multiple Rx Queues */
+            union {
+                uint32_t rss;          /* RSS Hash */
+                struct {
+                    uint16_t ip_id;    /* IP id */
+                    uint16_t csum;     /* Packet Checksum */
+                } csum_ip;
+            } hi_dword;
+        } lower;
+        struct {
+            uint32_t status_error;     /* ext status/error */
+            uint16_t length0;          /* length of buffer 0 */
+            uint16_t vlan;             /* VLAN tag */
+        } middle;
+        struct {
+            uint16_t header_status;
+            uint16_t length[3];        /* length of buffers 1-3 */
+        } upper;
+        uint64_t reserved;
+    } wb; /* writeback */
+};
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
+#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
+#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
+#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF      0x80    /* passed in-exact filter */
+#define E1000_RXD_STAT_IPIDV    0x200   /* IP identification valid */
+#define E1000_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */
+#define E1000_RXD_STAT_ACK      0x8000  /* ACK Packet indication */
+#define E1000_RXD_ERR_CE        0x01    /* CRC Error */
+#define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
+#define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
+#define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
+#define E1000_RXD_SPC_PRI_MASK  0xE000  /* Priority is in upper 3 bits */
+#define E1000_RXD_SPC_PRI_SHIFT 13
+#define E1000_RXD_SPC_CFI_MASK  0x1000  /* CFI is bit 12 */
+#define E1000_RXD_SPC_CFI_SHIFT 12
+
+#define E1000_RXDEXT_STATERR_CE    0x01000000
+#define E1000_RXDEXT_STATERR_SE    0x02000000
+#define E1000_RXDEXT_STATERR_SEQ   0x04000000
+#define E1000_RXDEXT_STATERR_CXE   0x10000000
+#define E1000_RXDEXT_STATERR_TCPE  0x20000000
+#define E1000_RXDEXT_STATERR_IPE   0x40000000
+#define E1000_RXDEXT_STATERR_RXE   0x80000000
+
+#define E1000_RXDPS_HDRSTAT_HDRSP        0x00008000
+#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK  0x000003FF
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+    E1000_RXD_ERR_CE  |                \
+    E1000_RXD_ERR_SE  |                \
+    E1000_RXD_ERR_SEQ |                \
+    E1000_RXD_ERR_CXE |                \
+    E1000_RXD_ERR_RXE)
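+
+/*
+ * Illustrative sketch (not part of the original driver code): a typical
+ * receive-path check against the legacy descriptor layout and the frame
+ * error mask above. Hardware sets DD once it has written the descriptor
+ * back; frames matching the error mask are normally dropped.
+ */
+static inline int e1000_rx_desc_ok_example(const struct e1000_rx_desc *desc)
+{
+    if (!(desc->status & E1000_RXD_STAT_DD))
+        return 0;    /* not yet written back by hardware */
+    if (desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)
+        return 0;    /* CRC/symbol/sequence/carrier/RX data error */
+    return 1;        /* descriptor carries a usable packet fragment */
+}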
+
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+    E1000_RXDEXT_STATERR_CE  |            \
+    E1000_RXDEXT_STATERR_SE  |            \
+    E1000_RXDEXT_STATERR_SEQ |            \
+    E1000_RXDEXT_STATERR_CXE |            \
+    E1000_RXDEXT_STATERR_RXE)
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+    uint64_t buffer_addr;       /* Address of the descriptor's data buffer */
+    union {
+        uint32_t data;
+        struct {
+            uint16_t length;    /* Data buffer length */
+            uint8_t cso;        /* Checksum offset */
+            uint8_t cmd;        /* Descriptor control */
+        } flags;
+    } lower;
+    union {
+        uint32_t data;
+        struct {
+            uint8_t status;     /* Descriptor status */
+            uint8_t css;        /* Checksum start */
+            uint16_t special;
+        } fields;
+    } upper;
+};
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D     0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C     0x00000000 /* Context Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP    0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC     0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS     0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS    0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE    0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC    0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC    0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU    0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP    0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP     0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE    0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC    0x00000004 /* Tx Underrun */
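+
+/*
+ * Illustrative sketch (not part of the original driver code): filling a
+ * legacy transmit descriptor for a single-buffer frame. EOP marks the last
+ * buffer of the packet, IFCS makes hardware append the Ethernet CRC, and RS
+ * requests a status write-back (DD) once transmission completes.
+ */
+static inline void e1000_fill_tx_desc_example(struct e1000_tx_desc *desc,
+                                              uint64_t dma_addr, uint16_t len)
+{
+    desc->buffer_addr = dma_addr;
+    desc->lower.data = (uint32_t)len | E1000_TXD_CMD_EOP |
+                       E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
+    desc->upper.data = 0;    /* status byte is written back by hardware */
+}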
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+    union {
+        uint32_t ip_config;
+        struct {
+            uint8_t ipcss;      /* IP checksum start */
+            uint8_t ipcso;      /* IP checksum offset */
+            uint16_t ipcse;     /* IP checksum end */
+        } ip_fields;
+    } lower_setup;
+    union {
+        uint32_t tcp_config;
+        struct {
+            uint8_t tucss;      /* TCP checksum start */
+            uint8_t tucso;      /* TCP checksum offset */
+            uint16_t tucse;     /* TCP checksum end */
+        } tcp_fields;
+    } upper_setup;
+    uint32_t cmd_and_length;    /* Command field and payload length */
+    union {
+        uint32_t data;
+        struct {
+            uint8_t status;     /* Descriptor status */
+            uint8_t hdr_len;    /* Header length */
+            uint16_t mss;       /* Maximum segment size */
+        } fields;
+    } tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+    uint64_t buffer_addr;       /* Address of the descriptor's buffer address */
+    union {
+        uint32_t data;
+        struct {
+            uint16_t length;    /* Data buffer length */
+            uint8_t typ_len_ext;        /* Type and length extension */
+            uint8_t cmd;        /* Descriptor control */
+        } flags;
+    } lower;
+    union {
+        uint32_t data;
+        struct {
+            uint8_t status;     /* Descriptor status */
+            uint8_t popts;      /* Packet Options */
+            uint16_t special;   /* VLAN tag */
+        } fields;
+    } upper;
+};
+
+/* Filters */
+#define E1000_NUM_UNICAST          16   /* Unicast filter entries */
+#define E1000_MC_TBL_SIZE          128  /* Multicast Filter Table (4096 bits) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
+
+#define E1000_NUM_UNICAST_ICH8LAN  7
+#define E1000_MC_TBL_SIZE_ICH8LAN  32
+
+
+/* Receive Address Register */
+struct e1000_rar {
+    volatile uint32_t low;      /* receive address low */
+    volatile uint32_t high;     /* receive address high */
+};
+
+/* Number of entries in the Multicast Table Array (MTA). */
+#define E1000_NUM_MTA_REGISTERS 128
+#define E1000_NUM_MTA_REGISTERS_ICH8LAN 32
+
+/* IPv4 Address Table Entry */
+struct e1000_ipv4_at_entry {
+    volatile uint32_t ipv4_addr;        /* IP Address (RW) */
+    volatile uint32_t reserved;
+};
+
+/* Four wakeup IP addresses are supported */
+#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4
+#define E1000_IP4AT_SIZE                  E1000_WAKEUP_IP_ADDRESS_COUNT_MAX
+#define E1000_IP4AT_SIZE_ICH8LAN          3
+#define E1000_IP6AT_SIZE                  1
+
+/* IPv6 Address Table Entry */
+struct e1000_ipv6_at_entry {
+    volatile uint8_t ipv6_addr[16];
+};
+
+/* Flexible Filter Length Table Entry */
+struct e1000_fflt_entry {
+    volatile uint32_t length;   /* Flexible Filter Length (RW) */
+    volatile uint32_t reserved;
+};
+
+/* Flexible Filter Mask Table Entry */
+struct e1000_ffmt_entry {
+    volatile uint32_t mask;     /* Flexible Filter Mask (RW) */
+    volatile uint32_t reserved;
+};
+
+/* Flexible Filter Value Table Entry */
+struct e1000_ffvt_entry {
+    volatile uint32_t value;    /* Flexible Filter Value (RW) */
+    volatile uint32_t reserved;
+};
+
+/* Four Flexible Filters are supported */
+#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define E1000_FLEXIBLE_FILTER_SIZE_MAX  128
+
+#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
+#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+
+#define E1000_DISABLE_SERDES_LOOPBACK   0x0400
+
+/* Register Set. (82543, 82544)
+ *
+ * Registers are defined to be 32 bits and should be accessed as 32-bit values.
+ * These registers are physically located on the NIC, but are mapped into the
+ * host memory address space.
+ *
+ * RW - register is both readable and writable
+ * RO - register is read only
+ * WO - register is write only
+ * R/clr - register is read only and is cleared when read
+ * A - register array
+ */
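+
+/*
+ * Illustrative note (assumption, not part of the original driver code): the
+ * offsets below are added to the mapped BAR0 base for 32-bit MMIO accesses,
+ * e.g. reading the Device Status register:
+ *
+ *     uint32_t status = readl(hw_addr + E1000_STATUS);
+ */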
+#define E1000_CTRL     0x00000  /* Device Control - RW */
+#define E1000_CTRL_DUP 0x00004  /* Device Control Duplicate (Shadow) - RW */
+#define E1000_STATUS   0x00008  /* Device Status - RO */
+#define E1000_EECD     0x00010  /* EEPROM/Flash Control - RW */
+#define E1000_EERD     0x00014  /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018  /* Extended Device Control - RW */
+#define E1000_FLA      0x0001C  /* Flash Access - RW */
+#define E1000_MDIC     0x00020  /* MDI Control - RW */
+#define E1000_SCTL     0x00024  /* SerDes Control - RW */
+#define E1000_FEXTNVM  0x00028  /* Future Extended NVM register */
+#define E1000_FCAL     0x00028  /* Flow Control Address Low - RW */
+#define E1000_FCAH     0x0002C  /* Flow Control Address High - RW */
+#define E1000_FCT      0x00030  /* Flow Control Type - RW */
+#define E1000_VET      0x00038  /* VLAN Ether Type - RW */
+#define E1000_ICR      0x000C0  /* Interrupt Cause Read - R/clr */
+#define E1000_ITR      0x000C4  /* Interrupt Throttling Rate - RW */
+#define E1000_ICS      0x000C8  /* Interrupt Cause Set - WO */
+#define E1000_IMS      0x000D0  /* Interrupt Mask Set - RW */
+#define E1000_IMC      0x000D8  /* Interrupt Mask Clear - WO */
+#define E1000_IAM      0x000E0  /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL     0x00100  /* RX Control - RW */
+#define E1000_RDTR1    0x02820  /* RX Delay Timer (1) - RW */
+#define E1000_RDBAL1   0x02900  /* RX Descriptor Base Address Low (1) - RW */
+#define E1000_RDBAH1   0x02904  /* RX Descriptor Base Address High (1) - RW */
+#define E1000_RDLEN1   0x02908  /* RX Descriptor Length (1) - RW */
+#define E1000_RDH1     0x02910  /* RX Descriptor Head (1) - RW */
+#define E1000_RDT1     0x02918  /* RX Descriptor Tail (1) - RW */
+#define E1000_FCTTV    0x00170  /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW     0x00178  /* TX Configuration Word - RW */
+#define E1000_RXCW     0x00180  /* RX Configuration Word - RO */
+#define E1000_TCTL     0x00400  /* TX Control - RW */
+#define E1000_TCTL_EXT 0x00404  /* Extended TX Control - RW */
+#define E1000_TIPG     0x00410  /* TX Inter-packet gap - RW */
+#define E1000_TBT      0x00448  /* TX Burst Timer - RW */
+#define E1000_AIT      0x00458  /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL   0x00E00  /* LED Control - RW */
+#define E1000_EXTCNF_CTRL  0x00F00  /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE  0x00F08  /* Extended Configuration Size */
+#define E1000_PHY_CTRL     0x00F10  /* PHY Control Register in CSR */
+#define FEXTNVM_SW_CONFIG  0x0001
+#define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
+#define E1000_PBS      0x01008  /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
+#define E1000_FLASH_UPDATES 1000
+#define E1000_EEARBC   0x01024  /* EEPROM Auto Read Bus Control */
+#define E1000_FLASHT   0x01028  /* FLASH Timer Register */
+#define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
+#define E1000_FLSWCTL  0x01030  /* FLASH control register */
+#define E1000_FLSWDATA 0x01034  /* FLASH data register */
+#define E1000_FLSWCNT  0x01038  /* FLASH Access Counter */
+#define E1000_FLOP     0x0103C  /* FLASH Opcode Register */
+#define E1000_ERT      0x02008  /* Early Rx Threshold - RW */
+#define E1000_FCRTL    0x02160  /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH    0x02168  /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL   0x02170  /* Packet Split Receive Control - RW */
+#define E1000_RDBAL    0x02800  /* RX Descriptor Base Address Low - RW */
+#define E1000_RDBAH    0x02804  /* RX Descriptor Base Address High - RW */
+#define E1000_RDLEN    0x02808  /* RX Descriptor Length - RW */
+#define E1000_RDH      0x02810  /* RX Descriptor Head - RW */
+#define E1000_RDT      0x02818  /* RX Descriptor Tail - RW */
+#define E1000_RDTR     0x02820  /* RX Delay Timer - RW */
+#define E1000_RDBAL0   E1000_RDBAL /* RX Desc Base Address Low (0) - RW */
+#define E1000_RDBAH0   E1000_RDBAH /* RX Desc Base Address High (0) - RW */
+#define E1000_RDLEN0   E1000_RDLEN /* RX Desc Length (0) - RW */
+#define E1000_RDH0     E1000_RDH   /* RX Desc Head (0) - RW */
+#define E1000_RDT0     E1000_RDT   /* RX Desc Tail (0) - RW */
+#define E1000_RDTR0    E1000_RDTR  /* RX Delay Timer (0) - RW */
+#define E1000_RXDCTL   0x02828  /* RX Descriptor Control queue 0 - RW */
+#define E1000_RXDCTL1  0x02928  /* RX Descriptor Control queue 1 - RW */
+#define E1000_RADV     0x0282C  /* RX Interrupt Absolute Delay Timer - RW */
+#define E1000_RSRPD    0x02C00  /* RX Small Packet Detect - RW */
+#define E1000_RAID     0x02C08  /* Receive Ack Interrupt Delay - RW */
+#define E1000_TXDMAC   0x03000  /* TX DMA Control - RW */
+#define E1000_KABGTXD  0x03004  /* AFE Band Gap Transmit Ref Data */
+#define E1000_TDFH     0x03410  /* TX Data FIFO Head - RW */
+#define E1000_TDFT     0x03418  /* TX Data FIFO Tail - RW */
+#define E1000_TDFHS    0x03420  /* TX Data FIFO Head Saved - RW */
+#define E1000_TDFTS    0x03428  /* TX Data FIFO Tail Saved - RW */
+#define E1000_TDFPC    0x03430  /* TX Data FIFO Packet Count - RW */
+#define E1000_TDBAL    0x03800  /* TX Descriptor Base Address Low - RW */
+#define E1000_TDBAH    0x03804  /* TX Descriptor Base Address High - RW */
+#define E1000_TDLEN    0x03808  /* TX Descriptor Length - RW */
+#define E1000_TDH      0x03810  /* TX Descriptor Head - RW */
+#define E1000_TDT      0x03818  /* TX Descriptor Tail - RW */
+#define E1000_TIDV     0x03820  /* TX Interrupt Delay Value - RW */
+#define E1000_TXDCTL   0x03828  /* TX Descriptor Control - RW */
+#define E1000_TADV     0x0382C  /* TX Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT    0x03830  /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_TARC0    0x03840  /* TX Arbitration Count (0) */
+#define E1000_TDBAL1   0x03900  /* TX Desc Base Address Low (1) - RW */
+#define E1000_TDBAH1   0x03904  /* TX Desc Base Address High (1) - RW */
+#define E1000_TDLEN1   0x03908  /* TX Desc Length (1) - RW */
+#define E1000_TDH1     0x03910  /* TX Desc Head (1) - RW */
+#define E1000_TDT1     0x03918  /* TX Desc Tail (1) - RW */
+#define E1000_TXDCTL1  0x03928  /* TX Descriptor Control (1) - RW */
+#define E1000_TARC1    0x03940  /* TX Arbitration Count (1) */
+#define E1000_CRCERRS  0x04000  /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004  /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS  0x04008  /* Symbol Error Count - R/clr */
+#define E1000_RXERRC   0x0400C  /* Receive Error Count - R/clr */
+#define E1000_MPC      0x04010  /* Missed Packet Count - R/clr */
+#define E1000_SCC      0x04014  /* Single Collision Count - R/clr */
+#define E1000_ECOL     0x04018  /* Excessive Collision Count - R/clr */
+#define E1000_MCC      0x0401C  /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL  0x04020  /* Late Collision Count - R/clr */
+#define E1000_COLC     0x04028  /* Collision Count - R/clr */
+#define E1000_DC       0x04030  /* Defer Count - R/clr */
+#define E1000_TNCRS    0x04034  /* TX-No CRS - R/clr */
+#define E1000_SEC      0x04038  /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR  0x0403C  /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC     0x04040  /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC   0x04048  /* XON RX Count - R/clr */
+#define E1000_XONTXC   0x0404C  /* XON TX Count - R/clr */
+#define E1000_XOFFRXC  0x04050  /* XOFF RX Count - R/clr */
+#define E1000_XOFFTXC  0x04054  /* XOFF TX Count - R/clr */
+#define E1000_FCRUC    0x04058  /* Flow Control RX Unsupported Count- R/clr */
+#define E1000_PRC64    0x0405C  /* Packets RX (64 bytes) - R/clr */
+#define E1000_PRC127   0x04060  /* Packets RX (65-127 bytes) - R/clr */
+#define E1000_PRC255   0x04064  /* Packets RX (128-255 bytes) - R/clr */
+#define E1000_PRC511   0x04068  /* Packets RX (256-511 bytes) - R/clr */
+#define E1000_PRC1023  0x0406C  /* Packets RX (512-1023 bytes) - R/clr */
+#define E1000_PRC1522  0x04070  /* Packets RX (1024-1522 bytes) - R/clr */
+#define E1000_GPRC     0x04074  /* Good Packets RX Count - R/clr */
+#define E1000_BPRC     0x04078  /* Broadcast Packets RX Count - R/clr */
+#define E1000_MPRC     0x0407C  /* Multicast Packets RX Count - R/clr */
+#define E1000_GPTC     0x04080  /* Good Packets TX Count - R/clr */
+#define E1000_GORCL    0x04088  /* Good Octets RX Count Low - R/clr */
+#define E1000_GORCH    0x0408C  /* Good Octets RX Count High - R/clr */
+#define E1000_GOTCL    0x04090  /* Good Octets TX Count Low - R/clr */
+#define E1000_GOTCH    0x04094  /* Good Octets TX Count High - R/clr */
+#define E1000_RNBC     0x040A0  /* RX No Buffers Count - R/clr */
+#define E1000_RUC      0x040A4  /* RX Undersize Count - R/clr */
+#define E1000_RFC      0x040A8  /* RX Fragment Count - R/clr */
+#define E1000_ROC      0x040AC  /* RX Oversize Count - R/clr */
+#define E1000_RJC      0x040B0  /* RX Jabber Count - R/clr */
+#define E1000_MGTPRC   0x040B4  /* Management Packets RX Count - R/clr */
+#define E1000_MGTPDC   0x040B8  /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC   0x040BC  /* Management Packets TX Count - R/clr */
+#define E1000_TORL     0x040C0  /* Total Octets RX Low - R/clr */
+#define E1000_TORH     0x040C4  /* Total Octets RX High - R/clr */
+#define E1000_TOTL     0x040C8  /* Total Octets TX Low - R/clr */
+#define E1000_TOTH     0x040CC  /* Total Octets TX High - R/clr */
+#define E1000_TPR      0x040D0  /* Total Packets RX - R/clr */
+#define E1000_TPT      0x040D4  /* Total Packets TX - R/clr */
+#define E1000_PTC64    0x040D8  /* Packets TX (64 bytes) - R/clr */
+#define E1000_PTC127   0x040DC  /* Packets TX (65-127 bytes) - R/clr */
+#define E1000_PTC255   0x040E0  /* Packets TX (128-255 bytes) - R/clr */
+#define E1000_PTC511   0x040E4  /* Packets TX (256-511 bytes) - R/clr */
+#define E1000_PTC1023  0x040E8  /* Packets TX (512-1023 bytes) - R/clr */
+#define E1000_PTC1522  0x040EC  /* Packets TX (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC     0x040F0  /* Multicast Packets TX Count - R/clr */
+#define E1000_BPTC     0x040F4  /* Broadcast Packets TX Count - R/clr */
+#define E1000_TSCTC    0x040F8  /* TCP Segmentation Context TX - R/clr */
+#define E1000_TSCTFC   0x040FC  /* TCP Segmentation Context TX Fail - R/clr */
+#define E1000_IAC      0x04100  /* Interrupt Assertion Count */
+#define E1000_ICRXPTC  0x04104  /* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXATC  0x04108  /* Interrupt Cause Rx Absolute Timer Expire Count */
+#define E1000_ICTXPTC  0x0410C  /* Interrupt Cause Tx Packet Timer Expire Count */
+#define E1000_ICTXATC  0x04110  /* Interrupt Cause Tx Absolute Timer Expire Count */
+#define E1000_ICTXQEC  0x04118  /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C  /* Interrupt Cause Tx Queue Minimum Threshold Count */
+#define E1000_ICRXDMTC 0x04120  /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+#define E1000_ICRXOC   0x04124  /* Interrupt Cause Receiver Overrun Count */
+#define E1000_RXCSUM   0x05000  /* RX Checksum Control - RW */
+#define E1000_RFCTL    0x05008  /* Receive Filter Control*/
+#define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
+#define E1000_RA       0x05400  /* Receive Address - RW Array */
+#define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
+#define E1000_WUC      0x05800  /* Wakeup Control - RW */
+#define E1000_WUFC     0x05808  /* Wakeup Filter Control - RW */
+#define E1000_WUS      0x05810  /* Wakeup Status - RO */
+#define E1000_MANC     0x05820  /* Management Control - RW */
+#define E1000_IPAV     0x05838  /* IP Address Valid - RW */
+#define E1000_IP4AT    0x05840  /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT    0x05880  /* IPv6 Address Table - RW Array */
+#define E1000_WUPL     0x05900  /* Wakeup Packet Length - RW */
+#define E1000_WUPM     0x05A00  /* Wakeup Packet Memory - RO A */
+#define E1000_FFLT     0x05F00  /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF  0x08800  /* Host Interface */
+#define E1000_FFMT     0x09000  /* Flexible Filter Mask Table - RW Array */
+#define E1000_FFVT     0x09800  /* Flexible Filter Value Table - RW Array */
+
+#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MDPHYA     0x0003C  /* PHY address - RW */
+#define E1000_MANC2H     0x05860  /* Management Control To Host - RW */
+#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
+
+#define E1000_GCR       0x05B00 /* PCI-Ex Control */
+#define E1000_GSCL_1    0x05B10 /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2    0x05B14 /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3    0x05B18 /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4    0x05B1C /* PCI-Ex Statistic Control #4 */
+#define E1000_FACTPS    0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM      0x05B50 /* SW Semaphore */
+#define E1000_FWSM      0x05B54 /* FW Semaphore */
+#define E1000_FFLT_DBG  0x05F04 /* Debug Register */
+#define E1000_HICR      0x08F00 /* Host Interface Control */
+
+/* RSS registers */
+#define E1000_CPUVEC    0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC      0x05818 /* Multiple Receive Control - RW */
+#define E1000_RETA      0x05C00 /* Redirection Table - RW Array */
+#define E1000_RSSRK     0x05C80 /* RSS Random Key - RW Array */
+#define E1000_RSSIM     0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR     0x05868 /* RSS Interrupt Request */
+/* Register Set (82542)
+ *
+ * Some of the 82542 registers are located at different offsets than they are
+ * in more current versions of the 8254x. Despite the difference in location,
+ * the registers function in the same manner.
+ */
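+
+/*
+ * Illustrative note (assumption, not part of the original driver code):
+ * shared code is expected to select the per-MAC offset at run time; an
+ * early 82542 keeps its multicast table at 0x00200 rather than 0x05200, so
+ * a lookup would use E1000_82542_MTA for that MAC type and E1000_MTA
+ * otherwise.
+ */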
+#define E1000_82542_CTRL     E1000_CTRL
+#define E1000_82542_CTRL_DUP E1000_CTRL_DUP
+#define E1000_82542_STATUS   E1000_STATUS
+#define E1000_82542_EECD     E1000_EECD
+#define E1000_82542_EERD     E1000_EERD
+#define E1000_82542_CTRL_EXT E1000_CTRL_EXT
+#define E1000_82542_FLA      E1000_FLA
+#define E1000_82542_MDIC     E1000_MDIC
+#define E1000_82542_SCTL     E1000_SCTL
+#define E1000_82542_FEXTNVM  E1000_FEXTNVM
+#define E1000_82542_FCAL     E1000_FCAL
+#define E1000_82542_FCAH     E1000_FCAH
+#define E1000_82542_FCT      E1000_FCT
+#define E1000_82542_VET      E1000_VET
+#define E1000_82542_RA       0x00040
+#define E1000_82542_ICR      E1000_ICR
+#define E1000_82542_ITR      E1000_ITR
+#define E1000_82542_ICS      E1000_ICS
+#define E1000_82542_IMS      E1000_IMS
+#define E1000_82542_IMC      E1000_IMC
+#define E1000_82542_RCTL     E1000_RCTL
+#define E1000_82542_RDTR     0x00108
+#define E1000_82542_RDBAL    0x00110
+#define E1000_82542_RDBAH    0x00114
+#define E1000_82542_RDLEN    0x00118
+#define E1000_82542_RDH      0x00120
+#define E1000_82542_RDT      0x00128
+#define E1000_82542_RDTR0    E1000_82542_RDTR
+#define E1000_82542_RDBAL0   E1000_82542_RDBAL
+#define E1000_82542_RDBAH0   E1000_82542_RDBAH
+#define E1000_82542_RDLEN0   E1000_82542_RDLEN
+#define E1000_82542_RDH0     E1000_82542_RDH
+#define E1000_82542_RDT0     E1000_82542_RDT
+#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication
+                                                       * RX Control - RW */
+#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
+#define E1000_82542_RDBAH3   0x02B04 /* RX Desc Base High Queue 3 - RW */
+#define E1000_82542_RDBAL3   0x02B00 /* RX Desc Low Queue 3 - RW */
+#define E1000_82542_RDLEN3   0x02B08 /* RX Desc Length Queue 3 - RW */
+#define E1000_82542_RDH3     0x02B10 /* RX Desc Head Queue 3 - RW */
+#define E1000_82542_RDT3     0x02B18 /* RX Desc Tail Queue 3 - RW */
+#define E1000_82542_RDBAL2   0x02A00 /* RX Desc Base Low Queue 2 - RW */
+#define E1000_82542_RDBAH2   0x02A04 /* RX Desc Base High Queue 2 - RW */
+#define E1000_82542_RDLEN2   0x02A08 /* RX Desc Length Queue 2 - RW */
+#define E1000_82542_RDH2     0x02A10 /* RX Desc Head Queue 2 - RW */
+#define E1000_82542_RDT2     0x02A18 /* RX Desc Tail Queue 2 - RW */
+#define E1000_82542_RDTR1    0x00130
+#define E1000_82542_RDBAL1   0x00138
+#define E1000_82542_RDBAH1   0x0013C
+#define E1000_82542_RDLEN1   0x00140
+#define E1000_82542_RDH1     0x00148
+#define E1000_82542_RDT1     0x00150
+#define E1000_82542_FCRTH    0x00160
+#define E1000_82542_FCRTL    0x00168
+#define E1000_82542_FCTTV    E1000_FCTTV
+#define E1000_82542_TXCW     E1000_TXCW
+#define E1000_82542_RXCW     E1000_RXCW
+#define E1000_82542_MTA      0x00200
+#define E1000_82542_TCTL     E1000_TCTL
+#define E1000_82542_TCTL_EXT E1000_TCTL_EXT
+#define E1000_82542_TIPG     E1000_TIPG
+#define E1000_82542_TDBAL    0x00420
+#define E1000_82542_TDBAH    0x00424
+#define E1000_82542_TDLEN    0x00428
+#define E1000_82542_TDH      0x00430
+#define E1000_82542_TDT      0x00438
+#define E1000_82542_TIDV     0x00440
+#define E1000_82542_TBT      E1000_TBT
+#define E1000_82542_AIT      E1000_AIT
+#define E1000_82542_VFTA     0x00600
+#define E1000_82542_LEDCTL   E1000_LEDCTL
+#define E1000_82542_PBA      E1000_PBA
+#define E1000_82542_PBS      E1000_PBS
+#define E1000_82542_EEMNGCTL E1000_EEMNGCTL
+#define E1000_82542_EEARBC   E1000_EEARBC
+#define E1000_82542_FLASHT   E1000_FLASHT
+#define E1000_82542_EEWR     E1000_EEWR
+#define E1000_82542_FLSWCTL  E1000_FLSWCTL
+#define E1000_82542_FLSWDATA E1000_FLSWDATA
+#define E1000_82542_FLSWCNT  E1000_FLSWCNT
+#define E1000_82542_FLOP     E1000_FLOP
+#define E1000_82542_EXTCNF_CTRL  E1000_EXTCNF_CTRL
+#define E1000_82542_EXTCNF_SIZE  E1000_EXTCNF_SIZE
+#define E1000_82542_PHY_CTRL E1000_PHY_CTRL
+#define E1000_82542_ERT      E1000_ERT
+#define E1000_82542_RXDCTL   E1000_RXDCTL
+#define E1000_82542_RXDCTL1  E1000_RXDCTL1
+#define E1000_82542_RADV     E1000_RADV
+#define E1000_82542_RSRPD    E1000_RSRPD
+#define E1000_82542_TXDMAC   E1000_TXDMAC
+#define E1000_82542_KABGTXD  E1000_KABGTXD
+#define E1000_82542_TDFHS    E1000_TDFHS
+#define E1000_82542_TDFTS    E1000_TDFTS
+#define E1000_82542_TDFPC    E1000_TDFPC
+#define E1000_82542_TXDCTL   E1000_TXDCTL
+#define E1000_82542_TADV     E1000_TADV
+#define E1000_82542_TSPMT    E1000_TSPMT
+#define E1000_82542_CRCERRS  E1000_CRCERRS
+#define E1000_82542_ALGNERRC E1000_ALGNERRC
+#define E1000_82542_SYMERRS  E1000_SYMERRS
+#define E1000_82542_RXERRC   E1000_RXERRC
+#define E1000_82542_MPC      E1000_MPC
+#define E1000_82542_SCC      E1000_SCC
+#define E1000_82542_ECOL     E1000_ECOL
+#define E1000_82542_MCC      E1000_MCC
+#define E1000_82542_LATECOL  E1000_LATECOL
+#define E1000_82542_COLC     E1000_COLC
+#define E1000_82542_DC       E1000_DC
+#define E1000_82542_TNCRS    E1000_TNCRS
+#define E1000_82542_SEC      E1000_SEC
+#define E1000_82542_CEXTERR  E1000_CEXTERR
+#define E1000_82542_RLEC     E1000_RLEC
+#define E1000_82542_XONRXC   E1000_XONRXC
+#define E1000_82542_XONTXC   E1000_XONTXC
+#define E1000_82542_XOFFRXC  E1000_XOFFRXC
+#define E1000_82542_XOFFTXC  E1000_XOFFTXC
+#define E1000_82542_FCRUC    E1000_FCRUC
+#define E1000_82542_PRC64    E1000_PRC64
+#define E1000_82542_PRC127   E1000_PRC127
+#define E1000_82542_PRC255   E1000_PRC255
+#define E1000_82542_PRC511   E1000_PRC511
+#define E1000_82542_PRC1023  E1000_PRC1023
+#define E1000_82542_PRC1522  E1000_PRC1522
+#define E1000_82542_GPRC     E1000_GPRC
+#define E1000_82542_BPRC     E1000_BPRC
+#define E1000_82542_MPRC     E1000_MPRC
+#define E1000_82542_GPTC     E1000_GPTC
+#define E1000_82542_GORCL    E1000_GORCL
+#define E1000_82542_GORCH    E1000_GORCH
+#define E1000_82542_GOTCL    E1000_GOTCL
+#define E1000_82542_GOTCH    E1000_GOTCH
+#define E1000_82542_RNBC     E1000_RNBC
+#define E1000_82542_RUC      E1000_RUC
+#define E1000_82542_RFC      E1000_RFC
+#define E1000_82542_ROC      E1000_ROC
+#define E1000_82542_RJC      E1000_RJC
+#define E1000_82542_MGTPRC   E1000_MGTPRC
+#define E1000_82542_MGTPDC   E1000_MGTPDC
+#define E1000_82542_MGTPTC   E1000_MGTPTC
+#define E1000_82542_TORL     E1000_TORL
+#define E1000_82542_TORH     E1000_TORH
+#define E1000_82542_TOTL     E1000_TOTL
+#define E1000_82542_TOTH     E1000_TOTH
+#define E1000_82542_TPR      E1000_TPR
+#define E1000_82542_TPT      E1000_TPT
+#define E1000_82542_PTC64    E1000_PTC64
+#define E1000_82542_PTC127   E1000_PTC127
+#define E1000_82542_PTC255   E1000_PTC255
+#define E1000_82542_PTC511   E1000_PTC511
+#define E1000_82542_PTC1023  E1000_PTC1023
+#define E1000_82542_PTC1522  E1000_PTC1522
+#define E1000_82542_MPTC     E1000_MPTC
+#define E1000_82542_BPTC     E1000_BPTC
+#define E1000_82542_TSCTC    E1000_TSCTC
+#define E1000_82542_TSCTFC   E1000_TSCTFC
+#define E1000_82542_RXCSUM   E1000_RXCSUM
+#define E1000_82542_WUC      E1000_WUC
+#define E1000_82542_WUFC     E1000_WUFC
+#define E1000_82542_WUS      E1000_WUS
+#define E1000_82542_MANC     E1000_MANC
+#define E1000_82542_IPAV     E1000_IPAV
+#define E1000_82542_IP4AT    E1000_IP4AT
+#define E1000_82542_IP6AT    E1000_IP6AT
+#define E1000_82542_WUPL     E1000_WUPL
+#define E1000_82542_WUPM     E1000_WUPM
+#define E1000_82542_FFLT     E1000_FFLT
+#define E1000_82542_TDFH     0x08010
+#define E1000_82542_TDFT     0x08018
+#define E1000_82542_FFMT     E1000_FFMT
+#define E1000_82542_FFVT     E1000_FFVT
+#define E1000_82542_HOST_IF  E1000_HOST_IF
+#define E1000_82542_IAM         E1000_IAM
+#define E1000_82542_EEMNGCTL    E1000_EEMNGCTL
+#define E1000_82542_PSRCTL      E1000_PSRCTL
+#define E1000_82542_RAID        E1000_RAID
+#define E1000_82542_TARC0       E1000_TARC0
+#define E1000_82542_TDBAL1      E1000_TDBAL1
+#define E1000_82542_TDBAH1      E1000_TDBAH1
+#define E1000_82542_TDLEN1      E1000_TDLEN1
+#define E1000_82542_TDH1        E1000_TDH1
+#define E1000_82542_TDT1        E1000_TDT1
+#define E1000_82542_TXDCTL1     E1000_TXDCTL1
+#define E1000_82542_TARC1       E1000_TARC1
+#define E1000_82542_RFCTL       E1000_RFCTL
+#define E1000_82542_GCR         E1000_GCR
+#define E1000_82542_GSCL_1      E1000_GSCL_1
+#define E1000_82542_GSCL_2      E1000_GSCL_2
+#define E1000_82542_GSCL_3      E1000_GSCL_3
+#define E1000_82542_GSCL_4      E1000_GSCL_4
+#define E1000_82542_FACTPS      E1000_FACTPS
+#define E1000_82542_SWSM        E1000_SWSM
+#define E1000_82542_FWSM        E1000_FWSM
+#define E1000_82542_FFLT_DBG    E1000_FFLT_DBG
+#define E1000_82542_IAC         E1000_IAC
+#define E1000_82542_ICRXPTC     E1000_ICRXPTC
+#define E1000_82542_ICRXATC     E1000_ICRXATC
+#define E1000_82542_ICTXPTC     E1000_ICTXPTC
+#define E1000_82542_ICTXATC     E1000_ICTXATC
+#define E1000_82542_ICTXQEC     E1000_ICTXQEC
+#define E1000_82542_ICTXQMTC    E1000_ICTXQMTC
+#define E1000_82542_ICRXDMTC    E1000_ICRXDMTC
+#define E1000_82542_ICRXOC      E1000_ICRXOC
+#define E1000_82542_HICR        E1000_HICR
+
+#define E1000_82542_CPUVEC      E1000_CPUVEC
+#define E1000_82542_MRQC        E1000_MRQC
+#define E1000_82542_RETA        E1000_RETA
+#define E1000_82542_RSSRK       E1000_RSSRK
+#define E1000_82542_RSSIM       E1000_RSSIM
+#define E1000_82542_RSSIR       E1000_RSSIR
+#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA
+#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+    uint64_t crcerrs;
+    uint64_t algnerrc;
+    uint64_t symerrs;
+    uint64_t rxerrc;
+    uint64_t mpc;
+    uint64_t scc;
+    uint64_t ecol;
+    uint64_t mcc;
+    uint64_t latecol;
+    uint64_t colc;
+    uint64_t dc;
+    uint64_t tncrs;
+    uint64_t sec;
+    uint64_t cexterr;
+    uint64_t rlec;
+    uint64_t xonrxc;
+    uint64_t xontxc;
+    uint64_t xoffrxc;
+    uint64_t xofftxc;
+    uint64_t fcruc;
+    uint64_t prc64;
+    uint64_t prc127;
+    uint64_t prc255;
+    uint64_t prc511;
+    uint64_t prc1023;
+    uint64_t prc1522;
+    uint64_t gprc;
+    uint64_t bprc;
+    uint64_t mprc;
+    uint64_t gptc;
+    uint64_t gorcl;
+    uint64_t gorch;
+    uint64_t gotcl;
+    uint64_t gotch;
+    uint64_t rnbc;
+    uint64_t ruc;
+    uint64_t rfc;
+    uint64_t roc;
+    uint64_t rjc;
+    uint64_t mgprc;
+    uint64_t mgpdc;
+    uint64_t mgptc;
+    uint64_t torl;
+    uint64_t torh;
+    uint64_t totl;
+    uint64_t toth;
+    uint64_t tpr;
+    uint64_t tpt;
+    uint64_t ptc64;
+    uint64_t ptc127;
+    uint64_t ptc255;
+    uint64_t ptc511;
+    uint64_t ptc1023;
+    uint64_t ptc1522;
+    uint64_t mptc;
+    uint64_t bptc;
+    uint64_t tsctc;
+    uint64_t tsctfc;
+    uint64_t iac;
+    uint64_t icrxptc;
+    uint64_t icrxatc;
+    uint64_t ictxptc;
+    uint64_t ictxatc;
+    uint64_t ictxqec;
+    uint64_t ictxqmtc;
+    uint64_t icrxdmtc;
+    uint64_t icrxoc;
+};
+
+/* Structure containing variables used by the shared code (e1000_hw.c) */
+struct e1000_hw {
+    uint8_t *hw_addr;
+    uint8_t *flash_address;
+    e1000_mac_type mac_type;
+    e1000_phy_type phy_type;
+    uint32_t phy_init_script;
+    e1000_media_type media_type;
+    void *back;
+    struct e1000_shadow_ram *eeprom_shadow_ram;
+    uint32_t flash_bank_size;
+    uint32_t flash_base_addr;
+    e1000_fc_type fc;
+    e1000_bus_speed bus_speed;
+    e1000_bus_width bus_width;
+    e1000_bus_type bus_type;
+    struct e1000_eeprom_info eeprom;
+    e1000_ms_type master_slave;
+    e1000_ms_type original_master_slave;
+    e1000_ffe_config ffe_config_state;
+    uint32_t asf_firmware_present;
+    uint32_t eeprom_semaphore_present;
+    uint32_t swfw_sync_present;
+    uint32_t swfwhw_semaphore_present;
+    unsigned long io_base;
+    uint32_t phy_id;
+    uint32_t phy_revision;
+    uint32_t phy_addr;
+    uint32_t original_fc;
+    uint32_t txcw;
+    uint32_t autoneg_failed;
+    uint32_t max_frame_size;
+    uint32_t min_frame_size;
+    uint32_t mc_filter_type;
+    uint32_t num_mc_addrs;
+    uint32_t collision_delta;
+    uint32_t tx_packet_delta;
+    uint32_t ledctl_default;
+    uint32_t ledctl_mode1;
+    uint32_t ledctl_mode2;
+    boolean_t tx_pkt_filtering;
+    struct e1000_host_mng_dhcp_cookie mng_cookie;
+    uint16_t phy_spd_default;
+    uint16_t autoneg_advertised;
+    uint16_t pci_cmd_word;
+    uint16_t fc_high_water;
+    uint16_t fc_low_water;
+    uint16_t fc_pause_time;
+    uint16_t current_ifs_val;
+    uint16_t ifs_min_val;
+    uint16_t ifs_max_val;
+    uint16_t ifs_step_size;
+    uint16_t ifs_ratio;
+    uint16_t device_id;
+    uint16_t vendor_id;
+    uint16_t subsystem_id;
+    uint16_t subsystem_vendor_id;
+    uint8_t revision_id;
+    uint8_t autoneg;
+    uint8_t mdix;
+    uint8_t forced_speed_duplex;
+    uint8_t wait_autoneg_complete;
+    uint8_t dma_fairness;
+    uint8_t mac_addr[NODE_ADDRESS_SIZE];
+    uint8_t perm_mac_addr[NODE_ADDRESS_SIZE];
+    boolean_t disable_polarity_correction;
+    boolean_t speed_downgraded;
+    e1000_smart_speed smart_speed;
+    e1000_dsp_config dsp_config_state;
+    boolean_t get_link_status;
+    boolean_t serdes_link_down;
+    boolean_t tbi_compatibility_en;
+    boolean_t tbi_compatibility_on;
+    boolean_t laa_is_present;
+    boolean_t phy_reset_disable;
+    boolean_t fc_send_xon;
+    boolean_t fc_strict_ieee;
+    boolean_t report_tx_early;
+    boolean_t adaptive_ifs;
+    boolean_t ifs_params_forced;
+    boolean_t in_ifs_mode;
+    boolean_t mng_reg_access_disabled;
+    boolean_t leave_av_bit_off;
+    boolean_t kmrn_lock_loss_workaround_disabled;
+};
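+
+/*
+ * Illustrative sketch (assumption, not part of the original driver code):
+ * adapter setup typically fills the identification fields and maps BAR0
+ * (MAC_DECODE_SIZE bytes) before any register defined in this file is
+ * touched, e.g. (ioremap() cast elided):
+ *
+ *     hw->vendor_id = pdev->vendor;
+ *     hw->device_id = pdev->device;
+ *     hw->hw_addr = ioremap(pci_resource_start(pdev, 0), MAC_DECODE_SIZE);
+ */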
+
+
+#define E1000_EEPROM_SWDPIN0   0x0001   /* SWDPIN 0 EEPROM Value */
+#define E1000_EEPROM_LED_LOGIC 0x0020   /* Led Logic Word */
+#define E1000_EEPROM_RW_REG_DATA   16   /* Offset to data in EEPROM read/write registers */
+#define E1000_EEPROM_RW_REG_DONE   2    /* Offset to READ/WRITE done bit */
+#define E1000_EEPROM_RW_REG_START  1    /* First bit for telling part to start operation */
+#define E1000_EEPROM_RW_ADDR_SHIFT 2    /* Shift to the address bits */
+#define E1000_EEPROM_POLL_WRITE    1    /* Flag for polling for write complete */
+#define E1000_EEPROM_POLL_READ     0    /* Flag for polling for read complete */
+/* Register Bit Masks */
+/* Device Control */
+#define E1000_CTRL_FD       0x00000001  /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_BEM      0x00000002  /* Endian Mode.0=little,1=big */
+#define E1000_CTRL_PRIOR    0x00000004  /* Priority on PCI. 0=rx,1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
+#define E1000_CTRL_LRST     0x00000008  /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_TME      0x00000010  /* Test mode. 0=normal,1=test */
+#define E1000_CTRL_SLE      0x00000020  /* Serial Link on 0=dis,1=en */
+#define E1000_CTRL_ASDE     0x00000020  /* Auto-speed detect enable */
+#define E1000_CTRL_SLU      0x00000040  /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS     0x00000080  /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL  0x00000300  /* Speed Select Mask */
+#define E1000_CTRL_SPD_10   0x00000000  /* Force 10Mb */
+#define E1000_CTRL_SPD_100  0x00000100  /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200  /* Force 1Gb */
+#define E1000_CTRL_BEM32    0x00000400  /* Big Endian 32 mode */
+#define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
+#define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
+#define E1000_CTRL_D_UD_EN  0x00002000  /* Dock/Undock enable */
+#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
+#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */
+#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */
+#define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2  0x00100000  /* SWDPIN 2 value */
+#define E1000_CTRL_SWDPIN3  0x00200000  /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SWDPIO1  0x00800000  /* SWDPIN 1 input or output */
+#define E1000_CTRL_SWDPIO2  0x01000000  /* SWDPIN 2 input or output */
+#define E1000_CTRL_SWDPIO3  0x02000000  /* SWDPIN 3 input or output */
+#define E1000_CTRL_RST      0x04000000  /* Global reset */
+#define E1000_CTRL_RFCE     0x08000000  /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE     0x10000000  /* Transmit flow control enable */
+#define E1000_CTRL_RTE      0x20000000  /* Routing tag enable */
+#define E1000_CTRL_VME      0x40000000  /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST  0x80000000  /* PHY Reset */
+#define E1000_CTRL_SW2FW_INT 0x02000000  /* Initiate an interrupt to manageability engine */
+
+/* Device Status */
+#define E1000_STATUS_FD         0x00000001      /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU         0x00000002      /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK  0x0000000C      /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_0     0x00000000      /* Function 0 */
+#define E1000_STATUS_FUNC_1     0x00000004      /* Function 1 */
+#define E1000_STATUS_TXOFF      0x00000010      /* transmission paused */
+#define E1000_STATUS_TBIMODE    0x00000020      /* TBI mode */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_10   0x00000000      /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100  0x00000040      /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080      /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200   /* Lan Init Completion
+                                                   by EEPROM/Flash */
+#define E1000_STATUS_ASDV       0x00000300      /* Auto speed detect value */
+#define E1000_STATUS_DOCK_CI    0x00000800      /* Change in Dock/Undock state. Clear on write '0'. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+#define E1000_STATUS_MTXCKOK    0x00000400      /* MTX clock running OK */
+#define E1000_STATUS_PCI66      0x00000800      /* In 66Mhz slot */
+#define E1000_STATUS_BUS64      0x00001000      /* In 64 bit slot */
+#define E1000_STATUS_PCIX_MODE  0x00002000      /* PCI-X mode */
+#define E1000_STATUS_PCIX_SPEED 0x0000C000      /* PCI-X bus speed */
+#define E1000_STATUS_BMC_SKU_0  0x00100000 /* BMC USB redirect disabled */
+#define E1000_STATUS_BMC_SKU_1  0x00200000 /* BMC SRAM disabled */
+#define E1000_STATUS_BMC_SKU_2  0x00400000 /* BMC SDRAM disabled */
+#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
+#define E1000_STATUS_BMC_LITE   0x01000000 /* BMC external code execution disabled */
+#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
+#define E1000_STATUS_FUSE_8       0x04000000
+#define E1000_STATUS_FUSE_9       0x08000000
+#define E1000_STATUS_SERDES0_DIS  0x10000000 /* SERDES disabled on port 0 */
+#define E1000_STATUS_SERDES1_DIS  0x20000000 /* SERDES disabled on port 1 */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+#define E1000_STATUS_PCIX_SPEED_66  0x00000000 /* PCI-X bus speed  50-66 MHz */
+#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed  66-100 MHz */
+#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
+
+/* EEPROM/Flash Control */
+#define E1000_EECD_SK        0x00000001 /* EEPROM Clock */
+#define E1000_EECD_CS        0x00000002 /* EEPROM Chip Select */
+#define E1000_EECD_DI        0x00000004 /* EEPROM Data In */
+#define E1000_EECD_DO        0x00000008 /* EEPROM Data Out */
+#define E1000_EECD_FWE_MASK  0x00000030
+#define E1000_EECD_FWE_DIS   0x00000010 /* Disable FLASH writes */
+#define E1000_EECD_FWE_EN    0x00000020 /* Enable FLASH writes */
+#define E1000_EECD_FWE_SHIFT 4
+#define E1000_EECD_REQ       0x00000040 /* EEPROM Access Request */
+#define E1000_EECD_GNT       0x00000080 /* EEPROM Access Grant */
+#define E1000_EECD_PRES      0x00000100 /* EEPROM Present */
+#define E1000_EECD_SIZE      0x00000200 /* EEPROM Size (0=64 word 1=256 word) */
+#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type
+                                         * (0-small, 1-large) */
+#define E1000_EECD_TYPE      0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */
+#ifndef E1000_EEPROM_GRANT_ATTEMPTS
+#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
+#endif
+#define E1000_EECD_AUTO_RD          0x00000200  /* EEPROM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK     0x00007800  /* EEprom Size */
+#define E1000_EECD_SIZE_EX_SHIFT    11
+#define E1000_EECD_NVADDS    0x00018000 /* NVM Address Size */
+#define E1000_EECD_SELSHAD   0x00020000 /* Select Shadow RAM */
+#define E1000_EECD_INITSRAM  0x00040000 /* Initialize Shadow RAM */
+#define E1000_EECD_FLUPD     0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN    0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SHADV     0x00200000 /* Shadow RAM Data Valid */
+#define E1000_EECD_SEC1VAL   0x00400000 /* Sector One Valid */
+#define E1000_EECD_SECVAL_SHIFT      22
+#define E1000_STM_OPCODE     0xDB00
+#define E1000_HICR_FW_RESET  0xC0
+
+#define E1000_SHADOW_RAM_WORDS     2048
+#define E1000_ICH8_NVM_SIG_WORD    0x13
+#define E1000_ICH8_NVM_SIG_MASK    0xC0
+
+/* EEPROM Read */
+#define E1000_EERD_START      0x00000001 /* Start Read */
+#define E1000_EERD_DONE       0x00000010 /* Read Done */
+#define E1000_EERD_ADDR_SHIFT 8
+#define E1000_EERD_ADDR_MASK  0x0000FF00 /* Read Address */
+#define E1000_EERD_DATA_SHIFT 16
+#define E1000_EERD_DATA_MASK  0xFFFF0000 /* Read Data */
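+
+/*
+ * Illustrative sketch (assumption, not part of the original driver code): a
+ * polled EEPROM word read through the EERD register on parts that support
+ * it:
+ *
+ *     writel((offset << E1000_EERD_ADDR_SHIFT) | E1000_EERD_START,
+ *            hw_addr + E1000_EERD);
+ *     do {
+ *         eerd = readl(hw_addr + E1000_EERD);
+ *     } while (!(eerd & E1000_EERD_DONE));
+ *     word = (eerd & E1000_EERD_DATA_MASK) >> E1000_EERD_DATA_SHIFT;
+ */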
+
+/* SPI EEPROM Status Register */
+#define EEPROM_STATUS_RDY_SPI  0x01
+#define EEPROM_STATUS_WEN_SPI  0x02
+#define EEPROM_STATUS_BP0_SPI  0x04
+#define EEPROM_STATUS_BP1_SPI  0x08
+#define EEPROM_STATUS_WPEN_SPI 0x80
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_GPI0_EN   0x00000001 /* Maps SDP4 to GPI0 */
+#define E1000_CTRL_EXT_GPI1_EN   0x00000002 /* Maps SDP5 to GPI1 */
+#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
+#define E1000_CTRL_EXT_GPI2_EN   0x00000004 /* Maps SDP6 to GPI2 */
+#define E1000_CTRL_EXT_GPI3_EN   0x00000008 /* Maps SDP7 to GPI3 */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */
+#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
+#define E1000_CTRL_EXT_PHY_INT   E1000_CTRL_EXT_SDP5_DATA
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */
+#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
+#define E1000_CTRL_EXT_SDP4_DIR  0x00000100 /* Direction of SDP4 0=in 1=out */
+#define E1000_CTRL_EXT_SDP5_DIR  0x00000200 /* Direction of SDP5 0=in 1=out */
+#define E1000_CTRL_EXT_SDP6_DIR  0x00000400 /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP7_DIR  0x00000800 /* Direction of SDP7 0=in 1=out */
+#define E1000_CTRL_EXT_ASDCHK    0x00001000 /* Initiate an ASD sequence */
+#define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_IPS       0x00004000 /* Invert Power State */
+#define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_TBI  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_KMRN    0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_SERDES  0x00C00000
+#define E1000_CTRL_EXT_WR_WMARK_MASK  0x03000000
+#define E1000_CTRL_EXT_WR_WMARK_256   0x00000000
+#define E1000_CTRL_EXT_WR_WMARK_320   0x01000000
+#define E1000_CTRL_EXT_WR_WMARK_384   0x02000000
+#define E1000_CTRL_EXT_WR_WMARK_448   0x03000000
+#define E1000_CTRL_EXT_DRV_LOAD       0x10000000  /* Driver loaded bit for FW */
+#define E1000_CTRL_EXT_IAME           0x08000000  /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_INT_TIMER_CLR  0x20000000  /* Clear Interrupt timers after IMS clear */
+#define E1000_CRTL_EXT_PB_PAREN       0x01000000 /* packet buffer parity error detection enabled */
+#define E1000_CTRL_EXT_DF_PAREN       0x02000000 /* descriptor FIFO parity error detection enable */
+#define E1000_CTRL_EXT_GHOST_PAREN    0x40000000
+
+/* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK  0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK  0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE  0x04000000
+#define E1000_MDIC_OP_READ   0x08000000
+#define E1000_MDIC_READY     0x10000000
+#define E1000_MDIC_INT_EN    0x20000000
+#define E1000_MDIC_ERROR     0x40000000
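+
+/*
+ * Illustrative sketch (assumption, not part of the original driver code): a
+ * PHY register read through MDIC builds the command from the fields above,
+ * polls READY and should check ERROR before trusting the data:
+ *
+ *     writel(((uint32_t)reg << E1000_MDIC_REG_SHIFT) |
+ *            ((uint32_t)phy << E1000_MDIC_PHY_SHIFT) |
+ *            E1000_MDIC_OP_READ, hw_addr + E1000_MDIC);
+ *     do {
+ *         mdic = readl(hw_addr + E1000_MDIC);
+ *     } while (!(mdic & E1000_MDIC_READY));
+ *     data = mdic & E1000_MDIC_DATA_MASK;
+ */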
+
+#define E1000_KUMCTRLSTA_MASK           0x0000FFFF
+#define E1000_KUMCTRLSTA_OFFSET         0x001F0000
+#define E1000_KUMCTRLSTA_OFFSET_SHIFT   16
+#define E1000_KUMCTRLSTA_REN            0x00200000
+
+#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL      0x00000000
+#define E1000_KUMCTRLSTA_OFFSET_CTRL           0x00000001
+#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL       0x00000002
+#define E1000_KUMCTRLSTA_OFFSET_DIAG           0x00000003
+#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS       0x00000004
+#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM      0x00000009
+#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL        0x00000010
+#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES     0x0000001E
+#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES      0x0000001F
+
+/* FIFO Control */
+#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS   0x00000008
+#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS   0x00000800
+
+/* In-Band Control */
+#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT    0x00000500
+#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING  0x00000010
+
+/* Half-Duplex Control */
+#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004
+#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT  0x00000000
+
+#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL       0x0000001E
+
+#define E1000_KUMCTRLSTA_DIAG_FELPBK           0x2000
+#define E1000_KUMCTRLSTA_DIAG_NELPBK           0x1000
+
+#define E1000_KUMCTRLSTA_K0S_100_EN            0x2000
+#define E1000_KUMCTRLSTA_K0S_GBE_EN            0x1000
+#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK   0x0003
+
+#define E1000_KABGTXD_BGSQLBIAS                0x00050000
+
+#define E1000_PHY_CTRL_SPD_EN                  0x00000001
+#define E1000_PHY_CTRL_D0A_LPLU                0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU             0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE      0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE             0x00000040
+#define E1000_PHY_CTRL_B2B_EN                  0x00000080
+
+/* LED Control */
+#define E1000_LEDCTL_LED0_MODE_MASK       0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT      0
+#define E1000_LEDCTL_LED0_BLINK_RATE      0x0000020
+#define E1000_LEDCTL_LED0_IVRT            0x00000040
+#define E1000_LEDCTL_LED0_BLINK           0x00000080
+#define E1000_LEDCTL_LED1_MODE_MASK       0x00000F00
+#define E1000_LEDCTL_LED1_MODE_SHIFT      8
+#define E1000_LEDCTL_LED1_BLINK_RATE      0x0002000
+#define E1000_LEDCTL_LED1_IVRT            0x00004000
+#define E1000_LEDCTL_LED1_BLINK           0x00008000
+#define E1000_LEDCTL_LED2_MODE_MASK       0x000F0000
+#define E1000_LEDCTL_LED2_MODE_SHIFT      16
+#define E1000_LEDCTL_LED2_BLINK_RATE      0x00200000
+#define E1000_LEDCTL_LED2_IVRT            0x00400000
+#define E1000_LEDCTL_LED2_BLINK           0x00800000
+#define E1000_LEDCTL_LED3_MODE_MASK       0x0F000000
+#define E1000_LEDCTL_LED3_MODE_SHIFT      24
+#define E1000_LEDCTL_LED3_BLINK_RATE      0x20000000
+#define E1000_LEDCTL_LED3_IVRT            0x40000000
+#define E1000_LEDCTL_LED3_BLINK           0x80000000
+
+#define E1000_LEDCTL_MODE_LINK_10_1000  0x0
+#define E1000_LEDCTL_MODE_LINK_100_1000 0x1
+#define E1000_LEDCTL_MODE_LINK_UP       0x2
+#define E1000_LEDCTL_MODE_ACTIVITY      0x3
+#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4
+#define E1000_LEDCTL_MODE_LINK_10       0x5
+#define E1000_LEDCTL_MODE_LINK_100      0x6
+#define E1000_LEDCTL_MODE_LINK_1000     0x7
+#define E1000_LEDCTL_MODE_PCIX_MODE     0x8
+#define E1000_LEDCTL_MODE_FULL_DUPLEX   0x9
+#define E1000_LEDCTL_MODE_COLLISION     0xA
+#define E1000_LEDCTL_MODE_BUS_SPEED     0xB
+#define E1000_LEDCTL_MODE_BUS_SIZE      0xC
+#define E1000_LEDCTL_MODE_PAUSED        0xD
+#define E1000_LEDCTL_MODE_LED_ON        0xE
+#define E1000_LEDCTL_MODE_LED_OFF       0xF
+
+/* Receive Address */
+#define E1000_RAH_AV  0x80000000        /* Receive address valid */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW          0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE          0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC           0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ         0x00000008 /* rx sequence error */
+#define E1000_ICR_RXDMT0        0x00000010 /* rx desc min. threshold (0) */
+#define E1000_ICR_RXO           0x00000040 /* rx overrun */
+#define E1000_ICR_RXT0          0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_MDAC          0x00000200 /* MDIO access complete */
+#define E1000_ICR_RXCFG         0x00000400 /* RX /c/ ordered set */
+#define E1000_ICR_GPI_EN0       0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1       0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2       0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3       0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW       0x00008000
+#define E1000_ICR_SRPD          0x00010000
+#define E1000_ICR_ACK           0x00020000 /* Receive Ack frame */
+#define E1000_ICR_MNG           0x00040000 /* Manageability event */
+#define E1000_ICR_DOCK          0x00080000 /* Dock/Undock */
+#define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit is asserted, the driver should claim the interrupt */
+#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICR_HOST_ARB_PAR  0x00400000 /* host arb read buffer parity error */
+#define E1000_ICR_PB_PAR        0x00800000 /* packet buffer parity error */
+#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICR_ALL_PARITY    0x03F00000 /* all parity error bits */
+#define E1000_ICR_DSW           0x00000020 /* FW changed the status of DISSW bit in the FWSM */
+#define E1000_ICR_PHYINT        0x00001000 /* LAN connected device generates an interrupt */
+#define E1000_ICR_EPRST         0x00100000 /* ME hardware reset occurred */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_ICS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_ICS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_ICS_RXO       E1000_ICR_RXO       /* rx overrun */
+#define E1000_ICS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_ICS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_ICS_RXCFG     E1000_ICR_RXCFG     /* RX /c/ ordered set */
+#define E1000_ICS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_ICS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_ICS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_ICS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_ICS_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_ICS_SRPD      E1000_ICR_SRPD
+#define E1000_ICS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_ICS_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_ICS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
+#define E1000_ICS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
+#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICS_DSW       E1000_ICR_DSW
+#define E1000_ICS_PHYINT    E1000_ICR_PHYINT
+#define E1000_ICS_EPRST     E1000_ICR_EPRST
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_IMS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_IMS_RXO       E1000_ICR_RXO       /* rx overrun */
+#define E1000_IMS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_IMS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_IMS_RXCFG     E1000_ICR_RXCFG     /* RX /c/ ordered set */
+#define E1000_IMS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_IMS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_IMS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_IMS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_IMS_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_IMS_SRPD      E1000_ICR_SRPD
+#define E1000_IMS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_IMS_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_IMS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
+#define E1000_IMS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
+#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMS_DSW       E1000_ICR_DSW
+#define E1000_IMS_PHYINT    E1000_ICR_PHYINT
+#define E1000_IMS_EPRST     E1000_ICR_EPRST
+
+/* Interrupt Mask Clear */
+#define E1000_IMC_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_IMC_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_IMC_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_IMC_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_IMC_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_IMC_RXO       E1000_ICR_RXO       /* rx overrun */
+#define E1000_IMC_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_IMC_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_IMC_RXCFG     E1000_ICR_RXCFG     /* RX /c/ ordered set */
+#define E1000_IMC_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_IMC_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_IMC_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_IMC_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_IMC_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_IMC_SRPD      E1000_ICR_SRPD
+#define E1000_IMC_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_IMC_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_IMC_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMC_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
+#define E1000_IMC_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
+#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMC_DSW       E1000_ICR_DSW
+#define E1000_IMC_PHYINT    E1000_ICR_PHYINT
+#define E1000_IMC_EPRST     E1000_ICR_EPRST
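+
+/* Note (illustrative, describes the usual usage pattern): ICS, IMS and IMC
+ * all mirror the ICR bit layout. Writing a bit to IMS unmasks the
+ * corresponding interrupt cause, writing it to IMC masks it again, and
+ * writing it to ICS raises it in software. For example, enabling only
+ * link-change and rx-timer interrupts amounts to composing
+ *     ims = E1000_IMS_LSC | E1000_IMS_RXT0;
+ * and handing that value to the driver's IMS register write helper.
+ */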
+
+/* Receive Control */
+#define E1000_RCTL_RST            0x00000001    /* Software reset */
+#define E1000_RCTL_EN             0x00000002    /* enable */
+#define E1000_RCTL_SBP            0x00000004    /* store bad packet */
+#define E1000_RCTL_UPE            0x00000008    /* unicast promiscuous enable */
+#define E1000_RCTL_MPE            0x00000010    /* multicast promiscuous enable */
+#define E1000_RCTL_LPE            0x00000020    /* long packet enable */
+#define E1000_RCTL_LBM_NO         0x00000000    /* no loopback mode */
+#define E1000_RCTL_LBM_MAC        0x00000040    /* MAC loopback mode */
+#define E1000_RCTL_LBM_SLP        0x00000080    /* serial link loopback mode */
+#define E1000_RCTL_LBM_TCVR       0x000000C0    /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_MASK      0x00000C00    /* Descriptor type mask */
+#define E1000_RCTL_DTYP_PS        0x00000400    /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF     0x00000000    /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_QUAT     0x00000100    /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_EIGTH    0x00000200    /* rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT       12            /* multicast offset shift */
+#define E1000_RCTL_MO_0           0x00000000    /* multicast offset 11:0 */
+#define E1000_RCTL_MO_1           0x00001000    /* multicast offset 12:1 */
+#define E1000_RCTL_MO_2           0x00002000    /* multicast offset 13:2 */
+#define E1000_RCTL_MO_3           0x00003000    /* multicast offset 15:4 */
+#define E1000_RCTL_MDR            0x00004000    /* multicast desc ring 0 */
+#define E1000_RCTL_BAM            0x00008000    /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048        0x00000000    /* rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024        0x00010000    /* rx buffer size 1024 */
+#define E1000_RCTL_SZ_512         0x00020000    /* rx buffer size 512 */
+#define E1000_RCTL_SZ_256         0x00030000    /* rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384       0x00010000    /* rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192        0x00020000    /* rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096        0x00030000    /* rx buffer size 4096 */
+#define E1000_RCTL_VFE            0x00040000    /* vlan filter enable */
+#define E1000_RCTL_CFIEN          0x00080000    /* canonical form enable */
+#define E1000_RCTL_CFI            0x00100000    /* canonical form indicator */
+#define E1000_RCTL_DPF            0x00400000    /* discard pause frames */
+#define E1000_RCTL_PMCF           0x00800000    /* pass MAC control frames */
+#define E1000_RCTL_BSEX           0x02000000    /* Buffer size extension */
+#define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */
+#define E1000_RCTL_FLXBUF_MASK    0x78000000    /* Flexible buffer size */
+#define E1000_RCTL_FLXBUF_SHIFT   27            /* Flexible buffer shift */
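+
+/* Example (illustrative sketch): the receive buffer size is encoded by the
+ * SZ field (bits 17:16) together with BSEX, so selecting a 4096-byte buffer
+ * means clearing both fields and then setting the extended encoding:
+ *     rctl &= ~(0x00030000 | E1000_RCTL_BSEX);
+ *     rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
+ * while a plain 2048-byte buffer is E1000_RCTL_SZ_2048 with BSEX cleared.
+ */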
+
+/* Use byte values for the following shift parameters
+ * Usage:
+ *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE0_MASK) |
+ *                ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE1_MASK) |
+ *                ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE2_MASK) |
+ *                ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256],  default=256
+ *       value1 = [1024..64512], default=4096
+ *       value2 = [0..64512],    default=4096
+ *       value3 = [0..64512],    default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK   0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT  7            /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT  2            /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT  6            /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14            /* Shift _left_ 14 */
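+
+/* Worked example (illustrative only, assuming a ROUNDUP(x, step) helper that
+ * rounds x up to a multiple of step): with the defaults value0=256 and
+ * value1=4096,
+ *     ROUNDUP(256, 128)   >> E1000_PSRCTL_BSIZE0_SHIFT = 256 >> 7   = 0x002
+ *     ROUNDUP(4096, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT = 4096 >> 2  = 0x400
+ * which land in BSIZE0_MASK (bits 6:0) and BSIZE1_MASK (bits 13:8)
+ * respectively, giving psrctl = 0x00000402 for those two buffers.
+ */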
+
+/* SW_W_SYNC definitions */
+#define E1000_SWFW_EEP_SM     0x0001
+#define E1000_SWFW_PHY0_SM    0x0002
+#define E1000_SWFW_PHY1_SM    0x0004
+#define E1000_SWFW_MAC_CSR_SM 0x0008
+
+/* Receive Descriptor */
+#define E1000_RDT_DELAY 0x0000ffff      /* Delay timer (1=1024us) */
+#define E1000_RDT_FPDB  0x80000000      /* Flush descriptor block */
+#define E1000_RDLEN_LEN 0x0007ff80      /* descriptor length */
+#define E1000_RDH_RDH   0x0000ffff      /* receive descriptor head */
+#define E1000_RDT_RDT   0x0000ffff      /* receive descriptor tail */
+
+/* Flow Control */
+#define E1000_FCRTH_RTH  0x0000FFF8     /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTH_XFCE 0x80000000     /* External Flow Control Enable */
+#define E1000_FCRTL_RTL  0x0000FFF8     /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000     /* Enable XON frame transmission */
+
+/* Header split receive */
+#define E1000_RFCTL_ISCSI_DIS           0x00000001
+#define E1000_RFCTL_ISCSI_DWC_MASK      0x0000003E
+#define E1000_RFCTL_ISCSI_DWC_SHIFT     1
+#define E1000_RFCTL_NFSW_DIS            0x00000040
+#define E1000_RFCTL_NFSR_DIS            0x00000080
+#define E1000_RFCTL_NFS_VER_MASK        0x00000300
+#define E1000_RFCTL_NFS_VER_SHIFT       8
+#define E1000_RFCTL_IPV6_DIS            0x00000400
+#define E1000_RFCTL_IPV6_XSUM_DIS       0x00000800
+#define E1000_RFCTL_ACK_DIS             0x00001000
+#define E1000_RFCTL_ACKD_DIS            0x00002000
+#define E1000_RFCTL_IPFRSP_DIS          0x00004000
+#define E1000_RFCTL_EXTEN               0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS         0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS    0x00020000
+
+/* Receive Descriptor Control */
+#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */
+#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */
+#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */
+#define E1000_RXDCTL_GRAN    0x01000000 /* RXDCTL Granularity */
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x000000FF /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x0000FF00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x00FF0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN    0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
+                                              still to be processed. */
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD         0x00000020        /* TXCW full duplex */
+#define E1000_TXCW_HD         0x00000040        /* TXCW half duplex */
+#define E1000_TXCW_PAUSE      0x00000080        /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR    0x00000100        /* TXCW asym pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180        /* TXCW pause request mask */
+#define E1000_TXCW_RF         0x00003000        /* TXCW remote fault */
+#define E1000_TXCW_NP         0x00008000        /* TXCW next page */
+#define E1000_TXCW_CW         0x0000ffff        /* TxConfigWord mask */
+#define E1000_TXCW_TXC        0x40000000        /* Transmit Config control */
+#define E1000_TXCW_ANE        0x80000000        /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW    0x0000ffff     /* RxConfigWord mask */
+#define E1000_RXCW_NC    0x04000000     /* Receive config no carrier */
+#define E1000_RXCW_IV    0x08000000     /* Receive config invalid */
+#define E1000_RXCW_CC    0x10000000     /* Receive config change */
+#define E1000_RXCW_C     0x20000000     /* Receive config */
+#define E1000_RXCW_SYNCH 0x40000000     /* Receive config synch */
+#define E1000_RXCW_ANC   0x80000000     /* Auto-neg complete */
+
+/* Transmit Control */
+#define E1000_TCTL_RST    0x00000001    /* software reset */
+#define E1000_TCTL_EN     0x00000002    /* enable tx */
+#define E1000_TCTL_BCE    0x00000004    /* busy check enable */
+#define E1000_TCTL_PSP    0x00000008    /* pad short packets */
+#define E1000_TCTL_CT     0x00000ff0    /* collision threshold */
+#define E1000_TCTL_COLD   0x003ff000    /* collision distance */
+#define E1000_TCTL_SWXOFF 0x00400000    /* SW Xoff transmission */
+#define E1000_TCTL_PBE    0x00800000    /* Packet Burst Enable */
+#define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
+#define E1000_TCTL_NRTU   0x02000000    /* No Re-transmit on underrun */
+#define E1000_TCTL_MULR   0x10000000    /* Multiple request support */
+/* Extended Transmit Control */
+#define E1000_TCTL_EXT_BST_MASK  0x000003FF /* Backoff Slot Time */
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
+
+#define DEFAULT_80003ES2LAN_TCTL_EXT_GCEX   0x00010000
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_PCSS_MASK 0x000000FF   /* Packet Checksum Start */
+#define E1000_RXCSUM_IPOFL     0x00000100   /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL     0x00000200   /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPV6OFL   0x00000400   /* IPv6 checksum offload */
+#define E1000_RXCSUM_IPPCSE    0x00001000   /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD      0x00002000   /* packet checksum disabled */
+
+/* Multiple Receive Queue Control */
+#define E1000_MRQC_ENABLE_MASK              0x00000003
+#define E1000_MRQC_ENABLE_RSS_2Q            0x00000001
+#define E1000_MRQC_ENABLE_RSS_INT           0x00000004
+#define E1000_MRQC_RSS_FIELD_MASK           0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP       0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4           0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX    0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6_EX        0x00080000
+#define E1000_MRQC_RSS_FIELD_IPV6           0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP       0x00200000
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME       0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN     0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME     0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_SPM        0x80000000 /* Enable SPM */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP  0x00000020 /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define E1000_WUFC_IGNORE_TCO      0x00008000 /* Ignore WakeOn TCO packets */
+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
+#define E1000_WUFC_FLX_OFFSET 16       /* Offset to the Flexible Filters bits */
+#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
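+
+/* Example (illustrative sketch): flexible filter n (0..3) is enabled by
+ * setting bit (E1000_WUFC_FLX_OFFSET + n), i.e.
+ *     wufc |= 1 << (E1000_WUFC_FLX_OFFSET + n);
+ * which is equivalent to OR-ing in the matching E1000_WUFC_FLX0..FLX3 bit.
+ */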
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */
+#define E1000_WUS_MAG  0x00000002 /* Magic Packet Received */
+#define E1000_WUS_EX   0x00000004 /* Directed Exact Received */
+#define E1000_WUS_MC   0x00000008 /* Directed Multicast Received */
+#define E1000_WUS_BC   0x00000010 /* Broadcast Received */
+#define E1000_WUS_ARP  0x00000020 /* ARP Request Packet Received */
+#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */
+#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */
+#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */
+#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */
+#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */
+#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */
+#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN        0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_R_ON_FORCE    0x00000004 /* Reset on Force TCO - RO */
+#define E1000_MANC_RMCP_EN       0x00000100 /* Enable RMCP 026Fh Filtering */
+#define E1000_MANC_0298_EN       0x00000200 /* Enable RMCP 0298h Filtering */
+#define E1000_MANC_IPV4_EN       0x00000400 /* Enable IPv4 */
+#define E1000_MANC_IPV6_EN       0x00000800 /* Enable IPv6 */
+#define E1000_MANC_SNAP_EN       0x00001000 /* Accept LLC/SNAP */
+#define E1000_MANC_ARP_EN        0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_NEIGHBOR_EN   0x00004000 /* Enable Neighbor Discovery
+                                             * Filtering */
+#define E1000_MANC_ARP_RES_EN    0x00008000 /* Enable ARP response Filtering */
+#define E1000_MANC_TCO_RESET     0x00010000 /* TCO Reset Occurred */
+#define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
+#define E1000_MANC_RCV_ALL       0x00080000 /* Receive All Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
+#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000 /* Enable MAC address
+                                                    * filtering */
+#define E1000_MANC_EN_MNG2HOST   0x00200000 /* Enable MNG packets to host
+                                             * memory */
+#define E1000_MANC_EN_IP_ADDR_FILTER    0x00400000 /* Enable IP address
+                                                    * filtering */
+#define E1000_MANC_EN_XSUM_FILTER   0x00800000 /* Enable checksum filtering */
+#define E1000_MANC_BR_EN         0x01000000 /* Enable broadcast filtering */
+#define E1000_MANC_SMB_REQ       0x01000000 /* SMBus Request */
+#define E1000_MANC_SMB_GNT       0x02000000 /* SMBus Grant */
+#define E1000_MANC_SMB_CLK_IN    0x04000000 /* SMBus Clock In */
+#define E1000_MANC_SMB_DATA_IN   0x08000000 /* SMBus Data In */
+#define E1000_MANC_SMB_DATA_OUT  0x10000000 /* SMBus Data Out */
+#define E1000_MANC_SMB_CLK_OUT   0x20000000 /* SMBus Clock Out */
+
+#define E1000_MANC_SMB_DATA_OUT_SHIFT  28 /* SMBus Data Out Shift */
+#define E1000_MANC_SMB_CLK_OUT_SHIFT   29 /* SMBus Clock Out Shift */
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI         0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI      0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_WMNG         0x00000004 /* Wake MNG Clock */
+#define E1000_SWSM_DRV_LOAD     0x00000008 /* Driver Loaded Bit */
+
+/* FW Semaphore Register */
+#define E1000_FWSM_MODE_MASK    0x0000000E /* FW mode */
+#define E1000_FWSM_MODE_SHIFT            1
+#define E1000_FWSM_FW_VALID     0x00008000 /* FW established a valid mode */
+
+#define E1000_FWSM_RSPCIPHY        0x00000040 /* Reset PHY on PCI reset */
+#define E1000_FWSM_DISSW           0x10000000 /* FW disable SW Write Access */
+#define E1000_FWSM_SKUSEL_MASK     0x60000000 /* LAN SKU select */
+#define E1000_FWSM_SKUEL_SHIFT     29
+#define E1000_FWSM_SKUSEL_EMB      0x0 /* Embedded SKU */
+#define E1000_FWSM_SKUSEL_CONS     0x1 /* Consumer SKU */
+#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */
+#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Corp GbE SKU */
+
+/* FFLT Debug Register */
+#define E1000_FFLT_DBG_INVC     0x00100000 /* Invalid /C/ code handling */
+
+typedef enum {
+    e1000_mng_mode_none     = 0,
+    e1000_mng_mode_asf,
+    e1000_mng_mode_pt,
+    e1000_mng_mode_ipmi,
+    e1000_mng_mode_host_interface_only
+} e1000_mng_mode;
+
+/* Host Interface Control Register */
+#define E1000_HICR_EN           0x00000001  /* Enable Bit - RO */
+#define E1000_HICR_C            0x00000002  /* Driver sets this bit when done
+                                             * to put command in RAM */
+#define E1000_HICR_SV           0x00000004  /* Status Validity */
+#define E1000_HICR_FWR          0x00000080  /* FW reset. Set by the Host */
+
+/* Host Interface Command Interface - Address range 0x8800-0x8EFF */
+#define E1000_HI_MAX_DATA_LENGTH         252 /* Host Interface data length */
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH  1792 /* Number of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH  448 /* Number of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT         500 /* Time in ms to process HI command */
+
+struct e1000_host_command_header {
+    uint8_t command_id;
+    uint8_t command_length;
+    uint8_t command_options;   /* I/F bits for command, status for return */
+    uint8_t checksum;
+};
+struct e1000_host_command_info {
+    struct e1000_host_command_header command_header;  /* Command header / result header is 4 bytes */
+    uint8_t command_data[E1000_HI_MAX_DATA_LENGTH];   /* Command data may be 0..252 bytes long */
+};
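+
+/* Note (illustrative arithmetic only): the 4-byte header plus
+ * E1000_HI_MAX_DATA_LENGTH bytes of data make struct e1000_host_command_info
+ * 256 bytes, so the 0x8800-0x8EFF range (E1000_HI_MAX_BLOCK_BYTE_LENGTH =
+ * 1792 bytes) holds 1792 / 256 = 7 such records.
+ */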
+
+/* Host SMB register #0 */
+#define E1000_HSMC0R_CLKIN      0x00000001  /* SMB Clock in */
+#define E1000_HSMC0R_DATAIN     0x00000002  /* SMB Data in */
+#define E1000_HSMC0R_DATAOUT    0x00000004  /* SMB Data out */
+#define E1000_HSMC0R_CLKOUT     0x00000008  /* SMB Clock out */
+
+/* Host SMB register #1 */
+#define E1000_HSMC1R_CLKIN      E1000_HSMC0R_CLKIN
+#define E1000_HSMC1R_DATAIN     E1000_HSMC0R_DATAIN
+#define E1000_HSMC1R_DATAOUT    E1000_HSMC0R_DATAOUT
+#define E1000_HSMC1R_CLKOUT     E1000_HSMC0R_CLKOUT
+
+/* FW Status Register */
+#define E1000_FWSTS_FWS_MASK    0x000000FF  /* FW Status */
+
+/* Wake Up Packet Length */
+#define E1000_WUPL_LENGTH_MASK 0x0FFF   /* Only the lower 12 bits are valid */
+
+#define E1000_MDALIGN          4096
+
+/* PCI-Ex registers*/
+
+/* PCI-Ex Control Register */
+#define E1000_GCR_RXD_NO_SNOOP          0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP       0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP       0x00000004
+#define E1000_GCR_TXD_NO_SNOOP          0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP       0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP       0x00000020
+
+#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP         | \
+                             E1000_GCR_RXDSCW_NO_SNOOP      | \
+                             E1000_GCR_RXDSCR_NO_SNOOP      | \
+                             E1000_GCR_TXD_NO_SNOOP         | \
+                             E1000_GCR_TXDSCW_NO_SNOOP      | \
+                             E1000_GCR_TXDSCR_NO_SNOOP)
+
+#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+/* Function Active and Power State to MNG */
+#define E1000_FACTPS_FUNC0_POWER_STATE_MASK         0x00000003
+#define E1000_FACTPS_LAN0_VALID                     0x00000004
+#define E1000_FACTPS_FUNC0_AUX_EN                   0x00000008
+#define E1000_FACTPS_FUNC1_POWER_STATE_MASK         0x000000C0
+#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT        6
+#define E1000_FACTPS_LAN1_VALID                     0x00000100
+#define E1000_FACTPS_FUNC1_AUX_EN                   0x00000200
+#define E1000_FACTPS_FUNC2_POWER_STATE_MASK         0x00003000
+#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT        12
+#define E1000_FACTPS_IDE_ENABLE                     0x00004000
+#define E1000_FACTPS_FUNC2_AUX_EN                   0x00008000
+#define E1000_FACTPS_FUNC3_POWER_STATE_MASK         0x000C0000
+#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT        18
+#define E1000_FACTPS_SP_ENABLE                      0x00100000
+#define E1000_FACTPS_FUNC3_AUX_EN                   0x00200000
+#define E1000_FACTPS_FUNC4_POWER_STATE_MASK         0x03000000
+#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT        24
+#define E1000_FACTPS_IPMI_ENABLE                    0x04000000
+#define E1000_FACTPS_FUNC4_AUX_EN                   0x08000000
+#define E1000_FACTPS_MNGCG                          0x20000000
+#define E1000_FACTPS_LAN_FUNC_SEL                   0x40000000
+#define E1000_FACTPS_PM_STATE_CHANGED               0x80000000
+
+/* EEPROM Commands - Microwire */
+#define EEPROM_READ_OPCODE_MICROWIRE  0x6  /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5  /* EEPROM write opcode */
+#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7  /* EEPROM erase opcode */
+#define EEPROM_EWEN_OPCODE_MICROWIRE  0x13 /* EEPROM erase/write enable */
+#define EEPROM_EWDS_OPCODE_MICROWIRE  0x10 /* EEPROM erase/write disable */
+
+/* EEPROM Commands - SPI */
+#define EEPROM_MAX_RETRY_SPI        5000 /* Max wait of 5ms, for RDY signal */
+#define EEPROM_READ_OPCODE_SPI      0x03  /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_SPI     0x02  /* EEPROM write opcode */
+#define EEPROM_A8_OPCODE_SPI        0x08  /* opcode bit-3 = address bit-8 */
+#define EEPROM_WREN_OPCODE_SPI      0x06  /* EEPROM set Write Enable latch */
+#define EEPROM_WRDI_OPCODE_SPI      0x04  /* EEPROM reset Write Enable latch */
+#define EEPROM_RDSR_OPCODE_SPI      0x05  /* EEPROM read Status register */
+#define EEPROM_WRSR_OPCODE_SPI      0x01  /* EEPROM write Status register */
+#define EEPROM_ERASE4K_OPCODE_SPI   0x20  /* EEPROM ERASE 4KB */
+#define EEPROM_ERASE64K_OPCODE_SPI  0xD8  /* EEPROM ERASE 64KB */
+#define EEPROM_ERASE256_OPCODE_SPI  0xDB  /* EEPROM ERASE 256B */
+
+/* EEPROM Size definitions */
+#define EEPROM_WORD_SIZE_SHIFT  6
+#define EEPROM_SIZE_SHIFT       10
+#define EEPROM_SIZE_MASK        0x1C00
+
+/* EEPROM Word Offsets */
+#define EEPROM_COMPAT                 0x0003
+#define EEPROM_ID_LED_SETTINGS        0x0004
+#define EEPROM_VERSION                0x0005
+#define EEPROM_SERDES_AMPLITUDE       0x0006 /* For SERDES output amplitude adjustment. */
+#define EEPROM_PHY_CLASS_WORD         0x0007
+#define EEPROM_INIT_CONTROL1_REG      0x000A
+#define EEPROM_INIT_CONTROL2_REG      0x000F
+#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010
+#define EEPROM_INIT_CONTROL3_PORT_B   0x0014
+#define EEPROM_INIT_3GIO_3            0x001A
+#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define EEPROM_INIT_CONTROL3_PORT_A   0x0024
+#define EEPROM_CFG                    0x0012
+#define EEPROM_FLASH_VERSION          0x0032
+#define EEPROM_CHECKSUM_REG           0x003F
+
+#define E1000_EEPROM_CFG_DONE         0x00040000   /* MNG config cycle done */
+#define E1000_EEPROM_CFG_DONE_PORT_1  0x00080000   /* ...for second port */
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_RESERVED_82573  0xF746
+#define ID_LED_DEFAULT_82573   0x1811
+#define ID_LED_DEFAULT       ((ID_LED_OFF1_ON2 << 12) | \
+                              (ID_LED_OFF1_OFF2 << 8) | \
+                              (ID_LED_DEF1_DEF2 << 4) | \
+                              (ID_LED_DEF1_DEF2))
+#define ID_LED_DEFAULT_ICH8LAN  ((ID_LED_DEF1_DEF2 << 12) | \
+                                 (ID_LED_DEF1_OFF2 <<  8) | \
+                                 (ID_LED_DEF1_ON2  <<  4) | \
+                                 (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2     0x1
+#define ID_LED_DEF1_ON2      0x2
+#define ID_LED_DEF1_OFF2     0x3
+#define ID_LED_ON1_DEF2      0x4
+#define ID_LED_ON1_ON2       0x5
+#define ID_LED_ON1_OFF2      0x6
+#define ID_LED_OFF1_DEF2     0x7
+#define ID_LED_OFF1_ON2      0x8
+#define ID_LED_OFF1_OFF2     0x9
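+
+/* Worked example (illustrative only): each 4-bit field of the ID LED word
+ * selects the behaviour of one LED, so ID_LED_DEFAULT above expands to
+ *     (0x8 << 12) | (0x9 << 8) | (0x1 << 4) | 0x1 = 0x8911
+ * using the ID_LED_* values just defined.
+ */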
+
+#define IGP_ACTIVITY_LED_MASK   0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE           0x07000000
+
+
+/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */
+#define EEPROM_SERDES_AMPLITUDE_MASK  0x000F
+
+/* Mask bit for PHY class in Word 7 of the EEPROM */
+#define EEPROM_PHY_CLASS_A   0x8000
+
+/* Mask bits for fields in Word 0x0a of the EEPROM */
+#define EEPROM_WORD0A_ILOS   0x0010
+#define EEPROM_WORD0A_SWDPIO 0x01E0
+#define EEPROM_WORD0A_LRST   0x0200
+#define EEPROM_WORD0A_FD     0x0400
+#define EEPROM_WORD0A_66MHZ  0x0800
+
+/* Mask bits for fields in Word 0x0f of the EEPROM */
+#define EEPROM_WORD0F_PAUSE_MASK 0x3000
+#define EEPROM_WORD0F_PAUSE      0x1000
+#define EEPROM_WORD0F_ASM_DIR    0x2000
+#define EEPROM_WORD0F_ANE        0x0800
+#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0
+#define EEPROM_WORD0F_LPLU       0x0001
+
+/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */
+#define EEPROM_WORD1020_GIGA_DISABLE         0x0010
+#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008
+
+/* Mask bits for fields in Word 0x1a of the EEPROM */
+#define EEPROM_WORD1A_ASPM_MASK  0x000C
+
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */
+#define EEPROM_SUM 0xBABA
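+
+/* Verification sketch (illustrative only, assuming a hypothetical
+ * read_eeprom_word() helper that returns the 16-bit word at a given offset):
+ * the checksum passes when the 16-bit sum of words 0x00 through
+ * EEPROM_CHECKSUM_REG equals EEPROM_SUM, i.e.
+ *     uint16_t sum = 0;
+ *     for (i = 0; i <= EEPROM_CHECKSUM_REG; i++)
+ *         sum += read_eeprom_word(i);
+ *     valid = (sum == EEPROM_SUM);
+ */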
+
+/* EEPROM Map defines (WORD OFFSETS)*/
+#define EEPROM_NODE_ADDRESS_BYTE_0 0
+#define EEPROM_PBA_BYTE_1          8
+
+#define EEPROM_RESERVED_WORD          0xFFFF
+
+/* EEPROM Map Sizes (Byte Counts) */
+#define PBA_SIZE 4
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD       15
+#define E1000_CT_SHIFT                  4
+/* Collision distance is a 0-based value that applies to
+ * half-duplex-capable hardware only. */
+#define E1000_COLLISION_DISTANCE        63
+#define E1000_COLLISION_DISTANCE_82542  64
+#define E1000_FDX_COLLISION_DISTANCE    E1000_COLLISION_DISTANCE
+#define E1000_HDX_COLLISION_DISTANCE    E1000_COLLISION_DISTANCE
+#define E1000_COLD_SHIFT                12
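+
+/* Example (illustrative sketch): the collision threshold and distance are
+ * bit fields of TCTL positioned with the shifts above, so one typical way to
+ * compose TCTL is:
+ *     tctl |= E1000_TCTL_EN | E1000_TCTL_PSP |
+ *             (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT) |
+ *             (E1000_COLLISION_DISTANCE  << E1000_COLD_SHIFT);
+ * which keeps both values inside E1000_TCTL_CT and E1000_TCTL_COLD.
+ */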
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE  8
+#define REQ_RX_DESCRIPTOR_MULTIPLE  8
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82542_TIPG_IPGT        10
+#define DEFAULT_82543_TIPG_IPGT_FIBER  9
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK  0x000003FF
+#define E1000_TIPG_IPGR1_MASK 0x000FFC00
+#define E1000_TIPG_IPGR2_MASK 0x3FF00000
+
+#define DEFAULT_82542_TIPG_IPGR1 2
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT  10
+
+#define DEFAULT_82542_TIPG_IPGR2 10
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT  20
+
+#define DEFAULT_80003ES2LAN_TIPG_IPGT_10_100 0x00000009
+#define DEFAULT_80003ES2LAN_TIPG_IPGT_1000   0x00000008
+#define E1000_TXDMAC_DPP 0x00000001
+
+/* Adaptive IFS defines */
+#define TX_THRESHOLD_START     8
+#define TX_THRESHOLD_INCREMENT 10
+#define TX_THRESHOLD_DECREMENT 1
+#define TX_THRESHOLD_STOP      190
+#define TX_THRESHOLD_DISABLE   0
+#define TX_THRESHOLD_TIMER_MS  10000
+#define MIN_NUM_XMITS          1000
+#define IFS_MAX                80
+#define IFS_STEP               10
+#define IFS_MIN                40
+#define IFS_RATIO              4
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE  0x00000002
+#define E1000_EXTCNF_CTRL_D_UD_ENABLE       0x00000004
+#define E1000_EXTCNF_CTRL_D_UD_LATENCY      0x00000008
+#define E1000_EXTCNF_CTRL_D_UD_OWNER        0x00000010
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
+#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER   0x0FFF0000
+
+#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH    0x000000FF
+#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH   0x0000FF00
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH   0x00FF0000
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE  0x00000001
+#define E1000_EXTCNF_CTRL_SWFLAG            0x00000020
+
+/* PBA constants */
+#define E1000_PBA_8K 0x0008    /* 8KB, default Rx allocation */
+#define E1000_PBA_12K 0x000C    /* 12KB, default Rx allocation */
+#define E1000_PBA_16K 0x0010    /* 16KB, default TX allocation */
+#define E1000_PBA_22K 0x0016
+#define E1000_PBA_24K 0x0018
+#define E1000_PBA_30K 0x001E
+#define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_38K 0x0026
+#define E1000_PBA_40K 0x0028
+#define E1000_PBA_48K 0x0030    /* 48KB, default RX allocation */
+
+#define E1000_PBS_16K E1000_PBA_16K
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW  0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE         0x8808
+
+/* The historical defaults for the flow control values are given below. */
+#define FC_DEFAULT_HI_THRESH        (0x8000)    /* 32KB */
+#define FC_DEFAULT_LO_THRESH        (0x4000)    /* 16KB */
+#define FC_DEFAULT_TX_TIMER         (0x100)     /* ~130 us */
+
+/* PCIX Config space */
+#define PCIX_COMMAND_REGISTER    0xE6
+#define PCIX_STATUS_REGISTER_LO  0xE8
+#define PCIX_STATUS_REGISTER_HI  0xEA
+
+#define PCIX_COMMAND_MMRBC_MASK      0x000C
+#define PCIX_COMMAND_MMRBC_SHIFT     0x2
+#define PCIX_STATUS_HI_MMRBC_MASK    0x0060
+#define PCIX_STATUS_HI_MMRBC_SHIFT   0x5
+#define PCIX_STATUS_HI_MMRBC_4K      0x3
+#define PCIX_STATUS_HI_MMRBC_2K      0x2
+
+
+/* Number of bits required to shift right the "pause" bits from the
+ * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register.
+ */
+#define PAUSE_SHIFT 5
+
+/* Number of bits required to shift left the "SWDPIO" bits from the
+ * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register.
+ */
+#define SWDPIO_SHIFT 17
+
+/* Number of bits required to shift left the "SWDPIO_EXT" bits from the
+ * EEPROM word F (bits 7:4) to the bits 11:8 of The Extended CTRL register.
+ */
+#define SWDPIO__EXT_SHIFT 4
+
+/* Number of bits required to shift left the "ILOS" bit from the EEPROM
+ * (bit 4) to the "ILOS" (bit 7) field in the CTRL register.
+ */
+#define ILOS_SHIFT  3
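+
+/* Example (illustrative sketch): these shift counts move fields read from the
+ * EEPROM into their register positions, e.g. for the pause bits of EEPROM
+ * word 0x0f:
+ *     txcw |= (eeprom_word & EEPROM_WORD0F_PAUSE_MASK) >> PAUSE_SHIFT;
+ * which maps EEPROM bits 13:12 onto TXCW bits 8:7 (E1000_TXCW_PAUSE_MASK).
+ */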
+
+
+#define RECEIVE_BUFFER_ALIGN_SIZE  (256)
+
+/* Number of milliseconds we wait for auto-negotiation to complete */
+#define LINK_UP_TIMEOUT             500
+
+/* Number of 100 microseconds we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT      800
+/* Number of milliseconds we wait for the EEPROM auto-read done bit after MAC reset */
+#define AUTO_READ_DONE_TIMEOUT      10
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT             100
+
+#define E1000_TX_BUFFER_SIZE ((uint32_t)1514)
+
+/* The carrier extension symbol, as received by the NIC. */
+#define CARRIER_EXTENSION   0x0F
+
+/* TBI_ACCEPT macro definition:
+ *
+ * This macro requires:
+ *      adapter = a pointer to struct e1000_hw
+ *      status = the 8 bit status field of the RX descriptor with EOP set
+ *      error = the 8 bit error field of the RX descriptor with EOP set
+ *      length = the sum of all the length fields of the RX descriptors that
+ *               make up the current frame
+ *      last_byte = the last byte of the frame DMAed by the hardware
+ *      max_frame_length = the maximum frame length we want to accept.
+ *      min_frame_length = the minimum frame length we want to accept.
+ *
+ * This macro is a conditional that should be used in the interrupt
+ * handler's Rx processing routine when RxErrors have been detected.
+ *
+ * Typical use:
+ *  ...
+ *  if (TBI_ACCEPT) {
+ *      accept_frame = TRUE;
+ *      e1000_tbi_adjust_stats(adapter, MacAddress);
+ *      frame_length--;
+ *  } else {
+ *      accept_frame = FALSE;
+ *  }
+ *  ...
+ */
+
+#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \
+    ((adapter)->tbi_compatibility_on && \
+     (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
+     ((last_byte) == CARRIER_EXTENSION) && \
+     (((status) & E1000_RXD_STAT_VP) ? \
+          (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \
+           ((length) <= ((adapter)->max_frame_size + 1))) : \
+          (((length) > (adapter)->min_frame_size) && \
+           ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1)))))
+
+
+/* Structures, enums, and macros for the PHY */
+
+/* Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+#define E1000_CTRL_PHY_RESET_DIR  E1000_CTRL_SWDPIO0
+#define E1000_CTRL_PHY_RESET      E1000_CTRL_SWDPIN0
+#define E1000_CTRL_MDIO_DIR       E1000_CTRL_SWDPIO2
+#define E1000_CTRL_MDIO           E1000_CTRL_SWDPIN2
+#define E1000_CTRL_MDC_DIR        E1000_CTRL_SWDPIO3
+#define E1000_CTRL_MDC            E1000_CTRL_SWDPIN3
+#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR
+#define E1000_CTRL_PHY_RESET4     E1000_CTRL_EXT_SDP4_DATA
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CTRL         0x00 /* Control Register */
+#define PHY_STATUS       0x01 /* Status Register */
+#define PHY_ID1          0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2          0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV  0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY   0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP  0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL   0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS   0x0F /* Extended Status Reg */
+
+#define MAX_PHY_REG_ADDRESS        0x1F  /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG     0xF   /* Registers equal on all pages */
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS   0x11  /* PHY Specific Status Register */
+#define M88E1000_INT_ENABLE        0x12  /* Interrupt Enable Register */
+#define M88E1000_INT_STATUS        0x13  /* Interrupt Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14  /* Extended PHY Specific Control */
+#define M88E1000_RX_ERR_CNTR       0x15  /* Receive Error Counter */
+
+#define M88E1000_PHY_EXT_CTRL      0x1A  /* PHY extend control register */
+#define M88E1000_PHY_PAGE_SELECT   0x1D  /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL   0x1E  /* Its meaning depends on reg 29 */
+#define M88E1000_PHY_VCO_REG_BIT8  0x100 /* Bits 8 & 11 are adjusted for */
+#define M88E1000_PHY_VCO_REG_BIT11 0x800    /* improved BER performance */
+
+#define IGP01E1000_IEEE_REGS_PAGE  0x0000
+#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300
+#define IGP01E1000_IEEE_FORCE_GIGA      0x0140
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */
+#define IGP01E1000_PHY_PORT_CTRL   0x12 /* PHY Specific Control Register */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */
+#define IGP01E1000_GMII_FIFO       0x14 /* GMII FIFO Register */
+#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */
+#define IGP02E1000_PHY_POWER_MGMT      0x19
+#define IGP01E1000_PHY_PAGE_SELECT     0x1F /* PHY Page Select Core Register */
+
+/* IGP01E1000 AGC Registers - stores the cable length values*/
+#define IGP01E1000_PHY_AGC_A        0x1172
+#define IGP01E1000_PHY_AGC_B        0x1272
+#define IGP01E1000_PHY_AGC_C        0x1472
+#define IGP01E1000_PHY_AGC_D        0x1872
+
+/* IGP02E1000 AGC Registers for cable length values */
+#define IGP02E1000_PHY_AGC_A        0x11B1
+#define IGP02E1000_PHY_AGC_B        0x12B1
+#define IGP02E1000_PHY_AGC_C        0x14B1
+#define IGP02E1000_PHY_AGC_D        0x18B1
+
+/* IGP01E1000 DSP Reset Register */
+#define IGP01E1000_PHY_DSP_RESET   0x1F33
+#define IGP01E1000_PHY_DSP_SET     0x1F71
+#define IGP01E1000_PHY_DSP_FFE     0x1F35
+
+#define IGP01E1000_PHY_CHANNEL_NUM    4
+#define IGP02E1000_PHY_CHANNEL_NUM    4
+
+#define IGP01E1000_PHY_AGC_PARAM_A    0x1171
+#define IGP01E1000_PHY_AGC_PARAM_B    0x1271
+#define IGP01E1000_PHY_AGC_PARAM_C    0x1471
+#define IGP01E1000_PHY_AGC_PARAM_D    0x1871
+
+#define IGP01E1000_PHY_EDAC_MU_INDEX        0xC000
+#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000
+
+#define IGP01E1000_PHY_ANALOG_TX_STATE      0x2890
+#define IGP01E1000_PHY_ANALOG_CLASS_A       0x2000
+#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE  0x0004
+#define IGP01E1000_PHY_DSP_FFE_CM_CP        0x0069
+
+#define IGP01E1000_PHY_DSP_FFE_DEFAULT      0x002A
+/* IGP01E1000 PCS Initialization register - stores the polarity status when
+ * speed = 1000 Mbps. */
+#define IGP01E1000_PHY_PCS_INIT_REG  0x00B4
+#define IGP01E1000_PHY_PCS_CTRL_REG  0x00B5
+
+#define IGP01E1000_ANALOG_REGS_PAGE  0x20C0
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT        5
+#define GG82563_REG(page, reg)    \
+        (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG       30
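+
+/* Worked example (illustrative only): GG82563_REG() packs the page number
+ * into bits 15:5 and the register offset into bits 4:0, so
+ *     GG82563_REG(193, 16) = (193 << 5) | 16 = 0x1830
+ * which is the value behind GG82563_PHY_KMRN_MODE_CTRL below.
+ */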
+
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL           \
+        GG82563_REG(0, 16) /* PHY Specific Control */
+#define GG82563_PHY_SPEC_STATUS         \
+        GG82563_REG(0, 17) /* PHY Specific Status */
+#define GG82563_PHY_INT_ENABLE          \
+        GG82563_REG(0, 18) /* Interrupt Enable */
+#define GG82563_PHY_SPEC_STATUS_2       \
+        GG82563_REG(0, 19) /* PHY Specific Status 2 */
+#define GG82563_PHY_RX_ERR_CNTR         \
+        GG82563_REG(0, 21) /* Receive Error Counter */
+#define GG82563_PHY_PAGE_SELECT         \
+        GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2         \
+        GG82563_REG(0, 26) /* PHY Specific Control 2 */
+#define GG82563_PHY_PAGE_SELECT_ALT     \
+        GG82563_REG(0, 29) /* Alternate Page Select */
+#define GG82563_PHY_TEST_CLK_CTRL       \
+        GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
+
+#define GG82563_PHY_MAC_SPEC_CTRL       \
+        GG82563_REG(2, 21) /* MAC Specific Control Register */
+#define GG82563_PHY_MAC_SPEC_CTRL_2     \
+        GG82563_REG(2, 26) /* MAC Specific Control 2 */
+
+#define GG82563_PHY_DSP_DISTANCE    \
+        GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+#define GG82563_PHY_KMRN_MODE_CTRL   \
+        GG82563_REG(193, 16) /* Kumeran Mode Control */
+#define GG82563_PHY_PORT_RESET          \
+        GG82563_REG(193, 17) /* Port Reset */
+#define GG82563_PHY_REVISION_ID         \
+        GG82563_REG(193, 18) /* Revision ID */
+#define GG82563_PHY_DEVICE_ID           \
+        GG82563_REG(193, 19) /* Device ID */
+#define GG82563_PHY_PWR_MGMT_CTRL       \
+        GG82563_REG(193, 20) /* Power Management Control */
+#define GG82563_PHY_RATE_ADAPT_CTRL     \
+        GG82563_REG(193, 25) /* Rate Adaptation Control */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
+        GG82563_REG(194, 16) /* FIFO's Control/Status */
+#define GG82563_PHY_KMRN_CTRL           \
+        GG82563_REG(194, 17) /* Control */
+#define GG82563_PHY_INBAND_CTRL         \
+        GG82563_REG(194, 18) /* Inband Control */
+#define GG82563_PHY_KMRN_DIAGNOSTIC     \
+        GG82563_REG(194, 19) /* Diagnostic */
+#define GG82563_PHY_ACK_TIMEOUTS        \
+        GG82563_REG(194, 20) /* Acknowledge Timeouts */
+#define GG82563_PHY_ADV_ABILITY         \
+        GG82563_REG(194, 21) /* Advertised Ability */
+#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
+        GG82563_REG(194, 23) /* Link Partner Advertised Ability */
+#define GG82563_PHY_ADV_NEXT_PAGE       \
+        GG82563_REG(194, 24) /* Advertised Next Page */
+#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
+        GG82563_REG(194, 25) /* Link Partner Advertised Next page */
+#define GG82563_PHY_KMRN_MISC           \
+        GG82563_REG(194, 26) /* Misc. */
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080  /* Collision test enable */
+#define MII_CR_FULL_DUPLEX      0x0100  /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200  /* Restart auto negotiation */
+#define MII_CR_ISOLATE          0x0400  /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN       0x0800  /* Power down */
+#define MII_CR_AUTO_NEG_EN      0x1000  /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK         0x4000  /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET            0x8000  /* 0 = normal, 1 = PHY reset */
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS     0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT     0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS       0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS      0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT      0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE  0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS   0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS     0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS     0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS       0x0800 /* 10T   Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS       0x1000 /* 10T   Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS      0x2000 /* 100X  Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS      0x4000 /* 100X  Full Duplex Capable */
+#define MII_SR_100T4_CAPS        0x8000 /* 100T4 Capable */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_SELECTOR_FIELD 0x0001   /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS    0x0020   /* 10T   Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS    0x0040   /* 10T   Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS  0x0080   /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS  0x0100   /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS     0x0200   /* 100T4 Capable */
+#define NWAY_AR_PAUSE          0x0400   /* Pause operation desired */
+#define NWAY_AR_ASM_DIR        0x0800   /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT   0x2000   /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE      0x8000   /* Next Page ability supported */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS    0x0020 /* LP is 10T   Half Duplex Capable */
+#define NWAY_LPAR_10T_FD_CAPS    0x0040 /* LP is 10T   Full Duplex Capable */
+#define NWAY_LPAR_100TX_HD_CAPS  0x0080 /* LP is 100TX Half Duplex Capable */
+#define NWAY_LPAR_100TX_FD_CAPS  0x0100 /* LP is 100TX Full Duplex Capable */
+#define NWAY_LPAR_100T4_CAPS     0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE          0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR        0x0800 /* LP Asymmetric Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT   0x2000 /* LP has detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE    0x4000 /* LP has rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE      0x8000 /* Next Page ability supported */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS      0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD          0x0002 /* New Page received from LP */
+#define NWAY_ER_NEXT_PAGE_CAPS    0x0004 /* Local device is Next Page capable */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page capable */
+#define NWAY_ER_PAR_DETECT_FAULT  0x0010 /* Parallel Detection Fault */
+
+/* Next Page TX Register */
+#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define NPTX_TOGGLE         0x0800 /* Toggles between exchanges
+                                    * of different NP
+                                    */
+#define NPTX_ACKNOWLDGE2    0x1000 /* 1 = will comply with msg
+                                    * 0 = cannot comply with msg
+                                    */
+#define NPTX_MSG_PAGE       0x2000 /* formatted(1)/unformatted(0) pg */
+#define NPTX_NEXT_PAGE      0x8000 /* 1 = addition NP will follow
+                                    * 0 = sending last NP
+                                    */
+
+/* Link Partner Next Page Register */
+#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define LP_RNPR_TOGGLE         0x0800 /* Toggles between exchanges
+                                       * of different NP
+                                       */
+#define LP_RNPR_ACKNOWLDGE2    0x1000 /* 1 = will comply with msg
+                                       * 0 = cannot comply with msg
+                                       */
+#define LP_RNPR_MSG_PAGE       0x2000  /* formatted(1)/unformatted(0) pg */
+#define LP_RNPR_ACKNOWLDGE     0x4000  /* 1 = ACK / 0 = NO ACK */
+#define LP_RNPR_NEXT_PAGE      0x8000  /* 1 = addition NP will follow
+                                        * 0 = sending last NP
+                                        */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE      0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS         0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS         0x0200 /* Advertise 1000T FD capability  */
+#define CR_1000T_REPEATER_DTE    0x0400 /* 1=Repeater/switch device port */
+                                        /* 0=DTE device */
+#define CR_1000T_MS_VALUE        0x0800 /* 1=Configure PHY as Master */
+                                        /* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE       0x1000 /* 1=Master/Slave manual config value */
+                                        /* 0=Automatic Master/Slave config */
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1     0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2     0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3     0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4     0x8000 /* Transmitter Distortion test */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_IDLE_ERROR_CNT   0x00FF /* Num idle errors since last read */
+#define SR_1000T_ASYM_PAUSE_DIR   0x0100 /* LP asymmetric pause direction bit */
+#define SR_1000T_LP_HD_CAPS       0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS       0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS  0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES    0x4000 /* 1=Local TX is Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT  0x8000 /* Master/Slave config fault */
+#define SR_1000T_REMOTE_RX_STATUS_SHIFT          12
+#define SR_1000T_LOCAL_RX_STATUS_SHIFT           13
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT    5
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20            20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100           100
+
+/* Extended Status Register */
+#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
+#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
+#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
+#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
+
+#define PHY_TX_POLARITY_MASK   0x0100 /* register 10h bit 8 (polarity bit) */
+#define PHY_TX_NORMAL_POLARITY 0      /* register 10h bit 8 (normal polarity) */
+
+#define AUTO_POLARITY_DISABLE  0x0010 /* register 11h bit 4 */
+                                      /* (0=enable, 1=disable) */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_JABBER_DISABLE    0x0001 /* 1=Jabber Function disabled */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define M88E1000_PSCR_SQE_TEST          0x0004 /* 1=SQE Test enabled */
+#define M88E1000_PSCR_CLK125_DISABLE    0x0010 /* 1=CLK125 low,
+                                                * 0=CLK125 toggling
+                                                */
+#define M88E1000_PSCR_MDI_MANUAL_MODE  0x0000  /* MDI Crossover Mode bits 6:5 */
+                                               /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020  /* Manual MDIX configuration */
+#define M88E1000_PSCR_AUTO_X_1000T     0x0040  /* 1000BASE-T: Auto crossover,
+                                                *  100BASE-TX/10BASE-T:
+                                                *  MDI Mode
+                                                */
+#define M88E1000_PSCR_AUTO_X_MODE      0x0060  /* Auto crossover enabled
+                                                * all speeds.
+                                                */
+#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080
+                                        /* 1=Enable Extended 10BASE-T distance
+                                         * (Lower 10BASE-T RX Threshold)
+                                         * 0=Normal 10BASE-T RX Threshold */
+#define M88E1000_PSCR_MII_5BIT_ENABLE      0x0100
+                                        /* 1=5-Bit interface in 100BASE-TX
+                                         * 0=MII interface in 100BASE-TX */
+#define M88E1000_PSCR_SCRAMBLER_DISABLE    0x0200 /* 1=Scrambler disable */
+#define M88E1000_PSCR_FORCE_LINK_GOOD      0x0400 /* 1=Force link good */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX     0x0800 /* 1=Assert CRS on Transmit */
+
+#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT    1
+#define M88E1000_PSCR_AUTO_X_MODE_SHIFT          5
+#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_JABBER             0x0001 /* 1=Jabber */
+#define M88E1000_PSSR_REV_POLARITY       0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT          0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX               0x0040 /* 1=MDIX; 0=MDI */
+#define M88E1000_PSSR_CABLE_LENGTH       0x0380 /* 0=<50M;1=50-80M;2=80-110M;
+                                            * 3=110-140M;4=>140M */
+#define M88E1000_PSSR_LINK               0x0400 /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED  0x0800 /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_PAGE_RCVD          0x1000 /* 1=Page received */
+#define M88E1000_PSSR_DPLX               0x2000 /* 1=Duplex 0=Half Duplex */
+#define M88E1000_PSSR_SPEED              0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_10MBS              0x0000 /* 00=10Mbs */
+#define M88E1000_PSSR_100MBS             0x4000 /* 01=100Mbs */
+#define M88E1000_PSSR_1000MBS            0x8000 /* 10=1000Mbs */
+
+#define M88E1000_PSSR_REV_POLARITY_SHIFT 1
+#define M88E1000_PSSR_DOWNSHIFT_SHIFT    5
+#define M88E1000_PSSR_MDIX_SHIFT         6
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
+/* M88E1000 Extended PHY Specific Control Register */
+#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
+#define M88E1000_EPSCR_DOWN_NO_IDLE   0x8000 /* 1=Lost lock detect enabled.
+                                              * Will assert lost lock and bring
+                                              * link down if idle not seen
+                                              * within 1ms in 1000BASE-T
+                                              */
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X   0x0000
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X   0x0400
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X   0x0800
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X   0x0C00
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK  0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS   0x0000
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X    0x0100
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X    0x0200
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X    0x0300
+#define M88E1000_EPSCR_TX_CLK_2_5     0x0060 /* 2.5 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_25      0x0070 /* 25  MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_0       0x0000 /* NO  TX_CLK */
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X    0x0000
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X    0x0200
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X    0x0400
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X    0x0600
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X    0x0A00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X    0x0C00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X    0x0E00
+
+/* IGP01E1000 Specific Port Config Register - R/W */
+#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT  0x0010
+#define IGP01E1000_PSCFR_PRE_EN                0x0020
+#define IGP01E1000_PSCFR_SMART_SPEED           0x0080
+#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK    0x0100
+#define IGP01E1000_PSCFR_DISABLE_JABBER        0x0400
+#define IGP01E1000_PSCFR_DISABLE_TRANSMIT      0x2000
+
+/* IGP01E1000 Specific Port Status Register - R/O */
+#define IGP01E1000_PSSR_AUTONEG_FAILED         0x0001 /* RO LH SC */
+#define IGP01E1000_PSSR_POLARITY_REVERSED      0x0002
+#define IGP01E1000_PSSR_CABLE_LENGTH           0x007C
+#define IGP01E1000_PSSR_FULL_DUPLEX            0x0200
+#define IGP01E1000_PSSR_LINK_UP                0x0400
+#define IGP01E1000_PSSR_MDIX                   0x0800
+#define IGP01E1000_PSSR_SPEED_MASK             0xC000 /* speed bits mask */
+#define IGP01E1000_PSSR_SPEED_10MBPS           0x4000
+#define IGP01E1000_PSSR_SPEED_100MBPS          0x8000
+#define IGP01E1000_PSSR_SPEED_1000MBPS         0xC000
+#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT     0x0002 /* shift right 2 */
+#define IGP01E1000_PSSR_MDIX_SHIFT             0x000B /* shift right 11 */
+
+/* IGP01E1000 Specific Port Control Register - R/W */
+#define IGP01E1000_PSCR_TP_LOOPBACK            0x0010
+#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR      0x0200
+#define IGP01E1000_PSCR_TEN_CRS_SELECT         0x0400
+#define IGP01E1000_PSCR_FLIP_CHIP              0x0800
+#define IGP01E1000_PSCR_AUTO_MDIX              0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX         0x2000 /* 0-MDI, 1-MDIX */
+
+/* IGP01E1000 Specific Port Link Health Register */
+#define IGP01E1000_PLHR_SS_DOWNGRADE           0x8000
+#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR    0x4000
+#define IGP01E1000_PLHR_MASTER_FAULT           0x2000
+#define IGP01E1000_PLHR_MASTER_RESOLUTION      0x1000
+#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK       0x0800 /* LH */
+#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW   0x0400 /* LH */
+#define IGP01E1000_PLHR_DATA_ERR_1             0x0200 /* LH */
+#define IGP01E1000_PLHR_DATA_ERR_0             0x0100
+#define IGP01E1000_PLHR_AUTONEG_FAULT          0x0040
+#define IGP01E1000_PLHR_AUTONEG_ACTIVE         0x0010
+#define IGP01E1000_PLHR_VALID_CHANNEL_D        0x0008
+#define IGP01E1000_PLHR_VALID_CHANNEL_C        0x0004
+#define IGP01E1000_PLHR_VALID_CHANNEL_B        0x0002
+#define IGP01E1000_PLHR_VALID_CHANNEL_A        0x0001
+
+/* IGP01E1000 Channel Quality Register */
+#define IGP01E1000_MSE_CHANNEL_D        0x000F
+#define IGP01E1000_MSE_CHANNEL_C        0x00F0
+#define IGP01E1000_MSE_CHANNEL_B        0x0F00
+#define IGP01E1000_MSE_CHANNEL_A        0xF000
+
+#define IGP02E1000_PM_SPD                         0x0001  /* Smart Power Down */
+#define IGP02E1000_PM_D3_LPLU                     0x0004  /* Enable LPLU in non-D0a modes */
+#define IGP02E1000_PM_D0_LPLU                     0x0002  /* Enable LPLU in D0a mode */
+
+/* IGP01E1000 DSP reset macros */
+#define DSP_RESET_ENABLE     0x0
+#define DSP_RESET_DISABLE    0x2
+#define E1000_MAX_DSP_RESETS 10
+
+/* IGP01E1000 & IGP02E1000 AGC Registers */
+
+#define IGP01E1000_AGC_LENGTH_SHIFT 7         /* Coarse - 13:11, Fine - 10:7 */
+#define IGP02E1000_AGC_LENGTH_SHIFT 9         /* Coarse - 15:13, Fine - 12:9 */
+
+/* IGP02E1000 AGC Register Length 9-bit mask */
+#define IGP02E1000_AGC_LENGTH_MASK  0x7F
+
+/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */
+#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128
+#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113
+
+/* The precision error of the cable length is +/- 10 meters */
+#define IGP01E1000_AGC_RANGE    10
+#define IGP02E1000_AGC_RANGE    15
+
+/* IGP01E1000 PCS Initialization register */
+/* bits 3:6 in the PCS registers stores the channels polarity */
+#define IGP01E1000_PHY_POLARITY_MASK    0x0078
+
+/* IGP01E1000 GMII FIFO Register */
+#define IGP01E1000_GMII_FLEX_SPD               0x10 /* Enable flexible speed
+                                                     * on Link-Up */
+#define IGP01E1000_GMII_SPD                    0x20 /* Enable SPD */
+
+/* IGP01E1000 Analog Register */
+#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS       0x20D1
+#define IGP01E1000_ANALOG_FUSE_STATUS             0x20D0
+#define IGP01E1000_ANALOG_FUSE_CONTROL            0x20DC
+#define IGP01E1000_ANALOG_FUSE_BYPASS             0x20DE
+
+#define IGP01E1000_ANALOG_FUSE_POLY_MASK            0xF000
+#define IGP01E1000_ANALOG_FUSE_FINE_MASK            0x0F80
+#define IGP01E1000_ANALOG_FUSE_COARSE_MASK          0x0070
+#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED        0x0100
+#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL    0x0002
+
+#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH        0x0040
+#define IGP01E1000_ANALOG_FUSE_COARSE_10            0x0010
+#define IGP01E1000_ANALOG_FUSE_FINE_1               0x0080
+#define IGP01E1000_ANALOG_FUSE_FINE_10              0x0500
+
+/* GG82563 PHY Specific Control Register (Page 0, Register 16) */
+#define GG82563_PSCR_DISABLE_JABBER             0x0001 /* 1=Disable Jabber */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE  0x0002 /* 1=Polarity Reversal Disabled */
+#define GG82563_PSCR_POWER_DOWN                 0x0004 /* 1=Power Down */
+#define GG82563_PSCR_COPPER_TRANSMITER_DISABLE  0x0008 /* 1=Transmitter Disabled */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK        0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI         0x0000 /* 00=Manual MDI configuration */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX        0x0020 /* 01=Manual MDIX configuration */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO        0x0060 /* 11=Automatic crossover */
+#define GG82563_PSCR_ENALBE_EXTENDED_DISTANCE   0x0080 /* 1=Enable Extended Distance */
+#define GG82563_PSCR_ENERGY_DETECT_MASK         0x0300
+#define GG82563_PSCR_ENERGY_DETECT_OFF          0x0000 /* 00,01=Off */
+#define GG82563_PSCR_ENERGY_DETECT_RX           0x0200 /* 10=Sense on Rx only (Energy Detect) */
+#define GG82563_PSCR_ENERGY_DETECT_RX_TM        0x0300 /* 11=Sense and Tx NLP */
+#define GG82563_PSCR_FORCE_LINK_GOOD            0x0400 /* 1=Force Link Good */
+#define GG82563_PSCR_DOWNSHIFT_ENABLE           0x0800 /* 1=Enable Downshift */
+#define GG82563_PSCR_DOWNSHIFT_COUNTER_MASK     0x7000
+#define GG82563_PSCR_DOWNSHIFT_COUNTER_SHIFT    12
+
+/* PHY Specific Status Register (Page 0, Register 17) */
+#define GG82563_PSSR_JABBER                0x0001 /* 1=Jabber */
+#define GG82563_PSSR_POLARITY              0x0002 /* 1=Polarity Reversed */
+#define GG82563_PSSR_LINK                  0x0008 /* 1=Link is Up */
+#define GG82563_PSSR_ENERGY_DETECT         0x0010 /* 1=Sleep, 0=Active */
+#define GG82563_PSSR_DOWNSHIFT             0x0020 /* 1=Downshift */
+#define GG82563_PSSR_CROSSOVER_STATUS      0x0040 /* 1=MDIX, 0=MDI */
+#define GG82563_PSSR_RX_PAUSE_ENABLED      0x0100 /* 1=Receive Pause Enabled */
+#define GG82563_PSSR_TX_PAUSE_ENABLED      0x0200 /* 1=Transmit Pause Enabled */
+#define GG82563_PSSR_LINK_UP               0x0400 /* 1=Link Up */
+#define GG82563_PSSR_SPEED_DUPLEX_RESOLVED 0x0800 /* 1=Resolved */
+#define GG82563_PSSR_PAGE_RECEIVED         0x1000 /* 1=Page Received */
+#define GG82563_PSSR_DUPLEX                0x2000 /* 1-Full-Duplex */
+#define GG82563_PSSR_SPEED_MASK            0xC000
+#define GG82563_PSSR_SPEED_10MBPS          0x0000 /* 00=10Mbps */
+#define GG82563_PSSR_SPEED_100MBPS         0x4000 /* 01=100Mbps */
+#define GG82563_PSSR_SPEED_1000MBPS        0x8000 /* 10=1000Mbps */
+
+/* PHY Specific Status Register 2 (Page 0, Register 19) */
+#define GG82563_PSSR2_JABBER                0x0001 /* 1=Jabber */
+#define GG82563_PSSR2_POLARITY_CHANGED      0x0002 /* 1=Polarity Changed */
+#define GG82563_PSSR2_ENERGY_DETECT_CHANGED 0x0010 /* 1=Energy Detect Changed */
+#define GG82563_PSSR2_DOWNSHIFT_INTERRUPT   0x0020 /* 1=Downshift Detected */
+#define GG82563_PSSR2_MDI_CROSSOVER_CHANGE  0x0040 /* 1=Crossover Changed */
+#define GG82563_PSSR2_FALSE_CARRIER         0x0100 /* 1=False Carrier */
+#define GG82563_PSSR2_SYMBOL_ERROR          0x0200 /* 1=Symbol Error */
+#define GG82563_PSSR2_LINK_STATUS_CHANGED   0x0400 /* 1=Link Status Changed */
+#define GG82563_PSSR2_AUTO_NEG_COMPLETED    0x0800 /* 1=Auto-Neg Completed */
+#define GG82563_PSSR2_PAGE_RECEIVED         0x1000 /* 1=Page Received */
+#define GG82563_PSSR2_DUPLEX_CHANGED        0x2000 /* 1=Duplex Changed */
+#define GG82563_PSSR2_SPEED_CHANGED         0x4000 /* 1=Speed Changed */
+#define GG82563_PSSR2_AUTO_NEG_ERROR        0x8000 /* 1=Auto-Neg Error */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_10BT_POLARITY_FORCE           0x0002 /* 1=Force Negative Polarity */
+#define GG82563_PSCR2_1000MB_TEST_SELECT_MASK       0x000C
+#define GG82563_PSCR2_1000MB_TEST_SELECT_NORMAL     0x0000 /* 00,01=Normal Operation */
+#define GG82563_PSCR2_1000MB_TEST_SELECT_112NS      0x0008 /* 10=Select 112ns Sequence */
+#define GG82563_PSCR2_1000MB_TEST_SELECT_16NS       0x000C /* 11=Select 16ns Sequence */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG              0x2000 /* 1=Reverse Auto-Negotiation */
+#define GG82563_PSCR2_1000BT_DISABLE                0x4000 /* 1=Disable 1000BASE-T */
+#define GG82563_PSCR2_TRANSMITER_TYPE_MASK          0x8000
+#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_B      0x0000 /* 0=Class B */
+#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_A      0x8000 /* 1=Class A */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK                    0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ           0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25MHZ           0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_2_5MHZ         0x0006
+#define GG82563_MSCR_TX_CLK_1000MBPS_25MHZ          0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX               0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26) */
+#define GG82563_DSPD_CABLE_LENGTH               0x0007 /* 0 = <50M;
+                                                          1 = 50-80M;
+                                                          2 = 80-110M;
+                                                          3 = 110-140M;
+                                                          4 = >140M */
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PHY_LEDS_EN                    0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */
+#define GG82563_KMCR_FORCE_LINK_UP                  0x0040 /* 1=Force Link Up */
+#define GG82563_KMCR_SUPPRESS_SGMII_EPD_EXT         0x0080
+#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT_MASK     0x0400
+#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT          0x0400 /* 1=6.25MHz, 0=0.8MHz */
+#define GG82563_KMCR_PASS_FALSE_CARRIER             0x0800
+
+/* Power Management Control Register (Page 193, Register 20) */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE         0x0001 /* 1=Enable SERDES Electrical Idle */
+#define GG82563_PMCR_DISABLE_PORT                   0x0002 /* 1=Disable Port */
+#define GG82563_PMCR_DISABLE_SERDES                 0x0004 /* 1=Disable SERDES */
+#define GG82563_PMCR_REVERSE_AUTO_NEG               0x0008 /* 1=Enable Reverse Auto-Negotiation */
+#define GG82563_PMCR_DISABLE_1000_NON_D0            0x0010 /* 1=Disable 1000Mbps Auto-Neg in non D0 */
+#define GG82563_PMCR_DISABLE_1000                   0x0020 /* 1=Disable 1000Mbps Auto-Neg Always */
+#define GG82563_PMCR_REVERSE_AUTO_NEG_D0A           0x0040 /* 1=Enable D0a Reverse Auto-Negotiation */
+#define GG82563_PMCR_FORCE_POWER_STATE              0x0080 /* 1=Force Power State */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_MASK    0x0300
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_DR      0x0000 /* 00=Dr */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0U     0x0100 /* 01=D0u */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0A     0x0200 /* 10=D0a */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D3      0x0300 /* 11=D3 */
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING                     0x0010 /* Disable Padding Use */
+
+
+/* Bit definitions for valid PHY IDs. */
+/* I = Integrated
+ * E = External
+ */
+#define M88E1000_E_PHY_ID  0x01410C50
+#define M88E1000_I_PHY_ID  0x01410C30
+#define M88E1011_I_PHY_ID  0x01410C20
+#define IGP01E1000_I_PHY_ID  0x02A80380
+#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID
+#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
+#define M88E1011_I_REV_4   0x04
+#define M88E1111_I_PHY_ID  0x01410CC0
+#define L1LXT971A_PHY_ID   0x001378E0
+#define GG82563_E_PHY_ID   0x01410CA0
+
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define PHY_PAGE_SHIFT        5
+#define PHY_REG(page, reg)    \
+        (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
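+
+/*
+ * Illustrative note (not part of the original source): with this layout,
+ * PHY_REG(769, 17) evaluates to ((769 << 5) | 17) == 0x6031, i.e. page 769,
+ * register offset 17, which is how the IGP3_* definitions below are built.
+ */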
+
+#define IGP3_PHY_PORT_CTRL           \
+        PHY_REG(769, 17) /* Port General Configuration */
+#define IGP3_PHY_RATE_ADAPT_CTRL \
+        PHY_REG(769, 25) /* Rate Adapter Control Register */
+
+#define IGP3_KMRN_FIFO_CTRL_STATS \
+        PHY_REG(770, 16) /* KMRN FIFO's control/status register */
+#define IGP3_KMRN_POWER_MNG_CTRL \
+        PHY_REG(770, 17) /* KMRN Power Management Control Register */
+#define IGP3_KMRN_INBAND_CTRL \
+        PHY_REG(770, 18) /* KMRN Inband Control Register */
+#define IGP3_KMRN_DIAG \
+        PHY_REG(770, 19) /* KMRN Diagnostic register */
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */
+#define IGP3_KMRN_ACK_TIMEOUT \
+        PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */
+
+#define IGP3_VR_CTRL \
+        PHY_REG(776, 18) /* Voltage regulator control register */
+#define IGP3_VR_CTRL_MODE_SHUT       0x0200 /* Enter powerdown, shutdown VRs */
+
+#define IGP3_CAPABILITY \
+        PHY_REG(776, 19) /* IGP3 Capability Register */
+
+/* Capabilities for SKU Control  */
+#define IGP3_CAP_INITIATE_TEAM       0x0001 /* Able to initiate a team */
+#define IGP3_CAP_WFM                 0x0002 /* Support WoL and PXE */
+#define IGP3_CAP_ASF                 0x0004 /* Support ASF */
+#define IGP3_CAP_LPLU                0x0008 /* Support Low Power Link Up */
+#define IGP3_CAP_DC_AUTO_SPEED       0x0010 /* Support AC/DC Auto Link Speed */
+#define IGP3_CAP_SPD                 0x0020 /* Support Smart Power Down */
+#define IGP3_CAP_MULT_QUEUE          0x0040 /* Support 2 tx & 2 rx queues */
+#define IGP3_CAP_RSS                 0x0080 /* Support RSS */
+#define IGP3_CAP_8021PQ              0x0100 /* Support 802.1Q & 802.1p */
+#define IGP3_CAP_AMT_CB              0x0200 /* Support active manageability and circuit breaker */
+
+#define IGP3_PPC_JORDAN_EN           0x0001
+#define IGP3_PPC_JORDAN_GIGA_SPEED   0x0002
+
+#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS         0x0001
+#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK   0x001E
+#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA        0x0020
+#define IGP3_KMRN_PMC_K0S_MODE1_EN_100         0x0040
+
+#define IGP3E1000_PHY_MISC_CTRL                0x1B   /* Misc. Ctrl register */
+#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET        0x1000 /* Duplex Manual Set */
+
+#define IGP3_KMRN_EXT_CTRL  PHY_REG(770, 18)
+#define IGP3_KMRN_EC_DIS_INBAND    0x0080
+
+#define IGP03E1000_E_PHY_ID  0x02A80390
+#define IFE_E_PHY_ID         0x02A80330 /* 10/100 PHY */
+#define IFE_PLUS_E_PHY_ID    0x02A80320
+#define IFE_C_E_PHY_ID       0x02A80310
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL   0x10  /* 100BaseTx Extended Status, Control and Address */
+#define IFE_PHY_SPECIAL_CONTROL           0x11  /* 100BaseTx PHY special control register */
+#define IFE_PHY_RCV_FALSE_CARRIER         0x13  /* 100BaseTx Receive False Carrier Counter */
+#define IFE_PHY_RCV_DISCONNECT            0x14  /* 100BaseTx Receive Disconnect Counter */
+#define IFE_PHY_RCV_ERROT_FRAME           0x15  /* 100BaseTx Receive Error Frame Counter */
+#define IFE_PHY_RCV_SYMBOL_ERR            0x16  /* Receive Symbol Error Counter */
+#define IFE_PHY_PREM_EOF_ERR              0x17  /* 100BaseTx Receive Premature End Of Frame Error Counter */
+#define IFE_PHY_RCV_EOF_ERR               0x18  /* 10BaseT Receive End Of Frame Error Counter */
+#define IFE_PHY_TX_JABBER_DETECT          0x19  /* 10BaseT Transmit Jabber Detect Counter */
+#define IFE_PHY_EQUALIZER                 0x1A  /* PHY Equalizer Control and Status */
+#define IFE_PHY_SPECIAL_CONTROL_LED       0x1B  /* PHY special control and LED configuration */
+#define IFE_PHY_MDIX_CONTROL              0x1C  /* MDI/MDI-X Control register */
+#define IFE_PHY_HWI_CONTROL               0x1D  /* Hardware Integrity Control (HWI) */
+
+#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE  0x2000  /* Default 1 = Disable auto reduced power down */
+#define IFE_PESC_100BTX_POWER_DOWN           0x0400  /* Indicates the power state of 100BASE-TX */
+#define IFE_PESC_10BTX_POWER_DOWN            0x0200  /* Indicates the power state of 10BASE-T */
+#define IFE_PESC_POLARITY_REVERSED           0x0100  /* Indicates 10BASE-T polarity */
+#define IFE_PESC_PHY_ADDR_MASK               0x007C  /* Bit 6:2 for sampled PHY address */
+#define IFE_PESC_SPEED                       0x0002  /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */
+#define IFE_PESC_DUPLEX                      0x0001  /* Auto-negotiation duplex result 1=Full, 0=Half */
+#define IFE_PESC_POLARITY_REVERSED_SHIFT     8
+
+#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN   0x0100  /* 1 = Dynamic Power Down disabled */
+#define IFE_PSC_FORCE_POLARITY               0x0020  /* 1=Reversed Polarity, 0=Normal */
+#define IFE_PSC_AUTO_POLARITY_DISABLE        0x0010  /* 1=Auto Polarity Disabled, 0=Enabled */
+#define IFE_PSC_JABBER_FUNC_DISABLE          0x0001  /* 1=Jabber Disabled, 0=Normal Jabber Operation */
+#define IFE_PSC_FORCE_POLARITY_SHIFT         5
+#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT  4
+
+#define IFE_PMC_AUTO_MDIX                    0x0080  /* 1=enable MDI/MDI-X feature, default 0=disabled */
+#define IFE_PMC_FORCE_MDIX                   0x0040  /* 1=force MDIX-X, 0=force MDI */
+#define IFE_PMC_MDIX_STATUS                  0x0020  /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_AUTO_MDIX_COMPLETE           0x0010  /* Resolution algorithm is completed */
+#define IFE_PMC_MDIX_MODE_SHIFT              6
+#define IFE_PHC_MDIX_RESET_ALL_MASK          0x0000  /* Disable auto MDI-X */
+
+#define IFE_PHC_HWI_ENABLE                   0x8000  /* Enable the HWI feature */
+#define IFE_PHC_ABILITY_CHECK                0x4000  /* 1= Test Passed, 0=failed */
+#define IFE_PHC_TEST_EXEC                    0x2000  /* PHY launch test pulses on the wire */
+#define IFE_PHC_HIGHZ                        0x0200  /* 1 = Open Circuit */
+#define IFE_PHC_LOWZ                         0x0400  /* 1 = Short Circuit */
+#define IFE_PHC_LOW_HIGH_Z_MASK              0x0600  /* Mask for indication type of problem on the line */
+#define IFE_PHC_DISTANCE_MASK                0x01FF  /* Mask for distance to the cable problem, in 80cm granularity */
+#define IFE_PHC_RESET_ALL_MASK               0x0000  /* Disable HWI */
+#define IFE_PSCL_PROBE_MODE                  0x0020  /* LED Probe mode */
+#define IFE_PSCL_PROBE_LEDS_OFF              0x0006  /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON               0x0007  /* Force LEDs 0 and 2 on */
+
+#define ICH8_FLASH_COMMAND_TIMEOUT           500   /* 500 ms , should be adjusted */
+#define ICH8_FLASH_CYCLE_REPEAT_COUNT        10    /* 10 cycles , should be adjusted */
+#define ICH8_FLASH_SEG_SIZE_256              256
+#define ICH8_FLASH_SEG_SIZE_4K               4096
+#define ICH8_FLASH_SEG_SIZE_64K              65536
+
+#define ICH8_CYCLE_READ                      0x0
+#define ICH8_CYCLE_RESERVED                  0x1
+#define ICH8_CYCLE_WRITE                     0x2
+#define ICH8_CYCLE_ERASE                     0x3
+
+#define ICH8_FLASH_GFPREG   0x0000
+#define ICH8_FLASH_HSFSTS   0x0004
+#define ICH8_FLASH_HSFCTL   0x0006
+#define ICH8_FLASH_FADDR    0x0008
+#define ICH8_FLASH_FDATA0   0x0010
+#define ICH8_FLASH_FRACC    0x0050
+#define ICH8_FLASH_FREG0    0x0054
+#define ICH8_FLASH_FREG1    0x0058
+#define ICH8_FLASH_FREG2    0x005C
+#define ICH8_FLASH_FREG3    0x0060
+#define ICH8_FLASH_FPR0     0x0074
+#define ICH8_FLASH_FPR1     0x0078
+#define ICH8_FLASH_SSFSTS   0x0090
+#define ICH8_FLASH_SSFCTL   0x0092
+#define ICH8_FLASH_PREOP    0x0094
+#define ICH8_FLASH_OPTYPE   0x0096
+#define ICH8_FLASH_OPMENU   0x0098
+
+#define ICH8_FLASH_REG_MAPSIZE      0x00A0
+#define ICH8_FLASH_SECTOR_SIZE      4096
+#define ICH8_GFPREG_BASE_MASK       0x1FFF
+#define ICH8_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+
+/* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+    struct ich8_hsfsts {
+#ifdef E1000_BIG_ENDIAN
+        uint16_t reserved2      :6;
+        uint16_t fldesvalid     :1;
+        uint16_t flockdn        :1;
+        uint16_t flcdone        :1;
+        uint16_t flcerr         :1;
+        uint16_t dael           :1;
+        uint16_t berasesz       :2;
+        uint16_t flcinprog      :1;
+        uint16_t reserved1      :2;
+#else
+        uint16_t flcdone        :1;   /* bit 0 Flash Cycle Done */
+        uint16_t flcerr         :1;   /* bit 1 Flash Cycle Error */
+        uint16_t dael           :1;   /* bit 2 Direct Access error Log */
+        uint16_t berasesz       :2;   /* bit 4:3 Block/Sector Erase Size */
+        uint16_t flcinprog      :1;   /* bit 5 flash SPI cycle in Progress */
+        uint16_t reserved1      :2;   /* bits 7:6 Reserved */
+        uint16_t reserved2      :6;   /* bits 13:8 Reserved */
+        uint16_t fldesvalid     :1;   /* bit 14 Flash Descriptor Valid */
+        uint16_t flockdn        :1;   /* bit 15 Flash Configuration Lock-Down */
+#endif
+    } hsf_status;
+    uint16_t regval;
+};
+
+/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h FLCTL */
+union ich8_hws_flash_ctrl {
+    struct ich8_hsflctl {
+#ifdef E1000_BIG_ENDIAN
+        uint16_t fldbcount      :2;
+        uint16_t flockdn        :6;
+        uint16_t flcgo          :1;
+        uint16_t flcycle        :2;
+        uint16_t reserved       :5;
+#else
+        uint16_t flcgo          :1;   /* 0 Flash Cycle Go */
+        uint16_t flcycle        :2;   /* 2:1 Flash Cycle */
+        uint16_t reserved       :5;   /* 7:3 Reserved  */
+        uint16_t fldbcount      :2;   /* 9:8 Flash Data Byte Count */
+        uint16_t flockdn        :6;   /* 15:10 Reserved */
+#endif
+    } hsf_ctrl;
+    uint16_t regval;
+};
+
+/* ICH8 Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+    struct ich8_flracc {
+#ifdef E1000_BIG_ENDIAN
+        uint32_t gmwag          :8;
+        uint32_t gmrag          :8;
+        uint32_t grwa           :8;
+        uint32_t grra           :8;
+#else
+        uint32_t grra           :8;   /* 0:7 GbE region Read Access */
+        uint32_t grwa           :8;   /* 8:15 GbE region Write Access */
+        uint32_t gmrag          :8;   /* 23:16 GbE Master Read Access Grant  */
+        uint32_t gmwag          :8;   /* 31:24 GbE Master Write Access Grant */
+#endif
+    } hsf_flregacc;
+    uint16_t regval;
+};
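+
+/*
+ * Illustrative sketch (not part of the original driver): the unions above
+ * decode a raw 16-bit register value through named bit-fields. The helper
+ * below is hypothetical and only demonstrates the access pattern; the real
+ * driver obtains HSFSTS from the memory-mapped flash region.
+ */
+static inline int ich8_flash_cycle_done(uint16_t hsfsts_raw)
+{
+    union ich8_hws_flash_status hsfsts;
+
+    hsfsts.regval = hsfsts_raw;              /* load the raw register value */
+    return hsfsts.hsf_status.flcdone &&      /* cycle finished...           */
+           !hsfsts.hsf_status.flcerr;        /* ...and without error        */
+}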
+
+/* Miscellaneous PHY bit definitions. */
+#define PHY_PREAMBLE        0xFFFFFFFF
+#define PHY_SOF             0x01
+#define PHY_OP_READ         0x02
+#define PHY_OP_WRITE        0x01
+#define PHY_TURNAROUND      0x02
+#define PHY_PREAMBLE_SIZE   32
+#define MII_CR_SPEED_1000   0x0040
+#define MII_CR_SPEED_100    0x2000
+#define MII_CR_SPEED_10     0x0000
+#define E1000_PHY_ADDRESS   0x01
+#define PHY_AUTO_NEG_TIME   45  /* 4.5 Seconds */
+#define PHY_FORCE_TIME      20  /* 2.0 Seconds */
+#define PHY_REVISION_MASK   0xFFFFFFF0
+#define DEVICE_SPEED_MASK   0x00000300  /* Device Ctrl Reg Speed Mask */
+#define REG4_SPEED_MASK     0x01E0
+#define REG9_SPEED_MASK     0x0300
+#define ADVERTISE_10_HALF   0x0001
+#define ADVERTISE_10_FULL   0x0002
+#define ADVERTISE_100_HALF  0x0004
+#define ADVERTISE_100_FULL  0x0008
+#define ADVERTISE_1000_HALF 0x0010
+#define ADVERTISE_1000_FULL 0x0020
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F  /* Everything but 1000-Half */
+#define AUTONEG_ADVERTISE_10_100_ALL    0x000F /* All 10/100 speeds*/
+#define AUTONEG_ADVERTISE_10_ALL        0x0003 /* 10Mbps Full & Half speeds*/
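+
+/*
+ * Illustrative note (not part of the original source): the default autoneg
+ * advertisement is simply the OR of the individual bits minus 1000-Half:
+ * 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0020 == 0x002F.
+ */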
+
+#endif /* _E1000_HW_H_ */
+++ linux-patched/drivers/xenomai/net/drivers/e1000/kcompat.h	2022-03-21 12:58:29.482888054 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000/e1000_main.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of the GNU General Public License as published by the Free
+  Software Foundation; either version 2 of the License, or (at your option)
+  any later version.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _KCOMPAT_H_
+#define _KCOMPAT_H_
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+
+#include <rtnet_port.h>
+
+#ifndef IRQ_HANDLED
+#define irqreturn_t void
+#define IRQ_HANDLED
+#define IRQ_NONE
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)
+#endif
+
+/* Useful settings for rtnet */
+#undef MAX_SKB_FRAGS
+#undef NETIF_F_TSO
+#undef E1000_COUNT_ICR
+#undef NETIF_F_HW_VLAN_TX
+#undef CONFIG_NET_POLL_CONTROLLER
+#undef ETHTOOL_OPS_COMPAT
+#undef ETHTOOL_GPERMADDR
+
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(x)	kfree(x)
+#endif
+
+#undef E1000_NAPI
+#undef CONFIG_E1000_NAPI
+
+#undef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
+
+
+#ifdef DISABLE_PCI_MSI
+#undef CONFIG_PCI_MSI
+#endif
+
+#ifdef DISABLE_PM
+#undef CONFIG_PM
+#endif
+#undef CONFIG_PM
+
+#ifndef module_param
+#define module_param(v,t,p) MODULE_PARM(v, "i");
+#endif
+
+#ifndef DMA_64BIT_MASK
+#define DMA_64BIT_MASK  0xffffffffffffffffULL
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK  0x00000000ffffffffULL
+#endif
+
+/*****************************************************************************/
+#ifndef unlikely
+#define unlikely(_x) _x
+#define likely(_x) _x
+#endif
+/*****************************************************************************/
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+	.vendor = (vend), .device = (dev), \
+	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+/*****************************************************************************/
+/* Installations with ethtool version without eeprom, adapter id, or statistics
+ * support */
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x1d
+#undef ethtool_drvinfo
+#define ethtool_drvinfo k_ethtool_drvinfo
+struct k_ethtool_drvinfo {
+	uint32_t cmd;
+	char	 driver[32];
+	char	 version[32];
+	char	 fw_version[32];
+	char	 bus_info[32];
+	char	 reserved1[32];
+	char	 reserved2[16];
+	uint32_t n_stats;
+	uint32_t testinfo_len;
+	uint32_t eedump_len;
+	uint32_t regdump_len;
+};
+
+struct ethtool_stats {
+	uint32_t cmd;
+	uint32_t n_stats;
+	uint64_t data[0];
+};
+
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x1c
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x1b
+enum ethtool_stringset {
+	ETH_SS_TEST             = 0,
+	ETH_SS_STATS,
+};
+struct ethtool_gstrings {
+	u32     cmd;            /* ETHTOOL_GSTRINGS */
+	u32     string_set;     /* string set id e.g. ETH_SS_TEST, etc. */
+	u32     len;            /* number of strings in the string set */
+	u8      data[0];
+};
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x1a
+enum ethtool_test_flags {
+	ETH_TEST_FL_OFFLINE	= (1 << 0),
+	ETH_TEST_FL_FAILED	= (1 << 1),
+};
+struct ethtool_test {
+	uint32_t cmd;
+	uint32_t flags;
+	uint32_t reserved;
+	uint32_t len;
+	uint64_t data[0];
+};
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0xb
+#undef ETHTOOL_GREGS
+struct ethtool_eeprom {
+	uint32_t cmd;
+	uint32_t magic;
+	uint32_t offset;
+	uint32_t len;
+	uint8_t	 data[0];
+};
+
+struct ethtool_value {
+	uint32_t cmd;
+	uint32_t data;
+};
+
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0xa
+#endif /* Ethtool version without link support */
+#endif /* Ethtool version without eeprom support */
+#endif /* Ethtool version without test support */
+#endif /* Ethtool version without strings support */
+#endif /* Ethtool version without adapter id support */
+#endif /* Ethtool version without statistics support */
+
+#ifndef ETHTOOL_GREGS
+#define ETHTOOL_GREGS		0x00000004 /* Get NIC registers */
+#define ethtool_regs _kc_ethtool_regs
+/* for passing big chunks of data */
+struct _kc_ethtool_regs {
+	u32	cmd;
+	u32	version; /* driver-specific, indicates different chips/revs */
+	u32	len; /* bytes */
+	u8	data[0];
+};
+#endif
+#ifndef ETHTOOL_GMSGLVL
+#define ETHTOOL_GMSGLVL		0x00000007 /* Get driver message level */
+#endif
+#ifndef ETHTOOL_SMSGLVL
+#define ETHTOOL_SMSGLVL		0x00000008 /* Set driver msg level, priv. */
+#endif
+#ifndef ETHTOOL_NWAY_RST
+#define ETHTOOL_NWAY_RST	0x00000009 /* Restart autonegotiation, priv */
+#endif
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK		0x0000000a /* Get link status */
+#endif
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM		0x0000000b /* Get EEPROM data */
+#endif
+#ifndef ETHTOOL_SEEPROM
+#define ETHTOOL_SEEPROM		0x0000000c /* Set EEPROM data */
+#endif
+#ifndef ETHTOOL_GCOALESCE
+#define ETHTOOL_GCOALESCE	0x0000000e /* Get coalesce config */
+/* for configuring coalescing parameters of chip */
+#define ethtool_coalesce _kc_ethtool_coalesce
+struct _kc_ethtool_coalesce {
+	u32	cmd;	/* ETHTOOL_{G,S}COALESCE */
+
+	/* How many usecs to delay an RX interrupt after
+	 * a packet arrives.  If 0, only rx_max_coalesced_frames
+	 * is used.
+	 */
+	u32	rx_coalesce_usecs;
+
+	/* How many packets to delay an RX interrupt after
+	 * a packet arrives.  If 0, only rx_coalesce_usecs is
+	 * used.  It is illegal to set both usecs and max frames
+	 * to zero as this would cause RX interrupts to never be
+	 * generated.
+	 */
+	u32	rx_max_coalesced_frames;
+
+	/* Same as above two parameters, except that these values
+	 * apply while an IRQ is being serviced by the host.  Not
+	 * all cards support this feature and the values are ignored
+	 * in that case.
+	 */
+	u32	rx_coalesce_usecs_irq;
+	u32	rx_max_coalesced_frames_irq;
+
+	/* How many usecs to delay a TX interrupt after
+	 * a packet is sent.  If 0, only tx_max_coalesced_frames
+	 * is used.
+	 */
+	u32	tx_coalesce_usecs;
+
+	/* How many packets to delay a TX interrupt after
+	 * a packet is sent.  If 0, only tx_coalesce_usecs is
+	 * used.  It is illegal to set both usecs and max frames
+	 * to zero as this would cause TX interrupts to never be
+	 * generated.
+	 */
+	u32	tx_max_coalesced_frames;
+
+	/* Same as above two parameters, except that these values
+	 * apply while an IRQ is being serviced by the host.  Not
+	 * all cards support this feature and the values are ignored
+	 * in that case.
+	 */
+	u32	tx_coalesce_usecs_irq;
+	u32	tx_max_coalesced_frames_irq;
+
+	/* How many usecs to delay in-memory statistics
+	 * block updates.  Some drivers do not have an in-memory
+	 * statistic block, and in such cases this value is ignored.
+	 * This value must not be zero.
+	 */
+	u32	stats_block_coalesce_usecs;
+
+	/* Adaptive RX/TX coalescing is an algorithm implemented by
+	 * some drivers to improve latency under low packet rates and
+	 * improve throughput under high packet rates.  Some drivers
+	 * only implement one of RX or TX adaptive coalescing.  Anything
+	 * not implemented by the driver causes these values to be
+	 * silently ignored.
+	 */
+	u32	use_adaptive_rx_coalesce;
+	u32	use_adaptive_tx_coalesce;
+
+	/* When the packet rate (measured in packets per second)
+	 * is below pkt_rate_low, the {rx,tx}_*_low parameters are
+	 * used.
+	 */
+	u32	pkt_rate_low;
+	u32	rx_coalesce_usecs_low;
+	u32	rx_max_coalesced_frames_low;
+	u32	tx_coalesce_usecs_low;
+	u32	tx_max_coalesced_frames_low;
+
+	/* When the packet rate is below pkt_rate_high but above
+	 * pkt_rate_low (both measured in packets per second) the
+	 * normal {rx,tx}_* coalescing parameters are used.
+	 */
+
+	/* When the packet rate (measured in packets per second)
+	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+	 * used.
+	 */
+	u32	pkt_rate_high;
+	u32	rx_coalesce_usecs_high;
+	u32	rx_max_coalesced_frames_high;
+	u32	tx_coalesce_usecs_high;
+	u32	tx_max_coalesced_frames_high;
+
+	/* How often to do adaptive coalescing packet rate sampling,
+	 * measured in seconds.  Must not be zero.
+	 */
+	u32	rate_sample_interval;
+};
+#endif
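+/*
+ * Illustrative sketch (not part of the original source): a driver's ethtool
+ * get_coalesce handler would typically fill the structure above, e.g.
+ *
+ *	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
+ *	ec.rx_coalesce_usecs = 20;	// delay the RX irq by up to 20 us
+ *	ec.rx_max_coalesced_frames = 5;	// or until 5 frames have accumulated
+ *
+ * The values are hypothetical; they only illustrate how rx_coalesce_usecs and
+ * rx_max_coalesced_frames interact as documented above.
+ */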
+#ifndef ETHTOOL_SCOALESCE
+#define ETHTOOL_SCOALESCE	0x0000000f /* Set coalesce config. */
+#endif
+#ifndef ETHTOOL_GRINGPARAM
+#define ETHTOOL_GRINGPARAM	0x00000010 /* Get ring parameters */
+/* for configuring RX/TX ring parameters */
+#define ethtool_ringparam _kc_ethtool_ringparam
+struct _kc_ethtool_ringparam {
+	u32	cmd;	/* ETHTOOL_{G,S}RINGPARAM */
+
+	/* Read only attributes.  These indicate the maximum number
+	 * of pending RX/TX ring entries the driver will allow the
+	 * user to set.
+	 */
+	u32	rx_max_pending;
+	u32	rx_mini_max_pending;
+	u32	rx_jumbo_max_pending;
+	u32	tx_max_pending;
+
+	/* Values changeable by the user.  The valid values are
+	 * in the range 1 to the "*_max_pending" counterpart above.
+	 */
+	u32	rx_pending;
+	u32	rx_mini_pending;
+	u32	rx_jumbo_pending;
+	u32	tx_pending;
+};
+#endif
+#ifndef ETHTOOL_SRINGPARAM
+#define ETHTOOL_SRINGPARAM	0x00000011 /* Set ring parameters, priv. */
+#endif
+#ifndef ETHTOOL_GPAUSEPARAM
+#define ETHTOOL_GPAUSEPARAM	0x00000012 /* Get pause parameters */
+/* for configuring link flow control parameters */
+#define ethtool_pauseparam _kc_ethtool_pauseparam
+struct _kc_ethtool_pauseparam {
+	u32	cmd;	/* ETHTOOL_{G,S}PAUSEPARAM */
+
+	/* If the link is being auto-negotiated (via ethtool_cmd.autoneg
+	 * being true) the user may set 'autoneg' here non-zero to have the
+	 * pause parameters be auto-negotiated too.  In such a case, the
+	 * {rx,tx}_pause values below determine what capabilities are
+	 * advertised.
+	 *
+	 * If 'autoneg' is zero or the link is not being auto-negotiated,
+	 * then {rx,tx}_pause force the driver to use/not-use pause
+	 * flow control.
+	 */
+	u32	autoneg;
+	u32	rx_pause;
+	u32	tx_pause;
+};
+#endif
+#ifndef ETHTOOL_SPAUSEPARAM
+#define ETHTOOL_SPAUSEPARAM	0x00000013 /* Set pause parameters. */
+#endif
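+/*
+ * Illustrative sketch (not part of the original source), following the
+ * semantics documented above: advertising receive-only pause while the link
+ * autonegotiates could look like
+ *
+ *	struct ethtool_pauseparam pp = { .cmd = ETHTOOL_SPAUSEPARAM };
+ *	pp.autoneg  = 1;	// negotiate pause along with the link
+ *	pp.rx_pause = 1;	// advertise: we honour received PAUSE frames
+ *	pp.tx_pause = 0;	// do not request sending PAUSE frames ourselves
+ *
+ * The field values are hypothetical and only mirror the comments in the
+ * structure definition.
+ */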
+#ifndef ETHTOOL_GRXCSUM
+#define ETHTOOL_GRXCSUM		0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SRXCSUM
+#define ETHTOOL_SRXCSUM		0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GTXCSUM
+#define ETHTOOL_GTXCSUM		0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STXCSUM
+#define ETHTOOL_STXCSUM		0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GSG
+#define ETHTOOL_GSG		0x00000018 /* Get scatter-gather enable
+					    * (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SSG
+#define ETHTOOL_SSG		0x00000019 /* Set scatter-gather enable
+					    * (ethtool_value). */
+#endif
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST		0x0000001a /* execute NIC self-test, priv. */
+#endif
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS	0x0000001b /* get specified string set */
+#endif
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID		0x0000001c /* identify the NIC */
+#endif
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS		0x0000001d /* get NIC-specific statistics */
+#endif
+#ifndef ETHTOOL_GTSO
+#define ETHTOOL_GTSO		0x0000001e /* Get TSO enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STSO
+#define ETHTOOL_STSO		0x0000001f /* Set TSO enable (ethtool_value) */
+#endif
+
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN 2
+#endif
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0 /* driver took care of the packet */
+#endif
+
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1 /* driver tx path was busy */
+#endif
+
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */
+#endif
+
+/* if we do not have the infrastructure to detect if skb_header is cloned *
+ * just return false in all cases */
+#ifndef SKB_DATAREF_SHIFT
+#define skb_header_cloned(x) 0
+#endif /* SKB_DATAREF_SHIFT not defined */
+
+#ifndef WARN_ON
+#define WARN_ON(x)
+#endif
+
+#define USE_DRIVER_SHUTDOWN_HANDLER
+
+#ifndef SA_PROBEIRQ
+#define SA_PROBEIRQ 0
+#endif
+
+#endif /* _KCOMPAT_H_ */
+++ linux-patched/drivers/xenomai/net/drivers/e1000/e1000_main.c	2022-03-21 12:58:29.477888103 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000/e1000_osdep.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of the GNU General Public License as published by the Free
+  Software Foundation; either version 2 of the License, or (at your option)
+  any later version.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+
+/* Change Log
+ *
+ * Port to rtnet (0.9.3) by Mathias Koehrer. Base version: e1000-7.1.9
+ *             8-Aug-2006
+ *
+ * 7.0.36      10-Mar-2006
+ *   o fixups for compilation issues on older kernels
+ * 7.0.35      3-Mar-2006
+ * 7.0.34
+ *   o Major performance fixes by understanding relationship of rx_buffer_len
+ *     to window size growth.  _ps and legacy receive paths changed
+ *   o merge with kernel changes
+ *   o legacy receive path went back to single descriptor model for jumbos
+ * 7.0.33      3-Feb-2006
+ *   o Added another fix for the pass false carrier bit
+ * 7.0.32      24-Jan-2006
+ *   o Need to rebuild with new version number for the pass false carrier
+ *     fix in e1000_hw.c
+ * 7.0.30      18-Jan-2006
+ *   o fixup for tso workaround to disable it for pci-x
+ *   o fix mem leak on 82542
+ *   o fixes for 10 Mb/s connections and incorrect stats
+ * 7.0.28      01/06/2006
+ *   o hardware workaround to only set "speed mode" bit for 1G link.
+ * 7.0.26      12/23/2005
+ *   o wake on lan support modified for device ID 10B5
+ *   o fix dhcp + vlan issue not making it to the iAMT firmware
+ * 7.0.24      12/9/2005
+ *   o New hardware support for the Gigabit NIC embedded in the south bridge
+ *   o Fixes to the recycling logic (skb->tail) from IBM LTC
+ * 6.3.7	11/18/2005
+ *   o Honor eeprom setting for enabling/disabling Wake On Lan
+ * 6.3.5	11/17/2005
+ *   o Fix memory leak in rx ring handling for PCI Express adapters
+ * 6.3.4	11/8/05
+ *   o Patch from Jesper Juhl to remove redundant NULL checks for kfree
+ * 6.3.2	9/20/05
+ *   o Render logic that sets/resets DRV_LOAD as inline functions to
+ *     avoid code replication. If f/w is AMT then set DRV_LOAD only when
+ *     network interface is open.
+ *   o Handle DRV_LOAD set/reset in cases where AMT uses VLANs.
+ *   o Adjust PBA partitioning for Jumbo frames using MTU size and not
+ *     rx_buffer_len
+ * 6.3.1	9/19/05
+ *   o Use adapter->tx_timeout_factor in Tx Hung Detect logic
+ *      (e1000_clean_tx_irq)
+ *   o Support for 8086:10B5 device (Quad Port)
+ */
+
+char e1000_driver_name[] = "rt_e1000";
+static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
+#ifndef CONFIG_E1000_NAPI
+#define DRIVERNAPI
+#else
+#define DRIVERNAPI "-NAPI"
+#endif
+#define DRV_VERSION "7.1.9"DRIVERNAPI
+char e1000_driver_version[] = DRV_VERSION;
+static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
+
+/* e1000_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * Macro expands to...
+ *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+ */
+static struct pci_device_id e1000_pci_tbl[] = {
+	INTEL_E1000_ETHERNET_DEVICE(0x1000),
+	INTEL_E1000_ETHERNET_DEVICE(0x1001),
+	INTEL_E1000_ETHERNET_DEVICE(0x1004),
+	INTEL_E1000_ETHERNET_DEVICE(0x1008),
+	INTEL_E1000_ETHERNET_DEVICE(0x1009),
+	INTEL_E1000_ETHERNET_DEVICE(0x100C),
+	INTEL_E1000_ETHERNET_DEVICE(0x100D),
+	INTEL_E1000_ETHERNET_DEVICE(0x100E),
+	INTEL_E1000_ETHERNET_DEVICE(0x100F),
+	INTEL_E1000_ETHERNET_DEVICE(0x1010),
+	INTEL_E1000_ETHERNET_DEVICE(0x1011),
+	INTEL_E1000_ETHERNET_DEVICE(0x1012),
+	INTEL_E1000_ETHERNET_DEVICE(0x1013),
+	INTEL_E1000_ETHERNET_DEVICE(0x1014),
+	INTEL_E1000_ETHERNET_DEVICE(0x1015),
+	INTEL_E1000_ETHERNET_DEVICE(0x1016),
+	INTEL_E1000_ETHERNET_DEVICE(0x1017),
+	INTEL_E1000_ETHERNET_DEVICE(0x1018),
+	INTEL_E1000_ETHERNET_DEVICE(0x1019),
+	INTEL_E1000_ETHERNET_DEVICE(0x101A),
+	INTEL_E1000_ETHERNET_DEVICE(0x101D),
+	INTEL_E1000_ETHERNET_DEVICE(0x101E),
+	INTEL_E1000_ETHERNET_DEVICE(0x1026),
+	INTEL_E1000_ETHERNET_DEVICE(0x1027),
+	INTEL_E1000_ETHERNET_DEVICE(0x1028),
+	INTEL_E1000_ETHERNET_DEVICE(0x1049),
+	INTEL_E1000_ETHERNET_DEVICE(0x104A),
+	INTEL_E1000_ETHERNET_DEVICE(0x104B),
+	INTEL_E1000_ETHERNET_DEVICE(0x104C),
+	INTEL_E1000_ETHERNET_DEVICE(0x104D),
+	INTEL_E1000_ETHERNET_DEVICE(0x105E),
+	INTEL_E1000_ETHERNET_DEVICE(0x105F),
+	INTEL_E1000_ETHERNET_DEVICE(0x1060),
+	INTEL_E1000_ETHERNET_DEVICE(0x1075),
+	INTEL_E1000_ETHERNET_DEVICE(0x1076),
+	INTEL_E1000_ETHERNET_DEVICE(0x1077),
+	INTEL_E1000_ETHERNET_DEVICE(0x1078),
+	INTEL_E1000_ETHERNET_DEVICE(0x1079),
+	INTEL_E1000_ETHERNET_DEVICE(0x107A),
+	INTEL_E1000_ETHERNET_DEVICE(0x107B),
+	INTEL_E1000_ETHERNET_DEVICE(0x107C),
+	INTEL_E1000_ETHERNET_DEVICE(0x107D),
+	INTEL_E1000_ETHERNET_DEVICE(0x107E),
+	INTEL_E1000_ETHERNET_DEVICE(0x107F),
+	INTEL_E1000_ETHERNET_DEVICE(0x108A),
+	INTEL_E1000_ETHERNET_DEVICE(0x108B),
+	INTEL_E1000_ETHERNET_DEVICE(0x108C),
+	INTEL_E1000_ETHERNET_DEVICE(0x1096),
+	INTEL_E1000_ETHERNET_DEVICE(0x1098),
+	INTEL_E1000_ETHERNET_DEVICE(0x1099),
+	INTEL_E1000_ETHERNET_DEVICE(0x109A),
+	INTEL_E1000_ETHERNET_DEVICE(0x10A4),
+	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
+	INTEL_E1000_ETHERNET_DEVICE(0x10B9),
+	INTEL_E1000_ETHERNET_DEVICE(0x10BA),
+	INTEL_E1000_ETHERNET_DEVICE(0x10BB),
+	INTEL_E1000_ETHERNET_DEVICE(0x10BC),
+	INTEL_E1000_ETHERNET_DEVICE(0x10C4),
+	INTEL_E1000_ETHERNET_DEVICE(0x10C5),
+	/* required last entry */
+	{0,}
+};
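+
+/*
+ * Illustrative note (not part of the original source): combined with the
+ * PCI_DEVICE() compatibility definition earlier in this patch, an entry such
+ * as INTEL_E1000_ETHERNET_DEVICE(0x100E) is expected to expand to
+ * { .vendor = PCI_VENDOR_ID_INTEL, .device = 0x100E,
+ *   .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
+ * i.e. it matches any subsystem IDs for that device.
+ */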
+
+MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
+
+int e1000_up(struct e1000_adapter *adapter);
+void e1000_down(struct e1000_adapter *adapter);
+void e1000_reinit_locked(struct e1000_adapter *adapter);
+void e1000_reset(struct e1000_adapter *adapter);
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+			     struct e1000_tx_ring *txdr);
+static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+			     struct e1000_rx_ring *rxdr);
+static void e1000_free_tx_resources(struct e1000_adapter *adapter,
+			     struct e1000_tx_ring *tx_ring);
+static void e1000_free_rx_resources(struct e1000_adapter *adapter,
+			     struct e1000_rx_ring *rx_ring);
+
+static int e1000_init_module(void);
+static void e1000_exit_module(void);
+static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void e1000_remove(struct pci_dev *pdev);
+static int e1000_alloc_queues(struct e1000_adapter *adapter);
+static int e1000_sw_init(struct e1000_adapter *adapter);
+static int e1000_open(struct rtnet_device *netdev);
+static int e1000_close(struct rtnet_device *netdev);
+static void e1000_configure_tx(struct e1000_adapter *adapter);
+static void e1000_configure_rx(struct e1000_adapter *adapter);
+static void e1000_setup_rctl(struct e1000_adapter *adapter);
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+				struct e1000_tx_ring *tx_ring);
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+				struct e1000_rx_ring *rx_ring);
+static void e1000_set_multi(struct rtnet_device *netdev);
+static void e1000_update_phy_info_task(struct work_struct *work);
+static void e1000_watchdog(struct work_struct *work);
+static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
+static int e1000_xmit_frame(struct rtskb *skb, struct rtnet_device *netdev);
+static int e1000_intr(rtdm_irq_t *irq_handle);
+static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *tx_ring);
+static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring,
+				    nanosecs_abs_t *time_stamp);
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+				   struct e1000_rx_ring *rx_ring,
+				   int cleaned_count);
+#ifdef SIOCGMIIPHY
+#endif
+void e1000_set_ethtool_ops(struct rtnet_device *netdev);
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+#endif
+static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
+static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
+static void e1000_smartspeed(struct e1000_adapter *adapter);
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+				       struct rtskb *skb);
+
+
+
+
+
+/* Exported from other modules */
+
+extern void e1000_check_options(struct e1000_adapter *adapter);
+
+static struct pci_driver e1000_driver = {
+	.name     = e1000_driver_name,
+	.id_table = e1000_pci_tbl,
+	.probe    = e1000_probe,
+	.remove   = e1000_remove,
+};
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver for rtnet");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static int local_debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+module_param_named(debug, local_debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+
+#define MAX_UNITS           8
+
+static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (eg. 1,0,1)");
+
+
+#define kmalloc(a,b) rtdm_malloc(a)
+#define vmalloc(a) rtdm_malloc(a)
+#define kfree(a) rtdm_free(a)
+#define vfree(a) rtdm_free(a)
+
+
+/**
+ * e1000_init_module - Driver Registration Routine
+ *
+ * e1000_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+
+static int __init
+e1000_init_module(void)
+{
+	int ret;
+	printk(KERN_INFO "%s - version %s\n",
+	       e1000_driver_string, e1000_driver_version);
+
+	printk(KERN_INFO "%s\n", e1000_copyright);
+
+	ret = pci_register_driver(&e1000_driver);
+	return ret;
+}
+
+module_init(e1000_init_module);
+
+/**
+ * e1000_exit_module - Driver Exit Cleanup Routine
+ *
+ * e1000_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+
+static void __exit
+e1000_exit_module(void)
+{
+	pci_unregister_driver(&e1000_driver);
+}
+
+module_exit(e1000_exit_module);
+
+static int e1000_request_irq(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	int flags, err = 0;
+
+	flags = RTDM_IRQTYPE_SHARED;
+#ifdef CONFIG_PCI_MSI
+	if (adapter->hw.mac_type > e1000_82547_rev_2) {
+		adapter->have_msi = TRUE;
+		if ((err = pci_enable_msi(adapter->pdev))) {
+			DPRINTK(PROBE, ERR,
+			 "Unable to allocate MSI interrupt Error: %d\n", err);
+			adapter->have_msi = FALSE;
+		}
+	}
+	if (adapter->have_msi)
+		flags = 0;
+#endif
+	rt_stack_connect(netdev, &STACK_manager);
+	if ((err = rtdm_irq_request(&adapter->irq_handle, adapter->pdev->irq,
+				    e1000_intr, flags, netdev->name, netdev)))
+		DPRINTK(PROBE, ERR,
+		    "Unable to allocate interrupt Error: %d\n", err);
+
+	return err;
+}
+
+static void e1000_free_irq(struct e1000_adapter *adapter)
+{
+	// struct rtnet_device *netdev = adapter->netdev;
+
+	rtdm_irq_free(&adapter->irq_handle);
+
+#ifdef CONFIG_PCI_MSI
+	if (adapter->have_msi)
+		pci_disable_msi(adapter->pdev);
+#endif
+}
+
+/**
+ * e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_irq_disable(struct e1000_adapter *adapter)
+{
+	atomic_inc(&adapter->irq_sem);
+	E1000_WRITE_REG(&adapter->hw, IMC, ~0);
+	E1000_WRITE_FLUSH(&adapter->hw);
+	synchronize_irq(adapter->pdev->irq);
+}
+
+/**
+ * e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_irq_enable(struct e1000_adapter *adapter)
+{
+	if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
+		E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
+		E1000_WRITE_FLUSH(&adapter->hw);
+	}
+}
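+
+/*
+ * Note added for clarity (not in the original driver): irq_sem makes the
+ * disable/enable pair nestable -- e1000_irq_disable() bumps the counter and
+ * masks everything via IMC, while e1000_irq_enable() re-arms IMS only once
+ * the counter drops back to zero, so nested disables stay masked.
+ */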
+
+/**
+ * e1000_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is closed.
+ *
+ **/
+
+static void
+e1000_release_hw_control(struct e1000_adapter *adapter)
+{
+	uint32_t ctrl_ext;
+	uint32_t swsm;
+	uint32_t extcnf;
+
+	/* Let firmware take over control of h/w */
+	switch (adapter->hw.mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, SWSM);
+		E1000_WRITE_REG(&adapter->hw, SWSM,
+				swsm & ~E1000_SWSM_DRV_LOAD);
+		break;
+	case e1000_ich8lan:
+		extcnf = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+				extcnf & ~E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * e1000_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ *
+ **/
+
+static void
+e1000_get_hw_control(struct e1000_adapter *adapter)
+{
+	uint32_t ctrl_ext;
+	uint32_t swsm;
+	uint32_t extcnf;
+	/* Let firmware know the driver has taken over */
+	switch (adapter->hw.mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, SWSM);
+		E1000_WRITE_REG(&adapter->hw, SWSM,
+				swsm | E1000_SWSM_DRV_LOAD);
+		break;
+	case e1000_ich8lan:
+		extcnf = E1000_READ_REG(&adapter->hw, EXTCNF_CTRL);
+		E1000_WRITE_REG(&adapter->hw, EXTCNF_CTRL,
+				extcnf | E1000_EXTCNF_CTRL_SWFLAG);
+		break;
+	default:
+		break;
+	}
+}
+
+int
+e1000_up(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	int i;
+
+	/* hardware has been reset, we need to reload some things */
+
+	e1000_set_multi(netdev);
+
+
+	e1000_configure_tx(adapter);
+	e1000_setup_rctl(adapter);
+	e1000_configure_rx(adapter);
+	/* call E1000_DESC_UNUSED which always leaves
+	 * at least 1 descriptor unused to make sure
+	 * next_to_use != next_to_clean */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
+		adapter->alloc_rx_buf(adapter, ring,
+				      E1000_DESC_UNUSED(ring));
+	}
+
+	// TODO makoehre adapter->tx_queue_len = netdev->tx_queue_len;
+
+	schedule_delayed_work(&adapter->watchdog_task, 1);
+
+	e1000_irq_enable(adapter);
+
+	return 0;
+}
+
+/**
+ * e1000_power_up_phy - restore link in case the phy was powered down
+ * @adapter: address of board private structure
+ *
+ * The phy may be powered down to save power and turn off link when the
+ * driver is unloaded and wake on lan is not enabled (among others)
+ * *** this routine MUST be followed by a call to e1000_reset ***
+ *
+ **/
+
+static void e1000_power_up_phy(struct e1000_adapter *adapter)
+{
+	uint16_t mii_reg = 0;
+
+	/* Just clear the power down bit to wake the phy back up */
+	if (adapter->hw.media_type == e1000_media_type_copper) {
+		/* according to the manual, the phy will retain its
+		 * settings across a power-down/up cycle */
+		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
+		mii_reg &= ~MII_CR_POWER_DOWN;
+		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
+	}
+}
+
+static void e1000_power_down_phy(struct e1000_adapter *adapter)
+{
+	boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
+				      e1000_check_mng_mode(&adapter->hw);
+	/* Power down the PHY so no link is implied when interface is down.
+	 * The PHY cannot be powered down if any of the following is TRUE:
+	 * (a) WoL is enabled
+	 * (b) AMT is active
+	 * (c) SoL/IDER session is active */
+	if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
+	   adapter->hw.mac_type != e1000_ich8lan &&
+	   adapter->hw.media_type == e1000_media_type_copper &&
+	   !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
+	   !mng_mode_enabled &&
+	   !e1000_check_phy_reset_block(&adapter->hw)) {
+		uint16_t mii_reg = 0;
+		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
+		mii_reg |= MII_CR_POWER_DOWN;
+		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
+		mdelay(1);
+	}
+}
+
+static void e1000_down_and_stop(struct e1000_adapter *adapter)
+{
+	cancel_work_sync(&adapter->reset_task);
+	cancel_delayed_work_sync(&adapter->watchdog_task);
+	cancel_delayed_work_sync(&adapter->phy_info_task);
+	cancel_delayed_work_sync(&adapter->fifo_stall_task);
+}
+
+void
+e1000_down(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+
+	e1000_irq_disable(adapter);
+
+	e1000_down_and_stop(adapter);
+
+	// TODO makoehre     netdev->tx_queue_len = adapter->tx_queue_len;
+	adapter->link_speed = 0;
+	adapter->link_duplex = 0;
+	rtnetif_carrier_off(netdev);
+	rtnetif_stop_queue(netdev);
+
+	e1000_reset(adapter);
+	e1000_clean_all_tx_rings(adapter);
+	e1000_clean_all_rx_rings(adapter);
+}
+
+void
+e1000_reinit_locked(struct e1000_adapter *adapter)
+{
+	WARN_ON(in_interrupt());
+	if (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+		msleep(1);
+	e1000_down(adapter);
+	e1000_up(adapter);
+	clear_bit(__E1000_RESETTING, &adapter->flags);
+}
+
+void
+e1000_reset(struct e1000_adapter *adapter)
+{
+	uint32_t pba;
+	uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
+
+	/* Repartition Pba for greater than 9k mtu
+	 * To take effect CTRL.RST is required.
+	 */
+
+	switch (adapter->hw.mac_type) {
+	case e1000_82547:
+	case e1000_82547_rev_2:
+		pba = E1000_PBA_30K;
+		break;
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+		pba = E1000_PBA_38K;
+		break;
+	case e1000_82573:
+		pba = E1000_PBA_12K;
+		break;
+	case e1000_ich8lan:
+		pba = E1000_PBA_8K;
+		break;
+	default:
+		pba = E1000_PBA_48K;
+		break;
+	}
+
+	if ((adapter->hw.mac_type != e1000_82573) &&
+	   (adapter->netdev->mtu > E1000_RXBUFFER_8192))
+		pba -= 8; /* allocate more FIFO for Tx */
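+	/* pba counts 1 kB blocks of packet buffer assigned to Rx, so taking
+	 * 8 off hands 8 kB over to the Tx FIFO for larger frames. */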
+
+
+	if (adapter->hw.mac_type == e1000_82547) {
+		adapter->tx_fifo_head = 0;
+		adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
+		adapter->tx_fifo_size =
+			(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
+		atomic_set(&adapter->tx_fifo_stall, 0);
+	}
+
+	E1000_WRITE_REG(&adapter->hw, PBA, pba);
+
+	/* flow control settings */
+	/* Set the FC high water mark to 90% of the FIFO size; the
+	 * hardware requires the lowest 3 bits to be cleared. */
+	fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
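+	/* (pba is in kB, so pba * 9216 / 10 equals 0.9 * pba * 1024 bytes;
+	 * the 0xFFF8 mask rounds the result down to an 8-byte boundary.) */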
+	/* We can't use 90% on small FIFOs because the remainder
+	 * would be less than 1 full frame.  In this case, we size
+	 * it to allow at least a full frame above the high water
+	 *  mark. */
+	if (pba < E1000_PBA_16K)
+		fc_high_water_mark = (pba * 1024) - 1600;
+
+	adapter->hw.fc_high_water = fc_high_water_mark;
+	adapter->hw.fc_low_water = fc_high_water_mark - 8;
+	if (adapter->hw.mac_type == e1000_80003es2lan)
+		adapter->hw.fc_pause_time = 0xFFFF;
+	else
+		adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
+	adapter->hw.fc_send_xon = 1;
+	adapter->hw.fc = adapter->hw.original_fc;
+
+	/* Allow time for pending master requests to run */
+	e1000_reset_hw(&adapter->hw);
+	if (adapter->hw.mac_type >= e1000_82544)
+		E1000_WRITE_REG(&adapter->hw, WUC, 0);
+	if (e1000_init_hw(&adapter->hw))
+		DPRINTK(PROBE, ERR, "Hardware Error\n");
+	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+	E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
+
+	/* Set adaptive interframe spacing to zero */
+	E1000_WRITE_REG(&adapter->hw, AIT, 0);
+
+	// e1000_reset_adaptive(&adapter->hw);
+	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
+
+	if (!adapter->smart_power_down &&
+	    (adapter->hw.mac_type == e1000_82571 ||
+	     adapter->hw.mac_type == e1000_82572)) {
+		uint16_t phy_data = 0;
+		/* speed up time to link by disabling smart power down, ignore
+		 * the return value of this function because there is nothing
+		 * different we would do if it failed */
+		e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
+				   &phy_data);
+		phy_data &= ~IGP02E1000_PM_SPD;
+		e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
+				    phy_data);
+	}
+
+}
+
+static void
+e1000_reset_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter =
+		container_of(work, struct e1000_adapter, reset_task);
+
+	e1000_reinit_locked(adapter);
+}
+
+/**
+ * e1000_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in e1000_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * e1000_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+
+static int e1000_probe(struct pci_dev *pdev,
+	    const struct pci_device_id *ent)
+{
+	struct rtnet_device *netdev;
+	struct e1000_adapter *adapter;
+	unsigned long mmio_start, mmio_len;
+	unsigned long flash_start, flash_len;
+
+	static int cards_found = 0;
+	static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */
+	int i, err;
+	uint16_t eeprom_data;
+	uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
+
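+	/* A zero entry in the "cards" module parameter array apparently marks
+	 * an adapter that should not be attached by this driver. */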
+	if (cards[cards_found++] == 0)
+		return -ENODEV;
+
+	if ((err = pci_enable_device(pdev)))
+		return err;
+
+	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) ||
+	    (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
+		if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) &&
+		    (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+			E1000_ERR("No usable DMA configuration, aborting\n");
+			return err;
+		}
+	}
+
+	if ((err = pci_request_regions(pdev, e1000_driver_name)))
+		return err;
+
+	pci_set_master(pdev);
+
+	netdev = rt_alloc_etherdev(sizeof(struct e1000_adapter), 48);
+	if (!netdev) {
+		err = -ENOMEM;
+		goto err_alloc_etherdev;
+	}
+	memset(netdev->priv, 0, sizeof(struct e1000_adapter));
+
+	rt_rtdev_connect(netdev, &RTDEV_manager);
+
+
+	// SET_NETDEV_DEV(netdev, &pdev->dev);
+	netdev->vers = RTDEV_VERS_2_0;
+	netdev->sysbind = &pdev->dev;
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = netdev->priv;
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	adapter->hw.back = adapter;
+	adapter->msg_enable = (1 << local_debug) - 1;
+
+	mmio_start = pci_resource_start(pdev, BAR_0);
+	mmio_len = pci_resource_len(pdev, BAR_0);
+
+	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
+	if (!adapter->hw.hw_addr) {
+		err = -EIO;
+		goto err_ioremap;
+	}
+
+	for (i = BAR_1; i <= BAR_5; i++) {
+		if (pci_resource_len(pdev, i) == 0)
+			continue;
+		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+			adapter->hw.io_base = pci_resource_start(pdev, i);
+			break;
+		}
+	}
+
+	netdev->open = &e1000_open;
+	netdev->stop = &e1000_close;
+	netdev->hard_start_xmit = &e1000_xmit_frame;
+	// netdev->get_stats = &e1000_get_stats;
+	// netdev->set_multicast_list = &e1000_set_multi;
+	// netdev->set_mac_address = &e1000_set_mac;
+	// netdev->change_mtu = &e1000_change_mtu;
+	// netdev->do_ioctl = &e1000_ioctl;
+	// e1000_set_ethtool_ops(netdev);
+	strcpy(netdev->name, pci_name(pdev));
+
+	netdev->mem_start = mmio_start;
+	netdev->mem_end = mmio_start + mmio_len;
+	netdev->base_addr = adapter->hw.io_base;
+
+	adapter->bd_number = cards_found - 1;
+
+	/* setup the private structure */
+
+	if ((err = e1000_sw_init(adapter)))
+		goto err_sw_init;
+
+	/* Flash BAR mapping must happen after e1000_sw_init
+	 * because it depends on mac_type */
+	if ((adapter->hw.mac_type == e1000_ich8lan) &&
+	   (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+		flash_start = pci_resource_start(pdev, 1);
+		flash_len = pci_resource_len(pdev, 1);
+		adapter->hw.flash_address = ioremap(flash_start, flash_len);
+		if (!adapter->hw.flash_address) {
+			err = -EIO;
+			goto err_flashmap;
+		}
+	}
+
+	if ((err = e1000_check_phy_reset_block(&adapter->hw)))
+		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
+
+	/* if ksp3, indicate if it's port a being setup */
+	if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
+			e1000_ksp3_port_a == 0)
+		adapter->ksp3_port_a = 1;
+	e1000_ksp3_port_a++;
+	/* Reset for multiple KSP3 adapters */
+	if (e1000_ksp3_port_a == 4)
+		e1000_ksp3_port_a = 0;
+
+	netdev->features |= NETIF_F_LLTX;
+
+	adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
+
+	/* initialize eeprom parameters */
+
+	if (e1000_init_eeprom_params(&adapter->hw)) {
+		E1000_ERR("EEPROM initialization failed\n");
+		err = -EIO;
+		goto err_eeprom;
+	}
+
+	/* before reading the EEPROM, reset the controller to
+	 * put the device in a known good starting state */
+
+	e1000_reset_hw(&adapter->hw);
+
+	/* make sure the EEPROM is good */
+
+	if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
+		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
+		err = -EIO;
+		goto err_eeprom;
+	}
+
+	/* copy the MAC address out of the EEPROM */
+
+	if (e1000_read_mac_addr(&adapter->hw))
+		DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
+	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+#ifdef ETHTOOL_GPERMADDR
+	memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
+
+	if (!is_valid_ether_addr(netdev->perm_addr)) {
+#else
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+#endif
+		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+		err = -EIO;
+		goto err_eeprom;
+	}
+
+	e1000_read_part_num(&adapter->hw, &(adapter->part_num));
+
+	e1000_get_bus_info(&adapter->hw);
+
+	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
+	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
+			  e1000_82547_tx_fifo_stall_task);
+	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
+	INIT_WORK(&adapter->reset_task, e1000_reset_task);
+
+	/* we're going to reset, so assume we have no link for now */
+
+	rtnetif_carrier_off(netdev);
+	rtnetif_stop_queue(netdev);
+
+	e1000_check_options(adapter);
+
+	/* Initial Wake on LAN setting
+	 * If APM wake is enabled in the EEPROM,
+	 * enable the ACPI Magic Packet filter
+	 */
+
+	switch (adapter->hw.mac_type) {
+	case e1000_82542_rev2_0:
+	case e1000_82542_rev2_1:
+	case e1000_82543:
+		break;
+	case e1000_82544:
+		e1000_read_eeprom(&adapter->hw,
+			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
+		eeprom_apme_mask = E1000_EEPROM_82544_APM;
+		break;
+	case e1000_ich8lan:
+		e1000_read_eeprom(&adapter->hw,
+			EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
+		eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
+		break;
+	case e1000_82546:
+	case e1000_82546_rev_3:
+	case e1000_82571:
+	case e1000_80003es2lan:
+		if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1) {
+			e1000_read_eeprom(&adapter->hw,
+				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+			break;
+		}
+		fallthrough;
+	default:
+		e1000_read_eeprom(&adapter->hw,
+			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+		break;
+	}
+	if (eeprom_data & eeprom_apme_mask)
+		adapter->wol |= E1000_WUFC_MAG;
+
+	/* print bus type/speed/width info */
+	{
+	struct e1000_hw *hw = &adapter->hw;
+	DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+		((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
+		 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
+		((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+		 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
+		 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
+		 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
+		 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
+		((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
+		 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
+		 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
+		 "32-bit"));
+	}
+
+	printk(KERN_INFO "e1000: hw ");
+	for (i = 0; i < 6; i++)
+		printk(KERN_CONT "%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
+
+	/* reset the hardware with the new settings */
+	e1000_reset(adapter);
+
+	/* If the controller is 82573 and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up.  For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (adapter->hw.mac_type != e1000_82573 ||
+	    !e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
+
+	strcpy(netdev->name, "rteth%d");
+	if ((err = rt_register_rtnetdev(netdev)))
+		goto err_register;
+
+	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
+
+	return 0;
+
+err_register:
+err_eeprom:
+	if (adapter->hw.flash_address)
+		iounmap(adapter->hw.flash_address);
+err_flashmap:
+err_sw_init:
+	iounmap(adapter->hw.hw_addr);
+err_ioremap:
+	rtdev_free(netdev);
+err_alloc_etherdev:
+	pci_release_regions(pdev);
+	return err;
+}
+
+/**
+ * e1000_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * e1000_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+
+static void e1000_remove(struct pci_dev *pdev)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+	uint32_t manc;
+
+	e1000_down_and_stop(adapter);
+
+	if (adapter->hw.mac_type >= e1000_82540 &&
+	   adapter->hw.mac_type != e1000_ich8lan &&
+	   adapter->hw.media_type == e1000_media_type_copper) {
+		manc = E1000_READ_REG(&adapter->hw, MANC);
+		if (manc & E1000_MANC_SMBUS_EN) {
+			manc |= E1000_MANC_ARP_EN;
+			E1000_WRITE_REG(&adapter->hw, MANC, manc);
+		}
+	}
+
+	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant. */
+	e1000_release_hw_control(adapter);
+
+	rt_unregister_rtnetdev(netdev);
+
+	if (!e1000_check_phy_reset_block(&adapter->hw))
+		e1000_phy_hw_reset(&adapter->hw);
+
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+
+
+	iounmap(adapter->hw.hw_addr);
+	if (adapter->hw.flash_address)
+		iounmap(adapter->hw.flash_address);
+	pci_release_regions(pdev);
+
+	rtdev_free(netdev);
+
+	pci_disable_device(pdev);
+}
+
+/**
+ * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * e1000_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+
+static int e1000_sw_init(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct rtnet_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+
+	/* PCI config space info */
+
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_id = pdev->subsystem_device;
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+
+	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
+	adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE;
+	adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
+	hw->max_frame_size = netdev->mtu +
+			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
+
+	/* identify the MAC */
+
+	if (e1000_set_mac_type(hw)) {
+		DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
+		return -EIO;
+	}
+
+	switch (hw->mac_type) {
+	default:
+		break;
+	case e1000_82541:
+	case e1000_82547:
+	case e1000_82541_rev_2:
+	case e1000_82547_rev_2:
+		hw->phy_init_script = 1;
+		break;
+	}
+
+	e1000_set_media_type(hw);
+
+	hw->wait_autoneg_complete = FALSE;
+	hw->tbi_compatibility_en = TRUE;
+	hw->adaptive_ifs = FALSE;
+
+	/* Copper options */
+
+	if (hw->media_type == e1000_media_type_copper) {
+		hw->mdix = AUTO_ALL_MODES;
+		hw->disable_polarity_correction = FALSE;
+		hw->master_slave = E1000_MASTER_SLAVE;
+	}
+
+	adapter->num_tx_queues = 1;
+	adapter->num_rx_queues = 1;
+
+
+	if (e1000_alloc_queues(adapter)) {
+		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
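+	/* Start with irq_sem at 1 so interrupts stay masked until e1000_up()
+	 * balances it with e1000_irq_enable(). */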
+	atomic_set(&adapter->irq_sem, 1);
+
+	return 0;
+}
+
+/**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time.  The polling_netdev array is
+ * intended for Multiqueue, but should work fine with a single queue.
+ **/
+
+static int e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+	int size;
+
+	size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
+	adapter->tx_ring = kmalloc(size, GFP_KERNEL);
+	if (!adapter->tx_ring)
+		return -ENOMEM;
+	memset(adapter->tx_ring, 0, size);
+
+	size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
+	adapter->rx_ring = kmalloc(size, GFP_KERNEL);
+	if (!adapter->rx_ring) {
+		kfree(adapter->tx_ring);
+		return -ENOMEM;
+	}
+	memset(adapter->rx_ring, 0, size);
+
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+
+static int
+e1000_open(struct rtnet_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	int err;
+
+	/* disallow open during test */
+	if (test_bit(__E1000_DRIVER_TESTING, &adapter->flags))
+		return -EBUSY;
+
+	/* allocate transmit descriptors */
+
+	if ((err = e1000_setup_all_tx_resources(adapter)))
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+
+	if ((err = e1000_setup_all_rx_resources(adapter)))
+		goto err_setup_rx;
+
+	err = e1000_request_irq(adapter);
+	if (err)
+		goto err_up;
+
+	e1000_power_up_phy(adapter);
+
+	if ((err = e1000_up(adapter)))
+		goto err_up;
+
+	/* If AMT is enabled, let the firmware know that the network
+	 * interface is now open */
+	if (adapter->hw.mac_type == e1000_82573 &&
+	    e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
+
+	/* Wait for the hardware to come up */
+	msleep(3000);
+
+	return E1000_SUCCESS;
+
+err_up:
+	e1000_free_all_rx_resources(adapter);
+err_setup_rx:
+	e1000_free_all_tx_resources(adapter);
+err_setup_tx:
+	e1000_reset(adapter);
+
+	return err;
+}
+
+/**
+ * e1000_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the drivers control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+
+static int
+e1000_close(struct rtnet_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+
+	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+	e1000_down(adapter);
+	e1000_power_down_phy(adapter);
+	e1000_free_irq(adapter);
+
+	e1000_free_all_tx_resources(adapter);
+	e1000_free_all_rx_resources(adapter);
+
+
+	/* If AMT is enabled, let the firmware know that the network
+	 * interface is now closed */
+	if (adapter->hw.mac_type == e1000_82573 &&
+	    e1000_check_mng_mode(&adapter->hw))
+		e1000_release_hw_control(adapter);
+
+	return 0;
+}
+
+/**
+ * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
+ * @adapter: address of board private structure
+ * @start: address of beginning of memory
+ * @len: length of memory
+ **/
+static boolean_t
+e1000_check_64k_bound(struct e1000_adapter *adapter,
+		      void *start, unsigned long len)
+{
+	unsigned long begin = (unsigned long) start;
+	unsigned long end = begin + len;
+
+	/* First rev 82545 and 82546 need to not allow any memory
+	 * write location to cross 64k boundary due to errata 23 */
+	if (adapter->hw.mac_type == e1000_82545 ||
+	    adapter->hw.mac_type == e1000_82546) {
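+		/* begin and (end - 1) lie in the same 64kB page exactly when
+		 * XOR-ing them leaves no bits above bit 15 set. */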
+		return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
+	}
+
+	return TRUE;
+}
+
+/**
+ * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @txdr:    tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+static int
+e1000_setup_tx_resources(struct e1000_adapter *adapter,
+			 struct e1000_tx_ring *txdr)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size;
+
+	size = sizeof(struct e1000_buffer) * txdr->count;
+	txdr->buffer_info = vmalloc(size);
+	if (!txdr->buffer_info) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the transmit descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(txdr->buffer_info, 0, size);
+
+	/* round up to nearest 4K */
+
+	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
+	E1000_ROUNDUP(txdr->size, 4096);
+
+	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+	if (!txdr->desc) {
+setup_tx_desc_die:
+		vfree(txdr->buffer_info);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the transmit descriptor ring\n");
+		return -ENOMEM;
+	}
+
+	/* Fix for errata 23, can't cross 64kB boundary */
+	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
+		void *olddesc = txdr->desc;
+		dma_addr_t olddma = txdr->dma;
+		DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
+				     "at %p\n", txdr->size, txdr->desc);
+		/* Try again, without freeing the previous */
+		txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+		/* Failed allocation, critical failure */
+		if (!txdr->desc) {
+			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+			goto setup_tx_desc_die;
+		}
+
+		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
+			/* give up */
+			pci_free_consistent(pdev, txdr->size, txdr->desc,
+					    txdr->dma);
+			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate aligned memory "
+				"for the transmit descriptor ring\n");
+			vfree(txdr->buffer_info);
+			return -ENOMEM;
+		} else {
+			/* Free old allocation, new allocation was successful */
+			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+		}
+	}
+	memset(txdr->desc, 0, txdr->size);
+
+	txdr->next_to_use = 0;
+	txdr->next_to_clean = 0;
+	rtdm_lock_init(&txdr->tx_lock);
+
+	return 0;
+}
+
+/**
+ * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
+ *				  (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean up those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int
+e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Tx Queue %u failed\n", i);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+
+static void
+e1000_configure_tx(struct e1000_adapter *adapter)
+{
+	uint64_t tdba;
+	struct e1000_hw *hw = &adapter->hw;
+	uint32_t tdlen, tctl, tipg, tarc;
+	uint32_t ipgr1, ipgr2;
+
+	/* Setup the HW Tx Head and Tail descriptor pointers */
+
+	switch (adapter->num_tx_queues) {
+	case 1:
+	default:
+		tdba = adapter->tx_ring[0].dma;
+		tdlen = adapter->tx_ring[0].count *
+			sizeof(struct e1000_tx_desc);
+		E1000_WRITE_REG(hw, TDLEN, tdlen);
+		E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
+		E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
+		E1000_WRITE_REG(hw, TDT, 0);
+		E1000_WRITE_REG(hw, TDH, 0);
+		adapter->tx_ring[0].tdh = E1000_TDH;
+		adapter->tx_ring[0].tdt = E1000_TDT;
+		break;
+	}
+
+	/* Set the default values for the Tx Inter Packet Gap timer */
+
+	if (hw->media_type == e1000_media_type_fiber ||
+	    hw->media_type == e1000_media_type_internal_serdes)
+		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
+	else
+		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
+
+	switch (hw->mac_type) {
+	case e1000_82542_rev2_0:
+	case e1000_82542_rev2_1:
+		tipg = DEFAULT_82542_TIPG_IPGT;
+		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
+		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
+		break;
+	case e1000_80003es2lan:
+		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
+		break;
+	default:
+		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
+		break;
+	}
+	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
+	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
+	E1000_WRITE_REG(hw, TIPG, tipg);
+
+	/* Set the Tx Interrupt Delay register */
+
+	E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
+	if (hw->mac_type >= e1000_82540)
+		E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);
+
+	/* Program the Transmit Control Register */
+
+	tctl = E1000_READ_REG(hw, TCTL);
+
+	tctl &= ~E1000_TCTL_CT;
+	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+#ifdef DISABLE_MULR
+	/* disable Multiple Reads for debugging */
+	tctl &= ~E1000_TCTL_MULR;
+#endif
+
+	if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
+		tarc = E1000_READ_REG(hw, TARC0);
+		tarc |= ((1 << 25) | (1 << 21));
+		E1000_WRITE_REG(hw, TARC0, tarc);
+		tarc = E1000_READ_REG(hw, TARC1);
+		tarc |= (1 << 25);
+		if (tctl & E1000_TCTL_MULR)
+			tarc &= ~(1 << 28);
+		else
+			tarc |= (1 << 28);
+		E1000_WRITE_REG(hw, TARC1, tarc);
+	} else if (hw->mac_type == e1000_80003es2lan) {
+		tarc = E1000_READ_REG(hw, TARC0);
+		tarc |= 1;
+		if (hw->media_type == e1000_media_type_internal_serdes)
+			tarc |= (1 << 20);
+		E1000_WRITE_REG(hw, TARC0, tarc);
+		tarc = E1000_READ_REG(hw, TARC1);
+		tarc |= 1;
+		E1000_WRITE_REG(hw, TARC1, tarc);
+	}
+
+	e1000_config_collision_dist(hw);
+
+	/* Setup Transmit Descriptor Settings for eop descriptor */
+	adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
+		E1000_TXD_CMD_IFCS;
+
+	if (hw->mac_type < e1000_82543)
+		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
+	else
+		adapter->txd_cmd |= E1000_TXD_CMD_RS;
+
+	/* Cache if we're 82544 running in PCI-X because we'll
+	 * need this to apply a workaround later in the send path. */
+	if (hw->mac_type == e1000_82544 &&
+	    hw->bus_type == e1000_bus_type_pcix)
+		adapter->pcix_82544 = 1;
+
+	E1000_WRITE_REG(hw, TCTL, tctl);
+
+}
+
+/**
+ * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rxdr:    rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int
+e1000_setup_rx_resources(struct e1000_adapter *adapter,
+			 struct e1000_rx_ring *rxdr)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size, desc_len;
+
+	size = sizeof(struct e1000_buffer) * rxdr->count;
+	rxdr->buffer_info = vmalloc(size);
+	if (!rxdr->buffer_info) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(rxdr->buffer_info, 0, size);
+
+	size = sizeof(struct e1000_ps_page) * rxdr->count;
+	rxdr->ps_page = kmalloc(size, GFP_KERNEL);
+	if (!rxdr->ps_page) {
+		vfree(rxdr->buffer_info);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(rxdr->ps_page, 0, size);
+
+	size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
+	rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
+	if (!rxdr->ps_page_dma) {
+		vfree(rxdr->buffer_info);
+		kfree(rxdr->ps_page);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(rxdr->ps_page_dma, 0, size);
+
+	if (adapter->hw.mac_type <= e1000_82547_rev_2)
+		desc_len = sizeof(struct e1000_rx_desc);
+	else
+		desc_len = sizeof(union e1000_rx_desc_packet_split);
+
+	/* Round up to nearest 4K */
+
+	rxdr->size = rxdr->count * desc_len;
+	E1000_ROUNDUP(rxdr->size, 4096);
+
+	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+
+	if (!rxdr->desc) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+setup_rx_desc_die:
+		vfree(rxdr->buffer_info);
+		kfree(rxdr->ps_page);
+		kfree(rxdr->ps_page_dma);
+		return -ENOMEM;
+	}
+
+	/* Fix for errata 23, can't cross 64kB boundary */
+	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
+		void *olddesc = rxdr->desc;
+		dma_addr_t olddma = rxdr->dma;
+		DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
+				     "at %p\n", rxdr->size, rxdr->desc);
+		/* Try again, without freeing the previous */
+		rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+		/* Failed allocation, critical failure */
+		if (!rxdr->desc) {
+			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate memory "
+				"for the receive descriptor ring\n");
+			goto setup_rx_desc_die;
+		}
+
+		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
+			/* give up */
+			pci_free_consistent(pdev, rxdr->size, rxdr->desc,
+					    rxdr->dma);
+			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate aligned memory "
+				"for the receive descriptor ring\n");
+			goto setup_rx_desc_die;
+		} else {
+			/* Free old allocation, new allocation was successful */
+			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+		}
+	}
+	memset(rxdr->desc, 0, rxdr->size);
+
+	rxdr->next_to_clean = 0;
+	rxdr->next_to_use = 0;
+
+	return 0;
+}
+
+/**
+ * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
+ *				  (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean up those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int
+e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Rx Queue %u failed\n", i);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ * e1000_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+			(((S) & (PAGE_SIZE - 1)) ? 1 : 0))
+static void
+e1000_setup_rctl(struct e1000_adapter *adapter)
+{
+	uint32_t rctl;
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+	uint32_t pages = 0;
+#endif
+
+	rctl = E1000_READ_REG(&adapter->hw, RCTL);
+
+	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+
+	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
+		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+		(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+	/* FIXME: disable the stripping of CRC because it breaks
+	 * BMC firmware connected over SMBUS
+	if (adapter->hw.mac_type > e1000_82543)
+		rctl |= E1000_RCTL_SECRC;
+	*/
+
+	if (adapter->hw.tbi_compatibility_on == 1)
+		rctl |= E1000_RCTL_SBP;
+	else
+		rctl &= ~E1000_RCTL_SBP;
+
+	if (adapter->netdev->mtu <= ETH_DATA_LEN)
+		rctl &= ~E1000_RCTL_LPE;
+	else
+		rctl |= E1000_RCTL_LPE;
+
+	/* Setup buffer sizes */
+	rctl &= ~E1000_RCTL_SZ_4096;
+	rctl |= E1000_RCTL_BSEX;
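+	/* Buffer sizes of 2048 bytes or less use the base size encoding, so
+	 * those cases clear BSEX again; the larger sizes rely on BSEX to
+	 * scale the encoded size. */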
+	switch (adapter->rx_buffer_len) {
+		case E1000_RXBUFFER_256:
+			rctl |= E1000_RCTL_SZ_256;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_512:
+			rctl |= E1000_RCTL_SZ_512;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_1024:
+			rctl |= E1000_RCTL_SZ_1024;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_2048:
+		default:
+			rctl |= E1000_RCTL_SZ_2048;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_4096:
+			rctl |= E1000_RCTL_SZ_4096;
+			break;
+		case E1000_RXBUFFER_8192:
+			rctl |= E1000_RCTL_SZ_8192;
+			break;
+		case E1000_RXBUFFER_16384:
+			rctl |= E1000_RCTL_SZ_16384;
+			break;
+	}
+
+	adapter->rx_ps_pages = 0;
+
+	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+}
+
+/**
+ * e1000_configure_rx - Configure 8254x Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+
+static void
+e1000_configure_rx(struct e1000_adapter *adapter)
+{
+	uint64_t rdba;
+	struct e1000_hw *hw = &adapter->hw;
+	uint32_t rdlen, rctl, rxcsum, ctrl_ext;
+
+	{
+		rdlen = adapter->rx_ring[0].count *
+			sizeof(struct e1000_rx_desc);
+		adapter->clean_rx = NULL; /* unused */
+		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+	}
+
+	/* disable receives while setting up the descriptors */
+	rctl = E1000_READ_REG(hw, RCTL);
+	E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
+
+	/* set the Receive Delay Timer Register */
+	E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
+
+	if (hw->mac_type >= e1000_82540) {
+		E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
+		if (adapter->itr > 1)
+			E1000_WRITE_REG(hw, ITR,
+				1000000000 / (adapter->itr * 256));
+	}
+
+	if (hw->mac_type >= e1000_82571) {
+		ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+		/* Reset delay timers after every interrupt */
+		ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
+		E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+		E1000_WRITE_REG(hw, IAM, ~0);
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring */
+	switch (adapter->num_rx_queues) {
+	case 1:
+	default:
+		rdba = adapter->rx_ring[0].dma;
+		E1000_WRITE_REG(hw, RDLEN, rdlen);
+		E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
+		E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
+		E1000_WRITE_REG(hw, RDT, 0);
+		E1000_WRITE_REG(hw, RDH, 0);
+		adapter->rx_ring[0].rdh = E1000_RDH;
+		adapter->rx_ring[0].rdt = E1000_RDT;
+		break;
+	}
+
+	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
+	if (hw->mac_type >= e1000_82543) {
+		rxcsum = E1000_READ_REG(hw, RXCSUM);
+		if (adapter->rx_csum == TRUE) {
+			rxcsum |= E1000_RXCSUM_TUOFL;
+
+		} else {
+			rxcsum &= ~E1000_RXCSUM_TUOFL;
+			/* don't need to clear IPPCSE as it defaults to 0 */
+		}
+		E1000_WRITE_REG(hw, RXCSUM, rxcsum);
+	}
+
+
+	/* Enable Receives */
+	E1000_WRITE_REG(hw, RCTL, rctl);
+}
+
+/**
+ * e1000_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+
+static void
+e1000_free_tx_resources(struct e1000_adapter *adapter,
+			struct e1000_tx_ring *tx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	e1000_clean_tx_ring(adapter, tx_ring);
+
+	vfree(tx_ring->buffer_info);
+	tx_ring->buffer_info = NULL;
+
+	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+
+	tx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+
+void
+e1000_free_all_tx_resources(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
+}
+
+static void
+e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
+			struct e1000_buffer *buffer_info)
+{
+	if (buffer_info->dma) {
+		pci_unmap_page(adapter->pdev,
+				buffer_info->dma,
+				buffer_info->length,
+				PCI_DMA_TODEVICE);
+	}
+	if (buffer_info->skb)
+		kfree_rtskb(buffer_info->skb);
+	memset(buffer_info, 0, sizeof(struct e1000_buffer));
+}
+
+/**
+ * e1000_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+
+static void
+e1000_clean_tx_ring(struct e1000_adapter *adapter,
+		    struct e1000_tx_ring *tx_ring)
+{
+	struct e1000_buffer *buffer_info;
+	unsigned long size;
+	unsigned int i;
+
+	/* Free all the Tx ring sk_buffs */
+
+	for (i = 0; i < tx_ring->count; i++) {
+		buffer_info = &tx_ring->buffer_info[i];
+		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+	}
+
+	size = sizeof(struct e1000_buffer) * tx_ring->count;
+	memset(tx_ring->buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+	tx_ring->last_tx_tso = 0;
+
+	writel(0, adapter->hw.hw_addr + tx_ring->tdh);
+	writel(0, adapter->hw.hw_addr + tx_ring->tdt);
+}
+
+/**
+ * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+/**
+ * e1000_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+
+static void
+e1000_free_rx_resources(struct e1000_adapter *adapter,
+			struct e1000_rx_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	e1000_clean_rx_ring(adapter, rx_ring);
+
+	vfree(rx_ring->buffer_info);
+	rx_ring->buffer_info = NULL;
+	kfree(rx_ring->ps_page);
+	rx_ring->ps_page = NULL;
+	kfree(rx_ring->ps_page_dma);
+	rx_ring->ps_page_dma = NULL;
+
+	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+
+	rx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+
+void
+e1000_free_all_rx_resources(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+
+static void
+e1000_clean_rx_ring(struct e1000_adapter *adapter,
+		    struct e1000_rx_ring *rx_ring)
+{
+	struct e1000_buffer *buffer_info;
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned long size;
+	unsigned int i;
+
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rx_ring->count; i++) {
+		buffer_info = &rx_ring->buffer_info[i];
+		if (buffer_info->skb) {
+			pci_unmap_single(pdev,
+					 buffer_info->dma,
+					 buffer_info->length,
+					 PCI_DMA_FROMDEVICE);
+
+			kfree_rtskb(buffer_info->skb);
+			buffer_info->skb = NULL;
+		}
+	}
+
+	size = sizeof(struct e1000_buffer) * rx_ring->count;
+	memset(rx_ring->buffer_info, 0, size);
+	size = sizeof(struct e1000_ps_page) * rx_ring->count;
+	memset(rx_ring->ps_page, 0, size);
+	size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
+	memset(rx_ring->ps_page_dma, 0, size);
+
+	/* Zero out the descriptor ring */
+
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	writel(0, adapter->hw.hw_addr + rx_ring->rdh);
+	writel(0, adapter->hw.hw_addr + rx_ring->rdt);
+}
+
+/**
+ * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
+ * and memory write and invalidate disabled for certain operations
+ */
+static void
+e1000_enter_82542_rst(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	uint32_t rctl;
+
+	e1000_pci_clear_mwi(&adapter->hw);
+
+	rctl = E1000_READ_REG(&adapter->hw, RCTL);
+	rctl |= E1000_RCTL_RST;
+	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+	E1000_WRITE_FLUSH(&adapter->hw);
+	mdelay(5);
+
+	if (rtnetif_running(netdev))
+		e1000_clean_all_rx_rings(adapter);
+}
+
+static void
+e1000_leave_82542_rst(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	uint32_t rctl;
+
+	rctl = E1000_READ_REG(&adapter->hw, RCTL);
+	rctl &= ~E1000_RCTL_RST;
+	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+	E1000_WRITE_FLUSH(&adapter->hw);
+	mdelay(5);
+
+	if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
+		e1000_pci_set_mwi(&adapter->hw);
+
+	if (rtnetif_running(netdev)) {
+		/* No need to loop, because 82542 supports only 1 queue */
+		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
+		e1000_configure_rx(adapter);
+		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
+	}
+}
+
+/**
+ * e1000_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+
+static void
+e1000_set_multi(struct rtnet_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	uint32_t rctl;
+	int i, rar_entries = E1000_RAR_ENTRIES;
+	int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
+				E1000_NUM_MTA_REGISTERS_ICH8LAN :
+				E1000_NUM_MTA_REGISTERS;
+
+	if (adapter->hw.mac_type == e1000_ich8lan)
+		rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
+
+	/* reserve RAR[14] for LAA over-write work-around */
+	if (adapter->hw.mac_type == e1000_82571)
+		rar_entries--;
+
+	/* Check for Promiscuous and All Multicast modes */
+
+	rctl = E1000_READ_REG(hw, RCTL);
+
+	if (netdev->flags & IFF_PROMISC) {
+		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+	} else if (netdev->flags & IFF_ALLMULTI) {
+		rctl |= E1000_RCTL_MPE;
+		rctl &= ~E1000_RCTL_UPE;
+	} else {
+		rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+	}
+
+	E1000_WRITE_REG(hw, RCTL, rctl);
+
+	/* 82542 2.0 needs to be in reset to write receive address registers */
+
+	if (hw->mac_type == e1000_82542_rev2_0)
+		e1000_enter_82542_rst(adapter);
+
+	/* RAR 0 is used for the station MAC address.  Since this port does
+	 * not maintain a multicast address list, simply clear the remaining
+	 * exact-match filters -- with 82571 controllers the last entry is
+	 * reserved for the LAA work-around above.
+	 */
+
+	for (i = 1; i < rar_entries; i++) {
+		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
+		E1000_WRITE_FLUSH(hw);
+		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	/* clear the old settings from the multicast hash table */
+
+	for (i = 0; i < mta_reg_count; i++) {
+		E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	if (hw->mac_type == e1000_82542_rev2_0)
+		e1000_leave_82542_rst(adapter);
+}
+
+/**
+ * e1000_update_phy_info_task - get phy info
+ * @work: work struct contained inside adapter struct
+ *
+ * Need to wait a few seconds after link up to get diagnostic information from
+ * the phy
+ */
+static void e1000_update_phy_info_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+						     struct e1000_adapter,
+						     phy_info_task.work);
+	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
+}
+
+/**
+ * e1000_82547_tx_fifo_stall_task - work handler for the 82547 Tx FIFO stall workaround
+ * @work: work struct contained inside adapter struct
+ **/
+
+static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+						     struct e1000_adapter,
+						     fifo_stall_task.work);
+	struct rtnet_device *netdev = adapter->netdev;
+	uint32_t tctl;
+
+	if (atomic_read(&adapter->tx_fifo_stall)) {
+		if ((E1000_READ_REG(&adapter->hw, TDT) ==
+		    E1000_READ_REG(&adapter->hw, TDH)) &&
+		   (E1000_READ_REG(&adapter->hw, TDFT) ==
+		    E1000_READ_REG(&adapter->hw, TDFH)) &&
+		   (E1000_READ_REG(&adapter->hw, TDFTS) ==
+		    E1000_READ_REG(&adapter->hw, TDFHS))) {
+			tctl = E1000_READ_REG(&adapter->hw, TCTL);
+			E1000_WRITE_REG(&adapter->hw, TCTL,
+					tctl & ~E1000_TCTL_EN);
+			E1000_WRITE_REG(&adapter->hw, TDFT,
+					adapter->tx_head_addr);
+			E1000_WRITE_REG(&adapter->hw, TDFH,
+					adapter->tx_head_addr);
+			E1000_WRITE_REG(&adapter->hw, TDFTS,
+					adapter->tx_head_addr);
+			E1000_WRITE_REG(&adapter->hw, TDFHS,
+					adapter->tx_head_addr);
+			E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
+			E1000_WRITE_FLUSH(&adapter->hw);
+
+			adapter->tx_fifo_head = 0;
+			atomic_set(&adapter->tx_fifo_stall, 0);
+			rtnetif_wake_queue(netdev);
+		} else {
+			schedule_delayed_work(&adapter->fifo_stall_task, 1);
+		}
+	}
+}
+
+/**
+ * e1000_watchdog - work function
+ * @work: work struct contained inside adapter struct
+ **/
+static void e1000_watchdog(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+						     struct e1000_adapter,
+						     watchdog_task.work);
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_tx_ring *txdr = adapter->tx_ring;
+	uint32_t link, tctl;
+	int32_t ret_val;
+
+	ret_val = e1000_check_for_link(&adapter->hw);
+	if ((ret_val == E1000_ERR_PHY) &&
+	    (adapter->hw.phy_type == e1000_phy_igp_3) &&
+	    (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+		/* See e1000_kumeran_lock_loss_workaround() */
+		DPRINTK(LINK, INFO,
+			"Gigabit has been disabled, downgrading speed\n");
+	}
+	if (adapter->hw.mac_type == e1000_82573) {
+		e1000_enable_tx_pkt_filtering(&adapter->hw);
+	}
+
+	if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
+	   !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
+		link = !adapter->hw.serdes_link_down;
+	else
+		link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
+
+	if (link) {
+		if (!rtnetif_carrier_ok(netdev)) {
+			boolean_t txb2b = 1;
+			e1000_get_speed_and_duplex(&adapter->hw,
+						   &adapter->link_speed,
+						   &adapter->link_duplex);
+
+			DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
+			       adapter->link_speed,
+			       adapter->link_duplex == FULL_DUPLEX ?
+			       "Full Duplex" : "Half Duplex");
+
+			/* tweak tx_queue_len according to speed/duplex
+			 * and adjust the timeout factor */
+			// TODO makoehre netdev->tx_queue_len = adapter->tx_queue_len;
+			adapter->tx_timeout_factor = 1;
+			switch (adapter->link_speed) {
+			case SPEED_10:
+				txb2b = 0;
+				// TODO makoehre netdev->tx_queue_len = 10;
+				adapter->tx_timeout_factor = 8;
+				break;
+			case SPEED_100:
+				txb2b = 0;
+				// TODO makoehre netdev->tx_queue_len = 100;
+				/* maybe add some timeout factor ? */
+				break;
+			}
+
+			if ((adapter->hw.mac_type == e1000_82571 ||
+			     adapter->hw.mac_type == e1000_82572) &&
+			    txb2b == 0) {
+#define SPEED_MODE_BIT (1 << 21)
+				uint32_t tarc0;
+				tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
+				tarc0 &= ~SPEED_MODE_BIT;
+				E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
+			}
+
+
+			/* enable transmits in the hardware, need to do this
+			 * after setting TARC0 */
+			tctl = E1000_READ_REG(&adapter->hw, TCTL);
+			tctl |= E1000_TCTL_EN;
+			E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
+
+			rtnetif_carrier_on(netdev);
+			rtnetif_wake_queue(netdev);
+			schedule_delayed_work(&adapter->phy_info_task, 2 * HZ);
+			adapter->smartspeed = 0;
+		}
+	} else {
+		if (rtnetif_carrier_ok(netdev)) {
+			adapter->link_speed = 0;
+			adapter->link_duplex = 0;
+			DPRINTK(LINK, INFO, "NIC Link is Down\n");
+			rtnetif_carrier_off(netdev);
+			rtnetif_stop_queue(netdev);
+			schedule_delayed_work(&adapter->phy_info_task, 2 * HZ);
+
+			/* 80003ES2LAN workaround--
+			 * For packet buffer work-around on link down event;
+			 * disable receives in the ISR and
+			 * reset device here in the watchdog
+			 */
+			if (adapter->hw.mac_type == e1000_80003es2lan)
+				/* reset device */
+				schedule_work(&adapter->reset_task);
+		}
+
+		e1000_smartspeed(adapter);
+	}
+
+
+	adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+	adapter->tpt_old = adapter->stats.tpt;
+	adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
+	adapter->colc_old = adapter->stats.colc;
+
+	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
+	adapter->gorcl_old = adapter->stats.gorcl;
+	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
+	adapter->gotcl_old = adapter->stats.gotcl;
+
+	// e1000_update_adaptive(&adapter->hw);
+
+	if (!rtnetif_carrier_ok(netdev)) {
+		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
+			/* We've lost link, so the controller stops DMA,
+			 * but we've got queued Tx work that's never going
+			 * to get done, so reset controller to flush Tx.
+			 * (Do the reset outside of interrupt context). */
+			adapter->tx_timeout_count++;
+			schedule_work(&adapter->reset_task);
+		}
+	}
+
+	/* Dynamic mode for Interrupt Throttle Rate (ITR) */
+	if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
+		/* Symmetric Tx/Rx gets a reduced ITR=2000; totally
+		 * asymmetric Tx or Rx gets ITR=8000; everything
+		 * else falls between 2000 and 8000. */
+		uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
+		uint32_t dif = (adapter->gotcl > adapter->gorcl ?
+			adapter->gotcl - adapter->gorcl :
+			adapter->gorcl - adapter->gotcl) / 10000;
+		uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
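+		/* The ITR register counts in 256 ns units, so writing
+		 * 10^9 / (itr * 256) yields roughly itr interrupts per
+		 * second. */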
+		E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
+	}
+
+	/* Cause software interrupt to ensure rx ring is cleaned */
+	E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
+
+	/* Force detection of hung controller every watchdog period */
+	adapter->detect_tx_hung = TRUE;
+
+	/* With 82571 controllers, LAA may be overwritten due to controller
+	 * reset from the other port. Set the appropriate LAA in RAR[0] */
+	if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
+		e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
+
+	/* Reschedule the task */
+	schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
+}
+
+#define E1000_TX_FLAGS_CSUM		0x00000001
+#define E1000_TX_FLAGS_VLAN		0x00000002
+#define E1000_TX_FLAGS_TSO		0x00000004
+#define E1000_TX_FLAGS_IPV4		0x00000008
+#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
+#define E1000_TX_FLAGS_VLAN_SHIFT	16
+
+
+static boolean_t
+e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+	      struct rtskb *skb)
+{
+	struct e1000_context_desc *context_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i;
+	uint8_t css;
+
+	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
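+		/* For CHECKSUM_PARTIAL the stack provides the start of the
+		 * transport header (css) and, in skb->csum, the offset of
+		 * the checksum field within that header. */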
+		css = skb->h.raw - skb->data;
+
+		i = tx_ring->next_to_use;
+		buffer_info = &tx_ring->buffer_info[i];
+		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+
+		context_desc->upper_setup.tcp_fields.tucss = css;
+		context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
+		context_desc->upper_setup.tcp_fields.tucse = 0;
+		context_desc->tcp_seg_setup.data = 0;
+		context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
+
+		buffer_info->time_stamp = jiffies;
+
+		if (unlikely(++i == tx_ring->count)) i = 0;
+		tx_ring->next_to_use = i;
+
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+#define E1000_MAX_TXD_PWR	12
+#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
+
+static int
+e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+	     struct rtskb *skb, unsigned int first, unsigned int max_per_txd,
+	     unsigned int nr_frags, unsigned int mss)
+{
+	struct e1000_buffer *buffer_info;
+	unsigned int len = skb->len;
+	unsigned int offset = 0, size, count = 0, i;
+
+	i = tx_ring->next_to_use;
+
+	while (len) {
+		buffer_info = &tx_ring->buffer_info[i];
+		size = min(len, max_per_txd);
+		/* Work-around for errata 10, which applies to all
+		 * controllers in PCI-X mode: make sure that the first
+		 * descriptor of a packet is smaller than
+		 * 2048 - 16 - 16 (i.e. 2016) bytes.
+		 */
+		if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
+				(size > 2015) && count == 0))
+			size = 2015;
+
+		/* Workaround for potential 82544 hang in PCI-X.  Avoid
+		 * terminating buffers within evenly-aligned dwords. */
+		if (unlikely(adapter->pcix_82544 &&
+		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
+		   size > 4))
+			size -= 4;
+
+		buffer_info->length = size;
+		buffer_info->dma =
+			pci_map_single(adapter->pdev,
+				skb->data + offset,
+				size,
+				PCI_DMA_TODEVICE);
+		buffer_info->time_stamp = jiffies;
+
+		len -= size;
+		offset += size;
+		count++;
+		if (unlikely(++i == tx_ring->count)) i = 0;
+	}
+
+
+	i = (i == 0) ? tx_ring->count - 1 : i - 1;
+	tx_ring->buffer_info[i].skb = skb;
+	tx_ring->buffer_info[first].next_to_watch = i;
+
+	return count;
+}
+
+static void
+e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+	       int tx_flags, int count, nanosecs_abs_t *xmit_stamp)
+{
+	struct e1000_tx_desc *tx_desc = NULL;
+	struct e1000_buffer *buffer_info;
+	uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
+	unsigned int i;
+
+
+	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
+		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+	}
+
+	i = tx_ring->next_to_use;
+
+	while (count--) {
+		buffer_info = &tx_ring->buffer_info[i];
+		tx_desc = E1000_TX_DESC(*tx_ring, i);
+		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+		tx_desc->lower.data =
+			cpu_to_le32(txd_lower | buffer_info->length);
+		tx_desc->upper.data = cpu_to_le32(txd_upper);
+		if (unlikely(++i == tx_ring->count)) i = 0;
+	}
+
+	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
+
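+	/* If the caller provided a time-stamp slot (e.g. for RTnet packet
+	 * capture), turn the pre-set offset into an absolute transmission
+	 * time in network byte order just before ringing the doorbell. */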
+	if (xmit_stamp)
+		*xmit_stamp = cpu_to_be64(rtdm_clock_read() + *xmit_stamp);
+
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64). */
+	wmb();
+
+	tx_ring->next_to_use = i;
+	writel(i, adapter->hw.hw_addr + tx_ring->tdt);
+}
+
+/**
+ * e1000_82547_fifo_workaround - avoid 82547 controller hang in half-duplex
+ *
+ * The workaround is to avoid queuing a large packet that would span
+ * the internal Tx FIFO ring boundary by notifying the stack to resend
+ * the packet at a later time.  This gives the Tx FIFO an opportunity to
+ * flush all packets.  When that occurs, we reset the Tx FIFO pointers
+ * to the beginning of the Tx FIFO.
+ **/
+
+#define E1000_FIFO_HDR			0x10
+#define E1000_82547_PAD_LEN		0x3E0
+
+static int
+e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct rtskb *skb)
+{
+	uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
+	uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
+
+	E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
+
+	if (adapter->link_duplex != HALF_DUPLEX)
+		goto no_fifo_stall_required;
+
+	if (atomic_read(&adapter->tx_fifo_stall))
+		return 1;
+
+	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
+		atomic_set(&adapter->tx_fifo_stall, 1);
+		return 1;
+	}
+
+no_fifo_stall_required:
+	adapter->tx_fifo_head += skb_fifo_len;
+	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
+		adapter->tx_fifo_head -= adapter->tx_fifo_size;
+	return 0;
+}
+
+#define MINIMUM_DHCP_PACKET_SIZE 282
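+/* 282 = 14 (Ethernet) + 20 (IP) + 8 (UDP) + 240 (fixed DHCP/BOOTP portion) */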
+static int
+e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct rtskb *skb)
+{
+	struct e1000_hw *hw =  &adapter->hw;
+	uint16_t length, offset;
+	if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
+		struct ethhdr *eth = (struct ethhdr *) skb->data;
+		if ((htons(ETH_P_IP) == eth->h_proto)) {
+			const struct iphdr *ip =
+				(struct iphdr *)((uint8_t *)skb->data+14);
+			if (IPPROTO_UDP == ip->protocol) {
+				struct udphdr *udp =
+					(struct udphdr *)((uint8_t *)ip +
+						(ip->ihl << 2));
+				if (ntohs(udp->dest) == 67) {
+					offset = (uint8_t *)udp + 8 - skb->data;
+					length = skb->len - offset;
+
+					return e1000_mng_write_dhcp_info(hw,
+							(uint8_t *)udp + 8,
+							length);
+				}
+			}
+		}
+	}
+	return 0;
+}
+
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
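+/* Number of Tx descriptors needed for S bytes when one descriptor holds at
+ * most 2^X bytes, e.g. TXD_USE_COUNT(4096, 12) == 2 (conservative for exact
+ * multiples). */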
+static int
+e1000_xmit_frame(struct rtskb *skb, struct rtnet_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_tx_ring *tx_ring;
+	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
+	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
+	unsigned int tx_flags = 0;
+	unsigned int len = skb->len;
+	rtdm_lockctx_t context;
+	unsigned int nr_frags = 0;
+	unsigned int mss = 0;
+	int count = 0;
+
+	/* This goes back to the question of how to logically map a tx queue
+	 * to a flow.  Right now, performance is impacted slightly negatively
+	 * if using multiple tx queues.  If the stack breaks away from a
+	 * single qdisc implementation, we can look at this again. */
+	tx_ring = adapter->tx_ring;
+
+	if (unlikely(skb->len <= 0)) {
+		kfree_rtskb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		count++;
+
+
+	count += TXD_USE_COUNT(len, max_txd_pwr);
+
+	if (adapter->pcix_82544)
+		count++;
+
+	/* Work-around for errata 10: it applies to all controllers
+	 * in PCI-X mode, so add one more descriptor to the count.
+	 */
+	if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
+			(len > 2015)))
+		count++;
+
+
+	if (adapter->hw.tx_pkt_filtering &&
+	    (adapter->hw.mac_type == e1000_82573))
+		e1000_transfer_dhcp_info(adapter, skb);
+
+	rtdm_lock_get_irqsave(&tx_ring->tx_lock, context);
+
+	/* need: count + 2 desc gap to keep tail from touching
+	 * head, otherwise try next time */
+	if (unlikely(E1000_DESC_UNUSED(tx_ring) < count + 2)) {
+		rtnetif_stop_queue(netdev);
+		rtdm_lock_put_irqrestore(&tx_ring->tx_lock, context);
+		rtdm_printk("FATAL: rt_e1000 ran into tail close to head situation!\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	if (unlikely(adapter->hw.mac_type == e1000_82547)) {
+		if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
+			rtnetif_stop_queue(netdev);
+			rtdm_lock_put_irqrestore(&tx_ring->tx_lock, context);
+
+			/* FIXME: warn the user earlier, i.e. on startup if
+			   half-duplex is detected! */
+			rtdm_printk("FATAL: rt_e1000 ran into 82547 "
+				    "controller bug!\n");
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+	first = tx_ring->next_to_use;
+
+	if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
+		tx_flags |= E1000_TX_FLAGS_CSUM;
+
+	e1000_tx_queue(adapter, tx_ring, tx_flags,
+		       e1000_tx_map(adapter, tx_ring, skb, first,
+				    max_per_txd, nr_frags, mss),
+		       skb->xmit_stamp);
+
+	rtdm_lock_put_irqrestore(&tx_ring->tx_lock, context);
+
+	return NETDEV_TX_OK;
+}
+
+/**
+ * e1000_intr - Interrupt Handler
+ * @irq_handle: RTDM IRQ handle; the interface device is retrieved from it
+ **/
+
+static int
+e1000_intr(rtdm_irq_t *irq_handle)
+    /* int irq, void *data, struct pt_regs *regs) */
+{
+
+	struct rtnet_device *netdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
+	int i;
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+	if (unlikely(!icr)) {
+		return RTDM_IRQ_NONE;  /* Not our interrupt */
+	}
+	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
+		hw->get_link_status = 1;
+		/* 80003ES2LAN workaround--
+		 * For packet buffer work-around on link down event;
+		 * disable receives here in the ISR and
+		 * reset adapter in watchdog
+		 */
+		if (rtnetif_carrier_ok(netdev) &&
+		    (adapter->hw.mac_type == e1000_80003es2lan)) {
+			/* disable receives */
+			rctl = E1000_READ_REG(hw, RCTL);
+			E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
+		}
+		/* FIXME: we need to handle this via some yet-to-be-invented
+		   error manager (Linux bottom-half and/or kthread)
+		mod_timer(&adapter->watchdog_timer, jiffies);*/
+	}
+
+	/* Writing IMC and IMS is needed for 82547.
+	 * Due to Hub Link bus being occupied, an interrupt
+	 * de-assertion message is not able to be sent.
+	 * When an interrupt assertion message is generated later,
+	 * two messages are re-ordered and sent out.
+	 * That causes APIC to think 82547 is in de-assertion
+	 * state, while 82547 is in assertion state, resulting
+	 * in deadlock. Writing IMC forces 82547 into
+	 * de-assertion state.
+	 */
+	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
+		atomic_inc(&adapter->irq_sem);
+		E1000_WRITE_REG(hw, IMC, ~0);
+	}
+
+	adapter->data_received = 0;
+
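+	/* Note: the single '&' below is intentional, so that both the Rx and
+	 * the Tx cleanup routines run on every pass. */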
+	for (i = 0; i < E1000_MAX_INTR; i++)
+		if (unlikely(!e1000_clean_rx_irq(adapter, adapter->rx_ring,
+						 &time_stamp) &
+		   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
+			break;
+
+	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
+		e1000_irq_enable(adapter);
+
+
+	if (adapter->data_received)
+		rt_mark_stack_mgr(netdev);
+	return RTDM_IRQ_HANDLED;
+}
+
+/**
+ * e1000_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ **/
+
+static boolean_t
+e1000_clean_tx_irq(struct e1000_adapter *adapter,
+		   struct e1000_tx_ring *tx_ring)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_tx_desc *tx_desc, *eop_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i, eop;
+	boolean_t cleaned = FALSE;
+
+	i = tx_ring->next_to_clean;
+	eop = tx_ring->buffer_info[i].next_to_watch;
+	eop_desc = E1000_TX_DESC(*tx_ring, eop);
+
+	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
+		for (cleaned = FALSE; !cleaned; ) {
+			tx_desc = E1000_TX_DESC(*tx_ring, i);
+			buffer_info = &tx_ring->buffer_info[i];
+			cleaned = (i == eop);
+
+			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+			memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
+
+			if (unlikely(++i == tx_ring->count)) i = 0;
+		}
+
+
+		eop = tx_ring->buffer_info[i].next_to_watch;
+		eop_desc = E1000_TX_DESC(*tx_ring, eop);
+	}
+
+	tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD 32
+	if (unlikely(cleaned && rtnetif_queue_stopped(netdev) &&
+		     rtnetif_carrier_ok(netdev))) {
+		rtdm_lock_get(&tx_ring->tx_lock);
+		if (rtnetif_queue_stopped(netdev) &&
+		    (E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))
+			rtnetif_wake_queue(netdev);
+		rtdm_lock_put(&tx_ring->tx_lock);
+	}
+
+	if (adapter->detect_tx_hung) {
+		/* Detect a transmit hang in hardware; this serializes the
+		 * check with the clearing of time_stamp and movement of i */
+		adapter->detect_tx_hung = FALSE;
+		if (tx_ring->buffer_info[eop].dma &&
+		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
+			       (adapter->tx_timeout_factor * HZ))
+		    && !(E1000_READ_REG(&adapter->hw, STATUS) &
+			 E1000_STATUS_TXOFF)) {
+
+			/* detected Tx unit hang */
+			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+					"  Tx Queue             <%lu>\n"
+					"  TDH                  <%x>\n"
+					"  TDT                  <%x>\n"
+					"  next_to_use          <%x>\n"
+					"  next_to_clean        <%x>\n"
+					"buffer_info[next_to_clean]\n"
+					"  time_stamp           <%lx>\n"
+					"  next_to_watch        <%x>\n"
+					"  jiffies              <%lx>\n"
+					"  next_to_watch.status <%x>\n",
+				(unsigned long)((tx_ring - adapter->tx_ring) /
+					sizeof(struct e1000_tx_ring)),
+				readl(adapter->hw.hw_addr + tx_ring->tdh),
+				readl(adapter->hw.hw_addr + tx_ring->tdt),
+				tx_ring->next_to_use,
+				tx_ring->next_to_clean,
+				tx_ring->buffer_info[eop].time_stamp,
+				eop,
+				jiffies,
+				eop_desc->upper.fields.status);
+			rtnetif_stop_queue(netdev);
+		}
+	}
+	return cleaned;
+}
+
+/**
+ * e1000_rx_checksum - Receive Checksum Offload for 82543
+ * @adapter:     board private structure
+ * @status_err:  receive descriptor status and error fields
+ * @csum:        receive descriptor csum field
+ * @skb:         rtskb holding the received data
+ **/
+
+static void
+e1000_rx_checksum(struct e1000_adapter *adapter,
+		  uint32_t status_err, uint32_t csum,
+		  struct rtskb *skb)
+{
+	uint16_t status = (uint16_t)status_err;
+	uint8_t errors = (uint8_t)(status_err >> 24);
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* 82543 or newer only */
+	if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
+	/* Ignore Checksum bit is set */
+	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
+	/* TCP/UDP checksum error bit is set */
+	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
+		/* let the stack verify checksum errors */
+		adapter->hw_csum_err++;
+		return;
+	}
+	/* TCP/UDP Checksum has not been calculated */
+	if (adapter->hw.mac_type <= e1000_82547_rev_2) {
+		if (!(status & E1000_RXD_STAT_TCPCS))
+			return;
+	} else {
+		if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+			return;
+	}
+	/* It must be a TCP or UDP packet with a valid checksum */
+	if (likely(status & E1000_RXD_STAT_TCPCS)) {
+		/* TCP checksum is good */
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else if (adapter->hw.mac_type > e1000_82547_rev_2) {
+		/* IP fragment with UDP payload */
+		/* Hardware complements the payload checksum, so we undo it
+		 * and then put the value in host order for further stack use.
+		 */
+		csum = ntohl(csum ^ 0xFFFF);
+		skb->csum = csum;
+		skb->ip_summed = CHECKSUM_PARTIAL;
+	}
+	adapter->hw_csum_good++;
+}
+
+/**
+ * e1000_clean_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ **/
+
+static boolean_t
+e1000_clean_rx_irq(struct e1000_adapter *adapter,
+		   struct e1000_rx_ring *rx_ring,
+		   nanosecs_abs_t *time_stamp)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc, *next_rxd;
+	struct e1000_buffer *buffer_info, *next_buffer;
+	uint32_t length;
+	uint8_t last_byte;
+	unsigned int i;
+	int cleaned_count = 0;
+	boolean_t cleaned = FALSE;
+
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (rx_desc->status & E1000_RXD_STAT_DD) {
+		struct rtskb *skb, *next_skb;
+		u8 status;
+
+		status = rx_desc->status;
+		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
+
+		prefetch(skb->data - NET_IP_ALIGN);
+
+		if (++i == rx_ring->count) i = 0;
+		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		prefetch(next_rxd);
+
+		next_buffer = &rx_ring->buffer_info[i];
+		next_skb = next_buffer->skb;
+		prefetch(next_skb->data - NET_IP_ALIGN);
+
+		cleaned = TRUE;
+		cleaned_count++;
+		pci_unmap_single(pdev,
+				 buffer_info->dma,
+				 buffer_info->length,
+				 PCI_DMA_FROMDEVICE);
+
+		length = le16_to_cpu(rx_desc->length);
+
+		if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
+			/* All receives must fit into a single buffer */
+			E1000_DBG("%s: Receive packet consumed multiple"
+				  " buffers\n", netdev->name);
+			/* recycle */
+			buffer_info->skb = skb;
+			goto next_desc;
+		}
+
+		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
+			last_byte = *(skb->data + length - 1);
+			if (TBI_ACCEPT(&adapter->hw, status,
+				      rx_desc->errors, length, last_byte)) {
+				length--;
+			} else {
+				/* recycle */
+				buffer_info->skb = skb;
+				goto next_desc;
+			}
+		}
+
+		/* The copybreak path of the Linux driver (copying small
+		 * packets into a freshly allocated buffer) is not present in
+		 * this RTnet port; just set the final packet length. */
+		rtskb_put(skb, length);
+
+		/* Receive Checksum Offload */
+		e1000_rx_checksum(adapter,
+				  (uint32_t)(status) |
+				  ((uint32_t)(rx_desc->errors) << 24),
+				  le16_to_cpu(rx_desc->csum), skb);
+
+		skb->protocol = rt_eth_type_trans(skb, netdev);
+		skb->time_stamp = *time_stamp;
+		rtnetif_rx(skb);
+		adapter->data_received = 1; // Set flag for the main interrupt routine
+
+next_desc:
+		rx_desc->status = 0;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+	}
+	rx_ring->next_to_clean = i;
+
+	cleaned_count = E1000_DESC_UNUSED(rx_ring);
+	if (cleaned_count)
+		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+
+	return cleaned;
+}
+
+/**
+ * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
+ * @adapter: address of board private structure
+ **/
+
+static void
+e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+		       struct e1000_rx_ring *rx_ring,
+		       int cleaned_count)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc;
+	struct e1000_buffer *buffer_info;
+	struct rtskb *skb;
+	unsigned int i;
+	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (cleaned_count--) {
+		if (!(skb = buffer_info->skb))
+			skb = rtnetdev_alloc_rtskb(netdev, bufsz);
+		else {
+			rtskb_trim(skb, 0);
+			goto map_skb;
+		}
+
+		if (unlikely(!skb)) {
+			/* Better luck next round */
+			adapter->alloc_rx_buff_failed++;
+			break;
+		}
+
+		/* Fix for errata 23, can't cross 64kB boundary */
+		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+			struct rtskb *oldskb = skb;
+			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
+					     "at %p\n", bufsz, skb->data);
+			/* Try again, without freeing the previous */
+			skb = rtnetdev_alloc_rtskb(netdev, bufsz);
+			/* Failed allocation, critical failure */
+			if (!skb) {
+				kfree_rtskb(oldskb);
+				break;
+			}
+
+			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+				/* give up */
+				kfree_rtskb(skb);
+				kfree_rtskb(oldskb);
+				break; /* while !buffer_info->skb */
+			} else {
+				/* Use new allocation */
+				kfree_rtskb(oldskb);
+			}
+		}
+		/* Make buffer alignment 2 beyond a 16 byte boundary
+		 * this will result in a 16 byte aligned IP header after
+		 * the 14 byte MAC header is removed
+		 */
+		rtskb_reserve(skb, NET_IP_ALIGN);
+
+		buffer_info->skb = skb;
+		buffer_info->length = adapter->rx_buffer_len;
+map_skb:
+		buffer_info->dma = pci_map_single(pdev,
+						  skb->data,
+						  adapter->rx_buffer_len,
+						  PCI_DMA_FROMDEVICE);
+
+		/* Fix for errata 23, can't cross 64kB boundary */
+		if (!e1000_check_64k_bound(adapter,
+					(void *)(unsigned long)buffer_info->dma,
+					adapter->rx_buffer_len)) {
+			DPRINTK(RX_ERR, ERR,
+				"dma align check failed: %u bytes at %p\n",
+				adapter->rx_buffer_len,
+				(void *)(unsigned long)buffer_info->dma);
+			kfree_rtskb(skb);
+			buffer_info->skb = NULL;
+
+			pci_unmap_single(pdev, buffer_info->dma,
+					 adapter->rx_buffer_len,
+					 PCI_DMA_FROMDEVICE);
+
+			break; /* while !buffer_info->skb */
+		}
+		rx_desc = E1000_RX_DESC(*rx_ring, i);
+		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+		if (unlikely(++i == rx_ring->count))
+			i = 0;
+		buffer_info = &rx_ring->buffer_info[i];
+	}
+
+	if (likely(rx_ring->next_to_use != i)) {
+		rx_ring->next_to_use = i;
+		if (unlikely(i-- == 0))
+			i = (rx_ring->count - 1);
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64). */
+		wmb();
+		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
+	}
+}
+
+
+/**
+ * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_smartspeed(struct e1000_adapter *adapter)
+{
+	uint16_t phy_status;
+	uint16_t phy_ctrl;
+
+	if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
+	   !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
+		return;
+
+	if (adapter->smartspeed == 0) {
+		/* If Master/Slave config fault is asserted twice,
+		 * we assume back-to-back */
+		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
+		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
+		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
+		if (phy_ctrl & CR_1000T_MS_ENABLE) {
+			phy_ctrl &= ~CR_1000T_MS_ENABLE;
+			e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
+					    phy_ctrl);
+			adapter->smartspeed++;
+			if (!e1000_phy_setup_autoneg(&adapter->hw) &&
+			   !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
+					       &phy_ctrl)) {
+				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+					     MII_CR_RESTART_AUTO_NEG);
+				e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
+						    phy_ctrl);
+			}
+		}
+		return;
+	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
+		/* If still no link, perhaps using 2/3 pair cable */
+		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
+		phy_ctrl |= CR_1000T_MS_ENABLE;
+		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
+		if (!e1000_phy_setup_autoneg(&adapter->hw) &&
+		   !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
+			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+				     MII_CR_RESTART_AUTO_NEG);
+			e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
+		}
+	}
+	/* Restart process after E1000_SMARTSPEED_MAX iterations */
+	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
+		adapter->smartspeed = 0;
+}
+
+
+
+void
+e1000_pci_set_mwi(struct e1000_hw *hw)
+{
+	struct e1000_adapter *adapter = hw->back;
+#ifdef HAVE_PCI_SET_MWI
+	int ret_val = pci_set_mwi(adapter->pdev);
+
+	if (ret_val)
+		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
+#else
+	pci_write_config_word(adapter->pdev, PCI_COMMAND,
+			      adapter->hw.pci_cmd_word |
+			      PCI_COMMAND_INVALIDATE);
+#endif
+}
+
+void
+e1000_pci_clear_mwi(struct e1000_hw *hw)
+{
+	struct e1000_adapter *adapter = hw->back;
+
+#ifdef HAVE_PCI_SET_MWI
+	pci_clear_mwi(adapter->pdev);
+#else
+	pci_write_config_word(adapter->pdev, PCI_COMMAND,
+			      adapter->hw.pci_cmd_word &
+			      ~PCI_COMMAND_INVALIDATE);
+#endif
+}
+
+void
+e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
+{
+	struct e1000_adapter *adapter = hw->back;
+
+	pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void
+e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
+{
+	struct e1000_adapter *adapter = hw->back;
+
+	pci_write_config_word(adapter->pdev, reg, *value);
+}
+
+uint32_t
+e1000_io_read(struct e1000_hw *hw, unsigned long port)
+{
+	return inl(port);
+}
+
+void
+e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
+{
+	outl(value, port);
+}
+
+
+int
+e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
+{
+	adapter->hw.autoneg = 0;
+
+	/* Fiber NICs only allow 1000 Mbps full duplex */
+	if ((adapter->hw.media_type == e1000_media_type_fiber) &&
+		spddplx != (SPEED_1000 + DUPLEX_FULL)) {
+		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		return -EINVAL;
+	}
+
+	switch (spddplx) {
+	case SPEED_10 + DUPLEX_HALF:
+		adapter->hw.forced_speed_duplex = e1000_10_half;
+		break;
+	case SPEED_10 + DUPLEX_FULL:
+		adapter->hw.forced_speed_duplex = e1000_10_full;
+		break;
+	case SPEED_100 + DUPLEX_HALF:
+		adapter->hw.forced_speed_duplex = e1000_100_half;
+		break;
+	case SPEED_100 + DUPLEX_FULL:
+		adapter->hw.forced_speed_duplex = e1000_100_full;
+		break;
+	case SPEED_1000 + DUPLEX_FULL:
+		adapter->hw.autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	case SPEED_1000 + DUPLEX_HALF: /* not supported */
+	default:
+		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+++ linux-patched/drivers/xenomai/net/drivers/e1000/e1000_osdep.h	2022-03-21 12:58:29.472888152 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/e1000/e1000.h	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of the GNU General Public License as published by the Free
+  Software Foundation; either version 2 of the License, or (at your option)
+  any later version.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* glue for the OS independent part of e1000
+ * includes register access macros
+ */
+
+#ifndef _E1000_OSDEP_H_
+#define _E1000_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include "kcompat.h"
+
+#define usec_delay(x) udelay(x)
+#ifndef msec_delay
+#define msec_delay(x)	do { if(in_interrupt()) { \
+				/* Don't mdelay in interrupt context! */ \
+				BUG(); \
+			} else { \
+				msleep(x); \
+			} } while (0)
+
+/* Some workarounds require millisecond delays and are run during interrupt
+ * context.  Most notably, when establishing link, the phy may need tweaking
+ * but cannot process phy register reads/writes faster than millisecond
+ * intervals...and we establish link due to a "link status change" interrupt.
+ */
+#define msec_delay_irq(x) mdelay(x)
+#endif
+
+#define PCI_COMMAND_REGISTER   PCI_COMMAND
+#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
+
+typedef enum {
+#undef FALSE
+    FALSE = 0,
+#undef TRUE
+    TRUE = 1
+} boolean_t;
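+/* The #undefs above guard against TRUE/FALSE already being defined by
+ * other kernel headers. */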
+
+#define MSGOUT(S, A, B)	printk(KERN_DEBUG S "\n", A, B)
+
+#ifdef DBG
+#define DEBUGOUT(S)		printk(KERN_DEBUG S "\n")
+#define DEBUGOUT1(S, A...)	printk(KERN_DEBUG S "\n", A)
+#else
+#define DEBUGOUT(S)
+#define DEBUGOUT1(S, A...)
+#endif
+
+#define DEBUGFUNC(F) DEBUGOUT(F)
+#define DEBUGOUT2 DEBUGOUT1
+#define DEBUGOUT3 DEBUGOUT2
+#define DEBUGOUT7 DEBUGOUT3
+
+#ifdef __BIG_ENDIAN
+#define E1000_BIG_ENDIAN __BIG_ENDIAN
+#endif
+
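+/* Register accessors: the 82542 uses a different register layout than
+ * 82543 and later MACs, so each access selects the offset by mac_type. */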
+#define E1000_WRITE_REG(a, reg, value) ( \
+    writel((value), ((a)->hw_addr + \
+	(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg))))
+
+#define E1000_READ_REG(a, reg) ( \
+    readl((a)->hw_addr + \
+	(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg)))
+
+#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+    writel((value), ((a)->hw_addr + \
+	(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+	((offset) << 2))))
+
+#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
+    readl((a)->hw_addr + \
+	(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+	((offset) << 2)))
+
+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
+
+#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
+    writew((value), ((a)->hw_addr + \
+	(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+	((offset) << 1))))
+
+#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
+    readw((a)->hw_addr + \
+	(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+	((offset) << 1)))
+
+#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
+    writeb((value), ((a)->hw_addr + \
+	(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+	(offset))))
+
+#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
+    readb((a)->hw_addr + \
+	(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+	(offset)))
+
+#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS)
+
+#define E1000_WRITE_ICH8_REG(a, reg, value) ( \
+    writel((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_ICH8_REG(a, reg) ( \
+    readl((a)->flash_address + reg))
+
+#define E1000_WRITE_ICH8_REG16(a, reg, value) ( \
+    writew((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_ICH8_REG16(a, reg) ( \
+    readw((a)->flash_address + reg))
+
+
+#endif /* _E1000_OSDEP_H_ */
+++ linux-patched/drivers/xenomai/net/drivers/e1000/e1000.h	2022-03-21 12:58:29.467888201 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/eth1394.c	1970-01-01 01:00:00.000000000 +0100
+/*******************************************************************************
+
+
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of the GNU General Public License as published by the Free
+  Software Foundation; either version 2 of the License, or (at your option)
+  any later version.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _E1000_H_
+#define _E1000_H_
+
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/capability.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <net/pkt_sched.h>
+#include <linux/list.h>
+#include <linux/reboot.h>
+#ifdef NETIF_F_ISO
+#undef NETIF_F_ISO
+#endif
+
+#ifdef NETIF_F_TSO
+#include <net/checksum.h>
+#endif
+#ifdef SIOCGMIIPHY
+#include <linux/mii.h>
+#endif
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+#endif
+
+#ifdef NETIF_F_HW_VLAN_TX
+#undef NETIF_F_HW_VLAN_TX
+#endif
+
+#ifdef NETIF_F_HW_VLAN_TX
+#include <linux/if_vlan.h>
+#endif
+
+// RTNET
+#include <rtnet_port.h>
+
+
+#define BAR_0		0
+#define BAR_1		1
+#define BAR_5		5
+
+#include "kcompat.h"
+#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
+	PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+struct e1000_adapter;
+
+#include "e1000_hw.h"
+
+#ifdef DBG
+#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
+#else
+#define E1000_DBG(args...)
+#endif
+
+#define E1000_ERR(args...) printk(KERN_ERR "e1000: " args)
+
+#define PFX "e1000: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+	(void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+		__FUNCTION__ , ## args))
+
+#define E1000_MAX_INTR 10
+
+/* TX/RX descriptor defines */
+#define E1000_DEFAULT_TXD                  256
+#define E1000_MAX_TXD                      256
+#define E1000_MIN_TXD                       80
+#define E1000_MAX_82544_TXD               4096
+
+#define E1000_DEFAULT_RXD                  256
+#define E1000_MAX_RXD                      256
+#define E1000_MIN_RXD                       80
+#define E1000_MAX_82544_RXD               4096
+
+/* Supported Rx Buffer Sizes */
+#define E1000_RXBUFFER_128   128    /* Used for packet split */
+#define E1000_RXBUFFER_256   256    /* Used for packet split */
+#define E1000_RXBUFFER_512   512
+#define E1000_RXBUFFER_1024  1024
+#define E1000_RXBUFFER_2048  2048
+#define E1000_RXBUFFER_4096  4096
+#define E1000_RXBUFFER_8192  8192
+#define E1000_RXBUFFER_16384 16384
+
+/* SmartSpeed delimiters */
+#define E1000_SMARTSPEED_DOWNSHIFT 3
+#define E1000_SMARTSPEED_MAX       15
+
+/* Packet Buffer allocations */
+#define E1000_PBA_BYTES_SHIFT 0xA
+#define E1000_TX_HEAD_ADDR_SHIFT 7
+#define E1000_PBA_TX_MASK 0xFFFF0000
+
+/* Flow Control Watermarks */
+#define E1000_FC_HIGH_DIFF 0x1638  /* High: 5688 bytes below Rx FIFO size */
+#define E1000_FC_LOW_DIFF 0x1640   /* Low:  5696 bytes below Rx FIFO size */
+
+#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue ? */
+#define E1000_TX_QUEUE_WAKE	16
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define E1000_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+
+#define AUTO_ALL_MODES            0
+#define E1000_EEPROM_82544_APM    0x0004
+#define E1000_EEPROM_ICH8_APME    0x0004
+#define E1000_EEPROM_APME         0x0400
+
+#ifndef E1000_MASTER_SLAVE
+/* Switch to override PHY master/slave setting */
+#define E1000_MASTER_SLAVE	e1000_ms_hw_default
+#endif
+
+#ifdef NETIF_F_HW_VLAN_TX
+#define E1000_MNG_VLAN_NONE -1
+#endif
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
+/* only works for sizes that are powers of 2 */
+#define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
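+/* e.g. E1000_ROUNDUP(len, 16) rounds len up in place to the next multiple of 16 */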
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct e1000_buffer {
+	struct rtskb *skb;
+	dma_addr_t dma;
+	unsigned long time_stamp;
+	uint16_t length;
+	uint16_t next_to_watch;
+};
+
+
+struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
+struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
+
+struct e1000_tx_ring {
+	/* pointer to the descriptor ring memory */
+	void *desc;
+	/* physical address of the descriptor ring */
+	dma_addr_t dma;
+	/* length of descriptor ring in bytes */
+	unsigned int size;
+	/* number of descriptors in the ring */
+	unsigned int count;
+	/* next descriptor to associate a buffer with */
+	unsigned int next_to_use;
+	/* next descriptor to check for DD status bit */
+	unsigned int next_to_clean;
+	/* array of buffer information structs */
+	struct e1000_buffer *buffer_info;
+
+	rtdm_lock_t tx_lock;
+	uint16_t tdh;
+	uint16_t tdt;
+	boolean_t last_tx_tso;
+};
+
+struct e1000_rx_ring {
+	/* pointer to the descriptor ring memory */
+	void *desc;
+	/* physical address of the descriptor ring */
+	dma_addr_t dma;
+	/* length of descriptor ring in bytes */
+	unsigned int size;
+	/* number of descriptors in the ring */
+	unsigned int count;
+	/* next descriptor to associate a buffer with */
+	unsigned int next_to_use;
+	/* next descriptor to check for DD status bit */
+	unsigned int next_to_clean;
+	/* array of buffer information structs */
+	struct e1000_buffer *buffer_info;
+	/* arrays of page information for packet split */
+	struct e1000_ps_page *ps_page;
+	struct e1000_ps_page_dma *ps_page_dma;
+
+	/* cpu for rx queue */
+	int cpu;
+
+	uint16_t rdh;
+	uint16_t rdt;
+};
+
+#define E1000_DESC_UNUSED(R) \
+	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+	(R)->next_to_clean - (R)->next_to_use - 1)
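+/* Number of free descriptors in ring R, leaving one slot unused so the
+ * tail pointer never catches up with the head. */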
+
+#define E1000_RX_DESC_PS(R, i)	    \
+	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+#define E1000_RX_DESC_EXT(R, i)	    \
+	(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
+#define E1000_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
+#define E1000_RX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_rx_desc)
+#define E1000_TX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_tx_desc)
+#define E1000_CONTEXT_DESC(R, i)	E1000_GET_DESC(R, i, e1000_context_desc)
+
+/* board specific private data structure */
+
+struct e1000_adapter {
+#ifdef NETIF_F_HW_VLAN_TX
+	struct vlan_group *vlgrp;
+	uint16_t mng_vlan_id;
+#endif
+	uint32_t bd_number;
+	uint32_t rx_buffer_len;
+	uint32_t part_num;
+	uint32_t wol;
+	uint32_t ksp3_port_a;
+	uint32_t smartspeed;
+	uint32_t en_mng_pt;
+	uint16_t link_speed;
+	uint16_t link_duplex;
+#ifdef CONFIG_E1000_NAPI
+	spinlock_t tx_queue_lock;
+#endif
+	atomic_t irq_sem;
+	struct work_struct reset_task;
+	uint8_t fc_autoneg;
+
+#ifdef ETHTOOL_PHYS_ID
+	struct timer_list blink_timer;
+	unsigned long led_status;
+#endif
+
+	/* TX */
+	struct e1000_tx_ring *tx_ring;      /* One per active queue */
+	unsigned long tx_queue_len;
+	uint32_t txd_cmd;
+	uint32_t tx_int_delay;
+	uint32_t tx_abs_int_delay;
+	uint32_t gotcl;
+	uint64_t gotcl_old;
+	uint64_t tpt_old;
+	uint64_t colc_old;
+	uint32_t tx_timeout_count;
+	uint32_t tx_fifo_head;
+	uint32_t tx_head_addr;
+	uint32_t tx_fifo_size;
+	uint8_t  tx_timeout_factor;
+	atomic_t tx_fifo_stall;
+	boolean_t pcix_82544;
+	boolean_t detect_tx_hung;
+
+	/* RX */
+#ifdef CONFIG_E1000_NAPI
+	boolean_t (*clean_rx) (struct e1000_adapter *adapter,
+			       struct e1000_rx_ring *rx_ring,
+			       int *work_done, int work_to_do);
+#else
+	boolean_t (*clean_rx) (struct e1000_adapter *adapter,
+			       struct e1000_rx_ring *rx_ring);
+#endif
+	void (*alloc_rx_buf) (struct e1000_adapter *adapter,
+			      struct e1000_rx_ring *rx_ring,
+				int cleaned_count);
+	struct e1000_rx_ring *rx_ring;      /* One per active queue */
+#ifdef CONFIG_E1000_NAPI
+	struct net_device *polling_netdev;  /* One per active queue */
+#endif
+	int num_tx_queues;
+	int num_rx_queues;
+
+	uint64_t hw_csum_err;
+	uint64_t hw_csum_good;
+	uint64_t rx_hdr_split;
+	uint32_t alloc_rx_buff_failed;
+	uint32_t rx_int_delay;
+	uint32_t rx_abs_int_delay;
+	boolean_t rx_csum;
+	unsigned int rx_ps_pages;
+	uint32_t gorcl;
+	uint64_t gorcl_old;
+	uint16_t rx_ps_bsize0;
+
+	/* Interrupt Throttle Rate */
+	uint32_t itr;
+
+	/* OS defined structs */
+	struct rtnet_device *netdev;
+	struct pci_dev *pdev;
+	struct net_device_stats net_stats;
+
+	rtdm_irq_t irq_handle;
+	boolean_t data_received;
+
+	/* structs defined in e1000_hw.h */
+	struct e1000_hw hw;
+	struct e1000_hw_stats stats;
+	struct e1000_phy_info phy_info;
+	struct e1000_phy_stats phy_stats;
+
+#ifdef ETHTOOL_TEST
+	uint32_t test_icr;
+	struct e1000_tx_ring test_tx_ring;
+	struct e1000_rx_ring test_rx_ring;
+#endif
+
+#ifdef E1000_COUNT_ICR
+	uint64_t icr_txdw;
+	uint64_t icr_txqe;
+	uint64_t icr_lsc;
+	uint64_t icr_rxseq;
+	uint64_t icr_rxdmt;
+	uint64_t icr_rxo;
+	uint64_t icr_rxt;
+	uint64_t icr_mdac;
+	uint64_t icr_rxcfg;
+	uint64_t icr_gpi;
+#endif
+
+	uint32_t *config_space;
+	int msg_enable;
+#ifdef CONFIG_PCI_MSI
+	boolean_t have_msi;
+#endif
+	/* to not mess up cache alignment, always add to the bottom */
+#ifdef NETIF_F_TSO
+	boolean_t tso_force;
+#endif
+	boolean_t smart_power_down;	/* phy smart power down */
+	unsigned long flags;
+
+	struct delayed_work watchdog_task;
+	struct delayed_work fifo_stall_task;
+	struct delayed_work phy_info_task;
+};
+
+enum e1000_state_t {
+	__E1000_DRIVER_TESTING,
+	__E1000_RESETTING,
+};
+#endif /* _E1000_H_ */
+++ linux-patched/drivers/xenomai/net/drivers/eth1394.c	2022-03-21 12:58:29.462888250 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/mpc52xx_fec/mpc52xx_fec.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * eth1394.h -- RTnet Driver for Ethernet emulation over FireWire
+ *              (adapted from Linux1394)
+ *
+ * Copyright (C) 2005 Zhang Yuchen <yuchen623@gmail.com>
+ *
+ * Mainly based on work by Emanuel Pirker and Andreas E. Bombe
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/bitops.h>
+#include <linux/uaccess.h>
+#include <net/arp.h>
+
+#define rtos_spinlock_t rtdm_lock_t
+#define nanosecs_abs_t  nanosecs_t
+
+#include <rt_eth1394.h>
+
+#include <rtnet_port.h>
+
+#include <ieee1394_types.h>
+#include <ieee1394_core.h>
+#include <ieee1394_transactions.h>
+#include <ieee1394.h>
+#include <highlevel.h>
+#include <iso.h>
+
+#define driver_name	"RT-ETH1394"
+
+
+#define ETH1394_PRINT_G(level, fmt, args...) \
+	rtdm_printk(level "%s: " fmt, driver_name, ## args)
+
+#define ETH1394_PRINT(level, dev_name, fmt, args...) \
+	rtdm_printk(level "%s: %s: " fmt, driver_name, dev_name, ## args)
+
+//#define ETH1394_DEBUG 1
+
+#ifdef ETH1394_DEBUG
+#define DEBUGP(fmt, args...) \
+	rtdm_printk(KERN_ERR "%s:%s[%d]: " fmt "\n", driver_name, __FUNCTION__, __LINE__, ## args)
+#else
+#define DEBUGP(fmt, args...)
+#endif
+
+#define TRACE() rtdm_printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__)
+
+/* Change this to IEEE1394_SPEED_S100 to make testing easier */
+#define ETH1394_SPEED_DEF	0x03 /*IEEE1394_SPEED_MAX*/
+
+/* For now, this needs to be 1500, so that XP works with us */
+#define ETH1394_DATA_LEN		1500/*ETH_DATA_LEN*/
+
+struct fragment_info {
+	struct list_head list;
+	int offset;
+	int len;
+};
+
+struct partial_datagram {
+	struct list_head list;
+	u16 dgl;
+	u16 dg_size;
+	u16 ether_type;
+	struct rtskb *skb;
+	char *pbuf;
+	struct list_head frag_info;
+};
+
+static const u16 eth1394_speedto_maxpayload[] = {
+/*     S100, S200, S400, S800, S1600, S3200 */
+	512, 1024, 2048, 4096,  4096,  4096
+};
+
+static struct hpsb_highlevel eth1394_highlevel;
+
+/* Use common.lf to determine header len */
+static const int hdr_type_len[] = {
+	sizeof (struct eth1394_uf_hdr),
+	sizeof (struct eth1394_ff_hdr),
+	sizeof (struct eth1394_sf_hdr),
+	sizeof (struct eth1394_sf_hdr)
+};
+
+/* The max_partial_datagrams parameter is the maximum number of fragmented
+ * datagrams per node that eth1394 will keep in memory.  Providing an upper
+ * bound allows us to limit the amount of memory that partial datagrams
+ * consume in the event that some partial datagrams are never completed.  This
+ * should probably change to a sysctl item or the like if possible.
+ */
+static int max_partial_datagrams = 25;
+module_param(max_partial_datagrams, int, 0444);
+MODULE_PARM_DESC(max_partial_datagrams,
+		 "Maximum number of partially received fragmented datagrams "
+		 "(default = 25).");
+
+
+static int eth1394_header(struct rtskb *skb, struct rtnet_device *dev,
+			    unsigned short type, void *daddr, void *saddr,
+			    unsigned len);
+
+static int eth1394_write(struct hpsb_host *host,struct hpsb_packet *packet, unsigned int length);
+
+static inline void purge_partial_datagram(struct list_head *old);
+static int eth1394_tx(struct rtskb *skb, struct rtnet_device *dev);
+static void eth1394_iso(struct hpsb_iso *iso, void *arg);
+
+/* Function for incoming 1394 packets */
+static struct hpsb_address_ops eth1394_ops = {
+	.write =	eth1394_write,
+};
+
+static void eth1394_add_host (struct hpsb_host *host);
+static void eth1394_remove_host (struct hpsb_host *host);
+static void eth1394_host_reset (struct hpsb_host *host);
+
+/* Ieee1394 highlevel driver functions */
+static struct hpsb_highlevel eth1394_highlevel = {
+	.name =		driver_name,
+	.add_host =	eth1394_add_host,
+	.remove_host =	eth1394_remove_host,
+	.host_reset =	eth1394_host_reset,
+};
+
+static void eth1394_iso_shutdown(struct eth1394_priv *priv)
+{
+	priv->bc_state = ETHER1394_BC_CLOSED;
+
+	if (priv->iso != NULL) {
+		//~ if (!in_interrupt())
+			hpsb_iso_shutdown(priv->iso);
+		priv->iso = NULL;
+	}
+}
+
+static int eth1394_init_bc(struct rtnet_device *dev)
+{
+	struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
+
+	/* First time sending?  Need a broadcast channel for ARP and for
+	 * listening on */
+	if (priv->bc_state == ETHER1394_BC_CHECK) {
+		quadlet_t bc;
+
+		/* Get the local copy of the broadcast channel and check its
+		 * validity (the IRM should validate it for us) */
+
+		bc = priv->host->csr.broadcast_channel;
+
+		if ((bc & 0x80000000) != 0x80000000) { //used to be 0xc0000000
+			/* broadcast channel not validated yet */
+			ETH1394_PRINT(KERN_WARNING, dev->name,
+				      "Error BROADCAST_CHANNEL register valid "
+				      "bit not set, can't send IP traffic\n");
+
+			eth1394_iso_shutdown(priv);
+
+			return -EAGAIN;
+		}
+		if (priv->broadcast_channel != (bc & 0x3f)) {
+			/* This really shouldn't be possible, but just in case
+			 * the IEEE 1394 spec changes regarding broadcast
+			 * channels in the future. */
+
+			eth1394_iso_shutdown(priv);
+
+			//~ if (in_interrupt())
+				//~ return -EAGAIN;
+
+			priv->broadcast_channel = bc & 0x3f;
+			ETH1394_PRINT(KERN_INFO, dev->name,
+				      "Changing to broadcast channel %d...\n",
+				      priv->broadcast_channel);
+
+			priv->iso = hpsb_iso_recv_init(priv->host, 16 * 4096,
+						       16, priv->broadcast_channel, HPSB_ISO_DMA_PACKET_PER_BUFFER,
+						       1, eth1394_iso, 0, "eth1394_iso", IEEE1394_PRIORITY_HIGHEST);
+
+			if (priv->iso == NULL) {
+				ETH1394_PRINT(KERN_ERR, dev->name,
+					      "failed to change broadcast "
+					      "channel\n");
+				return -EAGAIN;
+			}
+		}
+		if (hpsb_iso_recv_start(priv->iso, -1, (1 << 3), -1) < 0) {
+			ETH1394_PRINT(KERN_ERR, dev->name,
+				      "Could not start data stream reception\n");
+
+			eth1394_iso_shutdown(priv);
+
+			return -EAGAIN;
+		}
+		priv->bc_state = ETHER1394_BC_OPENED;
+	}
+
+	return 0;
+}
+
+static int eth1394_open (struct rtnet_device *dev)
+{
+	struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
+	rtdm_lockctx_t context;
+	int ret;
+
+	/* Something bad happened, don't even try */
+	if (priv->bc_state == ETHER1394_BC_CLOSED)
+	{
+		return -EAGAIN;
+	}
+	rtdm_lock_get_irqsave(&priv->lock, context);
+	ret = eth1394_init_bc(dev);
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	if (ret)
+		return ret;
+	rt_stack_connect(dev,&STACK_manager);
+	rtnetif_start_queue (dev);
+	return 0;
+}
+
+static int eth1394_stop (struct rtnet_device *dev)
+{
+	rtnetif_stop_queue (dev);
+	rt_stack_disconnect(dev);
+	return 0;
+}
+
+/* Return statistics to the caller */
+static struct net_device_stats *eth1394_stats (struct rtnet_device *dev)
+{
+	return &(((struct eth1394_priv *)dev->priv)->stats);
+}
+
+static inline void eth1394_register_limits(int nodeid, u16 maxpayload,
+					     unsigned char sspd,
+					     struct eth1394_priv *priv)
+{
+
+	if (nodeid < 0 || nodeid >= ALL_NODES) {
+		ETH1394_PRINT_G (KERN_ERR, "Cannot register invalid nodeid %d\n", nodeid);
+		return;
+	}
+
+	priv->maxpayload[nodeid]	= maxpayload;
+	priv->sspd[nodeid]		= sspd;
+	priv->maxpayload[ALL_NODES] = min(priv->maxpayload[ALL_NODES], maxpayload);
+	priv->sspd[ALL_NODES] = min(priv->sspd[ALL_NODES], sspd);
+
+	return;
+}
+
+
+static void eth1394_reset_priv (struct rtnet_device *dev, int set_mtu)
+{
+	rtdm_lockctx_t context;
+	int i;
+	struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
+	struct hpsb_host *host = priv->host;
+	int phy_id = NODEID_TO_NODE(host->node_id);
+	u16 maxpayload = 1 << (host->csr.max_rec + 1);
+
+	rtdm_lock_get_irqsave(&priv->lock, context);
+	/* Clear the speed/payload/offset tables */
+	memset (priv->maxpayload, 0, sizeof (priv->maxpayload));
+	memset (priv->sspd, 0, sizeof (priv->sspd));
+
+	priv->sspd[ALL_NODES] = ETH1394_SPEED_DEF;
+	priv->maxpayload[ALL_NODES] = eth1394_speedto_maxpayload[priv->sspd[ALL_NODES]];
+
+	priv->bc_state = ETHER1394_BC_CHECK;
+
+	/* Register our limits now */
+	eth1394_register_limits(phy_id, maxpayload,
+				    host->speed_map[(phy_id << 6) + phy_id], priv);
+
+	/* We'll use our maxpayload as the default mtu */
+	if (set_mtu) {
+		dev->mtu = min(ETH1394_DATA_LEN, (int)(priv->maxpayload[phy_id] -
+			       (sizeof(union eth1394_hdr) + ETHER1394_GASP_OVERHEAD)));
+
+		//~ /* Set our hardware address while we're at it */
+		//~ *(u64*)dev->dev_addr = guid;
+		//~ *(u64*)dev->broadcast = ~0x0ULL;
+		*(u16*)dev->dev_addr = LOCAL_BUS | phy_id; //we directly use FireWire address for our MAC address
+		*(u16*)dev->broadcast =  LOCAL_BUS | ALL_NODES;
+	}
+
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	for (i = 0; i < ALL_NODES; i++) {
+		struct list_head *lh, *n;
+
+		rtdm_lock_get_irqsave(&priv->pdg[i].lock, context);
+		if (!set_mtu) {
+			list_for_each_safe(lh, n, &priv->pdg[i].list) {
+				//~ purge_partial_datagram(lh);
+			}
+		}
+		INIT_LIST_HEAD(&(priv->pdg[i].list));
+		priv->pdg[i].sz = 0;
+		rtdm_lock_put_irqrestore(&priv->pdg[i].lock, context);
+	}
+
+}
+
+static void eth1394_add_host (struct hpsb_host *host)
+{
+	int i;
+	struct host_info *hi = NULL;
+
+	//*******RTnet********
+	struct rtnet_device *dev = NULL;
+	//
+	struct eth1394_priv *priv;
+
+	/* We should really have our own alloc_hpsbdev() function in
+	 * net_init.c instead of calling the one for ethernet then hijacking
+	 * it for ourselves.  That way we'd be a real networking device. */
+
+	//******RTnet******
+
+	dev = rt_alloc_etherdev(sizeof (struct eth1394_priv),
+				RX_RING_SIZE * 2 + TX_RING_SIZE);
+	if (dev == NULL) {
+		ETH1394_PRINT_G (KERN_ERR, "Out of memory trying to allocate "
+				 "etherdevice for IEEE 1394 device\n");
+		goto free_dev;
+	}
+	rtdev_alloc_name(dev, "rteth%d");
+	memset(dev->priv, 0, sizeof(struct eth1394_priv));
+	rt_rtdev_connect(dev, &RTDEV_manager);
+
+	//dev->init = eth1394_init_dev;
+
+	dev->vers = RTDEV_VERS_2_0;
+	dev->open = eth1394_open;
+	dev->hard_start_xmit = eth1394_tx;
+	dev->stop = eth1394_stop;
+	dev->hard_header = eth1394_header;
+	dev->get_stats = eth1394_stats;
+	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
+	dev->addr_len		= ETH_ALEN;
+	dev->hard_header_len	= ETH_HLEN;
+	dev->type		= ARPHRD_IEEE1394;
+
+	//rtdev->do_ioctl = NULL;
+	priv = (struct eth1394_priv *)dev->priv;
+
+	rtdm_lock_init(&priv->lock);
+	priv->host = host;
+
+	for (i = 0; i < ALL_NODES; i++) {
+		rtdm_lock_init(&priv->pdg[i].lock);
+		INIT_LIST_HEAD(&priv->pdg[i].list);
+		priv->pdg[i].sz = 0;
+	}
+
+	hi = hpsb_create_hostinfo(&eth1394_highlevel, host, sizeof(*hi));
+	if (hi == NULL) {
+		ETH1394_PRINT_G (KERN_ERR, "Out of memory trying to create "
+				 "hostinfo for IEEE 1394 device\n");
+		goto free_hi;
+	}
+
+	if(rt_register_rtnetdev(dev))
+	{
+		ETH1394_PRINT (KERN_ERR, dev->name, "Error registering network driver\n");
+		goto free_hi;
+	}
+
+	ETH1394_PRINT (KERN_ERR, dev->name, "IEEE-1394 IPv4 over 1394 Ethernet\n");
+
+	hi->host = host;
+	hi->dev = dev;
+
+	eth1394_reset_priv (dev, 1);
+
+	/* Ignore validity in hopes that it will be set in the future.  It'll
+	 * be checked when the eth device is opened. */
+	priv->broadcast_channel = host->csr.broadcast_channel & 0x3f;
+
+	priv->iso = hpsb_iso_recv_init(host, (ETHER1394_GASP_BUFFERS * 2 *
+					      2048), // XXX workaround for limitation in rawiso
+					      //(1 << (host->csr.max_rec + 1))),
+				       ETHER1394_GASP_BUFFERS,
+				       priv->broadcast_channel,
+				       HPSB_ISO_DMA_PACKET_PER_BUFFER,
+				       1, eth1394_iso, 0, "eth1394_iso", IEEE1394_PRIORITY_HIGHEST);
+
+
+
+	if (priv->iso == NULL) {
+		ETH1394_PRINT(KERN_ERR, dev->name,
+			      "Could not allocate isochronous receive context "
+			      "for the broadcast channel\n");
+		priv->bc_state = ETHER1394_BC_ERROR;
+		goto unregister_dev;
+	} else {
+		if (hpsb_iso_recv_start(priv->iso, -1, (1 << 3), -1) < 0){
+			priv->bc_state = ETHER1394_BC_STOPPED;
+			goto unregister_dev;
+		}
+		else
+			priv->bc_state = ETHER1394_BC_RUNNING;
+	}
+
+	hpsb_register_addrspace(&eth1394_highlevel, host, &eth1394_ops, ETHER1394_REGION_ADDR,
+				 ETHER1394_REGION_ADDR_END);
+
+	return;
+
+unregister_dev:
+	rt_unregister_rtnetdev(dev);
+free_hi:
+	hpsb_destroy_hostinfo(&eth1394_highlevel, host);
+free_dev:
+	rtdev_free(dev);
+
+	return;
+}
+
+static void eth1394_remove_host (struct hpsb_host *host)
+{
+	struct host_info *hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
+
+	if (hi != NULL) {
+		struct eth1394_priv *priv = (struct eth1394_priv *)hi->dev->priv;
+
+		eth1394_iso_shutdown(priv);
+
+		if (hi->dev) {
+			rt_stack_disconnect(hi->dev);
+			rt_unregister_rtnetdev (hi->dev);
+			rtdev_free(hi->dev);
+		}
+	}
+	return;
+}
+
+static void eth1394_host_reset (struct hpsb_host *host)
+{
+	struct host_info *hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
+	struct rtnet_device *dev;
+
+	/* This can happen for hosts that we don't use */
+	if (hi == NULL)
+		return;
+
+	dev = hi->dev;
+
+	/* Reset our private host data, but not our mtu */
+	rtnetif_stop_queue (dev);
+	eth1394_reset_priv (dev, 1);
+	rtnetif_wake_queue (dev);
+}
+
+
+/******************************************
+ * HW Header net device functions
+ ******************************************/
+/* These functions have been adapted from net/ethernet/eth.c */
+
+
+/* Create a fake MAC header for an arbitrary protocol layer.
+ * saddr=NULL means use device source address
+ * daddr=NULL means leave destination address (eg unresolved arp). */
+static int eth1394_header(struct rtskb *skb, struct rtnet_device *dev,
+			    unsigned short type, void *daddr, void *saddr,
+			    unsigned len)
+{
+	struct ethhdr *eth = (struct ethhdr *)rtskb_push(skb,ETH_HLEN);
+	memset(eth, 0, sizeof(*eth));
+
+	eth->h_proto = htons(type);
+
+	if (saddr)
+		memcpy(eth->h_source, saddr, sizeof(nodeid_t));
+	else
+		memcpy(eth->h_source, dev->dev_addr, sizeof(nodeid_t));
+
+	if (dev->flags & (IFF_LOOPBACK|IFF_NOARP))
+	{
+		memset(eth->h_dest, 0, dev->addr_len);
+		return(dev->hard_header_len);
+	}
+
+	if (daddr)
+	{
+		memcpy(eth->h_dest,daddr, sizeof(nodeid_t));
+		return dev->hard_header_len;
+	}
+
+	return -dev->hard_header_len;
+
+}
+
+
+/******************************************
+ * Datagram reception code
+ ******************************************/
+
+/* Copied from net/ethernet/eth.c */
+static inline u16 eth1394_type_trans(struct rtskb *skb,
+				       struct rtnet_device *dev)
+{
+	struct ethhdr *eth;
+	unsigned char *rawp;
+
+	skb->mac.raw = skb->data;
+	rtskb_pull (skb, ETH_HLEN);
+	eth = (struct ethhdr*)skb->mac.raw;
+
+	if (*eth->h_dest & 1) {
+		if (memcmp(eth->h_dest, dev->broadcast, dev->addr_len)==0)
+			skb->pkt_type = PACKET_BROADCAST;
+	} else {
+		if (memcmp(eth->h_dest, dev->dev_addr, dev->addr_len))
+			skb->pkt_type = PACKET_OTHERHOST;
+	}
+
+	if (ntohs (eth->h_proto) >= 1536)
+		return eth->h_proto;
+
+	rawp = skb->data;
+
+	if (*(unsigned short *)rawp == 0xFFFF)
+		return htons (ETH_P_802_3);
+
+	return htons (ETH_P_802_2);
+}
+
+/* Parse an encapsulated IP1394 header into an ethernet frame packet.
+ * We also perform ARP translation here, if need be.  */
+static inline u16 eth1394_parse_encap(struct rtskb *skb,
+					struct rtnet_device *dev,
+					nodeid_t srcid, nodeid_t destid,
+					u16 ether_type)
+{
+	struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
+	unsigned short ret = 0;
+
+	/* If this is an ARP packet, convert it. First, we want to make
+	 * use of some of the fields, since they tell us a little bit
+	 * about the sending machine.  */
+	if (ether_type == __constant_htons (ETH_P_ARP)) {
+		rtdm_lockctx_t context;
+		struct eth1394_arp *arp1394 =
+				(struct eth1394_arp*)((u8 *)skb->data);
+		struct arphdr *arp =
+				(struct arphdr *)((u8 *)skb->data);
+		unsigned char *arp_ptr = (unsigned char *)(arp + 1);
+		u8 max_rec = min(priv->host->csr.max_rec,
+				 (u8)(arp1394->max_rec));
+		int sspd = arp1394->sspd;
+		u16 maxpayload;
+		/* Sanity check. MacOSX seems to be sending us 131 in this
+		 * field (at least on my Panther G5). Not sure why. */
+		if (sspd > 5 || sspd < 0)
+			sspd = 0;
+
+		maxpayload = min(eth1394_speedto_maxpayload[sspd], (u16)(1 << (max_rec + 1)));
+
+
+
+		/* Update our speed/payload/fifo_offset table */
+		rtdm_lock_get_irqsave(&priv->lock, context);
+		eth1394_register_limits(NODEID_TO_NODE(srcid), maxpayload,
+					  arp1394->sspd,
+						priv);
+		rtdm_lock_put_irqrestore(&priv->lock, context);
+
+		/* Now that we're done with the 1394 specific stuff, we
+		 * need to alter some of the data.  Believe it or not, all
+		 * that needs to be done is to move the sender IP address,
+		 * stuff in the destination hardware address, and set the
+		 * hardware address length to ETH_ALEN.
+		 *
+		 * IMPORTANT: The code below overwrites the 1394 specific
+		 * data needed above, so keep the call to
+		 * eth1394_register_limits() before munging the data for the
+		 * higher level IP stack. */
+
+		arp->ar_hln = ETH_ALEN;
+		arp_ptr += arp->ar_hln;		/* skip over sender unique id */
+		*(u32*)arp_ptr = arp1394->sip;	/* move sender IP addr */
+		arp_ptr += arp->ar_pln;		/* skip over sender IP addr */
+
+		if (arp->ar_op == 1)
+			/* just set ARP req target unique ID to 0 */
+			memset(arp_ptr, 0, ETH_ALEN);
+		else
+			memcpy(arp_ptr, dev->dev_addr, ETH_ALEN);
+	}
+
+	/* Now add the ethernet header. */
+	//no need to add ethernet header now, since we did not get rid of it on the sending side
+	if (dev->hard_header (skb, dev, __constant_ntohs (ether_type),
+			      &destid, &srcid, skb->len) >= 0)
+		ret = eth1394_type_trans(skb, dev);
+
+	return ret;
+}
+
+static inline int fragment_overlap(struct list_head *frag_list, int offset, int len)
+{
+	struct list_head *lh;
+	struct fragment_info *fi;
+
+	list_for_each(lh, frag_list) {
+		fi = list_entry(lh, struct fragment_info, list);
+
+		if ( ! ((offset > (fi->offset + fi->len - 1)) ||
+		       ((offset + len - 1) < fi->offset)))
+			return 1;
+	}
+	return 0;
+}
+
+static inline struct list_head *find_partial_datagram(struct list_head *pdgl, int dgl)
+{
+	struct list_head *lh;
+	struct partial_datagram *pd;
+
+	list_for_each(lh, pdgl) {
+		pd = list_entry(lh, struct partial_datagram, list);
+		if (pd->dgl == dgl)
+			return lh;
+	}
+	return NULL;
+}
+
+/* Assumes that new fragment does not overlap any existing fragments */
+static inline int new_fragment(struct list_head *frag_info, int offset, int len)
+{
+	struct list_head *lh;
+	struct fragment_info *fi, *fi2, *new;
+
+	list_for_each(lh, frag_info) {
+		fi = list_entry(lh, struct fragment_info, list);
+		if ((fi->offset + fi->len) == offset) {
+			/* The new fragment can be tacked on to the end */
+			fi->len += len;
+			/* Did the new fragment plug a hole? */
+			fi2 = list_entry(lh->next, struct fragment_info, list);
+			if ((fi->offset + fi->len) == fi2->offset) {
+				/* glue fragments together */
+				fi->len += fi2->len;
+				list_del(lh->next);
+				kfree(fi2);
+			}
+			return 0;
+		} else if ((offset + len) == fi->offset) {
+			/* The new fragment can be tacked on to the beginning */
+			fi->offset = offset;
+			fi->len += len;
+			/* Did the new fragment plug a hole? */
+			fi2 = list_entry(lh->prev, struct fragment_info, list);
+			if ((fi2->offset + fi2->len) == fi->offset) {
+				/* glue fragments together */
+				fi2->len += fi->len;
+				list_del(lh);
+				kfree(fi);
+			}
+			return 0;
+		} else if (offset > (fi->offset + fi->len)) {
+			break;
+		} else if ((offset + len) < fi->offset) {
+			lh = lh->prev;
+			break;
+		}
+	}
+
+	new = kmalloc(sizeof(struct fragment_info), GFP_ATOMIC);
+	if (!new)
+		return -ENOMEM;
+
+	new->offset = offset;
+	new->len = len;
+
+	list_add(&new->list, lh);
+
+	return 0;
+}
+
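+/* Allocate a reassembly context for a new datagram of dg_size bytes,
+ * record its first fragment and copy the fragment payload into place. */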
+static inline int new_partial_datagram(struct rtnet_device *dev,
+				       struct list_head *pdgl, int dgl,
+				       int dg_size, char *frag_buf,
+				       int frag_off, int frag_len)
+{
+	struct partial_datagram *new;
+	struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
+
+	new = kmalloc(sizeof(struct partial_datagram), GFP_ATOMIC);
+	if (!new)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&new->frag_info);
+
+	if (new_fragment(&new->frag_info, frag_off, frag_len) < 0) {
+		kfree(new);
+		return -ENOMEM;
+	}
+
+	new->dgl = dgl;
+	new->dg_size = dg_size;
+
+	new->skb = rtnetdev_alloc_rtskb(dev, dg_size + dev->hard_header_len + 15);
+	if (!new->skb) {
+		struct fragment_info *fi = list_entry(new->frag_info.next,
+						      struct fragment_info,
+						      list);
+		kfree(fi);
+		kfree(new);
+		return -ENOMEM;
+	}
+
+	rtskb_reserve(new->skb, (dev->hard_header_len + 15) & ~15);
+	new->pbuf = rtskb_put(new->skb, dg_size);
+	memcpy(new->pbuf + frag_off, frag_buf, frag_len);
+
+	list_add(&new->list, pdgl);
+
+	return 0;
+}
+
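+/* Record one more fragment of an existing partial datagram and copy its
+ * payload into the reassembly buffer. */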
+static inline int update_partial_datagram(struct list_head *pdgl, struct list_head *lh,
+					  char *frag_buf, int frag_off, int frag_len)
+{
+	struct partial_datagram *pd = list_entry(lh, struct partial_datagram, list);
+
+	if (new_fragment(&pd->frag_info, frag_off, frag_len) < 0) {
+		return -ENOMEM;
+	}
+
+	memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
+
+	/* Move the list entry to the beginning of the list so that the oldest
+	 * partial datagrams percolate to the end of the list. */
+	list_del(lh);
+	list_add(lh, pdgl);
+
+	return 0;
+}
+
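+/* Drop a partial datagram: free all of its fragment descriptors, unlink
+ * the entry and release the associated rtskb. */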
+static inline void purge_partial_datagram(struct list_head *old)
+{
+	struct partial_datagram *pd = list_entry(old, struct partial_datagram, list);
+	struct list_head *lh, *n;
+
+	list_for_each_safe(lh, n, &pd->frag_info) {
+		struct fragment_info *fi = list_entry(lh, struct fragment_info, list);
+		list_del(lh);
+		kfree(fi);
+	}
+	list_del(old);
+	kfree_rtskb(pd->skb);
+	kfree(pd);
+}
+
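+/* A datagram is complete once all fragments have been coalesced by
+ * new_fragment() into a single entry covering dg_size bytes. */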
+static inline int is_datagram_complete(struct list_head *lh, int dg_size)
+{
+	struct partial_datagram *pd = list_entry(lh, struct partial_datagram, list);
+	struct fragment_info *fi = list_entry(pd->frag_info.next,
+					      struct fragment_info, list);
+
+	return (fi->len == dg_size);
+}
+
+
+
+
+/* Packet reception. We convert the IP1394 encapsulation header to an
+ * ethernet header, and fill it with some of our other fields. This is
+ * an incoming packet from the 1394 bus.  */
+static int eth1394_data_handler(struct rtnet_device *dev, int srcid, int destid,
+				  char *buf, int len, nanosecs_abs_t time_stamp)
+{
+	struct rtskb *skb;
+	rtdm_lockctx_t context;
+	struct eth1394_priv *priv;
+	union eth1394_hdr *hdr = (union eth1394_hdr *)buf;
+	u16 ether_type = 0;  /* initialized to clear warning */
+	int hdr_len;
+
+	//~ nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+	priv = (struct eth1394_priv *)dev->priv;
+
+	/* First, did we receive a fragmented or unfragmented datagram? */
+	hdr->words.word1 = ntohs(hdr->words.word1);
+
+	hdr_len = hdr_type_len[hdr->common.lf];
+
+	if (hdr->common.lf == ETH1394_HDR_LF_UF) {
+		DEBUGP("a single datagram has been received\n");
+		/* An unfragmented datagram has been received by the ieee1394
+		 * bus. Build an skbuff around it so we can pass it to the
+		 * high level network layer. */
+
+		//~ if(rtpkb_acquire((struct rtpkb*)packet, &priv->skb_pool)){
+			//~ HPSB_PRINT (KERN_ERR, "eth1394 rx: low on mem\n");
+			//~ priv->stats.rx_dropped++;
+			//~ return -1;
+		//~ }
+
+		skb = rtnetdev_alloc_rtskb(dev, len + dev->hard_header_len + 15);
+		if (!skb) {
+			ETH1394_PRINT_G(KERN_ERR, "eth1394 rx: low on mem\n");
+			priv->stats.rx_dropped++;
+			return -1;
+		}
+		//~ skb = (struct rtskb *)packet;//we can do this, because these two belong to the same common object, rtpkb.
+		//~ rtpkb_put(skb, len-hdr_len);
+		//~ skb->data = (u8 *)packet->data + hdr_len; //we jump over the 1394-specific fragment overhead
+		//~ rtskb_put(skb, );
+		rtskb_reserve(skb, (dev->hard_header_len + 15) & ~15); //reserve space to put in the fake MAC header
+		memcpy(rtskb_put(skb, len - hdr_len), buf + hdr_len, len - hdr_len);
+		ether_type = hdr->uf.ether_type;
+	} else {
+		/* A datagram fragment has been received, now the fun begins. */
+		struct list_head *pdgl, *lh;
+		struct partial_datagram *pd;
+		int fg_off;
+		int fg_len = len - hdr_len;
+		int dg_size;
+		int dgl;
+		int retval;
+		int sid = NODEID_TO_NODE(srcid);
+		struct pdg_list *pdg = &(priv->pdg[sid]);
+
+		DEBUGP("a datagram fragment has been received\n");
+		hdr->words.word3 = ntohs(hdr->words.word3);
+		/* The 4th header word is reserved so no need to do ntohs() */
+
+		if (hdr->common.lf == ETH1394_HDR_LF_FF) {
+			//first fragment
+			ether_type = hdr->ff.ether_type;
+			dgl = hdr->ff.dgl;
+			dg_size = hdr->ff.dg_size + 1;
+			fg_off = 0;
+		} else {
+			hdr->words.word2 = ntohs(hdr->words.word2);
+			dgl = hdr->sf.dgl;
+			dg_size = hdr->sf.dg_size + 1;
+			fg_off = hdr->sf.fg_off;
+		}
+		rtdm_lock_get_irqsave(&pdg->lock, context);
+
+		pdgl = &(pdg->list);
+		lh = find_partial_datagram(pdgl, dgl);
+
+		if (lh == NULL) {
+			if (pdg->sz == max_partial_datagrams) {
+				/* remove the oldest */
+				purge_partial_datagram(pdgl->prev);
+				pdg->sz--;
+			}
+
+			retval = new_partial_datagram(dev, pdgl, dgl, dg_size,
+						      buf + hdr_len, fg_off,
+						      fg_len);
+			if (retval < 0) {
+				rtdm_lock_put_irqrestore(&pdg->lock, context);
+				goto bad_proto;
+			}
+			pdg->sz++;
+			lh = find_partial_datagram(pdgl, dgl);
+		} else {
+			struct partial_datagram *pd;
+
+			pd = list_entry(lh, struct partial_datagram, list);
+
+			if (fragment_overlap(&pd->frag_info, fg_off, fg_len)) {
+				/* Overlapping fragments, obliterate old
+				 * datagram and start new one. */
+				purge_partial_datagram(lh);
+				retval = new_partial_datagram(dev, pdgl, dgl,
+							      dg_size,
+							      buf + hdr_len,
+							      fg_off, fg_len);
+				if (retval < 0) {
+					pdg->sz--;
+					rtdm_lock_put_irqrestore(&pdg->lock, context);
+					goto bad_proto;
+				}
+			} else {
+				retval = update_partial_datagram(pdgl, lh,
+								 buf + hdr_len,
+								 fg_off, fg_len);
+				if (retval < 0) {
+					/* Couldn't save off fragment anyway
+					 * so might as well obliterate the
+					 * datagram now. */
+					purge_partial_datagram(lh);
+					pdg->sz--;
+					rtdm_lock_put_irqrestore(&pdg->lock, context);
+					goto bad_proto;
+				}
+			} /* fragment overlap */
+		} /* new datagram or add to existing one */
+
+		pd = list_entry(lh, struct partial_datagram, list);
+
+		if (hdr->common.lf == ETH1394_HDR_LF_FF) {
+			pd->ether_type = ether_type;
+		}
+
+		if (is_datagram_complete(lh, dg_size)) {
+			ether_type = pd->ether_type;
+			pdg->sz--;
+			//skb = skb_get(pd->skb);
+			skb = pd->skb;
+			purge_partial_datagram(lh);
+			rtdm_lock_put_irqrestore(&pdg->lock, context);
+		} else {
+			/* Datagram is not complete, we're done for the
+			 * moment. */
+			rtdm_lock_put_irqrestore(&pdg->lock, context);
+			return 0;
+		}
+	} /* unfragmented datagram or fragmented one */
+
+	/* Write metadata, and then pass to the receive level */
+	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* don't check it */
+
+	/* Parse the encapsulation header. This actually does the job of
+	 * converting to an ethernet frame header, as well as ARP
+	 * conversion if needed. ARP conversion is easier in this
+	 * direction, since we are using ethernet as our backend.  */
+	skb->protocol = eth1394_parse_encap(skb, dev, srcid, destid,
+					      ether_type);
+
+
+	rtdm_lock_get_irqsave(&priv->lock, context);
+	if (!skb->protocol) {
+		DEBUG_PRINT("pointer to %s(%s)%d\n",__FILE__,__FUNCTION__,__LINE__);
+		priv->stats.rx_errors++;
+		priv->stats.rx_dropped++;
+		//dev_kfree_skb_any(skb);
+		kfree_rtskb(skb);
+		goto bad_proto;
+	}
+
+	skb->time_stamp = time_stamp;
+	/*if (netif_rx(skb) == NET_RX_DROP) {
+		priv->stats.rx_errors++;
+		priv->stats.rx_dropped++;
+		goto bad_proto;
+	}*/
+	rtnetif_rx(skb);//finally, we deliver the packet
+
+	/* Statistics */
+	priv->stats.rx_packets++;
+	priv->stats.rx_bytes += skb->len;
+	rt_mark_stack_mgr(dev);
+
+bad_proto:
+	if (rtnetif_queue_stopped(dev))
+		rtnetif_wake_queue(dev);
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	//dev->last_rx = jiffies;
+
+	return 0;
+}
+
+
+static int eth1394_write(struct hpsb_host *host, struct hpsb_packet *packet, unsigned int length)
+{
+	struct host_info *hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
+	int ret;
+
+	if (hi == NULL) {
+		ETH1394_PRINT_G(KERN_ERR, "Could not find net device for host %s\n",
+				host->driver->name);
+		return RCODE_ADDRESS_ERROR;
+	}
+
+	//we need to parse the packet now
+	ret = eth1394_data_handler(hi->dev, packet->header[1]>>16, //source id
+							 packet->header[0]>>16, //dest id
+							 (char *)packet->data, //data
+							packet->data_size, packet->time_stamp);
+	//we only get the request packet; serve it, but don't free it, since it does not belong to us
+
+	if(ret)
+		return RCODE_ADDRESS_ERROR;
+	else
+		return RCODE_COMPLETE;
+}
+
+
+/**
+ * Callback function for the broadcast channel,
+ * called from hpsb_iso_wake().
+ */
+static void eth1394_iso(struct hpsb_iso *iso, void *arg)
+{
+	quadlet_t *data;
+	char *buf;
+	struct rtnet_device *dev;
+	unsigned int len;
+	u32 specifier_id;
+	u16 source_id;
+	int i;
+	int nready;
+
+	struct host_info *hi = hpsb_get_hostinfo(&eth1394_highlevel, iso->host);
+	if (hi == NULL) {
+		ETH1394_PRINT_G(KERN_ERR, "Could not find net device for host %s\n",
+				iso->host->driver->name);
+		return;
+	}
+
+	dev = hi->dev;
+
+	nready = hpsb_iso_n_ready(iso);
+	for (i = 0; i < nready; i++) {
+		struct hpsb_iso_packet_info *info =
+			&iso->infos[(iso->first_packet + i) % iso->buf_packets];
+		data = (quadlet_t*) (iso->data_buf.kvirt + info->offset);
+
+		/* skip over GASP header */
+		buf = (char *)data + 8;
+		len = info->len - 8;
+
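+		/* The 8-byte GASP header carries source_ID and specifier_ID_hi
+		 * in its first quadlet, specifier_ID_lo and the GASP version in
+		 * the second; extract them for the filter below. */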
+		specifier_id = (((be32_to_cpu(data[0]) & 0xffff) << 8) |
+				((be32_to_cpu(data[1]) & 0xff000000) >> 24));
+		source_id = be32_to_cpu(data[0]) >> 16;
+
+		if (info->channel != (iso->host->csr.broadcast_channel & 0x3f) ||
+				specifier_id != ETHER1394_GASP_SPECIFIER_ID) {
+			/* This packet is not for us */
+			continue;
+		}
+		eth1394_data_handler(dev, source_id, LOCAL_BUS | ALL_NODES,
+				       buf, len, rtdm_clock_read());
+	}
+
+	hpsb_iso_recv_release_packets(iso, i);
+
+	//dev->last_rx = jiffies;
+}
+
+/******************************************
+ * Datagram transmission code
+ ******************************************/
+
+/* Convert a standard ARP packet to 1394 ARP. The first 8 bytes (the entire
+ * arphdr) have the same format as the ip1394 header, so they overlap.  The rest
+ * needs to be munged a bit.  The remainder of the arphdr is formatted based
+ * on the hardware and IP address lengths.  We know what they'll be, so it's
+ * easy to handle.
+ *
+ * Now that the EUI is used for the hardware address all we need to do to make
+ * this work for 1394 is to insert 2 quadlets that contain max_rec size,
+ * speed, and unicast FIFO address information between the sender_unique_id
+ * and the IP addresses.
+ */
+
+//we don't need the EUI id now. fifo_hi should contain the bus id and node id.
+//fifo_lo should contain the highest 32 bits of the in-node address.
+static inline void eth1394_arp_to_1394arp(struct rtskb *skb,
+					    struct rtnet_device *dev)
+{
+	struct eth1394_priv *priv = (struct eth1394_priv *)(dev->priv);
+	u16 phy_id = NODEID_TO_NODE(priv->host->node_id);
+
+	struct arphdr *arp = (struct arphdr *)skb->data;
+	unsigned char *arp_ptr = (unsigned char *)(arp + 1);
+	struct eth1394_arp *arp1394 = (struct eth1394_arp *)skb->data;
+
+	arp1394->hw_addr_len	= 6;
+	arp1394->sip		= *(u32*)(arp_ptr + ETH_ALEN);
+	arp1394->max_rec	= priv->host->csr.max_rec;
+	arp1394->sspd		= priv->sspd[phy_id];
+
+	return;
+}
+
+/* We need to encapsulate the standard header with our own. We use the
+ * ethernet header's proto for our own. */
+static inline unsigned int eth1394_encapsulate_prep(unsigned int max_payload,
+						      int proto,
+						      union eth1394_hdr *hdr,
+						      u16 dg_size, u16 dgl)
+{
+	unsigned int adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_UF];
+
+	/* Does it all fit in one packet? */
+	if (dg_size <= adj_max_payload) {
+		hdr->uf.lf = ETH1394_HDR_LF_UF;
+		hdr->uf.ether_type = proto;
+	} else {
+		hdr->ff.lf = ETH1394_HDR_LF_FF;
+		hdr->ff.ether_type = proto;
+		hdr->ff.dg_size = dg_size - 1;
+		hdr->ff.dgl = dgl;
+		adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_FF];
+	}
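+	/* Number of packets needed: ceil(dg_size / adj_max_payload). */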
+	return((dg_size + (adj_max_payload - 1)) / adj_max_payload);
+}
+
+static inline unsigned int eth1394_encapsulate(struct rtskb *skb,
+						 unsigned int max_payload,
+						 union eth1394_hdr *hdr)
+{
+	union eth1394_hdr *bufhdr;
+	int ftype = hdr->common.lf;
+	int hdrsz = hdr_type_len[ftype];
+	unsigned int adj_max_payload = max_payload - hdrsz;
+
+	switch(ftype) {
+	case ETH1394_HDR_LF_UF:
+		bufhdr = (union eth1394_hdr *)rtskb_push(skb, hdrsz);
+		bufhdr->words.word1 = htons(hdr->words.word1);
+		bufhdr->words.word2 = hdr->words.word2;
+		break;
+
+	case ETH1394_HDR_LF_FF:
+		bufhdr = (union eth1394_hdr *)rtskb_push(skb, hdrsz);
+		bufhdr->words.word1 = htons(hdr->words.word1);
+		bufhdr->words.word2 = hdr->words.word2;
+		bufhdr->words.word3 = htons(hdr->words.word3);
+		bufhdr->words.word4 = 0;
+
+		/* Set frag type here for future interior fragments */
+		hdr->common.lf = ETH1394_HDR_LF_IF;
+		hdr->sf.fg_off = 0;
+		break;
+
+	default:
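+		/* Interior or last fragment: advance the fragment offset and
+		 * strip the payload that has already been sent off the skb. */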
+		hdr->sf.fg_off += adj_max_payload;
+		bufhdr = (union eth1394_hdr *)rtskb_pull(skb, adj_max_payload);
+		if (max_payload >= skb->len)
+			hdr->common.lf = ETH1394_HDR_LF_LF;
+		bufhdr->words.word1 = htons(hdr->words.word1);
+		bufhdr->words.word2 = htons(hdr->words.word2);
+		bufhdr->words.word3 = htons(hdr->words.word3);
+		bufhdr->words.word4 = 0;
+	}
+
+	return min(max_payload, skb->len);
+}
+
+//just allocate an hpsb_packet header, without payload.
+static inline struct hpsb_packet *eth1394_alloc_common_packet(struct hpsb_host *host, unsigned int priority)
+{
+	struct hpsb_packet *p;
+
+	p = hpsb_alloc_packet(0,&host->pool, priority);
+	if (p) {
+		p->host = host;
+		p->data = NULL;
+		p->generation = get_hpsb_generation(host);
+		p->type = hpsb_async;
+	}
+	return p;
+}
+
+//prepare an asynchronous write packet
+static inline int eth1394_prep_write_packet(struct hpsb_packet *p,
+					      struct hpsb_host *host,
+					      nodeid_t node, u64 addr,
+					      void * data, int tx_len)
+{
+	p->node_id = node;
+
+	p->tcode = TCODE_WRITEB;
+
+	p->header[1] = (host->node_id << 16) | (addr >> 32);
+	p->header[2] = addr & 0xffffffff;
+
+	p->header_size = 16;
+	p->expect_response = 1;
+
+	if (hpsb_get_tlabel(p)) {
+		ETH1394_PRINT_G(KERN_ERR, "No more tlabels left while sending "
+				"to node " NODE_BUS_FMT "\n", NODE_BUS_ARGS(host, node));
+		return -1;
+	}
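+	/* First header quadlet: destination ID | tlabel | retry code | tcode. */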
+	p->header[0] = (p->node_id << 16) | (p->tlabel << 10)
+		| (1 << 8) | (TCODE_WRITEB << 4);
+
+	p->header[3] = tx_len << 16;
+	p->data_size = tx_len + (tx_len % 4 ? 4 - (tx_len % 4) : 0);
+	p->data = (quadlet_t*)data;
+
+	return 0;
+}
+
+//prepare a GASP packet from the given skb.
+static inline void eth1394_prep_gasp_packet(struct hpsb_packet *p,
+					      struct eth1394_priv *priv,
+					      struct rtskb *skb, int length)
+{
+	p->header_size = 4;
+	p->tcode = TCODE_STREAM_DATA;
+
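+	/* Stream packet header: data_length | tag (3 = GASP) | channel | tcode. */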
+	p->header[0] = (length << 16) | (3 << 14)
+		| ((priv->broadcast_channel) << 8)
+		| (TCODE_STREAM_DATA << 4);
+	p->data_size = length;
+	p->data = ((quadlet_t*)skb->data) - 2; //we need 64 bits (2 quadlets) for the extra specifier ID and GASP version
+	p->data[0] = cpu_to_be32((priv->host->node_id << 16) |
+				      ETHER1394_GASP_SPECIFIER_ID_HI);
+	p->data[1] = cpu_to_be32((ETHER1394_GASP_SPECIFIER_ID_LO << 24) |
+				      ETHER1394_GASP_VERSION);
+
+	/* Setting the node id to ALL_NODES (not LOCAL_BUS | ALL_NODES)
+	 * prevents hpsb_send_packet() from setting the speed to an arbitrary
+	 * value based on packet->node_id if packet->node_id is not set. */
+	p->node_id = ALL_NODES;
+	p->speed_code = priv->sspd[ALL_NODES];
+}
+
+
+static inline void eth1394_free_packet(struct hpsb_packet *packet)
+{
+	if (packet->tcode != TCODE_STREAM_DATA)
+		hpsb_free_tlabel(packet);
+	hpsb_free_packet(packet);
+}
+
+static void eth1394_complete_cb(struct hpsb_packet *packet, void *__ptask);
+
+
+/**
+ * This function does the actual call to hpsb_send_packet().
+ * Before that, it also constructs the FireWire packet according to
+ * ptask.
+ */
+static int eth1394_send_packet(struct packet_task *ptask, unsigned int tx_len, nanosecs_abs_t *xmit_stamp)
+{
+	struct eth1394_priv *priv = ptask->priv;
+	struct hpsb_packet *packet = NULL;
+	int ret;
+
+	packet = eth1394_alloc_common_packet(priv->host, ptask->priority);
+	if (!packet) {
+		ret = -ENOMEM;
+		return ret;
+	}
+	if(xmit_stamp)
+		packet->xmit_stamp = xmit_stamp;
+
+	if (ptask->tx_type == ETH1394_GASP) {
+		int length = tx_len + (2 * sizeof(quadlet_t)); //for the extra gasp overhead
+
+		eth1394_prep_gasp_packet(packet, priv, ptask->skb, length);
+	} else if (eth1394_prep_write_packet(packet, priv->host,
+					       ptask->dest_node,
+					       ptask->addr, ptask->skb->data,
+					       tx_len)) {
+		hpsb_free_packet(packet);
+		return -1;
+	}
+
+	ptask->packet = packet;
+	hpsb_set_packet_complete_task(ptask->packet, eth1394_complete_cb,
+				      ptask);
+
+	ret = hpsb_send_packet(packet);
+	if (ret != 0) {
+		eth1394_free_packet(packet);
+	}
+
+	return ret;
+}
+
+
+/* Task function to be run when a datagram transmission is completed */
+static inline void eth1394_dg_complete(struct packet_task *ptask, int fail)
+{
+	struct rtskb *skb = ptask->skb;
+	struct rtnet_device *dev = skb->rtdev;
+	struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
+	rtdm_lockctx_t context;
+
+	/* Statistics */
+	rtdm_lock_get_irqsave(&priv->lock, context);
+	if (fail) {
+		priv->stats.tx_dropped++;
+		priv->stats.tx_errors++;
+	} else {
+		priv->stats.tx_bytes += skb->len;
+		priv->stats.tx_packets++;
+	}
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	//dev_kfree_skb_any(skb);
+	kfree_rtskb(skb);
+	//~ kmem_cache_free(packet_task_cache, ptask);
+	//setting packet to NULL marks this ptask slot as free again
+	ptask->packet = NULL;
+}
+
+
+/* Callback for when a packet has been sent and the status of that packet is
+ * known */
+static void eth1394_complete_cb(struct hpsb_packet *packet, void *__ptask)
+{
+	struct packet_task *ptask = (struct packet_task *)__ptask;
+	int fail = 0;
+
+	if (packet->tcode != TCODE_STREAM_DATA)
+		fail = hpsb_packet_success(packet);
+
+	//we must not free the packet, since it belongs to the RT-FireWire core.
+	//~ eth1394_free_packet(packet);
+
+	ptask->outstanding_pkts--;
+	if (ptask->outstanding_pkts > 0 && !fail)
+	{
+		int tx_len;
+
+		/* Add the encapsulation header to the fragment */
+		tx_len = eth1394_encapsulate(ptask->skb, ptask->max_payload,
+					       &ptask->hdr);
+		if (eth1394_send_packet(ptask, tx_len, NULL))
+			eth1394_dg_complete(ptask, 1);
+	} else {
+		eth1394_dg_complete(ptask, fail);
+	}
+}
+
+
+
+/**
+ * Transmit a packet (called by the RTnet stack);
+ * this is the device's hard_start_xmit handler.
+ */
+static int eth1394_tx (struct rtskb *skb, struct rtnet_device *dev)
+{
+
+	struct ethhdr *eth;
+	struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
+	int proto;
+	rtdm_lockctx_t context;
+	nodeid_t dest_node;
+	eth1394_tx_type tx_type;
+	int ret = 0;
+	unsigned int tx_len;
+	unsigned int max_payload;
+	u16 dg_size;
+	u16 dgl;
+
+	//try to find an available ptask struct; if none is free, we cannot send the packet
+	struct packet_task *ptask = NULL;
+	int i;
+	for (i = 0; i < 20; i++) {
+		if (priv->ptask_list[i].packet == NULL) {
+			ptask = &priv->ptask_list[i];
+			break;
+		}
+	}
+	if (ptask == NULL)
+		return -EBUSY;
+
+	rtdm_lock_get_irqsave(&priv->lock, context);
+	if (priv->bc_state == ETHER1394_BC_CLOSED) {
+		ETH1394_PRINT(KERN_ERR, dev->name,
+			      "Cannot send packet, no broadcast channel available.\n");
+		ret = -EAGAIN;
+		rtdm_lock_put_irqrestore(&priv->lock, context);
+		goto fail;
+	}
+	if ((ret = eth1394_init_bc(dev))) {
+		rtdm_lock_put_irqrestore(&priv->lock, context);
+		goto fail;
+	}
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+	//if ((skb = skb_share_check (skb, kmflags)) == NULL) {
+	//	ret = -ENOMEM;
+	//	goto fail;
+	//}
+
+	/* Get rid of the fake eth1394 header, but save a pointer */
+	eth = (struct ethhdr*)skb->data;
+	rtskb_pull(skb, ETH_HLEN);
+	//don't get rid of the fake eth1394 header, since we need it on the receiving side
+	//eth = (struct ethhdr*)skb->data;
+
+	//~ //find the node id via our fake MAC address
+	//~ ne = hpsb_guid_get_entry(be64_to_cpu(*(u64*)eth->h_dest));
+	//~ if (!ne)
+		//~ dest_node = LOCAL_BUS | ALL_NODES;
+	//~ else
+		//~ dest_node = ne->nodeid;
+	//now it is much easier: the destination node id is stored directly in the fake MAC address
+	dest_node = *(u16*)eth->h_dest;
+	if (dest_node != 0xffff)
+		DEBUGP("%s: dest_node is %x\n", __FUNCTION__, dest_node);
+
+	proto = eth->h_proto;
+
+	/* If this is an ARP packet, convert it */
+	if (proto == __constant_htons (ETH_P_ARP))
+		eth1394_arp_to_1394arp (skb, dev);
+
+	max_payload = priv->maxpayload[NODEID_TO_NODE(dest_node)];
+	DEBUGP("%s: max_payload is %d\n", __FUNCTION__, max_payload);
+
+	/* This check should be unnecessary, but we'll keep it for safety for
+	 * a while longer. */
+	if (max_payload < 512) {
+		DEBUGP("max_payload too small: %d   (setting to 512)\n",
+			      max_payload);
+		max_payload = 512;
+	}
+
+	/* Set the transmission type for the packet.  ARP packets and IP
+	 * broadcast packets are sent via GASP. */
+	if (memcmp(eth->h_dest, dev->broadcast, sizeof(nodeid_t)) == 0 ||
+	    proto == __constant_htons(ETH_P_ARP) ||
+	    (proto == __constant_htons(ETH_P_IP) &&
+	     IN_MULTICAST(__constant_ntohl(skb->nh.iph->daddr)))) {
+		tx_type = ETH1394_GASP;
+		max_payload -= ETHER1394_GASP_OVERHEAD; //we have extra overhead for gasp packet
+	} else {
+		tx_type = ETH1394_WRREQ;
+	}
+
+	dg_size = skb->len;
+
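+	/* Fetch the datagram label for this destination; the counter is only
+	 * advanced when the datagram will actually be fragmented. */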
+	rtdm_lock_get_irqsave(&priv->lock, context);
+	dgl = priv->dgl[NODEID_TO_NODE(dest_node)];
+	if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF])
+		priv->dgl[NODEID_TO_NODE(dest_node)]++;
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	ptask->hdr.words.word1 = 0;
+	ptask->hdr.words.word2 = 0;
+	ptask->hdr.words.word3 = 0;
+	ptask->hdr.words.word4 = 0;
+	ptask->skb = skb;
+	ptask->priv = priv;
+	ptask->tx_type = tx_type;
+
+	if (tx_type != ETH1394_GASP) {
+		u64 addr;
+
+		/* This test is just temporary until ConfigROM support has
+		 * been added to eth1394.  Until then, we need an ARP packet
+		 * after a bus reset from the current destination node so that
+		 * we can get FIFO information. */
+		//~ if (priv->fifo[NODEID_TO_NODE(dest_node)] == 0ULL) {
+			//~ ret = -EAGAIN;
+			//~ goto fail;
+		//~ }
+
+		//~ rtos_spin_lock_irqsave(&priv->lock, flags);
+		//~ addr = priv->fifo[NODEID_TO_NODE(dest_node)];
+		addr =  ETHER1394_REGION_ADDR;
+		//~ rtos_spin_unlock_irqrestore(&priv->lock, flags);
+
+		ptask->addr = addr;
+		ptask->dest_node = dest_node;
+	}
+
+	ptask->tx_type = tx_type;
+	ptask->max_payload = max_payload;
+	ptask->outstanding_pkts = eth1394_encapsulate_prep(max_payload, proto,
+							     &ptask->hdr, dg_size,
+							     dgl);
+
+	/* Add the encapsulation header to the fragment */
+	tx_len = eth1394_encapsulate(skb, max_payload, &ptask->hdr);
+	//dev->trans_start = jiffies;
+	//~ if(skb->xmit_stamp)
+		//~ *skb->xmit_stamp = cpu_to_be64(rtos_get_time() + *skb->xmit_stamp);
+
+
+	if (eth1394_send_packet(ptask, tx_len, skb->xmit_stamp))
+		goto fail;
+
+	rtnetif_wake_queue(dev);
+	return 0;
+fail:
+	if (ptask!=NULL){
+		//~ kmem_cache_free(packet_task_cache, ptask);
+		ptask->packet=NULL;
+		ptask=NULL;
+	}
+
+	if (skb != NULL)
+		dev_kfree_rtskb(skb);
+
+	rtdm_lock_get_irqsave(&priv->lock, context);
+	priv->stats.tx_dropped++;
+	priv->stats.tx_errors++;
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	if (rtnetif_queue_stopped(dev))
+		rtnetif_wake_queue(dev);
+
+	return 0;  /* returning non-zero causes serious problems */
+}
+
+static int eth1394_init(void)
+{
+	hpsb_register_highlevel(&eth1394_highlevel);
+
+	return 0;
+}
+
+static void eth1394_exit(void)
+{
+	hpsb_unregister_highlevel(&eth1394_highlevel);
+}
+
+module_init(eth1394_init);
+module_exit(eth1394_exit);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/net/drivers/mpc52xx_fec/mpc52xx_fec.c	2022-03-21 12:58:29.457888298 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * arch/ppc/5xxx_io/fec.c
+ *
+ * Driver for the MPC5200 Fast Ethernet Controller
+ * Support for MPC5100 FEC has been removed, contact the author if you need it
+ *
+ * Author: Dale Farnsworth <dfarnsworth@mvista.com>
+ *
+ * 2003 (c) MontaVista, Software, Inc.  This file is licensed under the terms
+ * of the GNU General Public License version 2.  This program is licensed
+ * "as is" without any warranty of any kind, whether express or implied.
+ *
+ * Ported to RTnet from "linuxppc_2_4_devel/arch/ppc/5xxx_io/fec.c".
+ * Copyright (c) 2008 Wolfgang Grandegger <wg@denx.de>
+ */
+
+/* #define PARANOID_CHECKS*/
+/* #define MUST_ALIGN_TRANSMIT_DATA*/
+#define MUST_UNALIGN_RECEIVE_DATA
+/* #define EXIT_ISR_AT_MEMORY_SQUEEZE*/
+/* #define DISPLAY_WARNINGS*/
+
+#ifdef ORIGINAL_CODE
+static const char *version = "fec.c v0.2\n";
+#endif /* ORIGINAL_CODE */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <asm/delay.h>
+#include <rtnet_port.h>
+#include "rt_mpc52xx_fec.h"
+#ifdef CONFIG_UBOOT
+#include <asm/ppcboot.h>
+#endif
+
+#ifdef CONFIG_XENO_DRIVERS_NET_FASTROUTE
+#error "Fast Routing on MPC5200 ethernet not supported"
+#endif
+
+MODULE_AUTHOR("Maintainer: Wolfgang Grandegger <wg@denx.de>");
+MODULE_DESCRIPTION("RTnet driver for MPC52xx FEC");
+MODULE_LICENSE("GPL");
+
+static unsigned int rx_pool_size =  0;
+MODULE_PARM(rx_pool_size, "i");
+MODULE_PARM_DESC(rx_pool_size, "Receive buffer pool size");
+
+#define printk(fmt,args...)	rtdm_printk (fmt ,##args)
+
+static struct rtnet_device *mpc5xxx_fec_dev;
+static int mpc5xxx_fec_interrupt(rtdm_irq_t *irq_handle);
+static int mpc5xxx_fec_receive_interrupt(rtdm_irq_t *irq_handle);
+static int mpc5xxx_fec_transmit_interrupt(rtdm_irq_t *irq_handle);
+static struct net_device_stats *mpc5xxx_fec_get_stats(struct rtnet_device *dev);
+#ifdef ORIGINAL_CODE
+static void mpc5xxx_fec_set_multicast_list(struct rtnet_device *dev);
+#endif /* ORIGINAL_CODE */
+static void mpc5xxx_fec_reinit(struct rtnet_device* dev);
+static int mpc5xxx_fec_setup(struct rtnet_device *dev, int reinit);
+static int mpc5xxx_fec_cleanup(struct rtnet_device *dev, int reinit);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+static void mpc5xxx_fec_mii(struct rtnet_device *dev);
+#ifdef ORIGINAL_CODE
+static int mpc5xxx_fec_ioctl(struct rtnet_device *, struct ifreq *rq, int cmd);
+static int mpc5xxx_netdev_ethtool_ioctl(struct rtnet_device *dev, void *useraddr);
+#endif /* ORIGINAL_CODE */
+static void mdio_timer_callback(unsigned long data);
+static void mii_display_status(struct rtnet_device *dev);
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET
+static void mpc5xxx_mdio_callback(uint regval, struct rtnet_device *dev, uint data);
+static int mpc5xxx_mdio_read(struct rtnet_device *dev, int phy_id, int location);
+#endif
+
+static void mpc5xxx_fec_update_stat(struct rtnet_device *);
+
+/* MII processing.  We keep this as simple as possible.  Requests are
+ * placed on the list (if there is room).  When the request is finished
+ * by the MII, an optional function may be called.
+ */
+typedef struct mii_list {
+	uint    mii_regval;
+	void    (*mii_func)(uint val, struct rtnet_device *dev, uint data);
+	struct  mii_list *mii_next;
+	uint    mii_data;
+} mii_list_t;
+
+#define		NMII	20
+mii_list_t      mii_cmds[NMII];
+mii_list_t      *mii_free;
+mii_list_t      *mii_head;
+mii_list_t      *mii_tail;
+
+typedef struct mdio_read_data {
+	u16 regval;
+	struct task_struct *sleeping_task;
+} mdio_read_data_t;
+
+static int mii_queue(struct rtnet_device *dev, int request,
+		void (*func)(uint, struct rtnet_device *, uint), uint data);
+
+/* Make MII read/write commands for the FEC. */
+#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | \
+							(VAL & 0xffff))
+#define mk_mii_end	0
+
+/* Register definitions for the PHY. */
+
+#define MII_REG_CR	 0	/* Control Register */
+#define MII_REG_SR	 1	/* Status Register */
+#define MII_REG_PHYIR1	 2	/* PHY Identification Register 1 */
+#define MII_REG_PHYIR2	 3	/* PHY Identification Register 2 */
+#define MII_REG_ANAR	 4	/* A-N Advertisement Register */
+#define MII_REG_ANLPAR	 5	/* A-N Link Partner Ability Register */
+#define MII_REG_ANER	 6	/* A-N Expansion Register */
+#define MII_REG_ANNPTR	 7	/* A-N Next Page Transmit Register */
+#define MII_REG_ANLPRNPR 8	/* A-N Link Partner Received Next Page Reg. */
+
+/* values for phy_status */
+
+#define PHY_CONF_ANE	0x0001	/* 1 auto-negotiation enabled */
+#define PHY_CONF_LOOP	0x0002	/* 1 loopback mode enabled */
+#define PHY_CONF_SPMASK	0x00f0	/* mask for speed */
+#define PHY_CONF_10HDX	0x0010	/* 10 Mbit half duplex supported */
+#define PHY_CONF_10FDX	0x0020	/* 10 Mbit full duplex supported */
+#define PHY_CONF_100HDX	0x0040	/* 100 Mbit half duplex supported */
+#define PHY_CONF_100FDX	0x0080	/* 100 Mbit full duplex supported */
+
+#define PHY_STAT_LINK	0x0100	/* 1 up - 0 down */
+#define PHY_STAT_FAULT	0x0200	/* 1 remote fault */
+#define PHY_STAT_ANC	0x0400	/* 1 auto-negotiation complete	*/
+#define PHY_STAT_SPMASK	0xf000	/* mask for speed */
+#define PHY_STAT_10HDX	0x1000	/* 10 Mbit half duplex selected	*/
+#define PHY_STAT_10FDX	0x2000	/* 10 Mbit full duplex selected	*/
+#define PHY_STAT_100HDX	0x4000	/* 100 Mbit half duplex selected */
+#define PHY_STAT_100FDX	0x8000	/* 100 Mbit full duplex selected */
+
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+u8 mpc5xxx_fec_mac_addr[6];
+u8 null_mac[6];
+
+#ifdef ORIGINAL_CODE
+static void mpc5xxx_fec_tx_timeout(struct rtnet_device *dev)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+
+	priv->stats.tx_errors++;
+
+	if (!priv->tx_full)
+		rtnetif_wake_queue(dev);
+}
+#endif /* ORIGINAL_CODE */
+
+static void
+mpc5xxx_fec_set_paddr(struct rtnet_device *dev, u8 *mac)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_fec *fec = priv->fec;
+
+	out_be32(&fec->paddr1, (mac[0]<<24) | (mac[1]<<16)
+			| (mac[2]<<8) | (mac[3]<<0));
+	out_be32(&fec->paddr2, (mac[4]<<24) | (mac[5]<<16) | 0x8808);
+}
+
+#ifdef ORIGINAL_CODE
+static int
+mpc5xxx_fec_set_mac_address(struct rtnet_device *dev, void *addr)
+{
+	struct sockaddr *sock = (struct sockaddr *)addr;
+
+	mpc5xxx_fec_set_paddr(dev, sock->sa_data);
+	return 0;
+}
+#endif /* ORIGINAL_CODE */
+
+/* This function is called to start or restart the FEC during a link
+ * change.  This happens on fifo errors or when switching between half
+ * and full duplex.
+ */
+static void
+mpc5xxx_fec_restart(struct rtnet_device *dev, int duplex)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_fec *fec = priv->fec;
+	u32 rcntrl;
+	u32 tcntrl;
+	int i;
+
+#if MPC5xxx_FEC_DEBUG > 1
+	printk("mpc5xxx_fec_restart\n");
+#endif
+	out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status) & 0x700000);
+	out_be32(&fec->tfifo_status, in_be32(&fec->tfifo_status) & 0x700000);
+	out_be32(&fec->reset_cntrl, 0x1000000);
+
+	/* Whack a reset.  We should wait for this. */
+	out_be32(&fec->ecntrl, MPC5xxx_FEC_ECNTRL_RESET);
+	for (i = 0; i < MPC5xxx_FEC_RESET_DELAY; ++i) {
+		if ((in_be32(&fec->ecntrl) & MPC5xxx_FEC_ECNTRL_RESET) == 0)
+			break;
+		udelay(1);
+	}
+	if (i == MPC5xxx_FEC_RESET_DELAY)
+		printk ("FEC Reset timeout!\n");
+
+	/* Set station address. */
+	out_be32(&fec->paddr1, *(u32 *)&dev->dev_addr[0]);
+	out_be32(&fec->paddr2,
+		((*(u16 *)&dev->dev_addr[4]) << 16) | 0x8808);
+
+#ifdef ORIGINAL_CODE
+	mpc5xxx_fec_set_multicast_list(dev);
+#endif /* ORIGINAL_CODE */
+
+	rcntrl = MPC5xxx_FEC_RECV_BUFFER_SIZE << 16;	/* max frame length */
+	rcntrl |= MPC5xxx_FEC_RCNTRL_FCE;
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	rcntrl |= MPC5xxx_FEC_RCNTRL_MII_MODE;
+#endif
+	if (duplex)
+		tcntrl = MPC5xxx_FEC_TCNTRL_FDEN;		/* FD enable */
+	else {
+		rcntrl |= MPC5xxx_FEC_RCNTRL_DRT;
+		tcntrl = 0;
+	}
+	out_be32(&fec->r_cntrl, rcntrl);
+	out_be32(&fec->x_cntrl, tcntrl);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	/* Set MII speed. */
+	out_be32(&fec->mii_speed, priv->phy_speed);
+#endif
+
+	priv->full_duplex = duplex;
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	priv->duplex_change = 0;
+#endif
+#if MPC5xxx_FEC_DEBUG > 4
+	printk("%s: duplex set to %d\n", dev->name, priv->full_duplex);
+#endif
+
+	/* Clear any outstanding interrupt. */
+	out_be32(&fec->ievent, 0xffffffff);	/* clear intr events */
+
+	/* Enable interrupts we wish to service.
+	*/
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	out_be32(&fec->imask, 0xf0fe0000);	/* enable all intr but tfint */
+#else
+	out_be32(&fec->imask, 0xf07e0000);	/* enable all intr but tfint */
+#endif
+
+	/* And last, enable the transmit and receive processing.
+	*/
+	out_be32(&fec->ecntrl, MPC5xxx_FEC_ECNTRL_ETHER_EN);
+	out_be32(&fec->r_des_active, 0x01000000);
+
+	/* The tx ring is no longer full. */
+	if (priv->tx_full)
+	{
+		priv->tx_full = 0;
+		rtnetif_wake_queue(dev);
+	}
+}
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+static void
+mpc5xxx_fec_mii(struct rtnet_device *dev)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_fec *fec = priv->fec;
+	mii_list_t	*mip;
+	uint		mii_reg;
+
+	mii_reg = in_be32(&fec->mii_data);
+
+	if ((mip = mii_head) == NULL) {
+		printk("MII and no head!\n");
+		return;
+	}
+#if MPC5xxx_FEC_DEBUG > 4
+	printk("mpc5xxx_fec_mii %08x %08x %08x\n",
+		mii_reg, (u32)mip->mii_func, mip->mii_data);
+#endif
+
+	if (mip->mii_func != NULL)
+		(*(mip->mii_func))(mii_reg, dev, mip->mii_data);
+
+	mii_head = mip->mii_next;
+	mip->mii_next = mii_free;
+	mii_free = mip;
+
+	if ((mip = mii_head) != NULL)
+		out_be32(&fec->mii_data, mip->mii_regval);
+}
+
+static int
+mii_queue(struct rtnet_device *dev, int regval, void (*func)(uint, struct rtnet_device *, uint), uint data)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_fec *fec = priv->fec;
+	rtdm_lockctx_t	context;
+	mii_list_t	*mip;
+	int		retval;
+
+#if MPC5xxx_FEC_DEBUG > 4
+	printk("mii_queue: %08x %08x %08x\n", regval, (u32)func, data);
+#endif
+
+	/* Add PHY address to register command.
+	*/
+	regval |= priv->phy_addr << 23;
+
+	retval = 0;
+
+	rtdm_lock_get_irqsave(&priv->lock, context);
+
+	if ((mip = mii_free) != NULL) {
+		mii_free = mip->mii_next;
+		mip->mii_regval = regval;
+		mip->mii_func = func;
+		mip->mii_next = NULL;
+		mip->mii_data = data;
+		if (mii_head) {
+			mii_tail->mii_next = mip;
+			mii_tail = mip;
+		} else {
+			mii_head = mii_tail = mip;
+			out_be32(&fec->mii_data, regval);
+		}
+	} else
+		retval = 1;
+
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	return retval;
+}
+
+static void mii_do_cmd(struct rtnet_device *dev, const phy_cmd_t *c)
+{
+	int k;
+
+	if (!c)
+		return;
+
+	for (k = 0; (c+k)->mii_data != mk_mii_end; k++)
+		mii_queue(dev, (c+k)->mii_data, (c+k)->funct, 0);
+}
+
+static void mii_parse_sr(uint mii_reg, struct rtnet_device *dev, uint data)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	uint s = priv->phy_status;
+
+	s &= ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);
+
+	if (mii_reg & 0x0004)
+		s |= PHY_STAT_LINK;
+	if (mii_reg & 0x0010)
+		s |= PHY_STAT_FAULT;
+	if (mii_reg & 0x0020)
+		s |= PHY_STAT_ANC;
+
+	priv->phy_status = s;
+	priv->link = (s & PHY_STAT_LINK) ? 1 : 0;
+}
+
+static void mii_parse_cr(uint mii_reg, struct rtnet_device *dev, uint data)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	uint s = priv->phy_status;
+
+	s &= ~(PHY_CONF_ANE | PHY_CONF_LOOP);
+
+	if (mii_reg & 0x1000)
+		s |= PHY_CONF_ANE;
+	if (mii_reg & 0x4000)
+		s |= PHY_CONF_LOOP;
+
+	priv->phy_status = s;
+}
+
+static void mii_parse_anar(uint mii_reg, struct rtnet_device *dev, uint data)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	uint s = priv->phy_status;
+
+	s &= ~(PHY_CONF_SPMASK);
+
+	if (mii_reg & 0x0020)
+		s |= PHY_CONF_10HDX;
+	if (mii_reg & 0x0040)
+		s |= PHY_CONF_10FDX;
+	if (mii_reg & 0x0080)
+		s |= PHY_CONF_100HDX;
+	if (mii_reg & 0x0100)
+		s |= PHY_CONF_100FDX;
+
+	priv->phy_status = s;
+}
+
+/* ------------------------------------------------------------------------- */
+/* Generic PHY support.  Should work for all PHYs, but does not support link
+ * change interrupts.
+ */
+#ifdef CONFIG_XENO_DRIVERS_NET_FEC_GENERIC_PHY
+
+static phy_info_t phy_info_generic = {
+	0x00000000, /* 0-->match any PHY */
+	"GENERIC",
+
+	(const phy_cmd_t []) {  /* config */
+		/* advertise only half-duplex capabilities */
+		{ mk_mii_write(MII_ADVERTISE, MII_ADVERTISE_HALF),
+			mii_parse_anar },
+
+		/* enable auto-negotiation */
+		{ mk_mii_write(MII_BMCR, BMCR_ANENABLE), mii_parse_cr },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup */
+		/* restart auto-negotiation */
+		{ mk_mii_write(MII_BMCR, (BMCR_ANENABLE | BMCR_ANRESTART)),
+			NULL },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		/* We don't actually use the ack_int table with a generic
+		 * PHY, but putting a reference to mii_parse_sr here keeps
+		 * us from getting a compiler warning about unused static
+		 * functions in the case where we only compile in generic
+		 * PHY support.
+		 */
+		{ mk_mii_read(MII_BMSR), mii_parse_sr },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown */
+		{ mk_mii_end, }
+	},
+};
+#endif	/* CONFIG_XENO_DRIVERS_NET_FEC_GENERIC_PHY */
+
+/* ------------------------------------------------------------------------- */
+/* The Level one LXT971 is used on some of my custom boards		     */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_FEC_LXT971
+
+/* register definitions for the 971 */
+
+#define MII_LXT971_PCR	16	/* Port Control Register	*/
+#define MII_LXT971_SR2	17	/* Status Register 2		*/
+#define MII_LXT971_IER	18	/* Interrupt Enable Register	*/
+#define MII_LXT971_ISR	19	/* Interrupt Status Register	*/
+#define MII_LXT971_LCR	20	/* LED Control Register		*/
+#define MII_LXT971_TCR	30	/* Transmit Control Register	*/
+
+static void mii_parse_lxt971_sr2(uint mii_reg, struct rtnet_device *dev, uint data)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	uint s = priv->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	if (mii_reg & 0x4000) {
+		if (mii_reg & 0x0200)
+			s |= PHY_STAT_100FDX;
+		else
+			s |= PHY_STAT_100HDX;
+	}
+	else {
+		if (mii_reg & 0x0200)
+			s |= PHY_STAT_10FDX;
+		else
+			s |= PHY_STAT_10HDX;
+	}
+	if (mii_reg & 0x0008)
+		s |= PHY_STAT_FAULT;
+
+	/* Record the new full_duplex value only if the link is up
+	 * (so we don't bother restarting the driver on duplex
+	 * changes when the link is down).
+	 */
+	if (priv->link) {
+		int prev_duplex = priv->full_duplex;
+		priv->full_duplex = ((mii_reg & 0x0200) != 0);
+		if (priv->full_duplex != prev_duplex) {
+			/* trigger a restart with changed duplex */
+			priv->duplex_change = 1;
+#if MPC5xxx_FEC_DEBUG > 1
+			printk("%s: duplex change: %s\n",
+			       dev->name, priv->full_duplex ? "full" : "half");
+#endif
+		}
+	}
+	priv->phy_status = s;
+}
+
+static phy_info_t phy_info_lxt971 = {
+	0x0001378e,
+	"LXT971",
+
+	(const phy_cmd_t []) {	/* config */
+#ifdef MPC5100_FIX10HDX
+		{ mk_mii_write(MII_REG_ANAR, 0x021), NULL }, /* 10 Mbps, HD */
+#else
+/*		{ mk_mii_write(MII_REG_ANAR, 0x0A1), NULL }, *//*  10/100, HD */
+		{ mk_mii_write(MII_REG_ANAR, 0x01E1), NULL }, /* 10/100, FD */
+#endif
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {	/* startup - enable interrupts */
+		{ mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+
+		/* Somehow the 971 reports that the link is down on the
+		 * first read after power-up.
+		 * Read here to get a valid value in ack_int. */
+
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+#if defined(CONFIG_UC101)
+		{ mk_mii_write(MII_LXT971_LCR, 0x4122), NULL }, /* LED settings */
+#endif
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		/* find out the current status */
+
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
+
+		/* we only need to read ISR to acknowledge */
+
+		{ mk_mii_read(MII_LXT971_ISR), NULL },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {	/* shutdown - disable interrupts */
+		{ mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+#endif /* CONFIG_XENO_DRIVERS_NET_FEC_LXT971 */
+
+/* ----------------------------------------------------------------- */
+/* The National Semiconductor DP83847 is used on a INKA 4X0 board    */
+/* ----------------------------------------------------------------- */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_FEC_DP83847
+
+/* Register definitions */
+#define MII_DP83847_PHYSTS 0x10  /* PHY Status Register */
+
+static void mii_parse_dp83847_physts(uint mii_reg, struct rtnet_device *dev, uint data)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	uint s = priv->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	if (mii_reg & 0x2) {
+		if (mii_reg & 0x4)
+			s |= PHY_STAT_10FDX;
+		else
+			s |= PHY_STAT_10HDX;
+	}
+	else {
+		if (mii_reg & 0x4)
+			s |= PHY_STAT_100FDX;
+		else
+			s |= PHY_STAT_100HDX;
+	}
+	if (mii_reg & 0x40)
+		s |= PHY_STAT_FAULT;
+
+	priv->full_duplex = ((mii_reg & 0x4) != 0);
+
+	priv->phy_status = s;
+}
+
+static phy_info_t phy_info_dp83847 = {
+	0x020005c3,
+	"DP83847",
+
+	(const phy_cmd_t []) {  /* config */
+		{ mk_mii_write(MII_REG_ANAR, 0x01E1), NULL  }, /* Auto-Negotiation Advertisement set to       */
+							       /* auto-negotiate 10/100 Mbps, Half/Full duplex */
+		{ mk_mii_read(MII_REG_CR),   mii_parse_cr   },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup */
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* Enable and Restart Auto-Negotiation */
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr   },
+		{ mk_mii_read(MII_DP83847_PHYSTS), mii_parse_dp83847_physts },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr   },
+		{ mk_mii_read(MII_DP83847_PHYSTS), mii_parse_dp83847_physts },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_end, }
+	}
+};
+
+#endif /* CONFIG_XENO_DRIVERS_NET_FEC_DP83847 */
+
+static phy_info_t *phy_info[] = {
+
+#ifdef CONFIG_XENO_DRIVERS_NET_FEC_LXT971
+	&phy_info_lxt971,
+#endif /* CONFIG_XENO_DRIVERS_NET_FEC_LXT971 */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_FEC_DP83847
+	&phy_info_dp83847,
+#endif /* CONFIG_XENO_DRIVERS_NET_FEC_DP83847 */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_FEC_GENERIC_PHY
+	/* Generic PHY support.  This must be the last PHY in the table.
+	 * It will be used to support any PHY that doesn't match a previous
+	 * entry in the table.
+	 */
+	&phy_info_generic,
+#endif /* CONFIG_XENO_DRIVERS_NET_FEC_GENERIC_PHY */
+
+	NULL
+};
+
+static void mii_display_config(struct rtnet_device *dev)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	uint s = priv->phy_status;
+
+	printk("%s: config: auto-negotiation ", dev->name);
+
+	if (s & PHY_CONF_ANE)
+		printk("on");
+	else
+		printk("off");
+
+	if (s & PHY_CONF_100FDX)
+		printk(", 100FDX");
+	if (s & PHY_CONF_100HDX)
+		printk(", 100HDX");
+	if (s & PHY_CONF_10FDX)
+		printk(", 10FDX");
+	if (s & PHY_CONF_10HDX)
+		printk(", 10HDX");
+	if (!(s & PHY_CONF_SPMASK))
+		printk(", No speed/duplex selected?");
+
+	if (s & PHY_CONF_LOOP)
+		printk(", loopback enabled");
+
+	printk(".\n");
+
+	priv->sequence_done = 1;
+}
+
+static void mii_queue_config(uint mii_reg, struct rtnet_device *dev, uint data)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+
+	priv->phy_task.routine = (void *)mii_display_config;
+	priv->phy_task.data = dev;
+	schedule_task(&priv->phy_task);
+}
+
+
+phy_cmd_t phy_cmd_config[] = { { mk_mii_read(MII_REG_CR), mii_queue_config },
+			       { mk_mii_end, } };
+
+
+/* Read remainder of PHY ID.
+*/
+static void
+mii_discover_phy3(uint mii_reg, struct rtnet_device *dev, uint data)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	int	i;
+
+	priv->phy_id |= (mii_reg & 0xffff);
+
+	for (i = 0; phy_info[i]; i++) {
+		if (phy_info[i]->id == (priv->phy_id >> 4) || !phy_info[i]->id)
+			break;
+		if (phy_info[i]->id == 0)	/* check generic entry */
+			break;
+	}
+
+	if (!phy_info[i])
+		panic("%s: PHY id 0x%08x is not supported!\n",
+			dev->name, priv->phy_id);
+
+	priv->phy = phy_info[i];
+	priv->phy_id_done = 1;
+
+	printk("%s: Phy @ 0x%x, type %s (0x%08x)\n",
+		dev->name, priv->phy_addr, priv->phy->name, priv->phy_id);
+#if defined(CONFIG_UC101)
+	mii_do_cmd(dev, priv->phy->startup);
+#endif
+}
+
+/* Scan all of the MII PHY addresses looking for someone to respond
+ * with a valid ID.  This usually happens quickly.
+ */
+static void
+mii_discover_phy(uint mii_reg, struct rtnet_device *dev, uint data)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	uint	phytype;
+
+#if MPC5xxx_FEC_DEBUG > 4
+	printk("mii_discover_phy\n");
+#endif
+
+	if ((phytype = (mii_reg & 0xffff)) != 0xffff) {
+		/* Got first part of ID, now get remainder.
+		*/
+		priv->phy_id = phytype << 16;
+		mii_queue(dev, mk_mii_read(MII_REG_PHYIR2), mii_discover_phy3, 0);
+	} else {
+		priv->phy_addr++;
+		if (priv->phy_addr < 32)
+			mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
+							mii_discover_phy, 0);
+		else
+			printk("fec: No PHY device found.\n");
+	}
+}
+
+static void
+mpc5xxx_fec_link_up(struct rtnet_device *dev)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)(dev->priv);
+
+	printk("mpc5xxx_fec_link_up: link_up=%d\n", priv->link_up);
+#ifdef ORIGINAL_CODE
+	priv->link_up = 0;
+#endif /* ORIGINAL_CODE */
+	mii_display_status(dev);
+	if (priv->duplex_change) {
+#if MPC5xxx_FEC_DEBUG > 1
+		printk("%s: restarting with %s duplex...\n",
+		       dev->name, priv->full_duplex ? "full" : "half");
+#endif
+		mpc5xxx_fec_restart(dev, priv->full_duplex);
+	}
+}
+
+/*
+ * Execute the ack_int command set and schedule the next timer callback.
+ */
+static void mdio_timer_callback(unsigned long data)
+{
+	struct rtnet_device *dev = (struct rtnet_device *)data;
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)(dev->priv);
+	mii_do_cmd(dev, priv->phy->ack_int);
+
+	if (priv->link_up) {
+#ifdef ORIGINAL_CODE
+		priv->link_up_task.routine = (void *)mpc5xxx_fec_link_up;
+		priv->link_up_task.data = dev;
+		schedule_task(&priv->link_up_task);
+#else
+		mpc5xxx_fec_link_up(dev);
+		return;
+#endif /* ORIGINAL_CODE */
+	}
+	/* Reschedule in 1 second */
+	priv->phy_timer_list.expires = jiffies + (1000 * HZ / 1000);
+	add_timer(&priv->phy_timer_list);
+}
+
+/*
+ * Displays the current status of the PHY.
+ */
+static void mii_display_status(struct rtnet_device *dev)
+{
+    struct mpc5xxx_fec_priv *priv = dev->priv;
+    uint s = priv->phy_status;
+
+    printk("%s: status: ", dev->name);
+
+    if (!priv->link) {
+	printk("link down");
+    } else {
+	printk("link up");
+
+	switch(s & PHY_STAT_SPMASK) {
+	case PHY_STAT_100FDX: printk(", 100 Mbps Full Duplex"); break;
+	case PHY_STAT_100HDX: printk(", 100 Mbps Half Duplex"); break;
+	case PHY_STAT_10FDX:  printk(", 10 Mbps Full Duplex");  break;
+	case PHY_STAT_10HDX:  printk(", 10 Mbps Half Duplex");  break;
+	default:
+	    printk(", Unknown speed/duplex");
+	}
+
+	if (s & PHY_STAT_ANC)
+	    printk(", auto-negotiation complete");
+    }
+
+    if (s & PHY_STAT_FAULT)
+	printk(", remote fault");
+
+    printk(".\n");
+}
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+
+#define RFIFO_DATA	0xf0003184
+#define TFIFO_DATA	0xf00031a4
+
+/*
+ * Initialize FEC receive task.
+ * Returns task number of FEC receive task.
+ * Returns -1 on failure
+ */
+int
+mpc5xxx_fec_rx_task_setup(int num_bufs, int maxbufsize)
+{
+	static TaskSetupParamSet_t params;
+	int tasknum;
+
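+	/* The BestComm task reads 32-bit words from the fixed FEC RX FIFO
+	 * data register and stores them into the receive buffers. */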
+	params.NumBD = num_bufs;
+	params.Size.MaxBuf = maxbufsize;
+	params.StartAddrSrc = RFIFO_DATA;
+	params.IncrSrc = 0;
+	params.SzSrc = 4;
+	params.IncrDst = 4;
+	params.SzDst = 4;
+
+	tasknum = TaskSetup(TASK_FEC_RX, &params);
+
+	/* clear pending interrupt bits */
+	TaskIntClear(tasknum);
+
+	return tasknum;
+}
+
+/*
+ * Initialize FEC transmit task.
+ * Returns task number of FEC transmit task.
+ * Returns -1 on failure
+ */
+int
+mpc5xxx_fec_tx_task_setup(int num_bufs)
+{
+	static TaskSetupParamSet_t params;
+	int tasknum;
+
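+	/* The BestComm task reads 32-bit words from the transmit buffers and
+	 * writes them to the fixed FEC TX FIFO data register. */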
+	params.NumBD = num_bufs;
+	params.IncrSrc = 4;
+	params.SzSrc = 4;
+	params.StartAddrDst = TFIFO_DATA;
+	params.IncrDst = 0;
+	params.SzDst = 4;
+
+	tasknum = TaskSetup(TASK_FEC_TX, &params);
+
+	/* clear pending interrupt bits */
+	TaskIntClear(tasknum);
+
+	return tasknum;
+}
+
+
+
+#ifdef PARANOID_CHECKS
+static volatile int tx_fifo_cnt, tx_fifo_ipos, tx_fifo_opos;
+static volatile int rx_fifo_opos;
+#endif
+
+static struct rtskb *tx_fifo_skb[MPC5xxx_FEC_TBD_NUM];
+static struct rtskb *rx_fifo_skb[MPC5xxx_FEC_RBD_NUM];
+static BDIdx mpc5xxx_bdi_tx = 0;
+
+
+static int
+mpc5xxx_fec_setup(struct rtnet_device *dev, int reinit)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_xlb *xlb = (struct mpc5xxx_xlb *)MPC5xxx_XLB;
+	struct rtskb *skb;
+	int i;
+	struct mpc5xxx_rbuf *rbuf;
+	struct mpc5xxx_fec *fec = priv->fec;
+	u32 u32_value;
+	u16 u16_value;
+
+#if MPC5xxx_FEC_DEBUG > 1
+	printk("mpc5xxx_fec_setup\n");
+#endif
+
+	mpc5xxx_fec_set_paddr(dev, dev->dev_addr);
+
+	/*
+	 * Initialize receive queue
+	 */
+	priv->r_tasknum = mpc5xxx_fec_rx_task_setup(MPC5xxx_FEC_RBD_NUM,
+						    MPC5xxx_FEC_RECV_BUFFER_SIZE_BC);
+	TaskBDReset(priv->r_tasknum);
+	for(i=0;i<MPC5xxx_FEC_RBD_NUM;i++) {
+		BDIdx bdi_a;
+		if(!reinit) {
+			skb = dev_alloc_rtskb(sizeof *rbuf, dev);
+			if (skb == 0)
+				goto eagain;
+#ifdef MUST_UNALIGN_RECEIVE_DATA
+			rtskb_reserve(skb,2);
+#endif
+			rbuf = (struct mpc5xxx_rbuf *)rtskb_put(skb, sizeof *rbuf);
+			rx_fifo_skb[i]=skb;
+		}
+		else {
+			skb=rx_fifo_skb[i];
+			rbuf = (struct mpc5xxx_rbuf *)skb->data;
+		}
+		bdi_a = TaskBDAssign(priv->r_tasknum,
+					(void*)virt_to_phys((void *)&rbuf->data),
+					0, sizeof *rbuf, MPC5xxx_FEC_RBD_INIT);
+		if(bdi_a<0)
+			panic("mpc5xxx_fec_setup: error while TaskBDAssign, err=%i\n",(int)bdi_a);
+	}
+#ifdef PARANOID_CHECKS
+	rx_fifo_opos = 0;
+#endif
+
+	/*
+	 * Initialize transmit queue
+	 */
+	if(!reinit) {
+		priv->t_tasknum = mpc5xxx_fec_tx_task_setup(MPC5xxx_FEC_TBD_NUM);
+		TaskBDReset(priv->t_tasknum);
+		mpc5xxx_bdi_tx = 0;
+		for(i=0;i<MPC5xxx_FEC_TBD_NUM;i++) tx_fifo_skb[i]=0;
+#ifdef PARANOID_CHECKS
+		tx_fifo_cnt = tx_fifo_ipos = tx_fifo_opos = 0;
+#endif
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+		if (reinit) {
+			if (!priv->sequence_done) {
+				if (!priv->phy) {
+					printk("mpc5xxx_fec_setup: PHY not configured\n");
+					return -ENODEV; /* No PHY we understand */
+				}
+
+				mii_do_cmd(dev, priv->phy->config);
+				mii_do_cmd(dev, phy_cmd_config); /* display configuration */
+				while(!priv->sequence_done)
+					schedule();
+
+				mii_do_cmd(dev, priv->phy->startup);
+			}
+		}
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+		dev->irq = MPC5xxx_FEC_IRQ;
+		priv->r_irq = MPC5xxx_SDMA_IRQ_BASE + priv->r_tasknum;
+		priv->t_irq = MPC5xxx_SDMA_IRQ_BASE + priv->t_tasknum;
+
+		if ((i = rtdm_irq_request(&priv->irq_handle, dev->irq,
+					  mpc5xxx_fec_interrupt, 0,
+					  "rteth_err", dev))) {
+			printk(KERN_ERR "FEC interrupt allocation failed\n");
+			return i;
+		}
+
+		if ((i = rtdm_irq_request(&priv->r_irq_handle, priv->r_irq,
+					  mpc5xxx_fec_receive_interrupt, 0,
+					  "rteth_recv", dev))) {
+			printk(KERN_ERR "FEC receive task interrupt allocation failed\n");
+			return i;
+		}
+
+		if ((i = rtdm_irq_request(&priv->t_irq_handle, priv->t_irq,
+					  mpc5xxx_fec_transmit_interrupt, 0,
+					  "rteth_xmit", dev))) {
+			printk(KERN_ERR "FEC transmit task interrupt allocation failed\n");
+			return i;
+		}
+
+		rt_stack_connect(dev, &STACK_manager);
+
+		u32_value = in_be32(&priv->gpio->port_config);
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+		u32_value |= 0x00050000;	/* 100MBit with MD	*/
+#else
+		u32_value |= 0x00020000;	/* 10MBit with 7-wire	*/
+#endif
+		out_be32(&priv->gpio->port_config, u32_value);
+
+	}
+
+	out_be32(&fec->op_pause, 0x00010020);	/* change to 0xffff0020 ??? */
+	out_be32(&fec->rfifo_cntrl, 0x0f240000);
+	out_be32(&fec->rfifo_alarm, 0x0000030c);
+	out_be32(&fec->tfifo_cntrl, 0x0f240000);
+	out_be32(&fec->tfifo_alarm, 0x00000100);
+	out_be32(&fec->x_wmrk, 0x3);		/* xmit fifo watermark = 256 */
+	out_be32(&fec->xmit_fsm, 0x03000000);	/* enable crc generation */
+	out_be32(&fec->iaddr1, 0x00000000);	/* No individual filter */
+	out_be32(&fec->iaddr2, 0x00000000);	/* No individual filter */
+
+#ifdef CONFIG_MPC5200
+	/* Disable COMM Bus Prefetch */
+	u16_value = in_be16(&priv->sdma->PtdCntrl);
+	u16_value |= 1;
+	out_be16(&priv->sdma->PtdCntrl, u16_value);
+
+	/* Disable (or enable?) BestComm XLB address snooping */
+	out_be32(&xlb->config, in_be32(&xlb->config) | MPC5200B_XLB_CONF_BSDIS);
+#endif
+
+	if(!reinit) {
+#if !defined(CONFIG_XENO_DRIVERS_NET_USE_MDIO)
+		mpc5xxx_fec_restart (dev, 0);	/* always use half duplex mode only */
+#else
+#ifdef CONFIG_UBOOT
+		extern unsigned char __res[];
+		bd_t *bd = (bd_t *)__res;
+#define MPC5xxx_IPBFREQ bd->bi_ipbfreq
+#else
+#define MPC5xxx_IPBFREQ CONFIG_PPC_5xxx_IPBFREQ
+#endif
+
+		for (i=0; i<NMII-1; i++)
+			mii_cmds[i].mii_next = &mii_cmds[i+1];
+		mii_free = mii_cmds;
+
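+		/* Derive the MII management clock divider from the IPB bus
+		 * frequency (the MDC clock must stay within the PHY limit,
+		 * typically 2.5 MHz). */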
+		priv->phy_speed = (((MPC5xxx_IPBFREQ >> 20) / 5) << 1);
+
+		/*mpc5xxx_fec_restart (dev, 0);*/ /* half duplex, negotiate speed */
+		mpc5xxx_fec_restart (dev, 1);	/* full duplex, negotiate speed */
+
+		/* Queue up command to detect the PHY and initialize the
+		 * remainder of the interface.
+		 */
+		priv->phy_id_done = 0;
+		priv->phy_addr = 0;
+		mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy, 0);
+
+		priv->old_status = 0;
+
+		/*
+		 * Read MIB counters in order to reset them,
+		 * then zero all the stats fields in memory
+		 */
+		mpc5xxx_fec_update_stat(dev);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+		if (reinit) {
+			if (!priv->sequence_done) {
+				if (!priv->phy) {
+					printk("mpc5xxx_fec_open: PHY not configured\n");
+					return -ENODEV;		/* No PHY we understand */
+				}
+
+				mii_do_cmd(dev, priv->phy->config);
+				mii_do_cmd(dev, phy_cmd_config);  /* display configuration */
+				while(!priv->sequence_done)
+					schedule();
+
+				mii_do_cmd(dev, priv->phy->startup);
+
+				/*
+				 * Currently, MII link interrupts are not supported,
+				 * so start the 100 msec timer to monitor the link up event.
+				 */
+				init_timer(&priv->phy_timer_list);
+
+				priv->phy_timer_list.expires = jiffies + (100 * HZ / 1000);
+				priv->phy_timer_list.data = (unsigned long)dev;
+				priv->phy_timer_list.function = mdio_timer_callback;
+				add_timer(&priv->phy_timer_list);
+
+				printk("%s: Waiting for the link to be up...\n", dev->name);
+				while (priv->link == 0) {
+					schedule();
+				}
+				mii_display_status(dev);
+				if (priv->full_duplex == 0) { /* full duplex was not negotiated, restart the FEC in half duplex */
+					mpc5xxx_fec_restart(dev, 0);
+				}
+			}
+		}
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+#endif
+	}
+	else {
+		mpc5xxx_fec_restart (dev, 0);
+	}
+
+	rtnetif_start_queue(dev);
+
+	TaskStart(priv->r_tasknum, TASK_AUTOSTART_ENABLE,
+		  priv->r_tasknum, TASK_INTERRUPT_ENABLE);
+
+	if(reinit) {
+		TaskStart(priv->t_tasknum, TASK_AUTOSTART_ENABLE,
+			  priv->t_tasknum, TASK_INTERRUPT_ENABLE);
+	}
+
+	return 0;
+
+eagain:
+	printk("mpc5xxx_fec_setup: failed\n");
+	for (i=0; i<MPC5xxx_FEC_RBD_NUM; i++) {
+		skb = rx_fifo_skb[i];
+		if (skb == 0)
+			break;
+		dev_kfree_rtskb(skb);
+	}
+	TaskBDReset(priv->r_tasknum);
+
+	return -EAGAIN;
+}
+
+static int
+mpc5xxx_fec_open(struct rtnet_device *dev)
+{
+	return mpc5xxx_fec_setup(dev,0);
+}
+
+/* This will only be invoked if your driver is _not_ in XOFF state.
+ * What this means is that you need not check it, and that this
+ * invariant will hold if you make sure that the netif_*_queue()
+ * calls are done at the proper times.
+ */
+static int
+mpc5xxx_fec_hard_start_xmit(struct rtskb *skb, struct rtnet_device *dev)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	rtdm_lockctx_t context;
+	int pad;
+	short length;
+	BDIdx bdi_a;
+
+#if MPC5xxx_FEC_DEBUG > 4
+	printk("mpc5xxx_fec_hard_start_xmit:\n");
+	printk("dev %08x, priv %08x, skb %08x\n",
+			(u32)dev, (u32)priv, (u32)skb);
+#endif
+#if MPC5xxx_FEC_DEBUG > 0
+	if (fec_start_status(&priv->t_queue) & MPC5xxx_FEC_TBD_TFD)
+		panic("MPC5xxx transmit queue overrun\n");
+#endif
+
+	length = skb->len;
+#ifdef	MUST_ALIGN_TRANSMIT_DATA
+	pad = (int)skb->data & 3;
+	if (pad) {
+		void *old_data = skb->data;
+		rtskb_push(skb, pad);
+		memcpy(skb->data, old_data, length);
+		rtskb_trim(skb, length);
+	}
+#endif
+	/* Zero out up to the minimum length ethernet packet size,
+	 * so we don't inadvertently expose sensitive data
+	 */
+	pad = ETH_ZLEN - skb->len;
+	if (pad > 0) {
+		skb = rtskb_padto(skb, ETH_ZLEN);
+		if (skb == 0) {
+			printk("rtskb_padto failed\n");
+			return 0;
+		}
+		length += pad;
+	}
+
+	flush_dcache_range((u32)skb->data, (u32)skb->data + length);
+
+	rtdm_lock_get_irqsave(&priv->lock, context);
+
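+	/*
+	 * Hand the frame to the BestComm transmit task by physical address.
+	 * TaskBDAssign() returns the index of the buffer descriptor used,
+	 * which is remembered in tx_fifo_skb[] so the skb can be freed from
+	 * the transmit interrupt once the descriptor is released again.
+	 */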
+	bdi_a = TaskBDAssign(priv->t_tasknum,(void*)virt_to_phys((void *)skb->data),
+			     NULL,length,MPC5xxx_FEC_TBD_INIT);
+
+#ifdef PARANOID_CHECKS
+	/* check for other errors during assignment*/
+	if((bdi_a<0)||(bdi_a>=MPC5xxx_FEC_TBD_NUM))
+		panic("mpc5xxx_fec_hard_start_xmit: error while TaskBDAssign, err=%i\n",(int)bdi_a);
+
+	/* sanity check: bdi must always equal tx_fifo_ipos*/
+	if(bdi_a!=tx_fifo_ipos)
+		panic("bdi_a!=tx_fifo_ipos: %i, %i\n",(int)bdi_a,tx_fifo_ipos);
+
+	tx_fifo_cnt++;
+	tx_fifo_ipos++;
+	if(tx_fifo_ipos==MPC5xxx_FEC_TBD_NUM) tx_fifo_ipos=0;
+
+	/* check number of BDs in use*/
+	if(TaskBDInUse(priv->t_tasknum)!=tx_fifo_cnt)
+		panic("TaskBDInUse != tx_fifo_cnt: %i %i\n",TaskBDInUse(priv->t_tasknum),tx_fifo_cnt);
+#endif
+
+	tx_fifo_skb[bdi_a]=skb;
+
+#ifdef ORIGINAL_CODE
+	dev->trans_start = jiffies;
+#endif /* ORIGINAL_CODE */
+
+	/* Get and patch time stamp just before the transmission */
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+
+	TaskStart(priv->t_tasknum, TASK_AUTOSTART_ENABLE, priv->t_tasknum, TASK_INTERRUPT_ENABLE);
+
+	if(TaskBDInUse(priv->t_tasknum)==MPC5xxx_FEC_TBD_NUM) {
+		priv->tx_full = 1;
+		rtnetif_stop_queue(dev);
+	}
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	return 0;
+}
+
+/* This handles SDMA transmit task interrupts
+ */
+static int
+mpc5xxx_fec_transmit_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *dev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	BDIdx bdi_r;
+
+	rtdm_lock_get(&priv->lock);
+
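+	/*
+	 * Walk the BestComm transmit ring: release every descriptor the task
+	 * has finished with (TaskBDRelease() returns TASK_ERR_BD_BUSY once no
+	 * more can be released), free the matching skb, and clear tx_full
+	 * when enough room is available again.
+	 */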
+	while(TaskBDInUse(priv->t_tasknum)) {
+
+		/* release BD */
+		bdi_r = TaskBDRelease(priv->t_tasknum);
+
+		/* we are done if we can't release any more BDs*/
+		if(bdi_r==TASK_ERR_BD_BUSY) break;
+		/* if(bdi_r<0) break;*/
+
+#ifdef PARANOID_CHECKS
+		/* check for other errors during release*/
+		if((bdi_r<0)||(bdi_r>=MPC5xxx_FEC_TBD_NUM))
+			panic("mpc5xxx_fec_transmit_interrupt: error while TaskBDRelease, err=%i\n",(int)bdi_r);
+
+		tx_fifo_cnt--;
+		tx_fifo_opos++;
+		if(tx_fifo_opos==MPC5xxx_FEC_TBD_NUM) tx_fifo_opos=0;
+
+		/* sanity check: bdi_r must always equal tx_fifo_opos*/
+		if(bdi_r!=tx_fifo_opos) {
+			panic("bdi_r!=tx_fifo_opos: %i, %i\n",(int)bdi_r,tx_fifo_opos);
+		}
+
+		/* check number of BDs in use*/
+		if(TaskBDInUse(priv->t_tasknum)!=tx_fifo_cnt)
+			panic("TaskBDInUse != tx_fifo_cnt: %i %i\n",TaskBDInUse(priv->t_tasknum),tx_fifo_cnt);
+#endif
+
+		if((tx_fifo_skb[mpc5xxx_bdi_tx])==0)
+			panic("skb confusion in tx\n");
+
+		dev_kfree_rtskb(tx_fifo_skb[mpc5xxx_bdi_tx]);
+		tx_fifo_skb[mpc5xxx_bdi_tx]=0;
+
+		mpc5xxx_bdi_tx = bdi_r;
+
+		if(TaskBDInUse(priv->t_tasknum)<MPC5xxx_FEC_TBD_NUM/2)
+			priv->tx_full = 0;
+
+	}
+
+	if (rtnetif_queue_stopped(dev) && !priv->tx_full)
+		rtnetif_wake_queue(dev);
+
+	rtdm_lock_put(&priv->lock);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static BDIdx mpc5xxx_bdi_rx = 0;
+
+static int
+mpc5xxx_fec_receive_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *dev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct rtskb *skb;
+	struct rtskb *nskb;
+	struct mpc5xxx_rbuf *rbuf;
+	struct mpc5xxx_rbuf *nrbuf;
+	u32 status;
+	int length;
+	BDIdx bdi_a, bdi_r;
+	int discard = 0;
+	int dropped = 0;
+	int packets = 0;
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
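+	/*
+	 * Drain the BestComm receive ring: for each completed descriptor,
+	 * check the status word for frame errors, pass good frames to the
+	 * stack via rtnetif_rx(), and immediately re-arm the descriptor with
+	 * either a freshly allocated buffer or, on errors, the old one.
+	 */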
+	while(1) {
+
+		/* release BD*/
+		bdi_r = TaskBDRelease(priv->r_tasknum);
+
+		/* we are done if we can't release any more BDs*/
+		if(bdi_r==TASK_ERR_BD_BUSY) break;
+
+#ifdef PARANOID_CHECKS
+		/* check for other errors during release*/
+		if((bdi_r<0)||(bdi_r>=MPC5xxx_FEC_RBD_NUM))
+			panic("mpc5xxx_fec_receive_interrupt: error while TaskBDRelease, err=%i\n",(int)bdi_r);
+
+		rx_fifo_opos++;
+		if(rx_fifo_opos==MPC5xxx_FEC_RBD_NUM) rx_fifo_opos=0;
+
+		if(bdi_r != rx_fifo_opos)
+			panic("bdi_r != rx_fifo_opos: %i, %i\n",bdi_r, rx_fifo_opos);
+#endif
+
+		/* get BD status in order to determine length*/
+		status = TaskGetBD(priv->r_tasknum,mpc5xxx_bdi_rx)->Status;
+
+		/* determine packet length and pointer to socket buffer / actual data*/
+		skb = rx_fifo_skb[mpc5xxx_bdi_rx];
+		length = (status & 0xffff) - 4;
+		rbuf = (struct mpc5xxx_rbuf *)skb->data;
+
+#ifndef EXIT_ISR_AT_MEMORY_SQUEEZE
+		/*
+		 * In case of a memory squeeze, we just drop all packets,
+		 * because subsequent allocations will also fail.
+		 */
+		if(discard!=3) {
+#endif
+
+			/* check for frame errors*/
+			if(status&0x00370000) {
+				/* frame error, drop */
+#ifdef DISPLAY_WARNINGS
+				if(status&MPC5xxx_FEC_FRAME_LG)
+					printk("%s: Frame length error, dropping packet (status=0x%08x)\n",dev->name,status);
+				if(status&MPC5xxx_FEC_FRAME_NO)
+					printk("%s: Non-octet aligned frame error, dropping packet (status=0x%08x)\n",dev->name,status);
+				if(status&MPC5xxx_FEC_FRAME_CR)
+					printk("%s: Frame CRC error, dropping packet (status=0x%08x)\n",dev->name,status);
+				if(status&MPC5xxx_FEC_FRAME_OV)
+					printk("%s: FIFO overrun error, dropping packet (status=0x%08x)\n",dev->name,status);
+				if(status&MPC5xxx_FEC_FRAME_TR)
+					printk("%s: Frame truncated error, dropping packet (status=0x%08x)\n",dev->name,status);
+#endif
+				discard=1;
+			}
+			else if (length>(MPC5xxx_FEC_RECV_BUFFER_SIZE-4)) {
+				/* packet too big, drop */
+#ifdef DISPLAY_WARNINGS
+				printk("%s: Frame too big, dropping packet (length=%i)\n",dev->name,length);
+#endif
+				discard=2;
+			}
+			else {
+				/* allocate replacement skb */
+				nskb = dev_alloc_rtskb(sizeof *nrbuf, dev);
+				if (nskb == NULL) {
+					/* memory squeeze, drop */
+					discard=3;
+					dropped++;
+				}
+				else {
+					discard=0;
+				}
+			}
+
+#ifndef EXIT_ISR_AT_MEMORY_SQUEEZE
+		}
+		else {
+			dropped++;
+		}
+#endif
+
+		if (discard) {
+			priv->stats.rx_dropped++;
+			nrbuf = (struct mpc5xxx_rbuf *)skb->data;
+		}
+		else {
+#ifdef MUST_UNALIGN_RECEIVE_DATA
+			rtskb_reserve(nskb,2);
+#endif
+			nrbuf = (struct mpc5xxx_rbuf *)rtskb_put(nskb, sizeof *nrbuf);
+
+			/* only invalidate the bytes in the dcache that were actually received */
+#ifdef MUST_UNALIGN_RECEIVE_DATA
+			invalidate_dcache_range((u32)rbuf - 2, (u32)rbuf + length);
+#else
+			invalidate_dcache_range((u32)rbuf, (u32)rbuf + length);
+#endif
+			rtskb_trim(skb, length);
+			skb->protocol = rt_eth_type_trans(skb, dev);
+			skb->time_stamp = time_stamp;
+			rtnetif_rx(skb);
+			packets++;
+#ifdef ORIGINAL_CODE
+			dev->last_rx = jiffies;
+#endif /* ORIGINAL_CODE */
+			rx_fifo_skb[mpc5xxx_bdi_rx] = nskb;
+		}
+
+		/* Assign new socket buffer to BD*/
+		bdi_a = TaskBDAssign(priv->r_tasknum, (void*)virt_to_phys((void *)&nrbuf->data),
+				     0, sizeof *nrbuf, MPC5xxx_FEC_RBD_INIT);
+
+#ifdef PARANOID_CHECKS
+		/* check for errors during assignment*/
+		if((bdi_a<0)||(bdi_a>=MPC5xxx_FEC_RBD_NUM))
+			panic("mpc5xxx_fec_receive_interrupt: error while TaskBDAssign, err=%i\n",(int)bdi_a);
+
+		/* check if Assign/Release sequence numbers are ok*/
+		if(((bdi_a+1)%MPC5xxx_FEC_RBD_NUM) != bdi_r)
+			panic("bdi_a+1 != bdi_r: %i %i\n",(int)((bdi_a+1)%MPC5xxx_FEC_RBD_NUM),(int)bdi_r);
+#endif
+
+		mpc5xxx_bdi_rx = bdi_r;
+
+#ifdef EXIT_ISR_AT_MEMORY_SQUEEZE
+		/*
+		 * If we couldn't get memory for a new socket buffer, it
+		 * doesn't make sense to proceed.
+		 */
+		if (discard==3)
+			break;
+#endif
+
+	}
+
+#ifdef DISPLAY_WARNINGS
+	if(dropped) {
+		printk("%s: Memory squeeze, dropped %i packets\n",dev->name,dropped);
+	}
+#endif
+	TaskStart(priv->r_tasknum, TASK_AUTOSTART_ENABLE, priv->r_tasknum, TASK_INTERRUPT_ENABLE);
+
+	if (packets > 0)
+		rt_mark_stack_mgr(dev);
+	return RTDM_IRQ_HANDLED;
+}
+
+
+static void
+mpc5xxx_fec_reinit(struct rtnet_device *dev)
+{
+	int retval;
+	printk("mpc5xxx_fec_reinit\n");
+	mpc5xxx_fec_cleanup(dev,1);
+	retval=mpc5xxx_fec_setup(dev,1);
+	if(retval) panic("reinit failed\n");
+}
+
+
+static int
+mpc5xxx_fec_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *dev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_fec *fec = priv->fec;
+	int ievent;
+
+#if MPC5xxx_FEC_DEBUG > 4
+	printk("mpc5xxx_fec_interrupt:\n");
+#endif
+
+	ievent = in_be32(&fec->ievent);
+	out_be32(&fec->ievent, ievent);		/* clear pending events */
+
+	if (ievent & (MPC5xxx_FEC_IEVENT_RFIFO_ERROR |
+		      MPC5xxx_FEC_IEVENT_XFIFO_ERROR)) {
+		if (ievent & MPC5xxx_FEC_IEVENT_RFIFO_ERROR)
+			printk(KERN_WARNING "MPC5xxx_FEC_IEVENT_RFIFO_ERROR\n");
+		if (ievent & MPC5xxx_FEC_IEVENT_XFIFO_ERROR)
+			printk(KERN_WARNING "MPC5xxx_FEC_IEVENT_XFIFO_ERROR\n");
+		mpc5xxx_fec_reinit(dev);
+	}
+	else if (ievent & MPC5xxx_FEC_IEVENT_MII) {
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+		mpc5xxx_fec_mii(dev);
+#else
+		printk("%s[%d] %s: unexpected MPC5xxx_FEC_IEVENT_MII\n",
+			__FILE__, __LINE__, __FUNCTION__);
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+	}
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int
+mpc5xxx_fec_cleanup(struct rtnet_device *dev, int reinit)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_fec *fec = priv->fec;
+	unsigned long timeout;
+	int i;
+
+	priv->open_time = 0;
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	priv->sequence_done = 0;
+#endif
+
+	rtnetif_stop_queue(dev);
+
+	/* Wait for the tx queue to drain */
+	if(!reinit) {
+		timeout = jiffies + 2*HZ;
+		while (TaskBDInUse(priv->t_tasknum) && (jiffies < timeout)) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(HZ/10);
+		}
+	}
+
+	/* Disable FEC interrupts */
+	out_be32(&fec->imask, 0x0);
+
+	/* Stop FEC */
+	out_be32(&fec->ecntrl, in_be32(&fec->ecntrl) & ~0x2);
+
+	/* Disable the rx and tx queues. */
+	TaskStop(priv->r_tasknum);
+	TaskStop(priv->t_tasknum);
+
+	/* Release irqs */
+	if(!reinit) {
+		rtdm_irq_disable(&priv->irq_handle);
+		rtdm_irq_disable(&priv->r_irq_handle);
+		rtdm_irq_disable(&priv->t_irq_handle);
+		rtdm_irq_free(&priv->irq_handle);
+		rtdm_irq_free(&priv->r_irq_handle);
+		rtdm_irq_free(&priv->t_irq_handle);
+		rt_stack_disconnect(dev);
+	}
+
+	/* Free rx Buffers */
+	if(!reinit) {
+		for (i=0; i<MPC5xxx_FEC_RBD_NUM; i++) {
+			dev_kfree_rtskb(rx_fifo_skb[i]);
+		}
+	}
+
+	mpc5xxx_fec_get_stats(dev);
+
+	return 0;
+}
+
+static int
+mpc5xxx_fec_close(struct rtnet_device *dev)
+{
+	int ret = mpc5xxx_fec_cleanup(dev,0);
+	return ret;
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static struct net_device_stats *mpc5xxx_fec_get_stats(struct rtnet_device *dev)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct net_device_stats *stats = &priv->stats;
+	struct mpc5xxx_fec *fec = priv->fec;
+
+	stats->rx_bytes = in_be32(&fec->rmon_r_octets);
+	stats->rx_packets = in_be32(&fec->rmon_r_packets);
+	stats->rx_errors = stats->rx_packets - (
+					in_be32(&fec->ieee_r_frame_ok) +
+					in_be32(&fec->rmon_r_mc_pkt));
+	stats->tx_bytes = in_be32(&fec->rmon_t_octets);
+	stats->tx_packets = in_be32(&fec->rmon_t_packets);
+	stats->tx_errors = stats->tx_packets - (
+					in_be32(&fec->ieee_t_frame_ok) +
+					in_be32(&fec->rmon_t_col) +
+					in_be32(&fec->ieee_t_1col) +
+					in_be32(&fec->ieee_t_mcol) +
+					in_be32(&fec->ieee_t_def));
+	stats->multicast = in_be32(&fec->rmon_r_mc_pkt);
+	stats->collisions = in_be32(&fec->rmon_t_col);
+
+	/* detailed rx_errors: */
+	stats->rx_length_errors = in_be32(&fec->rmon_r_undersize)
+			+ in_be32(&fec->rmon_r_oversize)
+			+ in_be32(&fec->rmon_r_frag)
+			+ in_be32(&fec->rmon_r_jab);
+	stats->rx_over_errors = in_be32(&fec->r_macerr);
+	stats->rx_crc_errors = in_be32(&fec->ieee_r_crc);
+	stats->rx_frame_errors = in_be32(&fec->ieee_r_align);
+	stats->rx_fifo_errors = in_be32(&fec->rmon_r_drop);
+	stats->rx_missed_errors = in_be32(&fec->rmon_r_drop);
+
+	/* detailed tx_errors: */
+	stats->tx_aborted_errors = 0;
+	stats->tx_carrier_errors = in_be32(&fec->ieee_t_cserr);
+	stats->tx_fifo_errors = in_be32(&fec->rmon_t_drop) +
+				in_be32(&fec->ieee_t_macerr);
+	stats->tx_heartbeat_errors = in_be32(&fec->ieee_t_sqe);
+	stats->tx_window_errors = in_be32(&fec->ieee_t_lcol);
+
+	return stats;
+}
+
+static void
+mpc5xxx_fec_update_stat(struct rtnet_device *dev)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct net_device_stats *stats = &priv->stats;
+	struct mpc5xxx_fec *fec = priv->fec;
+
+	out_be32(&fec->mib_control, MPC5xxx_FEC_MIB_DISABLE);
+	memset_io(&fec->rmon_t_drop, 0,
+			(u32)&fec->reserved10 - (u32)&fec->rmon_t_drop);
+	out_be32(&fec->mib_control, 0);
+	memset(stats, 0, sizeof *stats);
+	mpc5xxx_fec_get_stats(dev);
+}
+
+#ifdef ORIGINAL_CODE
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void
+mpc5xxx_fec_set_multicast_list(struct rtnet_device *dev)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_fec *fec = priv->fec;
+	u32 u32_value;
+
+	if (dev->flags & IFF_PROMISC) {
+		printk("%s: Promiscuous mode enabled.\n", dev->name);
+		u32_value = in_be32(&fec->r_cntrl);
+		u32_value |= MPC5xxx_FEC_RCNTRL_PROM;
+		out_be32(&fec->r_cntrl, u32_value);
+	}
+	else if (dev->flags & IFF_ALLMULTI) {
+		u32_value = in_be32(&fec->r_cntrl);
+		u32_value &= ~MPC5xxx_FEC_RCNTRL_PROM;
+		out_be32(&fec->r_cntrl, u32_value);
+		out_be32(&fec->gaddr1, 0xffffffff);
+		out_be32(&fec->gaddr2, 0xffffffff);
+	}
+	else {
+		u32 crc;
+		int i;
+		struct dev_mc_list *dmi;
+		u32 gaddr1 = 0x00000000;
+		u32 gaddr2 = 0x00000000;
+
+		dmi = dev->mc_list;
+		for (i=0; i<dev->mc_count; i++) {
+			crc = ether_crc_le(6, dmi->dmi_addr) >> 26;
+			if (crc >= 32)
+				gaddr1 |= 1 << (crc-32);
+			else
+				gaddr2 |= 1 << crc;
+			dmi = dmi->next;
+		}
+		out_be32(&fec->gaddr1, gaddr1);
+		out_be32(&fec->gaddr2, gaddr2);
+	}
+}
+#endif /* ORIGINAL_CODE */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET
+static void mpc5xxx_mdio_callback(uint regval, struct rtnet_device *dev, uint data)
+{
+	mdio_read_data_t* mrd = (mdio_read_data_t *)data;
+	mrd->regval = 0xFFFF & regval;
+	wake_up_process(mrd->sleeping_task);
+}
+
+static int mpc5xxx_mdio_read(struct rtnet_device *dev, int phy_id, int location)
+{
+	uint retval;
+	mdio_read_data_t* mrd = (mdio_read_data_t *)kmalloc(sizeof(*mrd),
+			GFP_KERNEL);
+
+	mrd->sleeping_task = current;
+	set_current_state(TASK_INTERRUPTIBLE);
+	mii_queue(dev, mk_mii_read(location),
+		mpc5xxx_mdio_callback, (unsigned int) mrd);
+	schedule();
+
+	retval = mrd->regval;
+
+	kfree(mrd);
+
+	return retval;
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET_XXX
+static void mpc5xxx_mdio_write(struct rtnet_device *dev, int phy_id, int location, int value)
+{
+	mii_queue(dev, mk_mii_write(location, value), NULL, 0);
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET */
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+#ifdef ORIGINAL_CODE
+static int
+mpc5xxx_netdev_ethtool_ioctl(struct rtnet_device *dev, void *useraddr)
+{
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET_XXX
+	struct mpc5xxx_fec_priv *private = (struct mpc5xxx_fec_priv *)dev->priv;
+#endif
+	u32 ethcmd;
+
+	if (copy_from_user(&ethcmd, useraddr, sizeof ethcmd))
+		return -EFAULT;
+
+	switch (ethcmd) {
+
+		/* Get driver info */
+	case ETHTOOL_GDRVINFO:{
+			struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
+			strncpy(info.driver, "mpc5xxx_fec",
+				sizeof info.driver - 1);
+			strncpy(info.version, version,
+				sizeof info.version - 1);
+			if (copy_to_user(useraddr, &info, sizeof info))
+				return -EFAULT;
+			return 0;
+		}
+		/* get settings */
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET_XXX
+	case ETHTOOL_GSET:{
+			struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+			spin_lock_irq(&private->lock);
+			mii_ethtool_gset(&private->mii_if, &ecmd);
+			spin_unlock_irq(&private->lock);
+			if (copy_to_user(useraddr, &ecmd, sizeof ecmd))
+				return -EFAULT;
+			return 0;
+		}
+		/* set settings */
+	case ETHTOOL_SSET:{
+			int r;
+			struct ethtool_cmd ecmd;
+			if (copy_from_user(&ecmd, useraddr, sizeof ecmd))
+				return -EFAULT;
+			spin_lock_irq(&private->lock);
+			r = mii_ethtool_sset(&private->mii_if, &ecmd);
+			spin_unlock_irq(&private->lock);
+			return r;
+		}
+		/* restart autonegotiation */
+	case ETHTOOL_NWAY_RST:{
+			return mii_nway_restart(&private->mii_if);
+		}
+		/* get link status */
+	case ETHTOOL_GLINK:{
+			struct ethtool_value edata = { ETHTOOL_GLINK };
+			edata.data = mii_link_ok(&private->mii_if);
+			if (copy_to_user(useraddr, &edata, sizeof edata))
+				return -EFAULT;
+			return 0;
+		}
+#endif
+		/* get message-level */
+	case ETHTOOL_GMSGLVL:{
+			struct ethtool_value edata = { ETHTOOL_GMSGLVL };
+			edata.data = 0;	/* XXX */
+			if (copy_to_user(useraddr, &edata, sizeof edata))
+				return -EFAULT;
+			return 0;
+		}
+		/* set message-level */
+	case ETHTOOL_SMSGLVL:{
+			struct ethtool_value edata;
+			if (copy_from_user(&edata, useraddr, sizeof edata))
+				return -EFAULT;
+/* debug = edata.data; *//* XXX */
+			return 0;
+		}
+	}
+	return -EOPNOTSUPP;
+}
+
+static int
+mpc5xxx_fec_ioctl(struct rtnet_device *dev, struct ifreq *rq, int cmd)
+{
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET_XXX
+	struct mii_ioctl_data *data = (struct mii_ioctl_data *) &rq->ifr_data;
+	int phy = dev->base_addr & 0x1f;
+#endif
+	int retval;
+
+	switch (cmd) {
+	case SIOCETHTOOL:
+		retval = mpc5xxx_netdev_ethtool_ioctl(
+					dev, (void *) rq->ifr_data);
+		break;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET_XXX
+	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
+	case SIOCDEVPRIVATE:	/* for binary compat, remove in 2.5 */
+		data->phy_id = phy;
+		/* Fall through */
+
+	case SIOCGMIIREG:	/* Read MII PHY register. */
+	case SIOCDEVPRIVATE + 1:	/* for binary compat, remove in 2.5 */
+		data->val_out =
+			mpc5xxx_mdio_read(dev, data->phy_id&0x1f,
+				data->reg_num&0x1f);
+		retval = 0;
+		break;
+
+	case SIOCSMIIREG:	/* Write MII PHY register. */
+	case SIOCDEVPRIVATE + 2:	/* for binary compat, remove in 2.5 */
+		if (!capable(CAP_NET_ADMIN)) {
+			retval = -EPERM;
+		} else {
+			mpc5xxx_mdio_write(dev, data->phy_id & 0x1f,
+				data->reg_num & 0x1f, data->val_in);
+			retval = 0;
+		}
+		break;
+#endif
+
+	default:
+		retval = -EOPNOTSUPP;
+		break;
+	}
+	return retval;
+}
+
+static void __init
+mpc5xxx_fec_str2mac(char *str, unsigned char *mac)
+{
+	int i;
+	u64 val64;
+
+	val64 = simple_strtoull(str, NULL, 16);
+
+	for (i = 0; i < 6; i++)
+		mac[5-i] = val64 >> (i*8);
+}
+
+static int __init
+mpc5xxx_fec_mac_setup(char *mac_address)
+{
+	mpc5xxx_fec_str2mac(mac_address, mpc5xxx_fec_mac_addr);
+	return 0;
+}
+
+__setup("mpc5xxx_mac=", mpc5xxx_fec_mac_setup);
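+/*
+ * Illustrative note (not part of the original source): since the address is
+ * parsed with simple_strtoull(..., 16), it is passed on the kernel command
+ * line as one plain hexadecimal number, e.g. mpc5xxx_mac=0123456789ab
+ * (a made-up address), not in colon-separated notation.
+ */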
+#endif /* ORIGINAL_CODE */
+
+static int __init
+mpc5xxx_fec_init(void)
+{
+	struct mpc5xxx_fec *fec;
+	struct rtnet_device *dev;
+	struct mpc5xxx_fec_priv *priv;
+	int err = 0;
+
+#if MPC5xxx_FEC_DEBUG > 1
+	printk("mpc5xxx_fec_init\n");
+#endif
+
+	if (!rx_pool_size)
+		rx_pool_size = MPC5xxx_FEC_RBD_NUM * 2;
+
+	dev = rt_alloc_etherdev(sizeof(*priv), rx_pool_size + MPC5xxx_FEC_TBD_NUM);
+	if (!dev)
+		return -EIO;
+	rtdev_alloc_name(dev, "rteth%d");
+	memset(dev->priv, 0, sizeof(*priv));
+	rt_rtdev_connect(dev, &RTDEV_manager);
+	dev->vers = RTDEV_VERS_2_0;
+
+
+	mpc5xxx_fec_dev = dev;
+	priv = (struct mpc5xxx_fec_priv *)dev->priv;
+#if MPC5xxx_FEC_DEBUG > 1
+	printk("fec_priv %08x\n", (u32)priv);
+#endif
+	priv->fec = fec = (struct mpc5xxx_fec *)MPC5xxx_FEC;
+	priv->gpio = (struct mpc5xxx_gpio *)MPC5xxx_GPIO;
+	priv->sdma = (struct mpc5xxx_sdma *)MPC5xxx_SDMA;
+
+	rtdm_lock_init(&priv->lock);
+	dev->open		= mpc5xxx_fec_open;
+	dev->stop		= mpc5xxx_fec_close;
+	dev->hard_start_xmit	= mpc5xxx_fec_hard_start_xmit;
+	//FIXME dev->hard_header	= &rt_eth_header;
+	dev->get_stats		= mpc5xxx_fec_get_stats;
+#ifdef ORIGINAL_CODE
+	dev->do_ioctl		= mpc5xxx_fec_ioctl;
+	dev->set_mac_address	= mpc5xxx_fec_set_mac_address;
+	dev->set_multicast_list = mpc5xxx_fec_set_multicast_list;
+
+	dev->tx_timeout		= mpc5xxx_fec_tx_timeout;
+	dev->watchdog_timeo	= MPC5xxx_FEC_WATCHDOG_TIMEOUT;
+#endif /* ORIGINAL_CODE */
+	dev->flags &= ~IFF_RUNNING;
+
+	if ((err = rt_register_rtnetdev(dev)))
+		goto abort;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_FASTROUTE
+	dev->accept_fastpath = mpc5xxx_fec_accept_fastpath;
+#endif
+	if (memcmp(mpc5xxx_fec_mac_addr, null_mac, 6) != 0)
+		memcpy(dev->dev_addr, mpc5xxx_fec_mac_addr, 6);
+	else {
+		*(u32 *)&dev->dev_addr[0] = in_be32(&fec->paddr1);
+		*(u16 *)&dev->dev_addr[4] = in_be16((u16*)&fec->paddr2);
+	}
+
+	/*
+	 * Read MIB counters in order to reset them,
+	 * then zero all the stats fields in memory
+	 */
+	mpc5xxx_fec_update_stat(dev);
+
+	return 0;
+
+abort:
+	rtdev_free(dev);
+
+	return err;
+}
+
+static void __exit
+mpc5xxx_fec_uninit(void)
+{
+	struct rtnet_device *dev = mpc5xxx_fec_dev;
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+
+	rt_stack_disconnect(dev);
+	rt_unregister_rtnetdev(dev);
+	rt_rtdev_disconnect(dev);
+	printk("%s: unloaded\n", dev->name);
+	dev->priv = NULL;
+	rtdev_free(dev);
+}
+
+static int __init
+mpc5xxx_fec_module_init(void)
+{
+	return mpc5xxx_fec_init();
+}
+
+static void __exit
+mpc5xxx_fec_module_exit(void)
+{
+	mpc5xxx_fec_uninit();
+}
+
+module_init(mpc5xxx_fec_module_init);
+module_exit(mpc5xxx_fec_module_exit);
+++ linux-patched/drivers/xenomai/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h	2022-03-21 12:58:29.452888347 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/mpc52xx_fec/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * arch/ppc/5xxx_io/fec.h
+ *
+ * Header file for the MPC5xxx Fast Ethernet Controller driver
+ *
+ * Author: Dale Farnsworth <dfarnsworth@mvista.com>
+ *
+ * Copyright 2003 MontaVista Software
+ *
+ * 2003 (c) MontaVista, Software, Inc.  This file is licensed under the terms
+ * of the GNU General Public License version 2.  This program is licensed
+ * "as is" without any warranty of any kind, whether express or implied.
+ */
+
+#ifndef __RT_MPC52XX_FEC_H_
+#define __RT_MPC52XX_FEC_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/skbuff.h>
+#include <asm/mpc5xxx.h>
+#include <bestcomm_api.h>
+
+/* Define board specific options */
+#define CONFIG_XENO_DRIVERS_NET_USE_MDIO
+#define CONFIG_XENO_DRIVERS_NET_FEC_GENERIC_PHY
+#define CONFIG_XENO_DRIVERS_NET_FEC_LXT971
+#undef CONFIG_XENO_DRIVERS_NET_FEC_DP83847
+
+/* Tunable constants */
+#define MPC5xxx_FEC_RECV_BUFFER_SIZE	1518	/* max receive packet size */
+#define MPC5xxx_FEC_RECV_BUFFER_SIZE_BC 2048	/* allocated receive buffer size */
+#define MPC5xxx_FEC_TBD_NUM		256	/* max transmit packets */
+#define MPC5xxx_FEC_RBD_NUM		256	/* max receive packets */
+
+struct mpc5xxx_fec {
+	volatile u32 fec_id;			/* FEC + 0x000 */
+	volatile u32 ievent;			/* FEC + 0x004 */
+	volatile u32 imask;			/* FEC + 0x008 */
+
+	volatile u32 reserved0[1];		/* FEC + 0x00C */
+	volatile u32 r_des_active;		/* FEC + 0x010 */
+	volatile u32 x_des_active;		/* FEC + 0x014 */
+	volatile u32 r_des_active_cl;		/* FEC + 0x018 */
+	volatile u32 x_des_active_cl;		/* FEC + 0x01C */
+	volatile u32 ivent_set;			/* FEC + 0x020 */
+	volatile u32 ecntrl;			/* FEC + 0x024 */
+
+	volatile u32 reserved1[6];		/* FEC + 0x028-03C */
+	volatile u32 mii_data;			/* FEC + 0x040 */
+	volatile u32 mii_speed;			/* FEC + 0x044 */
+	volatile u32 mii_status;		/* FEC + 0x048 */
+
+	volatile u32 reserved2[5];		/* FEC + 0x04C-05C */
+	volatile u32 mib_data;			/* FEC + 0x060 */
+	volatile u32 mib_control;		/* FEC + 0x064 */
+
+	volatile u32 reserved3[6];		/* FEC + 0x068-7C */
+	volatile u32 r_activate;		/* FEC + 0x080 */
+	volatile u32 r_cntrl;			/* FEC + 0x084 */
+	volatile u32 r_hash;			/* FEC + 0x088 */
+	volatile u32 r_data;			/* FEC + 0x08C */
+	volatile u32 ar_done;			/* FEC + 0x090 */
+	volatile u32 r_test;			/* FEC + 0x094 */
+	volatile u32 r_mib;			/* FEC + 0x098 */
+	volatile u32 r_da_low;			/* FEC + 0x09C */
+	volatile u32 r_da_high;			/* FEC + 0x0A0 */
+
+	volatile u32 reserved4[7];		/* FEC + 0x0A4-0BC */
+	volatile u32 x_activate;		/* FEC + 0x0C0 */
+	volatile u32 x_cntrl;			/* FEC + 0x0C4 */
+	volatile u32 backoff;			/* FEC + 0x0C8 */
+	volatile u32 x_data;			/* FEC + 0x0CC */
+	volatile u32 x_status;			/* FEC + 0x0D0 */
+	volatile u32 x_mib;			/* FEC + 0x0D4 */
+	volatile u32 x_test;			/* FEC + 0x0D8 */
+	volatile u32 fdxfc_da1;			/* FEC + 0x0DC */
+	volatile u32 fdxfc_da2;			/* FEC + 0x0E0 */
+	volatile u32 paddr1;			/* FEC + 0x0E4 */
+	volatile u32 paddr2;			/* FEC + 0x0E8 */
+	volatile u32 op_pause;			/* FEC + 0x0EC */
+
+	volatile u32 reserved5[4];		/* FEC + 0x0F0-0FC */
+	volatile u32 instr_reg;			/* FEC + 0x100 */
+	volatile u32 context_reg;		/* FEC + 0x104 */
+	volatile u32 test_cntrl;		/* FEC + 0x108 */
+	volatile u32 acc_reg;			/* FEC + 0x10C */
+	volatile u32 ones;			/* FEC + 0x110 */
+	volatile u32 zeros;			/* FEC + 0x114 */
+	volatile u32 iaddr1;			/* FEC + 0x118 */
+	volatile u32 iaddr2;			/* FEC + 0x11C */
+	volatile u32 gaddr1;			/* FEC + 0x120 */
+	volatile u32 gaddr2;			/* FEC + 0x124 */
+	volatile u32 random;			/* FEC + 0x128 */
+	volatile u32 rand1;			/* FEC + 0x12C */
+	volatile u32 tmp;			/* FEC + 0x130 */
+
+	volatile u32 reserved6[3];		/* FEC + 0x134-13C */
+	volatile u32 fifo_id;			/* FEC + 0x140 */
+	volatile u32 x_wmrk;			/* FEC + 0x144 */
+	volatile u32 fcntrl;			/* FEC + 0x148 */
+	volatile u32 r_bound;			/* FEC + 0x14C */
+	volatile u32 r_fstart;			/* FEC + 0x150 */
+	volatile u32 r_count;			/* FEC + 0x154 */
+	volatile u32 r_lag;			/* FEC + 0x158 */
+	volatile u32 r_read;			/* FEC + 0x15C */
+	volatile u32 r_write;			/* FEC + 0x160 */
+	volatile u32 x_count;			/* FEC + 0x164 */
+	volatile u32 x_lag;			/* FEC + 0x168 */
+	volatile u32 x_retry;			/* FEC + 0x16C */
+	volatile u32 x_write;			/* FEC + 0x170 */
+	volatile u32 x_read;			/* FEC + 0x174 */
+
+	volatile u32 reserved7[2];		/* FEC + 0x178-17C */
+	volatile u32 fm_cntrl;			/* FEC + 0x180 */
+	volatile u32 rfifo_data;		/* FEC + 0x184 */
+	volatile u32 rfifo_status;		/* FEC + 0x188 */
+	volatile u32 rfifo_cntrl;		/* FEC + 0x18C */
+	volatile u32 rfifo_lrf_ptr;		/* FEC + 0x190 */
+	volatile u32 rfifo_lwf_ptr;		/* FEC + 0x194 */
+	volatile u32 rfifo_alarm;		/* FEC + 0x198 */
+	volatile u32 rfifo_rdptr;		/* FEC + 0x19C */
+	volatile u32 rfifo_wrptr;		/* FEC + 0x1A0 */
+	volatile u32 tfifo_data;		/* FEC + 0x1A4 */
+	volatile u32 tfifo_status;		/* FEC + 0x1A8 */
+	volatile u32 tfifo_cntrl;		/* FEC + 0x1AC */
+	volatile u32 tfifo_lrf_ptr;		/* FEC + 0x1B0 */
+	volatile u32 tfifo_lwf_ptr;		/* FEC + 0x1B4 */
+	volatile u32 tfifo_alarm;		/* FEC + 0x1B8 */
+	volatile u32 tfifo_rdptr;		/* FEC + 0x1BC */
+	volatile u32 tfifo_wrptr;		/* FEC + 0x1C0 */
+
+	volatile u32 reset_cntrl;		/* FEC + 0x1C4 */
+	volatile u32 xmit_fsm;			/* FEC + 0x1C8 */
+
+	volatile u32 reserved8[3];		/* FEC + 0x1CC-1D4 */
+	volatile u32 rdes_data0;		/* FEC + 0x1D8 */
+	volatile u32 rdes_data1;		/* FEC + 0x1DC */
+	volatile u32 r_length;			/* FEC + 0x1E0 */
+	volatile u32 x_length;			/* FEC + 0x1E4 */
+	volatile u32 x_addr;			/* FEC + 0x1E8 */
+	volatile u32 cdes_data;			/* FEC + 0x1EC */
+	volatile u32 status;			/* FEC + 0x1F0 */
+	volatile u32 dma_control;		/* FEC + 0x1F4 */
+	volatile u32 des_cmnd;			/* FEC + 0x1F8 */
+	volatile u32 data;			/* FEC + 0x1FC */
+
+	volatile u32 rmon_t_drop;		/* FEC + 0x200 */
+	volatile u32 rmon_t_packets;		/* FEC + 0x204 */
+	volatile u32 rmon_t_bc_pkt;		/* FEC + 0x208 */
+	volatile u32 rmon_t_mc_pkt;		/* FEC + 0x20C */
+	volatile u32 rmon_t_crc_align;		/* FEC + 0x210 */
+	volatile u32 rmon_t_undersize;		/* FEC + 0x214 */
+	volatile u32 rmon_t_oversize;		/* FEC + 0x218 */
+	volatile u32 rmon_t_frag;		/* FEC + 0x21C */
+	volatile u32 rmon_t_jab;		/* FEC + 0x220 */
+	volatile u32 rmon_t_col;		/* FEC + 0x224 */
+	volatile u32 rmon_t_p64;		/* FEC + 0x228 */
+	volatile u32 rmon_t_p65to127;		/* FEC + 0x22C */
+	volatile u32 rmon_t_p128to255;		/* FEC + 0x230 */
+	volatile u32 rmon_t_p256to511;		/* FEC + 0x234 */
+	volatile u32 rmon_t_p512to1023;		/* FEC + 0x238 */
+	volatile u32 rmon_t_p1024to2047;	/* FEC + 0x23C */
+	volatile u32 rmon_t_p_gte2048;		/* FEC + 0x240 */
+	volatile u32 rmon_t_octets;		/* FEC + 0x244 */
+	volatile u32 ieee_t_drop;		/* FEC + 0x248 */
+	volatile u32 ieee_t_frame_ok;		/* FEC + 0x24C */
+	volatile u32 ieee_t_1col;		/* FEC + 0x250 */
+	volatile u32 ieee_t_mcol;		/* FEC + 0x254 */
+	volatile u32 ieee_t_def;		/* FEC + 0x258 */
+	volatile u32 ieee_t_lcol;		/* FEC + 0x25C */
+	volatile u32 ieee_t_excol;		/* FEC + 0x260 */
+	volatile u32 ieee_t_macerr;		/* FEC + 0x264 */
+	volatile u32 ieee_t_cserr;		/* FEC + 0x268 */
+	volatile u32 ieee_t_sqe;		/* FEC + 0x26C */
+	volatile u32 t_fdxfc;			/* FEC + 0x270 */
+	volatile u32 ieee_t_octets_ok;		/* FEC + 0x274 */
+
+	volatile u32 reserved9[2];		/* FEC + 0x278-27C */
+	volatile u32 rmon_r_drop;		/* FEC + 0x280 */
+	volatile u32 rmon_r_packets;		/* FEC + 0x284 */
+	volatile u32 rmon_r_bc_pkt;		/* FEC + 0x288 */
+	volatile u32 rmon_r_mc_pkt;		/* FEC + 0x28C */
+	volatile u32 rmon_r_crc_align;		/* FEC + 0x290 */
+	volatile u32 rmon_r_undersize;		/* FEC + 0x294 */
+	volatile u32 rmon_r_oversize;		/* FEC + 0x298 */
+	volatile u32 rmon_r_frag;		/* FEC + 0x29C */
+	volatile u32 rmon_r_jab;		/* FEC + 0x2A0 */
+
+	volatile u32 rmon_r_resvd_0;		/* FEC + 0x2A4 */
+
+	volatile u32 rmon_r_p64;		/* FEC + 0x2A8 */
+	volatile u32 rmon_r_p65to127;		/* FEC + 0x2AC */
+	volatile u32 rmon_r_p128to255;		/* FEC + 0x2B0 */
+	volatile u32 rmon_r_p256to511;		/* FEC + 0x2B4 */
+	volatile u32 rmon_r_p512to1023;		/* FEC + 0x2B8 */
+	volatile u32 rmon_r_p1024to2047;	/* FEC + 0x2BC */
+	volatile u32 rmon_r_p_gte2048;		/* FEC + 0x2C0 */
+	volatile u32 rmon_r_octets;		/* FEC + 0x2C4 */
+	volatile u32 ieee_r_drop;		/* FEC + 0x2C8 */
+	volatile u32 ieee_r_frame_ok;		/* FEC + 0x2CC */
+	volatile u32 ieee_r_crc;		/* FEC + 0x2D0 */
+	volatile u32 ieee_r_align;		/* FEC + 0x2D4 */
+	volatile u32 r_macerr;			/* FEC + 0x2D8 */
+	volatile u32 r_fdxfc;			/* FEC + 0x2DC */
+	volatile u32 ieee_r_octets_ok;		/* FEC + 0x2E0 */
+
+	volatile u32 reserved10[6];		/* FEC + 0x2E4-2FC */
+
+	volatile u32 reserved11[64];		/* FEC + 0x300-3FF */
+};
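+
+/*
+ * Note (descriptive): this register block is memory-mapped big-endian I/O;
+ * the driver accesses these registers exclusively through the
+ * in_be32()/out_be32() accessors rather than plain loads and stores.
+ */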
+
+#define MPC5xxx_FEC_MIB_DISABLE			0x80000000
+
+#define MPC5xxx_FEC_IEVENT_HBERR		0x80000000
+#define MPC5xxx_FEC_IEVENT_BABR			0x40000000
+#define MPC5xxx_FEC_IEVENT_BABT			0x20000000
+#define MPC5xxx_FEC_IEVENT_GRA			0x10000000
+#define MPC5xxx_FEC_IEVENT_TFINT		0x08000000
+#define MPC5xxx_FEC_IEVENT_MII			0x00800000
+#define MPC5xxx_FEC_IEVENT_LATE_COL		0x00200000
+#define MPC5xxx_FEC_IEVENT_COL_RETRY_LIM	0x00100000
+#define MPC5xxx_FEC_IEVENT_XFIFO_UN		0x00080000
+#define MPC5xxx_FEC_IEVENT_XFIFO_ERROR		0x00040000
+#define MPC5xxx_FEC_IEVENT_RFIFO_ERROR		0x00020000
+
+#define MPC5xxx_FEC_IMASK_HBERR			0x80000000
+#define MPC5xxx_FEC_IMASK_BABR			0x40000000
+#define MPC5xxx_FEC_IMASK_BABT			0x20000000
+#define MPC5xxx_FEC_IMASK_GRA			0x10000000
+#define MPC5xxx_FEC_IMASK_MII			0x00800000
+#define MPC5xxx_FEC_IMASK_LATE_COL		0x00200000
+#define MPC5xxx_FEC_IMASK_COL_RETRY_LIM		0x00100000
+#define MPC5xxx_FEC_IMASK_XFIFO_UN		0x00080000
+#define MPC5xxx_FEC_IMASK_XFIFO_ERROR		0x00040000
+#define MPC5xxx_FEC_IMASK_RFIFO_ERROR		0x00020000
+
+#define MPC5xxx_FEC_RCNTRL_MAX_FL_SHIFT		16
+#define MPC5xxx_FEC_RCNTRL_LOOP			0x01
+#define MPC5xxx_FEC_RCNTRL_DRT			0x02
+#define MPC5xxx_FEC_RCNTRL_MII_MODE		0x04
+#define MPC5xxx_FEC_RCNTRL_PROM			0x08
+#define MPC5xxx_FEC_RCNTRL_BC_REJ		0x10
+#define MPC5xxx_FEC_RCNTRL_FCE			0x20
+
+#define MPC5xxx_FEC_TCNTRL_GTS			0x00000001
+#define MPC5xxx_FEC_TCNTRL_HBC			0x00000002
+#define MPC5xxx_FEC_TCNTRL_FDEN			0x00000004
+#define MPC5xxx_FEC_TCNTRL_TFC_PAUSE		0x00000008
+#define MPC5xxx_FEC_TCNTRL_RFC_PAUSE		0x00000010
+
+#define MPC5xxx_FEC_ECNTRL_RESET		0x00000001
+#define MPC5xxx_FEC_ECNTRL_ETHER_EN		0x00000002
+
+#define MPC5xxx_FEC_RESET_DELAY			50 /* microseconds */
+
+
+/* Receive & Transmit Buffer Descriptor definitions */
+struct mpc5xxx_fec_bd {
+	volatile u32 status;
+	volatile u32 data;
+};
+
+/* Receive data buffer format */
+struct mpc5xxx_rbuf {
+	u8 data[MPC5xxx_FEC_RECV_BUFFER_SIZE_BC];
+};
+
+struct fec_queue {
+	volatile struct mpc5xxx_fec_bd *bd_base;
+	struct rtskb **skb_base;
+	u16 last_index;
+	u16 start_index;
+	u16 finish_index;
+};
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+#define MII_ADVERTISE_HALF	(ADVERTISE_100HALF | ADVERTISE_10HALF | \
+				 ADVERTISE_CSMA)
+
+#define MII_ADVERTISE_ALL	(ADVERTISE_100FULL | ADVERTISE_10FULL | \
+				 MII_ADVERTISE_HALF)
+#ifdef PHY_INTERRUPT
+#define MII_ADVERTISE_DEFAULT   MII_ADVERTISE_ALL
+#else
+#define MII_ADVERTISE_DEFAULT   MII_ADVERTISE_HALF
+#endif
+
+typedef struct {
+	uint mii_data;
+	void (*funct)(uint mii_reg, struct rtnet_device *dev, uint data);
+} phy_cmd_t;
+
+typedef struct {
+	uint id;
+	char *name;
+
+	const phy_cmd_t *config;
+	const phy_cmd_t *startup;
+	const phy_cmd_t *ack_int;
+	const phy_cmd_t *shutdown;
+} phy_info_t;
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+struct mpc5xxx_fec_priv {
+	int full_duplex;
+	int tx_full;
+	int r_tasknum;
+	int t_tasknum;
+	int r_irq;
+	int t_irq;
+	rtdm_irq_t irq_handle;
+	rtdm_irq_t r_irq_handle;
+	rtdm_irq_t t_irq_handle;
+	u32 last_transmit_time;
+	u32 last_receive_time;
+	struct mpc5xxx_fec *fec;
+	struct mpc5xxx_sram_fec *sram;
+	struct mpc5xxx_gpio *gpio;
+	struct mpc5xxx_sdma *sdma;
+	struct fec_queue r_queue;
+	struct rtskb *rskb[MPC5xxx_FEC_RBD_NUM];
+	struct fec_queue t_queue;
+	struct rtskb *tskb[MPC5xxx_FEC_TBD_NUM];
+	rtdm_lock_t lock;
+	unsigned long open_time;
+	struct net_device_stats stats;
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	uint phy_id;
+	uint phy_id_done;
+	uint phy_status;
+	uint phy_speed;
+	phy_info_t *phy;
+	struct tq_struct phy_task;
+	volatile uint sequence_done;
+	uint link;
+	uint phy_addr;
+
+	struct tq_struct link_up_task;
+	int duplex_change;
+	int link_up;
+
+	struct timer_list phy_timer_list;
+	u16 old_status;
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+};
+
+struct mpc5xxx_sram_fec {
+	volatile struct mpc5xxx_fec_bd tbd[MPC5xxx_FEC_TBD_NUM];
+	volatile struct mpc5xxx_fec_bd rbd[MPC5xxx_FEC_RBD_NUM];
+};
+
+#define MPC5xxx_FEC_RBD_READY	0x40000000
+#define MPC5xxx_FEC_RBD_RFD	0x08000000	/* receive frame done */
+
+#define MPC5xxx_FEC_RBD_INIT	MPC5xxx_FEC_RBD_READY
+
+#define MPC5xxx_FEC_TBD_READY	0x40000000
+#define MPC5xxx_FEC_TBD_TFD	0x08000000	/* transmit frame done */
+#define MPC5xxx_FEC_TBD_INT	0x04000000	/* Interrupt */
+
+#define MPC5xxx_FEC_TBD_INIT	(MPC5xxx_FEC_TBD_INT | MPC5xxx_FEC_TBD_TFD | \
+				 MPC5xxx_FEC_TBD_READY)
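+
+/*
+ * Note (descriptive): these INIT masks are the initial status words handed
+ * to TaskBDAssign() in the driver, MPC5xxx_FEC_RBD_INIT for receive buffers
+ * and MPC5xxx_FEC_TBD_INIT for transmit buffers.
+ */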
+
+
+
+/* MII-related definitions */
+#define MPC5xxx_FEC_MII_DATA_ST		0x40000000	/* Start frame */
+#define MPC5xxx_FEC_MII_DATA_OP_RD	0x20000000	/* Perform read */
+#define MPC5xxx_FEC_MII_DATA_OP_WR	0x10000000	/* Perform write */
+#define MPC5xxx_FEC_MII_DATA_PA_MSK	0x0f800000	/* PHY Address mask */
+#define MPC5xxx_FEC_MII_DATA_RA_MSK	0x007c0000	/* PHY Register mask */
+#define MPC5xxx_FEC_MII_DATA_TA		0x00020000	/* Turnaround */
+#define MPC5xxx_FEC_MII_DATA_DATAMSK	0x00000fff	/* PHY data mask */
+
+#define MPC5xxx_FEC_MII_DATA_RA_SHIFT	0x12		/* MII register address shift */
+#define MPC5xxx_FEC_MII_DATA_PA_SHIFT	0x17		/* MII PHY address shift */
+
+#define MPC5xxx_FEC_MII_SPEED		(5 * 2)
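+
+/*
+ * Illustrative sketch only (not used verbatim by the driver): an MII read
+ * command word for register 'reg' of PHY 'phy' would be composed roughly as
+ *
+ *	MPC5xxx_FEC_MII_DATA_ST | MPC5xxx_FEC_MII_DATA_OP_RD |
+ *	MPC5xxx_FEC_MII_DATA_TA |
+ *	(phy << MPC5xxx_FEC_MII_DATA_PA_SHIFT) |
+ *	(reg << MPC5xxx_FEC_MII_DATA_RA_SHIFT)
+ *
+ * and then written to the mii_data register; completion is signalled by the
+ * MPC5xxx_FEC_IEVENT_MII interrupt event.
+ */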
+
+const char mpc5xxx_fec_name[] = "eth0";
+
+struct mibCounters {
+	unsigned int byteReceived;
+	unsigned int byteSent;
+	unsigned int framesReceived;
+	unsigned int framesSent;
+	unsigned int totalByteReceived;
+	unsigned int totalFramesReceived;
+	unsigned int broadcastFramesReceived;
+	unsigned int multicastFramesReceived;
+	unsigned int cRCError;
+	unsigned int oversizeFrames;
+	unsigned int fragments;
+	unsigned int jabber;
+	unsigned int collision;
+	unsigned int lateCollision;
+	unsigned int frames64;
+	unsigned int frames65_127;
+	unsigned int frames128_255;
+	unsigned int frames256_511;
+	unsigned int frames512_1023;
+	unsigned int frames1024_MaxSize;
+	unsigned int macRxError;
+	unsigned int droppedFrames;
+	unsigned int outMulticastFrames;
+	unsigned int outBroadcastFrames;
+	unsigned int undersizeFrames;
+};
+
+#define MPC5xxx_FEC_WATCHDOG_TIMEOUT  ((400*HZ)/1000)
+
+
+#define MPC5xxx_FEC_FRAME_LAST		0x08000000	/* Last */
+#define MPC5xxx_FEC_FRAME_M		0x01000000	/* M? */
+#define MPC5xxx_FEC_FRAME_BC		0x00800000	/* Broadcast */
+#define MPC5xxx_FEC_FRAME_MC		0x00400000	/* Multicast */
+#define MPC5xxx_FEC_FRAME_LG		0x00200000	/* Length error */
+#define MPC5xxx_FEC_FRAME_NO		0x00100000	/* Non-octet aligned frame error */
+#define MPC5xxx_FEC_FRAME_CR		0x00040000	/* CRC frame error */
+#define MPC5xxx_FEC_FRAME_OV		0x00020000	/* Overrun error */
+#define MPC5xxx_FEC_FRAME_TR		0x00010000	/* Truncated error */
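+
+/*
+ * Note (descriptive): the receive interrupt checks (status & 0x00370000),
+ * which is the union of the LG, NO, CR, OV and TR error bits above; any of
+ * them causes the frame to be dropped.
+ */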
+
+
+
+#endif	/* __RT_MPC52XX_FEC_H_ */
+++ linux-patched/drivers/xenomai/net/drivers/mpc52xx_fec/Makefile	2022-03-21 12:58:29.447888396 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:2 @
--- linux/drivers/xenomai/net/drivers/igb/e1000_phy.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_MPC52XX_FEC) += rt_mpc52xx_fec.o
+
+rt_mpc52xx_fec-y := mpc52xx_fec.o
+++ linux-patched/drivers/xenomai/net/drivers/igb/e1000_phy.c	2022-03-21 12:58:29.442888445 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/igb/igb.h	1970-01-01 01:00:00.000000000 +0100
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+
+#include "e1000_mac.h"
+#include "e1000_phy.h"
+
+static s32  igb_phy_setup_autoneg(struct e1000_hw *hw);
+static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
+					     u16 *phy_ctrl);
+static s32  igb_wait_autoneg(struct e1000_hw *hw);
+static s32  igb_set_master_slave_mode(struct e1000_hw *hw);
+
+/* Cable length tables */
+static const u16 e1000_m88_cable_length_table[] = {
+	0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+	(sizeof(e1000_m88_cable_length_table) / \
+	sizeof(e1000_m88_cable_length_table[0]))
+
+static const u16 e1000_igp_2_cable_length_table[] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+	0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+	6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+	21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+	40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+	60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+	83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+	104, 109, 114, 118, 121, 124};
+#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
+	(sizeof(e1000_igp_2_cable_length_table) / \
+	 sizeof(e1000_igp_2_cable_length_table[0]))
+
+/**
+ *  igb_check_reset_block - Check if PHY reset is blocked
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the PHY management control register and check whether a PHY reset
+ *  is blocked.  If a reset is not blocked return 0, otherwise
+ *  return E1000_BLK_PHY_RESET (12).
+ **/
+s32 igb_check_reset_block(struct e1000_hw *hw)
+{
+	u32 manc;
+
+	manc = rd32(E1000_MANC);
+
+	return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0;
+}
+
+/**
+ *  igb_get_phy_id - Retrieve the PHY ID and revision
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY registers and stores the PHY ID and possibly the PHY
+ *  revision in the hardware structure.
+ **/
+s32 igb_get_phy_id(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 phy_id;
+
+	ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+	if (ret_val)
+		goto out;
+
+	phy->id = (u32)(phy_id << 16);
+	udelay(20);
+	ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
+	if (ret_val)
+		goto out;
+
+	phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+	phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_phy_reset_dsp - Reset PHY DSP
+ *  @hw: pointer to the HW structure
+ *
+ *  Reset the digital signal processor.
+ **/
+static s32 igb_phy_reset_dsp(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	if (!(hw->phy.ops.write_reg))
+		goto out;
+
+	ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
+	if (ret_val)
+		goto out;
+
+	ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_read_phy_reg_mdic - Read MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the MDI control register in the PHY at offset and stores the
+ *  information read to data.
+ **/
+s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, mdic = 0;
+	s32 ret_val = 0;
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		hw_dbg("PHY Address %d is out of range\n", offset);
+		ret_val = -E1000_ERR_PARAM;
+		goto out;
+	}
+
+	/* Set up Op-code, Phy Address, and register offset in the MDI
+	 * Control register.  The MAC will take care of interfacing with the
+	 * PHY to retrieve the desired data.
+	 */
+	mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+		(phy->addr << E1000_MDIC_PHY_SHIFT) |
+		(E1000_MDIC_OP_READ));
+
+	wr32(E1000_MDIC, mdic);
+
+	/* Poll the ready bit to see if the MDI read completed
+	 * Increasing the time out as testing showed failures with
+	 * the lower time out
+	 */
+	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+		udelay(50);
+		mdic = rd32(E1000_MDIC);
+		if (mdic & E1000_MDIC_READY)
+			break;
+	}
+	if (!(mdic & E1000_MDIC_READY)) {
+		hw_dbg("MDI Read did not complete\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+	if (mdic & E1000_MDIC_ERROR) {
+		hw_dbg("MDI Error\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+	*data = (u16) mdic;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_phy_reg_mdic - Write MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write to register at offset
+ *
+ *  Writes data to MDI control register in the PHY at offset.
+ **/
+s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, mdic = 0;
+	s32 ret_val = 0;
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		hw_dbg("PHY Address %d is out of range\n", offset);
+		ret_val = -E1000_ERR_PARAM;
+		goto out;
+	}
+
+	/* Set up Op-code, Phy Address, and register offset in the MDI
+	 * Control register.  The MAC will take care of interfacing with the
+	 * PHY to write the desired data.
+	 */
+	mdic = (((u32)data) |
+		(offset << E1000_MDIC_REG_SHIFT) |
+		(phy->addr << E1000_MDIC_PHY_SHIFT) |
+		(E1000_MDIC_OP_WRITE));
+
+	wr32(E1000_MDIC, mdic);
+
+	/* Poll the ready bit to see if the MDI write completed
+	 * Increasing the time out as testing showed failures with
+	 * the lower time out
+	 */
+	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+		udelay(50);
+		mdic = rd32(E1000_MDIC);
+		if (mdic & E1000_MDIC_READY)
+			break;
+	}
+	if (!(mdic & E1000_MDIC_READY)) {
+		hw_dbg("MDI Write did not complete\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+	if (mdic & E1000_MDIC_ERROR) {
+		hw_dbg("MDI Error\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_read_phy_reg_i2c - Read PHY register using i2c
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset using the i2c interface and stores the
+ *  retrieved information in data.
+ **/
+s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, i2ccmd = 0;
+
+	/* Set up Op-code, Phy Address, and register address in the I2CCMD
+	 * register.  The MAC will take care of interfacing with the
+	 * PHY to retrieve the desired data.
+	 */
+	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+		  (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+		  (E1000_I2CCMD_OPCODE_READ));
+
+	wr32(E1000_I2CCMD, i2ccmd);
+
+	/* Poll the ready bit to see if the I2C read completed */
+	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+		udelay(50);
+		i2ccmd = rd32(E1000_I2CCMD);
+		if (i2ccmd & E1000_I2CCMD_READY)
+			break;
+	}
+	if (!(i2ccmd & E1000_I2CCMD_READY)) {
+		hw_dbg("I2CCMD Read did not complete\n");
+		return -E1000_ERR_PHY;
+	}
+	if (i2ccmd & E1000_I2CCMD_ERROR) {
+		hw_dbg("I2CCMD Error bit set\n");
+		return -E1000_ERR_PHY;
+	}
+
+	/* Need to byte-swap the 16-bit value. */
+	*data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+
+	return 0;
+}
+
+/**
+ *  igb_write_phy_reg_i2c - Write PHY register using i2c
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset using the i2c interface.
+ **/
+s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, i2ccmd = 0;
+	u16 phy_data_swapped;
+
+	/* Prevent overwriting the SFP I2C EEPROM, which is at address A0. */
+	if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) {
+		hw_dbg("PHY I2C Address %d is out of range.\n",
+			  hw->phy.addr);
+		return -E1000_ERR_CONFIG;
+	}
+
+	/* Swap the data bytes for the I2C interface */
+	phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+
+	/* Set up Op-code, Phy Address, and register address in the I2CCMD
+	 * register.  The MAC will take care of interfacing with the
+	 * PHY to write the desired data.
+	 */
+	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+		  (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+		  E1000_I2CCMD_OPCODE_WRITE |
+		  phy_data_swapped);
+
+	wr32(E1000_I2CCMD, i2ccmd);
+
+	/* Poll the ready bit to see if the I2C write completed */
+	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+		udelay(50);
+		i2ccmd = rd32(E1000_I2CCMD);
+		if (i2ccmd & E1000_I2CCMD_READY)
+			break;
+	}
+	if (!(i2ccmd & E1000_I2CCMD_READY)) {
+		hw_dbg("I2CCMD Write did not complete\n");
+		return -E1000_ERR_PHY;
+	}
+	if (i2ccmd & E1000_I2CCMD_ERROR) {
+		hw_dbg("I2CCMD Error bit set\n");
+		return -E1000_ERR_PHY;
+	}
+
+	return 0;
+}
+
+/**
+ *  igb_read_sfp_data_byte - Reads SFP module data.
+ *  @hw: pointer to the HW structure
+ *  @offset: byte location offset to be read
+ *  @data: read data buffer pointer
+ *
+ *  Reads one byte of SFP module data stored in the SFP module's EEPROM
+ *  or in the SFP diagnostic area.  The function should be called with
+ *  E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
+ *  or E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for access to the SFP
+ *  diagnostic parameters.
+ **/
+s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
+{
+	u32 i = 0;
+	u32 i2ccmd = 0;
+	u32 data_local = 0;
+
+	if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
+		hw_dbg("I2CCMD command address exceeds upper limit\n");
+		return -E1000_ERR_PHY;
+	}
+
+	/* Set up Op-code and EEPROM Address in the I2CCMD
+	 * register. The MAC will take care of interfacing with the
+	 * EEPROM to retrieve the desired data.
+	 */
+	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+		  E1000_I2CCMD_OPCODE_READ);
+
+	wr32(E1000_I2CCMD, i2ccmd);
+
+	/* Poll the ready bit to see if the I2C read completed */
+	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+		udelay(50);
+		data_local = rd32(E1000_I2CCMD);
+		if (data_local & E1000_I2CCMD_READY)
+			break;
+	}
+	if (!(data_local & E1000_I2CCMD_READY)) {
+		hw_dbg("I2CCMD Read did not complete\n");
+		return -E1000_ERR_PHY;
+	}
+	if (data_local & E1000_I2CCMD_ERROR) {
+		hw_dbg("I2CCMD Error bit set\n");
+		return -E1000_ERR_PHY;
+	}
+	*data = (u8) data_local & 0xFF;
+
+	return 0;
+}
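+
+/*
+ * Usage sketch (illustrative only, not part of the driver): reading byte 3
+ * of the SFP module database could look roughly like
+ *
+ *	u8 byte;
+ *	s32 ret = igb_read_sfp_data_byte(hw,
+ *					 E1000_I2CCMD_SFP_DATA_ADDR(3), &byte);
+ */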
+
+/**
+ *  igb_read_phy_reg_igp - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires the semaphore, if necessary, then reads the PHY register at
+ *  offset and stores the retrieved information in data.  Releases any
+ *  acquired semaphores before exiting.
+ **/
+s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val = 0;
+
+	if (!(hw->phy.ops.acquire))
+		goto out;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		ret_val = igb_write_phy_reg_mdic(hw,
+						 IGP01E1000_PHY_PAGE_SELECT,
+						 (u16)offset);
+		if (ret_val) {
+			hw->phy.ops.release(hw);
+			goto out;
+		}
+	}
+
+	ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_phy_reg_igp - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires the semaphore, if necessary, then writes the data to the PHY
+ *  register at the offset.  Releases any acquired semaphores before exiting.
+ **/
+s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val = 0;
+
+	if (!(hw->phy.ops.acquire))
+		goto out;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		ret_val = igb_write_phy_reg_mdic(hw,
+						 IGP01E1000_PHY_PAGE_SELECT,
+						 (u16)offset);
+		if (ret_val) {
+			hw->phy.ops.release(hw);
+			goto out;
+		}
+	}
+
+	ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					 data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_copper_link_setup_82580 - Setup 82580 PHY for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up Carrier-sense on Transmit and downshift values.
+ **/
+s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+
+	if (phy->reset_disable) {
+		ret_val = 0;
+		goto out;
+	}
+
+	if (phy->type == e1000_phy_82580) {
+		ret_val = hw->phy.ops.reset(hw);
+		if (ret_val) {
+			hw_dbg("Error resetting the PHY.\n");
+			goto out;
+		}
+	}
+
+	/* Enable CRS on TX. This must be set for half-duplex operation. */
+	ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= I82580_CFG_ASSERT_CRS_ON_TX;
+
+	/* Enable downshift */
+	phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
+
+	ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Set MDI/MDIX mode */
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
+	if (ret_val)
+		goto out;
+	phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
+	/* Options:
+	 *   0 - Auto (default)
+	 *   1 - MDI mode
+	 *   2 - MDI-X mode
+	 */
+	switch (hw->phy.mdix) {
+	case 1:
+		break;
+	case 2:
+		phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX;
+		break;
+	case 0:
+	default:
+		phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX;
+		break;
+	}
+	ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_copper_link_setup_m88 - Setup m88 PHY's for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up MDI/MDI-X and polarity for m88 PHYs.  If necessary, the transmit
+ *  clock and downshift values are also set.
+ **/
+s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+
+	if (phy->reset_disable) {
+		ret_val = 0;
+		goto out;
+	}
+
+	/* Enable CRS on TX. This must be set for half-duplex operation. */
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+	/* Options:
+	 *   MDI/MDI-X = 0 (default)
+	 *   0 - Auto for all speeds
+	 *   1 - MDI mode
+	 *   2 - MDI-X mode
+	 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+	 */
+	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+	switch (phy->mdix) {
+	case 1:
+		phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+		break;
+	case 2:
+		phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+		break;
+	case 3:
+		phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+		break;
+	case 0:
+	default:
+		phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+		break;
+	}
+
+	/* Options:
+	 *   disable_polarity_correction = 0 (default)
+	 *       Automatic Correction for Reversed Cable Polarity
+	 *   0 - Disabled
+	 *   1 - Enabled
+	 */
+	phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+	if (phy->disable_polarity_correction == 1)
+		phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+	ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	if (phy->revision < E1000_REVISION_4) {
+		/* Force TX_CLK in the Extended PHY Specific Control Register
+		 * to 25MHz clock.
+		 */
+		ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+					    &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+		if ((phy->revision == E1000_REVISION_2) &&
+		    (phy->id == M88E1111_I_PHY_ID)) {
+			/* 82573L PHY - set the downshift counter to 5x. */
+			phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
+			phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+		} else {
+			/* Configure Master and Slave downshift values */
+			phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+				      M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+			phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+				     M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+		}
+		ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+					     phy_data);
+		if (ret_val)
+			goto out;
+	}
+
+	/* Commit the changes. */
+	ret_val = igb_phy_sw_reset(hw);
+	if (ret_val) {
+		hw_dbg("Error committing the PHY changes\n");
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's.
+ *  Also enables and sets the downshift parameters.
+ **/
+s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+
+	if (phy->reset_disable)
+		return 0;
+
+	/* Enable CRS on Tx. This must be set for half-duplex operation. */
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/* Options:
+	 *   MDI/MDI-X = 0 (default)
+	 *   0 - Auto for all speeds
+	 *   1 - MDI mode
+	 *   2 - MDI-X mode
+	 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+	 */
+	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+	switch (phy->mdix) {
+	case 1:
+		phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+		break;
+	case 2:
+		phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+		break;
+	case 3:
+		/* M88E1112 does not support this mode */
+		if (phy->id != M88E1112_E_PHY_ID) {
+			phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+			break;
+		}
+		fallthrough;
+	case 0:
+	default:
+		phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+		break;
+	}
+
+	/* Options:
+	 *   disable_polarity_correction = 0 (default)
+	 *       Automatic Correction for Reversed Cable Polarity
+	 *   0 - Disabled
+	 *   1 - Enabled
+	 */
+	phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+	if (phy->disable_polarity_correction == 1)
+		phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+	/* Enable downshift and set it to X6 */
+	if (phy->id == M88E1543_E_PHY_ID) {
+		phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE;
+		ret_val =
+		    phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = igb_phy_sw_reset(hw);
+		if (ret_val) {
+			hw_dbg("Error committing the PHY changes\n");
+			return ret_val;
+		}
+	}
+
+	phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
+	phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
+	phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
+
+	ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/* Commit the changes. */
+	ret_val = igb_phy_sw_reset(hw);
+	if (ret_val) {
+		hw_dbg("Error committing the PHY changes\n");
+		return ret_val;
+	}
+	ret_val = igb_set_master_slave_mode(hw);
+	if (ret_val)
+		return ret_val;
+
+	return 0;
+}
+
+/**
+ *  igb_copper_link_setup_igp - Setup igp PHY's for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
+ *  igp PHY's.
+ **/
+s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	if (phy->reset_disable) {
+		ret_val = 0;
+		goto out;
+	}
+
+	ret_val = phy->ops.reset(hw);
+	if (ret_val) {
+		hw_dbg("Error resetting the PHY.\n");
+		goto out;
+	}
+
+	/* Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+	 * timeout issues when LFS is enabled.
+	 */
+	msleep(100);
+
+	/* The NVM settings will configure LPLU in D3 for
+	 * non-IGP1 PHYs.
+	 */
+	if (phy->type == e1000_phy_igp) {
+		/* disable lplu d3 during driver init */
+		if (phy->ops.set_d3_lplu_state)
+			ret_val = phy->ops.set_d3_lplu_state(hw, false);
+		if (ret_val) {
+			hw_dbg("Error Disabling LPLU D3\n");
+			goto out;
+		}
+	}
+
+	/* disable lplu d0 during driver init */
+	ret_val = phy->ops.set_d0_lplu_state(hw, false);
+	if (ret_val) {
+		hw_dbg("Error Disabling LPLU D0\n");
+		goto out;
+	}
+	/* Configure mdi-mdix settings */
+	ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
+	if (ret_val)
+		goto out;
+
+	data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+	switch (phy->mdix) {
+	case 1:
+		data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+		break;
+	case 2:
+		data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+		break;
+	case 0:
+	default:
+		data |= IGP01E1000_PSCR_AUTO_MDIX;
+		break;
+	}
+	ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
+	if (ret_val)
+		goto out;
+
+	/* set auto-master slave resolution settings */
+	if (hw->mac.autoneg) {
+		/* when autonegotiation advertisement is only 1000Mbps then we
+		 * should disable SmartSpeed and enable Auto MasterSlave
+		 * resolution as hardware default.
+		 */
+		if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
+			/* Disable SmartSpeed */
+			ret_val = phy->ops.read_reg(hw,
+						    IGP01E1000_PHY_PORT_CONFIG,
+						    &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = phy->ops.write_reg(hw,
+						     IGP01E1000_PHY_PORT_CONFIG,
+						     data);
+			if (ret_val)
+				goto out;
+
+			/* Set auto Master/Slave resolution process */
+			ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~CR_1000T_MS_ENABLE;
+			ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
+			if (ret_val)
+				goto out;
+		}
+
+		ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
+		if (ret_val)
+			goto out;
+
+		/* load defaults for future use */
+		phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
+			((data & CR_1000T_MS_VALUE) ?
+			e1000_ms_force_master :
+			e1000_ms_force_slave) :
+			e1000_ms_auto;
+
+		switch (phy->ms_type) {
+		case e1000_ms_force_master:
+			data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+			break;
+		case e1000_ms_force_slave:
+			data |= CR_1000T_MS_ENABLE;
+			data &= ~(CR_1000T_MS_VALUE);
+			break;
+		case e1000_ms_auto:
+			data &= ~CR_1000T_MS_ENABLE;
+			fallthrough;
+		default:
+			break;
+		}
+		ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_copper_link_autoneg - Setup/Enable autoneg for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs initial bounds checking on the autoneg advertisement parameter,
+ *  then configures the PHY to advertise the full capability if none was
+ *  requested.  Sets up the PHY for autoneg and restarts the negotiation
+ *  process with the link partner.  If autoneg_wait_to_complete is set, waits
+ *  for autoneg to complete before exiting.
+ **/
+static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_ctrl;
+
+	/* Perform some bounds checking on the autoneg advertisement
+	 * parameter.
+	 */
+	phy->autoneg_advertised &= phy->autoneg_mask;
+
+	/* If autoneg_advertised is zero, we assume it was not defaulted
+	 * by the calling code so we set to advertise full capability.
+	 */
+	if (phy->autoneg_advertised == 0)
+		phy->autoneg_advertised = phy->autoneg_mask;
+
+	hw_dbg("Reconfiguring auto-neg advertisement params\n");
+	ret_val = igb_phy_setup_autoneg(hw);
+	if (ret_val) {
+		hw_dbg("Error Setting up Auto-Negotiation\n");
+		goto out;
+	}
+	hw_dbg("Restarting Auto-Neg\n");
+
+	/* Restart auto-negotiation by setting the Auto Neg Enable bit and
+	 * the Auto Neg Restart bit in the PHY control register.
+	 */
+	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	/* Does the user want to wait for Auto-Neg to complete here, or
+	 * check at a later time (for example, callback routine).
+	 */
+	if (phy->autoneg_wait_to_complete) {
+		ret_val = igb_wait_autoneg(hw);
+		if (ret_val) {
+			hw_dbg("Error while waiting for autoneg to complete\n");
+			goto out;
+		}
+	}
+
+	hw->mac.get_link_status = true;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_phy_setup_autoneg - Configure PHY for auto-negotiation
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the MII auto-neg advertisement register and/or the 1000T control
+ *  register and, if the PHY is already set up for auto-negotiation, returns
+ *  successfully.  Otherwise, sets up advertisement and flow control to the
+ *  appropriate values for the desired auto-negotiation.
+ **/
+static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 mii_autoneg_adv_reg;
+	u16 mii_1000t_ctrl_reg = 0;
+
+	phy->autoneg_advertised &= phy->autoneg_mask;
+
+	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
+	ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+	if (ret_val)
+		goto out;
+
+	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+		/* Read the MII 1000Base-T Control Register (Address 9). */
+		ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
+					    &mii_1000t_ctrl_reg);
+		if (ret_val)
+			goto out;
+	}
+
+	/* Need to parse both autoneg_advertised and fc and set up
+	 * the appropriate PHY registers.  First we will parse for
+	 * autoneg_advertised software override.  Since we can advertise
+	 * a plethora of combinations, we need to check each bit
+	 * individually.
+	 */
+
+	/* First we clear all the 10/100 mb speed bits in the Auto-Neg
+	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
+	 * the  1000Base-T Control Register (Address 9).
+	 */
+	mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
+				 NWAY_AR_100TX_HD_CAPS |
+				 NWAY_AR_10T_FD_CAPS   |
+				 NWAY_AR_10T_HD_CAPS);
+	mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+
+	hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
+
+	/* Do we want to advertise 10 Mb Half Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+		hw_dbg("Advertise 10mb Half duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+	}
+
+	/* Do we want to advertise 10 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+		hw_dbg("Advertise 10mb Full duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+	}
+
+	/* Do we want to advertise 100 Mb Half Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+		hw_dbg("Advertise 100mb Half duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+	}
+
+	/* Do we want to advertise 100 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+		hw_dbg("Advertise 100mb Full duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+	}
+
+	/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+	if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
+		hw_dbg("Advertise 1000mb Half duplex request denied!\n");
+
+	/* Do we want to advertise 1000 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+		hw_dbg("Advertise 1000mb Full duplex\n");
+		mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+	}
+
+	/* Check for a software override of the flow control settings, and
+	 * setup the PHY advertisement registers accordingly.  If
+	 * auto-negotiation is enabled, then software will have to set the
+	 * "PAUSE" bits to the correct value in the Auto-Negotiation
+	 * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+	 * negotiation.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause frames
+	 *          but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *          but we do not support receiving pause frames).
+	 *      3:  Both Rx and TX flow control (symmetric) are enabled.
+	 *  other:  No software override.  The flow control configuration
+	 *          in the EEPROM is used.
+	 */
+	switch (hw->fc.current_mode) {
+	case e1000_fc_none:
+		/* Flow control (RX & TX) is completely disabled by a
+		 * software over-ride.
+		 */
+		mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	case e1000_fc_rx_pause:
+		/* RX Flow control is enabled, and TX Flow control is
+		 * disabled, by a software over-ride.
+		 *
+		 * Since there really isn't a way to advertise that we are
+		 * capable of RX Pause ONLY, we will advertise that we
+		 * support both symmetric and asymmetric RX PAUSE.  Later
+		 * (in e1000_config_fc_after_link_up) we will disable the
+		 * hw's ability to send PAUSE frames.
+		 */
+		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	case e1000_fc_tx_pause:
+		/* TX Flow control is enabled, and RX Flow control is
+		 * disabled, by a software over-ride.
+		 */
+		mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+		mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+		break;
+	case e1000_fc_full:
+		/* Flow control (both RX and TX) is enabled by a software
+		 * over-ride.
+		 */
+		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	default:
+		hw_dbg("Flow control param set incorrectly\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+	if (ret_val)
+		goto out;
+
+	hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+		ret_val = phy->ops.write_reg(hw,
+					     PHY_1000T_CTRL,
+					     mii_1000t_ctrl_reg);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_setup_copper_link - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the appropriate function to configure the link for auto-neg or forced
+ *  speed and duplex.  Then we check for link; once link is established, the
+ *  collision distance and flow control are configured.  If link is
+ *  not established, we return -E1000_ERR_PHY (-2).
+ **/
+s32 igb_setup_copper_link(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	bool link;
+
+	if (hw->mac.autoneg) {
+		/* Setup autoneg and flow control advertisement and perform
+		 * autonegotiation.
+		 */
+		ret_val = igb_copper_link_autoneg(hw);
+		if (ret_val)
+			goto out;
+	} else {
+		/* PHY will be set to 10H, 10F, 100H or 100F
+		 * depending on user settings.
+		 */
+		hw_dbg("Forcing Speed and Duplex\n");
+		ret_val = hw->phy.ops.force_speed_duplex(hw);
+		if (ret_val) {
+			hw_dbg("Error Forcing Speed and Duplex\n");
+			goto out;
+		}
+	}
+
+	/* Check link status. Wait up to 100 microseconds for link to become
+	 * valid.
+	 */
+	ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
+	if (ret_val)
+		goto out;
+
+	if (link) {
+		hw_dbg("Valid link established!!!\n");
+		igb_config_collision_dist(hw);
+		ret_val = igb_config_fc_after_link_up(hw);
+	} else {
+		hw_dbg("Unable to establish link!!!\n");
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Waits for link and returns success
+ *  if link comes up, else -E1000_ERR_PHY (-2).
+ **/
+s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	igb_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Clear Auto-Crossover to force MDI manually.  IGP requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+	phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+	ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	hw_dbg("IGP PSCR: %X\n", phy_data);
+
+	udelay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
+
+		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link);
+		if (ret_val)
+			goto out;
+
+		if (!link)
+			hw_dbg("Link taking longer than expected.\n");
+
+		/* Try once more */
+		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Resets the PHY to commit the
+ *  changes.  If time expires while waiting for link up, we reset the DSP.
+ *  After reset, TX_CLK and CRS on TX must be set.  Returns 0 upon successful
+ *  completion, else the corresponding error code.
+ **/
+s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	/* I210 and I211 devices support Auto-Crossover in forced operation. */
+	if (phy->type != e1000_phy_i210) {
+		/* Clear Auto-Crossover to force MDI manually.  M88E1000
+		 * requires MDI forced whenever speed and duplex are forced.
+		 */
+		ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
+					    &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+		ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
+					     phy_data);
+		if (ret_val)
+			goto out;
+
+		hw_dbg("M88E1000 PSCR: %X\n", phy_data);
+	}
+
+	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	igb_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Reset the phy to commit changes. */
+	ret_val = igb_phy_sw_reset(hw);
+	if (ret_val)
+		goto out;
+
+	if (phy->autoneg_wait_to_complete) {
+		hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
+
+		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
+		if (ret_val)
+			goto out;
+
+		if (!link) {
+			bool reset_dsp = true;
+
+			switch (hw->phy.id) {
+			case I347AT4_E_PHY_ID:
+			case M88E1112_E_PHY_ID:
+			case I210_I_PHY_ID:
+				reset_dsp = false;
+				break;
+			default:
+				if (hw->phy.type != e1000_phy_m88)
+					reset_dsp = false;
+				break;
+			}
+			if (!reset_dsp)
+				hw_dbg("Link taking longer than expected.\n");
+			else {
+				/* We didn't get link.
+				 * Reset the DSP and cross our fingers.
+				 */
+				ret_val = phy->ops.write_reg(hw,
+						M88E1000_PHY_PAGE_SELECT,
+						0x001d);
+				if (ret_val)
+					goto out;
+				ret_val = igb_phy_reset_dsp(hw);
+				if (ret_val)
+					goto out;
+			}
+		}
+
+		/* Try once more */
+		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT,
+					   100000, &link);
+		if (ret_val)
+			goto out;
+	}
+
+	if (hw->phy.type != e1000_phy_m88 ||
+	    hw->phy.id == I347AT4_E_PHY_ID ||
+	    hw->phy.id == M88E1112_E_PHY_ID ||
+	    hw->phy.id == I210_I_PHY_ID)
+		goto out;
+
+	ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Resetting the phy means we need to re-force TX_CLK in the
+	 * Extended PHY Specific Control Register to 25MHz clock from
+	 * the reset value of 2.5MHz.
+	 */
+	phy_data |= M88E1000_EPSCR_TX_CLK_25;
+	ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* In addition, we must re-enable CRS on Tx for both half and full
+	 * duplex.
+	 */
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+	ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ *  Forces speed and duplex on the PHY by doing the following: disable flow
+ *  control, force speed/duplex on the MAC, disable auto speed detection,
+ *  disable auto-negotiation, configure duplex, configure speed, configure
+ *  the collision distance, write configuration to CTRL register.  The
+ *  caller must write to the PHY_CONTROL register for these settings to
+ *  take effect.
+ **/
+static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
+					     u16 *phy_ctrl)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 ctrl;
+
+	/* Turn off flow control when forcing speed/duplex */
+	hw->fc.current_mode = e1000_fc_none;
+
+	/* Force speed/duplex on the mac */
+	ctrl = rd32(E1000_CTRL);
+	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ctrl &= ~E1000_CTRL_SPD_SEL;
+
+	/* Disable Auto Speed Detection */
+	ctrl &= ~E1000_CTRL_ASDE;
+
+	/* Disable autoneg on the phy */
+	*phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+
+	/* Forcing Full or Half Duplex? */
+	if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
+		ctrl &= ~E1000_CTRL_FD;
+		*phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+		hw_dbg("Half Duplex\n");
+	} else {
+		ctrl |= E1000_CTRL_FD;
+		*phy_ctrl |= MII_CR_FULL_DUPLEX;
+		hw_dbg("Full Duplex\n");
+	}
+
+	/* Forcing 10mb or 100mb? */
+	if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
+		ctrl |= E1000_CTRL_SPD_100;
+		*phy_ctrl |= MII_CR_SPEED_100;
+		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+		hw_dbg("Forcing 100mb\n");
+	} else {
+		ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+		*phy_ctrl |= MII_CR_SPEED_10;
+		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+		hw_dbg("Forcing 10mb\n");
+	}
+
+	igb_config_collision_dist(hw);
+
+	wr32(E1000_CTRL, ctrl);
+}
+
+/**
+ *  igb_set_d3_lplu_state - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D3
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 data;
+
+	if (!(hw->phy.ops.read_reg))
+		goto out;
+
+	ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+	if (ret_val)
+		goto out;
+
+	if (!active) {
+		data &= ~IGP02E1000_PM_D3_LPLU;
+		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+					     data);
+		if (ret_val)
+			goto out;
+		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = phy->ops.read_reg(hw,
+						    IGP01E1000_PHY_PORT_CONFIG,
+						    &data);
+			if (ret_val)
+				goto out;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = phy->ops.write_reg(hw,
+						     IGP01E1000_PHY_PORT_CONFIG,
+						     data);
+			if (ret_val)
+				goto out;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = phy->ops.read_reg(hw,
+						     IGP01E1000_PHY_PORT_CONFIG,
+						     &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = phy->ops.write_reg(hw,
+						     IGP01E1000_PHY_PORT_CONFIG,
+						     data);
+			if (ret_val)
+				goto out;
+		}
+	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= IGP02E1000_PM_D3_LPLU;
+		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+					      data);
+		if (ret_val)
+			goto out;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+					    &data);
+		if (ret_val)
+			goto out;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+					     data);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_check_downshift - Checks whether a downshift in speed occurred
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  A downshift is detected by querying the PHY link health.
+ **/
+s32 igb_check_downshift(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, offset, mask;
+
+	switch (phy->type) {
+	case e1000_phy_i210:
+	case e1000_phy_m88:
+	case e1000_phy_gg82563:
+		offset	= M88E1000_PHY_SPEC_STATUS;
+		mask	= M88E1000_PSSR_DOWNSHIFT;
+		break;
+	case e1000_phy_igp_2:
+	case e1000_phy_igp:
+	case e1000_phy_igp_3:
+		offset	= IGP01E1000_PHY_LINK_HEALTH;
+		mask	= IGP01E1000_PLHR_SS_DOWNGRADE;
+		break;
+	default:
+		/* speed downshift not supported */
+		phy->speed_downgraded = false;
+		ret_val = 0;
+		goto out;
+	}
+
+	ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+	if (!ret_val)
+		phy->speed_downgraded = (phy_data & mask) ? true : false;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_check_polarity_m88 - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY specific status register.
+ **/
+s32 igb_check_polarity_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ *  igb_check_polarity_igp - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY port status register, and the
+ *  current speed (since there is no polarity at 100Mbps).
+ **/
+static s32 igb_check_polarity_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data, offset, mask;
+
+	/* Polarity is determined based on the speed of
+	 * our connection.
+	 */
+	ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		goto out;
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		offset	= IGP01E1000_PHY_PCS_INIT_REG;
+		mask	= IGP01E1000_PHY_POLARITY_MASK;
+	} else {
+		/* This really only applies to 10Mbps since
+		 * there is no polarity for 100Mbps (always 0).
+		 */
+		offset	= IGP01E1000_PHY_PORT_STATUS;
+		mask	= IGP01E1000_PSSR_POLARITY_REVERSED;
+	}
+
+	ret_val = phy->ops.read_reg(hw, offset, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & mask)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_wait_autoneg - Wait for auto-neg completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Waits for auto-negotiation to complete or for the auto-negotiation time
+ *  limit to expire, whichever happens first.
+ **/
+static s32 igb_wait_autoneg(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 i, phy_status;
+
+	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_AUTONEG_COMPLETE)
+			break;
+		msleep(100);
+	}
+
+	/* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+	 * has completed.
+	 */
+	return ret_val;
+}
+
+/**
+ *  igb_phy_has_link - Polls PHY for link
+ *  @hw: pointer to the HW structure
+ *  @iterations: number of times to poll for link
+ *  @usec_interval: delay between polling attempts
+ *  @success: pointer to whether polling was successful or not
+ *
+ *  Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
+		     u32 usec_interval, bool *success)
+{
+	s32 ret_val = 0;
+	u16 i, phy_status;
+
+	for (i = 0; i < iterations; i++) {
+		/* Some PHYs require the PHY_STATUS register to be read
+		 * twice due to the link bit being sticky.  No harm doing
+		 * it across the board.
+		 */
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val && usec_interval > 0) {
+			/* If the first read fails, another entity may have
+			 * ownership of the resources, wait and try again to
+			 * see if they have relinquished the resources yet.
+			 */
+			if (usec_interval >= 1000)
+				mdelay(usec_interval/1000);
+			else
+				udelay(usec_interval);
+		}
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_LINK_STATUS)
+			break;
+		if (usec_interval >= 1000)
+			mdelay(usec_interval/1000);
+		else
+			udelay(usec_interval);
+	}
+
+	*success = (i < iterations) ? true : false;
+
+	return ret_val;
+}
+
+/**
+ *  igb_get_cable_length_m88 - Determine cable length for m88 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY specific status register to retrieve the cable length
+ *  information.  The cable length is determined by averaging the minimum and
+ *  maximum values to get the "average" cable length.  The m88 PHY has five
+ *  possible cable length values, which are:
+ *	Register Value		Cable Length
+ *	0			< 50 meters
+ *	1			50 - 80 meters
+ *	2			80 - 110 meters
+ *	3			110 - 140 meters
+ *	4			> 140 meters
+ **/
+s32 igb_get_cable_length_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, index;
+
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+		M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+	if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+	phy->min_cable_length = e1000_m88_cable_length_table[index];
+	phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
+
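+/**
+ *  igb_get_cable_length_m88_gen2 - Determine cable length for m88 gen2 PHYs
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY cable diagnostics registers (selecting the diagnostics page
+ *  first where the PHY requires it) and fills the min, max and average cable
+ *  length fields of the phy structure, in meters.  Supported for the I210,
+ *  M88E1543, I347AT4 and M88E1112 PHYs; other PHY ids return -E1000_ERR_PHY.
+ **/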
+s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, phy_data2, index, default_page, is_cm;
+
+	switch (hw->phy.id) {
+	case I210_I_PHY_ID:
+		/* Get cable length from PHY Cable Diagnostics Control Reg */
+		ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+					    (I347AT4_PCDL + phy->addr),
+					    &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		/* Check if the unit of cable length is meters or cm */
+		ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+					    I347AT4_PCDC, &phy_data2);
+		if (ret_val)
+			return ret_val;
+
+		is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+		/* Populate the phy structure with cable length in meters */
+		phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+		phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+		phy->cable_length = phy_data / (is_cm ? 100 : 1);
+		break;
+	case M88E1543_E_PHY_ID:
+	case I347AT4_E_PHY_ID:
+		/* Remember the original page select and set it to 7 */
+		ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+					    &default_page);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07);
+		if (ret_val)
+			goto out;
+
+		/* Get cable length from PHY Cable Diagnostics Control Reg */
+		ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr),
+					    &phy_data);
+		if (ret_val)
+			goto out;
+
+		/* Check if the unit of cable length is meters or cm */
+		ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2);
+		if (ret_val)
+			goto out;
+
+		is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+		/* Populate the phy structure with cable length in meters */
+		phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+		phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+		phy->cable_length = phy_data / (is_cm ? 100 : 1);
+
+		/* Reset the page select to its original value */
+		ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+					     default_page);
+		if (ret_val)
+			goto out;
+		break;
+	case M88E1112_E_PHY_ID:
+		/* Remember the original page select and set it to 5 */
+		ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+					    &default_page);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE,
+					    &phy_data);
+		if (ret_val)
+			goto out;
+
+		index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+			M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+		if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+
+		phy->min_cable_length = e1000_m88_cable_length_table[index];
+		phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+		phy->cable_length = (phy->min_cable_length +
+				     phy->max_cable_length) / 2;
+
+		/* Reset the page select to its original value */
+		ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+					     default_page);
+		if (ret_val)
+			goto out;
+
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  The automatic gain control (agc) normalizes the amplitude of the
+ *  received signal, adjusting for the attenuation produced by the
+ *  cable.  By reading the AGC registers, which represent the
+ *  combination of coarse and fine gain value, the value can be put
+ *  into a lookup table to obtain the approximate cable length
+ *  for each channel.
+ **/
+s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 phy_data, i, agc_value = 0;
+	u16 cur_agc_index, max_agc_index = 0;
+	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+	static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+		IGP02E1000_PHY_AGC_A,
+		IGP02E1000_PHY_AGC_B,
+		IGP02E1000_PHY_AGC_C,
+		IGP02E1000_PHY_AGC_D
+	};
+
+	/* Read the AGC registers for all channels */
+	for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+		ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
+		if (ret_val)
+			goto out;
+
+		/* Getting bits 15:9, which represent the combination of
+		 * coarse and fine gain values.  The result is a number
+		 * that can be put into the lookup table to obtain the
+		 * approximate cable length.
+		 */
+		cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+				IGP02E1000_AGC_LENGTH_MASK;
+
+		/* Array index bound check. */
+		if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+		    (cur_agc_index == 0)) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+
+		/* Remove min & max AGC values from calculation. */
+		if (e1000_igp_2_cable_length_table[min_agc_index] >
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			min_agc_index = cur_agc_index;
+		if (e1000_igp_2_cable_length_table[max_agc_index] <
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			max_agc_index = cur_agc_index;
+
+		agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+	}
+
+	agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+		      e1000_igp_2_cable_length_table[max_agc_index]);
+	agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+	/* Calculate cable length with the error range of +/- 10 meters. */
+	phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+				 (agc_value - IGP02E1000_AGC_RANGE) : 0;
+	phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_phy_info_m88 - Retrieve PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Valid for only copper links.  Read the PHY status register (sticky read)
+ *  to verify that link is up.  Read the PHY special control register to
+ *  determine the polarity and 10base-T extended distance.  Read the PHY
+ *  special status register to determine MDI/MDIx and current speed.  If
+ *  speed is 1000, then determine cable length, local and remote receiver.
+ **/
+s32 igb_get_phy_info_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32  ret_val;
+	u16 phy_data;
+	bool link;
+
+	if (phy->media_type != e1000_media_type_copper) {
+		hw_dbg("Phy info is only valid for copper media\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = igb_phy_has_link(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		hw_dbg("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
+				   ? true : false;
+
+	ret_val = igb_check_polarity_m88(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false;
+
+	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+		ret_val = phy->ops.get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
+				? e1000_1000t_rx_status_ok
+				: e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
+				 ? e1000_1000t_rx_status_ok
+				 : e1000_1000t_rx_status_not_ok;
+	} else {
+		/* Set values to "undefined" */
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_phy_info_igp - Retrieve igp PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Read PHY status to determine if link is up.  If link is up, then
+ *  set/determine 10base-T extended distance and polarity correction.  Read
+ *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
+ *  determine the cable length, local and remote receiver.
+ **/
+s32 igb_get_phy_info_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	ret_val = igb_phy_has_link(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		hw_dbg("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	phy->polarity_correction = true;
+
+	ret_val = igb_check_polarity_igp(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false;
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		ret_val = phy->ops.get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+		if (ret_val)
+			goto out;
+
+		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+				? e1000_1000t_rx_status_ok
+				: e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+				 ? e1000_1000t_rx_status_ok
+				 : e1000_1000t_rx_status_not_ok;
+	} else {
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_phy_sw_reset - PHY software reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Does a software reset of the PHY by reading the PHY control register and
+ *  setting/writing the control register reset bit to the PHY.
+ **/
+s32 igb_phy_sw_reset(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 phy_ctrl;
+
+	if (!(hw->phy.ops.read_reg))
+		goto out;
+
+	ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	phy_ctrl |= MII_CR_RESET;
+	ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	udelay(1);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_phy_hw_reset - PHY hardware reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Verify the reset block is not blocking us from resetting.  Acquire
+ *  semaphore (if necessary) and read/set/write the device control reset
+ *  bit in the PHY.  Wait the appropriate delay time for the device to
+ *  reset and release the semaphore (if necessary).
+ **/
+s32 igb_phy_hw_reset(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32  ret_val;
+	u32 ctrl;
+
+	ret_val = igb_check_reset_block(hw);
+	if (ret_val) {
+		ret_val = 0;
+		goto out;
+	}
+
+	ret_val = phy->ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	ctrl = rd32(E1000_CTRL);
+	wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+	wrfl();
+
+	udelay(phy->reset_delay_us);
+
+	wr32(E1000_CTRL, ctrl);
+	wrfl();
+
+	udelay(150);
+
+	phy->ops.release(hw);
+
+	ret_val = phy->ops.get_cfg_done(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_phy_init_script_igp3 - Inits the IGP3 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
+ **/
+s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
+{
+	hw_dbg("Running IGP 3 PHY init script\n");
+
+	/* PHY init IGP 3 */
+	/* Enable rise/fall, 10-mode work in class-A */
+	hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
+	/* Remove all caps from Replica path filter */
+	hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
+	/* Bias trimming for ADC, AFE and Driver (Default) */
+	hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
+	/* Increase Hybrid poly bias */
+	hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
+	/* Add 4% to TX amplitude in Giga mode */
+	hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
+	/* Disable trimming (TTT) */
+	hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
+	/* Poly DC correction to 94.6% + 2% for all channels */
+	hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
+	/* ABS DC correction to 95.9% */
+	hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
+	/* BG temp curve trim */
+	hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
+	/* Increasing ADC OPAMP stage 1 currents to max */
+	hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
+	/* Force 1000 (required for enabling PHY regs configuration) */
+	hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
+	/* Set upd_freq to 6 */
+	hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
+	/* Disable NPDFE */
+	hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
+	/* Disable adaptive fixed FFE (Default) */
+	hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
+	/* Enable FFE hysteresis */
+	hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
+	/* Fixed FFE for short cable lengths */
+	hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
+	/* Fixed FFE for medium cable lengths */
+	hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
+	/* Fixed FFE for long cable lengths */
+	hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
+	/* Enable Adaptive Clip Threshold */
+	hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
+	/* AHT reset limit to 1 */
+	hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
+	/* Set AHT master delay to 127 msec */
+	hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
+	/* Set scan bits for AHT */
+	hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
+	/* Set AHT Preset bits */
+	hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
+	/* Change integ_factor of channel A to 3 */
+	hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
+	/* Change prop_factor of channels BCD to 8 */
+	hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
+	/* Change cg_icount + enable integbp for channels BCD */
+	hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
+	/* Change cg_icount + enable integbp + change prop_factor_master
+	 * to 8 for channel A
+	 */
+	hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
+	/* Disable AHT in Slave mode on channel A */
+	hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
+	/* Enable LPLU and disable AN to 1000 in non-D0a states,
+	 * Enable SPD+B2B
+	 */
+	hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
+	/* Enable restart AN on an1000_dis change */
+	hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
+	/* Enable wh_fifo read clock in 10/100 modes */
+	hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
+	/* Restart AN, Speed selection is 1000 */
+	hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
+
+	return 0;
+}
+
+/**
+ * igb_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, restore the link to previous settings.
+ **/
+void igb_power_up_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+	mii_reg &= ~MII_CR_POWER_DOWN;
+	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * igb_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down PHY to save power when interface is down and wake on lan
+ * is not enabled.
+ **/
+void igb_power_down_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+	mii_reg |= MII_CR_POWER_DOWN;
+	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+	usleep_range(1000, 2000);
+}
+
+/**
+ *  igb_check_polarity_82580 - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY specific status register.
+ **/
+static s32 igb_check_polarity_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ *  igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Waits for link and returns success
+ *  if link comes up, else -E1000_ERR_PHY (-2).
+ **/
+s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	igb_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Clear Auto-Crossover to force MDI manually.  82580 requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
+
+	ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
+	if (ret_val)
+		goto out;
+
+	hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data);
+
+	udelay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n");
+
+		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
+		if (ret_val)
+			goto out;
+
+		if (!link)
+			hw_dbg("Link taking longer than expected.\n");
+
+		/* Try once more */
+		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_phy_info_82580 - Retrieve I82580 PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Read PHY status to determine if link is up.  If link is up, then
+ *  set/determine 10base-T extended distance and polarity correction.  Read
+ *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
+ *  determine the cable length, local and remote receiver.
+ **/
+s32 igb_get_phy_info_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	ret_val = igb_phy_has_link(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		hw_dbg("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	phy->polarity_correction = true;
+
+	ret_val = igb_check_polarity_82580(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false;
+
+	if ((data & I82580_PHY_STATUS2_SPEED_MASK) ==
+	    I82580_PHY_STATUS2_SPEED_1000MBPS) {
+		ret_val = hw->phy.ops.get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+		if (ret_val)
+			goto out;
+
+		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+				? e1000_1000t_rx_status_ok
+				: e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+				 ? e1000_1000t_rx_status_ok
+				 : e1000_1000t_rx_status_not_ok;
+	} else {
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_cable_length_82580 - Determine cable length for 82580 PHY
+ *  @hw: pointer to the HW structure
+ *
+ * Reads the diagnostic status register and verifies the result is valid before
+ * placing it in the phy->cable_length field.
+ **/
+s32 igb_get_cable_length_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, length;
+
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >>
+		 I82580_DSTATUS_CABLE_LENGTH_SHIFT;
+
+	if (length == E1000_CABLE_LENGTH_UNDEFINED)
+		ret_val = -E1000_ERR_PHY;
+
+	phy->cable_length = length;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_phy_reg_gs40g - Write GS40G PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: lower half is register offset to write to
+ *     upper half is page to use.
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
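+ *  The page is taken from the bits above GS40G_PAGE_SHIFT; the bits covered
+ *  by GS40G_OFFSET_MASK select the register within that page.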
+ **/
+s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+	u16 page = offset >> GS40G_PAGE_SHIFT;
+
+	offset = offset & GS40G_OFFSET_MASK;
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+	if (ret_val)
+		goto release;
+	ret_val = igb_write_phy_reg_mdic(hw, offset, data);
+
+release:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ *  igb_read_phy_reg_gs40g - Read GS40G  PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: lower half is register offset to read from
+ *     upper half is page to use.
+ *  @data: data to read at register offset
+ *
+ *  Acquires semaphore, if necessary, then reads the data in the PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+	u16 page = offset >> GS40G_PAGE_SHIFT;
+
+	offset = offset & GS40G_OFFSET_MASK;
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+	if (ret_val)
+		goto release;
+	ret_val = igb_read_phy_reg_mdic(hw, offset, data);
+
+release:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ *  igb_set_master_slave_mode - Setup PHY for Master/slave mode
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up Master/slave mode
+ **/
+static s32 igb_set_master_slave_mode(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_data;
+
+	/* Resolve Master/Slave mode */
+	ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/* load defaults for future use */
+	hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
+				   ((phy_data & CR_1000T_MS_VALUE) ?
+				    e1000_ms_force_master :
+				    e1000_ms_force_slave) : e1000_ms_auto;
+
+	switch (hw->phy.ms_type) {
+	case e1000_ms_force_master:
+		phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+		break;
+	case e1000_ms_force_slave:
+		phy_data |= CR_1000T_MS_ENABLE;
+		phy_data &= ~(CR_1000T_MS_VALUE);
+		break;
+	case e1000_ms_auto:
+		phy_data &= ~CR_1000T_MS_ENABLE;
+		fallthrough;
+	default:
+		break;
+	}
+
+	return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data);
+}
+++ linux-patched/drivers/xenomai/net/drivers/igb/igb.h	2022-03-21 12:58:29.437888493 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/igb/e1000_regs.h	1970-01-01 01:00:00.000000000 +0100
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _IGB_H_
+#define _IGB_H_
+
+#include "e1000_mac.h"
+#include "e1000_82575.h"
+
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/pci.h>
+#include <linux/mdio.h>
+
+#include <rtdev.h>
+
+#undef CONFIG_IGB_HWMON
+
+struct igb_adapter;
+
+#define E1000_PCS_CFG_IGN_SD	1
+
+/* Interrupt defines */
+#define IGB_START_ITR		648 /* ~6000 ints/sec */
+#define IGB_4K_ITR		980 /* ~4000 ints/sec */
+#define IGB_20K_ITR		196 /* ~20000 ints/sec */
+#define IGB_70K_ITR		56 /* ~70000 ints/sec */
+
+/* TX/RX descriptor defines */
+#define IGB_DEFAULT_TXD		256
+#define IGB_DEFAULT_TX_WORK	128
+#define IGB_MIN_TXD		80
+#define IGB_MAX_TXD		4096
+
+#define IGB_DEFAULT_RXD		256
+#define IGB_MIN_RXD		80
+#define IGB_MAX_RXD		4096
+
+#define IGB_DEFAULT_ITR		3 /* dynamic */
+#define IGB_MAX_ITR_USECS	10000
+#define IGB_MIN_ITR_USECS	10
+#define NON_Q_VECTORS		1
+#define MAX_Q_VECTORS		8
+#define MAX_MSIX_ENTRIES	10
+
+/* Transmit and receive queues */
+#define IGB_MAX_RX_QUEUES	8
+#define IGB_MAX_RX_QUEUES_82575	4
+#define IGB_MAX_RX_QUEUES_I211	2
+#define IGB_MAX_TX_QUEUES	8
+#define IGB_MAX_VF_MC_ENTRIES	30
+#define IGB_MAX_VF_FUNCTIONS	8
+#define IGB_MAX_VFTA_ENTRIES	128
+#define IGB_82576_VF_DEV_ID	0x10CA
+#define IGB_I350_VF_DEV_ID	0x1520
+
+/* NVM version defines */
+#define IGB_MAJOR_MASK		0xF000
+#define IGB_MINOR_MASK		0x0FF0
+#define IGB_BUILD_MASK		0x000F
+#define IGB_COMB_VER_MASK	0x00FF
+#define IGB_MAJOR_SHIFT		12
+#define IGB_MINOR_SHIFT		4
+#define IGB_COMB_VER_SHFT	8
+#define IGB_NVM_VER_INVALID	0xFFFF
+#define IGB_ETRACK_SHIFT	16
+#define NVM_ETRACK_WORD		0x0042
+#define NVM_COMB_VER_OFF	0x0083
+#define NVM_COMB_VER_PTR	0x003d
+
+struct vf_data_storage {
+	unsigned char vf_mac_addresses[ETH_ALEN];
+	u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
+	u16 num_vf_mc_hashes;
+	u16 vlans_enabled;
+	u32 flags;
+	unsigned long last_nack;
+	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+	u16 pf_qos;
+	u16 tx_rate;
+	bool spoofchk_enabled;
+};
+
+#define IGB_VF_FLAG_CTS            0x00000001 /* VF is clear to send data */
+#define IGB_VF_FLAG_UNI_PROMISC    0x00000002 /* VF has unicast promisc */
+#define IGB_VF_FLAG_MULTI_PROMISC  0x00000004 /* VF has multicast promisc */
+#define IGB_VF_FLAG_PF_SET_MAC     0x00000008 /* PF has set MAC address */
+
+/* RX descriptor control thresholds.
+ * PTHRESH - MAC will consider prefetch if it has fewer than this number of
+ *           descriptors available in its onboard memory.
+ *           Setting this to 0 disables RX descriptor prefetch.
+ * HTHRESH - MAC will only prefetch if there are at least this many descriptors
+ *           available in host memory.
+ *           If PTHRESH is 0, this should also be 0.
+ * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
+ *           descriptors until either it has this many to write back, or the
+ *           ITR timer expires.
+ */
+#define IGB_RX_PTHRESH	((hw->mac.type == e1000_i354) ? 12 : 8)
+#define IGB_RX_HTHRESH	8
+#define IGB_TX_PTHRESH	((hw->mac.type == e1000_i354) ? 20 : 8)
+#define IGB_TX_HTHRESH	1
+#define IGB_RX_WTHRESH	((hw->mac.type == e1000_82576 && \
+			  (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4)
+#define IGB_TX_WTHRESH	((hw->mac.type == e1000_82576 && \
+			  (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16)
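For reference, a standalone sketch (not part of this patch) of how such per-queue thresholds are conventionally folded into one descriptor-control register value; the field positions (PTHRESH at bits 4:0, HTHRESH at bits 12:8, WTHRESH at bits 20:16) and widths are assumptions based on the usual e1000 RXDCTL/TXDCTL layout, not taken from this driver.

#include <stdint.h>

/* Illustrative only: pack the three thresholds into one 32-bit
 * descriptor-control word under the assumed bit layout above.
 */
static uint32_t pack_dctl(uint32_t pthresh, uint32_t hthresh, uint32_t wthresh)
{
	return (pthresh & 0x1f) | ((hthresh & 0x1f) << 8) | ((wthresh & 0x1f) << 16);
}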
+
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
+/* Supported Rx Buffer Sizes */
+#define IGB_RXBUFFER_256	256
+#define IGB_RXBUFFER_2048	2048
+#define IGB_RX_HDR_LEN		IGB_RXBUFFER_256
+#define IGB_RX_BUFSZ		IGB_RXBUFFER_2048
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define IGB_RX_BUFFER_WRITE	16 /* Must be power of 2 */
+
+#define AUTO_ALL_MODES		0
+#define IGB_EEPROM_APME		0x0400
+
+#ifndef IGB_MASTER_SLAVE
+/* Switch to override PHY master/slave setting */
+#define IGB_MASTER_SLAVE	e1000_ms_hw_default
+#endif
+
+#define IGB_MNG_VLAN_NONE	-1
+
+enum igb_tx_flags {
+	/* cmd_type flags */
+	IGB_TX_FLAGS_VLAN	= 0x01,
+	IGB_TX_FLAGS_TSO	= 0x02,
+	IGB_TX_FLAGS_TSTAMP	= 0x04,
+
+	/* olinfo flags */
+	IGB_TX_FLAGS_IPV4	= 0x10,
+	IGB_TX_FLAGS_CSUM	= 0x20,
+};
+
+/* VLAN info */
+#define IGB_TX_FLAGS_VLAN_MASK	0xffff0000
+#define IGB_TX_FLAGS_VLAN_SHIFT	16
+
+/* The largest size we can write to the descriptor is 65535.  In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
+ */
+#define IGB_MAX_TXD_PWR	15
+#define IGB_MAX_DATA_PER_TXD	(1 << IGB_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
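A quick standalone check of the descriptor arithmetic above (throwaway user-space code, not driver code): with IGB_MAX_DATA_PER_TXD = 32768, a 60 KiB payload occupies two data descriptors, which is the per-fragment worst case DESC_NEEDED has to leave headroom for.

#include <stdio.h>

#define IGB_MAX_TXD_PWR		15
#define IGB_MAX_DATA_PER_TXD	(1 << IGB_MAX_TXD_PWR)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)

int main(void)
{
	printf("%d\n", TXD_USE_COUNT(61440));	/* 60 KiB -> 2 descriptors */
	printf("%d\n", TXD_USE_COUNT(32768));	/* exactly 32 KiB -> 1 descriptor */
	return 0;
}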
+
+/* EEPROM byte offsets */
+#define IGB_SFF_8472_SWAP		0x5C
+#define IGB_SFF_8472_COMP		0x5E
+
+/* Bitmasks */
+#define IGB_SFF_ADDRESSING_MODE		0x4
+#define IGB_SFF_8472_UNSUP		0x00
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct igb_tx_buffer {
+	union e1000_adv_tx_desc *next_to_watch;
+	unsigned long time_stamp;
+	struct rtskb *skb;
+	unsigned int bytecount;
+	u16 gso_segs;
+	__be16 protocol;
+
+	u32 tx_flags;
+};
+
+struct igb_rx_buffer {
+	dma_addr_t dma;
+	struct rtskb *skb;
+};
+
+struct igb_tx_queue_stats {
+	u64 packets;
+	u64 bytes;
+	u64 restart_queue;
+	u64 restart_queue2;
+};
+
+struct igb_rx_queue_stats {
+	u64 packets;
+	u64 bytes;
+	u64 drops;
+	u64 csum_err;
+	u64 alloc_failed;
+};
+
+struct igb_ring_container {
+	struct igb_ring *ring;		/* pointer to linked list of rings */
+	unsigned int total_bytes;	/* total bytes processed this int */
+	unsigned int total_packets;	/* total packets processed this int */
+	u16 work_limit;			/* total work allowed per interrupt */
+	u8 count;			/* total number of rings in vector */
+	u8 itr;				/* current ITR setting for ring */
+};
+
+struct igb_ring {
+	struct igb_q_vector *q_vector;	/* backlink to q_vector */
+	struct rtnet_device *netdev;	/* back pointer to net_device */
+	struct device *dev;		/* device pointer for dma mapping */
+	union {				/* array of buffer info structs */
+		struct igb_tx_buffer *tx_buffer_info;
+		struct igb_rx_buffer *rx_buffer_info;
+	};
+	void *desc;			/* descriptor ring memory */
+	unsigned long flags;		/* ring specific flags */
+	void __iomem *tail;		/* pointer to ring tail register */
+	dma_addr_t dma;			/* phys address of the ring */
+	unsigned int  size;		/* length of desc. ring in bytes */
+
+	u16 count;			/* number of desc. in the ring */
+	u8 queue_index;			/* logical index of the ring */
+	u8 reg_idx;			/* physical index of the ring */
+
+	/* everything past this point are written often */
+	u16 next_to_clean;
+	u16 next_to_use;
+	u16 next_to_alloc;
+
+	union {
+		/* TX */
+		struct {
+			struct igb_tx_queue_stats tx_stats;
+		};
+		/* RX */
+		struct {
+			struct igb_rx_queue_stats rx_stats;
+			u16 rx_buffer_len;
+		};
+	};
+} ____cacheline_internodealigned_in_smp;
+
+struct igb_q_vector {
+	struct igb_adapter *adapter;	/* backlink */
+	int cpu;			/* CPU for DCA */
+	u32 eims_value;			/* EIMS mask value */
+
+	u16 itr_val;
+	u8 set_itr;
+	void __iomem *itr_register;
+
+	struct igb_ring_container rx, tx;
+
+	struct rcu_head rcu;	/* to avoid race with update stats on free */
+	char name[IFNAMSIZ + 9];
+
+	/* for dynamic allocation of rings associated with this q_vector */
+	struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
+};
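The trailing ring[0] member means a q_vector and its rings are allocated as a single block. A hedged sizing sketch follows; the helper name is hypothetical, and it assumes <linux/slab.h> is available, but the driver's own q_vector allocation has to compute an equivalent size.

/* Hypothetical helper: allocate the q_vector plus its trailing rings in one go. */
static struct igb_q_vector *example_alloc_q_vector(unsigned int ring_count)
{
	size_t size = sizeof(struct igb_q_vector) +
		      ring_count * sizeof(struct igb_ring);

	return kzalloc(size, GFP_KERNEL);	/* rings live right after the q_vector */
}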
+
+enum e1000_ring_flags_t {
+	IGB_RING_FLAG_RX_SCTP_CSUM,
+	IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
+	IGB_RING_FLAG_TX_CTX_IDX,
+	IGB_RING_FLAG_TX_DETECT_HANG
+};
+
+#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
+
+#define IGB_RX_DESC(R, i)	\
+	(&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
+#define IGB_TX_DESC(R, i)	\
+	(&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
+#define IGB_TX_CTXTDESC(R, i)	\
+	(&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
+
+/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
+static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
+				      const u32 stat_err_bits)
+{
+	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
+
+/* igb_desc_unused - calculate if we have unused descriptors */
+static inline int igb_desc_unused(struct igb_ring *ring)
+{
+	if (ring->next_to_clean > ring->next_to_use)
+		return ring->next_to_clean - ring->next_to_use - 1;
+
+	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+}
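A self-contained illustration of the wraparound arithmetic in igb_desc_unused() (a standalone user-space mirror with made-up index values, not driver code); note that one slot is always left unused so a full ring can be told apart from an empty one.

#include <assert.h>

static int desc_unused(int count, int next_to_clean, int next_to_use)
{
	if (next_to_clean > next_to_use)
		return next_to_clean - next_to_use - 1;
	return count + next_to_clean - next_to_use - 1;
}

int main(void)
{
	assert(desc_unused(256, 10, 250) == 15);	/* producer wrapped past the end */
	assert(desc_unused(256, 200, 100) == 99);	/* no wrap yet */
	return 0;
}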
+
+#ifdef CONFIG_IGB_HWMON
+
+#define IGB_HWMON_TYPE_LOC	0
+#define IGB_HWMON_TYPE_TEMP	1
+#define IGB_HWMON_TYPE_CAUTION	2
+#define IGB_HWMON_TYPE_MAX	3
+
+struct hwmon_attr {
+	struct device_attribute dev_attr;
+	struct e1000_hw *hw;
+	struct e1000_thermal_diode_data *sensor;
+	char name[12];
+};
+
+struct hwmon_buff {
+	struct attribute_group group;
+	const struct attribute_group *groups[2];
+	struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1];
+	struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4];
+	unsigned int n_hwmon;
+};
+#endif
+
+#define IGB_N_EXTTS	2
+#define IGB_N_PEROUT	2
+#define IGB_N_SDP	4
+#define IGB_RETA_SIZE	128
+
+/* board specific private data structure */
+struct igb_adapter {
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+	struct rtnet_device *netdev;
+
+	unsigned long state;
+	unsigned int flags;
+
+	unsigned int num_q_vectors;
+	struct msix_entry msix_entries[MAX_MSIX_ENTRIES];
+	rtdm_irq_t msix_irq_handle[MAX_MSIX_ENTRIES];
+	rtdm_irq_t irq_handle;
+	rtdm_nrtsig_t watchdog_nrtsig;
+	spinlock_t stats64_lock;
+
+	/* Interrupt Throttle Rate */
+	u32 rx_itr_setting;
+	u32 tx_itr_setting;
+	u16 tx_itr;
+	u16 rx_itr;
+
+	/* TX */
+	u16 tx_work_limit;
+	u32 tx_timeout_count;
+	int num_tx_queues;
+	struct igb_ring *tx_ring[16];
+
+	/* RX */
+	int num_rx_queues;
+	struct igb_ring *rx_ring[16];
+
+	u32 max_frame_size;
+	u32 min_frame_size;
+
+	struct timer_list watchdog_timer;
+	struct timer_list phy_info_timer;
+
+	u16 mng_vlan_id;
+	u32 bd_number;
+	u32 wol;
+	u32 en_mng_pt;
+	u16 link_speed;
+	u16 link_duplex;
+
+	struct work_struct reset_task;
+	struct work_struct watchdog_task;
+	bool fc_autoneg;
+	u8  tx_timeout_factor;
+	struct timer_list blink_timer;
+	unsigned long led_status;
+
+	/* OS defined structs */
+	struct pci_dev *pdev;
+
+	struct net_device_stats net_stats;
+
+	/* structs defined in e1000_hw.h */
+	struct e1000_hw hw;
+	struct e1000_hw_stats stats;
+	struct e1000_phy_info phy_info;
+
+	u32 test_icr;
+	struct igb_ring test_tx_ring;
+	struct igb_ring test_rx_ring;
+
+	struct igb_q_vector *q_vector[MAX_Q_VECTORS];
+	u32 eims_enable_mask;
+	u32 eims_other;
+
+	/* to not mess up cache alignment, always add to the bottom */
+	u16 tx_ring_count;
+	u16 rx_ring_count;
+	int vf_rate_link_speed;
+	u32 rss_queues;
+	u32 wvbr;
+	u32 *shadow_vfta;
+
+	unsigned long last_rx_timestamp;
+
+	char fw_version[32];
+#ifdef CONFIG_IGB_HWMON
+	struct hwmon_buff *igb_hwmon_buff;
+	bool ets;
+#endif
+	struct i2c_algo_bit_data i2c_algo;
+	struct i2c_adapter i2c_adap;
+	struct i2c_client *i2c_client;
+	u32 rss_indir_tbl_init;
+	u8 rss_indir_tbl[IGB_RETA_SIZE];
+
+	unsigned long link_check_timeout;
+	int copper_tries;
+	struct e1000_info ei;
+	u16 eee_advert;
+};
+
+#define IGB_FLAG_HAS_MSI		(1 << 0)
+#define IGB_FLAG_DCA_ENABLED		(1 << 1)
+#define IGB_FLAG_QUAD_PORT_A		(1 << 2)
+#define IGB_FLAG_QUEUE_PAIRS		(1 << 3)
+#define IGB_FLAG_DMAC			(1 << 4)
+#define IGB_FLAG_PTP			(1 << 5)
+#define IGB_FLAG_RSS_FIELD_IPV4_UDP	(1 << 6)
+#define IGB_FLAG_RSS_FIELD_IPV6_UDP	(1 << 7)
+#define IGB_FLAG_WOL_SUPPORTED		(1 << 8)
+#define IGB_FLAG_NEED_LINK_UPDATE	(1 << 9)
+#define IGB_FLAG_MEDIA_RESET		(1 << 10)
+#define IGB_FLAG_MAS_CAPABLE		(1 << 11)
+#define IGB_FLAG_MAS_ENABLE		(1 << 12)
+#define IGB_FLAG_HAS_MSIX		(1 << 13)
+#define IGB_FLAG_EEE			(1 << 14)
+
+/* Media Auto Sense */
+#define IGB_MAS_ENABLE_0		0x0001
+#define IGB_MAS_ENABLE_1		0x0002
+#define IGB_MAS_ENABLE_2		0x0004
+#define IGB_MAS_ENABLE_3		0x0008
+
+/* DMA Coalescing defines */
+#define IGB_MIN_TXPBSIZE	20408
+#define IGB_TX_BUF_4096		4096
+#define IGB_DMCTLX_DCFLUSH_DIS	0x80000000  /* Disable DMA Coal Flush */
+
+#define IGB_82576_TSYNC_SHIFT	19
+#define IGB_TS_HDR_LEN		16
+enum e1000_state_t {
+	__IGB_TESTING,
+	__IGB_RESETTING,
+	__IGB_DOWN,
+	__IGB_PTP_TX_IN_PROGRESS,
+};
+
+enum igb_boards {
+	board_82575,
+};
+
+extern char igb_driver_name[];
+extern char igb_driver_version[];
+
+int igb_up(struct igb_adapter *);
+void igb_down(struct igb_adapter *);
+void igb_reinit_locked(struct igb_adapter *);
+void igb_reset(struct igb_adapter *);
+int igb_reinit_queues(struct igb_adapter *);
+void igb_write_rss_indir_tbl(struct igb_adapter *);
+int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
+int igb_setup_tx_resources(struct igb_ring *);
+int igb_setup_rx_resources(struct igb_ring *);
+void igb_free_tx_resources(struct igb_ring *);
+void igb_free_rx_resources(struct igb_ring *);
+void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_setup_tctl(struct igb_adapter *);
+void igb_setup_rctl(struct igb_adapter *);
+netdev_tx_t igb_xmit_frame_ring(struct rtskb *, struct igb_ring *);
+void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
+void igb_alloc_rx_buffers(struct igb_ring *, u16);
+void igb_update_stats(struct igb_adapter *);
+bool igb_has_link(struct igb_adapter *adapter);
+void igb_set_ethtool_ops(struct rtnet_device *);
+void igb_power_up_link(struct igb_adapter *);
+void igb_set_fw_version(struct igb_adapter *);
+void igb_ptp_init(struct igb_adapter *adapter);
+void igb_ptp_stop(struct igb_adapter *adapter);
+void igb_ptp_reset(struct igb_adapter *adapter);
+void igb_ptp_rx_hang(struct igb_adapter *adapter);
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct rtskb *skb);
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
+			 struct rtskb *skb);
+int igb_ptp_set_ts_config(struct rtnet_device *netdev, struct ifreq *ifr);
+int igb_ptp_get_ts_config(struct rtnet_device *netdev, struct ifreq *ifr);
+#ifdef CONFIG_IGB_HWMON
+void igb_sysfs_exit(struct igb_adapter *adapter);
+int igb_sysfs_init(struct igb_adapter *adapter);
+#endif
+static inline s32 igb_reset_phy(struct e1000_hw *hw)
+{
+	if (hw->phy.ops.reset)
+		return hw->phy.ops.reset(hw);
+
+	return 0;
+}
+
+static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	if (hw->phy.ops.read_reg)
+		return hw->phy.ops.read_reg(hw, offset, data);
+
+	return 0;
+}
+
+static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	if (hw->phy.ops.write_reg)
+		return hw->phy.ops.write_reg(hw, offset, data);
+
+	return 0;
+}
+
+static inline s32 igb_get_phy_info(struct e1000_hw *hw)
+{
+	if (hw->phy.ops.get_phy_info)
+		return hw->phy.ops.get_phy_info(hw);
+
+	return 0;
+}
+
+static inline struct rtnet_device *txring_txq(const struct igb_ring *tx_ring)
+{
+	return tx_ring->netdev;
+}
+
+#endif /* _IGB_H_ */
+++ linux-patched/drivers/xenomai/net/drivers/igb/e1000_regs.h	2022-03-21 12:58:29.432888542 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/igb/e1000_phy.h	1970-01-01 01:00:00.000000000 +0100
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_REGS_H_
+#define _E1000_REGS_H_
+
+#define E1000_CTRL     0x00000  /* Device Control - RW */
+#define E1000_STATUS   0x00008  /* Device Status - RO */
+#define E1000_EECD     0x00010  /* EEPROM/Flash Control - RW */
+#define E1000_EERD     0x00014  /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018  /* Extended Device Control - RW */
+#define E1000_MDIC     0x00020  /* MDI Control - RW */
+#define E1000_MDICNFG  0x00E04  /* MDI Config - RW */
+#define E1000_SCTL     0x00024  /* SerDes Control - RW */
+#define E1000_FCAL     0x00028  /* Flow Control Address Low - RW */
+#define E1000_FCAH     0x0002C  /* Flow Control Address High -RW */
+#define E1000_FCT      0x00030  /* Flow Control Type - RW */
+#define E1000_CONNSW   0x00034  /* Copper/Fiber switch control - RW */
+#define E1000_VET      0x00038  /* VLAN Ether Type - RW */
+#define E1000_TSSDP    0x0003C  /* Time Sync SDP Configuration Register - RW */
+#define E1000_ICR      0x000C0  /* Interrupt Cause Read - R/clr */
+#define E1000_ITR      0x000C4  /* Interrupt Throttling Rate - RW */
+#define E1000_ICS      0x000C8  /* Interrupt Cause Set - WO */
+#define E1000_IMS      0x000D0  /* Interrupt Mask Set - RW */
+#define E1000_IMC      0x000D8  /* Interrupt Mask Clear - WO */
+#define E1000_IAM      0x000E0  /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL     0x00100  /* RX Control - RW */
+#define E1000_FCTTV    0x00170  /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW     0x00178  /* TX Configuration Word - RW */
+#define E1000_EICR     0x01580  /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
+#define E1000_EICS     0x01520  /* Ext. Interrupt Cause Set - WO */
+#define E1000_EIMS     0x01524  /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC     0x01528  /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC     0x0152C  /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM     0x01530  /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_GPIE     0x01514  /* General Purpose Interrupt Enable - RW */
+#define E1000_IVAR0    0x01700  /* Interrupt Vector Allocation (array) - RW */
+#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
+#define E1000_TCTL     0x00400  /* TX Control - RW */
+#define E1000_TCTL_EXT 0x00404  /* Extended TX Control - RW */
+#define E1000_TIPG     0x00410  /* TX Inter-packet gap -RW */
+#define E1000_AIT      0x00458  /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL   0x00E00  /* LED Control - RW */
+#define E1000_LEDMUX   0x08130  /* LED MUX Control */
+#define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
+#define E1000_PBS      0x01008  /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
+#define E1000_EEARBC_I210 0x12024  /* EEPROM Auto Read Bus Control */
+#define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
+#define E1000_I2CCMD   0x01028  /* SFPI2C Command Register - RW */
+#define E1000_FRTIMER  0x01048  /* Free Running Timer - RW */
+#define E1000_TCPTIMER 0x0104C  /* TCP Timer - RW */
+#define E1000_FCRTL    0x02160  /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH    0x02168  /* Flow Control Receive Threshold High - RW */
+#define E1000_FCRTV    0x02460  /* Flow Control Refresh Timer Value - RW */
+#define E1000_I2CPARAMS        0x0102C /* SFPI2C Parameters Register - RW */
+#define E1000_I2CBB_EN      0x00000100  /* I2C - Bit Bang Enable */
+#define E1000_I2C_CLK_OUT   0x00000200  /* I2C - Clock */
+#define E1000_I2C_DATA_OUT  0x00000400  /* I2C - Data Out */
+#define E1000_I2C_DATA_OE_N 0x00000800  /* I2C - Data Output Enable */
+#define E1000_I2C_DATA_IN   0x00001000  /* I2C - Data In */
+#define E1000_I2C_CLK_OE_N  0x00002000  /* I2C - Clock Output Enable */
+#define E1000_I2C_CLK_IN    0x00004000  /* I2C - Clock In */
+#define E1000_MPHY_ADDR_CTRL	0x0024 /* GbE MPHY Address Control */
+#define E1000_MPHY_DATA		0x0E10 /* GBE MPHY Data */
+#define E1000_MPHY_STAT		0x0E0C /* GBE MPHY Statistics */
+
+/* IEEE 1588 TIMESYNCH */
+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
+#define E1000_RXSTMPL    0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH    0x0B628 /* Rx timestamp High - RO */
+#define E1000_RXSATRL    0x0B62C /* Rx timestamp attribute low - RO */
+#define E1000_RXSATRH    0x0B630 /* Rx timestamp attribute high - RO */
+#define E1000_TXSTMPL    0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH    0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML    0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH    0x0B604 /* System time register High - RO */
+#define E1000_TIMINCA    0x0B608 /* Increment attributes register - RW */
+#define E1000_TSAUXC     0x0B640 /* Timesync Auxiliary Control register */
+#define E1000_TRGTTIML0  0x0B644 /* Target Time Register 0 Low  - RW */
+#define E1000_TRGTTIMH0  0x0B648 /* Target Time Register 0 High - RW */
+#define E1000_TRGTTIML1  0x0B64C /* Target Time Register 1 Low  - RW */
+#define E1000_TRGTTIMH1  0x0B650 /* Target Time Register 1 High - RW */
+#define E1000_AUXSTMPL0  0x0B65C /* Auxiliary Time Stamp 0 Register Low  - RO */
+#define E1000_AUXSTMPH0  0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */
+#define E1000_AUXSTMPL1  0x0B664 /* Auxiliary Time Stamp 1 Register Low  - RO */
+#define E1000_AUXSTMPH1  0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */
+#define E1000_SYSTIMR    0x0B6F8 /* System time register Residue */
+#define E1000_TSICR      0x0B66C /* Interrupt Cause Register */
+#define E1000_TSIM       0x0B674 /* Interrupt Mask Register */
+
+/* Filtering Registers */
+#define E1000_SAQF(_n) (0x5980 + 4 * (_n))
+#define E1000_DAQF(_n) (0x59A0 + 4 * (_n))
+#define E1000_SPQF(_n) (0x59C0 + 4 * (_n))
+#define E1000_FTQF(_n) (0x59E0 + 4 * (_n))
+#define E1000_SAQF0 E1000_SAQF(0)
+#define E1000_DAQF0 E1000_DAQF(0)
+#define E1000_SPQF0 E1000_SPQF(0)
+#define E1000_FTQF0 E1000_FTQF(0)
+#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
+#define E1000_ETQF(_n)  (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+
+#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
+
+/* DMA Coalescing registers */
+#define E1000_DMACR	0x02508 /* Control Register */
+#define E1000_DMCTXTH	0x03550 /* Transmit Threshold */
+#define E1000_DMCTLX	0x02514 /* Time to Lx Request */
+#define E1000_DMCRTRH	0x05DD0 /* Receive Packet Rate Threshold */
+#define E1000_DMCCNT	0x05DD4 /* Current Rx Count */
+#define E1000_FCRTC	0x02170 /* Flow Control Rx high watermark */
+#define E1000_PCIEMISC	0x05BB8 /* PCIE misc config register */
+
+/* TX Rate Limit Registers */
+#define E1000_RTTDQSEL	0x3604 /* Tx Desc Plane Queue Select - WO */
+#define E1000_RTTBCNRM	0x3690 /* Tx BCN Rate-scheduler MMW */
+#define E1000_RTTBCNRC	0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
+
+/* Split and Replication RX Control - RW */
+#define E1000_RXPBS	0x02404 /* Rx Packet Buffer Size - RW */
+
+/* Thermal sensor configuration and status registers */
+#define E1000_THMJT	0x08100 /* Junction Temperature */
+#define E1000_THLOWTC	0x08104 /* Low Threshold Control */
+#define E1000_THMIDTC	0x08108 /* Mid Threshold Control */
+#define E1000_THHIGHTC	0x0810C /* High Threshold Control */
+#define E1000_THSTAT	0x08110 /* Thermal Sensor Status */
+
+/* Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL(current_rx_queue)
+ */
+#define E1000_RDBAL(_n)   ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \
+				    : (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n)   ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \
+				    : (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n)   ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \
+				    : (0x0C008 + ((_n) * 0x40)))
+#define E1000_SRRCTL(_n)  ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \
+				    : (0x0C00C + ((_n) * 0x40)))
+#define E1000_RDH(_n)     ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \
+				    : (0x0C010 + ((_n) * 0x40)))
+#define E1000_RDT(_n)     ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \
+				    : (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n)  ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \
+				    : (0x0C028 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n)   ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \
+				    : (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n)   ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \
+				    : (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n)   ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \
+				    : (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n)     ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \
+				    : (0x0E010 + ((_n) * 0x40)))
+#define E1000_TDT(_n)     ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \
+				    : (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n)  ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \
+				    : (0x0E028 + ((_n) * 0x40)))
+#define E1000_RXCTL(_n)	  ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
+				      (0x0C014 + ((_n) * 0x40)))
+#define E1000_DCA_RXCTRL(_n)	E1000_RXCTL(_n)
+#define E1000_TXCTL(_n)   ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
+				      (0x0E014 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
+#define E1000_TDWBAL(_n)  ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \
+				    : (0x0E038 + ((_n) * 0x40)))
+#define E1000_TDWBAH(_n)  ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
+				    : (0x0E03C + ((_n) * 0x40)))
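A throwaway check of how the queue number picks the register bank in the convenience macros above (standalone, not driver code): queues 0-3 sit in the legacy block with a 0x100 stride, queues 4 and up in the extended block with a 0x40 stride.

#include <assert.h>

#define E1000_RDBAL(_n)	((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \
				  : (0x0C000 + ((_n) * 0x40)))

int main(void)
{
	assert(E1000_RDBAL(2) == 0x02A00);	/* legacy bank, queue 2 */
	assert(E1000_RDBAL(5) == 0x0C140);	/* extended bank, queue 5 */
	return 0;
}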
+
+#define E1000_RXPBS	0x02404  /* Rx Packet Buffer Size - RW */
+#define E1000_TXPBS	0x03404  /* Tx Packet Buffer Size - RW */
+
+#define E1000_TDFH     0x03410  /* TX Data FIFO Head - RW */
+#define E1000_TDFT     0x03418  /* TX Data FIFO Tail - RW */
+#define E1000_TDFHS    0x03420  /* TX Data FIFO Head Saved - RW */
+#define E1000_TDFPC    0x03430  /* TX Data FIFO Packet Count - RW */
+#define E1000_DTXCTL   0x03590  /* DMA TX Control - RW */
+#define E1000_CRCERRS  0x04000  /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004  /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS  0x04008  /* Symbol Error Count - R/clr */
+#define E1000_RXERRC   0x0400C  /* Receive Error Count - R/clr */
+#define E1000_MPC      0x04010  /* Missed Packet Count - R/clr */
+#define E1000_SCC      0x04014  /* Single Collision Count - R/clr */
+#define E1000_ECOL     0x04018  /* Excessive Collision Count - R/clr */
+#define E1000_MCC      0x0401C  /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL  0x04020  /* Late Collision Count - R/clr */
+#define E1000_COLC     0x04028  /* Collision Count - R/clr */
+#define E1000_DC       0x04030  /* Defer Count - R/clr */
+#define E1000_TNCRS    0x04034  /* TX-No CRS - R/clr */
+#define E1000_SEC      0x04038  /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR  0x0403C  /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC     0x04040  /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC   0x04048  /* XON RX Count - R/clr */
+#define E1000_XONTXC   0x0404C  /* XON TX Count - R/clr */
+#define E1000_XOFFRXC  0x04050  /* XOFF RX Count - R/clr */
+#define E1000_XOFFTXC  0x04054  /* XOFF TX Count - R/clr */
+#define E1000_FCRUC    0x04058  /* Flow Control RX Unsupported Count- R/clr */
+#define E1000_PRC64    0x0405C  /* Packets RX (64 bytes) - R/clr */
+#define E1000_PRC127   0x04060  /* Packets RX (65-127 bytes) - R/clr */
+#define E1000_PRC255   0x04064  /* Packets RX (128-255 bytes) - R/clr */
+#define E1000_PRC511   0x04068  /* Packets RX (255-511 bytes) - R/clr */
+#define E1000_PRC1023  0x0406C  /* Packets RX (512-1023 bytes) - R/clr */
+#define E1000_PRC1522  0x04070  /* Packets RX (1024-1522 bytes) - R/clr */
+#define E1000_GPRC     0x04074  /* Good Packets RX Count - R/clr */
+#define E1000_BPRC     0x04078  /* Broadcast Packets RX Count - R/clr */
+#define E1000_MPRC     0x0407C  /* Multicast Packets RX Count - R/clr */
+#define E1000_GPTC     0x04080  /* Good Packets TX Count - R/clr */
+#define E1000_GORCL    0x04088  /* Good Octets RX Count Low - R/clr */
+#define E1000_GORCH    0x0408C  /* Good Octets RX Count High - R/clr */
+#define E1000_GOTCL    0x04090  /* Good Octets TX Count Low - R/clr */
+#define E1000_GOTCH    0x04094  /* Good Octets TX Count High - R/clr */
+#define E1000_RNBC     0x040A0  /* RX No Buffers Count - R/clr */
+#define E1000_RUC      0x040A4  /* RX Undersize Count - R/clr */
+#define E1000_RFC      0x040A8  /* RX Fragment Count - R/clr */
+#define E1000_ROC      0x040AC  /* RX Oversize Count - R/clr */
+#define E1000_RJC      0x040B0  /* RX Jabber Count - R/clr */
+#define E1000_MGTPRC   0x040B4  /* Management Packets RX Count - R/clr */
+#define E1000_MGTPDC   0x040B8  /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC   0x040BC  /* Management Packets TX Count - R/clr */
+#define E1000_TORL     0x040C0  /* Total Octets RX Low - R/clr */
+#define E1000_TORH     0x040C4  /* Total Octets RX High - R/clr */
+#define E1000_TOTL     0x040C8  /* Total Octets TX Low - R/clr */
+#define E1000_TOTH     0x040CC  /* Total Octets TX High - R/clr */
+#define E1000_TPR      0x040D0  /* Total Packets RX - R/clr */
+#define E1000_TPT      0x040D4  /* Total Packets TX - R/clr */
+#define E1000_PTC64    0x040D8  /* Packets TX (64 bytes) - R/clr */
+#define E1000_PTC127   0x040DC  /* Packets TX (65-127 bytes) - R/clr */
+#define E1000_PTC255   0x040E0  /* Packets TX (128-255 bytes) - R/clr */
+#define E1000_PTC511   0x040E4  /* Packets TX (256-511 bytes) - R/clr */
+#define E1000_PTC1023  0x040E8  /* Packets TX (512-1023 bytes) - R/clr */
+#define E1000_PTC1522  0x040EC  /* Packets TX (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC     0x040F0  /* Multicast Packets TX Count - R/clr */
+#define E1000_BPTC     0x040F4  /* Broadcast Packets TX Count - R/clr */
+#define E1000_TSCTC    0x040F8  /* TCP Segmentation Context TX - R/clr */
+#define E1000_TSCTFC   0x040FC  /* TCP Segmentation Context TX Fail - R/clr */
+#define E1000_IAC      0x04100  /* Interrupt Assertion Count */
+/* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXPTC  0x04104
+/* Interrupt Cause Rx Absolute Timer Expire Count */
+#define E1000_ICRXATC  0x04108
+/* Interrupt Cause Tx Packet Timer Expire Count */
+#define E1000_ICTXPTC  0x0410C
+/* Interrupt Cause Tx Absolute Timer Expire Count */
+#define E1000_ICTXATC  0x04110
+/* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQEC  0x04118
+/* Interrupt Cause Tx Queue Minimum Threshold Count */
+#define E1000_ICTXQMTC 0x0411C
+/* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+#define E1000_ICRXDMTC 0x04120
+#define E1000_ICRXOC   0x04124  /* Interrupt Cause Receiver Overrun Count */
+#define E1000_PCS_CFG0    0x04200  /* PCS Configuration 0 - RW */
+#define E1000_PCS_LCTL    0x04208  /* PCS Link Control - RW */
+#define E1000_PCS_LSTAT   0x0420C  /* PCS Link Status - RO */
+#define E1000_CBTMPC      0x0402C  /* Circuit Breaker TX Packet Count */
+#define E1000_HTDPMC      0x0403C  /* Host Transmit Discarded Packets */
+#define E1000_CBRMPC      0x040FC  /* Circuit Breaker RX Packet Count */
+#define E1000_RPTHC       0x04104  /* Rx Packets To Host */
+#define E1000_HGPTC       0x04118  /* Host Good Packets TX Count */
+#define E1000_HTCBDPC     0x04124  /* Host TX Circuit Breaker Dropped Count */
+#define E1000_HGORCL      0x04128  /* Host Good Octets Received Count Low */
+#define E1000_HGORCH      0x0412C  /* Host Good Octets Received Count High */
+#define E1000_HGOTCL      0x04130  /* Host Good Octets Transmit Count Low */
+#define E1000_HGOTCH      0x04134  /* Host Good Octets Transmit Count High */
+#define E1000_LENERRS     0x04138  /* Length Errors Count */
+#define E1000_SCVPC       0x04228  /* SerDes/SGMII Code Violation Pkt Count */
+#define E1000_PCS_ANADV   0x04218  /* AN advertisement - RW */
+#define E1000_PCS_LPAB    0x0421C  /* Link Partner Ability - RW */
+#define E1000_PCS_NPTX    0x04220  /* AN Next Page Transmit - RW */
+#define E1000_PCS_LPABNP  0x04224  /* Link Partner Ability Next Page - RW */
+#define E1000_RXCSUM   0x05000  /* RX Checksum Control - RW */
+#define E1000_RLPML    0x05004  /* RX Long Packet Max Length */
+#define E1000_RFCTL    0x05008  /* Receive Filter Control */
+#define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
+#define E1000_RA       0x05400  /* Receive Address - RW Array */
+#define E1000_RA2      0x054E0  /* 2nd half of Rx address array - RW Array */
+#define E1000_PSRTYPE(_i)       (0x05480 + ((_i) * 4))
+#define E1000_RAL(_i)  (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+					(0x054E0 + (((_i) - 16) * 8)))
+#define E1000_RAH(_i)  (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+					(0x054E4 + (((_i) - 16) * 8)))
+#define E1000_IP4AT_REG(_i)     (0x05840 + ((_i) * 8))
+#define E1000_IP6AT_REG(_i)     (0x05880 + ((_i) * 4))
+#define E1000_WUPM_REG(_i)      (0x05A00 + ((_i) * 4))
+#define E1000_FFMT_REG(_i)      (0x09000 + ((_i) * 8))
+#define E1000_FFVT_REG(_i)      (0x09800 + ((_i) * 8))
+#define E1000_FFLT_REG(_i)      (0x05F00 + ((_i) * 8))
+#define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
+#define E1000_VT_CTL   0x0581C  /* VMDq Control - RW */
+#define E1000_WUC      0x05800  /* Wakeup Control - RW */
+#define E1000_WUFC     0x05808  /* Wakeup Filter Control - RW */
+#define E1000_WUS      0x05810  /* Wakeup Status - RO */
+#define E1000_MANC     0x05820  /* Management Control - RW */
+#define E1000_IPAV     0x05838  /* IP Address Valid - RW */
+#define E1000_WUPL     0x05900  /* Wakeup Packet Length - RW */
+
+#define E1000_SW_FW_SYNC  0x05B5C /* Software-Firmware Synchronization - RW */
+#define E1000_CCMCTL      0x05B48 /* CCM Control Register */
+#define E1000_GIOCTL      0x05B44 /* GIO Analog Control Register */
+#define E1000_SCCTL       0x05B4C /* PCIc PLL Configuration Register */
+#define E1000_GCR         0x05B00 /* PCI-Ex Control */
+#define E1000_FACTPS    0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM      0x05B50 /* SW Semaphore */
+#define E1000_FWSM      0x05B54 /* FW Semaphore */
+#define E1000_DCA_CTRL  0x05B74 /* DCA Control - RW */
+
+/* RSS registers */
+#define E1000_MRQC      0x05818 /* Multiple Receive Control - RW */
+#define E1000_IMIR(_i)      (0x05A80 + ((_i) * 4))  /* Immediate Interrupt */
+#define E1000_IMIREXT(_i)   (0x05AA0 + ((_i) * 4))  /* Immediate Interrupt Ext*/
+#define E1000_IMIRVP    0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */
+/* MSI-X Allocation Register (_i) - RW */
+#define E1000_MSIXBM(_i)    (0x01600 + ((_i) * 4))
+/* Redirection Table - RW Array */
+#define E1000_RETA(_i)  (0x05C00 + ((_i) * 4))
+#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
+
+/* VT Registers */
+#define E1000_MBVFICR   0x00C80 /* Mailbox VF Cause - RWC */
+#define E1000_MBVFIMR   0x00C84 /* Mailbox VF int Mask - RW */
+#define E1000_VFLRE     0x00C88 /* VF Register Events - RWC */
+#define E1000_VFRE      0x00C8C /* VF Receive Enables */
+#define E1000_VFTE      0x00C90 /* VF Transmit Enables */
+#define E1000_QDE       0x02408 /* Queue Drop Enable - RW */
+#define E1000_DTXSWC    0x03500 /* DMA Tx Switch Control - RW */
+#define E1000_WVBR      0x03554 /* VM Wrong Behavior - RWS */
+#define E1000_RPLOLR    0x05AF0 /* Replication Offload - RW */
+#define E1000_UTA       0x0A000 /* Unicast Table Array - RW */
+#define E1000_IOVTCL    0x05BBC /* IOV Control Register */
+#define E1000_TXSWC     0x05ACC /* Tx Switch Control */
+#define E1000_LVMMC	0x03548 /* Last VM Misbehavior cause */
+/* These act per VF, so an array-friendly macro is used */
+#define E1000_P2VMAILBOX(_n)   (0x00C00 + (4 * (_n)))
+#define E1000_VMBMEM(_n)       (0x00800 + (64 * (_n)))
+#define E1000_VMOLR(_n)        (0x05AD0 + (4 * (_n)))
+#define E1000_DVMOLR(_n)       (0x0C038 + (64 * (_n)))
+#define E1000_VLVF(_n)         (0x05D00 + (4 * (_n))) /* VLAN VM Filter */
+#define E1000_VMVIR(_n)        (0x03700 + (4 * (_n)))
+
+struct e1000_hw;
+
+u32 igb_rd32(struct e1000_hw *hw, u32 reg);
+
+/* write operations, indexed using DWORDS */
+#define wr32(reg, val) \
+do { \
+	u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
+	if (!E1000_REMOVED(hw_addr)) \
+		writel((val), &hw_addr[(reg)]); \
+} while (0)
+
+#define rd32(reg) (igb_rd32(hw, reg))
+
+#define wrfl() ((void)rd32(E1000_STATUS))
+
+#define array_wr32(reg, offset, value) \
+	wr32((reg) + ((offset) << 2), (value))
+
+#define array_rd32(reg, offset) \
+	(readl(hw->hw_addr + (reg) + ((offset) << 2)))
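As the macros show, wr32()/rd32() expect a local struct e1000_hw *hw to be in scope, and the array accessors index in DWORDs (offset << 2 bytes). A hedged usage sketch follows; the function name is hypothetical, but E1000_MTA and the mta_reg_count field are the ones defined for this driver.

/* Clear the multicast table array, one DWORD-indexed entry at a time. */
static void example_clear_mta(struct e1000_hw *hw)
{
	u32 i;

	for (i = 0; i < hw->mac.mta_reg_count; i++)
		array_wr32(E1000_MTA, i, 0);	/* writes E1000_MTA + (i << 2) */
	wrfl();					/* flush posted writes */
}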
+
+/* DMA Coalescing registers */
+#define E1000_PCIEMISC	0x05BB8 /* PCIE misc config register */
+
+/* Energy Efficient Ethernet "EEE" register */
+#define E1000_IPCNFG	0x0E38 /* Internal PHY Configuration */
+#define E1000_EEER	0x0E30 /* Energy Efficient Ethernet */
+#define E1000_EEE_SU	0x0E34 /* EEE Setup */
+#define E1000_EMIADD	0x10   /* Extended Memory Indirect Address */
+#define E1000_EMIDATA	0x11   /* Extended Memory Indirect Data */
+#define E1000_MMDAC	13     /* MMD Access Control */
+#define E1000_MMDAAD	14     /* MMD Access Address/Data */
+
+/* Thermal Sensor Register */
+#define E1000_THSTAT	0x08110 /* Thermal Sensor Status */
+
+/* OS2BMC Registers */
+#define E1000_B2OSPC	0x08FE0 /* BMC2OS packets sent by BMC */
+#define E1000_B2OGPRC	0x04158 /* BMC2OS packets received by host */
+#define E1000_O2BGPTC	0x08FE4 /* OS2BMC packets received by BMC */
+#define E1000_O2BSPC	0x0415C /* OS2BMC packets transmitted by host */
+
+#define E1000_SRWR		0x12018  /* Shadow Ram Write Register - RW */
+#define E1000_I210_FLMNGCTL	0x12038
+#define E1000_I210_FLMNGDATA	0x1203C
+#define E1000_I210_FLMNGCNT	0x12040
+
+#define E1000_I210_FLSWCTL	0x12048
+#define E1000_I210_FLSWDATA	0x1204C
+#define E1000_I210_FLSWCNT	0x12050
+
+#define E1000_I210_FLA		0x1201C
+
+#define E1000_INVM_DATA_REG(_n)	(0x12120 + 4*(_n))
+#define E1000_INVM_SIZE		64 /* Number of INVM Data Registers */
+
+#define E1000_REMOVED(h) unlikely(!(h))
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/igb/e1000_phy.h	2022-03-21 12:58:29.427888591 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/igb/e1000_mac.h	1970-01-01 01:00:00.000000000 +0100
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_PHY_H_
+#define _E1000_PHY_H_
+
+enum e1000_ms_type {
+	e1000_ms_hw_default = 0,
+	e1000_ms_force_master,
+	e1000_ms_force_slave,
+	e1000_ms_auto
+};
+
+enum e1000_smart_speed {
+	e1000_smart_speed_default = 0,
+	e1000_smart_speed_on,
+	e1000_smart_speed_off
+};
+
+s32  igb_check_downshift(struct e1000_hw *hw);
+s32  igb_check_reset_block(struct e1000_hw *hw);
+s32  igb_copper_link_setup_igp(struct e1000_hw *hw);
+s32  igb_copper_link_setup_m88(struct e1000_hw *hw);
+s32  igb_copper_link_setup_m88_gen2(struct e1000_hw *hw);
+s32  igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32  igb_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32  igb_get_cable_length_m88(struct e1000_hw *hw);
+s32  igb_get_cable_length_m88_gen2(struct e1000_hw *hw);
+s32  igb_get_cable_length_igp_2(struct e1000_hw *hw);
+s32  igb_get_phy_id(struct e1000_hw *hw);
+s32  igb_get_phy_info_igp(struct e1000_hw *hw);
+s32  igb_get_phy_info_m88(struct e1000_hw *hw);
+s32  igb_phy_sw_reset(struct e1000_hw *hw);
+s32  igb_phy_hw_reset(struct e1000_hw *hw);
+s32  igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  igb_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32  igb_setup_copper_link(struct e1000_hw *hw);
+s32  igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32  igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
+				u32 usec_interval, bool *success);
+void igb_power_up_phy_copper(struct e1000_hw *hw);
+void igb_power_down_phy_copper(struct e1000_hw *hw);
+s32  igb_phy_init_script_igp3(struct e1000_hw *hw);
+s32  igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32  igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
+s32  igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
+s32  igb_copper_link_setup_82580(struct e1000_hw *hw);
+s32  igb_get_phy_info_82580(struct e1000_hw *hw);
+s32  igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
+s32  igb_get_cable_length_82580(struct e1000_hw *hw);
+s32  igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data);
+s32  igb_check_polarity_m88(struct e1000_hw *hw);
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG        0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS        0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL          0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH        0x13 /* PHY Link Health */
+#define IGP02E1000_PHY_POWER_MGMT         0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT        0x1F /* Page Select */
+#define IGP01E1000_PHY_PCS_INIT_REG       0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK      0x0078
+#define IGP01E1000_PSCR_AUTO_MDIX         0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX    0x2000 /* 0=MDI, 1=MDIX */
+#define IGP01E1000_PSCFR_SMART_SPEED      0x0080
+
+#define I82580_ADDR_REG                   16
+#define I82580_CFG_REG                    22
+#define I82580_CFG_ASSERT_CRS_ON_TX       (1 << 15)
+#define I82580_CFG_ENABLE_DOWNSHIFT       (3 << 10) /* auto downshift 100/10 */
+#define I82580_CTRL_REG                   23
+#define I82580_CTRL_DOWNSHIFT_MASK        (7 << 10)
+
+/* 82580 specific PHY registers */
+#define I82580_PHY_CTRL_2            18
+#define I82580_PHY_LBK_CTRL          19
+#define I82580_PHY_STATUS_2          26
+#define I82580_PHY_DIAG_STATUS       31
+
+/* I82580 PHY Status 2 */
+#define I82580_PHY_STATUS2_REV_POLARITY   0x0400
+#define I82580_PHY_STATUS2_MDIX           0x0800
+#define I82580_PHY_STATUS2_SPEED_MASK     0x0300
+#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200
+#define I82580_PHY_STATUS2_SPEED_100MBPS  0x0100
+
+/* I82580 PHY Control 2 */
+#define I82580_PHY_CTRL2_MANUAL_MDIX      0x0200
+#define I82580_PHY_CTRL2_AUTO_MDI_MDIX    0x0400
+#define I82580_PHY_CTRL2_MDIX_CFG_MASK    0x0600
+
+/* I82580 PHY Diagnostics Status */
+#define I82580_DSTATUS_CABLE_LENGTH       0x03FC
+#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2
+
+/* 82580 PHY Power Management */
+#define E1000_82580_PHY_POWER_MGMT	0xE14
+#define E1000_82580_PM_SPD		0x0001 /* Smart Power Down */
+#define E1000_82580_PM_D0_LPLU		0x0002 /* For D0a states */
+#define E1000_82580_PM_D3_LPLU		0x0004 /* For all other states */
+#define E1000_82580_PM_GO_LINKD		0x0020 /* Go Link Disconnect */
+
+/* Enable flexible speed on link-up */
+#define IGP02E1000_PM_D0_LPLU             0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU             0x0004 /* For all other states */
+#define IGP01E1000_PLHR_SS_DOWNGRADE      0x8000
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX              0x0800
+#define IGP01E1000_PSSR_SPEED_MASK        0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS    0xC000
+#define IGP02E1000_PHY_CHANNEL_NUM        4
+#define IGP02E1000_PHY_AGC_A              0x11B1
+#define IGP02E1000_PHY_AGC_B              0x12B1
+#define IGP02E1000_PHY_AGC_C              0x14B1
+#define IGP02E1000_PHY_AGC_D              0x18B1
+#define IGP02E1000_AGC_LENGTH_SHIFT       9   /* Coarse - 15:13, Fine - 12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK        0x7F
+#define IGP02E1000_AGC_RANGE              15
+
+#define E1000_CABLE_LENGTH_UNDEFINED      0xFF
+
+/* GS40G - I210 PHY defines */
+#define GS40G_PAGE_SELECT		0x16
+#define GS40G_PAGE_SHIFT		16
+#define GS40G_OFFSET_MASK		0xFFFF
+#define GS40G_PAGE_2			0x20000
+#define GS40G_MAC_REG2			0x15
+#define GS40G_MAC_LB			0x4140
+#define GS40G_MAC_SPEED_1G		0x0006
+#define GS40G_COPPER_SPEC		0x0010
+#define GS40G_LINE_LB			0x4000
+
+/* SFP modules ID memory locations */
+#define E1000_SFF_IDENTIFIER_OFFSET	0x00
+#define E1000_SFF_IDENTIFIER_SFF	0x02
+#define E1000_SFF_IDENTIFIER_SFP	0x03
+
+#define E1000_SFF_ETH_FLAGS_OFFSET	0x06
+/* Flags for SFP modules compatible with ETH up to 1Gb */
+struct e1000_sfp_flags {
+	u8 e1000_base_sx:1;
+	u8 e1000_base_lx:1;
+	u8 e1000_base_cx:1;
+	u8 e1000_base_t:1;
+	u8 e100_base_lx:1;
+	u8 e100_base_fx:1;
+	u8 e10_base_bx10:1;
+	u8 e10_base_px:1;
+};
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/igb/e1000_mac.h	2022-03-21 12:58:29.422888640 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/igb/e1000_82575.c	1970-01-01 01:00:00.000000000 +0100
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_MAC_H_
+#define _E1000_MAC_H_
+
+#include "e1000_hw.h"
+
+#include "e1000_phy.h"
+#include "e1000_nvm.h"
+#include "e1000_defines.h"
+#include "e1000_i210.h"
+
+/* Functions that should not be called directly from drivers but can be used
+ * by other files in this 'shared code'
+ */
+s32  igb_blink_led(struct e1000_hw *hw);
+s32  igb_check_for_copper_link(struct e1000_hw *hw);
+s32  igb_cleanup_led(struct e1000_hw *hw);
+s32  igb_config_fc_after_link_up(struct e1000_hw *hw);
+s32  igb_disable_pcie_master(struct e1000_hw *hw);
+s32  igb_force_mac_fc(struct e1000_hw *hw);
+s32  igb_get_auto_rd_done(struct e1000_hw *hw);
+s32  igb_get_bus_info_pcie(struct e1000_hw *hw);
+s32  igb_get_hw_semaphore(struct e1000_hw *hw);
+s32  igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+				     u16 *duplex);
+s32  igb_id_led_init(struct e1000_hw *hw);
+s32  igb_led_off(struct e1000_hw *hw);
+void igb_update_mc_addr_list(struct e1000_hw *hw,
+			     u8 *mc_addr_list, u32 mc_addr_count);
+s32  igb_setup_link(struct e1000_hw *hw);
+s32  igb_validate_mdi_setting(struct e1000_hw *hw);
+s32  igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
+			     u32 offset, u8 data);
+
+void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
+void igb_clear_vfta(struct e1000_hw *hw);
+void igb_clear_vfta_i350(struct e1000_hw *hw);
+s32  igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add);
+void igb_config_collision_dist(struct e1000_hw *hw);
+void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
+void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
+void igb_put_hw_semaphore(struct e1000_hw *hw);
+void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+s32  igb_check_alt_mac_addr(struct e1000_hw *hw);
+
+bool igb_enable_mng_pass_thru(struct e1000_hw *hw);
+
+enum e1000_mng_mode {
+	e1000_mng_mode_none = 0,
+	e1000_mng_mode_asf,
+	e1000_mng_mode_pt,
+	e1000_mng_mode_ipmi,
+	e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG	0x20000000
+
+#define E1000_FWSM_MODE_MASK	0xE
+#define E1000_FWSM_MODE_SHIFT	1
+
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN	0x2
+
+void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/igb/e1000_82575.c	2022-03-21 12:58:29.417888688 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/igb/e1000_nvm.c	1970-01-01 01:00:00.000000000 +0100
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2015 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+/* e1000_82575
+ * e1000_82576
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/i2c.h>
+
+#include "e1000_mac.h"
+#include "e1000_82575.h"
+#include "e1000_i210.h"
+
+static s32  igb_get_invariants_82575(struct e1000_hw *);
+static s32  igb_acquire_phy_82575(struct e1000_hw *);
+static void igb_release_phy_82575(struct e1000_hw *);
+static s32  igb_acquire_nvm_82575(struct e1000_hw *);
+static void igb_release_nvm_82575(struct e1000_hw *);
+static s32  igb_check_for_link_82575(struct e1000_hw *);
+static s32  igb_get_cfg_done_82575(struct e1000_hw *);
+static s32  igb_init_hw_82575(struct e1000_hw *);
+static s32  igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
+static s32  igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
+static s32  igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
+static s32  igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
+static s32  igb_reset_hw_82575(struct e1000_hw *);
+static s32  igb_reset_hw_82580(struct e1000_hw *);
+static s32  igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
+static s32  igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
+static s32  igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
+static s32  igb_setup_copper_link_82575(struct e1000_hw *);
+static s32  igb_setup_serdes_link_82575(struct e1000_hw *);
+static s32  igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
+static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
+static s32  igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
+static s32  igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
+						 u16 *);
+static s32  igb_get_phy_id_82575(struct e1000_hw *);
+static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
+static bool igb_sgmii_active_82575(struct e1000_hw *);
+static s32  igb_reset_init_script_82575(struct e1000_hw *);
+static s32  igb_read_mac_addr_82575(struct e1000_hw *);
+static s32  igb_set_pcie_completion_timeout(struct e1000_hw *hw);
+static s32  igb_reset_mdicnfg_82580(struct e1000_hw *hw);
+static s32  igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
+static s32  igb_update_nvm_checksum_82580(struct e1000_hw *hw);
+static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
+static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
+static const u16 e1000_82580_rxpbs_table[] = {
+	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
+
+/**
+ *  igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
+ *  @hw: pointer to the HW structure
+ *
+ *  Called to determine if the I2C pins are being used for I2C or as an
+ *  external MDIO interface since the two options are mutually exclusive.
+ **/
+static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
+{
+	u32 reg = 0;
+	bool ext_mdio = false;
+
+	switch (hw->mac.type) {
+	case e1000_82575:
+	case e1000_82576:
+		reg = rd32(E1000_MDIC);
+		ext_mdio = !!(reg & E1000_MDIC_DEST);
+		break;
+	case e1000_82580:
+	case e1000_i350:
+	case e1000_i354:
+	case e1000_i210:
+	case e1000_i211:
+		reg = rd32(E1000_MDICNFG);
+		ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
+		break;
+	default:
+		break;
+	}
+	return ext_mdio;
+}
+
+/**
+ *  igb_check_for_link_media_swap - Check which M88E1112 interface linked
+ *  @hw: pointer to the HW structure
+ *
+ *  Poll the M88E1112 interfaces to see which interface achieved link.
+ */
+static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	u8 port = 0;
+
+	/* Check the copper medium. */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+	if (ret_val)
+		return ret_val;
+
+	if (data & E1000_M88E1112_STATUS_LINK)
+		port = E1000_MEDIA_PORT_COPPER;
+
+	/* Check the other medium. */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+	if (ret_val)
+		return ret_val;
+
+	/* reset page to 0 */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+	if (ret_val)
+		return ret_val;
+
+	if (data & E1000_M88E1112_STATUS_LINK)
+		port = E1000_MEDIA_PORT_OTHER;
+
+	/* Determine if a swap needs to happen. */
+	if (port && (hw->dev_spec._82575.media_port != port)) {
+		hw->dev_spec._82575.media_port = port;
+		hw->dev_spec._82575.media_changed = true;
+	} else {
+		ret_val = igb_check_for_link_82575(hw);
+	}
+
+	return 0;
+}
+
+/**
+ *  igb_init_phy_params_82575 - Init PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u32 ctrl_ext;
+
+	if (hw->phy.media_type != e1000_media_type_copper) {
+		phy->type = e1000_phy_none;
+		goto out;
+	}
+
+	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;
+	phy->reset_delay_us	= 100;
+
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+
+	if (igb_sgmii_active_82575(hw)) {
+		phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
+		ctrl_ext |= E1000_CTRL_I2C_ENA;
+	} else {
+		phy->ops.reset = igb_phy_hw_reset;
+		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+	}
+
+	wr32(E1000_CTRL_EXT, ctrl_ext);
+	igb_reset_mdicnfg_82580(hw);
+
+	if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
+		phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
+		phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
+	} else {
+		switch (hw->mac.type) {
+		case e1000_82580:
+		case e1000_i350:
+		case e1000_i354:
+			phy->ops.read_reg = igb_read_phy_reg_82580;
+			phy->ops.write_reg = igb_write_phy_reg_82580;
+			break;
+		case e1000_i210:
+		case e1000_i211:
+			phy->ops.read_reg = igb_read_phy_reg_gs40g;
+			phy->ops.write_reg = igb_write_phy_reg_gs40g;
+			break;
+		default:
+			phy->ops.read_reg = igb_read_phy_reg_igp;
+			phy->ops.write_reg = igb_write_phy_reg_igp;
+		}
+	}
+
+	/* set lan id */
+	hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
+			E1000_STATUS_FUNC_SHIFT;
+
+	/* Set phy->phy_addr and phy->id. */
+	ret_val = igb_get_phy_id_82575(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Verify phy id and set remaining function pointers */
+	switch (phy->id) {
+	case M88E1543_E_PHY_ID:
+	case I347AT4_E_PHY_ID:
+	case M88E1112_E_PHY_ID:
+	case M88E1111_I_PHY_ID:
+		phy->type		= e1000_phy_m88;
+		phy->ops.check_polarity	= igb_check_polarity_m88;
+		phy->ops.get_phy_info	= igb_get_phy_info_m88;
+		if (phy->id != M88E1111_I_PHY_ID)
+			phy->ops.get_cable_length =
+					 igb_get_cable_length_m88_gen2;
+		else
+			phy->ops.get_cable_length = igb_get_cable_length_m88;
+		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+		/* Check if this PHY is configured for media swap. */
+		if (phy->id == M88E1112_E_PHY_ID) {
+			u16 data;
+
+			ret_val = phy->ops.write_reg(hw,
+						     E1000_M88E1112_PAGE_ADDR,
+						     2);
+			if (ret_val)
+				goto out;
+
+			ret_val = phy->ops.read_reg(hw,
+						    E1000_M88E1112_MAC_CTRL_1,
+						    &data);
+			if (ret_val)
+				goto out;
+
+			data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
+			       E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
+			if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
+			    data == E1000_M88E1112_AUTO_COPPER_BASEX)
+				hw->mac.ops.check_for_link =
+						igb_check_for_link_media_swap;
+		}
+		break;
+	case IGP03E1000_E_PHY_ID:
+		phy->type = e1000_phy_igp_3;
+		phy->ops.get_phy_info = igb_get_phy_info_igp;
+		phy->ops.get_cable_length = igb_get_cable_length_igp_2;
+		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
+		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
+		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
+		break;
+	case I82580_I_PHY_ID:
+	case I350_I_PHY_ID:
+		phy->type = e1000_phy_82580;
+		phy->ops.force_speed_duplex =
+					 igb_phy_force_speed_duplex_82580;
+		phy->ops.get_cable_length = igb_get_cable_length_82580;
+		phy->ops.get_phy_info = igb_get_phy_info_82580;
+		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
+		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
+		break;
+	case I210_I_PHY_ID:
+		phy->type		= e1000_phy_i210;
+		phy->ops.check_polarity	= igb_check_polarity_m88;
+		phy->ops.get_phy_info	= igb_get_phy_info_m88;
+		phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
+		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
+		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
+		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_init_nvm_params_82575 - Init NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = rd32(E1000_EECD);
+	u16 size;
+
+	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+		     E1000_EECD_SIZE_EX_SHIFT);
+
+	/* Added to a constant, "size" becomes the left-shift value
+	 * for setting word_size.
+	 */
+	size += NVM_WORD_SIZE_BASE_SHIFT;
+
+	/* Just in case size is out of range, cap it to the largest
+	 * EEPROM size supported
+	 */
+	if (size > 15)
+		size = 15;
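+	/* For example, assuming the usual NVM_WORD_SIZE_BASE_SHIFT of 6 for
+	 * these parts, a raw EECD size field of 4 yields size = 10 and a
+	 * word_size of 1 << 10 = 1024 words below.
+	 */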
+
+	nvm->word_size = 1 << size;
+	nvm->opcode_bits = 8;
+	nvm->delay_usec = 1;
+
+	switch (nvm->override) {
+	case e1000_nvm_override_spi_large:
+		nvm->page_size = 32;
+		nvm->address_bits = 16;
+		break;
+	case e1000_nvm_override_spi_small:
+		nvm->page_size = 8;
+		nvm->address_bits = 8;
+		break;
+	default:
+		nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
+				    16 : 8;
+		break;
+	}
+	if (nvm->word_size == (1 << 15))
+		nvm->page_size = 128;
+
+	nvm->type = e1000_nvm_eeprom_spi;
+
+	/* NVM Function Pointers */
+	nvm->ops.acquire = igb_acquire_nvm_82575;
+	nvm->ops.release = igb_release_nvm_82575;
+	nvm->ops.write = igb_write_nvm_spi;
+	nvm->ops.validate = igb_validate_nvm_checksum;
+	nvm->ops.update = igb_update_nvm_checksum;
+	if (nvm->word_size < (1 << 15))
+		nvm->ops.read = igb_read_nvm_eerd;
+	else
+		nvm->ops.read = igb_read_nvm_spi;
+
+	/* override generic family function pointers for specific descendants */
+	switch (hw->mac.type) {
+	case e1000_82580:
+		nvm->ops.validate = igb_validate_nvm_checksum_82580;
+		nvm->ops.update = igb_update_nvm_checksum_82580;
+		break;
+	case e1000_i354:
+	case e1000_i350:
+		nvm->ops.validate = igb_validate_nvm_checksum_i350;
+		nvm->ops.update = igb_update_nvm_checksum_i350;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ *  igb_init_mac_params_82575 - Init MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	switch (mac->type) {
+	case e1000_82576:
+		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+		break;
+	case e1000_82580:
+		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+		break;
+	case e1000_i350:
+	case e1000_i354:
+		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
+		break;
+	default:
+		mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
+		break;
+	}
+	/* reset */
+	if (mac->type >= e1000_82580)
+		mac->ops.reset_hw = igb_reset_hw_82580;
+	else
+		mac->ops.reset_hw = igb_reset_hw_82575;
+
+	if (mac->type >= e1000_i210) {
+		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
+		mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
+
+	} else {
+		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
+		mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
+	}
+
+	/* Set if part includes ASF firmware */
+	mac->asf_firmware_present = true;
+	/* Set if manageability features are enabled. */
+	mac->arc_subsystem_valid =
+		(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
+			? true : false;
+	/* enable EEE on i350 parts and later parts */
+	if (mac->type >= e1000_i350)
+		dev_spec->eee_disable = false;
+	else
+		dev_spec->eee_disable = true;
+	/* Allow a single clear of the SW semaphore on I210 and newer */
+	if (mac->type >= e1000_i210)
+		dev_spec->clear_semaphore_once = true;
+	/* physical interface link setup */
+	mac->ops.setup_physical_interface =
+		(hw->phy.media_type == e1000_media_type_copper)
+			? igb_setup_copper_link_82575
+			: igb_setup_serdes_link_82575;
+
+	if (mac->type == e1000_82580) {
+		switch (hw->device_id) {
+		/* feature not supported on these id's */
+		case E1000_DEV_ID_DH89XXCC_SGMII:
+		case E1000_DEV_ID_DH89XXCC_SERDES:
+		case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+		case E1000_DEV_ID_DH89XXCC_SFP:
+			break;
+		default:
+			hw->dev_spec._82575.mas_capable = true;
+			break;
+		}
+	}
+	return 0;
+}
+
+/**
+ *  igb_set_sfp_media_type_82575 - derives SFP module media type.
+ *  @hw: pointer to the HW structure
+ *
+ *  The media type is chosen based on the SFP module compatibility flags
+ *  retrieved from the SFP ID EEPROM.
+ **/
+static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_ERR_CONFIG;
+	u32 ctrl_ext = 0;
+	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+	struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
+	u8 transceiver_type = 0;
+	s32 timeout = 3;
+
+	/* Turn I2C interface ON and power on sfp cage */
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+	wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
+
+	wrfl();
+
+	/* Read SFP module data */
+	while (timeout) {
+		ret_val = igb_read_sfp_data_byte(hw,
+			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
+			&transceiver_type);
+		if (ret_val == 0)
+			break;
+		msleep(100);
+		timeout--;
+	}
+	if (ret_val != 0)
+		goto out;
+
+	ret_val = igb_read_sfp_data_byte(hw,
+			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
+			(u8 *)eth_flags);
+	if (ret_val != 0)
+		goto out;
+
+	/* Check if there is some SFP module plugged and powered */
+	if ((transceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
+	    (transceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
+		dev_spec->module_plugged = true;
+		if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
+			hw->phy.media_type = e1000_media_type_internal_serdes;
+		} else if (eth_flags->e100_base_fx) {
+			dev_spec->sgmii_active = true;
+			hw->phy.media_type = e1000_media_type_internal_serdes;
+		} else if (eth_flags->e1000_base_t) {
+			dev_spec->sgmii_active = true;
+			hw->phy.media_type = e1000_media_type_copper;
+		} else {
+			hw->phy.media_type = e1000_media_type_unknown;
+			hw_dbg("PHY module has not been recognized\n");
+			goto out;
+		}
+	} else {
+		hw->phy.media_type = e1000_media_type_unknown;
+	}
+	ret_val = 0;
+out:
+	/* Restore I2C interface setting */
+	wr32(E1000_CTRL_EXT, ctrl_ext);
+	return ret_val;
+}
+
+static s32 igb_get_invariants_82575(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+	s32 ret_val;
+	u32 ctrl_ext = 0;
+	u32 link_mode = 0;
+
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82575EB_COPPER:
+	case E1000_DEV_ID_82575EB_FIBER_SERDES:
+	case E1000_DEV_ID_82575GB_QUAD_COPPER:
+		mac->type = e1000_82575;
+		break;
+	case E1000_DEV_ID_82576:
+	case E1000_DEV_ID_82576_NS:
+	case E1000_DEV_ID_82576_NS_SERDES:
+	case E1000_DEV_ID_82576_FIBER:
+	case E1000_DEV_ID_82576_SERDES:
+	case E1000_DEV_ID_82576_QUAD_COPPER:
+	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+	case E1000_DEV_ID_82576_SERDES_QUAD:
+		mac->type = e1000_82576;
+		break;
+	case E1000_DEV_ID_82580_COPPER:
+	case E1000_DEV_ID_82580_FIBER:
+	case E1000_DEV_ID_82580_QUAD_FIBER:
+	case E1000_DEV_ID_82580_SERDES:
+	case E1000_DEV_ID_82580_SGMII:
+	case E1000_DEV_ID_82580_COPPER_DUAL:
+	case E1000_DEV_ID_DH89XXCC_SGMII:
+	case E1000_DEV_ID_DH89XXCC_SERDES:
+	case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+	case E1000_DEV_ID_DH89XXCC_SFP:
+		mac->type = e1000_82580;
+		break;
+	case E1000_DEV_ID_I350_COPPER:
+	case E1000_DEV_ID_I350_FIBER:
+	case E1000_DEV_ID_I350_SERDES:
+	case E1000_DEV_ID_I350_SGMII:
+		mac->type = e1000_i350;
+		break;
+	case E1000_DEV_ID_I210_COPPER:
+	case E1000_DEV_ID_I210_FIBER:
+	case E1000_DEV_ID_I210_SERDES:
+	case E1000_DEV_ID_I210_SGMII:
+	case E1000_DEV_ID_I210_COPPER_FLASHLESS:
+	case E1000_DEV_ID_I210_SERDES_FLASHLESS:
+		mac->type = e1000_i210;
+		break;
+	case E1000_DEV_ID_I211_COPPER:
+		mac->type = e1000_i211;
+		break;
+	case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
+	case E1000_DEV_ID_I354_SGMII:
+	case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
+		mac->type = e1000_i354;
+		break;
+	default:
+		return -E1000_ERR_MAC_INIT;
+	}
+
+	/* Set media type */
+	/* The 82575 uses bits 22:23 for link mode. The mode can be changed
+	 * based on the EEPROM. We cannot rely upon device ID. There
+	 * is no distinguishable difference between fiber and internal
+	 * SerDes mode on the 82575. There can be an external PHY attached
+	 * on the SGMII interface. For this, we'll set sgmii_active to true.
+	 */
+	hw->phy.media_type = e1000_media_type_copper;
+	dev_spec->sgmii_active = false;
+	dev_spec->module_plugged = false;
+
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+
+	link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
+	switch (link_mode) {
+	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+		hw->phy.media_type = e1000_media_type_internal_serdes;
+		break;
+	case E1000_CTRL_EXT_LINK_MODE_SGMII:
+		/* Get phy control interface type set (MDIO vs. I2C)*/
+		if (igb_sgmii_uses_mdio_82575(hw)) {
+			hw->phy.media_type = e1000_media_type_copper;
+			dev_spec->sgmii_active = true;
+			break;
+		}
+		/* for I2C based SGMII: */
+		fallthrough;
+	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+		/* read media type from SFP EEPROM */
+		ret_val = igb_set_sfp_media_type_82575(hw);
+		if ((ret_val != 0) ||
+		    (hw->phy.media_type == e1000_media_type_unknown)) {
+			/* If media type was not identified then return media
+			 * type defined by the CTRL_EXT settings.
+			 */
+			hw->phy.media_type = e1000_media_type_internal_serdes;
+
+			if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
+				hw->phy.media_type = e1000_media_type_copper;
+				dev_spec->sgmii_active = true;
+			}
+
+			break;
+		}
+
+		/* do not change link mode for 100BaseFX */
+		if (dev_spec->eth_flags.e100_base_fx)
+			break;
+
+		/* change current link mode setting */
+		ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
+
+		if (hw->phy.media_type == e1000_media_type_copper)
+			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
+		else
+			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+
+		wr32(E1000_CTRL_EXT, ctrl_ext);
+
+		break;
+	default:
+		break;
+	}
+
+	/* mac initialization and operations */
+	ret_val = igb_init_mac_params_82575(hw);
+	if (ret_val)
+		goto out;
+
+	/* NVM initialization */
+	ret_val = igb_init_nvm_params_82575(hw);
+	switch (hw->mac.type) {
+	case e1000_i210:
+	case e1000_i211:
+		ret_val = igb_init_nvm_params_i210(hw);
+		break;
+	default:
+		break;
+	}
+
+	if (ret_val)
+		goto out;
+
+	/* if part supports SR-IOV then initialize mailbox parameters */
+	switch (mac->type) {
+	case e1000_82576:
+	case e1000_i350:
+		igb_init_mbx_params_pf(hw);
+		break;
+	default:
+		break;
+	}
+
+	/* setup PHY parameters */
+	ret_val = igb_init_phy_params_82575(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_acquire_phy_82575 - Acquire rights to access PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire access rights to the correct PHY.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
+{
+	u16 mask = E1000_SWFW_PHY0_SM;
+
+	if (hw->bus.func == E1000_FUNC_1)
+		mask = E1000_SWFW_PHY1_SM;
+	else if (hw->bus.func == E1000_FUNC_2)
+		mask = E1000_SWFW_PHY2_SM;
+	else if (hw->bus.func == E1000_FUNC_3)
+		mask = E1000_SWFW_PHY3_SM;
+
+	return hw->mac.ops.acquire_swfw_sync(hw, mask);
+}
+
+/**
+ *  igb_release_phy_82575 - Release rights to access PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  A wrapper to release access rights to the correct PHY.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static void igb_release_phy_82575(struct e1000_hw *hw)
+{
+	u16 mask = E1000_SWFW_PHY0_SM;
+
+	if (hw->bus.func == E1000_FUNC_1)
+		mask = E1000_SWFW_PHY1_SM;
+	else if (hw->bus.func == E1000_FUNC_2)
+		mask = E1000_SWFW_PHY2_SM;
+	else if (hw->bus.func == E1000_FUNC_3)
+		mask = E1000_SWFW_PHY3_SM;
+
+	hw->mac.ops.release_swfw_sync(hw, mask);
+}
+
+/**
+ *  igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset using the serial gigabit media independent
+ *  interface and stores the retrieved information in data.
+ **/
+static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+					  u16 *data)
+{
+	s32 ret_val = -E1000_ERR_PARAM;
+
+	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+		hw_dbg("PHY Address %u is out of range\n", offset);
+		goto out;
+	}
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_read_phy_reg_i2c(hw, offset, data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset using the serial gigabit
+ *  media independent interface.
+ **/
+static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+					   u16 data)
+{
+	s32 ret_val = -E1000_ERR_PARAM;
+
+
+	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+		hw_dbg("PHY Address %d is out of range\n", offset);
+		goto out;
+	}
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_write_phy_reg_i2c(hw, offset, data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_phy_id_82575 - Retrieve PHY addr and id
+ *  @hw: pointer to the HW structure
+ *
+ *  Retrieves the PHY address and ID for both PHYs that do and do not use
+ *  the sgmii interface.
+ **/
+static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32  ret_val = 0;
+	u16 phy_id;
+	u32 ctrl_ext;
+	u32 mdic;
+
+	/* Extra read required for some PHYs on i354 */
+	if (hw->mac.type == e1000_i354)
+		igb_get_phy_id(hw);
+
+	/* For SGMII PHYs, we try the list of possible addresses until
+	 * we find one that works.  For non-SGMII PHYs
+	 * (e.g. integrated copper PHYs), an address of 1 should
+	 * work.  The result of this function should mean phy->phy_addr
+	 * and phy->id are set correctly.
+	 */
+	if (!(igb_sgmii_active_82575(hw))) {
+		phy->addr = 1;
+		ret_val = igb_get_phy_id(hw);
+		goto out;
+	}
+
+	if (igb_sgmii_uses_mdio_82575(hw)) {
+		switch (hw->mac.type) {
+		case e1000_82575:
+		case e1000_82576:
+			mdic = rd32(E1000_MDIC);
+			mdic &= E1000_MDIC_PHY_MASK;
+			phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
+			break;
+		case e1000_82580:
+		case e1000_i350:
+		case e1000_i354:
+		case e1000_i210:
+		case e1000_i211:
+			mdic = rd32(E1000_MDICNFG);
+			mdic &= E1000_MDICNFG_PHY_MASK;
+			phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
+			break;
+		default:
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+		ret_val = igb_get_phy_id(hw);
+		goto out;
+	}
+
+	/* Power on sgmii phy if it is disabled */
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
+	wrfl();
+	msleep(300);
+
+	/* The address field in the I2CCMD register is 3 bits and 0 is invalid.
+	 * Therefore, we need to test addresses 1-7.
+	 */
+	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
+		ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
+		if (ret_val == 0) {
+			hw_dbg("Vendor ID 0x%08X read at address %u\n",
+			       phy_id, phy->addr);
+			/* At the time of this writing, the M88 part is
+			 * the only supported SGMII PHY product.
+			 */
+			if (phy_id == M88_VENDOR)
+				break;
+		} else {
+			hw_dbg("PHY address %u was unreadable\n", phy->addr);
+		}
+	}
+
+	/* A valid PHY type couldn't be found. */
+	if (phy->addr == 8) {
+		phy->addr = 0;
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	} else {
+		ret_val = igb_get_phy_id(hw);
+	}
+
+	/* restore previous sfp cage power state */
+	wr32(E1000_CTRL_EXT, ctrl_ext);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Resets the PHY using the serial gigabit media independent interface.
+ **/
+static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	/* This isn't a true "hard" reset, but is the only reset
+	 * available to us at this time.
+	 */
+
+	hw_dbg("Soft resetting SGMII attached PHY...\n");
+
+	/* SFP documentation requires the following to configure the SFP module
+	 * to work on SGMII.  No further documentation is given.
+	 */
+	ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_phy_sw_reset(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.  When
+ *  activating LPLU this function also disables smart speed
+ *  and vice versa.  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+	if (ret_val)
+		goto out;
+
+	if (active) {
+		data |= IGP02E1000_PM_D0_LPLU;
+		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+						 data);
+		if (ret_val)
+			goto out;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+						&data);
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+						 data);
+		if (ret_val)
+			goto out;
+	} else {
+		data &= ~IGP02E1000_PM_D0_LPLU;
+		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+						 data);
+		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = phy->ops.read_reg(hw,
+					IGP01E1000_PHY_PORT_CONFIG, &data);
+			if (ret_val)
+				goto out;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = phy->ops.write_reg(hw,
+					IGP01E1000_PHY_PORT_CONFIG, data);
+			if (ret_val)
+				goto out;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = phy->ops.read_reg(hw,
+					IGP01E1000_PHY_PORT_CONFIG, &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = phy->ops.write_reg(hw,
+					IGP01E1000_PHY_PORT_CONFIG, data);
+			if (ret_val)
+				goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.  When
+ *  activating LPLU this function also disables smart speed
+ *  and vice versa.  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u16 data;
+
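+	/* Unlike the 82575 variant above, which pokes the LPLU and SmartSpeed
+	 * bits through PHY registers, the 82580 exposes them in the
+	 * E1000_82580_PHY_POWER_MGMT CSR, so plain register reads and writes
+	 * suffice here.
+	 */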
+	data = rd32(E1000_82580_PHY_POWER_MGMT);
+
+	if (active) {
+		data |= E1000_82580_PM_D0_LPLU;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		data &= ~E1000_82580_PM_SPD;
+	} else {
+		data &= ~E1000_82580_PM_D0_LPLU;
+
+		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on)
+			data |= E1000_82580_PM_SPD;
+		else if (phy->smart_speed == e1000_smart_speed_off)
+			data &= ~E1000_82580_PM_SPD;
+	}
+
+	wr32(E1000_82580_PHY_POWER_MGMT, data);
+	return 0;
+}
+
+/**
+ *  igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D3
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u16 data;
+
+	data = rd32(E1000_82580_PHY_POWER_MGMT);
+
+	if (!active) {
+		data &= ~E1000_82580_PM_D3_LPLU;
+		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on)
+			data |= E1000_82580_PM_SPD;
+		else if (phy->smart_speed == e1000_smart_speed_off)
+			data &= ~E1000_82580_PM_SPD;
+	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= E1000_82580_PM_D3_LPLU;
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		data &= ~E1000_82580_PM_SPD;
+	}
+
+	wr32(E1000_82580_PHY_POWER_MGMT, data);
+	return 0;
+}
+
+/**
+ *  igb_acquire_nvm_82575 - Request for access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the necessary semaphores for exclusive access to the EEPROM.
+ *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ *  Return successful if access grant bit set, else clear the request for
+ *  EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_acquire_nvm(hw);
+
+	if (ret_val)
+		hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_release_nvm_82575 - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ *  then release the semaphores acquired.
+ **/
+static void igb_release_nvm_82575(struct e1000_hw *hw)
+{
+	igb_release_nvm(hw);
+	hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ *  igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
+ *  will also specify which port we're acquiring the lock for.
+ **/
+static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+	u32 swmask = mask;
+	u32 fwmask = mask << 16;
+	s32 ret_val = 0;
+	s32 i = 0, timeout = 200;
+
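+	/* SW_FW_SYNC keeps the software ownership bits in its low 16 bits and
+	 * the matching firmware ownership bits in the high 16 bits, hence
+	 * fwmask = mask << 16 above.
+	 */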
+	while (i < timeout) {
+		if (igb_get_hw_semaphore(hw)) {
+			ret_val = -E1000_ERR_SWFW_SYNC;
+			goto out;
+		}
+
+		swfw_sync = rd32(E1000_SW_FW_SYNC);
+		if (!(swfw_sync & (fwmask | swmask)))
+			break;
+
+		/* Firmware currently using resource (fwmask)
+		 * or other software thread using resource (swmask)
+		 */
+		igb_put_hw_semaphore(hw);
+		mdelay(5);
+		i++;
+	}
+
+	if (i == timeout) {
+		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
+		ret_val = -E1000_ERR_SWFW_SYNC;
+		goto out;
+	}
+
+	swfw_sync |= swmask;
+	wr32(E1000_SW_FW_SYNC, swfw_sync);
+
+	igb_put_hw_semaphore(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_release_swfw_sync_82575 - Release SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to release
+ *
+ *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
+ *  will also specify which port we're releasing the lock for.
+ **/
+static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+
+	while (igb_get_hw_semaphore(hw) != 0)
+		; /* Empty */
+
+	swfw_sync = rd32(E1000_SW_FW_SYNC);
+	swfw_sync &= ~mask;
+	wr32(E1000_SW_FW_SYNC, swfw_sync);
+
+	igb_put_hw_semaphore(hw);
+}
+
+/**
+ *  igb_get_cfg_done_82575 - Read config done bit
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the management control register for the config done bit for
+ *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
+ *  to read the config done bit, so the error is *ONLY* logged and 0 is
+ *  returned.  If we were to return an error, EEPROM-less silicon
+ *  would not be able to be reset or change link.
+ **/
+static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
+{
+	s32 timeout = PHY_CFG_TIMEOUT;
+	u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+	if (hw->bus.func == 1)
+		mask = E1000_NVM_CFG_DONE_PORT_1;
+	else if (hw->bus.func == E1000_FUNC_2)
+		mask = E1000_NVM_CFG_DONE_PORT_2;
+	else if (hw->bus.func == E1000_FUNC_3)
+		mask = E1000_NVM_CFG_DONE_PORT_3;
+
+	while (timeout) {
+		if (rd32(E1000_EEMNGCTL) & mask)
+			break;
+		usleep_range(1000, 2000);
+		timeout--;
+	}
+	if (!timeout)
+		hw_dbg("MNG configuration cycle has not completed.\n");
+
+	/* If EEPROM is not marked present, init the PHY manually */
+	if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
+	    (hw->phy.type == e1000_phy_igp_3))
+		igb_phy_init_script_igp3(hw);
+
+	return 0;
+}
+
+/**
+ *  igb_get_link_up_info_82575 - Get link speed/duplex info
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  This is a wrapper function, if using the serial gigabit media independent
+ *  interface, use PCS to retrieve the link speed and duplex information.
+ *  Otherwise, use the generic function to get the link speed and duplex info.
+ **/
+static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+					u16 *duplex)
+{
+	s32 ret_val;
+
+	if (hw->phy.media_type != e1000_media_type_copper)
+		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed,
+							       duplex);
+	else
+		ret_val = igb_get_speed_and_duplex_copper(hw, speed,
+								    duplex);
+
+	return ret_val;
+}
+
+/**
+ *  igb_check_for_link_82575 - Check for link
+ *  @hw: pointer to the HW structure
+ *
+ *  If sgmii is enabled, then use the pcs register to determine link, otherwise
+ *  use the generic interface for determining link.
+ **/
+static s32 igb_check_for_link_82575(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 speed, duplex;
+
+	if (hw->phy.media_type != e1000_media_type_copper) {
+		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
+							     &duplex);
+		/* Use this flag to determine if link needs to be checked or
+		 * not.  If we have link, clear the flag so that we do not
+		 * continue to check for link.
+		 */
+		hw->mac.get_link_status = !hw->mac.serdes_has_link;
+
+		/* Configure Flow Control now that Auto-Neg has completed.
+		 * First, we need to restore the desired flow control
+		 * settings because we may have had to re-autoneg with a
+		 * different link partner.
+		 */
+		ret_val = igb_config_fc_after_link_up(hw);
+		if (ret_val)
+			hw_dbg("Error configuring flow control\n");
+	} else {
+		ret_val = igb_check_for_copper_link(hw);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
+ *  @hw: pointer to the HW structure
+ **/
+void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
+{
+	u32 reg;
+
+
+	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+	    !igb_sgmii_active_82575(hw))
+		return;
+
+	/* Enable PCS to turn on link */
+	reg = rd32(E1000_PCS_CFG0);
+	reg |= E1000_PCS_CFG_PCS_EN;
+	wr32(E1000_PCS_CFG0, reg);
+
+	/* Power up the laser */
+	reg = rd32(E1000_CTRL_EXT);
+	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
+	wr32(E1000_CTRL_EXT, reg);
+
+	/* flush the write to verify completion */
+	wrfl();
+	usleep_range(1000, 2000);
+}
+
+/**
+ *  igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Using the physical coding sub-layer (PCS), retrieve the current speed and
+ *  duplex, then store the values in the pointers provided.
+ **/
+static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
+						u16 *duplex)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 pcs, status;
+
+	/* Set up defaults for the return values of this function */
+	mac->serdes_has_link = false;
+	*speed = 0;
+	*duplex = 0;
+
+	/* Read the PCS Status register for link state. For non-copper mode,
+	 * the status register is not accurate. The PCS status register is
+	 * used instead.
+	 */
+	pcs = rd32(E1000_PCS_LSTAT);
+
+	/* The link up bit determines when link is up on autoneg. The sync ok
+	 * gets set once both sides sync up and agree upon link. Stable link
+	 * can be determined by checking for both link up and link sync ok
+	 */
+	if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
+		mac->serdes_has_link = true;
+
+		/* Detect and store PCS speed */
+		if (pcs & E1000_PCS_LSTS_SPEED_1000)
+			*speed = SPEED_1000;
+		else if (pcs & E1000_PCS_LSTS_SPEED_100)
+			*speed = SPEED_100;
+		else
+			*speed = SPEED_10;
+
+		/* Detect and store PCS duplex */
+		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
+			*duplex = FULL_DUPLEX;
+		else
+			*duplex = HALF_DUPLEX;
+
+		/* Check if it is an I354 2.5Gb backplane connection. */
+		if (mac->type == e1000_i354) {
+			status = rd32(E1000_STATUS);
+			if ((status & E1000_STATUS_2P5_SKU) &&
+			    !(status & E1000_STATUS_2P5_SKU_OVER)) {
+				*speed = SPEED_2500;
+				*duplex = FULL_DUPLEX;
+				hw_dbg("2500 Mbs, ");
+				hw_dbg("Full Duplex\n");
+			}
+		}
+
+	}
+
+	return 0;
+}
+
+/**
+ *  igb_shutdown_serdes_link_82575 - Remove link during power down
+ *  @hw: pointer to the HW structure
+ *
+ *  In the case of fiber serdes, shut down optics and PCS on driver unload
+ *  when management pass thru is not enabled.
+ **/
+void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
+{
+	u32 reg;
+
+	if (hw->phy.media_type != e1000_media_type_internal_serdes &&
+	    igb_sgmii_active_82575(hw))
+		return;
+
+	if (!igb_enable_mng_pass_thru(hw)) {
+		/* Disable PCS to turn off link */
+		reg = rd32(E1000_PCS_CFG0);
+		reg &= ~E1000_PCS_CFG_PCS_EN;
+		wr32(E1000_PCS_CFG0, reg);
+
+		/* shutdown the laser */
+		reg = rd32(E1000_CTRL_EXT);
+		reg |= E1000_CTRL_EXT_SDP3_DATA;
+		wr32(E1000_CTRL_EXT, reg);
+
+		/* flush the write to verify completion */
+		wrfl();
+		usleep_range(1000, 2000);
+	}
+}
+
+/**
+ *  igb_reset_hw_82575 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 igb_reset_hw_82575(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+
+	/* Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = igb_disable_pcie_master(hw);
+	if (ret_val)
+		hw_dbg("PCI-E Master disable polling has failed.\n");
+
+	/* set the completion timeout for interface */
+	ret_val = igb_set_pcie_completion_timeout(hw);
+	if (ret_val)
+		hw_dbg("PCI-E Set completion timeout has failed.\n");
+
+	hw_dbg("Masking off all interrupts\n");
+	wr32(E1000_IMC, 0xffffffff);
+
+	wr32(E1000_RCTL, 0);
+	wr32(E1000_TCTL, E1000_TCTL_PSP);
+	wrfl();
+
+	usleep_range(10000, 20000);
+
+	ctrl = rd32(E1000_CTRL);
+
+	hw_dbg("Issuing a global reset to MAC\n");
+	wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+	ret_val = igb_get_auto_rd_done(hw);
+	if (ret_val) {
+		/* When auto config read does not complete, do not
+		 * return with an error. This can happen in situations
+		 * where there is no eeprom and prevents getting link.
+		 */
+		hw_dbg("Auto Read Done did not complete\n");
+	}
+
+	/* If EEPROM is not present, run manual init scripts */
+	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
+		igb_reset_init_script_82575(hw);
+
+	/* Clear any pending interrupt events. */
+	wr32(E1000_IMC, 0xffffffff);
+	rd32(E1000_ICR);
+
+	/* Install any alternate MAC address into RAR0 */
+	ret_val = igb_check_alt_mac_addr(hw);
+
+	return ret_val;
+}
+
+/**
+ *  igb_init_hw_82575 - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation.
+ **/
+static s32 igb_init_hw_82575(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	u16 i, rar_count = mac->rar_entry_count;
+
+	if ((hw->mac.type >= e1000_i210) &&
+	    !(igb_get_flash_presence_i210(hw))) {
+		ret_val = igb_pll_workaround_i210(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/* Initialize identification LED */
+	ret_val = igb_id_led_init(hw);
+	if (ret_val) {
+		hw_dbg("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
+	}
+
+	/* Disabling VLAN filtering */
+	hw_dbg("Initializing the IEEE VLAN\n");
+	if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
+		igb_clear_vfta_i350(hw);
+	else
+		igb_clear_vfta(hw);
+
+	/* Setup the receive address */
+	igb_init_rx_addrs(hw, rar_count);
+
+	/* Zero out the Multicast HASH table */
+	hw_dbg("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++)
+		array_wr32(E1000_MTA, i, 0);
+
+	/* Zero out the Unicast HASH table */
+	hw_dbg("Zeroing the UTA\n");
+	for (i = 0; i < mac->uta_reg_count; i++)
+		array_wr32(E1000_UTA, i, 0);
+
+	/* Setup link and flow control */
+	ret_val = igb_setup_link(hw);
+
+	/* Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	igb_clear_hw_cntrs_82575(hw);
+	return ret_val;
+}
+
+/**
+ *  igb_setup_copper_link_82575 - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the link for auto-neg or forced speed and duplex.  Then we check
+ *  for link, once link is established calls to configure collision distance
+ *  and flow control are called.
+ **/
+static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32  ret_val;
+	u32 phpm_reg;
+
+	ctrl = rd32(E1000_CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	wr32(E1000_CTRL, ctrl);
+
+	/* Clear Go Link Disconnect bit on supported devices */
+	switch (hw->mac.type) {
+	case e1000_82580:
+	case e1000_i350:
+	case e1000_i210:
+	case e1000_i211:
+		phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
+		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+		wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
+		break;
+	default:
+		break;
+	}
+
+	ret_val = igb_setup_serdes_link_82575(hw);
+	if (ret_val)
+		goto out;
+
+	if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
+		/* allow time for SFP cage to power up phy */
+		msleep(300);
+
+		ret_val = hw->phy.ops.reset(hw);
+		if (ret_val) {
+			hw_dbg("Error resetting the PHY.\n");
+			goto out;
+		}
+	}
+	switch (hw->phy.type) {
+	case e1000_phy_i210:
+	case e1000_phy_m88:
+		switch (hw->phy.id) {
+		case I347AT4_E_PHY_ID:
+		case M88E1112_E_PHY_ID:
+		case M88E1543_E_PHY_ID:
+		case I210_I_PHY_ID:
+			ret_val = igb_copper_link_setup_m88_gen2(hw);
+			break;
+		default:
+			ret_val = igb_copper_link_setup_m88(hw);
+			break;
+		}
+		break;
+	case e1000_phy_igp_3:
+		ret_val = igb_copper_link_setup_igp(hw);
+		break;
+	case e1000_phy_82580:
+		ret_val = igb_copper_link_setup_82580(hw);
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		break;
+	}
+
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_setup_copper_link(hw);
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_setup_serdes_link_82575 - Setup link for serdes
+ *  @hw: pointer to the HW structure
+ *
+ *  Configure the physical coding sub-layer (PCS) link.  The PCS link is
+ *  used on copper connections where the serialized gigabit media independent
+ *  interface (sgmii) or serdes fiber is being used.  Configures the link
+ *  for auto-negotiation or forces speed/duplex.
+ **/
+static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
+{
+	u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
+	bool pcs_autoneg;
+	s32 ret_val = 0;
+	u16 data;
+
+	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+	    !igb_sgmii_active_82575(hw))
+		return ret_val;
+
+
+	/* On the 82575, SerDes loopback mode persists until it is
+	 * explicitly turned off or a power cycle is performed.  A read to
+	 * the register does not indicate its status.  Therefore, we ensure
+	 * loopback mode is disabled during initialization.
+	 */
+	wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+
+	/* power on the sfp cage if present and turn on I2C */
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+	ctrl_ext |= E1000_CTRL_I2C_ENA;
+	wr32(E1000_CTRL_EXT, ctrl_ext);
+
+	ctrl_reg = rd32(E1000_CTRL);
+	ctrl_reg |= E1000_CTRL_SLU;
+
+	if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
+		/* set both sw defined pins */
+		ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
+
+		/* Set switch control to serdes energy detect */
+		reg = rd32(E1000_CONNSW);
+		reg |= E1000_CONNSW_ENRGSRC;
+		wr32(E1000_CONNSW, reg);
+	}
+
+	reg = rd32(E1000_PCS_LCTL);
+
+	/* default pcs_autoneg to the same setting as mac autoneg */
+	pcs_autoneg = hw->mac.autoneg;
+
+	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+	case E1000_CTRL_EXT_LINK_MODE_SGMII:
+		/* sgmii mode lets the phy handle forcing speed/duplex */
+		pcs_autoneg = true;
+		/* autoneg time out should be disabled for SGMII mode */
+		reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
+		break;
+	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+		/* disable PCS autoneg and support parallel detect only */
+		pcs_autoneg = false;
+		fallthrough;
+	default:
+		if (hw->mac.type == e1000_82575 ||
+		    hw->mac.type == e1000_82576) {
+			ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
+			if (ret_val) {
+				hw_dbg(KERN_DEBUG "NVM Read Error\n\n");
+				return ret_val;
+			}
+
+			if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
+				pcs_autoneg = false;
+		}
+
+		/* non-SGMII modes only support a speed of 1000/Full for the
+		 * link so it is best to just force the MAC and let the pcs
+		 * link either autoneg or be forced to 1000/Full
+		 */
+		ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
+				E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+
+		/* set speed of 1000/Full if speed/duplex is forced */
+		reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
+		break;
+	}
+
+	wr32(E1000_CTRL, ctrl_reg);
+
+	/* New SerDes mode allows for forcing speed or autonegotiating speed
+	 * at 1gb. Autoneg should be the default set by most drivers. This is the
+	 * mode that will be compatible with older link partners and switches.
+	 * However, both are supported by the hardware and some drivers/tools.
+	 */
+	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
+		E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
+
+	if (pcs_autoneg) {
+		/* Set PCS register for autoneg */
+		reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
+		       E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
+
+		/* Disable force flow control for autoneg */
+		reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
+
+		/* Configure flow control advertisement for autoneg */
+		anadv_reg = rd32(E1000_PCS_ANADV);
+		anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
+		switch (hw->fc.requested_mode) {
+		case e1000_fc_full:
+		case e1000_fc_rx_pause:
+			anadv_reg |= E1000_TXCW_ASM_DIR;
+			anadv_reg |= E1000_TXCW_PAUSE;
+			break;
+		case e1000_fc_tx_pause:
+			anadv_reg |= E1000_TXCW_ASM_DIR;
+			break;
+		default:
+			break;
+		}
+		wr32(E1000_PCS_ANADV, anadv_reg);
+
+		hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
+	} else {
+		/* Set PCS register for forced link */
+		reg |= E1000_PCS_LCTL_FSD;        /* Force Speed */
+
+		/* Force flow control for forced link */
+		reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+
+		hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
+	}
+
+	wr32(E1000_PCS_LCTL, reg);
+
+	if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
+		igb_force_mac_fc(hw);
+
+	return ret_val;
+}
+
+/**
+ *  igb_sgmii_active_82575 - Return sgmii state
+ *  @hw: pointer to the HW structure
+ *
+ *  82575 silicon has a serialized gigabit media independent interface (sgmii)
+ *  which can be enabled for use in the embedded applications.  Simply
+ *  return the current state of the sgmii interface.
+ **/
+static bool igb_sgmii_active_82575(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+	return dev_spec->sgmii_active;
+}
+
+/**
+ *  igb_reset_init_script_82575 - Inits HW defaults after reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Inits recommended HW defaults after a reset when there is no EEPROM
+ *  detected. This is only for the 82575.
+ **/
+static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
+{
+	if (hw->mac.type == e1000_82575) {
+		hw_dbg("Running reset init script for 82575\n");
+		/* SerDes configuration via SERDESCTRL */
+		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
+		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
+		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
+		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);
+
+		/* CCM configuration via CCMCTL register */
+		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
+		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);
+
+		/* PCIe lanes configuration */
+		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
+		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
+		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
+		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);
+
+		/* PCIe PLL Configuration */
+		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
+		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
+		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
+	}
+
+	return 0;
+}
+
+/**
+ *  igb_read_mac_addr_82575 - Read device MAC address
+ *  @hw: pointer to the HW structure
+ **/
+static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	/* If there's an alternate MAC address place it in RAR0
+	 * so that it will override the Si installed default perm
+	 * address.
+	 */
+	ret_val = igb_check_alt_mac_addr(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_read_mac_addr(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_power_down_phy_copper_82575 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during
+ * a driver unload when wake on LAN is not enabled, remove the link.
+ **/
+void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
+		igb_power_down_phy_copper(hw);
+}
+
+/**
+ *  igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
+{
+	igb_clear_hw_cntrs_base(hw);
+
+	rd32(E1000_PRC64);
+	rd32(E1000_PRC127);
+	rd32(E1000_PRC255);
+	rd32(E1000_PRC511);
+	rd32(E1000_PRC1023);
+	rd32(E1000_PRC1522);
+	rd32(E1000_PTC64);
+	rd32(E1000_PTC127);
+	rd32(E1000_PTC255);
+	rd32(E1000_PTC511);
+	rd32(E1000_PTC1023);
+	rd32(E1000_PTC1522);
+
+	rd32(E1000_ALGNERRC);
+	rd32(E1000_RXERRC);
+	rd32(E1000_TNCRS);
+	rd32(E1000_CEXTERR);
+	rd32(E1000_TSCTC);
+	rd32(E1000_TSCTFC);
+
+	rd32(E1000_MGTPRC);
+	rd32(E1000_MGTPDC);
+	rd32(E1000_MGTPTC);
+
+	rd32(E1000_IAC);
+	rd32(E1000_ICRXOC);
+
+	rd32(E1000_ICRXPTC);
+	rd32(E1000_ICRXATC);
+	rd32(E1000_ICTXPTC);
+	rd32(E1000_ICTXATC);
+	rd32(E1000_ICTXQEC);
+	rd32(E1000_ICTXQMTC);
+	rd32(E1000_ICRXDMTC);
+
+	rd32(E1000_CBTMPC);
+	rd32(E1000_HTDPMC);
+	rd32(E1000_CBRMPC);
+	rd32(E1000_RPTHC);
+	rd32(E1000_HGPTC);
+	rd32(E1000_HTCBDPC);
+	rd32(E1000_HGORCL);
+	rd32(E1000_HGORCH);
+	rd32(E1000_HGOTCL);
+	rd32(E1000_HGOTCH);
+	rd32(E1000_LENERRS);
+
+	/* This register should not be read in copper configurations */
+	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
+	    igb_sgmii_active_82575(hw))
+		rd32(E1000_SCVPC);
+}
+
+/**
+ *  igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
+ *  @hw: pointer to the HW structure
+ *
+ *  After rx enable, if manageability is enabled, then there is likely some
+ *  bad data at the start of the fifo and possibly in the DMA fifo. This
+ *  function clears the fifos and flushes any packets that came in as rx was
+ *  being enabled.
+ **/
+void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
+{
+	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
+	int i, ms_wait;
+
+	/* disable IPv6 options as per hardware errata */
+	rfctl = rd32(E1000_RFCTL);
+	rfctl |= E1000_RFCTL_IPV6_EX_DIS;
+	wr32(E1000_RFCTL, rfctl);
+
+	if (hw->mac.type != e1000_82575 ||
+	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
+		return;
+
+	/* Disable all RX queues */
+	for (i = 0; i < 4; i++) {
+		rxdctl[i] = rd32(E1000_RXDCTL(i));
+		wr32(E1000_RXDCTL(i),
+		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
+	}
+	/* Poll all queues to verify they have shut down */
+	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
+		usleep_range(1000, 2000);
+		rx_enabled = 0;
+		for (i = 0; i < 4; i++)
+			rx_enabled |= rd32(E1000_RXDCTL(i));
+		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
+			break;
+	}
+
+	if (ms_wait == 10)
+		hw_dbg("Queue disable timed out after 10ms\n");
+
+	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
+	 * incoming packets are rejected.  Set enable and wait 2ms so that
+	 * any packet that was coming in while RCTL.EN was set is flushed.
+	 */
+	wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
+
+	rlpml = rd32(E1000_RLPML);
+	wr32(E1000_RLPML, 0);
+
+	rctl = rd32(E1000_RCTL);
+	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
+	temp_rctl |= E1000_RCTL_LPE;
+
+	wr32(E1000_RCTL, temp_rctl);
+	wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
+	wrfl();
+	usleep_range(2000, 3000);
+
+	/* Enable RX queues that were previously enabled and restore our
+	 * previous state
+	 */
+	for (i = 0; i < 4; i++)
+		wr32(E1000_RXDCTL(i), rxdctl[i]);
+	wr32(E1000_RCTL, rctl);
+	wrfl();
+
+	wr32(E1000_RLPML, rlpml);
+	wr32(E1000_RFCTL, rfctl);
+
+	/* Flush receive errors generated by workaround */
+	rd32(E1000_ROC);
+	rd32(E1000_RNBC);
+	rd32(E1000_MPC);
+}
+
+/**
+ *  igb_set_pcie_completion_timeout - set pci-e completion timeout
+ *  @hw: pointer to the HW structure
+ *
+ *  The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
+ *  however the hardware default for these parts is 500us to 1ms which is less
+ *  than the 10ms recommended by the pci-e spec.  To address this we need to
+ *  increase the value to either 10ms to 200ms for capability version 1 config,
+ *  or 16ms to 55ms for version 2.
+ **/
+static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
+{
+	u32 gcr = rd32(E1000_GCR);
+	s32 ret_val = 0;
+	u16 pcie_devctl2;
+
+	/* only take action if timeout value is defaulted to 0 */
+	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
+		goto out;
+
+	/* if capabilities version is type 1 we can write the
+	 * timeout of 10ms to 200ms through the GCR register
+	 */
+	if (!(gcr & E1000_GCR_CAP_VER2)) {
+		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
+		goto out;
+	}
+
+	/* for version 2 capabilities we need to write the config space
+	 * directly in order to set the completion timeout value for
+	 * 16ms to 55ms
+	 */
+	ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+					&pcie_devctl2);
+	if (ret_val)
+		goto out;
+
+	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
+
+	ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+					 &pcie_devctl2);
+out:
+	/* disable completion timeout resend */
+	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
+
+	wr32(E1000_GCR, gcr);
+	return ret_val;
+}
+
+/**
+ *  igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
+ *  @hw: pointer to the hardware struct
+ *  @enable: state to enter, either enabled or disabled
+ *  @pf: Physical Function pool - do not set anti-spoofing for the PF
+ *
+ *  enables/disables L2 switch anti-spoofing functionality.
+ **/
+void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
+{
+	u32 reg_val, reg_offset;
+
+	switch (hw->mac.type) {
+	case e1000_82576:
+		reg_offset = E1000_DTXSWC;
+		break;
+	case e1000_i350:
+	case e1000_i354:
+		reg_offset = E1000_TXSWC;
+		break;
+	default:
+		return;
+	}
+
+	reg_val = rd32(reg_offset);
+	if (enable) {
+		reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
+			     E1000_DTXSWC_VLAN_SPOOF_MASK);
+		/* The PF can spoof - it has to in order to
+		 * support emulation mode NICs
+		 */
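+		/* The masks above set the MAC and VLAN anti-spoof check bits
+		 * for every pool; XOR-ing clears the two bits that belong to
+		 * the PF pool again, so only the VF pools stay restricted.
+		 */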
+		reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+	} else {
+		reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
+			     E1000_DTXSWC_VLAN_SPOOF_MASK);
+	}
+	wr32(reg_offset, reg_val);
+}
+
+/**
+ *  igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
+ *  @hw: pointer to the hardware struct
+ *  @enable: state to enter, either enabled or disabled
+ *
+ *  enables/disables L2 switch loopback functionality.
+ **/
+void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
+{
+	u32 dtxswc;
+
+	switch (hw->mac.type) {
+	case e1000_82576:
+		dtxswc = rd32(E1000_DTXSWC);
+		if (enable)
+			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+		else
+			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+		wr32(E1000_DTXSWC, dtxswc);
+		break;
+	case e1000_i354:
+	case e1000_i350:
+		dtxswc = rd32(E1000_TXSWC);
+		if (enable)
+			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+		else
+			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+		wr32(E1000_TXSWC, dtxswc);
+		break;
+	default:
+		/* Currently no other hardware supports loopback */
+		break;
+	}
+
+}
+
+/**
+ *  igb_vmdq_set_replication_pf - enable or disable vmdq replication
+ *  @hw: pointer to the hardware struct
+ *  @enable: state to enter, either enabled or disabled
+ *
+ *  enables/disables replication of packets across multiple pools.
+ **/
+void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
+{
+	u32 vt_ctl = rd32(E1000_VT_CTL);
+
+	if (enable)
+		vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
+	else
+		vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
+
+	wr32(E1000_VT_CTL, vt_ctl);
+}
+
+/**
+ *  igb_read_phy_reg_82580 - Read 82580 MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the MDI control register in the PHY at offset and stores the
+ *  information read to data.
+ **/
+static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_read_phy_reg_mdic(hw, offset, data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_phy_reg_82580 - Write 82580 MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write to register at offset
+ *
+ *  Writes data to MDI control register in the PHY at offset.
+ **/
+static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_write_phy_reg_mdic(hw, offset, data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ *  the values found in the EEPROM.  This addresses an issue in which these
+ *  bits are not restored from EEPROM after reset.
+ **/
+static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u32 mdicnfg;
+	u16 nvm_data = 0;
+
+	if (hw->mac.type != e1000_82580)
+		goto out;
+	if (!igb_sgmii_active_82575(hw))
+		goto out;
+
+	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+				   NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+				   &nvm_data);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	mdicnfg = rd32(E1000_MDICNFG);
+	if (nvm_data & NVM_WORD24_EXT_MDIO)
+		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
+	if (nvm_data & NVM_WORD24_COM_MDIO)
+		mdicnfg |= E1000_MDICNFG_COM_MDIO;
+	wr32(E1000_MDICNFG, mdicnfg);
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_reset_hw_82580 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the function or entire device (all ports, etc.)
+ *  to a known state.
+ **/
+static s32 igb_reset_hw_82580(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	/* BH SW mailbox bit in SW_FW_SYNC */
+	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
+	u32 ctrl;
+	bool global_device_reset = hw->dev_spec._82575.global_device_reset;
+
+	hw->dev_spec._82575.global_device_reset = false;
+
+	/* due to hw errata, global device reset doesn't always
+	 * work on 82580
+	 */
+	if (hw->mac.type == e1000_82580)
+		global_device_reset = false;
+
+	/* Get current control state. */
+	ctrl = rd32(E1000_CTRL);
+
+	/* Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = igb_disable_pcie_master(hw);
+	if (ret_val)
+		hw_dbg("PCI-E Master disable polling has failed.\n");
+
+	hw_dbg("Masking off all interrupts\n");
+	wr32(E1000_IMC, 0xffffffff);
+	wr32(E1000_RCTL, 0);
+	wr32(E1000_TCTL, E1000_TCTL_PSP);
+	wrfl();
+
+	usleep_range(10000, 11000);
+
+	/* Determine whether or not a global dev reset is requested */
+	if (global_device_reset &&
+		hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
+		global_device_reset = false;
+
+	if (global_device_reset &&
+		!(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
+		ctrl |= E1000_CTRL_DEV_RST;
+	else
+		ctrl |= E1000_CTRL_RST;
+
+	wr32(E1000_CTRL, ctrl);
+	wrfl();
+
+	/* Add delay to ensure DEV_RST has time to complete */
+	if (global_device_reset)
+		usleep_range(5000, 6000);
+
+	ret_val = igb_get_auto_rd_done(hw);
+	if (ret_val) {
+		/* When auto config read does not complete, do not
+		 * return with an error. This can happen in situations
+		 * where there is no eeprom and prevents getting link.
+		 */
+		hw_dbg("Auto Read Done did not complete\n");
+	}
+
+	/* clear global device reset status bit */
+	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
+
+	/* Clear any pending interrupt events. */
+	wr32(E1000_IMC, 0xffffffff);
+	rd32(E1000_ICR);
+
+	ret_val = igb_reset_mdicnfg_82580(hw);
+	if (ret_val)
+		hw_dbg("Could not reset MDICNFG based on EEPROM\n");
+
+	/* Install any alternate MAC address into RAR0 */
+	ret_val = igb_check_alt_mac_addr(hw);
+
+	/* Release semaphore */
+	if (global_device_reset)
+		hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);
+
+	return ret_val;
+}
+
+/**
+ *  igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
+ *  @data: data received by reading RXPBS register
+ *
+ *  The 82580 uses a table based approach for packet buffer allocation sizes.
+ *  This function converts the retrieved value into the correct table value:
+ *     0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
+ *  0x0 36  72 144   1   2   4   8  16
+ *  0x8 35  70 140 rsv rsv rsv rsv rsv
+ */
+u16 igb_rxpbs_adjust_82580(u32 data)
+{
+	u16 ret_val = 0;
+
+	if (data < ARRAY_SIZE(e1000_82580_rxpbs_table))
+		ret_val = e1000_82580_rxpbs_table[data];
+
+	return ret_val;
+}
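+
+/*
+ * Illustrative sketch, not part of the driver: how a caller might feed the
+ * lookup above.  E1000_RXPBS is assumed here to be the register whose low
+ * field selects the table entry; the driver's real call site may differ.
+ */
+static inline u16 example_rx_pba_kb(struct e1000_hw *hw)
+{
+	/* igb_rxpbs_adjust_82580() returns 0 for out-of-range indices */
+	return igb_rxpbs_adjust_82580(rd32(E1000_RXPBS));
+}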
+
+/**
+ *  igb_validate_nvm_checksum_with_offset - Validate EEPROM
+ *  checksum
+ *  @hw: pointer to the HW structure
+ *  @offset: offset in words of the checksum protected region
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
+						 u16 offset)
+{
+	s32 ret_val = 0;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
+		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			hw_dbg("NVM Read Error\n");
+			goto out;
+		}
+		checksum += nvm_data;
+	}
+
+	if (checksum != (u16) NVM_SUM) {
+		hw_dbg("NVM Checksum Invalid\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_update_nvm_checksum_with_offset - Update EEPROM
+ *  checksum
+ *  @hw: pointer to the HW structure
+ *  @offset: offset in words of the checksum protected region
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM.
+ **/
+static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+	s32 ret_val;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
+		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			hw_dbg("NVM Read Error while updating checksum.\n");
+			goto out;
+		}
+		checksum += nvm_data;
+	}
+	checksum = (u16) NVM_SUM - checksum;
+	ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
+				&checksum);
+	if (ret_val)
+		hw_dbg("NVM Write Error while updating checksum.\n");
+
+out:
+	return ret_val;
+}
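+
+/*
+ * Illustrative sketch, not driver code: the checksum rule implemented by the
+ * two helpers above, applied to a plain in-memory copy of one protected
+ * region.  The region holds NVM_CHECKSUM_REG data words followed by one
+ * checksum word, and after an update the 16-bit sum of all of them is
+ * NVM_SUM (0xBABA).
+ */
+static void example_update_region_checksum(u16 *region)
+{
+	u16 sum = 0;
+	u16 i;
+
+	for (i = 0; i < NVM_CHECKSUM_REG; i++)
+		sum += region[i];
+
+	region[NVM_CHECKSUM_REG] = (u16)NVM_SUM - sum;
+}
+
+static bool example_region_checksum_ok(const u16 *region)
+{
+	u16 sum = 0;
+	u16 i;
+
+	for (i = 0; i < NVM_CHECKSUM_REG + 1; i++)
+		sum += region[i];
+
+	return sum == (u16)NVM_SUM;
+}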
+
+/**
+ *  igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM section checksum by reading/adding each word of
+ *  the EEPROM and then verifies that the sum of the EEPROM is
+ *  equal to 0xBABA.
+ **/
+static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 eeprom_regions_count = 1;
+	u16 j, nvm_data;
+	u16 nvm_offset;
+
+	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
+		/* if checksums compatibility bit is set validate checksums
+		 * for all 4 ports.
+		 */
+		eeprom_regions_count = 4;
+	}
+
+	for (j = 0; j < eeprom_regions_count; j++) {
+		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+		ret_val = igb_validate_nvm_checksum_with_offset(hw,
+								nvm_offset);
+		if (ret_val != 0)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_update_nvm_checksum_82580 - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM section checksums for all 4 ports by reading/adding
+ *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
+ *  checksum and writes the value to the EEPROM.
+ **/
+static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 j, nvm_data;
+	u16 nvm_offset;
+
+	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+	if (ret_val) {
+		hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
+		goto out;
+	}
+
+	if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
+		/* set compatibility bit to validate checksums appropriately */
+		nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
+		ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
+					&nvm_data);
+		if (ret_val) {
+			hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
+			goto out;
+		}
+	}
+
+	for (j = 0; j < 4; j++) {
+		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+		ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM section checksum by reading/adding each word of
+ *  the EEPROM and then verifies that the sum of the EEPROM is
+ *  equal to 0xBABA.
+ **/
+static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 j;
+	u16 nvm_offset;
+
+	for (j = 0; j < 4; j++) {
+		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+		ret_val = igb_validate_nvm_checksum_with_offset(hw,
+								nvm_offset);
+		if (ret_val != 0)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_update_nvm_checksum_i350 - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM section checksums for all 4 ports by reading/adding
+ *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
+ *  checksum and writes the value to the EEPROM.
+ **/
+static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 j;
+	u16 nvm_offset;
+
+	for (j = 0; j < 4; j++) {
+		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+		ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
+		if (ret_val != 0)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  __igb_access_emi_reg - Read/write EMI register
+ *  @hw: pointer to the HW structure
+ *  @address: EMI address to program
+ *  @data: pointer to value to read/write from/to the EMI address
+ *  @read: boolean flag to indicate read or write
+ **/
+static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
+				  u16 *data, bool read)
+{
+	s32 ret_val = 0;
+
+	ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
+	if (ret_val)
+		return ret_val;
+
+	if (read)
+		ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
+	else
+		ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
+
+	return ret_val;
+}
+
+/**
+ *  igb_read_emi_reg - Read Extended Management Interface register
+ *  @hw: pointer to the HW structure
+ *  @addr: EMI address to program
+ *  @data: value to be read from the EMI address
+ **/
+s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
+{
+	return __igb_access_emi_reg(hw, addr, data, true);
+}
+
+/**
+ *  igb_set_eee_i350 - Enable/disable EEE support
+ *  @hw: pointer to the HW structure
+ *  @adv1G: boolean flag enabling 1G EEE advertisement
+ *  @adv100M: boolean flag enabling 100M EEE advertisement
+ *
+ *  Enable/disable EEE based on setting in dev_spec structure.
+ *
+ **/
+s32 igb_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M)
+{
+	u32 ipcnfg, eeer;
+
+	if ((hw->mac.type < e1000_i350) ||
+	    (hw->phy.media_type != e1000_media_type_copper))
+		goto out;
+	ipcnfg = rd32(E1000_IPCNFG);
+	eeer = rd32(E1000_EEER);
+
+	/* enable or disable per user setting */
+	if (!(hw->dev_spec._82575.eee_disable)) {
+		u32 eee_su = rd32(E1000_EEE_SU);
+
+		if (adv100M)
+			ipcnfg |= E1000_IPCNFG_EEE_100M_AN;
+		else
+			ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN;
+
+		if (adv1G)
+			ipcnfg |= E1000_IPCNFG_EEE_1G_AN;
+		else
+			ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN;
+
+		eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
+			E1000_EEER_LPI_FC);
+
+		/* This bit should not be set in normal operation. */
+		if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
+			hw_dbg("LPI Clock Stop Bit should not be set!\n");
+
+	} else {
+		ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
+			E1000_IPCNFG_EEE_100M_AN);
+		eeer &= ~(E1000_EEER_TX_LPI_EN |
+			E1000_EEER_RX_LPI_EN |
+			E1000_EEER_LPI_FC);
+	}
+	wr32(E1000_IPCNFG, ipcnfg);
+	wr32(E1000_EEER, eeer);
+	rd32(E1000_IPCNFG);
+	rd32(E1000_EEER);
+out:
+
+	return 0;
+}
+
+/**
+ *  igb_set_eee_i354 - Enable/disable EEE support
+ *  @hw: pointer to the HW structure
+ *  @adv1G: boolean flag enabling 1G EEE advertisement
+ *  @adv100M: boolean flag enabling 100M EEE advertisement
+ *
+ *  Enable/disable EEE legacy mode based on setting in dev_spec structure.
+ *
+ **/
+s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 phy_data;
+
+	if ((hw->phy.media_type != e1000_media_type_copper) ||
+	    (phy->id != M88E1543_E_PHY_ID))
+		goto out;
+
+	if (!hw->dev_spec._82575.eee_disable) {
+		/* Switch to PHY page 18. */
+		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
+					    &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
+		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
+					     phy_data);
+		if (ret_val)
+			goto out;
+
+		/* Return the PHY to page 0. */
+		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+		if (ret_val)
+			goto out;
+
+		/* Turn on EEE advertisement. */
+		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+					     E1000_EEE_ADV_DEV_I354,
+					     &phy_data);
+		if (ret_val)
+			goto out;
+
+		if (adv100M)
+			phy_data |= E1000_EEE_ADV_100_SUPPORTED;
+		else
+			phy_data &= ~E1000_EEE_ADV_100_SUPPORTED;
+
+		if (adv1G)
+			phy_data |= E1000_EEE_ADV_1000_SUPPORTED;
+		else
+			phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED;
+
+		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+						E1000_EEE_ADV_DEV_I354,
+						phy_data);
+	} else {
+		/* Turn off EEE advertisement. */
+		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+					     E1000_EEE_ADV_DEV_I354,
+					     &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
+			      E1000_EEE_ADV_1000_SUPPORTED);
+		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+					      E1000_EEE_ADV_DEV_I354,
+					      phy_data);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_eee_status_i354 - Get EEE status
+ *  @hw: pointer to the HW structure
+ *  @status: EEE status
+ *
+ *  Get EEE status by guessing based on whether Tx or Rx LPI indications have
+ *  been received.
+ **/
+s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 phy_data;
+
+	/* Check if EEE is supported on this device. */
+	if ((hw->phy.media_type != e1000_media_type_copper) ||
+	    (phy->id != M88E1543_E_PHY_ID))
+		goto out;
+
+	ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
+				     E1000_PCS_STATUS_DEV_I354,
+				     &phy_data);
+	if (ret_val)
+		goto out;
+
+	*status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
+			      E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
+
+out:
+	return ret_val;
+}
+
+static const u8 e1000_emc_temp_data[4] = {
+	E1000_EMC_INTERNAL_DATA,
+	E1000_EMC_DIODE1_DATA,
+	E1000_EMC_DIODE2_DATA,
+	E1000_EMC_DIODE3_DATA
+};
+static const u8 e1000_emc_therm_limit[4] = {
+	E1000_EMC_INTERNAL_THERM_LIMIT,
+	E1000_EMC_DIODE1_THERM_LIMIT,
+	E1000_EMC_DIODE2_THERM_LIMIT,
+	E1000_EMC_DIODE3_THERM_LIMIT
+};
+
+#ifdef CONFIG_IGB_HWMON
+/**
+ *  igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
+ *  @hw: pointer to hardware structure
+ *
+ *  Updates the temperatures in mac.thermal_sensor_data
+ **/
+static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
+{
+	u16 ets_offset;
+	u16 ets_cfg;
+	u16 ets_sensor;
+	u8  num_sensors;
+	u8  sensor_index;
+	u8  sensor_location;
+	u8  i;
+	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
+		return E1000_NOT_IMPLEMENTED;
+
+	data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF);
+
+	/* Return the internal sensor only if ETS is unsupported */
+	hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
+	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+		return 0;
+
+	hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
+	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+	    != NVM_ETS_TYPE_EMC)
+		return E1000_NOT_IMPLEMENTED;
+
+	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
+	if (num_sensors > E1000_MAX_SENSORS)
+		num_sensors = E1000_MAX_SENSORS;
+
+	for (i = 1; i < num_sensors; i++) {
+		hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
+		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
+				NVM_ETS_DATA_INDEX_SHIFT);
+		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
+				   NVM_ETS_DATA_LOC_SHIFT);
+
+		if (sensor_location != 0)
+			hw->phy.ops.read_i2c_byte(hw,
+					e1000_emc_temp_data[sensor_index],
+					E1000_I2C_THERMAL_SENSOR_ADDR,
+					&data->sensor[i].temp);
+	}
+	return 0;
+}
+
+/**
+ *  igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the thermal sensor thresholds according to the NVM map
+ *  and save off the threshold and location values into mac.thermal_sensor_data
+ **/
+static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
+{
+	u16 ets_offset;
+	u16 ets_cfg;
+	u16 ets_sensor;
+	u8  low_thresh_delta;
+	u8  num_sensors;
+	u8  sensor_index;
+	u8  sensor_location;
+	u8  therm_limit;
+	u8  i;
+	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
+		return E1000_NOT_IMPLEMENTED;
+
+	memset(data, 0, sizeof(struct e1000_thermal_sensor_data));
+
+	data->sensor[0].location = 0x1;
+	data->sensor[0].caution_thresh =
+		(rd32(E1000_THHIGHTC) & 0xFF);
+	data->sensor[0].max_op_thresh =
+		(rd32(E1000_THLOWTC) & 0xFF);
+
+	/* Return the internal sensor only if ETS is unsupported */
+	hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
+	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+		return 0;
+
+	hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
+	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+	    != NVM_ETS_TYPE_EMC)
+		return E1000_NOT_IMPLEMENTED;
+
+	low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
+			    NVM_ETS_LTHRES_DELTA_SHIFT);
+	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
+
+	for (i = 1; i <= num_sensors; i++) {
+		hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
+		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
+				NVM_ETS_DATA_INDEX_SHIFT);
+		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
+				   NVM_ETS_DATA_LOC_SHIFT);
+		therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;
+
+		hw->phy.ops.write_i2c_byte(hw,
+			e1000_emc_therm_limit[sensor_index],
+			E1000_I2C_THERMAL_SENSOR_ADDR,
+			therm_limit);
+
+		if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) {
+			data->sensor[i].location = sensor_location;
+			data->sensor[i].caution_thresh = therm_limit;
+			data->sensor[i].max_op_thresh = therm_limit -
+							low_thresh_delta;
+		}
+	}
+	return 0;
+}
+
+#endif
+static struct e1000_mac_operations e1000_mac_ops_82575 = {
+	.init_hw              = igb_init_hw_82575,
+	.check_for_link       = igb_check_for_link_82575,
+	.rar_set              = igb_rar_set,
+	.read_mac_addr        = igb_read_mac_addr_82575,
+	.get_speed_and_duplex = igb_get_link_up_info_82575,
+#ifdef CONFIG_IGB_HWMON
+	.get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
+	.init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
+#endif
+};
+
+static struct e1000_phy_operations e1000_phy_ops_82575 = {
+	.acquire              = igb_acquire_phy_82575,
+	.get_cfg_done         = igb_get_cfg_done_82575,
+	.release              = igb_release_phy_82575,
+	.write_i2c_byte       = igb_write_i2c_byte,
+	.read_i2c_byte        = igb_read_i2c_byte,
+};
+
+static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
+	.acquire              = igb_acquire_nvm_82575,
+	.read                 = igb_read_nvm_eerd,
+	.release              = igb_release_nvm_82575,
+	.write                = igb_write_nvm_spi,
+};
+
+const struct e1000_info e1000_82575_info = {
+	.get_invariants = igb_get_invariants_82575,
+	.mac_ops = &e1000_mac_ops_82575,
+	.phy_ops = &e1000_phy_ops_82575,
+	.nvm_ops = &e1000_nvm_ops_82575,
+};
+
+++ linux-patched/drivers/xenomai/net/drivers/igb/e1000_nvm.c	2022-03-21 12:58:29.412888737 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/igb/e1000_defines.h	1970-01-01 01:00:00.000000000 +0100
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+
+#include "e1000_mac.h"
+#include "e1000_nvm.h"
+
+/**
+ *  igb_raise_eec_clk - Raise EEPROM clock
+ *  @hw: pointer to the HW structure
+ *  @eecd: pointer to the EEPROM
+ *
+ *  Enable/Raise the EEPROM clock bit.
+ **/
+static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd | E1000_EECD_SK;
+	wr32(E1000_EECD, *eecd);
+	wrfl();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ *  igb_lower_eec_clk - Lower EEPROM clock
+ *  @hw: pointer to the HW structure
+ *  @eecd: pointer to the EEPROM
+ *
+ *  Clear/Lower the EEPROM clock bit.
+ **/
+static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd & ~E1000_EECD_SK;
+	wr32(E1000_EECD, *eecd);
+	wrfl();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ *  igb_shift_out_eec_bits - Shift data bits out to the EEPROM
+ *  @hw: pointer to the HW structure
+ *  @data: data to send to the EEPROM
+ *  @count: number of bits to shift out
+ *
+ *  We need to shift 'count' bits out to the EEPROM.  So, the value in the
+ *  "data" parameter will be shifted out to the EEPROM one bit at a time.
+ *  In order to do this, "data" must be broken down into bits.
+ **/
+static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = rd32(E1000_EECD);
+	u32 mask;
+
+	mask = 0x01 << (count - 1);
+	if (nvm->type == e1000_nvm_eeprom_spi)
+		eecd |= E1000_EECD_DO;
+
+	do {
+		eecd &= ~E1000_EECD_DI;
+
+		if (data & mask)
+			eecd |= E1000_EECD_DI;
+
+		wr32(E1000_EECD, eecd);
+		wrfl();
+
+		udelay(nvm->delay_usec);
+
+		igb_raise_eec_clk(hw, &eecd);
+		igb_lower_eec_clk(hw, &eecd);
+
+		mask >>= 1;
+	} while (mask);
+
+	eecd &= ~E1000_EECD_DI;
+	wr32(E1000_EECD, eecd);
+}
+
+/**
+ *  igb_shift_in_eec_bits - Shift data bits in from the EEPROM
+ *  @hw: pointer to the HW structure
+ *  @count: number of bits to shift in
+ *
+ *  In order to read a register from the EEPROM, we need to shift 'count' bits
+ *  in from the EEPROM.  Bits are "shifted in" by raising the clock input to
+ *  the EEPROM (setting the SK bit), and then reading the value of the data out
+ *  "DO" bit.  During this "shifting in" process the data in "DI" bit should
+ *  always be clear.
+ **/
+static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+{
+	u32 eecd;
+	u32 i;
+	u16 data;
+
+	eecd = rd32(E1000_EECD);
+
+	eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+	data = 0;
+
+	for (i = 0; i < count; i++) {
+		data <<= 1;
+		igb_raise_eec_clk(hw, &eecd);
+
+		eecd = rd32(E1000_EECD);
+
+		eecd &= ~E1000_EECD_DI;
+		if (eecd & E1000_EECD_DO)
+			data |= 1;
+
+		igb_lower_eec_clk(hw, &eecd);
+	}
+
+	return data;
+}
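+
+/*
+ * Illustrative sketch, not driver code: the MSB-first framing used by
+ * igb_shift_out_eec_bits() and igb_shift_in_eec_bits() above, modeled on a
+ * plain integer instead of the EECD pins.  The sender walks a mask from bit
+ * (count - 1) down to bit 0; the receiver left-shifts its accumulator and
+ * ORs in each sampled bit, so the value below comes back out unchanged.
+ */
+static u16 example_shift_bits_msb_first(u16 data, u16 count)
+{
+	u32 mask = 0x01 << (count - 1);
+	u16 rebuilt = 0;
+
+	while (mask) {
+		/* sample the bit that would sit on the DI/DO line */
+		rebuilt = (rebuilt << 1) | ((data & mask) ? 1 : 0);
+		mask >>= 1;
+	}
+
+	return rebuilt;	/* equals the low 'count' bits of 'data' */
+}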
+
+/**
+ *  igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ *  @hw: pointer to the HW structure
+ *  @ee_reg: EEPROM flag for polling
+ *
+ *  Polls the EEPROM status bit for either read or write completion based
+ *  upon the value of 'ee_reg'.
+ **/
+static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+{
+	u32 attempts = 100000;
+	u32 i, reg = 0;
+	s32 ret_val = -E1000_ERR_NVM;
+
+	for (i = 0; i < attempts; i++) {
+		if (ee_reg == E1000_NVM_POLL_READ)
+			reg = rd32(E1000_EERD);
+		else
+			reg = rd32(E1000_EEWR);
+
+		if (reg & E1000_NVM_RW_REG_DONE) {
+			ret_val = 0;
+			break;
+		}
+
+		udelay(5);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  igb_acquire_nvm - Generic request for access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ *  Return successful if access grant bit set, else clear the request for
+ *  EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+s32 igb_acquire_nvm(struct e1000_hw *hw)
+{
+	u32 eecd = rd32(E1000_EECD);
+	s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
+	s32 ret_val = 0;
+
+
+	wr32(E1000_EECD, eecd | E1000_EECD_REQ);
+	eecd = rd32(E1000_EECD);
+
+	while (timeout) {
+		if (eecd & E1000_EECD_GNT)
+			break;
+		udelay(5);
+		eecd = rd32(E1000_EECD);
+		timeout--;
+	}
+
+	if (!timeout) {
+		eecd &= ~E1000_EECD_REQ;
+		wr32(E1000_EECD, eecd);
+		hw_dbg("Could not acquire NVM grant\n");
+		ret_val = -E1000_ERR_NVM;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  igb_standby_nvm - Return EEPROM to standby state
+ *  @hw: pointer to the HW structure
+ *
+ *  Return the EEPROM to a standby state.
+ **/
+static void igb_standby_nvm(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = rd32(E1000_EECD);
+
+	if (nvm->type == e1000_nvm_eeprom_spi) {
+		/* Toggle CS to flush commands */
+		eecd |= E1000_EECD_CS;
+		wr32(E1000_EECD, eecd);
+		wrfl();
+		udelay(nvm->delay_usec);
+		eecd &= ~E1000_EECD_CS;
+		wr32(E1000_EECD, eecd);
+		wrfl();
+		udelay(nvm->delay_usec);
+	}
+}
+
+/**
+ *  e1000_stop_nvm - Terminate EEPROM command
+ *  @hw: pointer to the HW structure
+ *
+ *  Terminates the current command by inverting the EEPROM's chip select pin.
+ **/
+static void e1000_stop_nvm(struct e1000_hw *hw)
+{
+	u32 eecd;
+
+	eecd = rd32(E1000_EECD);
+	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+		/* Pull CS high */
+		eecd |= E1000_EECD_CS;
+		igb_lower_eec_clk(hw, &eecd);
+	}
+}
+
+/**
+ *  igb_release_nvm - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void igb_release_nvm(struct e1000_hw *hw)
+{
+	u32 eecd;
+
+	e1000_stop_nvm(hw);
+
+	eecd = rd32(E1000_EECD);
+	eecd &= ~E1000_EECD_REQ;
+	wr32(E1000_EECD, eecd);
+}
+
+/**
+ *  igb_ready_nvm_eeprom - Prepares EEPROM for read/write
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up the EEPROM for reading and writing.
+ **/
+static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = rd32(E1000_EECD);
+	s32 ret_val = 0;
+	u16 timeout = 0;
+	u8 spi_stat_reg;
+
+
+	if (nvm->type == e1000_nvm_eeprom_spi) {
+		/* Clear SK and CS */
+		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+		wr32(E1000_EECD, eecd);
+		wrfl();
+		udelay(1);
+		timeout = NVM_MAX_RETRY_SPI;
+
+		/* Read "Status Register" repeatedly until the LSB is cleared.
+		 * The EEPROM will signal that the command has been completed
+		 * by clearing bit 0 of the internal status register.  If it's
+		 * not cleared within 'timeout', then error out.
+		 */
+		while (timeout) {
+			igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+					       hw->nvm.opcode_bits);
+			spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8);
+			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+				break;
+
+			udelay(5);
+			igb_standby_nvm(hw);
+			timeout--;
+		}
+
+		if (!timeout) {
+			hw_dbg("SPI NVM Status error\n");
+			ret_val = -E1000_ERR_NVM;
+			goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_read_nvm_spi - Read EEPROM using SPI
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM.
+ **/
+s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i = 0;
+	s32 ret_val;
+	u16 word_in;
+	u8 read_opcode = NVM_READ_OPCODE_SPI;
+
+	/* A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		hw_dbg("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	ret_val = nvm->ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_ready_nvm_eeprom(hw);
+	if (ret_val)
+		goto release;
+
+	igb_standby_nvm(hw);
+
+	if ((nvm->address_bits == 8) && (offset >= 128))
+		read_opcode |= NVM_A8_OPCODE_SPI;
+
+	/* Send the READ command (opcode + addr) */
+	igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+	igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
+
+	/* Read the data.  SPI NVMs increment the address with each byte
+	 * read and will roll over if reading beyond the end.  This allows
+	 * us to read the whole NVM from any offset
+	 */
+	for (i = 0; i < words; i++) {
+		word_in = igb_shift_in_eec_bits(hw, 16);
+		data[i] = (word_in >> 8) | (word_in << 8);
+	}
+
+release:
+	nvm->ops.release(hw);
+
+out:
+	return ret_val;
+}
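+
+/*
+ * Illustrative sketch, not driver code: the byte swap applied to each word
+ * in igb_read_nvm_spi() above.  The shift-in helper lands the first byte
+ * received in the high half of word_in; assuming the EEPROM stores the low
+ * byte of each word first (which is what the swap implies), the halves must
+ * be exchanged to recover the logical word, e.g. a stored 0x1234 arrives as
+ * 0x3412.
+ */
+static u16 example_swap_nvm_word(u16 word_in)
+{
+	return (word_in >> 8) | (word_in << 8);
+}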
+
+/**
+ *  igb_read_nvm_eerd - Reads EEPROM using EERD register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i, eerd = 0;
+	s32 ret_val = 0;
+
+	/* A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		hw_dbg("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	for (i = 0; i < words; i++) {
+		eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
+			E1000_NVM_RW_REG_START;
+
+		wr32(E1000_EERD, eerd);
+		ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+		if (ret_val)
+			break;
+
+		data[i] = (rd32(E1000_EERD) >>
+			E1000_NVM_RW_REG_DATA);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_nvm_spi - Write to EEPROM using SPI
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  Writes data to EEPROM at offset using SPI interface.
+ *
+ *  If e1000_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32 ret_val = -E1000_ERR_NVM;
+	u16 widx = 0;
+
+	/* A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		hw_dbg("nvm parameter(s) out of bounds\n");
+		return ret_val;
+	}
+
+	while (widx < words) {
+		u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+		ret_val = nvm->ops.acquire(hw);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = igb_ready_nvm_eeprom(hw);
+		if (ret_val) {
+			nvm->ops.release(hw);
+			return ret_val;
+		}
+
+		igb_standby_nvm(hw);
+
+		/* Send the WRITE ENABLE command (8 bit opcode) */
+		igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+					 nvm->opcode_bits);
+
+		igb_standby_nvm(hw);
+
+		/* Some SPI eeproms use the 8th address bit embedded in the
+		 * opcode
+		 */
+		if ((nvm->address_bits == 8) && (offset >= 128))
+			write_opcode |= NVM_A8_OPCODE_SPI;
+
+		/* Send the Write command (8-bit opcode + addr) */
+		igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+		igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+					 nvm->address_bits);
+
+		/* Loop to allow for up to whole page write of eeprom */
+		while (widx < words) {
+			u16 word_out = data[widx];
+
+			word_out = (word_out >> 8) | (word_out << 8);
+			igb_shift_out_eec_bits(hw, word_out, 16);
+			widx++;
+
+			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+				igb_standby_nvm(hw);
+				break;
+			}
+		}
+		usleep_range(1000, 2000);
+		nvm->ops.release(hw);
+	}
+
+	return ret_val;
+}
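+
+/*
+ * Illustrative sketch, not driver code: the page-boundary test used by the
+ * inner loop of igb_write_nvm_spi() above.  SPI EEPROM writes must not cross
+ * a page, so once the byte address just past the last word written lands on
+ * a page boundary the burst is ended (igb_standby_nvm) and a fresh WRITE
+ * command is issued for the next page.
+ */
+static bool example_write_burst_ends_here(u16 offset_words, u16 words_written,
+					  u16 page_size_bytes)
+{
+	u32 next_byte = ((u32)offset_words + words_written) * 2;
+
+	return (next_byte % page_size_bytes) == 0;
+}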
+
+/**
+ *  igb_read_part_string - Read device part number
+ *  @hw: pointer to the HW structure
+ *  @part_num: pointer to device part number
+ *  @part_num_size: size of part number buffer
+ *
+ *  Reads the product board assembly (PBA) number from the EEPROM and stores
+ *  the value in part_num.
+ **/
+s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)
+{
+	s32 ret_val;
+	u16 nvm_data;
+	u16 pointer;
+	u16 offset;
+	u16 length;
+
+	if (part_num == NULL) {
+		hw_dbg("PBA string buffer was null\n");
+		ret_val = E1000_ERR_INVALID_ARGUMENT;
+		goto out;
+	}
+
+	ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	/* if nvm_data is not the pointer guard, the PBA must be in legacy
+	 * format, which means pointer is actually our second data word for
+	 * the PBA number and we can decode it into an ASCII string
+	 */
+	if (nvm_data != NVM_PBA_PTR_GUARD) {
+		hw_dbg("NVM PBA number is not stored as string\n");
+
+		/* we will need 11 characters to store the PBA */
+		if (part_num_size < 11) {
+			hw_dbg("PBA string buffer too small\n");
+			return E1000_ERR_NO_SPACE;
+		}
+
+		/* extract hex string from data and pointer */
+		part_num[0] = (nvm_data >> 12) & 0xF;
+		part_num[1] = (nvm_data >> 8) & 0xF;
+		part_num[2] = (nvm_data >> 4) & 0xF;
+		part_num[3] = nvm_data & 0xF;
+		part_num[4] = (pointer >> 12) & 0xF;
+		part_num[5] = (pointer >> 8) & 0xF;
+		part_num[6] = '-';
+		part_num[7] = 0;
+		part_num[8] = (pointer >> 4) & 0xF;
+		part_num[9] = pointer & 0xF;
+
+		/* put a null character on the end of our string */
+		part_num[10] = '\0';
+
+		/* switch all the data but the '-' to hex char */
+		for (offset = 0; offset < 10; offset++) {
+			if (part_num[offset] < 0xA)
+				part_num[offset] += '0';
+			else if (part_num[offset] < 0x10)
+				part_num[offset] += 'A' - 0xA;
+		}
+
+		goto out;
+	}
+
+	ret_val = hw->nvm.ops.read(hw, pointer, 1, &length);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if (length == 0xFFFF || length == 0) {
+		hw_dbg("NVM PBA number section invalid length\n");
+		ret_val = E1000_ERR_NVM_PBA_SECTION;
+		goto out;
+	}
+	/* check if part_num buffer is big enough */
+	if (part_num_size < (((u32)length * 2) - 1)) {
+		hw_dbg("PBA string buffer too small\n");
+		ret_val = E1000_ERR_NO_SPACE;
+		goto out;
+	}
+
+	/* trim pba length from start of string */
+	pointer++;
+	length--;
+
+	for (offset = 0; offset < length; offset++) {
+		ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data);
+		if (ret_val) {
+			hw_dbg("NVM Read Error\n");
+			goto out;
+		}
+		part_num[offset * 2] = (u8)(nvm_data >> 8);
+		part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+	}
+	part_num[offset * 2] = '\0';
+
+out:
+	return ret_val;
+}
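+
+/*
+ * Illustrative sketch, not driver code: the legacy decode performed above
+ * when the pointer-guard word is absent.  The two NVM words are expanded
+ * nibble by nibble into the "XXXXXX-0XX" form; for example, nvm_data of
+ * 0x1234 and a second word of 0x56FA decode to "123456-0FA".
+ */
+static void example_decode_legacy_pba(u16 nvm_data, u16 pointer, char out[11])
+{
+	static const char hex[] = "0123456789ABCDEF";
+
+	out[0] = hex[(nvm_data >> 12) & 0xF];
+	out[1] = hex[(nvm_data >> 8) & 0xF];
+	out[2] = hex[(nvm_data >> 4) & 0xF];
+	out[3] = hex[nvm_data & 0xF];
+	out[4] = hex[(pointer >> 12) & 0xF];
+	out[5] = hex[(pointer >> 8) & 0xF];
+	out[6] = '-';
+	out[7] = '0';
+	out[8] = hex[(pointer >> 4) & 0xF];
+	out[9] = hex[pointer & 0xF];
+	out[10] = '\0';
+}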
+
+/**
+ *  igb_read_mac_addr - Read device MAC address
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the device MAC address from the EEPROM and stores the value.
+ *  Since devices with two ports use the same EEPROM, we increment the
+ *  last bit in the MAC address for the second port.
+ **/
+s32 igb_read_mac_addr(struct e1000_hw *hw)
+{
+	u32 rar_high;
+	u32 rar_low;
+	u16 i;
+
+	rar_high = rd32(E1000_RAH(0));
+	rar_low = rd32(E1000_RAL(0));
+
+	for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+		hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
+
+	for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+		hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
+
+	for (i = 0; i < ETH_ALEN; i++)
+		hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+	return 0;
+}
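+
+/*
+ * Illustrative sketch, not driver code: the RAL/RAH byte layout unpacked by
+ * igb_read_mac_addr() above.  Byte i of the MAC address is byte i of the
+ * little-endian RAL word for i < 4 and byte (i - 4) of RAH for the rest, so
+ * a MAC of 00:1b:21:aa:bb:cc corresponds to RAL(0) == 0xaa211b00 and a
+ * RAH(0) low half of 0xccbb.
+ */
+static void example_unpack_rar(u32 rar_low, u32 rar_high, u8 mac[ETH_ALEN])
+{
+	int i;
+
+	for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+		mac[i] = (u8)(rar_low >> (i * 8));
+	for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+		mac[E1000_RAL_MAC_ADDR_LEN + i] = (u8)(rar_high >> (i * 8));
+}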
+
+/**
+ *  igb_validate_nvm_checksum - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			hw_dbg("NVM Read Error\n");
+			goto out;
+		}
+		checksum += nvm_data;
+	}
+
+	if (checksum != (u16) NVM_SUM) {
+		hw_dbg("NVM Checksum Invalid\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_update_nvm_checksum - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM.
+ **/
+s32 igb_update_nvm_checksum(struct e1000_hw *hw)
+{
+	s32  ret_val;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			hw_dbg("NVM Read Error while updating checksum.\n");
+			goto out;
+		}
+		checksum += nvm_data;
+	}
+	checksum = (u16) NVM_SUM - checksum;
+	ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
+	if (ret_val)
+		hw_dbg("NVM Write Error while updating checksum.\n");
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_fw_version - Get firmware version information
+ *  @hw: pointer to the HW structure
+ *  @fw_vers: pointer to output structure
+ *
+ *  Unsupported MAC types will return an all-zero version structure.
+ **/
+void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
+{
+	u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
+	u8 q, hval, rem, result;
+	u16 comb_verh, comb_verl, comb_offset;
+
+	memset(fw_vers, 0, sizeof(struct e1000_fw_version));
+
+	/* basic eeprom version numbers and bits used vary by part and by tool
+	 * used to create the nvm images. Check which data format we have.
+	 */
+	hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
+	switch (hw->mac.type) {
+	case e1000_i211:
+		igb_read_invm_version(hw, fw_vers);
+		return;
+	case e1000_82575:
+	case e1000_82576:
+	case e1000_82580:
+		/* Use this format, unless EETRACK ID exists,
+		 * then use alternate format
+		 */
+		if ((etrack_test &  NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
+			hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+			fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+					      >> NVM_MAJOR_SHIFT;
+			fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
+					      >> NVM_MINOR_SHIFT;
+			fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
+			goto etrack_id;
+		}
+		break;
+	case e1000_i210:
+		if (!(igb_get_flash_presence_i210(hw))) {
+			igb_read_invm_version(hw, fw_vers);
+			return;
+		}
+		fallthrough;
+	case e1000_i350:
+		/* find combo image version */
+		hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
+		if ((comb_offset != 0x0) &&
+		    (comb_offset != NVM_VER_INVALID)) {
+
+			hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+					 + 1), 1, &comb_verh);
+			hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
+					 1, &comb_verl);
+
+			/* get Option Rom version if it exists and is valid */
+			if ((comb_verh && comb_verl) &&
+			    ((comb_verh != NVM_VER_INVALID) &&
+			     (comb_verl != NVM_VER_INVALID))) {
+
+				fw_vers->or_valid = true;
+				fw_vers->or_major =
+					comb_verl >> NVM_COMB_VER_SHFT;
+				fw_vers->or_build =
+					(comb_verl << NVM_COMB_VER_SHFT)
+					| (comb_verh >> NVM_COMB_VER_SHFT);
+				fw_vers->or_patch =
+					comb_verh & NVM_COMB_VER_MASK;
+			}
+		}
+		break;
+	default:
+		return;
+	}
+	hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+	fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+			      >> NVM_MAJOR_SHIFT;
+
+	/* check for old style version format in newer images*/
+	if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
+		eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
+	} else {
+		eeprom_verl = (fw_version & NVM_MINOR_MASK)
+				>> NVM_MINOR_SHIFT;
+	}
+	/* Convert minor value to hex before assigning to output struct
+	 * Val to be converted will not be higher than 99, per tool output
+	 */
+	q = eeprom_verl / NVM_HEX_CONV;
+	hval = q * NVM_HEX_TENS;
+	rem = eeprom_verl % NVM_HEX_CONV;
+	result = hval + rem;
+	fw_vers->eep_minor = result;
+
+etrack_id:
+	if ((etrack_test &  NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
+		hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
+		hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+		fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
+			| eeprom_verl;
+	}
+}
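+
+/*
+ * Illustrative sketch, not driver code: the intent of the q/hval/rem
+ * arithmetic above, as its comment states -- re-encode a decimal minor
+ * version (at most 99) so that its hex representation shows the same
+ * digits, e.g. 25 becomes 0x25.  Written with literal 10 and 16 rather
+ * than the NVM_HEX_* macros the driver uses.
+ */
+static u8 example_minor_to_hex_digits(u8 minor_decimal)
+{
+	return (u8)((minor_decimal / 10) * 16 + (minor_decimal % 10));
+}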
+++ linux-patched/drivers/xenomai/net/drivers/igb/e1000_defines.h	2022-03-21 12:58:29.407888786 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/igb/igb_hwmon.c	1970-01-01 01:00:00.000000000 +0100
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_DEFINES_H_
+#define _E1000_DEFINES_H_
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE  8
+#define REQ_RX_DESCRIPTOR_MULTIPLE  8
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_PME_EN     0x00000002 /* PME Enable */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Definable Pin 2 */
+#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
+#define E1000_CTRL_EXT_SDP2_DIR  0x00000400 /* SDP2 Data direction */
+#define E1000_CTRL_EXT_SDP3_DIR  0x00000800 /* SDP3 Data direction */
+
+/* Physical Func Reset Done Indication */
+#define E1000_CTRL_EXT_PFRSTD	0x00004000
+#define E1000_CTRL_EXT_SDLPE	0x00040000  /* SerDes Low Power Enable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK	0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES	0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX	0x00400000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII	0x00800000
+#define E1000_CTRL_EXT_LINK_MODE_GMII	0x00000000
+#define E1000_CTRL_EXT_EIAME	0x01000000
+#define E1000_CTRL_EXT_IRCA		0x00000001
+/* Interrupt delay cancellation */
+/* Driver loaded bit for FW */
+#define E1000_CTRL_EXT_DRV_LOAD       0x10000000
+/* Interrupt acknowledge Auto-mask */
+/* Clear Interrupt timers after IMS clear */
+/* packet buffer parity error detection enabled */
+/* descriptor FIFO parity error detection enable */
+#define E1000_CTRL_EXT_PBA_CLR		0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_PHYPDEN		0x00100000
+#define E1000_I2CCMD_REG_ADDR_SHIFT	16
+#define E1000_I2CCMD_PHY_ADDR_SHIFT	24
+#define E1000_I2CCMD_OPCODE_READ	0x08000000
+#define E1000_I2CCMD_OPCODE_WRITE	0x00000000
+#define E1000_I2CCMD_READY		0x20000000
+#define E1000_I2CCMD_ERROR		0x80000000
+#define E1000_I2CCMD_SFP_DATA_ADDR(a)	(0x0000 + (a))
+#define E1000_I2CCMD_SFP_DIAG_ADDR(a)	(0x0100 + (a))
+#define E1000_MAX_SGMII_PHY_REG_ADDR	255
+#define E1000_I2CCMD_PHY_TIMEOUT	200
+#define E1000_IVAR_VALID		0x80
+#define E1000_GPIE_NSICR		0x00000001
+#define E1000_GPIE_MSIX_MODE		0x00000010
+#define E1000_GPIE_EIAME		0x40000000
+#define E1000_GPIE_PBA			0x80000000
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
+#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
+#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
+#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
+#define E1000_RXD_STAT_TS       0x10000 /* Pkt was time stamped */
+
+#define E1000_RXDEXT_STATERR_LB    0x00040000
+#define E1000_RXDEXT_STATERR_CE    0x01000000
+#define E1000_RXDEXT_STATERR_SE    0x02000000
+#define E1000_RXDEXT_STATERR_SEQ   0x04000000
+#define E1000_RXDEXT_STATERR_CXE   0x10000000
+#define E1000_RXDEXT_STATERR_TCPE  0x20000000
+#define E1000_RXDEXT_STATERR_IPE   0x40000000
+#define E1000_RXDEXT_STATERR_RXE   0x80000000
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+	E1000_RXDEXT_STATERR_CE  |            \
+	E1000_RXDEXT_STATERR_SE  |            \
+	E1000_RXDEXT_STATERR_SEQ |            \
+	E1000_RXDEXT_STATERR_CXE |            \
+	E1000_RXDEXT_STATERR_RXE)
+
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP          0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4              0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX       0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6              0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP          0x00200000
+
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN        0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_EN_BMC2OS     0x10000000 /* OSBMC is Enabled or not */
+/* Enable Neighbor Discovery Filtering */
+#define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
+/* Enable MAC address filtering */
+#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000
+
+/* Receive Control */
+#define E1000_RCTL_EN             0x00000002    /* enable */
+#define E1000_RCTL_SBP            0x00000004    /* store bad packet */
+#define E1000_RCTL_UPE            0x00000008    /* unicast promiscuous enable */
+#define E1000_RCTL_MPE            0x00000010    /* multicast promiscuous enab */
+#define E1000_RCTL_LPE            0x00000020    /* long packet enable */
+#define E1000_RCTL_LBM_MAC        0x00000040    /* MAC loopback mode */
+#define E1000_RCTL_LBM_TCVR       0x000000C0    /* tcvr loopback mode */
+#define E1000_RCTL_RDMTS_HALF     0x00000000    /* rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT       12            /* multicast offset shift */
+#define E1000_RCTL_BAM            0x00008000    /* broadcast enable */
+#define E1000_RCTL_SZ_512         0x00020000    /* rx buffer size 512 */
+#define E1000_RCTL_SZ_256         0x00030000    /* rx buffer size 256 */
+#define E1000_RCTL_VFE            0x00040000    /* vlan filter enable */
+#define E1000_RCTL_CFIEN          0x00080000    /* canonical form enable */
+#define E1000_RCTL_DPF            0x00400000    /* Discard Pause Frames */
+#define E1000_RCTL_PMCF           0x00800000    /* pass MAC control frames */
+#define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */
+
+/* Use byte values for the following shift parameters
+ * Usage:
+ *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE0_MASK) |
+ *                ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE1_MASK) |
+ *                ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE2_MASK) |
+ *                ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |;
+ *                  E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256],  default=256
+ *       value1 = [1024..64512], default=4096
+ *       value2 = [0..64512],    default=4096
+ *       value3 = [0..64512],    default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK   0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT  7            /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT  2            /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT  6            /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14            /* Shift _left_ 14 */
+
+/* SWFW_SYNC Definitions */
+#define E1000_SWFW_EEP_SM   0x1
+#define E1000_SWFW_PHY0_SM  0x2
+#define E1000_SWFW_PHY1_SM  0x4
+#define E1000_SWFW_PHY2_SM  0x20
+#define E1000_SWFW_PHY3_SM  0x40
+
+/* FACTPS Definitions */
+/* Device Control */
+#define E1000_CTRL_FD       0x00000001  /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */
+#define E1000_CTRL_LRST     0x00000008  /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_ASDE     0x00000020  /* Auto-speed detect enable */
+#define E1000_CTRL_SLU      0x00000040  /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS     0x00000080  /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL  0x00000300  /* Speed Select Mask */
+#define E1000_CTRL_SPD_100  0x00000100  /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200  /* Force 1Gb */
+#define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
+#define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
+/* Defined polarity of Dock/Undock indication in SDP[0] */
+/* Reset both PHY ports, through PHYRST_N pin */
+/* enable link status from external LINK_0 and LINK_1 pins */
+#define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_SDP0_DIR 0x00400000  /* SDP0 Data direction */
+#define E1000_CTRL_SDP1_DIR 0x00800000  /* SDP1 Data direction */
+#define E1000_CTRL_RST      0x04000000  /* Global reset */
+#define E1000_CTRL_RFCE     0x08000000  /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE     0x10000000  /* Transmit flow control enable */
+#define E1000_CTRL_VME      0x40000000  /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST  0x80000000  /* PHY Reset */
+/* Initiate an interrupt to manageability engine */
+#define E1000_CTRL_I2C_ENA  0x02000000  /* I2C enable */
+
+/* Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+
+#define E1000_CONNSW_ENRGSRC             0x4
+#define E1000_CONNSW_PHYSD		0x400
+#define E1000_CONNSW_PHY_PDN		0x800
+#define E1000_CONNSW_SERDESD		0x200
+#define E1000_CONNSW_AUTOSENSE_CONF	0x2
+#define E1000_CONNSW_AUTOSENSE_EN	0x1
+#define E1000_PCS_CFG_PCS_EN             8
+#define E1000_PCS_LCTL_FLV_LINK_UP       1
+#define E1000_PCS_LCTL_FSV_100           2
+#define E1000_PCS_LCTL_FSV_1000          4
+#define E1000_PCS_LCTL_FDV_FULL          8
+#define E1000_PCS_LCTL_FSD               0x10
+#define E1000_PCS_LCTL_FORCE_LINK        0x20
+#define E1000_PCS_LCTL_FORCE_FCTRL       0x80
+#define E1000_PCS_LCTL_AN_ENABLE         0x10000
+#define E1000_PCS_LCTL_AN_RESTART        0x20000
+#define E1000_PCS_LCTL_AN_TIMEOUT        0x40000
+#define E1000_ENABLE_SERDES_LOOPBACK     0x0410
+
+#define E1000_PCS_LSTS_LINK_OK           1
+#define E1000_PCS_LSTS_SPEED_100         2
+#define E1000_PCS_LSTS_SPEED_1000        4
+#define E1000_PCS_LSTS_DUPLEX_FULL       8
+#define E1000_PCS_LSTS_SYNK_OK           0x10
+
+/* Device Status */
+#define E1000_STATUS_FD         0x00000001      /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU         0x00000002      /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK  0x0000000C      /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_1     0x00000004      /* Function 1 */
+#define E1000_STATUS_TXOFF      0x00000010      /* transmission paused */
+#define E1000_STATUS_SPEED_100  0x00000040      /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080      /* Speed 1000Mb/s */
+/* Change in Dock/Undock state. Clear on write '0'. */
+/* Status of Master requests. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000
+/* BMC external code execution disabled */
+
+#define E1000_STATUS_2P5_SKU		0x00001000 /* Val of 2.5GBE SKU strap */
+#define E1000_STATUS_2P5_SKU_OVER	0x00002000 /* Val of 2.5GBE SKU Over */
+/* Constants used to interpret the masked PCI-X bus speed. */
+
+#define SPEED_10    10
+#define SPEED_100   100
+#define SPEED_1000  1000
+#define SPEED_2500  2500
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+
+#define ADVERTISE_10_HALF                 0x0001
+#define ADVERTISE_10_FULL                 0x0002
+#define ADVERTISE_100_HALF                0x0004
+#define ADVERTISE_100_FULL                0x0008
+#define ADVERTISE_1000_HALF               0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL               0x0020
+
+/* 1000/H is not supported, nor spec-compliant. */
+#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF  |  ADVERTISE_10_FULL | \
+				ADVERTISE_100_HALF |  ADVERTISE_100_FULL | \
+						      ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG      (ADVERTISE_10_HALF  |  ADVERTISE_10_FULL | \
+				ADVERTISE_100_HALF |  ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED    (ADVERTISE_100_HALF |  ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED     (ADVERTISE_10_HALF  |  ADVERTISE_10_FULL)
+#define E1000_ALL_FULL_DUPLEX  (ADVERTISE_10_FULL  |  ADVERTISE_100_FULL | \
+						      ADVERTISE_1000_FULL)
+#define E1000_ALL_HALF_DUPLEX  (ADVERTISE_10_HALF  |  ADVERTISE_100_HALF)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT   E1000_ALL_SPEED_DUPLEX
+
+/* LED Control */
+#define E1000_LEDCTL_LED0_MODE_SHIFT	0
+#define E1000_LEDCTL_LED0_BLINK		0x00000080
+#define E1000_LEDCTL_LED0_MODE_MASK	0x0000000F
+#define E1000_LEDCTL_LED0_IVRT		0x00000040
+
+#define E1000_LEDCTL_MODE_LED_ON        0xE
+#define E1000_LEDCTL_MODE_LED_OFF       0xF
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP    0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_RS     0x08000000 /* Report Status */
+#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+/* Extended desc bits for Linksec and timesync */
+
+/* Transmit Control */
+#define E1000_TCTL_EN     0x00000002    /* enable tx */
+#define E1000_TCTL_PSP    0x00000008    /* pad short packets */
+#define E1000_TCTL_CT     0x00000ff0    /* collision threshold */
+#define E1000_TCTL_COLD   0x003ff000    /* collision distance */
+#define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
+
+/* DMA Coalescing register fields */
+#define E1000_DMACR_DMACWT_MASK         0x00003FFF /* DMA Coal Watchdog Timer */
+#define E1000_DMACR_DMACTHR_MASK        0x00FF0000 /* DMA Coal Rx Threshold */
+#define E1000_DMACR_DMACTHR_SHIFT       16
+#define E1000_DMACR_DMAC_LX_MASK        0x30000000 /* Lx when no PCIe trans */
+#define E1000_DMACR_DMAC_LX_SHIFT       28
+#define E1000_DMACR_DMAC_EN             0x80000000 /* Enable DMA Coalescing */
+/* DMA Coalescing BMC-to-OS Watchdog Enable */
+#define E1000_DMACR_DC_BMC2OSW_EN	0x00008000
+
+#define E1000_DMCTXTH_DMCTTHR_MASK      0x00000FFF /* DMA Coal Tx Threshold */
+
+#define E1000_DMCTLX_TTLX_MASK          0x00000FFF /* Time to LX request */
+
+#define E1000_DMCRTRH_UTRESH_MASK       0x0007FFFF /* Rx Traffic Rate Thresh */
+#define E1000_DMCRTRH_LRPRCW            0x80000000 /* Rx pkt rate curr window */
+
+#define E1000_DMCCNT_CCOUNT_MASK        0x01FFFFFF /* DMA Coal Rx Current Cnt */
+
+#define E1000_FCRTC_RTH_COAL_MASK       0x0003FFF0 /* FC Rx Thresh High val */
+#define E1000_FCRTC_RTH_COAL_SHIFT      4
+#define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power decision */
+
+/* Timestamp in Rx buffer */
+#define E1000_RXPBS_CFG_TS_EN           0x80000000
+
+#define I210_RXPBSIZE_DEFAULT		0x000000A2 /* RXPBSIZE default */
+#define I210_TXPBSIZE_DEFAULT		0x04000014 /* TXPBSIZE default */
+
+/* SerDes Control */
+#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_IPOFL     0x00000100   /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL     0x00000200   /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_CRCOFL    0x00000800   /* CRC32 offload enable */
+#define E1000_RXCSUM_PCSD      0x00002000   /* packet checksum disabled */
+
+/* Header split receive */
+#define E1000_RFCTL_IPV6_EX_DIS         0x00010000
+#define E1000_RFCTL_LEF                 0x00040000
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD       15
+#define E1000_CT_SHIFT                  4
+#define E1000_COLLISION_DISTANCE        63
+#define E1000_COLD_SHIFT                12
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.3ac packet */
+
+#define MAX_JUMBO_FRAME_SIZE    0x3F00
+
+/* PBA constants */
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_64K 0x0040    /* 64KB */
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI         0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI      0x00000002 /* FW Semaphore bit */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW          0x00000001 /* Transmit desc written back */
+#define E1000_ICR_LSC           0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ         0x00000008 /* rx sequence error */
+#define E1000_ICR_RXDMT0        0x00000010 /* rx desc min. threshold (0) */
+#define E1000_ICR_RXT0          0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_VMMB          0x00000100 /* VM MB event */
+#define E1000_ICR_TS            0x00080000 /* Time Sync Interrupt */
+#define E1000_ICR_DRSTA         0x40000000 /* Device Reset Asserted */
+/* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_INT_ASSERTED  0x80000000
+/* LAN connected device generates an interrupt */
+#define E1000_ICR_DOUTSYNC      0x10000000 /* NIC DMA out of sync */
+
+/* Extended Interrupt Cause Read */
+#define E1000_EICR_RX_QUEUE0    0x00000001 /* Rx Queue 0 Interrupt */
+#define E1000_EICR_RX_QUEUE1    0x00000002 /* Rx Queue 1 Interrupt */
+#define E1000_EICR_RX_QUEUE2    0x00000004 /* Rx Queue 2 Interrupt */
+#define E1000_EICR_RX_QUEUE3    0x00000008 /* Rx Queue 3 Interrupt */
+#define E1000_EICR_TX_QUEUE0    0x00000100 /* Tx Queue 0 Interrupt */
+#define E1000_EICR_TX_QUEUE1    0x00000200 /* Tx Queue 1 Interrupt */
+#define E1000_EICR_TX_QUEUE2    0x00000400 /* Tx Queue 2 Interrupt */
+#define E1000_EICR_TX_QUEUE3    0x00000800 /* Tx Queue 3 Interrupt */
+#define E1000_EICR_OTHER        0x80000000 /* Interrupt Cause Active */
+/* TCP Timer */
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXT0   = Receiver Timer Interrupt (ring 0)
+ *   o TXDW   = Transmit Descriptor Written Back
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ *   o LSC    = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+	E1000_IMS_RXT0   |    \
+	E1000_IMS_TXDW   |    \
+	E1000_IMS_RXDMT0 |    \
+	E1000_IMS_RXSEQ  |    \
+	E1000_IMS_LSC    |    \
+	E1000_IMS_DOUTSYNC)
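+
+/*
+ * Illustrative sketch (not from the original sources, assumptions noted):
+ * a driver would typically arm exactly these causes by writing the mask to
+ * the Interrupt Mask Set register, e.g. with an igb-style register accessor
+ * (the wr32() helper and E1000_IMS register offset are assumed here):
+ *
+ *	wr32(E1000_IMS, IMS_ENABLE_MASK);
+ */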
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_IMS_VMMB      E1000_ICR_VMMB      /* Mail box activity */
+#define E1000_IMS_TS        E1000_ICR_TS        /* Time Sync Interrupt */
+#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_IMS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_IMS_DRSTA     E1000_ICR_DRSTA     /* Device Reset Asserted */
+#define E1000_IMS_DOUTSYNC  E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
+
+/* Extended Interrupt Mask Set */
+#define E1000_EIMS_OTHER        E1000_EICR_OTHER   /* Interrupt Cause Active */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_ICS_DRSTA     E1000_ICR_DRSTA     /* Device Reset Asserted */
+
+/* Extended Interrupt Cause Set */
+/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
+#define E1000_EITR_CNT_IGNR     0x80000000 /* Don't reset counters on write */
+
+
+/* Transmit Descriptor Control */
+/* Enable the counting of descriptors still to be processed. */
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW  0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE         0x8808
+
+/* Transmit Config Word */
+#define E1000_TXCW_ASM_DIR	0x00000100 /* TXCW asym pause direction */
+#define E1000_TXCW_PAUSE	0x00000080 /* TXCW sym pause request */
+
+/* 802.1q VLAN Packet Size */
+#define VLAN_TAG_SIZE              4    /* 802.3ac tag (not DMA'd) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
+
+/* Receive Address */
+/* Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots.  However, we reserve one of these spots
+ * (RAR[15]) for our directed address used by controllers with
+ * manageability enabled, allowing us room for 15 multicast addresses.
+ */
+#define E1000_RAH_AV  0x80000000        /* Receive address valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2
+#define E1000_RAH_POOL_MASK 0x03FC0000
+#define E1000_RAH_POOL_1 0x00040000
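+
+/*
+ * Illustrative sketch (assumption, not part of the original sources): the
+ * six MAC address octets are split across a RAL/RAH register pair, with
+ * E1000_RAH_AV marking the entry as valid, roughly:
+ *
+ *	rar_low  = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
+ *	rar_high = addr[4] | (addr[5] << 8) | E1000_RAH_AV;
+ */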
+
+/* Error Codes */
+#define E1000_ERR_NVM      1
+#define E1000_ERR_PHY      2
+#define E1000_ERR_CONFIG   3
+#define E1000_ERR_PARAM    4
+#define E1000_ERR_MAC_INIT 5
+#define E1000_ERR_RESET   9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_BLK_PHY_RESET   12
+#define E1000_ERR_SWFW_SYNC 13
+#define E1000_NOT_IMPLEMENTED 14
+#define E1000_ERR_MBX      15
+#define E1000_ERR_INVALID_ARGUMENT  16
+#define E1000_ERR_NO_SPACE          17
+#define E1000_ERR_NVM_PBA_SECTION   18
+#define E1000_ERR_INVM_VALUE_NOT_FOUND	19
+#define E1000_ERR_I2C               20
+
+/* Loop limit on how long we wait for auto-negotiation to complete */
+#define COPPER_LINK_UP_LIMIT              10
+#define PHY_AUTO_NEG_LIMIT                45
+#define PHY_FORCE_LIMIT                   20
+/* Number of 100 microseconds we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT      800
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT             100
+/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */
+/* Number of milliseconds for NVM auto read done after MAC reset. */
+#define AUTO_READ_DONE_TIMEOUT      10
+
+/* Flow Control */
+#define E1000_FCRTL_XONE 0x80000000     /* Enable XON frame transmission */
+
+#define E1000_TSYNCTXCTL_VALID    0x00000001 /* tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED  0x00000010 /* enable Tx timestamping */
+
+#define E1000_TSYNCRXCTL_VALID      0x00000001 /* rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK  0x0000000E /* rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2       0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1       0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2    0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL         0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2    0x0A
+#define E1000_TSYNCRXCTL_ENABLED    0x00000010 /* enable Rx timestamping */
+
+#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK   0x000000FF
+#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE       0x00
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE  0x01
+#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE   0x02
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
+#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
+
+#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK               0x00000F00
+#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE                 0x0000
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE            0x0100
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE       0x0200
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE      0x0300
+#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE             0x0800
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE           0x0900
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE  0x0A00
+#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE             0x0B00
+#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE           0x0C00
+#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE           0x0D00
+
+#define E1000_TIMINCA_16NS_SHIFT 24
+
+/* Time Sync Interrupt Cause/Mask Register Bits */
+
+#define TSINTR_SYS_WRAP  (1 << 0) /* SYSTIM Wrap around. */
+#define TSINTR_TXTS      (1 << 1) /* Transmit Timestamp. */
+#define TSINTR_RXTS      (1 << 2) /* Receive Timestamp. */
+#define TSINTR_TT0       (1 << 3) /* Target Time 0 Trigger. */
+#define TSINTR_TT1       (1 << 4) /* Target Time 1 Trigger. */
+#define TSINTR_AUTT0     (1 << 5) /* Auxiliary Timestamp 0 Taken. */
+#define TSINTR_AUTT1     (1 << 6) /* Auxiliary Timestamp 1 Taken. */
+#define TSINTR_TADJ      (1 << 7) /* Time Adjust Done. */
+
+#define TSYNC_INTERRUPTS TSINTR_TXTS
+#define E1000_TSICR_TXTS TSINTR_TXTS
+
+/* TSAUXC Configuration Bits */
+#define TSAUXC_EN_TT0    (1 << 0)  /* Enable target time 0. */
+#define TSAUXC_EN_TT1    (1 << 1)  /* Enable target time 1. */
+#define TSAUXC_EN_CLK0   (1 << 2)  /* Enable Configurable Frequency Clock 0. */
+#define TSAUXC_SAMP_AUT0 (1 << 3)  /* Latch SYSTIML/H into AUXSTMPL/0. */
+#define TSAUXC_ST0       (1 << 4)  /* Start Clock 0 Toggle on Target Time 0. */
+#define TSAUXC_EN_CLK1   (1 << 5)  /* Enable Configurable Frequency Clock 1. */
+#define TSAUXC_SAMP_AUT1 (1 << 6)  /* Latch SYSTIML/H into AUXSTMPL/1. */
+#define TSAUXC_ST1       (1 << 7)  /* Start Clock 1 Toggle on Target Time 1. */
+#define TSAUXC_EN_TS0    (1 << 8)  /* Enable hardware timestamp 0. */
+#define TSAUXC_AUTT0     (1 << 9)  /* Auxiliary Timestamp Taken. */
+#define TSAUXC_EN_TS1    (1 << 10) /* Enable hardware timestamp 1. */
+#define TSAUXC_AUTT1     (1 << 11) /* Auxiliary Timestamp Taken. */
+#define TSAUXC_PLSG      (1 << 17) /* Generate a pulse. */
+#define TSAUXC_DISABLE   (1 << 31) /* Disable SYSTIM Count Operation. */
+
+/* SDP Configuration Bits */
+#define AUX0_SEL_SDP0    (0 << 0)  /* Assign SDP0 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP1    (1 << 0)  /* Assign SDP1 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP2    (2 << 0)  /* Assign SDP2 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP3    (3 << 0)  /* Assign SDP3 to auxiliary time stamp 0. */
+#define AUX0_TS_SDP_EN   (1 << 2)  /* Enable auxiliary time stamp trigger 0. */
+#define AUX1_SEL_SDP0    (0 << 3)  /* Assign SDP0 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP1    (1 << 3)  /* Assign SDP1 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP2    (2 << 3)  /* Assign SDP2 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP3    (3 << 3)  /* Assign SDP3 to auxiliary time stamp 1. */
+#define AUX1_TS_SDP_EN   (1 << 5)  /* Enable auxiliary time stamp trigger 1. */
+#define TS_SDP0_SEL_TT0  (0 << 6)  /* Target time 0 is output on SDP0. */
+#define TS_SDP0_SEL_TT1  (1 << 6)  /* Target time 1 is output on SDP0. */
+#define TS_SDP0_SEL_FC0  (2 << 6)  /* Freq clock  0 is output on SDP0. */
+#define TS_SDP0_SEL_FC1  (3 << 6)  /* Freq clock  1 is output on SDP0. */
+#define TS_SDP0_EN       (1 << 8)  /* SDP0 is assigned to Tsync. */
+#define TS_SDP1_SEL_TT0  (0 << 9)  /* Target time 0 is output on SDP1. */
+#define TS_SDP1_SEL_TT1  (1 << 9)  /* Target time 1 is output on SDP1. */
+#define TS_SDP1_SEL_FC0  (2 << 9)  /* Freq clock  0 is output on SDP1. */
+#define TS_SDP1_SEL_FC1  (3 << 9)  /* Freq clock  1 is output on SDP1. */
+#define TS_SDP1_EN       (1 << 11) /* SDP1 is assigned to Tsync. */
+#define TS_SDP2_SEL_TT0  (0 << 12) /* Target time 0 is output on SDP2. */
+#define TS_SDP2_SEL_TT1  (1 << 12) /* Target time 1 is output on SDP2. */
+#define TS_SDP2_SEL_FC0  (2 << 12) /* Freq clock  0 is output on SDP2. */
+#define TS_SDP2_SEL_FC1  (3 << 12) /* Freq clock  1 is output on SDP2. */
+#define TS_SDP2_EN       (1 << 14) /* SDP2 is assigned to Tsync. */
+#define TS_SDP3_SEL_TT0  (0 << 15) /* Target time 0 is output on SDP3. */
+#define TS_SDP3_SEL_TT1  (1 << 15) /* Target time 1 is output on SDP3. */
+#define TS_SDP3_SEL_FC0  (2 << 15) /* Freq clock  0 is output on SDP3. */
+#define TS_SDP3_SEL_FC1  (3 << 15) /* Freq clock  1 is output on SDP3. */
+#define TS_SDP3_EN       (1 << 17) /* SDP3 is assigned to Tsync. */
+
+#define E1000_MDICNFG_EXT_MDIO    0x80000000      /* MDI ext/int destination */
+#define E1000_MDICNFG_COM_MDIO    0x40000000      /* MDI shared w/ lan 0 */
+#define E1000_MDICNFG_PHY_MASK    0x03E00000
+#define E1000_MDICNFG_PHY_SHIFT   21
+
+#define E1000_MEDIA_PORT_COPPER			1
+#define E1000_MEDIA_PORT_OTHER			2
+#define E1000_M88E1112_AUTO_COPPER_SGMII	0x2
+#define E1000_M88E1112_AUTO_COPPER_BASEX	0x3
+#define E1000_M88E1112_STATUS_LINK		0x0004 /* Interface Link Bit */
+#define E1000_M88E1112_MAC_CTRL_1		0x10
+#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK	0x0380 /* Mode Select */
+#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT	7
+#define E1000_M88E1112_PAGE_ADDR		0x16
+#define E1000_M88E1112_STATUS			0x01
+
+/* PCI Express Control */
+#define E1000_GCR_CMPL_TMOUT_MASK       0x0000F000
+#define E1000_GCR_CMPL_TMOUT_10ms       0x00001000
+#define E1000_GCR_CMPL_TMOUT_RESEND     0x00010000
+#define E1000_GCR_CAP_VER2              0x00040000
+
+/* mPHY Address Control and Data Registers */
+#define E1000_MPHY_ADDR_CTL          0x0024 /* mPHY Address Control Register */
+#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000
+#define E1000_MPHY_DATA                 0x0E10 /* mPHY Data Register */
+
+/* mPHY PCS CLK Register */
+#define E1000_MPHY_PCS_CLK_REG_OFFSET  0x0004 /* mPHY PCS CLK AFE CSR Offset */
+/* mPHY Near End Digital Loopback Override Bit */
+#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
+
+#define E1000_PCS_LCTL_FORCE_FCTRL	0x80
+#define E1000_PCS_LSTS_AN_COMPLETE	0x10000
+
+/* PHY Control Register */
+#define MII_CR_FULL_DUPLEX      0x0100  /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200  /* Restart auto negotiation */
+#define MII_CR_POWER_DOWN       0x0800  /* Power down */
+#define MII_CR_AUTO_NEG_EN      0x1000  /* Auto Neg Enable */
+#define MII_CR_LOOPBACK         0x4000  /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET            0x8000  /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000       0x0040
+#define MII_CR_SPEED_100        0x2000
+#define MII_CR_SPEED_10         0x0000
+
+/* PHY Status Register */
+#define MII_SR_LINK_STATUS       0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_COMPLETE  0x0020 /* Auto Neg Complete */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_10T_HD_CAPS      0x0020   /* 10T   Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS      0x0040   /* 10T   Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS    0x0080   /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS    0x0100   /* 100TX Full Duplex Capable */
+#define NWAY_AR_PAUSE            0x0400   /* Pause operation desired */
+#define NWAY_AR_ASM_DIR          0x0800   /* Asymmetric Pause Direction bit */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_PAUSE          0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR        0x0800 /* LP Asymmetric Pause Direction bit */
+
+/* Autoneg Expansion Register */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_HD_CAPS         0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS         0x0200 /* Advertise 1000T FD capability  */
+#define CR_1000T_MS_VALUE        0x0800 /* 1=Configure PHY as Master */
+					/* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE       0x1000 /* 1=Master/Slave manual config value */
+					/* 0=Automatic Master/Slave config */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS  0x2000 /* Local receiver OK */
+
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL      0x00 /* Control Register */
+#define PHY_STATUS       0x01 /* Status Register */
+#define PHY_ID1          0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2          0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV  0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY   0x05 /* Link Partner Ability (Base Page) */
+#define PHY_1000T_CTRL   0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+
+/* NVM Control */
+#define E1000_EECD_SK        0x00000001 /* NVM Clock */
+#define E1000_EECD_CS        0x00000002 /* NVM Chip Select */
+#define E1000_EECD_DI        0x00000004 /* NVM Data In */
+#define E1000_EECD_DO        0x00000008 /* NVM Data Out */
+#define E1000_EECD_REQ       0x00000040 /* NVM Access Request */
+#define E1000_EECD_GNT       0x00000080 /* NVM Access Grant */
+#define E1000_EECD_PRES      0x00000100 /* NVM Present */
+/* NVM Addressing bits based on type 0=small, 1=large */
+#define E1000_EECD_ADDR_BITS 0x00000400
+#define E1000_NVM_GRANT_ATTEMPTS   1000 /* NVM # attempts to gain grant */
+#define E1000_EECD_AUTO_RD          0x00000200  /* NVM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK     0x00007800  /* NVM Size */
+#define E1000_EECD_SIZE_EX_SHIFT     11
+#define E1000_EECD_FLUPD_I210		0x00800000 /* Update FLASH */
+#define E1000_EECD_FLUDONE_I210		0x04000000 /* Update FLASH done*/
+#define E1000_EECD_FLASH_DETECTED_I210	0x00080000 /* FLASH detected */
+#define E1000_FLUDONE_ATTEMPTS		20000
+#define E1000_EERD_EEWR_MAX_COUNT	512 /* buffered EEPROM words rw */
+#define E1000_I210_FIFO_SEL_RX		0x00
+#define E1000_I210_FIFO_SEL_TX_QAV(_i)	(0x02 + (_i))
+#define E1000_I210_FIFO_SEL_TX_LEGACY	E1000_I210_FIFO_SEL_TX_QAV(0)
+#define E1000_I210_FIFO_SEL_BMC2OS_TX	0x06
+#define E1000_I210_FIFO_SEL_BMC2OS_RX	0x01
+#define E1000_I210_FLASH_SECTOR_SIZE	0x1000 /* 4KB FLASH sector unit size */
+/* Secure FLASH mode requires removing MSb */
+#define E1000_I210_FW_PTR_MASK		0x7FFF
+/* Firmware code revision field word offset*/
+#define E1000_I210_FW_VER_OFFSET	328
+
+/* Offset to data in NVM read/write registers */
+#define E1000_NVM_RW_REG_DATA   16
+#define E1000_NVM_RW_REG_DONE   2    /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START  1    /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2    /* Shift to the address bits */
+#define E1000_NVM_POLL_READ     0    /* Flag for polling for read complete */
+
+/* NVM Word Offsets */
+#define NVM_COMPAT                 0x0003
+#define NVM_ID_LED_SETTINGS        0x0004 /* SERDES output amplitude */
+#define NVM_VERSION                0x0005
+#define NVM_INIT_CONTROL2_REG      0x000F
+#define NVM_INIT_CONTROL3_PORT_B   0x0014
+#define NVM_INIT_CONTROL3_PORT_A   0x0024
+#define NVM_ALT_MAC_ADDR_PTR       0x0037
+#define NVM_CHECKSUM_REG           0x003F
+#define NVM_COMPATIBILITY_REG_3    0x0003
+#define NVM_COMPATIBILITY_BIT_MASK 0x8000
+#define NVM_MAC_ADDR               0x0000
+#define NVM_SUB_DEV_ID             0x000B
+#define NVM_SUB_VEN_ID             0x000C
+#define NVM_DEV_ID                 0x000D
+#define NVM_VEN_ID                 0x000E
+#define NVM_INIT_CTRL_2            0x000F
+#define NVM_INIT_CTRL_4            0x0013
+#define NVM_LED_1_CFG              0x001C
+#define NVM_LED_0_2_CFG            0x001F
+#define NVM_ETRACK_WORD            0x0042
+#define NVM_ETRACK_HIWORD          0x0043
+#define NVM_COMB_VER_OFF           0x0083
+#define NVM_COMB_VER_PTR           0x003d
+
+/* NVM version defines */
+#define NVM_MAJOR_MASK			0xF000
+#define NVM_MINOR_MASK			0x0FF0
+#define NVM_IMAGE_ID_MASK		0x000F
+#define NVM_COMB_VER_MASK		0x00FF
+#define NVM_MAJOR_SHIFT			12
+#define NVM_MINOR_SHIFT			4
+#define NVM_COMB_VER_SHFT		8
+#define NVM_VER_INVALID			0xFFFF
+#define NVM_ETRACK_SHIFT		16
+#define NVM_ETRACK_VALID		0x8000
+#define NVM_NEW_DEC_MASK		0x0F00
+#define NVM_HEX_CONV			16
+#define NVM_HEX_TENS			10
+
+#define NVM_ETS_CFG			0x003E
+#define NVM_ETS_LTHRES_DELTA_MASK	0x07C0
+#define NVM_ETS_LTHRES_DELTA_SHIFT	6
+#define NVM_ETS_TYPE_MASK		0x0038
+#define NVM_ETS_TYPE_SHIFT		3
+#define NVM_ETS_TYPE_EMC		0x000
+#define NVM_ETS_NUM_SENSORS_MASK	0x0007
+#define NVM_ETS_DATA_LOC_MASK		0x3C00
+#define NVM_ETS_DATA_LOC_SHIFT		10
+#define NVM_ETS_DATA_INDEX_MASK		0x0300
+#define NVM_ETS_DATA_INDEX_SHIFT	8
+#define NVM_ETS_DATA_HTHRESH_MASK	0x00FF
+
+#define E1000_NVM_CFG_DONE_PORT_0  0x040000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1  0x080000 /* ...for second port */
+#define E1000_NVM_CFG_DONE_PORT_2  0x100000 /* ...for third port */
+#define E1000_NVM_CFG_DONE_PORT_3  0x200000 /* ...for fourth port */
+
+#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? (0x40 + (0x40 * (a))) : 0)
+
+/* Mask bits for fields in Word 0x24 of the NVM */
+#define NVM_WORD24_COM_MDIO         0x0008 /* MDIO interface shared */
+#define NVM_WORD24_EXT_MDIO         0x0004 /* MDIO accesses routed external */
+
+/* Mask bits for fields in Word 0x0f of the NVM */
+#define NVM_WORD0F_PAUSE_MASK       0x3000
+#define NVM_WORD0F_ASM_DIR          0x2000
+
+/* Mask bits for fields in Word 0x1a of the NVM */
+
+/* length of string needed to store part num */
+#define E1000_PBANUM_LENGTH         11
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+#define NVM_SUM                    0xBABA
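+
+/*
+ * Illustrative sketch (assumption): checksum validation sums the first
+ * NVM_CHECKSUM_REG + 1 words read from the NVM and compares the 16-bit
+ * result against NVM_SUM, along the lines of:
+ *
+ *	u16 i, word, checksum = 0;
+ *
+ *	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ *		hw->nvm.ops.read(hw, i, 1, &word);
+ *		checksum += word;
+ *	}
+ *	if (checksum != (u16)NVM_SUM)
+ *		return -E1000_ERR_NVM;
+ */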
+
+#define NVM_PBA_OFFSET_0           8
+#define NVM_PBA_OFFSET_1           9
+#define NVM_RESERVED_WORD		0xFFFF
+#define NVM_PBA_PTR_GUARD          0xFAFA
+#define NVM_WORD_SIZE_BASE_SHIFT   6
+
+/* NVM Commands - Microwire */
+
+/* NVM Commands - SPI */
+#define NVM_MAX_RETRY_SPI          5000 /* Max wait of 5ms, for RDY signal */
+#define NVM_WRITE_OPCODE_SPI       0x02 /* NVM write opcode */
+#define NVM_READ_OPCODE_SPI        0x03 /* NVM read opcode */
+#define NVM_A8_OPCODE_SPI          0x08 /* opcode bit-3 = address bit-8 */
+#define NVM_WREN_OPCODE_SPI        0x06 /* NVM set Write Enable latch */
+#define NVM_RDSR_OPCODE_SPI        0x05 /* NVM read Status register */
+
+/* SPI NVM Status Register */
+#define NVM_STATUS_RDY_SPI         0x01
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT       ((ID_LED_OFF1_ON2  << 12) | \
+			      (ID_LED_OFF1_OFF2 <<  8) | \
+			      (ID_LED_DEF1_DEF2 <<  4) | \
+			      (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2     0x1
+#define ID_LED_DEF1_ON2      0x2
+#define ID_LED_DEF1_OFF2     0x3
+#define ID_LED_ON1_DEF2      0x4
+#define ID_LED_ON1_ON2       0x5
+#define ID_LED_ON1_OFF2      0x6
+#define ID_LED_OFF1_DEF2     0x7
+#define ID_LED_OFF1_ON2      0x8
+#define ID_LED_OFF1_OFF2     0x9
+
+#define IGP_ACTIVITY_LED_MASK   0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE           0x07000000
+
+/* PCI/PCI-X/PCI-EX Config space */
+#define PCIE_DEVICE_CONTROL2         0x28
+#define PCIE_DEVICE_CONTROL2_16ms    0x0005
+
+#define PHY_REVISION_MASK      0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS    0x1F  /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+/* Bit definitions for valid PHY IDs. */
+/* I = Integrated
+ * E = External
+ */
+#define M88E1111_I_PHY_ID    0x01410CC0
+#define M88E1112_E_PHY_ID    0x01410C90
+#define I347AT4_E_PHY_ID     0x01410DC0
+#define IGP03E1000_E_PHY_ID  0x02A80390
+#define I82580_I_PHY_ID      0x015403A0
+#define I350_I_PHY_ID        0x015403B0
+#define M88_VENDOR           0x0141
+#define I210_I_PHY_ID        0x01410C00
+#define M88E1543_E_PHY_ID    0x01410EA0
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS   0x11  /* PHY Specific Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14  /* Extended PHY Specific Control */
+
+#define M88E1000_PHY_PAGE_SELECT   0x1D  /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL   0x1E  /* Its meaning depends on reg 29 */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+/* 1=CLK125 low, 0=CLK125 toggling */
+#define M88E1000_PSCR_MDI_MANUAL_MODE  0x0000  /* MDI Crossover Mode bits 6:5 */
+					       /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020  /* Manual MDIX configuration */
+/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+#define M88E1000_PSCR_AUTO_X_1000T     0x0040
+/* Auto crossover enabled all speeds */
+#define M88E1000_PSCR_AUTO_X_MODE      0x0060
+/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold),
+ * 0=Normal 10BASE-T Rx Threshold
+ */
+/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX     0x0800 /* 1=Assert CRS on Transmit */
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_REV_POLARITY       0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT          0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX               0x0040 /* 1=MDIX; 0=MDI */
+/* 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-110M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define M88E1000_PSSR_CABLE_LENGTH       0x0380
+#define M88E1000_PSSR_SPEED              0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_1000MBS            0x8000 /* 10b=1000 Mb/s */
+
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
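+
+/*
+ * Illustrative sketch (assumption): the 3-bit cable length code is taken
+ * from the PHY Specific Status word and indexes the ranges listed above:
+ *
+ *	index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ *		M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ */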
+
+/* M88E1000 Extended PHY Specific Control Register */
+/* 1 = Lost lock detect enabled.
+ * Will assert lost lock and bring
+ * link down if idle not seen
+ * within 1ms in 1000BASE-T
+ */
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X   0x0000
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave
+ */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK  0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X    0x0100
+#define M88E1000_EPSCR_TX_CLK_25      0x0070 /* 25  MHz TX_CLK */
+
+/* Intel i347-AT4 Registers */
+
+#define I347AT4_PCDL                   0x10 /* PHY Cable Diagnostics Length */
+#define I347AT4_PCDC                   0x15 /* PHY Cable Diagnostics Control */
+#define I347AT4_PAGE_SELECT            0x16
+
+/* i347-AT4 Extended PHY Specific Control Register */
+
+/*  Number of times we will attempt to autonegotiate before downshifting if we
+ *  are the master
+ */
+#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
+#define I347AT4_PSCR_DOWNSHIFT_MASK   0x7000
+#define I347AT4_PSCR_DOWNSHIFT_1X     0x0000
+#define I347AT4_PSCR_DOWNSHIFT_2X     0x1000
+#define I347AT4_PSCR_DOWNSHIFT_3X     0x2000
+#define I347AT4_PSCR_DOWNSHIFT_4X     0x3000
+#define I347AT4_PSCR_DOWNSHIFT_5X     0x4000
+#define I347AT4_PSCR_DOWNSHIFT_6X     0x5000
+#define I347AT4_PSCR_DOWNSHIFT_7X     0x6000
+#define I347AT4_PSCR_DOWNSHIFT_8X     0x7000
+
+/* i347-AT4 PHY Cable Diagnostics Control */
+#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */
+
+/* Marvell 1112 only registers */
+#define M88E1112_VCT_DSP_DISTANCE       0x001A
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
+
+/* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK  0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK  0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE  0x04000000
+#define E1000_MDIC_OP_READ   0x08000000
+#define E1000_MDIC_READY     0x10000000
+#define E1000_MDIC_INT_EN    0x20000000
+#define E1000_MDIC_ERROR     0x40000000
+#define E1000_MDIC_DEST      0x80000000
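+
+/*
+ * Illustrative sketch (assumption): an MDIO read of PHY register 'offset'
+ * is started by composing the MDIC fields above, then polling READY and
+ * checking ERROR before taking the data:
+ *
+ *	mdic = (offset << E1000_MDIC_REG_SHIFT) |
+ *	       (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ *	       E1000_MDIC_OP_READ;
+ *	... write mdic to the MDIC register, wait for E1000_MDIC_READY,
+ *	    then the result is mdic & E1000_MDIC_DATA_MASK.
+ */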
+
+/* Thermal Sensor */
+#define E1000_THSTAT_PWR_DOWN       0x00000001 /* Power Down Event */
+#define E1000_THSTAT_LINK_THROTTLE  0x00000002 /* Link Speed Throttle Event */
+
+/* Energy Efficient Ethernet */
+#define E1000_IPCNFG_EEE_1G_AN       0x00000008  /* EEE Enable 1G AN */
+#define E1000_IPCNFG_EEE_100M_AN     0x00000004  /* EEE Enable 100M AN */
+#define E1000_EEER_TX_LPI_EN         0x00010000  /* EEE Tx LPI Enable */
+#define E1000_EEER_RX_LPI_EN         0x00020000  /* EEE Rx LPI Enable */
+#define E1000_EEER_FRC_AN            0x10000000  /* Enable EEE in loopback */
+#define E1000_EEER_LPI_FC            0x00040000  /* EEE Enable on FC */
+#define E1000_EEE_SU_LPI_CLK_STP     0x00800000  /* EEE LPI Clock Stop */
+#define E1000_EEER_EEE_NEG           0x20000000  /* EEE capability negotiation */
+#define E1000_EEE_LP_ADV_ADDR_I350   0x040F      /* EEE LP Advertisement */
+#define E1000_EEE_LP_ADV_DEV_I210    7           /* EEE LP Adv Device */
+#define E1000_EEE_LP_ADV_ADDR_I210   61          /* EEE LP Adv Register */
+#define E1000_MMDAC_FUNC_DATA        0x4000      /* Data, no post increment */
+#define E1000_M88E1543_PAGE_ADDR	0x16       /* Page Offset Register */
+#define E1000_M88E1543_EEE_CTRL_1	0x0
+#define E1000_M88E1543_EEE_CTRL_1_MS	0x0001     /* EEE Master/Slave */
+#define E1000_EEE_ADV_DEV_I354		7
+#define E1000_EEE_ADV_ADDR_I354		60
+#define E1000_EEE_ADV_100_SUPPORTED	(1 << 1)   /* 100BaseTx EEE Supported */
+#define E1000_EEE_ADV_1000_SUPPORTED	(1 << 2)   /* 1000BaseT EEE Supported */
+#define E1000_PCS_STATUS_DEV_I354	3
+#define E1000_PCS_STATUS_ADDR_I354	1
+#define E1000_PCS_STATUS_TX_LPI_IND	0x0200     /* Tx in LPI state */
+#define E1000_PCS_STATUS_RX_LPI_RCVD	0x0400
+#define E1000_PCS_STATUS_TX_LPI_RCVD	0x0800
+
+/* SerDes Control */
+#define E1000_GEN_CTL_READY             0x80000000
+#define E1000_GEN_CTL_ADDRESS_SHIFT     8
+#define E1000_GEN_POLL_TIMEOUT          640
+
+#define E1000_VFTA_ENTRY_SHIFT               5
+#define E1000_VFTA_ENTRY_MASK                0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK      0x1F
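+
+/*
+ * Illustrative sketch (assumption): a 12-bit VLAN ID selects one bit in the
+ * 128-entry VLAN filter table using the values above:
+ *
+ *	index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
+ *	bit   = vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK;
+ *	vfta[index] |= 1 << bit;
+ */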
+
+/* DMA Coalescing register fields */
+#define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power on DMA coal */
+
+/* Tx Rate-Scheduler Config fields */
+#define E1000_RTTBCNRC_RS_ENA		0x80000000
+#define E1000_RTTBCNRC_RF_DEC_MASK	0x00003FFF
+#define E1000_RTTBCNRC_RF_INT_SHIFT	14
+#define E1000_RTTBCNRC_RF_INT_MASK	\
+	(E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/igb/igb_hwmon.c	2022-03-21 12:58:29.402888835 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/igb/e1000_82575.h	1970-01-01 01:00:00.000000000 +0100
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#include "igb.h"
+#include "e1000_82575.h"
+#include "e1000_hw.h"
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/hwmon.h>
+#include <linux/pci.h>
+
+#ifdef CONFIG_IGB_HWMON
+static struct i2c_board_info i350_sensor_info = {
+	I2C_BOARD_INFO("i350bb", (0xF8 >> 1)),
+};
+
+/* hwmon callback functions */
+static ssize_t igb_hwmon_show_location(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+						   dev_attr);
+	return sprintf(buf, "loc%u\n",
+		       igb_attr->sensor->location);
+}
+
+static ssize_t igb_hwmon_show_temp(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+						   dev_attr);
+	unsigned int value;
+
+	/* reset the temp field */
+	igb_attr->hw->mac.ops.get_thermal_sensor_data(igb_attr->hw);
+
+	value = igb_attr->sensor->temp;
+
+	/* display millidegree */
+	value *= 1000;
+
+	return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+						   dev_attr);
+	unsigned int value = igb_attr->sensor->caution_thresh;
+
+	/* display millidegree */
+	value *= 1000;
+
+	return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+						   dev_attr);
+	unsigned int value = igb_attr->sensor->max_op_thresh;
+
+	/* display millidegree */
+	value *= 1000;
+
+	return sprintf(buf, "%u\n", value);
+}
+
+/* igb_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
+ * @adapter: pointer to the adapter structure
+ * @offset: offset in the eeprom sensor data table
+ * @type: type of sensor data to display
+ *
+ * For each file we want in hwmon's sysfs interface we need a device_attribute.
+ * This is included in our hwmon_attr struct that contains the references to
+ * the data structures we need to get the data to display.
+ */
+static int igb_add_hwmon_attr(struct igb_adapter *adapter,
+			      unsigned int offset, int type)
+{
+	int rc;
+	unsigned int n_attr;
+	struct hwmon_attr *igb_attr;
+
+	n_attr = adapter->igb_hwmon_buff->n_hwmon;
+	igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr];
+
+	switch (type) {
+	case IGB_HWMON_TYPE_LOC:
+		igb_attr->dev_attr.show = igb_hwmon_show_location;
+		snprintf(igb_attr->name, sizeof(igb_attr->name),
+			 "temp%u_label", offset + 1);
+		break;
+	case IGB_HWMON_TYPE_TEMP:
+		igb_attr->dev_attr.show = igb_hwmon_show_temp;
+		snprintf(igb_attr->name, sizeof(igb_attr->name),
+			 "temp%u_input", offset + 1);
+		break;
+	case IGB_HWMON_TYPE_CAUTION:
+		igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh;
+		snprintf(igb_attr->name, sizeof(igb_attr->name),
+			 "temp%u_max", offset + 1);
+		break;
+	case IGB_HWMON_TYPE_MAX:
+		igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh;
+		snprintf(igb_attr->name, sizeof(igb_attr->name),
+			 "temp%u_crit", offset + 1);
+		break;
+	default:
+		rc = -EPERM;
+		return rc;
+	}
+
+	/* These are always the same regardless of type */
+	igb_attr->sensor =
+		&adapter->hw.mac.thermal_sensor_data.sensor[offset];
+	igb_attr->hw = &adapter->hw;
+	igb_attr->dev_attr.store = NULL;
+	igb_attr->dev_attr.attr.mode = S_IRUGO;
+	igb_attr->dev_attr.attr.name = igb_attr->name;
+	sysfs_attr_init(&igb_attr->dev_attr.attr);
+
+	adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr;
+
+	++adapter->igb_hwmon_buff->n_hwmon;
+
+	return 0;
+}
+
+static void igb_sysfs_del_adapter(struct igb_adapter *adapter)
+{
+}
+
+/* called from igb_main.c */
+void igb_sysfs_exit(struct igb_adapter *adapter)
+{
+	igb_sysfs_del_adapter(adapter);
+}
+
+/* called from igb_main.c */
+int igb_sysfs_init(struct igb_adapter *adapter)
+{
+	struct hwmon_buff *igb_hwmon;
+	struct i2c_client *client;
+	struct device *hwmon_dev;
+	unsigned int i;
+	int rc = 0;
+
+	/* If this method isn't defined we don't support thermals */
+	if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
+		goto exit;
+
+	/* Don't create thermal hwmon interface if no sensors present */
+	rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw));
+	if (rc)
+		goto exit;
+
+	igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon),
+				 GFP_KERNEL);
+	if (!igb_hwmon) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+	adapter->igb_hwmon_buff = igb_hwmon;
+
+	for (i = 0; i < E1000_MAX_SENSORS; i++) {
+
+		/* Only create hwmon sysfs entries for sensors that have
+		 * meaningful data.
+		 */
+		if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
+			continue;
+
+		/* Bail if any hwmon attr struct fails to initialize */
+		rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION);
+		if (rc)
+			goto exit;
+		rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
+		if (rc)
+			goto exit;
+		rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
+		if (rc)
+			goto exit;
+		rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
+		if (rc)
+			goto exit;
+	}
+
+	/* init i2c_client */
+	client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
+	if (client == NULL) {
+		dev_info(&adapter->pdev->dev,
+			 "Failed to create new i2c device.\n");
+		rc = -ENODEV;
+		goto exit;
+	}
+	adapter->i2c_client = client;
+
+	igb_hwmon->groups[0] = &igb_hwmon->group;
+	igb_hwmon->group.attrs = igb_hwmon->attrs;
+
+	hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev,
+							   client->name,
+							   igb_hwmon,
+							   igb_hwmon->groups);
+	if (IS_ERR(hwmon_dev)) {
+		rc = PTR_ERR(hwmon_dev);
+		goto err;
+	}
+
+	goto exit;
+
+err:
+	igb_sysfs_del_adapter(adapter);
+exit:
+	return rc;
+}
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/igb/e1000_82575.h	2022-03-21 12:58:29.397888883 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/igb/Makefile	1970-01-01 01:00:00.000000000 +0100
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_82575_H_
+#define _E1000_82575_H_
+
+void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
+void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
+s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+		      u8 *data);
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+		       u8 data);
+
+#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
+				     (ID_LED_DEF1_DEF2 <<  8) | \
+				     (ID_LED_DEF1_DEF2 <<  4) | \
+				     (ID_LED_OFF1_ON2))
+
+#define E1000_RAR_ENTRIES_82575        16
+#define E1000_RAR_ENTRIES_82576        24
+#define E1000_RAR_ENTRIES_82580        24
+#define E1000_RAR_ENTRIES_I350         32
+
+#define E1000_SW_SYNCH_MB              0x00000100
+#define E1000_STAT_DEV_RST_SET         0x00100000
+#define E1000_CTRL_DEV_RST             0x20000000
+
+/* SRRCTL bit definitions */
+#define E1000_SRRCTL_BSIZEPKT_SHIFT                     10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT                 2  /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF                0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS          0x0A000000
+#define E1000_SRRCTL_DROP_EN                            0x80000000
+#define E1000_SRRCTL_TIMESTAMP                          0x40000000
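+
+/*
+ * Illustrative sketch (assumption): a receive queue's SRRCTL value is
+ * typically built from the packet buffer size (in 1 KB granularity) and the
+ * one-buffer advanced descriptor type:
+ *
+ *	srrctl  = rx_buffer_len >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ *	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ */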
+
+
+#define E1000_MRQC_ENABLE_RSS_4Q            0x00000002
+#define E1000_MRQC_ENABLE_VMDQ              0x00000003
+#define E1000_MRQC_RSS_FIELD_IPV4_UDP       0x00400000
+#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q       0x00000005
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP       0x00800000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX    0x01000000
+
+#define E1000_EICR_TX_QUEUE ( \
+	E1000_EICR_TX_QUEUE0 |    \
+	E1000_EICR_TX_QUEUE1 |    \
+	E1000_EICR_TX_QUEUE2 |    \
+	E1000_EICR_TX_QUEUE3)
+
+#define E1000_EICR_RX_QUEUE ( \
+	E1000_EICR_RX_QUEUE0 |    \
+	E1000_EICR_RX_QUEUE1 |    \
+	E1000_EICR_RX_QUEUE2 |    \
+	E1000_EICR_RX_QUEUE3)
+
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define E1000_IMIREXT_SIZE_BP     0x00001000  /* Packet size bypass */
+#define E1000_IMIREXT_CTRL_BP     0x00080000  /* Bypass check of ctrl bits */
+
+/* Receive Descriptor - Advanced */
+union e1000_adv_rx_desc {
+	struct {
+		__le64 pkt_addr;             /* Packet buffer address */
+		__le64 hdr_addr;             /* Header buffer address */
+	} read;
+	struct {
+		struct {
+			struct {
+				__le16 pkt_info;   /* RSS type, Packet type */
+				__le16 hdr_info;   /* Split Head, buf len */
+			} lo_dword;
+			union {
+				__le32 rss;          /* RSS Hash */
+				struct {
+					__le16 ip_id;    /* IP id */
+					__le16 csum;     /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			__le32 status_error;     /* ext status/error */
+			__le16 length;           /* Packet length */
+			__le16 vlan;             /* VLAN tag */
+		} upper;
+	} wb;  /* writeback */
+};
+
+#define E1000_RXDADV_HDRBUFLEN_MASK      0x7FE0
+#define E1000_RXDADV_HDRBUFLEN_SHIFT     5
+#define E1000_RXDADV_STAT_TS             0x10000 /* Pkt was time stamped */
+#define E1000_RXDADV_STAT_TSIP           0x08000 /* timestamp in packet */
+
+/* Transmit Descriptor - Advanced */
+union e1000_adv_tx_desc {
+	struct {
+		__le64 buffer_addr;    /* Address of descriptor's data buf */
+		__le32 cmd_type_len;
+		__le32 olinfo_status;
+	} read;
+	struct {
+		__le64 rsvd;       /* Reserved */
+		__le32 nxtseq_seed;
+		__le32 status;
+	} wb;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define E1000_ADVTXD_MAC_TSTAMP   0x00080000 /* IEEE1588 Timestamp packet */
+#define E1000_ADVTXD_DTYP_CTXT    0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA    0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP     0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS    0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS      0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DEXT    0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE     0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE     0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_PAYLEN_SHIFT    14 /* Adv desc PAYLEN shift */
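+
+/*
+ * Illustrative sketch (assumption): a minimal single-buffer advanced data
+ * descriptor combines the command bits above with the buffer length:
+ *
+ *	cmd_type = len | E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
+ *		   E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_EOP |
+ *		   E1000_ADVTXD_DCMD_RS;
+ *	olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT;
+ *	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ *	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ */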
+
+/* Context descriptors */
+struct e1000_adv_tx_context_desc {
+	__le32 vlan_macip_lens;
+	__le32 seqnum_seed;
+	__le32 type_tucmd_mlhl;
+	__le32 mss_l4len_idx;
+};
+
+#define E1000_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_TUCMD_IPV4    0x00000400  /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800  /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */
+/* IPSec Encrypt Enable for ESP */
+#define E1000_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT      16  /* Adv ctxt MSS shift */
+/* Adv ctxt IPSec SA IDX mask */
+/* Adv ctxt IPSec ESP len mask */
+
+/* Additional Transmit Descriptor Control definitions */
+#define E1000_TXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Tx Queue */
+/* Tx Queue Arbitration Priority 0=low, 1=high */
+
+/* Additional Receive Descriptor Control definitions */
+#define E1000_RXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Rx Queue */
+
+/* Direct Cache Access (DCA) definitions */
+#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */
+#define E1000_DCA_CTRL_DCA_MODE_CB2     0x02 /* DCA Mode CB2 */
+
+#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
+#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
+#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
+
+#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+
+/* Additional DCA related definitions, note change in position of CPUID */
+#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
+#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
+#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
+#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
+
+/* ETQF register bit definitions */
+#define E1000_ETQF_FILTER_ENABLE   (1 << 26)
+#define E1000_ETQF_1588            (1 << 30)
+
+/* FTQF register bit definitions */
+#define E1000_FTQF_VF_BP               0x00008000
+#define E1000_FTQF_1588_TIME_STAMP     0x08000000
+#define E1000_FTQF_MASK                0xF0000000
+#define E1000_FTQF_MASK_PROTO_BP       0x10000000
+#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
+
+#define E1000_NVM_APME_82575          0x0400
+#define MAX_NUM_VFS                   8
+
+#define E1000_DTXSWC_MAC_SPOOF_MASK   0x000000FF /* Per VF MAC spoof control */
+#define E1000_DTXSWC_VLAN_SPOOF_MASK  0x0000FF00 /* Per VF VLAN spoof control */
+#define E1000_DTXSWC_LLE_MASK         0x00FF0000 /* Per VF Local LB enables */
+#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31)  /* global VF LB enable */
+
+/* Easy defines for setting default pool, would normally be left a zero */
+#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
+#define E1000_VT_CTL_DEFAULT_POOL_MASK  (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
+
+/* Other useful VMD_CTL register defines */
+#define E1000_VT_CTL_IGNORE_MAC         (1 << 28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL   (1 << 29)
+#define E1000_VT_CTL_VM_REPL_EN         (1 << 30)
+
+/* Per VM Offload register setup */
+#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
+#define E1000_VMOLR_LPE        0x00010000 /* Accept Long packet */
+#define E1000_VMOLR_RSSE       0x00020000 /* Enable RSS */
+#define E1000_VMOLR_AUPE       0x01000000 /* Accept untagged packets */
+#define E1000_VMOLR_ROMPE      0x02000000 /* Accept overflow multicast */
+#define E1000_VMOLR_ROPE       0x04000000 /* Accept overflow unicast */
+#define E1000_VMOLR_BAM        0x08000000 /* Accept Broadcast packets */
+#define E1000_VMOLR_MPME       0x10000000 /* Multicast promiscuous mode */
+#define E1000_VMOLR_STRVLAN    0x40000000 /* Vlan stripping enable */
+#define E1000_VMOLR_STRCRC     0x80000000 /* CRC stripping enable */
+
+#define E1000_DVMOLR_HIDEVLAN  0x20000000 /* Hide vlan enable */
+#define E1000_DVMOLR_STRVLAN   0x40000000 /* Vlan stripping enable */
+#define E1000_DVMOLR_STRCRC    0x80000000 /* CRC stripping enable */
+
+#define E1000_VLVF_ARRAY_SIZE     32
+#define E1000_VLVF_VLANID_MASK    0x00000FFF
+#define E1000_VLVF_POOLSEL_SHIFT  12
+#define E1000_VLVF_POOLSEL_MASK   (0xFF << E1000_VLVF_POOLSEL_SHIFT)
+#define E1000_VLVF_LVLAN          0x00100000
+#define E1000_VLVF_VLANID_ENABLE  0x80000000
+
+#define E1000_VMVIR_VLANA_DEFAULT      0x40000000 /* Always use default VLAN */
+#define E1000_VMVIR_VLANA_NEVER        0x80000000 /* Never insert VLAN tag */
+
+#define E1000_IOVCTL 0x05BBC
+#define E1000_IOVCTL_REUSE_VFQ 0x00000001
+
+#define E1000_RPLOLR_STRVLAN   0x40000000
+#define E1000_RPLOLR_STRCRC    0x80000000
+
+#define E1000_DTXCTL_8023LL     0x0004
+#define E1000_DTXCTL_VLAN_ADDED 0x0008
+#define E1000_DTXCTL_OOS_ENABLE 0x0010
+#define E1000_DTXCTL_MDP_EN     0x0020
+#define E1000_DTXCTL_SPOOF_INT  0x0040
+
+#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT	(1 << 14)
+
+#define ALL_QUEUES   0xFFFF
+
+/* RX packet buffer size defines */
+#define E1000_RXPBS_SIZE_MASK_82576  0x0000007F
+void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);
+void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
+void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
+u16 igb_rxpbs_adjust_82580(u32 data);
+s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data);
+s32 igb_set_eee_i350(struct e1000_hw *, bool adv1G, bool adv100M);
+s32 igb_set_eee_i354(struct e1000_hw *, bool adv1G, bool adv100M);
+s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status);
+
+#define E1000_I2C_THERMAL_SENSOR_ADDR	0xF8
+#define E1000_EMC_INTERNAL_DATA		0x00
+#define E1000_EMC_INTERNAL_THERM_LIMIT	0x20
+#define E1000_EMC_DIODE1_DATA		0x01
+#define E1000_EMC_DIODE1_THERM_LIMIT	0x19
+#define E1000_EMC_DIODE2_DATA		0x23
+#define E1000_EMC_DIODE2_THERM_LIMIT	0x1A
+#define E1000_EMC_DIODE3_DATA		0x2A
+#define E1000_EMC_DIODE3_THERM_LIMIT	0x30
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/igb/Makefile	2022-03-21 12:58:29.393888922 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/igb/e1000_hw.h	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_IGB) += rt_igb.o
+
+rt_igb-y :=  					\
+	e1000_82575.o				\
+	e1000_i210.o				\
+	e1000_mac.o				\
+	e1000_mbx.o				\
+	e1000_nvm.o				\
+	e1000_phy.o				\
+	igb_hwmon.o				\
+	igb_main.o
+++ linux-patched/drivers/xenomai/net/drivers/igb/e1000_hw.h	2022-03-21 12:58:29.388888971 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/igb/igb_main.c	1970-01-01 01:00:00.000000000 +0100
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <rtnet_port.h>
+
+#include "e1000_regs.h"
+#include "e1000_defines.h"
+
+struct e1000_hw;
+
+#define E1000_DEV_ID_82576			0x10C9
+#define E1000_DEV_ID_82576_FIBER		0x10E6
+#define E1000_DEV_ID_82576_SERDES		0x10E7
+#define E1000_DEV_ID_82576_QUAD_COPPER		0x10E8
+#define E1000_DEV_ID_82576_QUAD_COPPER_ET2	0x1526
+#define E1000_DEV_ID_82576_NS			0x150A
+#define E1000_DEV_ID_82576_NS_SERDES		0x1518
+#define E1000_DEV_ID_82576_SERDES_QUAD		0x150D
+#define E1000_DEV_ID_82575EB_COPPER		0x10A7
+#define E1000_DEV_ID_82575EB_FIBER_SERDES	0x10A9
+#define E1000_DEV_ID_82575GB_QUAD_COPPER	0x10D6
+#define E1000_DEV_ID_82580_COPPER		0x150E
+#define E1000_DEV_ID_82580_FIBER		0x150F
+#define E1000_DEV_ID_82580_SERDES		0x1510
+#define E1000_DEV_ID_82580_SGMII		0x1511
+#define E1000_DEV_ID_82580_COPPER_DUAL		0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER		0x1527
+#define E1000_DEV_ID_DH89XXCC_SGMII		0x0438
+#define E1000_DEV_ID_DH89XXCC_SERDES		0x043A
+#define E1000_DEV_ID_DH89XXCC_BACKPLANE		0x043C
+#define E1000_DEV_ID_DH89XXCC_SFP		0x0440
+#define E1000_DEV_ID_I350_COPPER		0x1521
+#define E1000_DEV_ID_I350_FIBER			0x1522
+#define E1000_DEV_ID_I350_SERDES		0x1523
+#define E1000_DEV_ID_I350_SGMII			0x1524
+#define E1000_DEV_ID_I210_COPPER		0x1533
+#define E1000_DEV_ID_I210_FIBER			0x1536
+#define E1000_DEV_ID_I210_SERDES		0x1537
+#define E1000_DEV_ID_I210_SGMII			0x1538
+#define E1000_DEV_ID_I210_COPPER_FLASHLESS	0x157B
+#define E1000_DEV_ID_I210_SERDES_FLASHLESS	0x157C
+#define E1000_DEV_ID_I211_COPPER		0x1539
+#define E1000_DEV_ID_I354_BACKPLANE_1GBPS	0x1F40
+#define E1000_DEV_ID_I354_SGMII			0x1F41
+#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS	0x1F45
+
+#define E1000_REVISION_2 2
+#define E1000_REVISION_4 4
+
+#define E1000_FUNC_0     0
+#define E1000_FUNC_1     1
+#define E1000_FUNC_2     2
+#define E1000_FUNC_3     3
+
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0   0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1   3
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2   6
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3   9
+
+enum e1000_mac_type {
+	e1000_undefined = 0,
+	e1000_82575,
+	e1000_82576,
+	e1000_82580,
+	e1000_i350,
+	e1000_i354,
+	e1000_i210,
+	e1000_i211,
+	e1000_num_macs  /* List is 1-based, so subtract 1 for true count. */
+};
+
+enum e1000_media_type {
+	e1000_media_type_unknown = 0,
+	e1000_media_type_copper = 1,
+	e1000_media_type_fiber = 2,
+	e1000_media_type_internal_serdes = 3,
+	e1000_num_media_types
+};
+
+enum e1000_nvm_type {
+	e1000_nvm_unknown = 0,
+	e1000_nvm_none,
+	e1000_nvm_eeprom_spi,
+	e1000_nvm_flash_hw,
+	e1000_nvm_invm,
+	e1000_nvm_flash_sw
+};
+
+enum e1000_nvm_override {
+	e1000_nvm_override_none = 0,
+	e1000_nvm_override_spi_small,
+	e1000_nvm_override_spi_large,
+};
+
+enum e1000_phy_type {
+	e1000_phy_unknown = 0,
+	e1000_phy_none,
+	e1000_phy_m88,
+	e1000_phy_igp,
+	e1000_phy_igp_2,
+	e1000_phy_gg82563,
+	e1000_phy_igp_3,
+	e1000_phy_ife,
+	e1000_phy_82580,
+	e1000_phy_i210,
+};
+
+enum e1000_bus_type {
+	e1000_bus_type_unknown = 0,
+	e1000_bus_type_pci,
+	e1000_bus_type_pcix,
+	e1000_bus_type_pci_express,
+	e1000_bus_type_reserved
+};
+
+enum e1000_bus_speed {
+	e1000_bus_speed_unknown = 0,
+	e1000_bus_speed_33,
+	e1000_bus_speed_66,
+	e1000_bus_speed_100,
+	e1000_bus_speed_120,
+	e1000_bus_speed_133,
+	e1000_bus_speed_2500,
+	e1000_bus_speed_5000,
+	e1000_bus_speed_reserved
+};
+
+enum e1000_bus_width {
+	e1000_bus_width_unknown = 0,
+	e1000_bus_width_pcie_x1,
+	e1000_bus_width_pcie_x2,
+	e1000_bus_width_pcie_x4 = 4,
+	e1000_bus_width_pcie_x8 = 8,
+	e1000_bus_width_32,
+	e1000_bus_width_64,
+	e1000_bus_width_reserved
+};
+
+enum e1000_1000t_rx_status {
+	e1000_1000t_rx_status_not_ok = 0,
+	e1000_1000t_rx_status_ok,
+	e1000_1000t_rx_status_undefined = 0xFF
+};
+
+enum e1000_rev_polarity {
+	e1000_rev_polarity_normal = 0,
+	e1000_rev_polarity_reversed,
+	e1000_rev_polarity_undefined = 0xFF
+};
+
+enum e1000_fc_mode {
+	e1000_fc_none = 0,
+	e1000_fc_rx_pause,
+	e1000_fc_tx_pause,
+	e1000_fc_full,
+	e1000_fc_default = 0xFF
+};
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+	u64 crcerrs;
+	u64 algnerrc;
+	u64 symerrs;
+	u64 rxerrc;
+	u64 mpc;
+	u64 scc;
+	u64 ecol;
+	u64 mcc;
+	u64 latecol;
+	u64 colc;
+	u64 dc;
+	u64 tncrs;
+	u64 sec;
+	u64 cexterr;
+	u64 rlec;
+	u64 xonrxc;
+	u64 xontxc;
+	u64 xoffrxc;
+	u64 xofftxc;
+	u64 fcruc;
+	u64 prc64;
+	u64 prc127;
+	u64 prc255;
+	u64 prc511;
+	u64 prc1023;
+	u64 prc1522;
+	u64 gprc;
+	u64 bprc;
+	u64 mprc;
+	u64 gptc;
+	u64 gorc;
+	u64 gotc;
+	u64 rnbc;
+	u64 ruc;
+	u64 rfc;
+	u64 roc;
+	u64 rjc;
+	u64 mgprc;
+	u64 mgpdc;
+	u64 mgptc;
+	u64 tor;
+	u64 tot;
+	u64 tpr;
+	u64 tpt;
+	u64 ptc64;
+	u64 ptc127;
+	u64 ptc255;
+	u64 ptc511;
+	u64 ptc1023;
+	u64 ptc1522;
+	u64 mptc;
+	u64 bptc;
+	u64 tsctc;
+	u64 tsctfc;
+	u64 iac;
+	u64 icrxptc;
+	u64 icrxatc;
+	u64 ictxptc;
+	u64 ictxatc;
+	u64 ictxqec;
+	u64 ictxqmtc;
+	u64 icrxdmtc;
+	u64 icrxoc;
+	u64 cbtmpc;
+	u64 htdpmc;
+	u64 cbrdpc;
+	u64 cbrmpc;
+	u64 rpthc;
+	u64 hgptc;
+	u64 htcbdpc;
+	u64 hgorc;
+	u64 hgotc;
+	u64 lenerrs;
+	u64 scvpc;
+	u64 hrmpc;
+	u64 doosync;
+	u64 o2bgptc;
+	u64 o2bspc;
+	u64 b2ospc;
+	u64 b2ogprc;
+};
+
+struct e1000_host_mng_dhcp_cookie {
+	u32 signature;
+	u8  status;
+	u8  reserved0;
+	u16 vlan_id;
+	u32 reserved1;
+	u16 reserved2;
+	u8  reserved3;
+	u8  checksum;
+};
+
+/* Host Interface "Rev 1" */
+struct e1000_host_command_header {
+	u8 command_id;
+	u8 command_length;
+	u8 command_options;
+	u8 checksum;
+};
+
+#define E1000_HI_MAX_DATA_LENGTH     252
+struct e1000_host_command_info {
+	struct e1000_host_command_header command_header;
+	u8 command_data[E1000_HI_MAX_DATA_LENGTH];
+};
+
+/* Host Interface "Rev 2" */
+struct e1000_host_mng_command_header {
+	u8  command_id;
+	u8  checksum;
+	u16 reserved1;
+	u16 reserved2;
+	u16 command_length;
+};
+
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+struct e1000_host_mng_command_info {
+	struct e1000_host_mng_command_header command_header;
+	u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
+};
+
+#include "e1000_mac.h"
+#include "e1000_phy.h"
+#include "e1000_nvm.h"
+#include "e1000_mbx.h"
+
+struct e1000_mac_operations {
+	s32 (*check_for_link)(struct e1000_hw *);
+	s32 (*reset_hw)(struct e1000_hw *);
+	s32 (*init_hw)(struct e1000_hw *);
+	bool (*check_mng_mode)(struct e1000_hw *);
+	s32 (*setup_physical_interface)(struct e1000_hw *);
+	void (*rar_set)(struct e1000_hw *, u8 *, u32);
+	s32 (*read_mac_addr)(struct e1000_hw *);
+	s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
+	s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
+	void (*release_swfw_sync)(struct e1000_hw *, u16);
+#ifdef CONFIG_IGB_HWMON
+	s32 (*get_thermal_sensor_data)(struct e1000_hw *);
+	s32 (*init_thermal_sensor_thresh)(struct e1000_hw *);
+#endif
+
+};
+
+struct e1000_phy_operations {
+	s32 (*acquire)(struct e1000_hw *);
+	s32 (*check_polarity)(struct e1000_hw *);
+	s32 (*check_reset_block)(struct e1000_hw *);
+	s32 (*force_speed_duplex)(struct e1000_hw *);
+	s32 (*get_cfg_done)(struct e1000_hw *hw);
+	s32 (*get_cable_length)(struct e1000_hw *);
+	s32 (*get_phy_info)(struct e1000_hw *);
+	s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
+	void (*release)(struct e1000_hw *);
+	s32 (*reset)(struct e1000_hw *);
+	s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
+	s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
+	s32 (*write_reg)(struct e1000_hw *, u32, u16);
+	s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
+	s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
+};
+
+struct e1000_nvm_operations {
+	s32 (*acquire)(struct e1000_hw *);
+	s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
+	void (*release)(struct e1000_hw *);
+	s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+	s32 (*update)(struct e1000_hw *);
+	s32 (*validate)(struct e1000_hw *);
+	s32 (*valid_led_default)(struct e1000_hw *, u16 *);
+};
+
+#define E1000_MAX_SENSORS		3
+
+struct e1000_thermal_diode_data {
+	u8 location;
+	u8 temp;
+	u8 caution_thresh;
+	u8 max_op_thresh;
+};
+
+struct e1000_thermal_sensor_data {
+	struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS];
+};
+
+struct e1000_info {
+	s32 (*get_invariants)(struct e1000_hw *);
+	struct e1000_mac_operations *mac_ops;
+	struct e1000_phy_operations *phy_ops;
+	struct e1000_nvm_operations *nvm_ops;
+};
+
+extern const struct e1000_info e1000_82575_info;
+
+struct e1000_mac_info {
+	struct e1000_mac_operations ops;
+
+	u8 addr[6];
+	u8 perm_addr[6];
+
+	enum e1000_mac_type type;
+
+	u32 ledctl_default;
+	u32 ledctl_mode1;
+	u32 ledctl_mode2;
+	u32 mc_filter_type;
+	u32 txcw;
+
+	u16 mta_reg_count;
+	u16 uta_reg_count;
+
+	/* Maximum size of the MTA register table in all supported adapters */
+	#define MAX_MTA_REG 128
+	u32 mta_shadow[MAX_MTA_REG];
+	u16 rar_entry_count;
+
+	u8  forced_speed_duplex;
+
+	bool adaptive_ifs;
+	bool arc_subsystem_valid;
+	bool asf_firmware_present;
+	bool autoneg;
+	bool autoneg_failed;
+	bool disable_hw_init_bits;
+	bool get_link_status;
+	bool ifs_params_forced;
+	bool in_ifs_mode;
+	bool report_tx_early;
+	bool serdes_has_link;
+	bool tx_pkt_filtering;
+	struct e1000_thermal_sensor_data thermal_sensor_data;
+};
+
+struct e1000_phy_info {
+	struct e1000_phy_operations ops;
+
+	enum e1000_phy_type type;
+
+	enum e1000_1000t_rx_status local_rx;
+	enum e1000_1000t_rx_status remote_rx;
+	enum e1000_ms_type ms_type;
+	enum e1000_ms_type original_ms_type;
+	enum e1000_rev_polarity cable_polarity;
+	enum e1000_smart_speed smart_speed;
+
+	u32 addr;
+	u32 id;
+	u32 reset_delay_us; /* in usec */
+	u32 revision;
+
+	enum e1000_media_type media_type;
+
+	u16 autoneg_advertised;
+	u16 autoneg_mask;
+	u16 cable_length;
+	u16 max_cable_length;
+	u16 min_cable_length;
+
+	u8 mdix;
+
+	bool disable_polarity_correction;
+	bool is_mdix;
+	bool polarity_correction;
+	bool reset_disable;
+	bool speed_downgraded;
+	bool autoneg_wait_to_complete;
+};
+
+struct e1000_nvm_info {
+	struct e1000_nvm_operations ops;
+	enum e1000_nvm_type type;
+	enum e1000_nvm_override override;
+
+	u32 flash_bank_size;
+	u32 flash_base_addr;
+
+	u16 word_size;
+	u16 delay_usec;
+	u16 address_bits;
+	u16 opcode_bits;
+	u16 page_size;
+};
+
+struct e1000_bus_info {
+	enum e1000_bus_type type;
+	enum e1000_bus_speed speed;
+	enum e1000_bus_width width;
+
+	u32 snoop;
+
+	u16 func;
+	u16 pci_cmd_word;
+};
+
+struct e1000_fc_info {
+	u32 high_water;     /* Flow control high-water mark */
+	u32 low_water;      /* Flow control low-water mark */
+	u16 pause_time;     /* Flow control pause timer */
+	bool send_xon;      /* Flow control send XON */
+	bool strict_ieee;   /* Strict IEEE mode */
+	enum e1000_fc_mode current_mode; /* Type of flow control */
+	enum e1000_fc_mode requested_mode;
+};
+
+struct e1000_mbx_operations {
+	s32 (*init_params)(struct e1000_hw *hw);
+	s32 (*read)(struct e1000_hw *, u32 *, u16,  u16);
+	s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
+	s32 (*read_posted)(struct e1000_hw *, u32 *, u16,  u16);
+	s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
+	s32 (*check_for_msg)(struct e1000_hw *, u16);
+	s32 (*check_for_ack)(struct e1000_hw *, u16);
+	s32 (*check_for_rst)(struct e1000_hw *, u16);
+};
+
+struct e1000_mbx_stats {
+	u32 msgs_tx;
+	u32 msgs_rx;
+
+	u32 acks;
+	u32 reqs;
+	u32 rsts;
+};
+
+struct e1000_mbx_info {
+	struct e1000_mbx_operations ops;
+	struct e1000_mbx_stats stats;
+	u32 timeout;
+	u32 usec_delay;
+	u16 size;
+};
+
+struct e1000_dev_spec_82575 {
+	bool sgmii_active;
+	bool global_device_reset;
+	bool eee_disable;
+	bool clear_semaphore_once;
+	struct e1000_sfp_flags eth_flags;
+	bool module_plugged;
+	u8 media_port;
+	bool media_changed;
+	bool mas_capable;
+};
+
+struct e1000_hw {
+	void *back;
+
+	u8 __iomem *hw_addr;
+	u8 __iomem *flash_address;
+	unsigned long io_base;
+
+	struct e1000_mac_info  mac;
+	struct e1000_fc_info   fc;
+	struct e1000_phy_info  phy;
+	struct e1000_nvm_info  nvm;
+	struct e1000_bus_info  bus;
+	struct e1000_mbx_info mbx;
+	struct e1000_host_mng_dhcp_cookie mng_cookie;
+
+	union {
+		struct e1000_dev_spec_82575	_82575;
+	} dev_spec;
+
+	u16 device_id;
+	u16 subsystem_vendor_id;
+	u16 subsystem_device_id;
+	u16 vendor_id;
+
+	u8  revision_id;
+};
+
+struct rtnet_device *igb_get_hw_dev(struct e1000_hw *hw);
+#define hw_dbg(format, arg...) \
+	rtdev_dbg(igb_get_hw_dev(hw), format, ##arg)
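+/*
+ * Note: hw_dbg() references a local variable named 'hw', which must be
+ * in scope at each call site.
+ */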
+
+/* These functions must be implemented by drivers */
+s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+#endif /* _E1000_HW_H_ */
+++ linux-patched/drivers/xenomai/net/drivers/igb/igb_main.c	2022-03-21 12:58:29.384889010 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/igb/e1000_i210.h	1970-01-01 01:00:00.000000000 +0100
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2015 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ * Copyright(c) 2015 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/netdevice.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/net_tstamp.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/if_ether.h>
+#include <linux/aer.h>
+#include <linux/prefetch.h>
+#include <linux/pm_runtime.h>
+#include <linux/i2c.h>
+#include "igb.h"
+
+#include <rtnet_port.h>
+
+/* RTNET redefines */
+#ifdef  NETIF_F_TSO
+#undef  NETIF_F_TSO
+#define NETIF_F_TSO 0
+#endif
+
+#ifdef  NETIF_F_TSO6
+#undef  NETIF_F_TSO6
+#define NETIF_F_TSO6 0
+#endif
+
+#ifdef  NETIF_F_HW_VLAN_TX
+#undef  NETIF_F_HW_VLAN_TX
+#define NETIF_F_HW_VLAN_TX 0
+#endif
+
+#ifdef  NETIF_F_HW_VLAN_RX
+#undef  NETIF_F_HW_VLAN_RX
+#define NETIF_F_HW_VLAN_RX 0
+#endif
+
+#ifdef  NETIF_F_HW_VLAN_FILTER
+#undef  NETIF_F_HW_VLAN_FILTER
+#define NETIF_F_HW_VLAN_FILTER 0
+#endif
+
+#ifdef  IGB_MAX_TX_QUEUES
+#undef  IGB_MAX_TX_QUEUES
+#define IGB_MAX_TX_QUEUES 1
+#endif
+
+#ifdef  IGB_MAX_RX_QUEUES
+#undef  IGB_MAX_RX_QUEUES
+#define IGB_MAX_RX_QUEUES 1
+#endif
+
+#ifdef CONFIG_IGB_NAPI
+#undef CONFIG_IGB_NAPI
+#endif
+
+#ifdef IGB_HAVE_TX_TIMEOUT
+#undef IGB_HAVE_TX_TIMEOUT
+#endif
+
+#ifdef ETHTOOL_GPERMADDR
+#undef ETHTOOL_GPERMADDR
+#endif
+
+#ifdef CONFIG_PM
+#undef CONFIG_PM
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+#undef CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifdef MAX_SKB_FRAGS
+#undef MAX_SKB_FRAGS
+#define MAX_SKB_FRAGS 1
+#endif
+
+#ifdef IGB_FRAMES_SUPPORT
+#undef IGB_FRAMES_SUPPORT
+#endif
+
+#define MAJ 5
+#define MIN 2
+#define BUILD 18
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
+__stringify(BUILD) "-k"
+char igb_driver_name[] = "rt_igb";
+char igb_driver_version[] = DRV_VERSION;
+static const char igb_driver_string[] =
+				"Intel(R) Gigabit Ethernet Network Driver";
+static const char igb_copyright[] =
+				"Copyright (c) 2007-2014 Intel Corporation.";
+
+static const struct e1000_info *igb_info_tbl[] = {
+	[board_82575] = &e1000_82575_info,
+};
+
+#define MAX_UNITS 8
+static int InterruptThrottle;
+module_param(InterruptThrottle, int, 0);
+MODULE_PARM_DESC(InterruptThrottle, "Throttle interrupts (boolean, false by default)");
+
+static const struct pci_device_id igb_pci_tbl[] = {
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
+	/* required last entry */
+	{0, }
+};
+
+MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
+
+static int igb_setup_all_tx_resources(struct igb_adapter *);
+static int igb_setup_all_rx_resources(struct igb_adapter *);
+static void igb_free_all_tx_resources(struct igb_adapter *);
+static void igb_free_all_rx_resources(struct igb_adapter *);
+static void igb_setup_mrqc(struct igb_adapter *);
+static int igb_probe(struct pci_dev *, const struct pci_device_id *);
+static void igb_remove(struct pci_dev *pdev);
+static int igb_sw_init(struct igb_adapter *);
+static int igb_open(struct rtnet_device *);
+static int igb_close(struct rtnet_device *);
+static void igb_configure(struct igb_adapter *);
+static void igb_configure_tx(struct igb_adapter *);
+static void igb_configure_rx(struct igb_adapter *);
+static void igb_clean_all_tx_rings(struct igb_adapter *);
+static void igb_clean_all_rx_rings(struct igb_adapter *);
+static void igb_clean_tx_ring(struct igb_ring *);
+static void igb_clean_rx_ring(struct igb_ring *);
+static void igb_set_rx_mode(struct rtnet_device *);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+static void igb_update_phy_info(struct timer_list *);
+static void igb_watchdog(struct timer_list *);
+#else
+static void igb_update_phy_info(unsigned long);
+static void igb_watchdog(unsigned long);
+#endif
+static void igb_watchdog_task(struct work_struct *);
+static netdev_tx_t igb_xmit_frame(struct rtskb *skb, struct rtnet_device *);
+static struct net_device_stats *igb_get_stats(struct rtnet_device *);
+static int igb_intr(rtdm_irq_t *irq_handle);
+static int igb_intr_msi(rtdm_irq_t *irq_handle);
+static void igb_nrtsig_watchdog(rtdm_nrtsig_t *sig, void *data);
+static irqreturn_t igb_msix_other(int irq, void *);
+static int igb_msix_ring(rtdm_irq_t *irq_handle);
+static void igb_poll(struct igb_q_vector *);
+static bool igb_clean_tx_irq(struct igb_q_vector *);
+static bool igb_clean_rx_irq(struct igb_q_vector *, int);
+static int igb_ioctl(struct rtnet_device *, struct ifreq *ifr, int cmd);
+static void igb_reset_task(struct work_struct *);
+static void igb_vlan_mode(struct rtnet_device *netdev,
+			  netdev_features_t features);
+static int igb_vlan_rx_add_vid(struct rtnet_device *, __be16, u16);
+static void igb_restore_vlan(struct igb_adapter *);
+static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
+
+#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+static int igb_suspend(struct device *);
+#endif
+static int igb_resume(struct device *);
+static int igb_runtime_suspend(struct device *dev);
+static int igb_runtime_resume(struct device *dev);
+static int igb_runtime_idle(struct device *dev);
+static const struct dev_pm_ops igb_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
+	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
+			igb_runtime_idle)
+};
+#endif
+static void igb_shutdown(struct pci_dev *);
+static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* for netdump / net console */
+static void igb_netpoll(struct rtnet_device *);
+#endif
+
+static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
+		     pci_channel_state_t);
+static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
+static void igb_io_resume(struct pci_dev *);
+
+static const struct pci_error_handlers igb_err_handler = {
+	.error_detected = igb_io_error_detected,
+	.slot_reset = igb_io_slot_reset,
+	.resume = igb_io_resume,
+};
+
+static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
+
+static struct pci_driver igb_driver = {
+	.name     = igb_driver_name,
+	.id_table = igb_pci_tbl,
+	.probe    = igb_probe,
+	.remove   = igb_remove,
+#ifdef CONFIG_PM
+	.driver.pm = &igb_pm_ops,
+#endif
+	.shutdown = igb_shutdown,
+	.sriov_configure = igb_pci_sriov_configure,
+	.err_handler = &igb_err_handler
+};
+
+MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+static int local_debug = -1;
+module_param_named(debug, local_debug, int, 0);
+MODULE_PARM_DESC(debug, "debug level (0=none,...,16=all)");
+
+struct igb_reg_info {
+	u32 ofs;
+	char *name;
+};
+
+static const struct igb_reg_info igb_reg_info_tbl[] = {
+
+	/* General Registers */
+	{E1000_CTRL, "CTRL"},
+	{E1000_STATUS, "STATUS"},
+	{E1000_CTRL_EXT, "CTRL_EXT"},
+
+	/* Interrupt Registers */
+	{E1000_ICR, "ICR"},
+
+	/* RX Registers */
+	{E1000_RCTL, "RCTL"},
+	{E1000_RDLEN(0), "RDLEN"},
+	{E1000_RDH(0), "RDH"},
+	{E1000_RDT(0), "RDT"},
+	{E1000_RXDCTL(0), "RXDCTL"},
+	{E1000_RDBAL(0), "RDBAL"},
+	{E1000_RDBAH(0), "RDBAH"},
+
+	/* TX Registers */
+	{E1000_TCTL, "TCTL"},
+	{E1000_TDBAL(0), "TDBAL"},
+	{E1000_TDBAH(0), "TDBAH"},
+	{E1000_TDLEN(0), "TDLEN"},
+	{E1000_TDH(0), "TDH"},
+	{E1000_TDT(0), "TDT"},
+	{E1000_TXDCTL(0), "TXDCTL"},
+	{E1000_TDFH, "TDFH"},
+	{E1000_TDFT, "TDFT"},
+	{E1000_TDFHS, "TDFHS"},
+	{E1000_TDFPC, "TDFPC"},
+
+	/* List Terminator */
+	{}
+};
+
+/* igb_regdump - register printout routine */
+static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
+{
+	int n = 0;
+	char rname[16];
+	u32 regs[8];
+
+	switch (reginfo->ofs) {
+	case E1000_RDLEN(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDLEN(n));
+		break;
+	case E1000_RDH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDH(n));
+		break;
+	case E1000_RDT(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDT(n));
+		break;
+	case E1000_RXDCTL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RXDCTL(n));
+		break;
+	case E1000_RDBAL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDBAL(n));
+		break;
+	case E1000_RDBAH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDBAH(n));
+		break;
+	case E1000_TDBAL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDBAL(n));
+		break;
+	case E1000_TDBAH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDBAH(n));
+		break;
+	case E1000_TDLEN(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDLEN(n));
+		break;
+	case E1000_TDH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDH(n));
+		break;
+	case E1000_TDT(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDT(n));
+		break;
+	case E1000_TXDCTL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TXDCTL(n));
+		break;
+	default:
+		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
+		return;
+	}
+
+	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
+	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
+		regs[2], regs[3]);
+}
+
+/* igb_dump - Print registers, Tx-rings and Rx-rings */
+static void igb_dump(struct igb_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	struct igb_reg_info *reginfo;
+	struct igb_ring *tx_ring;
+	union e1000_adv_tx_desc *tx_desc;
+	struct my_u0 { u64 a; u64 b; } *u0;
+	struct igb_ring *rx_ring;
+	union e1000_adv_rx_desc *rx_desc;
+	u32 staterr;
+	u16 i, n;
+
+	/* Print netdevice Info */
+	if (netdev) {
+		dev_info(&adapter->pdev->dev, "Net device Info\n");
+		pr_info("Device Name\n");
+		pr_info("%s\n", netdev->name);
+	}
+
+	/* Print Registers */
+	dev_info(&adapter->pdev->dev, "Register Dump\n");
+	pr_info(" Register Name   Value\n");
+	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
+	     reginfo->name; reginfo++) {
+		igb_regdump(hw, reginfo);
+	}
+
+	/* Print TX Ring Summary */
+	if (!netdev || !rtnetif_running(netdev))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
+	for (n = 0; n < adapter->num_tx_queues; n++) {
+		struct igb_tx_buffer *buffer_info;
+		tx_ring = adapter->tx_ring[n];
+		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+		pr_info(" %5d %5X %5X %p %016llX\n",
+			n, tx_ring->next_to_use, tx_ring->next_to_clean,
+			buffer_info->next_to_watch,
+			(u64)buffer_info->time_stamp);
+	}
+
+	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+	/* Transmit Descriptor Formats
+	 *
+	 * Advanced Transmit Descriptor
+	 *   +--------------------------------------------------------------+
+	 * 0 |         Buffer Address [63:0]                                |
+	 *   +--------------------------------------------------------------+
+	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
+	 *   +--------------------------------------------------------------+
+	 *   63      46 45    40 39 38 36 35 32 31   24             15       0
+	 */
+
+	for (n = 0; n < adapter->num_tx_queues; n++) {
+		tx_ring = adapter->tx_ring[n];
+		pr_info("------------------------------------\n");
+		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+		pr_info("------------------------------------\n");
+		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
+			"[bi->dma       ] leng  ntw timestamp        "
+			"bi->skb\n");
+
+		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+			const char *next_desc;
+			struct igb_tx_buffer *buffer_info;
+			tx_desc = IGB_TX_DESC(tx_ring, i);
+			buffer_info = &tx_ring->tx_buffer_info[i];
+			u0 = (struct my_u0 *)tx_desc;
+			if (i == tx_ring->next_to_use &&
+			    i == tx_ring->next_to_clean)
+				next_desc = " NTC/U";
+			else if (i == tx_ring->next_to_use)
+				next_desc = " NTU";
+			else if (i == tx_ring->next_to_clean)
+				next_desc = " NTC";
+			else
+				next_desc = "";
+
+			pr_info("T [0x%03X]    %016llX %016llX"
+				"  %p %016llX %p%s\n", i,
+				le64_to_cpu(u0->a),
+				le64_to_cpu(u0->b),
+				buffer_info->next_to_watch,
+				(u64)buffer_info->time_stamp,
+				buffer_info->skb, next_desc);
+
+			if (buffer_info->skb)
+				print_hex_dump(KERN_INFO, "",
+					DUMP_PREFIX_ADDRESS,
+					16, 1, buffer_info->skb->data,
+					14,
+					true);
+		}
+	}
+
+	/* Print RX Rings Summary */
+	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+	pr_info("Queue [NTU] [NTC]\n");
+	for (n = 0; n < adapter->num_rx_queues; n++) {
+		rx_ring = adapter->rx_ring[n];
+		pr_info(" %5d %5X %5X\n",
+			n, rx_ring->next_to_use, rx_ring->next_to_clean);
+	}
+
+	/* Print RX Rings */
+	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+	/* Advanced Receive Descriptor (Read) Format
+	 *    63                                           1        0
+	 *    +-----------------------------------------------------+
+	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
+	 *    +----------------------------------------------+------+
+	 *  8 |       Header Buffer Address [63:1]           |  DD  |
+	 *    +-----------------------------------------------------+
+	 *
+	 *
+	 * Advanced Receive Descriptor (Write-Back) Format
+	 *
+	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
+	 *   +------------------------------------------------------+
+	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
+	 *   | Checksum   Ident  |   |           |    | Type | Type |
+	 *   +------------------------------------------------------+
+	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+	 *   +------------------------------------------------------+
+	 *   63       48 47    32 31            20 19               0
+	 */
+
+	for (n = 0; n < adapter->num_rx_queues; n++) {
+		rx_ring = adapter->rx_ring[n];
+		pr_info("------------------------------------\n");
+		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+		pr_info("------------------------------------\n");
+		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
+			"[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
+		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
+			"----------- [bi->skb] <-- Adv Rx Write-Back format\n");
+
+		for (i = 0; i < rx_ring->count; i++) {
+			const char *next_desc;
+			struct igb_rx_buffer *buffer_info;
+			buffer_info = &rx_ring->rx_buffer_info[i];
+			rx_desc = IGB_RX_DESC(rx_ring, i);
+			u0 = (struct my_u0 *)rx_desc;
+			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+			if (i == rx_ring->next_to_use)
+				next_desc = " NTU";
+			else if (i == rx_ring->next_to_clean)
+				next_desc = " NTC";
+			else
+				next_desc = "";
+
+			if (staterr & E1000_RXD_STAT_DD) {
+				/* Descriptor Done */
+				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %s\n",
+					"RWB", i,
+					le64_to_cpu(u0->a),
+					le64_to_cpu(u0->b),
+					next_desc);
+			} else {
+				pr_info("%s[0x%03X]     %016llX %016llX %016llX %s\n",
+					"R  ", i,
+					le64_to_cpu(u0->a),
+					le64_to_cpu(u0->b),
+					(u64)buffer_info->dma,
+					next_desc);
+
+			}
+		}
+	}
+
+exit:
+	return;
+}
+
+/**
+ *  igb_get_hw_dev - return device
+ *  @hw: pointer to hardware structure
+ *
+ *  used by hardware layer to print debugging information
+ **/
+struct rtnet_device *igb_get_hw_dev(struct e1000_hw *hw)
+{
+	struct igb_adapter *adapter = hw->back;
+	return adapter->netdev;
+}
+
+/**
+ *  igb_init_module - Driver Registration Routine
+ *
+ *  igb_init_module is the first routine called when the driver is
+ *  loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init igb_init_module(void)
+{
+	int ret;
+
+	pr_info("%s - version %s\n",
+	       igb_driver_string, igb_driver_version);
+	pr_info("%s\n", igb_copyright);
+
+	ret = pci_register_driver(&igb_driver);
+	return ret;
+}
+
+module_init(igb_init_module);
+
+/**
+ *  igb_exit_module - Driver Exit Cleanup Routine
+ *
+ *  igb_exit_module is called just before the driver is removed
+ *  from memory.
+ **/
+static void __exit igb_exit_module(void)
+{
+	pci_unregister_driver(&igb_driver);
+}
+
+module_exit(igb_exit_module);
+
+#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
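+/*
+ * Worked example of the mapping above (derived directly from the macro):
+ * i = 0, 1, 2, 3, 4 -> 0, 8, 1, 9, 2; even indices land in slots 0..7,
+ * odd indices in slots 8..15.
+ */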
+/**
+ *  igb_cache_ring_register - Descriptor ring to register mapping
+ *  @adapter: board private structure to initialize
+ *
+ *  Once we know the feature-set enabled for the device, we'll cache
+ *  the register offset the descriptor ring is assigned to.
+ **/
+static void igb_cache_ring_register(struct igb_adapter *adapter)
+{
+	int i = 0, j = 0;
+	u32 rbase_offset = 0;
+
+	switch (adapter->hw.mac.type) {
+	case e1000_82576:
+		/* The queues are allocated for virtualization such that VF 0
+		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
+		 * In order to avoid collision we start at the first free queue
+		 * and continue consuming queues in the same sequence
+		 */
+		fallthrough;
+	case e1000_82575:
+	case e1000_82580:
+	case e1000_i350:
+	case e1000_i354:
+	case e1000_i210:
+	case e1000_i211:
+		fallthrough;
+	default:
+		for (; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
+		for (; j < adapter->num_tx_queues; j++)
+			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
+		break;
+	}
+}
+
+u32 igb_rd32(struct e1000_hw *hw, u32 reg)
+{
+	struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
+	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
+	u32 value = 0;
+
+	if (E1000_REMOVED(hw_addr))
+		return ~value;
+
+	value = readl(&hw_addr[reg]);
+
+	/* reads should not return all F's */
+	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
+		struct rtnet_device *netdev = igb->netdev;
+		hw->hw_addr = NULL;
+		rtnetif_device_detach(netdev);
+		rtdev_err(netdev, "PCIe link lost, device now detached\n");
+	}
+
+	return value;
+}
+
+/**
+ *  igb_write_ivar - configure ivar for given MSI-X vector
+ *  @hw: pointer to the HW structure
+ *  @msix_vector: vector number we are allocating to a given ring
+ *  @index: row index of IVAR register to write within IVAR table
+ *  @offset: column offset within IVAR, should be a multiple of 8
+ *
+ *  This function is intended to handle the writing of the IVAR register
+ *  for adapters 82576 and newer.  The IVAR table consists of 2 columns,
+ *  each containing a cause allocation for an Rx and Tx ring, and a
+ *  variable number of rows depending on the number of queues supported.
+ **/
+static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
+			   int index, int offset)
+{
+	u32 ivar = array_rd32(E1000_IVAR0, index);
+
+	/* clear any bits that are currently set */
+	ivar &= ~((u32)0xFF << offset);
+
+	/* write vector and valid bit */
+	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
+
+	array_wr32(E1000_IVAR0, index, ivar);
+}
+
+#define IGB_N0_QUEUE -1
+static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
+{
+	struct igb_adapter *adapter = q_vector->adapter;
+	struct e1000_hw *hw = &adapter->hw;
+	int rx_queue = IGB_N0_QUEUE;
+	int tx_queue = IGB_N0_QUEUE;
+	u32 msixbm = 0;
+
+	if (q_vector->rx.ring)
+		rx_queue = q_vector->rx.ring->reg_idx;
+	if (q_vector->tx.ring)
+		tx_queue = q_vector->tx.ring->reg_idx;
+
+	switch (hw->mac.type) {
+	case e1000_82575:
+		/* The 82575 assigns vectors using a bitmask, which matches the
+		 * bitmask for the EICR/EIMS/EIMC registers.  To assign one
+		 * or more queues to a vector, we write the appropriate bits
+		 * into the MSIXBM register for that vector.
+		 */
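+		/*
+		 * For illustration: a vector serving Rx queue 0 and Tx
+		 * queue 0 ends up with msixbm =
+		 * E1000_EICR_RX_QUEUE0 | E1000_EICR_TX_QUEUE0.
+		 */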
+		if (rx_queue > IGB_N0_QUEUE)
+			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
+		if (tx_queue > IGB_N0_QUEUE)
+			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
+		if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
+			msixbm |= E1000_EIMS_OTHER;
+		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
+		q_vector->eims_value = msixbm;
+		break;
+	case e1000_82576:
+		/* 82576 uses a table that essentially consists of 2 columns
+		 * with 8 rows.  The ordering is column-major so we use the
+		 * lower 3 bits as the row index, and the 4th bit as the
+		 * column offset.
+		 */
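+		/*
+		 * For illustration, with the expressions below: rx_queue 10
+		 * maps to row 10 & 0x7 = 2, column offset (10 & 0x8) << 1 = 16;
+		 * tx_queue 10 maps to the same row at offset 16 + 8 = 24.
+		 */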
+		if (rx_queue > IGB_N0_QUEUE)
+			igb_write_ivar(hw, msix_vector,
+				       rx_queue & 0x7,
+				       (rx_queue & 0x8) << 1);
+		if (tx_queue > IGB_N0_QUEUE)
+			igb_write_ivar(hw, msix_vector,
+				       tx_queue & 0x7,
+				       ((tx_queue & 0x8) << 1) + 8);
+		q_vector->eims_value = 1 << msix_vector;
+		break;
+	case e1000_82580:
+	case e1000_i350:
+	case e1000_i354:
+	case e1000_i210:
+	case e1000_i211:
+		/* On 82580 and newer adapters the scheme is similar to 82576
+		 * however instead of ordering column-major we have things
+		 * ordered row-major.  So we traverse the table by using
+		 * bit 0 as the column offset, and the remaining bits as the
+		 * row index.
+		 */
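+		/*
+		 * For illustration: rx_queue 5 maps to row 5 >> 1 = 2,
+		 * column offset (5 & 0x1) << 4 = 16; tx_queue 5 maps to
+		 * the same row at offset 16 + 8 = 24.
+		 */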
+		if (rx_queue > IGB_N0_QUEUE)
+			igb_write_ivar(hw, msix_vector,
+				       rx_queue >> 1,
+				       (rx_queue & 0x1) << 4);
+		if (tx_queue > IGB_N0_QUEUE)
+			igb_write_ivar(hw, msix_vector,
+				       tx_queue >> 1,
+				       ((tx_queue & 0x1) << 4) + 8);
+		q_vector->eims_value = 1 << msix_vector;
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	/* add q_vector eims value to global eims_enable_mask */
+	adapter->eims_enable_mask |= q_vector->eims_value;
+
+	/* configure q_vector to set itr on first interrupt */
+	q_vector->set_itr = 1;
+}
+
+/**
+ *  igb_configure_msix - Configure MSI-X hardware
+ *  @adapter: board private structure to initialize
+ *
+ *  igb_configure_msix sets up the hardware to properly
+ *  generate MSI-X interrupts.
+ **/
+static void igb_configure_msix(struct igb_adapter *adapter)
+{
+	u32 tmp;
+	int i, vector = 0;
+	struct e1000_hw *hw = &adapter->hw;
+
+	adapter->eims_enable_mask = 0;
+
+	/* set vector for other causes, i.e. link changes */
+	switch (hw->mac.type) {
+	case e1000_82575:
+		tmp = rd32(E1000_CTRL_EXT);
+		/* enable MSI-X PBA support*/
+		tmp |= E1000_CTRL_EXT_PBA_CLR;
+
+		/* Auto-Mask interrupts upon ICR read. */
+		tmp |= E1000_CTRL_EXT_EIAME;
+		tmp |= E1000_CTRL_EXT_IRCA;
+
+		wr32(E1000_CTRL_EXT, tmp);
+
+		/* enable msix_other interrupt */
+		array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
+		adapter->eims_other = E1000_EIMS_OTHER;
+
+		break;
+
+	case e1000_82576:
+	case e1000_82580:
+	case e1000_i350:
+	case e1000_i354:
+	case e1000_i210:
+	case e1000_i211:
+		/* Turn on MSI-X capability first, or our settings
+		 * won't stick.  And it will take days to debug.
+		 */
+		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
+		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
+		     E1000_GPIE_NSICR);
+
+		/* enable msix_other interrupt */
+		adapter->eims_other = 1 << vector;
+		tmp = (vector++ | E1000_IVAR_VALID) << 8;
+
+		wr32(E1000_IVAR_MISC, tmp);
+		break;
+	default:
+		/* do nothing, since nothing else supports MSI-X */
+		break;
+	} /* switch (hw->mac.type) */
+
+	adapter->eims_enable_mask |= adapter->eims_other;
+
+	for (i = 0; i < adapter->num_q_vectors; i++)
+		igb_assign_vector(adapter->q_vector[i], vector++);
+
+	wrfl();
+}
+
+/**
+ *  igb_request_msix - Initialize MSI-X interrupts
+ *  @adapter: board private structure to initialize
+ *
+ *  igb_request_msix allocates MSI-X vectors and requests interrupts from the
+ *  kernel.
+ **/
+static int igb_request_msix(struct igb_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	int i, err = 0, vector = 0, free_vector = 0;
+
+	err = request_irq(adapter->msix_entries[vector].vector,
+			  igb_msix_other, 0, netdev->name, adapter);
+	if (err)
+		goto err_out;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+
+		vector++;
+
+		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
+
+		if (q_vector->rx.ring && q_vector->tx.ring)
+			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
+				q_vector->rx.ring->queue_index);
+		else if (q_vector->tx.ring)
+			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
+				q_vector->tx.ring->queue_index);
+		else if (q_vector->rx.ring)
+			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
+				q_vector->rx.ring->queue_index);
+		else
+			sprintf(q_vector->name, "%s-unused", netdev->name);
+
+		err = rtdm_irq_request(&adapter->msix_irq_handle[vector],
+				adapter->msix_entries[vector].vector,
+				igb_msix_ring, 0, q_vector->name, q_vector);
+		if (err)
+			goto err_free;
+	}
+
+	igb_configure_msix(adapter);
+	return 0;
+
+err_free:
+	/* free already assigned IRQs */
+	free_irq(adapter->msix_entries[free_vector++].vector, adapter);
+
+	vector--;
+	for (i = 0; i < vector; i++)
+		rtdm_irq_free(&adapter->msix_irq_handle[free_vector++]);
+err_out:
+	return err;
+}
+
+/**
+ *  igb_free_q_vector - Free memory allocated for specific interrupt vector
+ *  @adapter: board private structure to initialize
+ *  @v_idx: Index of vector to be freed
+ *
+ *  This function frees the memory allocated to the q_vector.
+ **/
+static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
+{
+	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+	adapter->q_vector[v_idx] = NULL;
+
+	/* igb_get_stats64() might access the rings on this vector,
+	 * we must wait a grace period before freeing it.
+	 */
+	if (q_vector)
+		kfree_rcu(q_vector, rcu);
+}
+
+/**
+ *  igb_reset_q_vector - Reset config for interrupt vector
+ *  @adapter: board private structure to initialize
+ *  @v_idx: Index of vector to be reset
+ *
+ *  If NAPI is enabled it will delete any references to the
+ *  NAPI struct. This is preparation for igb_free_q_vector.
+ **/
+static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
+{
+	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+	/* Coming from igb_set_interrupt_capability, the vectors are not yet
+	 * allocated. So, q_vector is NULL so we should stop here.
+	 */
+	if (!q_vector)
+		return;
+
+	if (q_vector->tx.ring)
+		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
+
+	if (q_vector->rx.ring)
+		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
+}
+
+static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
+{
+	int v_idx = adapter->num_q_vectors;
+
+	if (adapter->flags & IGB_FLAG_HAS_MSIX)
+		pci_disable_msix(adapter->pdev);
+	else if (adapter->flags & IGB_FLAG_HAS_MSI)
+		pci_disable_msi(adapter->pdev);
+
+	while (v_idx--)
+		igb_reset_q_vector(adapter, v_idx);
+}
+
+/**
+ *  igb_free_q_vectors - Free memory allocated for interrupt vectors
+ *  @adapter: board private structure to initialize
+ *
+ *  This function frees the memory allocated to the q_vectors.  In addition if
+ *  NAPI is enabled it will delete any references to the NAPI struct prior
+ *  to freeing the q_vector.
+ **/
+static void igb_free_q_vectors(struct igb_adapter *adapter)
+{
+	int v_idx = adapter->num_q_vectors;
+
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+	adapter->num_q_vectors = 0;
+
+	while (v_idx--) {
+		igb_reset_q_vector(adapter, v_idx);
+		igb_free_q_vector(adapter, v_idx);
+	}
+}
+
+/**
+ *  igb_clear_interrupt_scheme - reset the device to a state of no interrupts
+ *  @adapter: board private structure to initialize
+ *
+ *  This function resets the device so that it has 0 Rx queues, Tx queues, and
+ *  MSI-X interrupts allocated.
+ */
+static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
+{
+	igb_free_q_vectors(adapter);
+	igb_reset_interrupt_capability(adapter);
+}
+
+/**
+ *  igb_set_interrupt_capability - set MSI or MSI-X if supported
+ *  @adapter: board private structure to initialize
+ *  @msix: boolean value of MSIX capability
+ *
+ *  Attempt to configure interrupts using the best available
+ *  capabilities of the hardware and kernel.
+ **/
+static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
+{
+	int err;
+	int numvecs, i;
+
+	if (!msix)
+		goto msi_only;
+	adapter->flags |= IGB_FLAG_HAS_MSIX;
+
+	/* Number of supported queues. */
+	adapter->num_rx_queues = adapter->rss_queues;
+	adapter->num_tx_queues = adapter->rss_queues;
+
+	/* start with one vector for every Rx queue */
+	numvecs = adapter->num_rx_queues;
+
+	/* if Tx handler is separate add 1 for every Tx queue */
+	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
+		numvecs += adapter->num_tx_queues;
+
+	/* store the number of vectors reserved for queues */
+	adapter->num_q_vectors = numvecs;
+
+	/* add 1 vector for link status interrupts */
+	numvecs++;
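+	/*
+	 * Example, derived from the logic above: with rss_queues = 4 and
+	 * queue pairs enabled, this requests 4 queue vectors plus 1 link
+	 * vector, i.e. numvecs = 5 MSI-X entries.
+	 */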
+	for (i = 0; i < numvecs; i++)
+		adapter->msix_entries[i].entry = i;
+
+	err = pci_enable_msix_range(adapter->pdev,
+				    adapter->msix_entries,
+				    numvecs,
+				    numvecs);
+	if (err > 0)
+		return;
+
+	igb_reset_interrupt_capability(adapter);
+
+	/* If we can't do MSI-X, try MSI */
+msi_only:
+	adapter->flags &= ~IGB_FLAG_HAS_MSIX;
+	adapter->rss_queues = 1;
+	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+	adapter->num_rx_queues = 1;
+	adapter->num_tx_queues = 1;
+	adapter->num_q_vectors = 1;
+	if (!pci_enable_msi(adapter->pdev))
+		adapter->flags |= IGB_FLAG_HAS_MSI;
+}
+
+static void igb_add_ring(struct igb_ring *ring,
+			 struct igb_ring_container *head)
+{
+	head->ring = ring;
+	head->count++;
+}
+
+/**
+ *  igb_alloc_q_vector - Allocate memory for a single interrupt vector
+ *  @adapter: board private structure to initialize
+ *  @v_count: q_vectors allocated on adapter, used for ring interleaving
+ *  @v_idx: index of vector in adapter struct
+ *  @txr_count: total number of Tx rings to allocate
+ *  @txr_idx: index of first Tx ring to allocate
+ *  @rxr_count: total number of Rx rings to allocate
+ *  @rxr_idx: index of first Rx ring to allocate
+ *
+ *  We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ **/
+static int igb_alloc_q_vector(struct igb_adapter *adapter,
+			      int v_count, int v_idx,
+			      int txr_count, int txr_idx,
+			      int rxr_count, int rxr_idx)
+{
+	struct igb_q_vector *q_vector;
+	struct igb_ring *ring;
+	int ring_count, size;
+
+	/* igb only supports 1 Tx and/or 1 Rx queue per vector */
+	if (txr_count > 1 || rxr_count > 1)
+		return -ENOMEM;
+
+	ring_count = txr_count + rxr_count;
+	size = sizeof(struct igb_q_vector) +
+	       (sizeof(struct igb_ring) * ring_count);
+
+	/* allocate q_vector and rings */
+	q_vector = adapter->q_vector[v_idx];
+	if (!q_vector)
+		q_vector = kzalloc(size, GFP_KERNEL);
+	else
+		memset(q_vector, 0, size);
+	if (!q_vector)
+		return -ENOMEM;
+
+	/* tie q_vector and adapter together */
+	adapter->q_vector[v_idx] = q_vector;
+	q_vector->adapter = adapter;
+
+	/* initialize work limits */
+	q_vector->tx.work_limit = adapter->tx_work_limit;
+
+	/* initialize ITR configuration */
+	q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
+	q_vector->itr_val = IGB_START_ITR;
+
+	/* initialize pointer to rings */
+	ring = q_vector->ring;
+
+	/* initialize ITR */
+	if (rxr_count) {
+		/* rx or rx/tx vector */
+		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
+			q_vector->itr_val = adapter->rx_itr_setting;
+	} else {
+		/* tx only vector */
+		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
+			q_vector->itr_val = adapter->tx_itr_setting;
+	}
+
+	if (txr_count) {
+		/* assign generic ring traits */
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Tx values */
+		igb_add_ring(ring, &q_vector->tx);
+
+		/* For 82575, context index must be unique per ring. */
+		if (adapter->hw.mac.type == e1000_82575)
+			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
+
+		/* apply Tx specific ring traits */
+		ring->count = adapter->tx_ring_count;
+		ring->queue_index = txr_idx;
+
+		/* assign ring to adapter */
+		adapter->tx_ring[txr_idx] = ring;
+
+		/* push pointer to next ring */
+		ring++;
+	}
+
+	if (rxr_count) {
+		/* assign generic ring traits */
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Rx values */
+		igb_add_ring(ring, &q_vector->rx);
+
+		/* set flag indicating ring supports SCTP checksum offload */
+		if (adapter->hw.mac.type >= e1000_82576)
+			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
+
+		/* On i350, i354, i210, and i211, loopback VLAN packets
+		 * have the tag byte-swapped.
+		 */
+		if (adapter->hw.mac.type >= e1000_i350)
+			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
+
+		/* apply Rx specific ring traits */
+		ring->count = adapter->rx_ring_count;
+		ring->queue_index = rxr_idx;
+
+		/* assign ring to adapter */
+		adapter->rx_ring[rxr_idx] = ring;
+	}
+
+	return 0;
+}
+
+
+/**
+ *  igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ *  @adapter: board private structure to initialize
+ *
+ *  We allocate one q_vector per queue interrupt.  If allocation fails we
+ *  return -ENOMEM.
+ **/
+static int igb_alloc_q_vectors(struct igb_adapter *adapter)
+{
+	int q_vectors = adapter->num_q_vectors;
+	int rxr_remaining = adapter->num_rx_queues;
+	int txr_remaining = adapter->num_tx_queues;
+	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+	int err;
+
+	if (q_vectors >= (rxr_remaining + txr_remaining)) {
+		for (; rxr_remaining; v_idx++) {
+			err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
+						 0, 0, 1, rxr_idx);
+
+			if (err)
+				goto err_out;
+
+			/* update counts and index */
+			rxr_remaining--;
+			rxr_idx++;
+		}
+	}
+
+	for (; v_idx < q_vectors; v_idx++) {
+		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+
+		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
+					 tqpv, txr_idx, rqpv, rxr_idx);
+
+		if (err)
+			goto err_out;
+
+		/* update counts and index */
+		rxr_remaining -= rqpv;
+		txr_remaining -= tqpv;
+		rxr_idx++;
+		txr_idx++;
+	}
+
+	return 0;
+
+err_out:
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+	adapter->num_q_vectors = 0;
+
+	while (v_idx--)
+		igb_free_q_vector(adapter, v_idx);
+
+	return -ENOMEM;
+}
+
+/**
+ *  igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+ *  @adapter: board private structure to initialize
+ *  @msix: boolean value of MSIX capability
+ *
+ *  This function initializes the interrupts and allocates all of the queues.
+ **/
+static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int err;
+
+	igb_set_interrupt_capability(adapter, msix);
+
+	err = igb_alloc_q_vectors(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
+		goto err_alloc_q_vectors;
+	}
+
+	igb_cache_ring_register(adapter);
+
+	return 0;
+
+err_alloc_q_vectors:
+	igb_reset_interrupt_capability(adapter);
+	return err;
+}
+
+/**
+ *  igb_request_irq - initialize interrupts
+ *  @adapter: board private structure to initialize
+ *
+ *  Attempts to configure interrupts using the best available
+ *  capabilities of the hardware and kernel.
+ **/
+static int igb_request_irq(struct igb_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	int err = 0;
+
+	rt_stack_connect(netdev, &STACK_manager);
+
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+		err = igb_request_msix(adapter);
+		if (!err)
+			goto request_done;
+		/* fall back to MSI */
+		igb_free_all_tx_resources(adapter);
+		igb_free_all_rx_resources(adapter);
+
+		igb_clear_interrupt_scheme(adapter);
+		err = igb_init_interrupt_scheme(adapter, false);
+		if (err)
+			goto request_done;
+
+		igb_setup_all_tx_resources(adapter);
+		igb_setup_all_rx_resources(adapter);
+		igb_configure(adapter);
+	}
+
+	igb_assign_vector(adapter->q_vector[0], 0);
+
+	if (adapter->flags & IGB_FLAG_HAS_MSI) {
+		err = rtdm_irq_request(&adapter->irq_handle,
+				pdev->irq, igb_intr_msi, 0,
+				netdev->name, adapter);
+		if (!err)
+			goto request_done;
+
+		/* fall back to legacy interrupts */
+		igb_reset_interrupt_capability(adapter);
+		adapter->flags &= ~IGB_FLAG_HAS_MSI;
+	}
+
+	err = rtdm_irq_request(&adapter->irq_handle,
+			pdev->irq, igb_intr, IRQF_SHARED,
+			netdev->name, adapter);
+
+	if (err)
+		dev_err(&pdev->dev, "Error %d getting interrupt\n",
+			err);
+
+request_done:
+	return err;
+}
+
+static void igb_free_irq(struct igb_adapter *adapter)
+{
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+		int vector = 0, i;
+
+		free_irq(adapter->msix_entries[vector++].vector, adapter);
+
+		for (i = 0; i < adapter->num_q_vectors; i++)
+			rtdm_irq_free(&adapter->msix_irq_handle[vector++]);
+	} else {
+		rtdm_irq_free(&adapter->irq_handle);
+	}
+}
+
+/**
+ *  igb_irq_disable - Mask off interrupt generation on the NIC
+ *  @adapter: board private structure
+ **/
+static void igb_irq_disable(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* we need to be careful when disabling interrupts.  The VFs are also
+	 * mapped into these registers and so clearing the bits can cause
+	 * issues on the VF drivers so we only need to clear what we set
+	 */
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+		u32 regval = rd32(E1000_EIAM);
+
+		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
+		wr32(E1000_EIMC, adapter->eims_enable_mask);
+		regval = rd32(E1000_EIAC);
+		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
+	}
+
+	wr32(E1000_IAM, 0);
+	wr32(E1000_IMC, ~0);
+	wrfl();
+
+	msleep(10);
+}
+
+/**
+ *  igb_irq_enable - Enable default interrupt generation settings
+ *  @adapter: board private structure
+ **/
+static void igb_irq_enable(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
+		u32 regval = rd32(E1000_EIAC);
+
+		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
+		regval = rd32(E1000_EIAM);
+		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
+		wr32(E1000_EIMS, adapter->eims_enable_mask);
+		wr32(E1000_IMS, ims);
+	} else {
+		wr32(E1000_IMS, IMS_ENABLE_MASK |
+				E1000_IMS_DRSTA);
+		wr32(E1000_IAM, IMS_ENABLE_MASK |
+				E1000_IMS_DRSTA);
+	}
+}
+
+static void igb_update_mng_vlan(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 vid = adapter->hw.mng_cookie.vlan_id;
+	u16 old_vid = adapter->mng_vlan_id;
+
+	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+		/* add VID to filter table */
+		igb_vfta_set(hw, vid, true);
+		adapter->mng_vlan_id = vid;
+	} else {
+		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
+	}
+
+	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
+	    (vid != old_vid) &&
+	    !test_bit(old_vid, adapter->active_vlans)) {
+		/* remove VID from filter table */
+		igb_vfta_set(hw, old_vid, false);
+	}
+}
+
+/**
+ *  igb_release_hw_control - release control of the h/w to f/w
+ *  @adapter: address of board private structure
+ *
+ *  igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
+ *  For ASF and Pass Through versions of f/w this means that the
+ *  driver is no longer loaded.
+ **/
+static void igb_release_hw_control(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl_ext;
+
+	/* Let firmware take over control of h/w */
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	wr32(E1000_CTRL_EXT,
+			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+}
+
+/**
+ *  igb_get_hw_control - get control of the h/w from f/w
+ *  @adapter: address of board private structure
+ *
+ *  igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
+ *  For ASF and Pass Through versions of f/w this means that
+ *  the driver is loaded.
+ **/
+static void igb_get_hw_control(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl_ext;
+
+	/* Let firmware know the driver has taken over */
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	wr32(E1000_CTRL_EXT,
+			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+}
+
+/**
+ *  igb_configure - configure the hardware for RX and TX
+ *  @adapter: private board structure
+ **/
+static void igb_configure(struct igb_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	int i;
+
+	igb_get_hw_control(adapter);
+	igb_set_rx_mode(netdev);
+
+	igb_restore_vlan(adapter);
+
+	igb_setup_tctl(adapter);
+	igb_setup_mrqc(adapter);
+	igb_setup_rctl(adapter);
+
+	igb_configure_tx(adapter);
+	igb_configure_rx(adapter);
+
+	igb_rx_fifo_flush_82575(&adapter->hw);
+
+	/* call igb_desc_unused which always leaves
+	 * at least 1 descriptor unused to make sure
+	 * next_to_use != next_to_clean
+	 */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct igb_ring *ring = adapter->rx_ring[i];
+		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
+	}
+}
+
+/**
+ *  igb_power_up_link - Power up the phy/serdes link
+ *  @adapter: address of board private structure
+ **/
+void igb_power_up_link(struct igb_adapter *adapter)
+{
+	igb_reset_phy(&adapter->hw);
+
+	if (adapter->hw.phy.media_type == e1000_media_type_copper)
+		igb_power_up_phy_copper(&adapter->hw);
+	else
+		igb_power_up_serdes_link_82575(&adapter->hw);
+
+	igb_setup_link(&adapter->hw);
+}
+
+/**
+ *  igb_power_down_link - Power down the phy/serdes link
+ *  @adapter: address of board private structure
+ */
+static void igb_power_down_link(struct igb_adapter *adapter)
+{
+	if (adapter->hw.phy.media_type == e1000_media_type_copper)
+		igb_power_down_phy_copper_82575(&adapter->hw);
+	else
+		igb_shutdown_serdes_link_82575(&adapter->hw);
+}
+
+/**
+ * igb_check_swap_media - Detect and switch function for Media Auto Sense
+ * @adapter: address of the board private structure
+ **/
+static void igb_check_swap_media(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl_ext, connsw;
+	bool swap_now = false;
+
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	connsw = rd32(E1000_CONNSW);
+
+	/* need to live swap if current media is copper and we have fiber/serdes
+	 * to go to.
+	 */
+
+	if ((hw->phy.media_type == e1000_media_type_copper) &&
+	    (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
+		swap_now = true;
+	} else if (!(connsw & E1000_CONNSW_SERDESD)) {
+		/* copper signal takes time to appear */
+		if (adapter->copper_tries < 4) {
+			adapter->copper_tries++;
+			connsw |= E1000_CONNSW_AUTOSENSE_CONF;
+			wr32(E1000_CONNSW, connsw);
+			return;
+		} else {
+			adapter->copper_tries = 0;
+			if ((connsw & E1000_CONNSW_PHYSD) &&
+			    (!(connsw & E1000_CONNSW_PHY_PDN))) {
+				swap_now = true;
+				connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
+				wr32(E1000_CONNSW, connsw);
+			}
+		}
+	}
+
+	if (!swap_now)
+		return;
+
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		rtdev_info(adapter->netdev,
+			"MAS: changing media to fiber/serdes\n");
+		ctrl_ext |=
+			E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+		adapter->flags |= IGB_FLAG_MEDIA_RESET;
+		adapter->copper_tries = 0;
+		break;
+	case e1000_media_type_internal_serdes:
+	case e1000_media_type_fiber:
+		rtdev_info(adapter->netdev,
+			"MAS: changing media to copper\n");
+		ctrl_ext &=
+			~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+		adapter->flags |= IGB_FLAG_MEDIA_RESET;
+		break;
+	default:
+		/* shouldn't get here during regular operation */
+		rtdev_err(adapter->netdev,
+			"AMS: Invalid media type found, returning\n");
+		break;
+	}
+	wr32(E1000_CTRL_EXT, ctrl_ext);
+}
+
+/**
+ *  igb_up - Open the interface and prepare it to handle traffic
+ *  @adapter: board private structure
+ **/
+int igb_up(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* hardware has been reset, we need to reload some things */
+	igb_configure(adapter);
+
+	clear_bit(__IGB_DOWN, &adapter->state);
+
+	if (adapter->flags & IGB_FLAG_HAS_MSIX)
+		igb_configure_msix(adapter);
+	else
+		igb_assign_vector(adapter->q_vector[0], 0);
+
+	/* Clear any pending interrupts. */
+	rd32(E1000_ICR);
+	igb_irq_enable(adapter);
+
+	rtnetif_start_queue(adapter->netdev);
+
+	/* start the watchdog. */
+	hw->mac.get_link_status = 1;
+	schedule_work(&adapter->watchdog_task);
+
+	if ((adapter->flags & IGB_FLAG_EEE) &&
+	    (!hw->dev_spec._82575.eee_disable))
+		adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
+
+	return 0;
+}
+
+void igb_down(struct igb_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 tctl, rctl;
+
+	/* signal that we're down so the interrupt handler does not
+	 * reschedule our watchdog timer
+	 */
+	set_bit(__IGB_DOWN, &adapter->state);
+
+	/* disable receives in the hardware */
+	rctl = rd32(E1000_RCTL);
+	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
+	/* flush and sleep below */
+
+	rtnetif_stop_queue(netdev);
+
+	/* disable transmits in the hardware */
+	tctl = rd32(E1000_TCTL);
+	tctl &= ~E1000_TCTL_EN;
+	wr32(E1000_TCTL, tctl);
+	/* flush both disables and wait for them to finish */
+	wrfl();
+	usleep_range(10000, 11000);
+
+	igb_irq_disable(adapter);
+
+	adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_info_timer);
+
+	/* record the stats before reset*/
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter);
+	spin_unlock(&adapter->stats64_lock);
+
+	rtnetif_carrier_off(netdev);
+	adapter->link_speed = 0;
+	adapter->link_duplex = 0;
+
+	if (!pci_channel_offline(adapter->pdev))
+		igb_reset(adapter);
+	igb_clean_all_tx_rings(adapter);
+	igb_clean_all_rx_rings(adapter);
+}
+
+void igb_reinit_locked(struct igb_adapter *adapter)
+{
+	WARN_ON(in_interrupt());
+	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+		usleep_range(1000, 2000);
+	igb_down(adapter);
+	igb_up(adapter);
+	clear_bit(__IGB_RESETTING, &adapter->state);
+}
+
+/**
+ *  igb_enable_mas - Media Autosense re-enable after swap
+ *  @adapter: adapter struct
+ **/
+static void igb_enable_mas(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 connsw = rd32(E1000_CONNSW);
+
+	/* configure for SerDes media detect */
+	if ((hw->phy.media_type == e1000_media_type_copper) &&
+	    (!(connsw & E1000_CONNSW_SERDESD))) {
+		connsw |= E1000_CONNSW_ENRGSRC;
+		connsw |= E1000_CONNSW_AUTOSENSE_EN;
+		wr32(E1000_CONNSW, connsw);
+		wrfl();
+	}
+}
+
+void igb_reset(struct igb_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_fc_info *fc = &hw->fc;
+	u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;
+
+	/* Repartition Pba for greater than 9k mtu
+	 * To take effect CTRL.RST is required.
+	 */
+	switch (mac->type) {
+	case e1000_i350:
+	case e1000_i354:
+	case e1000_82580:
+		pba = rd32(E1000_RXPBS);
+		pba = igb_rxpbs_adjust_82580(pba);
+		break;
+	case e1000_82576:
+		pba = rd32(E1000_RXPBS);
+		pba &= E1000_RXPBS_SIZE_MASK_82576;
+		break;
+	case e1000_82575:
+	case e1000_i210:
+	case e1000_i211:
+	default:
+		pba = E1000_PBA_34K;
+		break;
+	}
+
+	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+	    (mac->type < e1000_82576)) {
+		/* adjust PBA for jumbo frames */
+		wr32(E1000_PBA, pba);
+
+		/* To maintain wire speed transmits, the Tx FIFO should be
+		 * large enough to accommodate two full transmit packets,
+		 * rounded up to the next 1KB and expressed in KB.  Likewise,
+		 * the Rx FIFO should be large enough to accommodate at least
+		 * one full receive packet and is similarly rounded up and
+		 * expressed in KB.
+		 */
+		pba = rd32(E1000_PBA);
+		/* upper 16 bits has Tx packet buffer allocation size in KB */
+		tx_space = pba >> 16;
+		/* lower 16 bits has Rx packet buffer allocation size in KB */
+		pba &= 0xffff;
+		/* the Tx fifo also stores 16 bytes of information about the Tx
+		 * but don't include ethernet FCS because hardware appends it
+		 */
+		min_tx_space = (adapter->max_frame_size +
+				sizeof(union e1000_adv_tx_desc) -
+				ETH_FCS_LEN) * 2;
+		min_tx_space = ALIGN(min_tx_space, 1024);
+		min_tx_space >>= 10;
+		/* software strips receive CRC, so leave room for it */
+		min_rx_space = adapter->max_frame_size;
+		min_rx_space = ALIGN(min_rx_space, 1024);
+		min_rx_space >>= 10;
+
+		/* If current Tx allocation is less than the min Tx FIFO size,
+		 * and the min Tx FIFO size is less than the current Rx FIFO
+		 * allocation, take space away from current Rx allocation
+		 */
+		if (tx_space < min_tx_space &&
+		    ((min_tx_space - tx_space) < pba)) {
+			pba = pba - (min_tx_space - tx_space);
+
+			/* if short on Rx space, Rx wins and must trump Tx
+			 * adjustment
+			 */
+			if (pba < min_rx_space)
+				pba = min_rx_space;
+		}
+		wr32(E1000_PBA, pba);
+	}
+
+	/* flow control settings */
+	/* The high water mark must be low enough to fit one full frame
+	 * (or the size used for early receive) above it in the Rx FIFO.
+	 * Set it to the lower of:
+	 * - 90% of the Rx FIFO size, or
+	 * - the full Rx FIFO size minus two full frames
+	 */
+	hwm = min(((pba << 10) * 9 / 10),
+			((pba << 10) - 2 * adapter->max_frame_size));
+
+	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
+	fc->low_water = fc->high_water - 16;
+	fc->pause_time = 0xFFFF;
+	fc->send_xon = 1;
+	fc->current_mode = fc->requested_mode;
+
+	/* Allow time for pending master requests to run */
+	hw->mac.ops.reset_hw(hw);
+	wr32(E1000_WUC, 0);
+
+	if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+		/* need to set up the invariants again after a media swap */
+		adapter->ei.get_invariants(hw);
+		adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
+	}
+	if ((mac->type == e1000_82575) &&
+	    (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+		igb_enable_mas(adapter);
+	}
+	if (hw->mac.ops.init_hw(hw))
+		dev_err(&pdev->dev, "Hardware Error\n");
+
+	/* Flow control settings reset on hardware reset, so guarantee flow
+	 * control is off when forcing speed.
+	 */
+	if (!hw->mac.autoneg)
+		igb_force_mac_fc(hw);
+
+	igb_init_dmac(adapter, pba);
+#ifdef CONFIG_IGB_HWMON
+	/* Re-initialize the thermal sensor on i350 devices. */
+	if (!test_bit(__IGB_DOWN, &adapter->state)) {
+		if (mac->type == e1000_i350 && hw->bus.func == 0) {
+			/* If present, re-initialize the external thermal sensor
+			 * interface.
+			 */
+			if (adapter->ets)
+				mac->ops.init_thermal_sensor_thresh(hw);
+		}
+	}
+#endif
+	/* Re-establish EEE setting */
+	if (hw->phy.media_type == e1000_media_type_copper) {
+		switch (mac->type) {
+		case e1000_i350:
+		case e1000_i210:
+		case e1000_i211:
+			igb_set_eee_i350(hw, true, true);
+			break;
+		case e1000_i354:
+			igb_set_eee_i354(hw, true, true);
+			break;
+		default:
+			break;
+		}
+	}
+	if (!rtnetif_running(adapter->netdev))
+		igb_power_down_link(adapter);
+
+	igb_update_mng_vlan(adapter);
+
+	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
+
+	igb_get_phy_info(hw);
+}
+
+
+/**
+ * igb_set_fw_version - Configure version string for ethtool
+ * @adapter: adapter struct
+ **/
+void igb_set_fw_version(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_fw_version fw;
+
+	igb_get_fw_version(hw, &fw);
+
+	switch (hw->mac.type) {
+	case e1000_i210:
+	case e1000_i211:
+		if (!(igb_get_flash_presence_i210(hw))) {
+			snprintf(adapter->fw_version,
+				 sizeof(adapter->fw_version),
+				 "%2d.%2d-%d",
+				 fw.invm_major, fw.invm_minor,
+				 fw.invm_img_type);
+			break;
+		}
+		fallthrough;
+	default:
+		/* if option rom is valid, display its version too */
+		if (fw.or_valid) {
+			snprintf(adapter->fw_version,
+				 sizeof(adapter->fw_version),
+				 "%d.%d, 0x%08x, %d.%d.%d",
+				 fw.eep_major, fw.eep_minor, fw.etrack_id,
+				 fw.or_major, fw.or_build, fw.or_patch);
+		/* no option rom */
+		} else if (fw.etrack_id != 0x0000) {
+			snprintf(adapter->fw_version,
+			    sizeof(adapter->fw_version),
+			    "%d.%d, 0x%08x",
+			    fw.eep_major, fw.eep_minor, fw.etrack_id);
+		} else {
+			snprintf(adapter->fw_version,
+				 sizeof(adapter->fw_version),
+				 "%d.%d.%d",
+				 fw.eep_major, fw.eep_minor, fw.eep_build);
+		}
+		break;
+	}
+}
+
+/**
+ * igb_init_mas - init Media Autosense feature if enabled in the NVM
+ *
+ * @adapter: adapter struct
+ **/
+static void igb_init_mas(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 eeprom_data;
+
+	hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
+	switch (hw->bus.func) {
+	case E1000_FUNC_0:
+		if (eeprom_data & IGB_MAS_ENABLE_0) {
+			adapter->flags |= IGB_FLAG_MAS_ENABLE;
+			rtdev_info(adapter->netdev,
+				"MAS: Enabling Media Autosense for port %d\n",
+				hw->bus.func);
+		}
+		break;
+	case E1000_FUNC_1:
+		if (eeprom_data & IGB_MAS_ENABLE_1) {
+			adapter->flags |= IGB_FLAG_MAS_ENABLE;
+			rtdev_info(adapter->netdev,
+				"MAS: Enabling Media Autosense for port %d\n",
+				hw->bus.func);
+		}
+		break;
+	case E1000_FUNC_2:
+		if (eeprom_data & IGB_MAS_ENABLE_2) {
+			adapter->flags |= IGB_FLAG_MAS_ENABLE;
+			rtdev_info(adapter->netdev,
+				"MAS: Enabling Media Autosense for port %d\n",
+				hw->bus.func);
+		}
+		break;
+	case E1000_FUNC_3:
+		if (eeprom_data & IGB_MAS_ENABLE_3) {
+			adapter->flags |= IGB_FLAG_MAS_ENABLE;
+			rtdev_info(adapter->netdev,
+				"MAS: Enabling Media Autosense for port %d\n",
+				hw->bus.func);
+		}
+		break;
+	default:
+		/* Shouldn't get here */
+		rtdev_err(adapter->netdev,
+			"MAS: Invalid port configuration, returning\n");
+		break;
+	}
+}
+
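+/* RTnet buffer mapping hooks: each rtskb buffer is DMA-mapped once over its
+ * full RTSKB_SIZE and in both directions, so the same mapping can be used
+ * for receive refills as well as transmits.
+ */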
+static dma_addr_t igb_map_rtskb(struct rtnet_device *netdev,
+				struct rtskb *skb)
+{
+	struct igb_adapter *adapter = netdev->priv;
+	struct device *dev = &adapter->pdev->dev;
+	dma_addr_t addr;
+
+	addr = dma_map_single(dev, skb->buf_start, RTSKB_SIZE,
+			      DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, addr)) {
+		dev_err(dev, "DMA map failed\n");
+		return RTSKB_UNMAPPED;
+	}
+	return addr;
+}
+
+static void igb_unmap_rtskb(struct rtnet_device *netdev,
+			      struct rtskb *skb)
+{
+	struct igb_adapter *adapter = netdev->priv;
+	struct device *dev = &adapter->pdev->dev;
+
+	dma_unmap_single(dev, skb->buf_dma_addr, RTSKB_SIZE,
+			 DMA_BIDIRECTIONAL);
+}
+
+/**
+ *  igb_probe - Device Initialization Routine
+ *  @pdev: PCI device information struct
+ *  @ent: entry in igb_pci_tbl
+ *
+ *  Returns 0 on success, negative on failure
+ *
+ *  igb_probe initializes an adapter identified by a pci_dev structure.
+ *  The OS initialization, configuring of the adapter private structure,
+ *  and a hardware reset occur.
+ **/
+static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct rtnet_device *netdev;
+	struct igb_adapter *adapter;
+	struct e1000_hw *hw;
+	u16 eeprom_data = 0;
+	s32 ret_val;
+	static int global_quad_port_a; /* global quad port a indication */
+	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
+	int err, pci_using_dac;
+	u8 part_str[E1000_PBANUM_LENGTH];
+
+	/* Catch broken hardware that put the wrong VF device ID in
+	 * the PCIe SR-IOV capability.
+	 */
+	if (pdev->is_virtfn) {
+		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
+			pci_name(pdev), pdev->vendor, pdev->device);
+		return -EINVAL;
+	}
+
+	err = pci_enable_device_mem(pdev);
+	if (err)
+		return err;
+
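+	/* Prefer 64-bit DMA and fall back to a 32-bit mask if that fails */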
+	pci_using_dac = 0;
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (!err) {
+		pci_using_dac = 1;
+	} else {
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev,
+				"No usable DMA configuration, aborting\n");
+			goto err_dma;
+		}
+	}
+
+	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+					   IORESOURCE_MEM),
+					   igb_driver_name);
+	if (err)
+		goto err_pci_reg;
+
+	pci_enable_pcie_error_reporting(pdev);
+
+	pci_set_master(pdev);
+	pci_save_state(pdev);
+
+	err = -ENOMEM;
+	netdev = rt_alloc_etherdev(sizeof(*adapter),
+				2 * IGB_DEFAULT_RXD + IGB_DEFAULT_TXD);
+	if (!netdev)
+		goto err_alloc_etherdev;
+
+	rtdev_alloc_name(netdev, "rteth%d");
+	rt_rtdev_connect(netdev, &RTDEV_manager);
+
+	netdev->vers = RTDEV_VERS_2_0;
+	netdev->sysbind = &pdev->dev;
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = rtnetdev_priv(netdev);
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	hw = &adapter->hw;
+	hw->back = adapter;
+
+	err = -EIO;
+	hw->hw_addr = pci_iomap(pdev, 0, 0);
+	if (!hw->hw_addr)
+		goto err_ioremap;
+
+	netdev->open = igb_open;
+	netdev->stop = igb_close;
+	netdev->hard_start_xmit = igb_xmit_frame;
+	netdev->get_stats = igb_get_stats;
+	netdev->map_rtskb = igb_map_rtskb;
+	netdev->unmap_rtskb = igb_unmap_rtskb;
+	netdev->do_ioctl = igb_ioctl;
+#if 0
+	netdev->set_multicast_list = igb_set_multi;
+	netdev->set_mac_address = igb_set_mac;
+	netdev->change_mtu = igb_change_mtu;
+
+	// No ethtool support for now
+	igb_set_ethtool_ops(netdev);
+	netdev->watchdog_timeo = 5 * HZ;
+#endif
+
+	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+	netdev->mem_start = pci_resource_start(pdev, 0);
+	netdev->mem_end = pci_resource_end(pdev, 0);
+
+	/* PCI config space info */
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->revision_id = pdev->revision;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+
+	/* Copy the default MAC, PHY and NVM function pointers */
+	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
+	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
+	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
+	/* Initialize skew-specific constants */
+	err = ei->get_invariants(hw);
+	if (err)
+		goto err_sw_init;
+
+	/* setup the private structure */
+	err = igb_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+	igb_get_bus_info_pcie(hw);
+
+	hw->phy.autoneg_wait_to_complete = false;
+
+	/* Copper options */
+	if (hw->phy.media_type == e1000_media_type_copper) {
+		hw->phy.mdix = AUTO_ALL_MODES;
+		hw->phy.disable_polarity_correction = false;
+		hw->phy.ms_type = e1000_ms_hw_default;
+	}
+
+	if (igb_check_reset_block(hw))
+		dev_info(&pdev->dev,
+			"PHY reset is blocked due to SOL/IDER session.\n");
+
+	/* features is initialized to 0 at allocation; it might have bits
+	 * set by igb_sw_init, so we should use an OR instead of an
+	 * assignment.
+	 */
+	netdev->features |= NETIF_F_SG |
+			    NETIF_F_IP_CSUM |
+			    NETIF_F_IPV6_CSUM |
+			    NETIF_F_TSO |
+			    NETIF_F_TSO6 |
+			    NETIF_F_RXHASH |
+			    NETIF_F_RXCSUM |
+			    NETIF_F_HW_VLAN_CTAG_RX |
+			    NETIF_F_HW_VLAN_CTAG_TX;
+
+#if 0
+	/* set this bit last since it cannot be part of hw_features */
+	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#endif
+
+	netdev->priv_flags |= IFF_SUPP_NOFCS;
+
+	if (pci_using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+
+	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
+
+	/* before reading the NVM, reset the controller to put the device in a
+	 * known good starting state
+	 */
+	hw->mac.ops.reset_hw(hw);
+
+	/* make sure the NVM is good; i210/i211 parts can have a special NVM
+	 * that doesn't contain a checksum
+	 */
+	switch (hw->mac.type) {
+	case e1000_i210:
+	case e1000_i211:
+		if (igb_get_flash_presence_i210(hw)) {
+			if (hw->nvm.ops.validate(hw) < 0) {
+				dev_err(&pdev->dev,
+					"The NVM Checksum Is Not Valid\n");
+				err = -EIO;
+				goto err_eeprom;
+			}
+		}
+		break;
+	default:
+		if (hw->nvm.ops.validate(hw) < 0) {
+			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
+			err = -EIO;
+			goto err_eeprom;
+		}
+		break;
+	}
+
+	/* copy the MAC address out of the NVM */
+	if (hw->mac.ops.read_mac_addr(hw))
+		dev_err(&pdev->dev, "NVM Read Error\n");
+
+	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		dev_err(&pdev->dev, "Invalid MAC Address\n");
+		err = -EIO;
+		goto err_eeprom;
+	}
+
+	/* get firmware version for ethtool -i */
+	igb_set_fw_version(adapter);
+
+	/* configure RXPBSIZE and TXPBSIZE */
+	if (hw->mac.type == e1000_i210) {
+		wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
+		wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+	timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
+	timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);
+#else /* < 4.14 */
+	setup_timer(&adapter->watchdog_timer, igb_watchdog,
+		    (unsigned long) adapter);
+	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
+		    (unsigned long) adapter);
+#endif /* < 4.14 */
+
+	INIT_WORK(&adapter->reset_task, igb_reset_task);
+	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
+	rtdm_nrtsig_init(&adapter->watchdog_nrtsig,
+			igb_nrtsig_watchdog, adapter);
+
+	/* Initialize link properties that are user-changeable */
+	adapter->fc_autoneg = true;
+	hw->mac.autoneg = true;
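+	/* advertise 10/100 half/full and 1000 full duplex (0x2f) */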
+	hw->phy.autoneg_advertised = 0x2f;
+
+	hw->fc.requested_mode = e1000_fc_default;
+	hw->fc.current_mode = e1000_fc_default;
+
+	igb_validate_mdi_setting(hw);
+
+	/* By default, support wake on port A */
+	if (hw->bus.func == 0)
+		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+
+	/* Check the NVM for wake support on non-port A ports */
+	if (hw->mac.type >= e1000_82580)
+		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+				 &eeprom_data);
+	else if (hw->bus.func == 1)
+		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+
+	if (eeprom_data & IGB_EEPROM_APME)
+		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+
+	/* now that we have the eeprom settings, apply the special cases where
+	 * the eeprom may be wrong or the board simply won't support wake on
+	 * lan on a particular port
+	 */
+	switch (pdev->device) {
+	case E1000_DEV_ID_82575GB_QUAD_COPPER:
+		adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+		break;
+	case E1000_DEV_ID_82575EB_FIBER_SERDES:
+	case E1000_DEV_ID_82576_FIBER:
+	case E1000_DEV_ID_82576_SERDES:
+		/* Wake events only supported on port A for dual fiber
+		 * regardless of eeprom setting
+		 */
+		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
+			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+		break;
+	case E1000_DEV_ID_82576_QUAD_COPPER:
+	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+		/* if quad port adapter, disable WoL on all but port A */
+		if (global_quad_port_a != 0)
+			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+		else
+			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
+		/* Reset for multiple quad port adapters */
+		if (++global_quad_port_a == 4)
+			global_quad_port_a = 0;
+		break;
+	default:
+		/* If the device can't wake, don't set software support */
+		if (!device_can_wakeup(&adapter->pdev->dev))
+			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+	}
+
+	/* initialize the wol settings based on the eeprom settings */
+	if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
+		adapter->wol |= E1000_WUFC_MAG;
+
+	/* Some vendors want WoL disabled by default, but still supported */
+	if ((hw->mac.type == e1000_i350) &&
+	    (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
+		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+		adapter->wol = 0;
+	}
+
+	device_set_wakeup_enable(&adapter->pdev->dev,
+				 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
+
+	/* reset the hardware with the new settings */
+	igb_reset(adapter);
+
+	/* let the f/w know that the h/w is now under the control of the
+	 * driver.
+	 */
+	igb_get_hw_control(adapter);
+
+	strcpy(netdev->name, "rteth%d");
+	err = rt_register_rtnetdev(netdev);
+	if (err)
+		goto err_release_hw_control;
+
+	/* carrier off reporting is important to ethtool even BEFORE open */
+	rtnetif_carrier_off(netdev);
+
+#ifdef CONFIG_IGB_HWMON
+	/* Initialize the thermal sensor on i350 devices. */
+	if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
+		u16 ets_word;
+
+		/* Read the NVM to determine if this i350 device supports an
+		 * external thermal sensor.
+		 */
+		hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
+		if (ets_word != 0x0000 && ets_word != 0xFFFF)
+			adapter->ets = true;
+		else
+			adapter->ets = false;
+		if (igb_sysfs_init(adapter))
+			dev_err(&pdev->dev,
+				"failed to allocate sysfs resources\n");
+	} else {
+		adapter->ets = false;
+	}
+#endif
+	/* Check if Media Autosense is enabled */
+	adapter->ei = *ei;
+	if (hw->dev_spec._82575.mas_capable)
+		igb_init_mas(adapter);
+
+	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
+	/* print bus type/speed/width info, not applicable to i354 */
+	if (hw->mac.type != e1000_i354) {
+		dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
+			 netdev->name,
+			 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+			  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
+			   "unknown"),
+			 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
+			  "Width x4" :
+			  (hw->bus.width == e1000_bus_width_pcie_x2) ?
+			  "Width x2" :
+			  (hw->bus.width == e1000_bus_width_pcie_x1) ?
+			  "Width x1" : "unknown"), netdev->dev_addr);
+	}
+
+	if ((hw->mac.type >= e1000_i210 ||
+	     igb_get_flash_presence_i210(hw))) {
+		ret_val = igb_read_part_string(hw, part_str,
+					       E1000_PBANUM_LENGTH);
+	} else {
+		ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+	}
+
+	if (ret_val)
+		strcpy(part_str, "Unknown");
+	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
+	dev_info(&pdev->dev,
+		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
+		(adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
+		(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
+		adapter->num_rx_queues, adapter->num_tx_queues);
+	if (hw->phy.media_type == e1000_media_type_copper) {
+		switch (hw->mac.type) {
+		case e1000_i350:
+		case e1000_i210:
+		case e1000_i211:
+			/* Enable EEE for internal copper PHY devices */
+			err = igb_set_eee_i350(hw, true, true);
+			if ((!err) &&
+			    (!hw->dev_spec._82575.eee_disable)) {
+				adapter->eee_advert =
+					MDIO_EEE_100TX | MDIO_EEE_1000T;
+				adapter->flags |= IGB_FLAG_EEE;
+			}
+			break;
+		case e1000_i354:
+			if ((rd32(E1000_CTRL_EXT) &
+			    E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+				err = igb_set_eee_i354(hw, true, true);
+				if ((!err) &&
+					(!hw->dev_spec._82575.eee_disable)) {
+					adapter->eee_advert =
+					   MDIO_EEE_100TX | MDIO_EEE_1000T;
+					adapter->flags |= IGB_FLAG_EEE;
+				}
+			}
+			break;
+		default:
+			break;
+		}
+	}
+	pm_runtime_put_noidle(&pdev->dev);
+	return 0;
+
+err_release_hw_control:
+	igb_release_hw_control(adapter);
+	memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
+err_eeprom:
+	if (!igb_check_reset_block(hw))
+		igb_reset_phy(hw);
+
+	if (hw->flash_address)
+		iounmap(hw->flash_address);
+err_sw_init:
+	igb_clear_interrupt_scheme(adapter);
+	pci_iounmap(pdev, hw->hw_addr);
+err_ioremap:
+	rtdev_free(netdev);
+err_alloc_etherdev:
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ *  igb_remove_i2c - Cleanup  I2C interface
+ *  @adapter: pointer to adapter structure
+ **/
+static void igb_remove_i2c(struct igb_adapter *adapter)
+{
+	/* free the adapter bus structure */
+	i2c_del_adapter(&adapter->i2c_adap);
+}
+
+/**
+ *  igb_remove - Device Removal Routine
+ *  @pdev: PCI device information struct
+ *
+ *  igb_remove is called by the PCI subsystem to alert the driver
+ *  that it should release a PCI device.  This could be caused by a
+ *  Hot-Plug event, or because the driver is going to be removed from
+ *  memory.
+ **/
+static void igb_remove(struct pci_dev *pdev)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	rtdev_down(netdev);
+	igb_down(adapter);
+
+	pm_runtime_get_noresume(&pdev->dev);
+#ifdef CONFIG_IGB_HWMON
+	igb_sysfs_exit(adapter);
+#endif
+	igb_remove_i2c(adapter);
+	/* The watchdog timer may be rescheduled, so explicitly
+	 * disable watchdog from being rescheduled.
+	 */
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_info_timer);
+
+	cancel_work_sync(&adapter->reset_task);
+	cancel_work_sync(&adapter->watchdog_task);
+
+	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant.
+	 */
+	igb_release_hw_control(adapter);
+
+	rt_rtdev_disconnect(netdev);
+	rt_unregister_rtnetdev(netdev);
+
+	igb_clear_interrupt_scheme(adapter);
+
+	pci_iounmap(pdev, hw->hw_addr);
+	if (hw->flash_address)
+		iounmap(hw->flash_address);
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+
+	kfree(adapter->shadow_vfta);
+	rtdev_free(netdev);
+
+	pci_disable_pcie_error_reporting(pdev);
+
+	pci_disable_device(pdev);
+}
+
+/**
+ *  igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
+ *  @adapter: board private structure to initialize
+ *
+ *  This function initializes the vf specific data storage and then attempts to
+ *  allocate the VFs.  The reason for this ordering is that it is much more
+ *  expensive time-wise to disable SR-IOV than it is to allocate and free
+ *  the memory for the VFs.
+ **/
+static void igb_probe_vfs(struct igb_adapter *adapter)
+{
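+	/* Intentionally empty: VF/SR-IOV support is not set up in this
+	 * RTnet port of the driver.
+	 */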
+}
+
+static void igb_init_queue_configuration(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 max_rss_queues;
+
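+	/* This port runs with a single RSS queue. */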
+	max_rss_queues = 1;
+	adapter->rss_queues = max_rss_queues;
+
+	/* Determine if we need to pair queues. */
+	switch (hw->mac.type) {
+	case e1000_82575:
+	case e1000_i211:
+		/* Device supports enough interrupts without queue pairing. */
+		break;
+	case e1000_82576:
+		/* If VFs are going to be allocated with RSS queues then we
+		 * should pair the queues in order to conserve interrupts due
+		 * to limited supply.
+		 */
+		fallthrough;
+	case e1000_82580:
+	case e1000_i350:
+	case e1000_i354:
+	case e1000_i210:
+	default:
+		/* If rss_queues > half of max_rss_queues, pair the queues in
+		 * order to conserve interrupts due to limited supply.
+		 */
+		if (adapter->rss_queues > (max_rss_queues / 2))
+			adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+		break;
+	}
+}
+
+/**
+ *  igb_sw_init - Initialize general software structures (struct igb_adapter)
+ *  @adapter: board private structure to initialize
+ *
+ *  igb_sw_init initializes the Adapter private data structure.
+ *  Fields are initialized based on PCI device information and
+ *  OS network device settings (MTU size).
+ **/
+static int igb_sw_init(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct rtnet_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+
+	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+
+	/* set default ring sizes */
+	adapter->tx_ring_count = IGB_DEFAULT_TXD;
+	adapter->rx_ring_count = IGB_DEFAULT_RXD;
+
+	/* set default ITR values */
+	if (InterruptThrottle) {
+		adapter->rx_itr_setting = IGB_DEFAULT_ITR;
+		adapter->tx_itr_setting = IGB_DEFAULT_ITR;
+	} else {
+		adapter->rx_itr_setting = IGB_MIN_ITR_USECS;
+		adapter->tx_itr_setting = IGB_MIN_ITR_USECS;
+	}
+
+	/* set default work limits */
+	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
+
+	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
+				  VLAN_HLEN;
+	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+	spin_lock_init(&adapter->stats64_lock);
+
+	igb_init_queue_configuration(adapter);
+
+	/* Setup and initialize a copy of the hw vlan table array */
+	adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
+				       GFP_ATOMIC);
+	if (!adapter->shadow_vfta)
+		return -ENOMEM;
+
+	/* This call may decrease the number of queues */
+	if (igb_init_interrupt_scheme(adapter, true)) {
+		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+	igb_probe_vfs(adapter);
+
+	/* Explicitly disable IRQ since the NIC can be in any state. */
+	igb_irq_disable(adapter);
+
+	if (hw->mac.type >= e1000_i350)
+		adapter->flags &= ~IGB_FLAG_DMAC;
+
+	set_bit(__IGB_DOWN, &adapter->state);
+	return 0;
+}
+
+/**
+ *  igb_open - Called when a network interface is made active
+ *  @netdev: network interface device structure
+ *
+ *  Returns 0 on success, negative value on failure
+ *
+ *  The open entry point is called when a network interface is made
+ *  active by the system (IFF_UP).  At this point all resources needed
+ *  for transmit and receive operations are allocated, the interrupt
+ *  handler is registered with the OS, the watchdog timer is started,
+ *  and the stack is notified that the interface is ready.
+ **/
+static int __igb_open(struct rtnet_device *netdev, bool resuming)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	int err;
+
+	/* disallow open during test */
+	if (test_bit(__IGB_TESTING, &adapter->state)) {
+		WARN_ON(resuming);
+		return -EBUSY;
+	}
+
+	if (!resuming)
+		pm_runtime_get_sync(&pdev->dev);
+
+	rtnetif_carrier_off(netdev);
+
+	/* allocate transmit descriptors */
+	err = igb_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = igb_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	igb_power_up_link(adapter);
+
+	/* before we allocate an interrupt, we must be ready to handle it.
+	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+	 * as soon as we call pci_request_irq, so we have to setup our
+	 * clean_rx handler before we do so.
+	 */
+	igb_configure(adapter);
+
+	err = igb_request_irq(adapter);
+	if (err)
+		goto err_req_irq;
+
+	/* From here on the code is the same as igb_up() */
+	clear_bit(__IGB_DOWN, &adapter->state);
+
+	/* Clear any pending interrupts. */
+	rd32(E1000_ICR);
+
+	igb_irq_enable(adapter);
+
+	rtnetif_start_queue(netdev);
+
+	if (!resuming)
+		pm_runtime_put(&pdev->dev);
+
+	/* start the watchdog. */
+	hw->mac.get_link_status = 1;
+	schedule_work(&adapter->watchdog_task);
+
+	return 0;
+
+err_req_irq:
+	igb_release_hw_control(adapter);
+	igb_power_down_link(adapter);
+	igb_free_all_rx_resources(adapter);
+err_setup_rx:
+	igb_free_all_tx_resources(adapter);
+err_setup_tx:
+	igb_reset(adapter);
+	if (!resuming)
+		pm_runtime_put(&pdev->dev);
+
+	return err;
+}
+
+static int igb_open(struct rtnet_device *netdev)
+{
+	return __igb_open(netdev, false);
+}
+
+/**
+ *  igb_close - Disables a network interface
+ *  @netdev: network interface device structure
+ *
+ *  Returns 0, this is not allowed to fail
+ *
+ *  The close entry point is called when an interface is de-activated
+ *  by the OS.  The hardware is still under the driver's control, but
+ *  needs to be disabled.  A global MAC reset is issued to stop the
+ *  hardware, and all transmit and receive resources are freed.
+ **/
+static int __igb_close(struct rtnet_device *netdev, bool suspending)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct pci_dev *pdev = adapter->pdev;
+
+	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
+
+	if (!suspending)
+		pm_runtime_get_sync(&pdev->dev);
+
+	igb_down(adapter);
+	igb_free_irq(adapter);
+
+	rt_stack_disconnect(netdev);
+
+	igb_free_all_tx_resources(adapter);
+	igb_free_all_rx_resources(adapter);
+
+	if (!suspending)
+		pm_runtime_put_sync(&pdev->dev);
+	return 0;
+}
+
+static int igb_close(struct rtnet_device *netdev)
+{
+	return __igb_close(netdev, false);
+}
+
+/**
+ *  igb_setup_tx_resources - allocate Tx resources (Descriptors)
+ *  @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ *  Return 0 on success, negative on failure
+ **/
+int igb_setup_tx_resources(struct igb_ring *tx_ring)
+{
+	struct device *dev = tx_ring->dev;
+	int size;
+
+	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
+
+	tx_ring->tx_buffer_info = vzalloc(size);
+	if (!tx_ring->tx_buffer_info)
+		goto err;
+
+	/* round up to nearest 4K */
+	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
+	if (!tx_ring->desc)
+		goto err;
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+
+	return 0;
+
+err:
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ *  igb_setup_all_tx_resources - wrapper to allocate Tx resources
+ *				 (Descriptors) for all queues
+ *  @adapter: board private structure
+ *
+ *  Return 0 on success, negative on failure
+ **/
+static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		err = igb_setup_tx_resources(adapter->tx_ring[i]);
+		if (err) {
+			dev_err(&pdev->dev,
+				"Allocation for Tx Queue %u failed\n", i);
+			for (i--; i >= 0; i--)
+				igb_free_tx_resources(adapter->tx_ring[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ *  igb_setup_tctl - configure the transmit control registers
+ *  @adapter: Board private structure
+ **/
+void igb_setup_tctl(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 tctl;
+
+	/* disable queue 0 which is enabled by default on 82575 and 82576 */
+	wr32(E1000_TXDCTL(0), 0);
+
+	/* Program the Transmit Control Register */
+	tctl = rd32(E1000_TCTL);
+	tctl &= ~E1000_TCTL_CT;
+	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+	igb_config_collision_dist(hw);
+
+	/* Enable transmits */
+	tctl |= E1000_TCTL_EN;
+
+	wr32(E1000_TCTL, tctl);
+}
+
+/**
+ *  igb_configure_tx_ring - Configure transmit ring after Reset
+ *  @adapter: board private structure
+ *  @ring: tx ring to configure
+ *
+ *  Configure a transmit ring after a reset.
+ **/
+void igb_configure_tx_ring(struct igb_adapter *adapter,
+			   struct igb_ring *ring)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 txdctl = 0;
+	u64 tdba = ring->dma;
+	int reg_idx = ring->reg_idx;
+
+	/* disable the queue */
+	wr32(E1000_TXDCTL(reg_idx), 0);
+	wrfl();
+	mdelay(10);
+
+	wr32(E1000_TDLEN(reg_idx),
+	     ring->count * sizeof(union e1000_adv_tx_desc));
+	wr32(E1000_TDBAL(reg_idx),
+	     tdba & 0x00000000ffffffffULL);
+	wr32(E1000_TDBAH(reg_idx), tdba >> 32);
+
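+	/* reset head and tail pointers */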
+	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
+	wr32(E1000_TDH(reg_idx), 0);
+	writel(0, ring->tail);
+
+	txdctl |= IGB_TX_PTHRESH;
+	txdctl |= IGB_TX_HTHRESH << 8;
+	txdctl |= IGB_TX_WTHRESH << 16;
+
+	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+	wr32(E1000_TXDCTL(reg_idx), txdctl);
+}
+
+/**
+ *  igb_configure_tx - Configure transmit Unit after Reset
+ *  @adapter: board private structure
+ *
+ *  Configure the Tx unit of the MAC after a reset.
+ **/
+static void igb_configure_tx(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
+}
+
+/**
+ *  igb_setup_rx_resources - allocate Rx resources (Descriptors)
+ *  @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ *
+ *  Returns 0 on success, negative on failure
+ **/
+int igb_setup_rx_resources(struct igb_ring *rx_ring)
+{
+	struct device *dev = rx_ring->dev;
+	int size;
+
+	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
+
+	rx_ring->rx_buffer_info = vzalloc(size);
+	if (!rx_ring->rx_buffer_info)
+		goto err;
+
+	/* Round up to nearest 4K */
+	rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
+	if (!rx_ring->desc)
+		goto err;
+
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	return 0;
+
+err:
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ *  igb_setup_all_rx_resources - wrapper to allocate Rx resources
+ *				 (Descriptors) for all queues
+ *  @adapter: board private structure
+ *
+ *  Return 0 on success, negative on failure
+ **/
+static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		err = igb_setup_rx_resources(adapter->rx_ring[i]);
+		if (err) {
+			dev_err(&pdev->dev,
+				"Allocation for Rx Queue %u failed\n", i);
+			for (i--; i >= 0; i--)
+				igb_free_rx_resources(adapter->rx_ring[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ *  igb_setup_mrqc - configure the multiple receive queue control registers
+ *  @adapter: Board private structure
+ **/
+static void igb_setup_mrqc(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 mrqc, rxcsum;
+	u32 j, num_rx_queues;
+	u32 rss_key[10];
+
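+	/* fill the RSS hash key with random data and program it into RSSRK */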
+	get_random_bytes(rss_key, sizeof(rss_key));
+	for (j = 0; j < 10; j++)
+		wr32(E1000_RSSRK(j), rss_key[j]);
+
+	num_rx_queues = adapter->rss_queues;
+
+	switch (hw->mac.type) {
+	case e1000_82576:
+		/* 82576 supports 2 RSS queues for SR-IOV */
+		break;
+	default:
+		break;
+	}
+
+	if (adapter->rss_indir_tbl_init != num_rx_queues) {
+		for (j = 0; j < IGB_RETA_SIZE; j++)
+			adapter->rss_indir_tbl[j] =
+			(j * num_rx_queues) / IGB_RETA_SIZE;
+		adapter->rss_indir_tbl_init = num_rx_queues;
+	}
+
+	/* Disable raw packet checksumming so that RSS hash is placed in
+	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
+	 * offloads as they are enabled by default
+	 */
+	rxcsum = rd32(E1000_RXCSUM);
+	rxcsum |= E1000_RXCSUM_PCSD;
+
+	if (adapter->hw.mac.type >= e1000_82576)
+		/* Enable Receive Checksum Offload for SCTP */
+		rxcsum |= E1000_RXCSUM_CRCOFL;
+
+	/* Don't need to set TUOFL or IPOFL, they default to 1 */
+	wr32(E1000_RXCSUM, rxcsum);
+
+	/* Generate RSS hash based on packet types, TCP/UDP
+	 * port numbers and/or IPv4/v6 src and dst addresses
+	 */
+	mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
+	       E1000_MRQC_RSS_FIELD_IPV4_TCP |
+	       E1000_MRQC_RSS_FIELD_IPV6 |
+	       E1000_MRQC_RSS_FIELD_IPV6_TCP |
+	       E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
+
+	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+
+	/* If VMDq is enabled then we set the appropriate mode for that, else
+	 * we default to RSS so that an RSS hash is calculated per packet even
+	 * if we are only using one queue
+	 */
+	if (hw->mac.type != e1000_i211)
+		mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
+
+	wr32(E1000_MRQC, mrqc);
+}
+
+/**
+ *  igb_setup_rctl - configure the receive control registers
+ *  @adapter: Board private structure
+ **/
+void igb_setup_rctl(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+
+	rctl = rd32(E1000_RCTL);
+
+	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+
+	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
+		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+	/* enable stripping of CRC. It's unlikely this will break BMC
+	 * redirection as it did with e1000. Newer features require
+	 * that the HW strips the CRC.
+	 */
+	rctl |= E1000_RCTL_SECRC;
+
+	/* disable store bad packets and clear size bits. */
+	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
+
+	/* enable LPE to prevent packets larger than max_frame_size */
+	rctl |= E1000_RCTL_LPE;
+
+	/* disable queue 0 to prevent tail write w/o re-config */
+	wr32(E1000_RXDCTL(0), 0);
+
+	/* This is useful for sniffing bad packets. */
+	if (adapter->netdev->features & NETIF_F_RXALL) {
+		/* UPE and MPE will be handled by normal PROMISC logic
+		 * in e1000e_set_rx_mode
+		 */
+		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
+			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
+			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
+
+		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
+			  E1000_RCTL_DPF | /* Allow filtered pause */
+			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
+		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
+		 * and that breaks VLANs.
+		 */
+	}
+
+	wr32(E1000_RCTL, rctl);
+}
+
+/**
+ *  igb_rlpml_set - set maximum receive packet size
+ *  @adapter: board private structure
+ *
+ *  Configure maximum receivable packet size.
+ **/
+static void igb_rlpml_set(struct igb_adapter *adapter)
+{
+	u32 max_frame_size = adapter->max_frame_size;
+	struct e1000_hw *hw = &adapter->hw;
+
+	wr32(E1000_RLPML, max_frame_size);
+}
+
+static inline void igb_set_vmolr(struct igb_adapter *adapter,
+				 int vfn, bool aupe)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vmolr;
+
+	/* This register exists only on 82576 and newer so if we are older then
+	 * we should exit and do nothing
+	 */
+	if (hw->mac.type < e1000_82576)
+		return;
+
+	vmolr = rd32(E1000_VMOLR(vfn));
+	vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+	if (hw->mac.type == e1000_i350) {
+		u32 dvmolr;
+
+		dvmolr = rd32(E1000_DVMOLR(vfn));
+		dvmolr |= E1000_DVMOLR_STRVLAN;
+		wr32(E1000_DVMOLR(vfn), dvmolr);
+	}
+	if (aupe)
+		vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
+	else
+		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
+
+	/* clear all bits that might not be set */
+	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
+
+	if (adapter->rss_queues > 1)
+		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
+	/* for VMDq only allow the VFs and pool 0 to accept broadcast and
+	 * multicast packets
+	 */
+	vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
+
+	wr32(E1000_VMOLR(vfn), vmolr);
+}
+
+/**
+ *  igb_configure_rx_ring - Configure a receive ring after Reset
+ *  @adapter: board private structure
+ *  @ring: receive ring to be configured
+ *
+ *  Configure the Rx unit of the MAC after a reset.
+ **/
+void igb_configure_rx_ring(struct igb_adapter *adapter,
+			   struct igb_ring *ring)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u64 rdba = ring->dma;
+	int reg_idx = ring->reg_idx;
+	u32 srrctl = 0, rxdctl = 0;
+
+	ring->rx_buffer_len = max_t(u32, adapter->max_frame_size,
+				MAXIMUM_ETHERNET_VLAN_SIZE);
+
+	/* disable the queue */
+	wr32(E1000_RXDCTL(reg_idx), 0);
+
+	/* Set DMA base address registers */
+	wr32(E1000_RDBAL(reg_idx),
+	     rdba & 0x00000000ffffffffULL);
+	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
+	wr32(E1000_RDLEN(reg_idx),
+	     ring->count * sizeof(union e1000_adv_rx_desc));
+
+	/* initialize head and tail */
+	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
+	wr32(E1000_RDH(reg_idx), 0);
+	writel(0, ring->tail);
+
+	/* set descriptor configuration */
+	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+	srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+	if (hw->mac.type >= e1000_82580)
+		srrctl |= E1000_SRRCTL_TIMESTAMP;
+	/* Only set Drop Enable if we are supporting multiple queues */
+	if (adapter->num_rx_queues > 1)
+		srrctl |= E1000_SRRCTL_DROP_EN;
+
+	wr32(E1000_SRRCTL(reg_idx), srrctl);
+
+	/* set filtering for VMDQ pools */
+	igb_set_vmolr(adapter, reg_idx & 0x7, true);
+
+	rxdctl |= IGB_RX_PTHRESH;
+	rxdctl |= IGB_RX_HTHRESH << 8;
+	rxdctl |= IGB_RX_WTHRESH << 16;
+
+	/* enable receive descriptor fetching */
+	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+	wr32(E1000_RXDCTL(reg_idx), rxdctl);
+}
+
+/**
+ *  igb_configure_rx - Configure receive Unit after Reset
+ *  @adapter: board private structure
+ *
+ *  Configure the Rx unit of the MAC after a reset.
+ **/
+static void igb_configure_rx(struct igb_adapter *adapter)
+{
+	int i;
+
+	/* set the correct pool for the PF default MAC address in entry 0 */
+	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, 0);
+
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring
+	 */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
+}
+
+/**
+ *  igb_free_tx_resources - Free Tx Resources per Queue
+ *  @tx_ring: Tx descriptor ring for a specific queue
+ *
+ *  Free all transmit software resources
+ **/
+void igb_free_tx_resources(struct igb_ring *tx_ring)
+{
+	igb_clean_tx_ring(tx_ring);
+
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!tx_ring->desc)
+		return;
+
+	dma_free_coherent(tx_ring->dev, tx_ring->size,
+			  tx_ring->desc, tx_ring->dma);
+
+	tx_ring->desc = NULL;
+}
+
+/**
+ *  igb_free_all_tx_resources - Free Tx Resources for All Queues
+ *  @adapter: board private structure
+ *
+ *  Free all transmit software resources
+ **/
+static void igb_free_all_tx_resources(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		if (adapter->tx_ring[i])
+			igb_free_tx_resources(adapter->tx_ring[i]);
+}
+
+void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
+				    struct igb_tx_buffer *tx_buffer)
+{
+	if (tx_buffer->skb) {
+		kfree_rtskb(tx_buffer->skb);
+		tx_buffer->skb = NULL;
+	}
+	tx_buffer->next_to_watch = NULL;
+	/* buffer_info must be completely set up in the transmit path */
+}
+
+/**
+ *  igb_clean_tx_ring - Free Tx Buffers
+ *  @tx_ring: ring to be cleaned
+ **/
+static void igb_clean_tx_ring(struct igb_ring *tx_ring)
+{
+	struct igb_tx_buffer *buffer_info;
+	unsigned long size;
+	u16 i;
+
+	if (!tx_ring->tx_buffer_info)
+		return;
+
+	/* Free all the Tx ring rtskbs */
+	for (i = 0; i < tx_ring->count; i++) {
+		buffer_info = &tx_ring->tx_buffer_info[i];
+		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+	}
+
+	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
+	memset(tx_ring->tx_buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+}
+
+/**
+ *  igb_clean_all_tx_rings - Free Tx Buffers for all queues
+ *  @adapter: board private structure
+ **/
+static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		if (adapter->tx_ring[i])
+			igb_clean_tx_ring(adapter->tx_ring[i]);
+}
+
+/**
+ *  igb_free_rx_resources - Free Rx Resources
+ *  @rx_ring: ring to clean the resources from
+ *
+ *  Free all receive software resources
+ **/
+void igb_free_rx_resources(struct igb_ring *rx_ring)
+{
+	igb_clean_rx_ring(rx_ring);
+
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!rx_ring->desc)
+		return;
+
+	dma_free_coherent(rx_ring->dev, rx_ring->size,
+			  rx_ring->desc, rx_ring->dma);
+
+	rx_ring->desc = NULL;
+}
+
+/**
+ *  igb_free_all_rx_resources - Free Rx Resources for All Queues
+ *  @adapter: board private structure
+ *
+ *  Free all receive software resources
+ **/
+static void igb_free_all_rx_resources(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		if (adapter->rx_ring[i])
+			igb_free_rx_resources(adapter->rx_ring[i]);
+}
+
+/**
+ *  igb_clean_rx_ring - Free Rx Buffers per Queue
+ *  @rx_ring: ring to free buffers from
+ **/
+static void igb_clean_rx_ring(struct igb_ring *rx_ring)
+{
+	unsigned long size;
+	u16 i;
+
+	if (!rx_ring->rx_buffer_info)
+		return;
+
+	/* Free all the Rx ring rtskbs */
+	for (i = 0; i < rx_ring->count; i++) {
+		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
+
+		if (buffer_info->dma)
+			buffer_info->dma = 0;
+
+		if (buffer_info->skb) {
+			kfree_rtskb(buffer_info->skb);
+			buffer_info->skb = NULL;
+		}
+	}
+
+	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
+	memset(rx_ring->rx_buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+}
+
+/**
+ *  igb_clean_all_rx_rings - Free Rx Buffers for all queues
+ *  @adapter: board private structure
+ **/
+static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		if (adapter->rx_ring[i])
+			igb_clean_rx_ring(adapter->rx_ring[i]);
+}
+
+/**
+ *  igb_write_mc_addr_list - write multicast addresses to MTA
+ *  @netdev: network interface device structure
+ *
+ *  Writes multicast address list to the MTA hash table.
+ *  Returns: -ENOMEM on failure
+ *           0 on no addresses written
+ *           X on writing X addresses to MTA
+ **/
+static int igb_write_mc_addr_list(struct rtnet_device *netdev)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+#if 0
+	struct netdev_hw_addr *ha;
+	u8  *mta_list;
+	int i;
+	if (netdev_mc_empty(netdev)) {
+		/* nothing to program, so clear mc list */
+		igb_update_mc_addr_list(hw, NULL, 0);
+		igb_restore_vf_multicasts(adapter);
+		return 0;
+	}
+
+	mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
+	if (!mta_list)
+		return -ENOMEM;
+
+	/* The shared function expects a packed array of only addresses. */
+	i = 0;
+	netdev_for_each_mc_addr(ha, netdev)
+		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+
+	igb_update_mc_addr_list(hw, mta_list, i);
+	kfree(mta_list);
+
+	return netdev_mc_count(netdev);
+#else
+	igb_update_mc_addr_list(hw, NULL, 0);
+	return 0;
+#endif
+}
+
+/**
+ *  igb_write_uc_addr_list - write unicast addresses to RAR table
+ *  @netdev: network interface device structure
+ *
+ *  Writes unicast address list to the RAR table.
+ *  Returns: -ENOMEM on failure/insufficient address space
+ *           0 on no addresses written
+ *           X on writing X addresses to the RAR table
+ **/
+static int igb_write_uc_addr_list(struct rtnet_device *netdev)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned int vfn = 0;
+	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
+	int count = 0;
+
+	/* clear all RAR entries above the default MAC address (entry 0) */
+	for (; rar_entries > 0 ; rar_entries--) {
+		wr32(E1000_RAH(rar_entries), 0);
+		wr32(E1000_RAL(rar_entries), 0);
+	}
+	wrfl();
+
+	return count;
+}
+
+/**
+ *  igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ *  @netdev: network interface device structure
+ *
+ *  The set_rx_mode entry point is called whenever the unicast or multicast
+ *  address lists or the network interface flags are updated.  This routine is
+ *  responsible for configuring the hardware for proper unicast, multicast,
+ *  promiscuous mode, and all-multi behavior.
+ **/
+static void igb_set_rx_mode(struct rtnet_device *netdev)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned int vfn = 0;
+	u32 rctl, vmolr = 0;
+	int count;
+
+	/* Check for Promiscuous and All Multicast modes */
+	rctl = rd32(E1000_RCTL);
+
+	/* clear the affected bits */
+	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
+
+	if (netdev->flags & IFF_PROMISC) {
+		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
+	} else {
+		if (netdev->flags & IFF_ALLMULTI) {
+			rctl |= E1000_RCTL_MPE;
+			vmolr |= E1000_VMOLR_MPME;
+		} else {
+			/* Write addresses to the MTA, if the attempt fails
+			 * then we should just turn on promiscuous mode so
+			 * that we can at least receive multicast traffic
+			 */
+			count = igb_write_mc_addr_list(netdev);
+			if (count < 0) {
+				rctl |= E1000_RCTL_MPE;
+				vmolr |= E1000_VMOLR_MPME;
+			} else if (count) {
+				vmolr |= E1000_VMOLR_ROMPE;
+			}
+		}
+		/* Write addresses to available RAR registers, if there is not
+		 * sufficient space to store all the addresses then enable
+		 * unicast promiscuous mode
+		 */
+		count = igb_write_uc_addr_list(netdev);
+		if (count < 0) {
+			rctl |= E1000_RCTL_UPE;
+			vmolr |= E1000_VMOLR_ROPE;
+		}
+		rctl |= E1000_RCTL_VFE;
+	}
+	wr32(E1000_RCTL, rctl);
+
+	/* In order to support SR-IOV and eventually VMDq it is necessary to set
+	 * the VMOLR to enable the appropriate modes.  Without this workaround
+	 * we will have issues with VLAN tag stripping not being done for frames
+	 * that are only arriving because we are the default pool
+	 */
+	if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
+		return;
+
+	vmolr |= rd32(E1000_VMOLR(vfn)) &
+		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
+	wr32(E1000_VMOLR(vfn), vmolr);
+}
+
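+/* Latch any bits set in the Wrong VM Behaviour (WVBR) register, which is
+ * only present on 82576 and i350 parts.
+ */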
+static void igb_check_wvbr(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 wvbr = 0;
+
+	switch (hw->mac.type) {
+	case e1000_82576:
+	case e1000_i350:
+		wvbr = rd32(E1000_WVBR);
+		if (!wvbr)
+			return;
+		break;
+	default:
+		break;
+	}
+
+	adapter->wvbr |= wvbr;
+}
+
+#define IGB_STAGGERED_QUEUE_OFFSET 8
+
+/* Need to wait a few seconds after link up to get diagnostic information from
+ * the phy
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+static void igb_update_phy_info(struct timer_list *t)
+{
+	struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+#else /* < 4.14 */
+static void igb_update_phy_info(unsigned long data)
+{
+	struct igb_adapter *adapter = (struct igb_adapter *) data;
+#endif /* < 4.14 */
+	igb_get_phy_info(&adapter->hw);
+}
+
+/**
+ *  igb_has_link - check shared code for link and determine up/down
+ *  @adapter: pointer to driver private info
+ **/
+bool igb_has_link(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	bool link_active = false;
+
+	/* get_link_status is set on LSC (link status) interrupt or
+	 * rx sequence error interrupt.  get_link_status will stay
+	 * false until the e1000_check_for_link establishes link
+	 * for copper adapters ONLY
+	 */
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		if (!hw->mac.get_link_status)
+			return true;
+		fallthrough;
+	case e1000_media_type_internal_serdes:
+		hw->mac.ops.check_for_link(hw);
+		link_active = !hw->mac.get_link_status;
+		break;
+	default:
+	case e1000_media_type_unknown:
+		break;
+	}
+
+	if (((hw->mac.type == e1000_i210) ||
+	     (hw->mac.type == e1000_i211)) &&
+	     (hw->phy.id == I210_I_PHY_ID)) {
+		if (!rtnetif_carrier_ok(adapter->netdev)) {
+			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+		} else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
+			adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
+			adapter->link_check_timeout = jiffies;
+		}
+	}
+
+	return link_active;
+}
+
+static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
+{
+	bool ret = false;
+	u32 ctrl_ext, thstat;
+
+	/* check for thermal sensor event on i350 copper only */
+	if (hw->mac.type == e1000_i350) {
+		thstat = rd32(E1000_THSTAT);
+		ctrl_ext = rd32(E1000_CTRL_EXT);
+
+		if ((hw->phy.media_type == e1000_media_type_copper) &&
+		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
+			ret = !!(thstat & event);
+	}
+
+	return ret;
+}
+
+/**
+ *  igb_check_lvmmc - check for malformed packets received
+ *  and indicated in LVMMC register
+ *  @adapter: pointer to adapter
+ **/
+static void igb_check_lvmmc(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 lvmmc;
+
+	lvmmc = rd32(E1000_LVMMC);
+	if (lvmmc) {
+		if (unlikely(net_ratelimit())) {
+			rtdev_warn(adapter->netdev,
+				    "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
+				    lvmmc);
+		}
+	}
+}
+
+/**
+ *  igb_watchdog - Timer Call-back
+ *  @data: pointer to adapter cast into an unsigned long
+ **/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+static void igb_watchdog(struct timer_list *t)
+{
+	struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+#else /* < 4.14 */
+static void igb_watchdog(unsigned long data)
+{
+	struct igb_adapter *adapter = (struct igb_adapter *)data;
+#endif /* < 4.14 */
+	/* Do the rest outside of interrupt context */
+	schedule_work(&adapter->watchdog_task);
+}
+
+static void igb_watchdog_task(struct work_struct *work)
+{
+	struct igb_adapter *adapter = container_of(work,
+						   struct igb_adapter,
+						   watchdog_task);
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_phy_info *phy = &hw->phy;
+	struct rtnet_device *netdev = adapter->netdev;
+	u32 link;
+	int i;
+	u32 connsw;
+
+	link = igb_has_link(adapter);
+
+	if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
+		if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
+			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+		else
+			link = false;
+	}
+
+	/* Force link down if we have fiber to swap to */
+	if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+		if (hw->phy.media_type == e1000_media_type_copper) {
+			connsw = rd32(E1000_CONNSW);
+			if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
+				link = 0;
+		}
+	}
+	if (link) {
+		/* Perform a reset if the media type changed. */
+		if (hw->dev_spec._82575.media_changed) {
+			hw->dev_spec._82575.media_changed = false;
+			adapter->flags |= IGB_FLAG_MEDIA_RESET;
+			igb_reset(adapter);
+		}
+		/* Cancel scheduled suspend requests. */
+		pm_runtime_resume(adapter->pdev->dev.parent);
+
+		if (!rtnetif_carrier_ok(netdev)) {
+			u32 ctrl;
+
+			hw->mac.ops.get_speed_and_duplex(hw,
+							 &adapter->link_speed,
+							 &adapter->link_duplex);
+
+			ctrl = rd32(E1000_CTRL);
+			/* Link status message must follow this format */
+			rtdev_info(netdev,
+			       "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+			       netdev->name,
+			       adapter->link_speed,
+			       adapter->link_duplex == FULL_DUPLEX ?
+			       "Full" : "Half",
+			       (ctrl & E1000_CTRL_TFCE) &&
+			       (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
+			       (ctrl & E1000_CTRL_RFCE) ?  "RX" :
+			       (ctrl & E1000_CTRL_TFCE) ?  "TX" : "None");
+
+			/* disable EEE if enabled */
+			if ((adapter->flags & IGB_FLAG_EEE) &&
+				(adapter->link_duplex == HALF_DUPLEX)) {
+				dev_info(&adapter->pdev->dev,
+				"EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
+				adapter->hw.dev_spec._82575.eee_disable = true;
+				adapter->flags &= ~IGB_FLAG_EEE;
+			}
+
+			/* check if SmartSpeed worked */
+			igb_check_downshift(hw);
+			if (phy->speed_downgraded)
+				rtdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
+
+			/* check for thermal sensor event */
+			if (igb_thermal_sensor_event(hw,
+			    E1000_THSTAT_LINK_THROTTLE))
+				rtdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
+
+			/* adjust timeout factor according to speed/duplex */
+			adapter->tx_timeout_factor = 1;
+			switch (adapter->link_speed) {
+			case SPEED_10:
+				adapter->tx_timeout_factor = 14;
+				break;
+			case SPEED_100:
+				/* maybe add some timeout factor ? */
+				break;
+			}
+
+			rtnetif_carrier_on(netdev);
+
+			/* link state has changed, schedule phy info update */
+			if (!test_bit(__IGB_DOWN, &adapter->state))
+				mod_timer(&adapter->phy_info_timer,
+					  round_jiffies(jiffies + 2 * HZ));
+		}
+	} else {
+		if (rtnetif_carrier_ok(netdev)) {
+			adapter->link_speed = 0;
+			adapter->link_duplex = 0;
+
+			/* check for thermal sensor event */
+			if (igb_thermal_sensor_event(hw,
+			    E1000_THSTAT_PWR_DOWN)) {
+				rtdev_err(netdev, "The network adapter was stopped because it overheated\n");
+			}
+
+			/* Link status message must follow this format */
+			rtdev_info(netdev, "igb: %s NIC Link is Down\n",
+			       netdev->name);
+			rtnetif_carrier_off(netdev);
+
+			/* link state has changed, schedule phy info update */
+			if (!test_bit(__IGB_DOWN, &adapter->state))
+				mod_timer(&adapter->phy_info_timer,
+					  round_jiffies(jiffies + 2 * HZ));
+
+			/* link is down, time to check for alternate media */
+			if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+				igb_check_swap_media(adapter);
+				if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+					schedule_work(&adapter->reset_task);
+					/* return immediately */
+					return;
+				}
+			}
+			pm_schedule_suspend(adapter->pdev->dev.parent,
+					    MSEC_PER_SEC * 5);
+
+		/* also check for alternate media here */
+		} else if (!rtnetif_carrier_ok(netdev) &&
+			   (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+			igb_check_swap_media(adapter);
+			if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+				schedule_work(&adapter->reset_task);
+				/* return immediately */
+				return;
+			}
+		}
+	}
+
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter);
+	spin_unlock(&adapter->stats64_lock);
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct igb_ring *tx_ring = adapter->tx_ring[i];
+		if (!rtnetif_carrier_ok(netdev)) {
+			/* We've lost link, so the controller stops DMA,
+			 * but we've got queued Tx work that's never going
+			 * to get done, so reset controller to flush Tx.
+			 * (Do the reset outside of interrupt context).
+			 */
+			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
+				adapter->tx_timeout_count++;
+				schedule_work(&adapter->reset_task);
+				/* return immediately since reset is imminent */
+				return;
+			}
+		}
+
+		/* Force detection of hung controller every watchdog period */
+		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
+	}
+
+	/* Cause software interrupt to ensure Rx ring is cleaned */
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+		u32 eics = 0;
+
+		for (i = 0; i < adapter->num_q_vectors; i++)
+			eics |= adapter->q_vector[i]->eims_value;
+		wr32(E1000_EICS, eics);
+	} else {
+		wr32(E1000_ICS, E1000_ICS_RXDMT0);
+	}
+
+	/* Check LVMMC register on i350/i354 only */
+	if ((adapter->hw.mac.type == e1000_i350) ||
+	    (adapter->hw.mac.type == e1000_i354))
+		igb_check_lvmmc(adapter);
+
+	/* Reset the timer */
+	if (!test_bit(__IGB_DOWN, &adapter->state)) {
+		if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
+			mod_timer(&adapter->watchdog_timer,
+				  round_jiffies(jiffies +  HZ));
+		else
+			mod_timer(&adapter->watchdog_timer,
+				  round_jiffies(jiffies + 2 * HZ));
+	}
+}
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+/**
+ *  igb_update_ring_itr - update the dynamic ITR value based on packet size
+ *  @q_vector: pointer to q_vector
+ *
+ *  Stores a new ITR value based strictly on packet size.  This
+ *  algorithm is less sophisticated than that used in igb_update_itr,
+ *  due to the difficulty of synchronizing statistics across multiple
+ *  receive rings.  The divisors and thresholds used by this function
+ *  were determined based on theoretical maximum wire speed and testing
+ *  data, in order to minimize response time while increasing bulk
+ *  throughput.
+ *  This functionality is controlled by ethtool's coalescing settings.
+ *  NOTE:  This function is called only when operating in a multiqueue
+ *         receive environment.
+ **/
+static void igb_update_ring_itr(struct igb_q_vector *q_vector)
+{
+	int new_val = q_vector->itr_val;
+	int avg_wire_size = 0;
+	struct igb_adapter *adapter = q_vector->adapter;
+	unsigned int packets;
+
+	if (!InterruptThrottle)
+		return;
+
+	/* For non-gigabit speeds, just fix the interrupt rate at 4000
+	 * ints/sec - ITR timer value of 120 ticks.
+	 */
+	if (adapter->link_speed != SPEED_1000) {
+		new_val = IGB_4K_ITR;
+		goto set_itr_val;
+	}
+
+	packets = q_vector->rx.total_packets;
+	if (packets)
+		avg_wire_size = q_vector->rx.total_bytes / packets;
+
+	packets = q_vector->tx.total_packets;
+	if (packets)
+		avg_wire_size = max_t(u32, avg_wire_size,
+				      q_vector->tx.total_bytes / packets);
+
+	/* if avg_wire_size isn't set no work was done */
+	if (!avg_wire_size)
+		goto clear_counts;
+
+	/* Add 24 bytes to size to account for CRC, preamble, and gap */
+	avg_wire_size += 24;
+
+	/* Don't starve jumbo frames */
+	avg_wire_size = min(avg_wire_size, 3000);
+
+	/* Give a little boost to mid-size frames */
+	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
+		new_val = avg_wire_size / 3;
+	else
+		new_val = avg_wire_size / 2;
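+	/*
+	 * Worked example of the calculation above (illustration only, not
+	 * part of the original driver logic): 64-byte frames give
+	 * avg_wire_size = 64 + 24 = 88, so new_val = 88 / 2 = 44 (a short
+	 * interval, i.e. a high interrupt rate); 1000-byte frames give
+	 * 1024, which lands in the mid-size boost range, so
+	 * new_val = 1024 / 3 = 341, trading latency for throughput.
+	 */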
+
+	/* conservative mode (itr 3) eliminates the lowest_latency setting */
+	if (new_val < IGB_20K_ITR &&
+	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
+		new_val = IGB_20K_ITR;
+
+set_itr_val:
+	if (new_val != q_vector->itr_val) {
+		q_vector->itr_val = new_val;
+		q_vector->set_itr = 1;
+	}
+clear_counts:
+	q_vector->rx.total_bytes = 0;
+	q_vector->rx.total_packets = 0;
+	q_vector->tx.total_bytes = 0;
+	q_vector->tx.total_packets = 0;
+}
+
+/**
+ *  igb_update_itr - update the dynamic ITR value based on statistics
+ *  @q_vector: pointer to q_vector
+ *  @ring_container: ring info to update the itr for
+ *
+ *  Stores a new ITR value based on packets and byte
+ *  counts during the last interrupt.  The advantage of per interrupt
+ *  computation is faster updates and more accurate ITR for the current
+ *  traffic pattern.  Constants in this function were computed
+ *  based on theoretical maximum wire speed and thresholds were set based
+ *  on testing data as well as attempting to minimize response time
+ *  while increasing bulk throughput.
+ *  This functionality is controlled by ethtool's coalescing settings.
+ *  NOTE:  These calculations are only valid when operating in a single-
+ *         queue environment.
+ **/
+static void igb_update_itr(struct igb_q_vector *q_vector,
+			   struct igb_ring_container *ring_container)
+{
+	unsigned int packets = ring_container->total_packets;
+	unsigned int bytes = ring_container->total_bytes;
+	u8 itrval = ring_container->itr;
+
+	/* no packets, exit with status unchanged */
+	if (packets == 0)
+		return;
+
+	switch (itrval) {
+	case lowest_latency:
+		/* handle TSO and jumbo frames */
+		if (bytes/packets > 8000)
+			itrval = bulk_latency;
+		else if ((packets < 5) && (bytes > 512))
+			itrval = low_latency;
+		break;
+	case low_latency:  /* 50 usec aka 20000 ints/s */
+		if (bytes > 10000) {
+			/* this if handles the TSO accounting */
+			if (bytes/packets > 8000)
+				itrval = bulk_latency;
+			else if ((packets < 10) || ((bytes/packets) > 1200))
+				itrval = bulk_latency;
+			else if ((packets > 35))
+				itrval = lowest_latency;
+		} else if (bytes/packets > 2000) {
+			itrval = bulk_latency;
+		} else if (packets <= 2 && bytes < 512) {
+			itrval = lowest_latency;
+		}
+		break;
+	case bulk_latency: /* 250 usec aka 4000 ints/s */
+		if (bytes > 25000) {
+			if (packets > 35)
+				itrval = low_latency;
+		} else if (bytes < 1500) {
+			itrval = low_latency;
+		}
+		break;
+	}
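+	/*
+	 * Worked example of the state machine above (illustration only):
+	 * starting in low_latency with 40 packets totalling 12000 bytes,
+	 * bytes > 10000 and packets > 35, so itrval moves to lowest_latency;
+	 * with 4 packets totalling 32000 bytes, bytes/packets is exactly
+	 * 8000 (not > 8000) but packets < 10, so itrval moves to
+	 * bulk_latency instead.
+	 */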
+
+	/* clear work counters since we have the values we need */
+	ring_container->total_bytes = 0;
+	ring_container->total_packets = 0;
+
+	/* write updated itr to ring container */
+	ring_container->itr = itrval;
+}
+
+static void igb_set_itr(struct igb_q_vector *q_vector)
+{
+	struct igb_adapter *adapter = q_vector->adapter;
+	u32 new_itr = q_vector->itr_val;
+	u8 current_itr = 0;
+
+	if (!InterruptThrottle)
+		return;
+
+	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+	if (adapter->link_speed != SPEED_1000) {
+		current_itr = 0;
+		new_itr = IGB_4K_ITR;
+		goto set_itr_now;
+	}
+
+	igb_update_itr(q_vector, &q_vector->tx);
+	igb_update_itr(q_vector, &q_vector->rx);
+
+	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
+
+	/* conservative mode (itr 3) eliminates the lowest_latency setting */
+	if (current_itr == lowest_latency &&
+	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
+		current_itr = low_latency;
+
+	switch (current_itr) {
+	/* counts and packets in update_itr are dependent on these numbers */
+	case lowest_latency:
+		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
+		break;
+	case low_latency:
+		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
+		break;
+	case bulk_latency:
+		new_itr = IGB_4K_ITR;  /* 4,000 ints/sec */
+		break;
+	default:
+		break;
+	}
+
+set_itr_now:
+	if (new_itr != q_vector->itr_val) {
+		/* this attempts to bias the interrupt rate towards Bulk
+		 * by adding intermediate steps when interrupt rate is
+		 * increasing
+		 */
+		new_itr = new_itr > q_vector->itr_val ?
+			  max((new_itr * q_vector->itr_val) /
+			  (new_itr + (q_vector->itr_val >> 2)),
+			  new_itr) : new_itr;
+		/* Don't write the value here; it resets the adapter's
+		 * internal timer, and causes us to delay far longer than
+		 * we should between interrupts.  Instead, we write the ITR
+		 * value at the beginning of the next interrupt so the timing
+		 * ends up being correct.
+		 */
+		q_vector->itr_val = new_itr;
+		q_vector->set_itr = 1;
+	}
+}
+
+
+#define IGB_SET_FLAG(_input, _flag, _result) \
+	((_flag <= _result) ? \
+	 ((u32)(_input & _flag) * (_result / _flag)) : \
+	 ((u32)(_input & _flag) / (_flag / _result)))
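+
+/*
+ * Illustrative use of IGB_SET_FLAG with hypothetical bit values: for
+ * _flag = 0x02 and _result = 0x20, _flag <= _result, so the macro expands
+ * to ((u32)(_input & 0x02) * (0x20 / 0x02)), i.e. it relocates bit 1 of
+ * _input to bit 5 of the result without a conditional branch.
+ */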
+
+static u32 igb_tx_cmd_type(struct rtskb *skb, u32 tx_flags)
+{
+	/* set type for advanced descriptor with frame checksum insertion */
+	u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
+		       E1000_ADVTXD_DCMD_DEXT |
+		       E1000_ADVTXD_DCMD_IFCS;
+
+	return cmd_type;
+}
+
+static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
+				 union e1000_adv_tx_desc *tx_desc,
+				 u32 tx_flags, unsigned int paylen)
+{
+	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
+
+	/* 82575 requires a unique index per ring */
+	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+		olinfo_status |= tx_ring->reg_idx << 4;
+
+	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+}
+
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
+{
+	struct rtnet_device *netdev = tx_ring->netdev;
+
+	rtnetif_stop_queue(netdev);
+
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it.
+	 */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available.
+	 */
+	if (igb_desc_unused(tx_ring) < size)
+		return -EBUSY;
+
+	/* A reprieve! */
+	rtnetif_wake_queue(netdev);
+
+	tx_ring->tx_stats.restart_queue2++;
+
+	return 0;
+}
+
+static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
+{
+	if (igb_desc_unused(tx_ring) >= size)
+		return 0;
+	return __igb_maybe_stop_tx(tx_ring, size);
+}
+
+static void igb_tx_map(struct igb_ring *tx_ring,
+		       struct igb_tx_buffer *first,
+		       const u8 hdr_len)
+{
+	struct rtskb *skb = first->skb;
+	struct igb_tx_buffer *tx_buffer;
+	union e1000_adv_tx_desc *tx_desc;
+	dma_addr_t dma;
+	unsigned int size;
+	u32 tx_flags = first->tx_flags;
+	u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
+	u16 i = tx_ring->next_to_use;
+
+	/* first descriptor is also last, set RS and EOP bits */
+	cmd_type |= IGB_TXD_DCMD;
+	tx_desc = IGB_TX_DESC(tx_ring, i);
+
+	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
+
+	size = skb->len;
+
+	dma = rtskb_data_dma_addr(skb, 0);
+
+	tx_buffer = first;
+
+	tx_desc->read.buffer_addr = cpu_to_le64(dma);
+	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
+
+	/* set the timestamp */
+	first->time_stamp = jiffies;
+	first->next_to_watch = tx_desc;
+
+	i++;
+	tx_desc++;
+	if (i == tx_ring->count) {
+		tx_desc = IGB_TX_DESC(tx_ring, 0);
+		i = 0;
+	}
+
+	/* Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.  (Only applicable for weak-ordered
+	 * memory model archs, such as IA-64).
+	 *
+	 * We also need this memory barrier to make certain all of the
+	 * status bits have been updated before next_to_watch is written.
+	 */
+	wmb();
+
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp =
+			cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+	/* set next_to_watch value indicating a packet is present */
+	tx_ring->next_to_use = i;
+
+	/* Make sure there is space in the ring for the next send. */
+	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+	writel(i, tx_ring->tail);
+
+	/* We need this if more than one processor can write to our tail
+	 * at a time; it synchronizes I/O on IA64/Altix systems.
+	 */
+	mmiowb();
+
+	return;
+}
+
+netdev_tx_t igb_xmit_frame_ring(struct rtskb *skb,
+				struct igb_ring *tx_ring)
+{
+	struct igb_tx_buffer *first;
+	u32 tx_flags = 0;
+	u16 count = 2;
+	u8 hdr_len = 0;
+
+	/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
+	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time
+	 */
+	if (igb_maybe_stop_tx(tx_ring, count + 3)) {
+		/* this is a hard error */
+		return NETDEV_TX_BUSY;
+	}
+
+	if (skb->protocol == htons(ETH_P_IP))
+		tx_flags |= IGB_TX_FLAGS_IPV4;
+
+	/* record the location of the first descriptor for this packet */
+	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+	first->skb = skb;
+	first->bytecount = skb->len;
+	first->gso_segs = 1;
+
+	/* record initial flags and protocol */
+	first->tx_flags = tx_flags;
+	first->protocol = skb->protocol;
+
+	igb_tx_map(tx_ring, first, hdr_len);
+
+	return NETDEV_TX_OK;
+}
+
+static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
+						    struct rtskb *skb)
+{
+	return adapter->tx_ring[0];
+}
+
+static netdev_tx_t igb_xmit_frame(struct rtskb *skb,
+				  struct rtnet_device *netdev)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+
+	if (test_bit(__IGB_DOWN, &adapter->state)) {
+		kfree_rtskb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (skb->len <= 0) {
+		kfree_rtskb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
+	 * in order to meet this minimum size requirement.
+	 */
+	if (skb->len < 17) {
+		skb = rtskb_padto(skb, 17);
+		if (!skb)
+			return NETDEV_TX_OK;
+	}
+
+	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
+}
+
+static void igb_reset_task(struct work_struct *work)
+{
+	struct igb_adapter *adapter;
+	adapter = container_of(work, struct igb_adapter, reset_task);
+
+	igb_dump(adapter);
+	rtdev_err(adapter->netdev, "Reset adapter\n");
+	igb_reinit_locked(adapter);
+}
+
+/**
+ * igb_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *
+igb_get_stats(struct rtnet_device *netdev)
+{
+	struct igb_adapter *adapter = netdev->priv;
+
+	/* only return the current stats */
+	return &adapter->net_stats;
+}
+
+/**
+ *  igb_update_stats - Update the board statistics counters
+ *  @adapter: board private structure
+ **/
+void igb_update_stats(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	struct net_device_stats *net_stats;
+	u32 reg, mpc;
+	int i;
+	u64 bytes, packets;
+
+	/* Prevent stats update while adapter is being reset, or if the pci
+	 * connection is down.
+	 */
+	if (adapter->link_speed == 0)
+		return;
+	if (pci_channel_offline(pdev))
+		return;
+
+	net_stats = &adapter->net_stats;
+	bytes = 0;
+	packets = 0;
+
+	rcu_read_lock();
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct igb_ring *ring = adapter->rx_ring[i];
+		u32 rqdpc = rd32(E1000_RQDPC(i));
+		if (hw->mac.type >= e1000_i210)
+			wr32(E1000_RQDPC(i), 0);
+
+		if (rqdpc) {
+			ring->rx_stats.drops += rqdpc;
+			net_stats->rx_fifo_errors += rqdpc;
+		}
+
+		bytes += ring->rx_stats.bytes;
+		packets += ring->rx_stats.packets;
+	}
+
+	net_stats->rx_bytes = bytes;
+	net_stats->rx_packets = packets;
+
+	bytes = 0;
+	packets = 0;
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct igb_ring *ring = adapter->tx_ring[i];
+		bytes += ring->tx_stats.bytes;
+		packets += ring->tx_stats.packets;
+	}
+	net_stats->tx_bytes = bytes;
+	net_stats->tx_packets = packets;
+	rcu_read_unlock();
+
+	/* read stats registers */
+	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
+	adapter->stats.gprc += rd32(E1000_GPRC);
+	adapter->stats.gorc += rd32(E1000_GORCL);
+	rd32(E1000_GORCH); /* clear GORCL */
+	adapter->stats.bprc += rd32(E1000_BPRC);
+	adapter->stats.mprc += rd32(E1000_MPRC);
+	adapter->stats.roc += rd32(E1000_ROC);
+
+	adapter->stats.prc64 += rd32(E1000_PRC64);
+	adapter->stats.prc127 += rd32(E1000_PRC127);
+	adapter->stats.prc255 += rd32(E1000_PRC255);
+	adapter->stats.prc511 += rd32(E1000_PRC511);
+	adapter->stats.prc1023 += rd32(E1000_PRC1023);
+	adapter->stats.prc1522 += rd32(E1000_PRC1522);
+	adapter->stats.symerrs += rd32(E1000_SYMERRS);
+	adapter->stats.sec += rd32(E1000_SEC);
+
+	mpc = rd32(E1000_MPC);
+	adapter->stats.mpc += mpc;
+	net_stats->rx_fifo_errors += mpc;
+	adapter->stats.scc += rd32(E1000_SCC);
+	adapter->stats.ecol += rd32(E1000_ECOL);
+	adapter->stats.mcc += rd32(E1000_MCC);
+	adapter->stats.latecol += rd32(E1000_LATECOL);
+	adapter->stats.dc += rd32(E1000_DC);
+	adapter->stats.rlec += rd32(E1000_RLEC);
+	adapter->stats.xonrxc += rd32(E1000_XONRXC);
+	adapter->stats.xontxc += rd32(E1000_XONTXC);
+	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
+	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
+	adapter->stats.fcruc += rd32(E1000_FCRUC);
+	adapter->stats.gptc += rd32(E1000_GPTC);
+	adapter->stats.gotc += rd32(E1000_GOTCL);
+	rd32(E1000_GOTCH); /* clear GOTCL */
+	adapter->stats.rnbc += rd32(E1000_RNBC);
+	adapter->stats.ruc += rd32(E1000_RUC);
+	adapter->stats.rfc += rd32(E1000_RFC);
+	adapter->stats.rjc += rd32(E1000_RJC);
+	adapter->stats.tor += rd32(E1000_TORH);
+	adapter->stats.tot += rd32(E1000_TOTH);
+	adapter->stats.tpr += rd32(E1000_TPR);
+
+	adapter->stats.ptc64 += rd32(E1000_PTC64);
+	adapter->stats.ptc127 += rd32(E1000_PTC127);
+	adapter->stats.ptc255 += rd32(E1000_PTC255);
+	adapter->stats.ptc511 += rd32(E1000_PTC511);
+	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
+	adapter->stats.ptc1522 += rd32(E1000_PTC1522);
+
+	adapter->stats.mptc += rd32(E1000_MPTC);
+	adapter->stats.bptc += rd32(E1000_BPTC);
+
+	adapter->stats.tpt += rd32(E1000_TPT);
+	adapter->stats.colc += rd32(E1000_COLC);
+
+	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
+	/* read internal phy specific stats */
+	reg = rd32(E1000_CTRL_EXT);
+	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
+		adapter->stats.rxerrc += rd32(E1000_RXERRC);
+
+		/* this stat has invalid values on i210/i211 */
+		if ((hw->mac.type != e1000_i210) &&
+		    (hw->mac.type != e1000_i211))
+			adapter->stats.tncrs += rd32(E1000_TNCRS);
+	}
+
+	adapter->stats.tsctc += rd32(E1000_TSCTC);
+	adapter->stats.tsctfc += rd32(E1000_TSCTFC);
+
+	adapter->stats.iac += rd32(E1000_IAC);
+	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
+	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
+	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
+	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
+	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
+	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
+	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
+	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
+
+	/* Fill out the OS statistics structure */
+	net_stats->multicast = adapter->stats.mprc;
+	net_stats->collisions = adapter->stats.colc;
+
+	/* Rx Errors */
+
+	/* RLEC on some newer hardware can be incorrect so build
+	 * our own version based on RUC and ROC
+	 */
+	net_stats->rx_errors = adapter->stats.rxerrc +
+		adapter->stats.crcerrs + adapter->stats.algnerrc +
+		adapter->stats.ruc + adapter->stats.roc +
+		adapter->stats.cexterr;
+	net_stats->rx_length_errors = adapter->stats.ruc +
+				      adapter->stats.roc;
+	net_stats->rx_crc_errors = adapter->stats.crcerrs;
+	net_stats->rx_frame_errors = adapter->stats.algnerrc;
+	net_stats->rx_missed_errors = adapter->stats.mpc;
+
+	/* Tx Errors */
+	net_stats->tx_errors = adapter->stats.ecol +
+			       adapter->stats.latecol;
+	net_stats->tx_aborted_errors = adapter->stats.ecol;
+	net_stats->tx_window_errors = adapter->stats.latecol;
+	net_stats->tx_carrier_errors = adapter->stats.tncrs;
+
+	/* Tx Dropped needs to be maintained elsewhere */
+
+	/* Management Stats */
+	adapter->stats.mgptc += rd32(E1000_MGTPTC);
+	adapter->stats.mgprc += rd32(E1000_MGTPRC);
+	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
+
+	/* OS2BMC Stats */
+	reg = rd32(E1000_MANC);
+	if (reg & E1000_MANC_EN_BMC2OS) {
+		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
+		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
+		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
+		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
+	}
+}
+
+static void igb_nrtsig_watchdog(rtdm_nrtsig_t *sig, void *data)
+{
+	struct igb_adapter *adapter = data;
+	mod_timer(&adapter->watchdog_timer, jiffies + 1);
+}
+
+static void igb_other_handler(struct igb_adapter *adapter, u32 icr, bool root)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (icr & E1000_ICR_DRSTA)
+		rtdm_schedule_nrt_work(&adapter->reset_task);
+
+	if (icr & E1000_ICR_DOUTSYNC) {
+		/* HW is reporting DMA is out of sync */
+		adapter->stats.doosync++;
+		/* The DMA Out of Sync is also an indication of a spoof event
+		 * in IOV mode. Check the Wrong VM Behavior register to
+		 * see if it is really a spoof event.
+		 */
+		igb_check_wvbr(adapter);
+	}
+
+	if (icr & E1000_ICR_LSC) {
+		hw->mac.get_link_status = 1;
+		/* guard against interrupt when we're going down */
+		if (!test_bit(__IGB_DOWN, &adapter->state)) {
+			if (root)
+				mod_timer(&adapter->watchdog_timer,
+					jiffies + 1);
+			else
+				rtdm_nrtsig_pend(&adapter->watchdog_nrtsig);
+		}
+	}
+}
+
+static irqreturn_t igb_msix_other(int irq, void *data)
+{
+	struct igb_adapter *adapter = data;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 icr = rd32(E1000_ICR);
+	/* reading ICR causes bit 31 of EICR to be cleared */
+
+	igb_other_handler(adapter, icr, true);
+
+	wr32(E1000_EIMS, adapter->eims_other);
+
+	return IRQ_HANDLED;
+}
+
+static void igb_write_itr(struct igb_q_vector *q_vector)
+{
+	struct igb_adapter *adapter = q_vector->adapter;
+	u32 itr_val = (q_vector->itr_val + 0x3) & 0x7FFC;
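+	/*
+	 * Example of the rounding above (illustration only): an itr_val of
+	 * 54 becomes (54 + 3) & 0x7FFC = 56, i.e. the interval is rounded
+	 * up to the next multiple of 4 and limited to 15 bits before it is
+	 * programmed into the EITR register.
+	 */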
+
+	if (!q_vector->set_itr)
+		return;
+
+	if (!itr_val)
+		itr_val = 0x4;
+
+	if (adapter->hw.mac.type == e1000_82575)
+		itr_val |= itr_val << 16;
+	else
+		itr_val |= E1000_EITR_CNT_IGNR;
+
+	writel(itr_val, q_vector->itr_register);
+	q_vector->set_itr = 0;
+}
+
+static int igb_msix_ring(rtdm_irq_t *ih)
+{
+	struct igb_q_vector *q_vector =
+		rtdm_irq_get_arg(ih, struct igb_q_vector);
+
+	/* Write the ITR value calculated from the previous interrupt. */
+	igb_write_itr(q_vector);
+
+	igb_poll(q_vector);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+
+/**
+ *  igb_intr_msi - Interrupt Handler
+ *  @ih: RTDM interrupt handle for this adapter
+ **/
+static int igb_intr_msi(rtdm_irq_t *ih)
+{
+	struct igb_adapter *adapter =
+		rtdm_irq_get_arg(ih, struct igb_adapter);
+	struct igb_q_vector *q_vector = adapter->q_vector[0];
+	struct e1000_hw *hw = &adapter->hw;
+	u32 icr = rd32(E1000_ICR);
+
+	igb_write_itr(q_vector);
+
+	igb_other_handler(adapter, icr, false);
+
+	igb_poll(q_vector);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+/**
+ *  igb_intr - Legacy Interrupt Handler
+ *  @ih: RTDM interrupt handle for this adapter
+ **/
+static int igb_intr(rtdm_irq_t *ih)
+{
+	struct igb_adapter *adapter =
+		rtdm_irq_get_arg(ih, struct igb_adapter);
+	struct igb_q_vector *q_vector = adapter->q_vector[0];
+	struct e1000_hw *hw = &adapter->hw;
+	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
+	 * need for the IMC write
+	 */
+	u32 icr = rd32(E1000_ICR);
+
+	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+	 * not set, then the adapter didn't send an interrupt
+	 */
+	if (!(icr & E1000_ICR_INT_ASSERTED))
+		return IRQ_NONE;
+
+	igb_write_itr(q_vector);
+
+	igb_other_handler(adapter, icr, false);
+
+	igb_poll(q_vector);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
+{
+	struct igb_adapter *adapter = q_vector->adapter;
+	struct e1000_hw *hw = &adapter->hw;
+
+	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
+	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
+		if (adapter->num_q_vectors == 1)
+			igb_set_itr(q_vector);
+		else
+			igb_update_ring_itr(q_vector);
+	}
+
+	if (!test_bit(__IGB_DOWN, &adapter->state)) {
+		if (adapter->flags & IGB_FLAG_HAS_MSIX)
+			wr32(E1000_EIMS, q_vector->eims_value);
+		else
+			igb_irq_enable(adapter);
+	}
+}
+
+/**
+ *  igb_poll - clean the Tx and Rx rings of a queue vector
+ *  @q_vector: pointer to the q_vector to be serviced
+ **/
+static void igb_poll(struct igb_q_vector *q_vector)
+{
+	if (q_vector->tx.ring)
+		igb_clean_tx_irq(q_vector);
+
+	if (q_vector->rx.ring)
+		igb_clean_rx_irq(q_vector, 64);
+
+	igb_ring_irq_enable(q_vector);
+}
+
+/**
+ *  igb_clean_tx_irq - Reclaim resources after transmit completes
+ *  @q_vector: pointer to q_vector containing needed info
+ *
+ *  returns true if ring is completely cleaned
+ **/
+static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
+{
+	struct igb_adapter *adapter = q_vector->adapter;
+	struct igb_ring *tx_ring = q_vector->tx.ring;
+	struct igb_tx_buffer *tx_buffer;
+	union e1000_adv_tx_desc *tx_desc;
+	unsigned int total_bytes = 0, total_packets = 0;
+	unsigned int budget = q_vector->tx.work_limit;
+	unsigned int i = tx_ring->next_to_clean;
+
+	if (test_bit(__IGB_DOWN, &adapter->state))
+		return true;
+
+	tx_buffer = &tx_ring->tx_buffer_info[i];
+	tx_desc = IGB_TX_DESC(tx_ring, i);
+	i -= tx_ring->count;
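+	/*
+	 * Explanatory note on the index arithmetic: i is biased by -count so
+	 * that the wrap checks below can simply test for i reaching zero;
+	 * e.g. with count = 256 and next_to_clean = 10, i starts at -246,
+	 * wraps when it hits 0, and the final i += count restores the real
+	 * ring index.
+	 */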
+
+	do {
+		union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+		/* if next_to_watch is not set then there is no work pending */
+		if (!eop_desc)
+			break;
+
+		/* prevent any other reads prior to eop_desc */
+		smp_rmb();
+
+		/* if DD is not set pending work has not been completed */
+		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
+			break;
+
+		/* clear next_to_watch to prevent false hangs */
+		tx_buffer->next_to_watch = NULL;
+
+		/* update the statistics for this packet */
+		total_bytes += tx_buffer->bytecount;
+		total_packets += tx_buffer->gso_segs;
+
+		/* free the skb */
+		kfree_rtskb(tx_buffer->skb);
+
+		/* clear tx_buffer data */
+		tx_buffer->skb = NULL;
+
+		/* clear last DMA location and unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			tx_buffer++;
+			tx_desc++;
+			i++;
+			if (unlikely(!i)) {
+				i -= tx_ring->count;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = IGB_TX_DESC(tx_ring, 0);
+			}
+		}
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		tx_desc++;
+		i++;
+		if (unlikely(!i)) {
+			i -= tx_ring->count;
+			tx_buffer = tx_ring->tx_buffer_info;
+			tx_desc = IGB_TX_DESC(tx_ring, 0);
+		}
+
+		/* issue prefetch for next Tx descriptor */
+		prefetch(tx_desc);
+
+		/* update budget accounting */
+		budget--;
+	} while (likely(budget));
+
+	i += tx_ring->count;
+	tx_ring->next_to_clean = i;
+	tx_ring->tx_stats.bytes += total_bytes;
+	tx_ring->tx_stats.packets += total_packets;
+	q_vector->tx.total_bytes += total_bytes;
+	q_vector->tx.total_packets += total_packets;
+
+	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
+		struct e1000_hw *hw = &adapter->hw;
+
+		/* Detect a transmit hang in hardware; this serializes the
+		 * check with the clearing of time_stamp and movement of i.
+		 */
+		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
+		if (tx_buffer->next_to_watch &&
+		    time_after(jiffies, tx_buffer->time_stamp +
+			       (adapter->tx_timeout_factor * HZ)) &&
+		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
+
+			/* detected Tx unit hang */
+			dev_err(tx_ring->dev,
+				"Detected Tx Unit Hang\n"
+				"  Tx Queue             <%d>\n"
+				"  TDH                  <%x>\n"
+				"  TDT                  <%x>\n"
+				"  next_to_use          <%x>\n"
+				"  next_to_clean        <%x>\n"
+				"buffer_info[next_to_clean]\n"
+				"  time_stamp           <%lx>\n"
+				"  next_to_watch        <%p>\n"
+				"  jiffies              <%lx>\n"
+				"  desc.status          <%x>\n",
+				tx_ring->queue_index,
+				rd32(E1000_TDH(tx_ring->reg_idx)),
+				readl(tx_ring->tail),
+				tx_ring->next_to_use,
+				tx_ring->next_to_clean,
+				tx_buffer->time_stamp,
+				tx_buffer->next_to_watch,
+				jiffies,
+				tx_buffer->next_to_watch->wb.status);
+			rtnetif_stop_queue(tx_ring->netdev);
+
+			/* we are about to reset, no point in enabling stuff */
+			return true;
+		}
+	}
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+	if (unlikely(total_packets &&
+	    rtnetif_carrier_ok(tx_ring->netdev) &&
+	    igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+		if (rtnetif_queue_stopped(tx_ring->netdev) &&
+		    !(test_bit(__IGB_DOWN, &adapter->state))) {
+			rtnetif_wake_queue(tx_ring->netdev);
+
+			tx_ring->tx_stats.restart_queue++;
+		}
+	}
+
+	return !!budget;
+}
+
+static struct rtskb *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
+					   union e1000_adv_rx_desc *rx_desc)
+{
+	struct igb_rx_buffer *rx_buffer;
+	struct rtskb *skb;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	skb = rx_buffer->skb;
+	prefetchw(skb->data);
+
+	/* pull the header of the skb in */
+	rtskb_put(skb, le16_to_cpu(rx_desc->wb.upper.length));
+	rx_buffer->skb = NULL;
+	rx_buffer->dma = 0;
+
+	return skb;
+}
+
+static inline void igb_rx_checksum(struct igb_ring *ring,
+				   union e1000_adv_rx_desc *rx_desc,
+				   struct rtskb *skb)
+{
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* Ignore Checksum bit is set */
+	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
+		return;
+
+	/* Rx checksum disabled via ethtool */
+	if (!(ring->netdev->features & NETIF_F_RXCSUM))
+		return;
+
+	/* TCP/UDP checksum error bit is set */
+	if (igb_test_staterr(rx_desc,
+			     E1000_RXDEXT_STATERR_TCPE |
+			     E1000_RXDEXT_STATERR_IPE)) {
+		/* work around errata with sctp packets where the TCPE aka
+		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
+		 * packets, (aka let the stack check the crc32c)
+		 */
+		if (!((skb->len == 60) &&
+		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
+			ring->rx_stats.csum_err++;
+		}
+		/* let the stack verify checksum errors */
+		return;
+	}
+	/* It must be a TCP or UDP packet with a valid checksum */
+	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
+				      E1000_RXD_STAT_UDPCS))
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	dev_dbg(ring->dev, "cksum success: bits %08X\n",
+		le32_to_cpu(rx_desc->wb.upper.status_error));
+}
+
+/**
+ *  igb_is_non_eop - process handling of non-EOP buffers
+ *  @rx_ring: Rx ring being processed
+ *  @rx_desc: Rx descriptor for current buffer
+ *  @skb: current socket buffer containing buffer in progress
+ *
+ *  This function updates next_to_clean.  If the buffer is an EOP buffer
+ *  this function exits returning false, otherwise it returns true to
+ *  indicate that this is in fact a non-EOP buffer.
+ **/
+static bool igb_is_non_eop(struct igb_ring *rx_ring,
+			   union e1000_adv_rx_desc *rx_desc)
+{
+	u32 ntc = rx_ring->next_to_clean + 1;
+
+	/* fetch, update, and store next to clean */
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+
+	prefetch(IGB_RX_DESC(rx_ring, ntc));
+
+	if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
+		return false;
+
+	return true;
+}
+
+/**
+ *  igb_cleanup_headers - Check a completed frame for hardware errors
+ *  @rx_ring: rx descriptor ring packet is being transacted on
+ *  @rx_desc: pointer to the EOP Rx descriptor
+ *  @skb: pointer to current skb being checked
+ *
+ *  Checks the Rx descriptor for frame errors reported by hardware and
+ *  frees the packet unless the interface is configured to receive all
+ *  frames (NETIF_F_RXALL).
+ *
+ *  Returns true if an error was encountered and skb was freed.
+ **/
+static bool igb_cleanup_headers(struct igb_ring *rx_ring,
+				union e1000_adv_rx_desc *rx_desc,
+				struct rtskb *skb)
+{
+	if (unlikely((igb_test_staterr(rx_desc,
+				       E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
+		struct rtnet_device *netdev = rx_ring->netdev;
+		if (!(netdev->features & NETIF_F_RXALL)) {
+			kfree_rtskb(skb);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/**
+ *  igb_process_skb_fields - Populate skb header fields from Rx descriptor
+ *  @rx_ring: rx descriptor ring packet is being transacted on
+ *  @rx_desc: pointer to the EOP Rx descriptor
+ *  @skb: pointer to current skb being populated
+ *
+ *  This function checks the ring, descriptor, and packet information in
+ *  order to populate the checksum and protocol fields within the skb.
+ **/
+static void igb_process_skb_fields(struct igb_ring *rx_ring,
+				   union e1000_adv_rx_desc *rx_desc,
+				   struct rtskb *skb)
+{
+	igb_rx_checksum(rx_ring, rx_desc, skb);
+
+	skb->protocol = rt_eth_type_trans(skb, rx_ring->netdev);
+}
+
+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+{
+	struct igb_ring *rx_ring = q_vector->rx.ring;
+	unsigned int total_bytes = 0, total_packets = 0;
+	u16 cleaned_count = igb_desc_unused(rx_ring);
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+	struct rtskb *skb;
+
+	while (likely(total_packets < budget)) {
+		union e1000_adv_rx_desc *rx_desc;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
+			igb_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+		if (!rx_desc->wb.upper.status_error)
+			break;
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * descriptor has been written back
+		 */
+		rmb();
+
+		/* retrieve a buffer from the ring */
+		skb = igb_fetch_rx_buffer(rx_ring, rx_desc);
+		skb->time_stamp = time_stamp;
+
+		cleaned_count++;
+
+		/* fetch next buffer in frame if non-eop */
+		if (igb_is_non_eop(rx_ring, rx_desc)) {
+			kfree_rtskb(skb);
+			continue;
+		}
+
+		/* verify the packet layout is correct */
+		if (igb_cleanup_headers(rx_ring, rx_desc, skb))
+			continue;
+
+		/* probably a little skewed due to removing CRC */
+		total_bytes += skb->len;
+
+		/* populate checksum, timestamp, VLAN, and protocol */
+		igb_process_skb_fields(rx_ring, rx_desc, skb);
+
+		rtnetif_rx(skb);
+
+		/* reset skb pointer */
+		skb = NULL;
+
+		/* update budget accounting */
+		total_packets++;
+	}
+
+	rx_ring->rx_stats.packets += total_packets;
+	rx_ring->rx_stats.bytes += total_bytes;
+	q_vector->rx.total_packets += total_packets;
+	q_vector->rx.total_bytes += total_bytes;
+
+	if (cleaned_count)
+		igb_alloc_rx_buffers(rx_ring, cleaned_count);
+
+	if (total_packets)
+		rt_mark_stack_mgr(q_vector->adapter->netdev);
+
+	return total_packets < budget;
+}
+
+static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
+				 struct igb_rx_buffer *bi)
+{
+	struct igb_adapter *adapter = rx_ring->q_vector->adapter;
+	struct rtskb *skb = bi->skb;
+	dma_addr_t dma = bi->dma;
+
+	if (dma)
+		return true;
+
+	if (likely(!skb)) {
+		skb = rtnetdev_alloc_rtskb(adapter->netdev,
+					rx_ring->rx_buffer_len + NET_IP_ALIGN);
+		if (!skb) {
+			rx_ring->rx_stats.alloc_failed++;
+			return false;
+		}
+
+		rtskb_reserve(skb, NET_IP_ALIGN);
+		skb->rtdev = adapter->netdev;
+
+		bi->skb = skb;
+		bi->dma = rtskb_data_dma_addr(skb, 0);
+	}
+
+	return true;
+}
+
+/**
+ *  igb_alloc_rx_buffers - Replace used receive buffers
+ *  @rx_ring: ring to refill with mapped receive buffers
+ *  @cleaned_count: number of buffers to replace
+ **/
+void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
+{
+	union e1000_adv_rx_desc *rx_desc;
+	struct igb_rx_buffer *bi;
+	u16 i = rx_ring->next_to_use;
+
+	/* nothing to do */
+	if (!cleaned_count)
+		return;
+
+	rx_desc = IGB_RX_DESC(rx_ring, i);
+	bi = &rx_ring->rx_buffer_info[i];
+	i -= rx_ring->count;
+
+	do {
+		if (!igb_alloc_mapped_skb(rx_ring, bi))
+			break;
+
+		/* Refresh the desc even if buffer_addrs didn't change
+		 * because each write-back erases this info.
+		 */
+		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+
+		rx_desc++;
+		bi++;
+		i++;
+		if (unlikely(!i)) {
+			rx_desc = IGB_RX_DESC(rx_ring, 0);
+			bi = rx_ring->rx_buffer_info;
+			i -= rx_ring->count;
+		}
+
+		/* clear the status bits for the next_to_use descriptor */
+		rx_desc->wb.upper.status_error = 0;
+
+		cleaned_count--;
+	} while (cleaned_count);
+
+	i += rx_ring->count;
+
+	if (rx_ring->next_to_use != i) {
+		/* record the next descriptor to use */
+		rx_ring->next_to_use = i;
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		writel(i, rx_ring->tail);
+	}
+}
+
+/**
+ * igb_mii_ioctl - handle MII register ioctls
+ * @netdev: network interface device structure
+ * @ifr: interface request structure carrying the MII data
+ * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
+ **/
+static int igb_mii_ioctl(struct rtnet_device *netdev, struct ifreq *ifr, int cmd)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct mii_ioctl_data *data = if_mii(ifr);
+
+	if (adapter->hw.phy.media_type != e1000_media_type_copper)
+		return -EOPNOTSUPP;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = adapter->hw.phy.addr;
+		break;
+	case SIOCGMIIREG:
+		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+				     &data->val_out))
+			return -EIO;
+		break;
+	case SIOCSMIIREG:
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+/**
+ * igb_ioctl - dispatch ioctl calls (non-RT context only)
+ * @netdev: network interface device structure
+ * @ifr: interface request structure
+ * @cmd: ioctl command
+ **/
+static int igb_ioctl(struct rtnet_device *netdev, struct ifreq *ifr, int cmd)
+{
+	if (rtdm_in_rt_context())
+		return -ENOSYS;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return igb_mii_ioctl(netdev, ifr, cmd);
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct igb_adapter *adapter = hw->back;
+
+	pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct igb_adapter *adapter = hw->back;
+
+	pci_write_config_word(adapter->pdev, reg, *value);
+}
+
+s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct igb_adapter *adapter = hw->back;
+
+	if (pcie_capability_read_word(adapter->pdev, reg, value))
+		return -E1000_ERR_CONFIG;
+
+	return 0;
+}
+
+s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct igb_adapter *adapter = hw->back;
+
+	if (pcie_capability_write_word(adapter->pdev, reg, *value))
+		return -E1000_ERR_CONFIG;
+
+	return 0;
+}
+
+static void igb_vlan_mode(struct rtnet_device *netdev, netdev_features_t features)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl;
+
+	/* disable VLAN tag insert/strip */
+	ctrl = rd32(E1000_CTRL);
+	ctrl &= ~E1000_CTRL_VME;
+	wr32(E1000_CTRL, ctrl);
+
+	igb_rlpml_set(adapter);
+}
+
+static int igb_vlan_rx_add_vid(struct rtnet_device *netdev,
+			       __be16 proto, u16 vid)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* add the filter since PF can receive vlans w/o entry in vlvf */
+	igb_vfta_set(hw, vid, true);
+
+	set_bit(vid, adapter->active_vlans);
+
+	return 0;
+}
+
+static void igb_restore_vlan(struct igb_adapter *adapter)
+{
+	u16 vid;
+
+	igb_vlan_mode(adapter->netdev, adapter->netdev->features);
+
+	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+		igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
+}
+
+static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
+			  bool runtime)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl, rctl, status;
+	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+#ifdef CONFIG_PM
+	int retval = 0;
+#endif
+
+	rtnetif_device_detach(netdev);
+
+	if (rtnetif_running(netdev))
+		__igb_close(netdev, true);
+
+	igb_clear_interrupt_scheme(adapter);
+
+#ifdef CONFIG_PM
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+#endif
+
+	status = rd32(E1000_STATUS);
+	if (status & E1000_STATUS_LU)
+		wufc &= ~E1000_WUFC_LNKC;
+
+	if (wufc) {
+		igb_setup_rctl(adapter);
+		igb_set_rx_mode(netdev);
+
+		/* turn on all-multi mode if wake on multicast is enabled */
+		if (wufc & E1000_WUFC_MC) {
+			rctl = rd32(E1000_RCTL);
+			rctl |= E1000_RCTL_MPE;
+			wr32(E1000_RCTL, rctl);
+		}
+
+		ctrl = rd32(E1000_CTRL);
+		/* advertise wake from D3Cold */
+		#define E1000_CTRL_ADVD3WUC 0x00100000
+		/* phy power management enable */
+		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
+		ctrl |= E1000_CTRL_ADVD3WUC;
+		wr32(E1000_CTRL, ctrl);
+
+		/* Allow time for pending master requests to run */
+		igb_disable_pcie_master(hw);
+
+		wr32(E1000_WUC, E1000_WUC_PME_EN);
+		wr32(E1000_WUFC, wufc);
+	} else {
+		wr32(E1000_WUC, 0);
+		wr32(E1000_WUFC, 0);
+	}
+
+	*enable_wake = wufc || adapter->en_mng_pt;
+	if (!*enable_wake)
+		igb_power_down_link(adapter);
+	else
+		igb_power_up_link(adapter);
+
+	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant.
+	 */
+	igb_release_hw_control(adapter);
+
+	pci_disable_device(pdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+static int igb_suspend(struct device *dev)
+{
+	int retval;
+	bool wake;
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	retval = __igb_shutdown(pdev, &wake, 0);
+	if (retval)
+		return retval;
+
+	if (wake) {
+		pci_prepare_to_sleep(pdev);
+	} else {
+		pci_wake_from_d3(pdev, false);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int igb_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	pci_save_state(pdev);
+
+	if (!pci_device_is_present(pdev))
+		return -ENODEV;
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"igb: Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	if (igb_init_interrupt_scheme(adapter, true)) {
+		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+	igb_reset(adapter);
+
+	/* let the f/w know that the h/w is now under the control of the
+	 * driver.
+	 */
+	igb_get_hw_control(adapter);
+
+	wr32(E1000_WUS, ~0);
+
+	if (netdev->flags & IFF_UP) {
+		rtnl_lock();
+		err = __igb_open(netdev, true);
+		rtnl_unlock();
+		if (err)
+			return err;
+	}
+
+	rtnetif_device_attach(netdev);
+	return 0;
+}
+
+static int igb_runtime_idle(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+
+	if (!igb_has_link(adapter))
+		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
+
+	return -EBUSY;
+}
+
+static int igb_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int retval;
+	bool wake;
+
+	retval = __igb_shutdown(pdev, &wake, 1);
+	if (retval)
+		return retval;
+
+	if (wake) {
+		pci_prepare_to_sleep(pdev);
+	} else {
+		pci_wake_from_d3(pdev, false);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
+	return 0;
+}
+
+static int igb_runtime_resume(struct device *dev)
+{
+	return igb_resume(dev);
+}
+#endif /* CONFIG_PM */
+
+static void igb_shutdown(struct pci_dev *pdev)
+{
+	bool wake;
+
+	__igb_shutdown(pdev, &wake, 0);
+
+	if (system_state == SYSTEM_POWER_OFF) {
+		pci_wake_from_d3(pdev, wake);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+}
+
+static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+	return 0;
+}
+
+/**
+ *  igb_io_error_detected - called when PCI error is detected
+ *  @pdev: Pointer to PCI device
+ *  @state: The current pci connection state
+ *
+ *  This function is called after a PCI bus error affecting
+ *  this device has been detected.
+ **/
+static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
+					      pci_channel_state_t state)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+
+	rtnetif_device_detach(netdev);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	if (rtnetif_running(netdev))
+		igb_down(adapter);
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ *  igb_io_slot_reset - called after the pci bus has been reset.
+ *  @pdev: Pointer to PCI device
+ *
+ *  Restart the card from scratch, as if from a cold-boot. Implementation
+ *  resembles the first-half of the igb_resume routine.
+ **/
+static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	pci_ers_result_t result;
+	int err;
+
+	if (pci_enable_device_mem(pdev)) {
+		dev_err(&pdev->dev,
+			"Cannot re-enable PCI device after reset.\n");
+		result = PCI_ERS_RESULT_DISCONNECT;
+	} else {
+		pci_set_master(pdev);
+		pci_restore_state(pdev);
+		pci_save_state(pdev);
+
+		pci_enable_wake(pdev, PCI_D3hot, 0);
+		pci_enable_wake(pdev, PCI_D3cold, 0);
+
+		igb_reset(adapter);
+		wr32(E1000_WUS, ~0);
+		result = PCI_ERS_RESULT_RECOVERED;
+	}
+
+	err = pci_aer_clear_nonfatal_status(pdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"pci_aer_clear_nonfatal_status failed 0x%0x\n",
+			err);
+		/* non-fatal, continue */
+	}
+
+	return result;
+}
+
+/**
+ *  igb_io_resume - called when traffic can start flowing again.
+ *  @pdev: Pointer to PCI device
+ *
+ *  This callback is called when the error recovery driver tells us that
+ *  it's OK to resume normal operation. Implementation resembles the
+ *  second-half of the igb_resume routine.
+ */
+static void igb_io_resume(struct pci_dev *pdev)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+
+	if (rtnetif_running(netdev)) {
+		if (igb_up(adapter)) {
+			dev_err(&pdev->dev, "igb_up failed after reset\n");
+			return;
+		}
+	}
+
+	rtnetif_device_attach(netdev);
+
+	/* let the f/w know that the h/w is now under the control of the
+	 * driver.
+	 */
+	igb_get_hw_control(adapter);
+}
+
+static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
+			     u8 qsel)
+{
+	u32 rar_low, rar_high;
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* HW expects these in little endian so we reverse the byte order
+	 * from network order (big endian) to little endian
+	 */
+	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
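+	/*
+	 * Illustration with a hypothetical address: for 00:1b:21:aa:bb:cc
+	 * the packing above yields rar_low = 0xaa211b00 and
+	 * rar_high = 0x0000ccbb, before the valid bit and pool selection
+	 * are OR'ed in below.
+	 */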
+
+	/* Indicate to hardware the Address is Valid. */
+	rar_high |= E1000_RAH_AV;
+
+	if (hw->mac.type == e1000_82575)
+		rar_high |= E1000_RAH_POOL_1 * qsel;
+	else
+		rar_high |= E1000_RAH_POOL_1 << qsel;
+
+	wr32(E1000_RAL(index), rar_low);
+	wrfl();
+	wr32(E1000_RAH(index), rar_high);
+	wrfl();
+}
+
+static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 dmac_thr;
+	u16 hwm;
+
+	if (hw->mac.type > e1000_82580) {
+		if (adapter->flags & IGB_FLAG_DMAC) {
+			u32 reg;
+
+			/* force threshold to 0. */
+			wr32(E1000_DMCTXTH, 0);
+
+			/* DMA Coalescing high water mark needs to be greater
+			 * than the Rx threshold. Set hwm to PBA - max frame
+			 * size in 16B units, capping it at PBA - 6KB.
+			 */
+			hwm = 64 * pba - adapter->max_frame_size / 16;
+			if (hwm < 64 * (pba - 6))
+				hwm = 64 * (pba - 6);
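+			/*
+			 * Example with illustrative figures: pba = 34 (KB)
+			 * and a 1522 byte max frame give
+			 * hwm = 64 * 34 - 1522 / 16 = 2081 16-byte units,
+			 * above the 64 * (34 - 6) = 1792 floor, so the
+			 * computed value is used unchanged.
+			 */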
+			reg = rd32(E1000_FCRTC);
+			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
+			reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
+				& E1000_FCRTC_RTH_COAL_MASK);
+			wr32(E1000_FCRTC, reg);
+
+			/* Set the DMA Coalescing Rx threshold to PBA - 2 * max
+			 * frame size, capping it at PBA - 10KB.
+			 */
+			dmac_thr = pba - adapter->max_frame_size / 512;
+			if (dmac_thr < pba - 10)
+				dmac_thr = pba - 10;
+			reg = rd32(E1000_DMACR);
+			reg &= ~E1000_DMACR_DMACTHR_MASK;
+			reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
+				& E1000_DMACR_DMACTHR_MASK);
+
+			/* transition to L0s or L1 if available */
+			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
+
+			/* watchdog timer = +/- 1000 usec, in 32 usec intervals */
+			reg |= (1000 >> 5);
+
+			/* Disable BMC-to-OS Watchdog Enable */
+			if (hw->mac.type != e1000_i354)
+				reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
+
+			wr32(E1000_DMACR, reg);
+
+			/* no lower threshold to disable
+			 * coalescing (smart FIFO) - UTRESH=0
+			 */
+			wr32(E1000_DMCRTRH, 0);
+
+			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
+
+			wr32(E1000_DMCTLX, reg);
+
+			/* free space in tx packet buffer to wake from
+			 * DMA coal
+			 */
+			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
+			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
+
+			/* make low power state decision controlled
+			 * by DMA coal
+			 */
+			reg = rd32(E1000_PCIEMISC);
+			reg &= ~E1000_PCIEMISC_LX_DECISION;
+			wr32(E1000_PCIEMISC, reg);
+		} /* endif adapter->dmac is not disabled */
+	} else if (hw->mac.type == e1000_82580) {
+		u32 reg = rd32(E1000_PCIEMISC);
+
+		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
+		wr32(E1000_DMACR, 0);
+	}
+}
+
+/**
+ *  igb_read_i2c_byte - Reads 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to read
+ *  @dev_addr: device address
+ *  @data: value read
+ *
+ *  Performs byte read operation over I2C interface at
+ *  a specified device address.
+ **/
+s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+		      u8 dev_addr, u8 *data)
+{
+	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+	struct i2c_client *this_client = adapter->i2c_client;
+	s32 status;
+	u16 swfw_mask = 0;
+
+	if (!this_client)
+		return E1000_ERR_I2C;
+
+	swfw_mask = E1000_SWFW_PHY0_SM;
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+		return E1000_ERR_SWFW_SYNC;
+
+	status = i2c_smbus_read_byte_data(this_client, byte_offset);
+	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+	if (status < 0)
+		return E1000_ERR_I2C;
+	else {
+		*data = status;
+		return 0;
+	}
+}
+
+/**
+ *  igb_write_i2c_byte - Writes 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to write
+ *  @dev_addr: device address
+ *  @data: value to write
+ *
+ *  Performs byte write operation over I2C interface at
+ *  a specified device address.
+ **/
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+		       u8 dev_addr, u8 data)
+{
+	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+	struct i2c_client *this_client = adapter->i2c_client;
+	s32 status;
+	u16 swfw_mask = E1000_SWFW_PHY0_SM;
+
+	if (!this_client)
+		return E1000_ERR_I2C;
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+		return E1000_ERR_SWFW_SYNC;
+	status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
+	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+	if (status)
+		return E1000_ERR_I2C;
+	else
+		return 0;
+
+}
+
+int igb_reinit_queues(struct igb_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	int err = 0;
+
+	if (rtnetif_running(netdev))
+		igb_close(netdev);
+
+	igb_reset_interrupt_capability(adapter);
+
+	if (igb_init_interrupt_scheme(adapter, true)) {
+		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+	if (rtnetif_running(netdev))
+		err = igb_open(netdev);
+
+	return err;
+}
+/* igb_main.c */
+++ linux-patched/drivers/xenomai/net/drivers/igb/e1000_i210.h	2022-03-21 12:58:29.379889059 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/igb/e1000_mbx.c	1970-01-01 01:00:00.000000000 +0100
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_I210_H_
+#define _E1000_I210_H_
+
+s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
+s32 igb_read_invm_version(struct e1000_hw *hw,
+			  struct e1000_fw_version *invm_ver);
+s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
+s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
+s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
+bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+s32 igb_pll_workaround_i210(struct e1000_hw *hw);
+
+#define E1000_STM_OPCODE		0xDB00
+#define E1000_EEPROM_FLASH_SIZE_WORD	0x11
+
+#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \
+	(u8)((invm_dword) & 0x7)
+#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \
+	(u8)(((invm_dword) & 0x0000FE00) >> 9)
+#define INVM_DWORD_TO_WORD_DATA(invm_dword) \
+	(u16)(((invm_dword) & 0xFFFF0000) >> 16)
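+
+/*
+ * Example of the field extraction above (hypothetical value): an invm_dword
+ * of 0x12345678 yields record type 0x0 (0x78 & 0x7), word address 0x2B
+ * ((0x12345678 & 0x0000FE00) >> 9) and word data 0x1234.
+ */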
+
+enum E1000_INVM_STRUCTURE_TYPE {
+	E1000_INVM_UNINITIALIZED_STRUCTURE		= 0x00,
+	E1000_INVM_WORD_AUTOLOAD_STRUCTURE		= 0x01,
+	E1000_INVM_CSR_AUTOLOAD_STRUCTURE		= 0x02,
+	E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE	= 0x03,
+	E1000_INVM_RSA_KEY_SHA256_STRUCTURE		= 0x04,
+	E1000_INVM_INVALIDATED_STRUCTURE		= 0x0F,
+};
+
+#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS	8
+#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS	1
+#define E1000_INVM_ULT_BYTES_SIZE			8
+#define E1000_INVM_RECORD_SIZE_IN_BYTES			4
+#define E1000_INVM_VER_FIELD_ONE			0x1FF8
+#define E1000_INVM_VER_FIELD_TWO			0x7FE000
+#define E1000_INVM_IMGTYPE_FIELD			0x1F800000
+
+#define E1000_INVM_MAJOR_MASK		0x3F0
+#define E1000_INVM_MINOR_MASK		0xF
+#define E1000_INVM_MAJOR_SHIFT		4
+
+#define ID_LED_DEFAULT_I210		((ID_LED_OFF1_ON2  << 8) | \
+					 (ID_LED_DEF1_DEF2 <<  4) | \
+					 (ID_LED_OFF1_OFF2))
+#define ID_LED_DEFAULT_I210_SERDES	((ID_LED_DEF1_DEF2 << 8) | \
+					 (ID_LED_DEF1_DEF2 <<  4) | \
+					 (ID_LED_OFF1_ON2))
+
+/* NVM offset defaults for i211 device */
+#define NVM_INIT_CTRL_2_DEFAULT_I211	0X7243
+#define NVM_INIT_CTRL_4_DEFAULT_I211	0x00C1
+#define NVM_LED_1_CFG_DEFAULT_I211	0x0184
+#define NVM_LED_0_2_CFG_DEFAULT_I211	0x200C
+
+/* PLL Defines */
+#define E1000_PCI_PMCSR			0x44
+#define E1000_PCI_PMCSR_D3		0x03
+#define E1000_MAX_PLL_TRIES		5
+#define E1000_PHY_PLL_UNCONF		0xFF
+#define E1000_PHY_PLL_FREQ_PAGE		0xFC0000
+#define E1000_PHY_PLL_FREQ_REG		0x000E
+#define E1000_INVM_DEFAULT_AL		0x202F
+#define E1000_INVM_AUTOLOAD		0x0A
+#define E1000_INVM_PLL_WO_VAL		0x0010
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/igb/e1000_mbx.c	2022-03-21 12:58:29.375889098 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/igb/e1000_nvm.h	1970-01-01 01:00:00.000000000 +0100
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#include "e1000_mbx.h"
+
+/**
+ *  igb_read_mbx - Reads a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to read
+ *
+ *  returns SUCCESS if it successfully read message from buffer
+ **/
+s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = -E1000_ERR_MBX;
+
+	/* limit read to size of mailbox */
+	if (size > mbx->size)
+		size = mbx->size;
+
+	if (mbx->ops.read)
+		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  igb_write_mbx - Write a message to the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = 0;
+
+	if (size > mbx->size)
+		ret_val = -E1000_ERR_MBX;
+
+	else if (mbx->ops.write)
+		ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  igb_check_for_msg - checks to see if someone sent us mail
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = -E1000_ERR_MBX;
+
+	if (mbx->ops.check_for_msg)
+		ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  igb_check_for_ack - checks to see if someone sent us ACK
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = -E1000_ERR_MBX;
+
+	if (mbx->ops.check_for_ack)
+		ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  igb_check_for_rst - checks to see if other side has reset
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = -E1000_ERR_MBX;
+
+	if (mbx->ops.check_for_rst)
+		ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  igb_poll_for_msg - Wait for message notification
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification
+ **/
+static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !mbx->ops.check_for_msg)
+		goto out;
+
+	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		udelay(mbx->usec_delay);
+	}
+
+	/* if we failed, all future posted messages fail until reset */
+	if (!countdown)
+		mbx->timeout = 0;
+out:
+	return countdown ? 0 : -E1000_ERR_MBX;
+}
+
+/**
+ *  igb_poll_for_ack - Wait for message acknowledgement
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message acknowledgement
+ **/
+static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !mbx->ops.check_for_ack)
+		goto out;
+
+	while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		udelay(mbx->usec_delay);
+	}
+
+	/* if we failed, all future posted messages fail until reset */
+	if (!countdown)
+		mbx->timeout = 0;
+out:
+	return countdown ? 0 : -E1000_ERR_MBX;
+}
+
+/**
+ *  igb_read_posted_mbx - Wait for message notification and receive message
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification and
+ *  copied it into the receive buffer.
+ **/
+static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
+			       u16 mbx_id)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = -E1000_ERR_MBX;
+
+	if (!mbx->ops.read)
+		goto out;
+
+	ret_val = igb_poll_for_msg(hw, mbx_id);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_posted_mbx - Write a message to the mailbox, wait for ack
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer and
+ *  received an ack to that message within delay * timeout period
+ **/
+static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
+				u16 mbx_id)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = -E1000_ERR_MBX;
+
+	/* exit if either we can't write or there isn't a defined timeout */
+	if (!mbx->ops.write || !mbx->timeout)
+		goto out;
+
+	/* send msg */
+	ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+	/* if msg sent wait until we receive an ack */
+	if (!ret_val)
+		ret_val = igb_poll_for_ack(hw, mbx_id);
+out:
+	return ret_val;
+}
+
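+/*
+ * Summary of the posted-mailbox handshake implemented above (for
+ * reference only): igb_write_posted_mbx() writes the message and then
+ * polls check_for_ack up to mbx->timeout times with mbx->usec_delay
+ * microseconds between polls; igb_read_posted_mbx() polls
+ * check_for_msg the same way before copying the message out.  Once a
+ * poll times out, mbx->timeout is cleared, so subsequent posted
+ * transfers fail immediately with -E1000_ERR_MBX.
+ */
+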
+static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
+{
+	u32 mbvficr = rd32(E1000_MBVFICR);
+	s32 ret_val = -E1000_ERR_MBX;
+
+	if (mbvficr & mask) {
+		ret_val = 0;
+		wr32(E1000_MBVFICR, mask);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  igb_check_for_msg_pf - checks to see if the VF has sent mail
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
+{
+	s32 ret_val = -E1000_ERR_MBX;
+
+	if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
+		ret_val = 0;
+		hw->mbx.stats.reqs++;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  igb_check_for_ack_pf - checks to see if the VF has ACKed
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
+{
+	s32 ret_val = -E1000_ERR_MBX;
+
+	if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
+		ret_val = 0;
+		hw->mbx.stats.acks++;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  igb_check_for_rst_pf - checks to see if the VF has reset
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF's reset bit is set in VFLRE, else ERR_MBX
+ **/
+static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
+{
+	u32 vflre = rd32(E1000_VFLRE);
+	s32 ret_val = -E1000_ERR_MBX;
+
+	if (vflre & (1 << vf_number)) {
+		ret_val = 0;
+		wr32(E1000_VFLRE, (1 << vf_number));
+		hw->mbx.stats.rsts++;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  igb_obtain_mbx_lock_pf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
+{
+	s32 ret_val = -E1000_ERR_MBX;
+	u32 p2v_mailbox;
+
+	/* Take ownership of the buffer */
+	wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
+
+	/* reserve mailbox for vf use */
+	p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
+	if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
+		ret_val = 0;
+
+	return ret_val;
+}
+
+/**
+ *  igb_write_mbx_pf - Places a message in the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+			    u16 vf_number)
+{
+	s32 ret_val;
+	u16 i;
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
+	if (ret_val)
+		goto out_no_write;
+
+	/* flush msg and acks as we are overwriting the message buffer */
+	igb_check_for_msg_pf(hw, vf_number);
+	igb_check_for_ack_pf(hw, vf_number);
+
+	/* copy the caller specified message to the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		array_wr32(E1000_VMBMEM(vf_number), i, msg[i]);
+
+	/* Interrupt VF to tell it a message has been sent and release buffer*/
+	wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
+
+	/* update stats */
+	hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+	return ret_val;
+}
+
+/**
+ *  igb_read_mbx_pf - Read a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @vf_number: the VF index
+ *
+ *  This function copies a message from the mailbox buffer to the caller's
+ *  memory buffer.  The presumption is that the caller knows that there was
+ *  a message due to a VF request so no polling for message is needed.
+ **/
+static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+			   u16 vf_number)
+{
+	s32 ret_val;
+	u16 i;
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
+	if (ret_val)
+		goto out_no_read;
+
+	/* copy the message to the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		msg[i] = array_rd32(E1000_VMBMEM(vf_number), i);
+
+	/* Acknowledge the message and release buffer */
+	wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
+
+	/* update stats */
+	hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+	return ret_val;
+}
+
+/**
+ *  igb_init_mbx_params_pf - set initial values for pf mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the hw->mbx struct to correct values for pf mailbox
+ **/
+s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+
+	mbx->timeout = 0;
+	mbx->usec_delay = 0;
+
+	mbx->size = E1000_VFMAILBOX_SIZE;
+
+	mbx->ops.read = igb_read_mbx_pf;
+	mbx->ops.write = igb_write_mbx_pf;
+	mbx->ops.read_posted = igb_read_posted_mbx;
+	mbx->ops.write_posted = igb_write_posted_mbx;
+	mbx->ops.check_for_msg = igb_check_for_msg_pf;
+	mbx->ops.check_for_ack = igb_check_for_ack_pf;
+	mbx->ops.check_for_rst = igb_check_for_rst_pf;
+
+	mbx->stats.msgs_tx = 0;
+	mbx->stats.msgs_rx = 0;
+	mbx->stats.reqs = 0;
+	mbx->stats.acks = 0;
+	mbx->stats.rsts = 0;
+
+	return 0;
+}
+
+++ linux-patched/drivers/xenomai/net/drivers/igb/e1000_nvm.h	2022-03-21 12:58:29.371889137 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/igb/e1000_mac.c	1970-01-01 01:00:00.000000000 +0100
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_NVM_H_
+#define _E1000_NVM_H_
+
+s32  igb_acquire_nvm(struct e1000_hw *hw);
+void igb_release_nvm(struct e1000_hw *hw);
+s32  igb_read_mac_addr(struct e1000_hw *hw);
+s32  igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
+s32  igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
+			  u32 part_num_size);
+s32  igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  igb_validate_nvm_checksum(struct e1000_hw *hw);
+s32  igb_update_nvm_checksum(struct e1000_hw *hw);
+
+struct e1000_fw_version {
+	u32 etrack_id;
+	u16 eep_major;
+	u16 eep_minor;
+	u16 eep_build;
+
+	u8 invm_major;
+	u8 invm_minor;
+	u8 invm_img_type;
+
+	bool or_valid;
+	u16 or_major;
+	u16 or_build;
+	u16 or_patch;
+};
+void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers);
+
+#endif
+++ linux-patched/drivers/xenomai/net/drivers/igb/e1000_mac.c	2022-03-21 12:58:29.366889186 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/igb/e1000_i210.c	1970-01-01 01:00:00.000000000 +0100
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "e1000_mac.h"
+
+#include "igb.h"
+
+static s32 igb_set_default_fc(struct e1000_hw *hw);
+static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
+
+/**
+ *  igb_get_bus_info_pcie - Get PCIe bus information
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines and stores the system bus information for a particular
+ *  network interface.  The following bus information is determined and stored:
+ *  bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	s32 ret_val;
+	u32 reg;
+	u16 pcie_link_status;
+
+	bus->type = e1000_bus_type_pci_express;
+
+	ret_val = igb_read_pcie_cap_reg(hw,
+					PCI_EXP_LNKSTA,
+					&pcie_link_status);
+	if (ret_val) {
+		bus->width = e1000_bus_width_unknown;
+		bus->speed = e1000_bus_speed_unknown;
+	} else {
+		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
+		case PCI_EXP_LNKSTA_CLS_2_5GB:
+			bus->speed = e1000_bus_speed_2500;
+			break;
+		case PCI_EXP_LNKSTA_CLS_5_0GB:
+			bus->speed = e1000_bus_speed_5000;
+			break;
+		default:
+			bus->speed = e1000_bus_speed_unknown;
+			break;
+		}
+
+		bus->width = (enum e1000_bus_width)((pcie_link_status &
+						     PCI_EXP_LNKSTA_NLW) >>
+						     PCI_EXP_LNKSTA_NLW_SHIFT);
+	}
+
+	reg = rd32(E1000_STATUS);
+	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+
+	return 0;
+}
+
+/**
+ *  igb_clear_vfta - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the register array which contains the VLAN filter table by
+ *  setting all the values to 0.
+ **/
+void igb_clear_vfta(struct e1000_hw *hw)
+{
+	u32 offset;
+
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		array_wr32(E1000_VFTA, offset, 0);
+		wrfl();
+	}
+}
+
+/**
+ *  igb_write_vfta - Write value to VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset in VLAN filter table
+ *  @value: register value written to VLAN filter table
+ *
+ *  Writes value at the given offset in the register array which stores
+ *  the VLAN filter table.
+ **/
+static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	array_wr32(E1000_VFTA, offset, value);
+	wrfl();
+}
+
+/* Due to a hw erratum, if the host tries to configure the VFTA register
+ * while performing queries from the BMC or DMA, then the VFTA in some
+ * cases won't be written.
+ */
+
+/**
+ *  igb_clear_vfta_i350 - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the register array which contains the VLAN filter table by
+ *  setting all the values to 0.
+ **/
+void igb_clear_vfta_i350(struct e1000_hw *hw)
+{
+	u32 offset;
+	int i;
+
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		for (i = 0; i < 10; i++)
+			array_wr32(E1000_VFTA, offset, 0);
+
+		wrfl();
+	}
+}
+
+/**
+ *  igb_write_vfta_i350 - Write value to VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset in VLAN filter table
+ *  @value: register value written to VLAN filter table
+ *
+ *  Writes value at the given offset in the register array which stores
+ *  the VLAN filter table.
+ **/
+static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	int i;
+
+	for (i = 0; i < 10; i++)
+		array_wr32(E1000_VFTA, offset, value);
+
+	wrfl();
+}
+
+/**
+ *  igb_init_rx_addrs - Initialize receive addresses
+ *  @hw: pointer to the HW structure
+ *  @rar_count: number of receive address registers
+ *
+ *  Sets up the receive address registers by setting the base receive address
+ *  register to the device's MAC address and clearing all the other receive
+ *  address registers to 0.
+ **/
+void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
+{
+	u32 i;
+	u8 mac_addr[ETH_ALEN] = {0};
+
+	/* Setup the receive address */
+	hw_dbg("Programming MAC Address into RAR[0]\n");
+
+	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
+
+	/* Zero out the other (rar_entry_count - 1) receive addresses */
+	hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
+	for (i = 1; i < rar_count; i++)
+		hw->mac.ops.rar_set(hw, mac_addr, i);
+}
+
+/**
+ *  igb_vfta_set - enable or disable vlan in VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @vid: VLAN id to add or remove
+ *  @add: if true add filter, if false remove
+ *
+ *  Sets or clears a bit in the VLAN filter table array based on VLAN id
+ *  and if we are adding or removing the filter
+ **/
+s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
+{
+	u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
+	u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+	u32 vfta;
+	struct igb_adapter *adapter = hw->back;
+	s32 ret_val = 0;
+
+	vfta = adapter->shadow_vfta[index];
+
+	/* bit was set/cleared before we started */
+	if ((!!(vfta & mask)) == add) {
+		ret_val = -E1000_ERR_CONFIG;
+	} else {
+		if (add)
+			vfta |= mask;
+		else
+			vfta &= ~mask;
+	}
+	if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
+		igb_write_vfta_i350(hw, index, vfta);
+	else
+		igb_write_vfta(hw, index, vfta);
+	adapter->shadow_vfta[index] = vfta;
+
+	return ret_val;
+}
+
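+/*
+ * For illustration, assuming the usual 5-bit split of the VLAN id
+ * (E1000_VFTA_ENTRY_SHIFT == 5, E1000_VFTA_ENTRY_BIT_SHIFT_MASK ==
+ * 0x1F): vid 100 selects index = 100 >> 5 = 3 and mask = 1 << (100 &
+ * 0x1F) = 1 << 4, so adding that filter sets bit 4 of VFTA[3] and of
+ * the shadow copy.
+ */
+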
+/**
+ *  igb_check_alt_mac_addr - Check for alternate MAC addr
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the NVM for an alternate MAC address.  An alternate MAC address
+ *  can be set up by pre-boot software and must be treated like a permanent
+ *  address, overriding the actual permanent MAC address.  If a valid
+ *  alternate MAC address is found it is programmed into RAR0; NVM read
+ *  failures are returned as errors.
+ **/
+s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
+{
+	u32 i;
+	s32 ret_val = 0;
+	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+	u8 alt_mac_addr[ETH_ALEN];
+
+	/* Alternate MAC address is handled by the option ROM for 82580
+	 * and newer. SW support not required.
+	 */
+	if (hw->mac.type >= e1000_82580)
+		goto out;
+
+	ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+				 &nvm_alt_mac_addr_offset);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
+	    (nvm_alt_mac_addr_offset == 0x0000))
+		/* There is no Alternate MAC Address */
+		goto out;
+
+	if (hw->bus.func == E1000_FUNC_1)
+		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+	if (hw->bus.func == E1000_FUNC_2)
+		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
+
+	if (hw->bus.func == E1000_FUNC_3)
+		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
+	for (i = 0; i < ETH_ALEN; i += 2) {
+		offset = nvm_alt_mac_addr_offset + (i >> 1);
+		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
+		if (ret_val) {
+			hw_dbg("NVM Read Error\n");
+			goto out;
+		}
+
+		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+	}
+
+	/* if multicast bit is set, the alternate address will not be used */
+	if (is_multicast_ether_addr(alt_mac_addr)) {
+		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
+		goto out;
+	}
+
+	/* We have a valid alternate MAC address, and we want to treat it the
+	 * same as the normal permanent MAC address stored by the HW into the
+	 * RAR. Do this by mapping this address into RAR0.
+	 */
+	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_rar_set - Set receive address register
+ *  @hw: pointer to the HW structure
+ *  @addr: pointer to the receive address
+ *  @index: receive address array register
+ *
+ *  Sets the receive address array register at index to the address passed
+ *  in by addr.
+ **/
+void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+	u32 rar_low, rar_high;
+
+	/* HW expects these in little endian so we reverse the byte order
+	 * from network order (big endian) to little endian
+	 */
+	rar_low = ((u32) addr[0] |
+		   ((u32) addr[1] << 8) |
+		    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+	/* If MAC address zero, no need to set the AV bit */
+	if (rar_low || rar_high)
+		rar_high |= E1000_RAH_AV;
+
+	/* Some bridges will combine consecutive 32-bit writes into
+	 * a single burst write, which will malfunction on some parts.
+	 * The flushes avoid this.
+	 */
+	wr32(E1000_RAL(index), rar_low);
+	wrfl();
+	wr32(E1000_RAH(index), rar_high);
+	wrfl();
+}
+
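+/*
+ * Byte-order example for the packing above, using the hypothetical
+ * address 00:1b:21:3a:4c:5d: rar_low becomes 0x3a211b00 and rar_high
+ * becomes 0x00005d4c before E1000_RAH_AV is OR'd in, i.e. the first
+ * byte of the MAC address lands in the least significant byte of RAL.
+ */
+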
+/**
+ *  igb_mta_set - Set multicast filter table address
+ *  @hw: pointer to the HW structure
+ *  @hash_value: determines the MTA register and bit to set
+ *
+ *  The multicast table (MTA) is a register array of 32-bit registers.
+ *  The hash_value is used to determine which register the bit is in; the
+ *  current value is read, the new bit is OR'd in and the new value is
+ *  written back into the register.
+ **/
+void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
+{
+	u32 hash_bit, hash_reg, mta;
+
+	/* The MTA is a register array of 32-bit registers. It is
+	 * treated like an array of (32*mta_reg_count) bits.  We want to
+	 * set bit BitArray[hash_value]. So we figure out what register
+	 * the bit is in, read it, OR in the new bit, then write
+	 * back the new value.  The (hw->mac.mta_reg_count - 1) serves as a
+	 * mask to bits 31:5 of the hash value which gives us the
+	 * register we're modifying.  The hash bit within that register
+	 * is determined by the lower 5 bits of the hash value.
+	 */
+	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+	hash_bit = hash_value & 0x1F;
+
+	mta = array_rd32(E1000_MTA, hash_reg);
+
+	mta |= (1 << hash_bit);
+
+	array_wr32(E1000_MTA, hash_reg, mta);
+	wrfl();
+}
+
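+/*
+ * Example of the register/bit selection above, assuming an MTA of 128
+ * registers and the hash value 0x563 from the worked example in
+ * igb_hash_mc_addr() below: hash_reg = (0x563 >> 5) & 0x7f = 43 and
+ * hash_bit = 0x563 & 0x1f = 3, so bit 3 of MTA[43] gets set.
+ */
+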
+/**
+ *  igb_hash_mc_addr - Generate a multicast hash value
+ *  @hw: pointer to the HW structure
+ *  @mc_addr: pointer to a multicast address
+ *
+ *  Generates a multicast address hash value which is used to determine
+ *  the multicast filter table array address and new table value.  See
+ *  igb_mta_set()
+ **/
+static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+	u32 hash_value, hash_mask;
+	u8 bit_shift = 0;
+
+	/* Register count multiplied by bits per register */
+	hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
+	 * where 0xFF would still fall within the hash mask.
+	 */
+	while (hash_mask >> bit_shift != 0xFF)
+		bit_shift++;
+
+	/* The portion of the address that is used for the hash table
+	 * is determined by the mc_filter_type setting.
+	 * The algorithm is such that there is a total of 8 bits of shifting.
+	 * The bit_shift for a mc_filter_type of 0 represents the number of
+	 * left-shifts where the MSB of mc_addr[5] would still fall within
+	 * the hash_mask.  Case 0 does this exactly.  Since there are a total
+	 * of 8 bits of shifting, then mc_addr[4] will shift right the
+	 * remaining number of bits. Thus 8 - bit_shift.  The rest of the
+	 * cases are a variation of this algorithm...essentially raising the
+	 * number of bits to shift mc_addr[5] left, while still keeping the
+	 * 8-bit shifting total.
+	 *
+	 * For example, given the following Destination MAC Address and an
+	 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+	 * we can see that the bit_shift for case 0 is 4.  These are the hash
+	 * values resulting from each mc_filter_type...
+	 * [0] [1] [2] [3] [4] [5]
+	 * 01  AA  00  12  34  56
+	 * LSB                 MSB
+	 *
+	 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+	 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+	 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+	 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+	 */
+	switch (hw->mac.mc_filter_type) {
+	default:
+	case 0:
+		break;
+	case 1:
+		bit_shift += 1;
+		break;
+	case 2:
+		bit_shift += 2;
+		break;
+	case 3:
+		bit_shift += 4;
+		break;
+	}
+
+	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+				  (((u16) mc_addr[5]) << bit_shift)));
+
+	return hash_value;
+}
+
+/**
+ *  igb_update_mc_addr_list - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *
+ *  Updates entire Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void igb_update_mc_addr_list(struct e1000_hw *hw,
+			     u8 *mc_addr_list, u32 mc_addr_count)
+{
+	u32 hash_value, hash_bit, hash_reg;
+	int i;
+
+	/* clear mta_shadow */
+	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+	/* update mta_shadow from mc_addr_list */
+	for (i = 0; (u32) i < mc_addr_count; i++) {
+		hash_value = igb_hash_mc_addr(hw, mc_addr_list);
+
+		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+		hash_bit = hash_value & 0x1F;
+
+		hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+		mc_addr_list += (ETH_ALEN);
+	}
+
+	/* replace the entire MTA table */
+	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+		array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
+	wrfl();
+}
+
+/**
+ *  igb_clear_hw_cntrs_base - Clear base hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the base hardware counters by reading the counter registers.
+ **/
+void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
+{
+	rd32(E1000_CRCERRS);
+	rd32(E1000_SYMERRS);
+	rd32(E1000_MPC);
+	rd32(E1000_SCC);
+	rd32(E1000_ECOL);
+	rd32(E1000_MCC);
+	rd32(E1000_LATECOL);
+	rd32(E1000_COLC);
+	rd32(E1000_DC);
+	rd32(E1000_SEC);
+	rd32(E1000_RLEC);
+	rd32(E1000_XONRXC);
+	rd32(E1000_XONTXC);
+	rd32(E1000_XOFFRXC);
+	rd32(E1000_XOFFTXC);
+	rd32(E1000_FCRUC);
+	rd32(E1000_GPRC);
+	rd32(E1000_BPRC);
+	rd32(E1000_MPRC);
+	rd32(E1000_GPTC);
+	rd32(E1000_GORCL);
+	rd32(E1000_GORCH);
+	rd32(E1000_GOTCL);
+	rd32(E1000_GOTCH);
+	rd32(E1000_RNBC);
+	rd32(E1000_RUC);
+	rd32(E1000_RFC);
+	rd32(E1000_ROC);
+	rd32(E1000_RJC);
+	rd32(E1000_TORL);
+	rd32(E1000_TORH);
+	rd32(E1000_TOTL);
+	rd32(E1000_TOTH);
+	rd32(E1000_TPR);
+	rd32(E1000_TPT);
+	rd32(E1000_MPTC);
+	rd32(E1000_BPTC);
+}
+
+/**
+ *  igb_check_for_copper_link - Check for link (Copper)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks to see if the link status of the hardware has changed.  If a
+ *  change in link status has been detected, then we read the PHY registers
+ *  to get the current speed/duplex if link exists.
+ **/
+s32 igb_check_for_copper_link(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	bool link;
+
+	/* We only want to go out to the PHY registers to see if Auto-Neg
+	 * has completed and/or if our link status has changed.  The
+	 * get_link_status flag is set upon receiving a Link Status
+	 * Change or Rx Sequence Error interrupt.
+	 */
+	if (!mac->get_link_status) {
+		ret_val = 0;
+		goto out;
+	}
+
+	/* First we want to see if the MII Status Register reports
+	 * link.  If so, then we want to get the current speed/duplex
+	 * of the PHY.
+	 */
+	ret_val = igb_phy_has_link(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link)
+		goto out; /* No link detected */
+
+	mac->get_link_status = false;
+
+	/* Check if there was DownShift, must be checked
+	 * immediately after link-up
+	 */
+	igb_check_downshift(hw);
+
+	/* If we are forcing speed/duplex, then we simply return since
+	 * we have already determined whether we have link or not.
+	 */
+	if (!mac->autoneg) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	/* Auto-Neg is enabled.  Auto Speed Detection takes care
+	 * of MAC speed/duplex configuration.  So we only need to
+	 * configure Collision Distance in the MAC.
+	 */
+	igb_config_collision_dist(hw);
+
+	/* Configure Flow Control now that Auto-Neg has completed.
+	 * First, we need to restore the desired flow control
+	 * settings because we may have had to re-autoneg with a
+	 * different link partner.
+	 */
+	ret_val = igb_config_fc_after_link_up(hw);
+	if (ret_val)
+		hw_dbg("Error configuring flow control\n");
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_setup_link - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+s32 igb_setup_link(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	/* In the case of the phy reset being blocked, we already have a link.
+	 * We do not need to set it up again.
+	 */
+	if (igb_check_reset_block(hw))
+		goto out;
+
+	/* If requested flow control is set to default, set flow control
+	 * based on the EEPROM flow control settings.
+	 */
+	if (hw->fc.requested_mode == e1000_fc_default) {
+		ret_val = igb_set_default_fc(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	/* We want to save off the original Flow Control configuration just
+	 * in case we get disconnected and then reconnected into a different
+	 * hub or switch with different Flow Control capabilities.
+	 */
+	hw->fc.current_mode = hw->fc.requested_mode;
+
+	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
+
+	/* Call the necessary media_type subroutine to configure the link. */
+	ret_val = hw->mac.ops.setup_physical_interface(hw);
+	if (ret_val)
+		goto out;
+
+	/* Initialize the flow control address, type, and PAUSE timer
+	 * registers to their default values.  This is done even if flow
+	 * control is disabled, because it does not hurt anything to
+	 * initialize these registers.
+	 */
+	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
+	wr32(E1000_FCT, FLOW_CONTROL_TYPE);
+	wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+	wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+	wr32(E1000_FCTTV, hw->fc.pause_time);
+
+	ret_val = igb_set_fc_watermarks(hw);
+
+out:
+
+	return ret_val;
+}
+
+/**
+ *  igb_config_collision_dist - Configure collision distance
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the collision distance to the default value and is used
+ *  during link setup. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+void igb_config_collision_dist(struct e1000_hw *hw)
+{
+	u32 tctl;
+
+	tctl = rd32(E1000_TCTL);
+
+	tctl &= ~E1000_TCTL_COLD;
+	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+	wr32(E1000_TCTL, tctl);
+	wrfl();
+}
+
+/**
+ *  igb_set_fc_watermarks - Set flow control high/low watermarks
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the flow control high/low threshold (watermark) registers.  If
+ *  flow control XON frame transmission is enabled, then the XON frame
+ *  transmission enable bit is set as well.
+ **/
+static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u32 fcrtl = 0, fcrth = 0;
+
+	/* Set the flow control receive threshold registers.  Normally,
+	 * these registers will be set to a default threshold that may be
+	 * adjusted later by the driver's runtime code.  However, if the
+	 * ability to transmit pause frames is not enabled, then these
+	 * registers will be set to 0.
+	 */
+	if (hw->fc.current_mode & e1000_fc_tx_pause) {
+		/* We need to set up the Receive Threshold high and low water
+		 * marks as well as (optionally) enabling the transmission of
+		 * XON frames.
+		 */
+		fcrtl = hw->fc.low_water;
+		if (hw->fc.send_xon)
+			fcrtl |= E1000_FCRTL_XONE;
+
+		fcrth = hw->fc.high_water;
+	}
+	wr32(E1000_FCRTL, fcrtl);
+	wr32(E1000_FCRTH, fcrth);
+
+	return ret_val;
+}
+
+/**
+ *  igb_set_default_fc - Set flow control default values
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the EEPROM for the default values for flow control and store the
+ *  values.
+ **/
+static s32 igb_set_default_fc(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 lan_offset;
+	u16 nvm_data;
+
+	/* Read and store word 0x0F of the EEPROM. This word contains bits
+	 * that determine the hardware's default PAUSE (flow control) mode,
+	 * a bit that determines whether the HW defaults to enabling or
+	 * disabling auto-negotiation, and the direction of the
+	 * SW defined pins. If there is no SW over-ride of the flow
+	 * control setting, then the variable hw->fc will
+	 * be initialized based on a value in the EEPROM.
+	 */
+	if (hw->mac.type == e1000_i350) {
+		lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
+		ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
+					   + lan_offset, 1, &nvm_data);
+	} else {
+		ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
+					   1, &nvm_data);
+	}
+
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
+		hw->fc.requested_mode = e1000_fc_none;
+	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
+		 NVM_WORD0F_ASM_DIR)
+		hw->fc.requested_mode = e1000_fc_tx_pause;
+	else
+		hw->fc.requested_mode = e1000_fc_full;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_force_mac_fc - Force the MAC's flow control settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
+ *  device control register to reflect the adapter settings.  TFCE and RFCE
+ *  need to be explicitly set by software when a copper PHY is used because
+ *  autonegotiation is managed by the PHY rather than the MAC.  Software must
+ *  also configure these bits when link is forced on a fiber connection.
+ **/
+s32 igb_force_mac_fc(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val = 0;
+
+	ctrl = rd32(E1000_CTRL);
+
+	/* Because we didn't get link via the internal auto-negotiation
+	 * mechanism (we either forced link or we got link via PHY
+	 * auto-neg), we have to manually enable/disable transmit and
+	 * receive flow control.
+	 *
+	 * The "Case" statement below enables/disable flow control
+	 * according to the "hw->fc.current_mode" parameter.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause
+	 *          frames but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *          but we do not receive pause frames).
+	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+	 *  other:  No other values should be possible at this point.
+	 */
+	hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+	switch (hw->fc.current_mode) {
+	case e1000_fc_none:
+		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+		break;
+	case e1000_fc_rx_pause:
+		ctrl &= (~E1000_CTRL_TFCE);
+		ctrl |= E1000_CTRL_RFCE;
+		break;
+	case e1000_fc_tx_pause:
+		ctrl &= (~E1000_CTRL_RFCE);
+		ctrl |= E1000_CTRL_TFCE;
+		break;
+	case e1000_fc_full:
+		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+		break;
+	default:
+		hw_dbg("Flow control param set incorrectly\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	wr32(E1000_CTRL, ctrl);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_config_fc_after_link_up - Configures flow control after link
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the status of auto-negotiation after link up to ensure that the
+ *  speed and duplex were not forced.  If the link needed to be forced, then
+ *  flow control needs to be forced also.  If auto-negotiation is enabled
+ *  and did not fail, then we configure flow control based on our link
+ *  partner.
+ **/
+s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = 0;
+	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
+	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+	u16 speed, duplex;
+
+	/* Check for the case where we have fiber media and auto-neg failed
+	 * so we had to force link.  In this case, we need to force the
+	 * configuration of the MAC to match the "fc" parameter.
+	 */
+	if (mac->autoneg_failed) {
+		if (hw->phy.media_type == e1000_media_type_internal_serdes)
+			ret_val = igb_force_mac_fc(hw);
+	} else {
+		if (hw->phy.media_type == e1000_media_type_copper)
+			ret_val = igb_force_mac_fc(hw);
+	}
+
+	if (ret_val) {
+		hw_dbg("Error forcing flow control settings\n");
+		goto out;
+	}
+
+	/* Check for the case where we have copper media and auto-neg is
+	 * enabled.  In this case, we need to check and see if Auto-Neg
+	 * has completed, and if so, how the PHY and link partner has
+	 * flow control configured.
+	 */
+	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
+		/* Read the MII Status Register and check to see if AutoNeg
+		 * has completed.  We read this twice because this reg has
+		 * some "sticky" (latched) bits.
+		 */
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+						   &mii_status_reg);
+		if (ret_val)
+			goto out;
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+						   &mii_status_reg);
+		if (ret_val)
+			goto out;
+
+		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+			hw_dbg("Copper PHY and Auto Neg has not completed.\n");
+			goto out;
+		}
+
+		/* The AutoNeg process has completed, so we now need to
+		 * read both the Auto Negotiation Advertisement
+		 * Register (Address 4) and the Auto_Negotiation Base
+		 * Page Ability Register (Address 5) to determine how
+		 * flow control was negotiated.
+		 */
+		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
+					    &mii_nway_adv_reg);
+		if (ret_val)
+			goto out;
+		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
+					    &mii_nway_lp_ability_reg);
+		if (ret_val)
+			goto out;
+
+		/* Two bits in the Auto Negotiation Advertisement Register
+		 * (Address 4) and two bits in the Auto Negotiation Base
+		 * Page Ability Register (Address 5) determine flow control
+		 * for both the PHY and the link partner.  The following
+		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+		 * 1999, describes these PAUSE resolution bits and how flow
+		 * control is determined based upon these settings.
+		 * NOTE:  DC = Don't Care
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+		 *-------|---------|-------|---------|--------------------
+		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
+		 *   0   |    1    |   0   |   DC    | e1000_fc_none
+		 *   0   |    1    |   1   |    0    | e1000_fc_none
+		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+		 *   1   |    0    |   0   |   DC    | e1000_fc_none
+		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+		 *   1   |    1    |   0   |    0    | e1000_fc_none
+		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+		 *
+		 * Are both PAUSE bits set to 1?  If so, this implies
+		 * Symmetric Flow Control is enabled at both ends.  The
+		 * ASM_DIR bits are irrelevant per the spec.
+		 *
+		 * For Symmetric Flow Control:
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+		 *
+		 */
+		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+			/* Now we need to check if the user selected Rx ONLY
+			 * of pause frames.  In this case, we had to advertise
+			 * FULL flow control because we could not advertise Rx
+			 * ONLY. Hence, we must now check to see if we need to
+			 * turn OFF the TRANSMISSION of PAUSE frames.
+			 */
+			if (hw->fc.requested_mode == e1000_fc_full) {
+				hw->fc.current_mode = e1000_fc_full;
+				hw_dbg("Flow Control = FULL.\n");
+			} else {
+				hw->fc.current_mode = e1000_fc_rx_pause;
+				hw_dbg("Flow Control = RX PAUSE frames only.\n");
+			}
+		}
+		/* For receiving PAUSE frames ONLY.
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+		 */
+		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+			  (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+			  (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+			  (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+			hw->fc.current_mode = e1000_fc_tx_pause;
+			hw_dbg("Flow Control = TX PAUSE frames only.\n");
+		}
+		/* For transmitting PAUSE frames ONLY.
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+		 */
+		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+			hw->fc.current_mode = e1000_fc_rx_pause;
+			hw_dbg("Flow Control = RX PAUSE frames only.\n");
+		}
+		/* Per the IEEE spec, at this point flow control should be
+		 * disabled.  However, we want to consider that we could
+		 * be connected to a legacy switch that doesn't advertise
+		 * desired flow control, but can be forced on the link
+		 * partner.  So if we advertised no flow control, that is
+		 * what we will resolve to.  If we advertised some kind of
+		 * receive capability (Rx Pause Only or Full Flow Control)
+		 * and the link partner advertised none, we will configure
+		 * ourselves to enable Rx Flow Control only.  We can do
+		 * this safely for two reasons:  If the link partner really
+		 * didn't want flow control enabled, and we enable Rx, no
+		 * harm done since we won't be receiving any PAUSE frames
+		 * anyway.  If the intent on the link partner was to have
+		 * flow control enabled, then by us enabling RX only, we
+		 * can at least receive pause frames and process them.
+		 * This is a good idea because in most cases, since we are
+		 * predominantly a server NIC, more times than not we will
+		 * be asked to delay transmission of packets than asking
+		 * our link partner to pause transmission of frames.
+		 */
+		else if ((hw->fc.requested_mode == e1000_fc_none) ||
+			 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
+			 (hw->fc.strict_ieee)) {
+			hw->fc.current_mode = e1000_fc_none;
+			hw_dbg("Flow Control = NONE.\n");
+		} else {
+			hw->fc.current_mode = e1000_fc_rx_pause;
+			hw_dbg("Flow Control = RX PAUSE frames only.\n");
+		}
+
+		/* Now we need to do one last check...  If we auto-
+		 * negotiated to HALF DUPLEX, flow control should not be
+		 * enabled per IEEE 802.3 spec.
+		 */
+		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
+		if (ret_val) {
+			hw_dbg("Error getting link speed and duplex\n");
+			goto out;
+		}
+
+		if (duplex == HALF_DUPLEX)
+			hw->fc.current_mode = e1000_fc_none;
+
+		/* Now we call a subroutine to actually force the MAC
+		 * controller to use the correct flow control settings.
+		 */
+		ret_val = igb_force_mac_fc(hw);
+		if (ret_val) {
+			hw_dbg("Error forcing flow control settings\n");
+			goto out;
+		}
+	}
+	/* Check for the case where we have SerDes media and auto-neg is
+	 * enabled.  In this case, we need to check and see if Auto-Neg
+	 * has completed, and if so, how the PHY and link partner has
+	 * flow control configured.
+	 */
+	if ((hw->phy.media_type == e1000_media_type_internal_serdes)
+		&& mac->autoneg) {
+		/* Read the PCS_LSTS and check to see if AutoNeg
+		 * has completed.
+		 */
+		pcs_status_reg = rd32(E1000_PCS_LSTAT);
+
+		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
+			hw_dbg("PCS Auto Neg has not completed.\n");
+			return ret_val;
+		}
+
+		/* The AutoNeg process has completed, so we now need to
+		 * read both the Auto Negotiation Advertisement
+		 * Register (PCS_ANADV) and the Auto_Negotiation Base
+		 * Page Ability Register (PCS_LPAB) to determine how
+		 * flow control was negotiated.
+		 */
+		pcs_adv_reg = rd32(E1000_PCS_ANADV);
+		pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);
+
+		/* Two bits in the Auto Negotiation Advertisement Register
+		 * (PCS_ANADV) and two bits in the Auto Negotiation Base
+		 * Page Ability Register (PCS_LPAB) determine flow control
+		 * for both the PHY and the link partner.  The following
+		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+		 * 1999, describes these PAUSE resolution bits and how flow
+		 * control is determined based upon these settings.
+		 * NOTE:  DC = Don't Care
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+		 *-------|---------|-------|---------|--------------------
+		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
+		 *   0   |    1    |   0   |   DC    | e1000_fc_none
+		 *   0   |    1    |   1   |    0    | e1000_fc_none
+		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+		 *   1   |    0    |   0   |   DC    | e1000_fc_none
+		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+		 *   1   |    1    |   0   |    0    | e1000_fc_none
+		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+		 *
+		 * Are both PAUSE bits set to 1?  If so, this implies
+		 * Symmetric Flow Control is enabled at both ends.  The
+		 * ASM_DIR bits are irrelevant per the spec.
+		 *
+		 * For Symmetric Flow Control:
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+		 *
+		 */
+		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
+			/* Now we need to check if the user selected Rx ONLY
+			 * of pause frames.  In this case, we had to advertise
+			 * FULL flow control because we could not advertise Rx
+			 * ONLY. Hence, we must now check to see if we need to
+			 * turn OFF the TRANSMISSION of PAUSE frames.
+			 */
+			if (hw->fc.requested_mode == e1000_fc_full) {
+				hw->fc.current_mode = e1000_fc_full;
+				hw_dbg("Flow Control = FULL.\n");
+			} else {
+				hw->fc.current_mode = e1000_fc_rx_pause;
+				hw_dbg("Flow Control = Rx PAUSE frames only.\n");
+			}
+		}
+		/* For receiving PAUSE frames ONLY.
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+		 */
+		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
+			  (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+			  (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+			  (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+			hw->fc.current_mode = e1000_fc_tx_pause;
+			hw_dbg("Flow Control = Tx PAUSE frames only.\n");
+		}
+		/* For transmitting PAUSE frames ONLY.
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+		 */
+		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+			hw->fc.current_mode = e1000_fc_rx_pause;
+			hw_dbg("Flow Control = Rx PAUSE frames only.\n");
+		} else {
+			/* Per the IEEE spec, at this point flow control
+			 * should be disabled.
+			 */
+			hw->fc.current_mode = e1000_fc_none;
+			hw_dbg("Flow Control = NONE.\n");
+		}
+
+		/* Now we call a subroutine to actually force the MAC
+		 * controller to use the correct flow control settings.
+		 */
+		pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
+		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+		wr32(E1000_PCS_LCTL, pcs_ctrl_reg);
+
+		ret_val = igb_force_mac_fc(hw);
+		if (ret_val) {
+			hw_dbg("Error forcing flow control settings\n");
+			return ret_val;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Read the status register for the current speed/duplex and store the current
+ *  speed and duplex for copper connections.
+ **/
+s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+				      u16 *duplex)
+{
+	u32 status;
+
+	status = rd32(E1000_STATUS);
+	if (status & E1000_STATUS_SPEED_1000) {
+		*speed = SPEED_1000;
+		hw_dbg("1000 Mbs, ");
+	} else if (status & E1000_STATUS_SPEED_100) {
+		*speed = SPEED_100;
+		hw_dbg("100 Mbs, ");
+	} else {
+		*speed = SPEED_10;
+		hw_dbg("10 Mbs, ");
+	}
+
+	if (status & E1000_STATUS_FD) {
+		*duplex = FULL_DUPLEX;
+		hw_dbg("Full Duplex\n");
+	} else {
+		*duplex = HALF_DUPLEX;
+		hw_dbg("Half Duplex\n");
+	}
+
+	return 0;
+}
+
+/**
+ *  igb_get_hw_semaphore - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM
+ **/
+s32 igb_get_hw_semaphore(struct e1000_hw *hw)
+{
+	u32 swsm;
+	s32 ret_val = 0;
+	s32 timeout = hw->nvm.word_size + 1;
+	s32 i = 0;
+
+	/* Get the SW semaphore */
+	while (i < timeout) {
+		swsm = rd32(E1000_SWSM);
+		if (!(swsm & E1000_SWSM_SMBI))
+			break;
+
+		udelay(50);
+		i++;
+	}
+
+	if (i == timeout) {
+		hw_dbg("Driver can't access device - SMBI bit is set.\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	/* Get the FW semaphore. */
+	for (i = 0; i < timeout; i++) {
+		swsm = rd32(E1000_SWSM);
+		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+		/* Semaphore acquired if bit latched */
+		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
+			break;
+
+		udelay(50);
+	}
+
+	if (i == timeout) {
+		/* Release semaphores */
+		igb_put_hw_semaphore(hw);
+		hw_dbg("Driver can't access the NVM\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_put_hw_semaphore - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used to access the PHY or NVM
+ **/
+void igb_put_hw_semaphore(struct e1000_hw *hw)
+{
+	u32 swsm;
+
+	swsm = rd32(E1000_SWSM);
+
+	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+
+	wr32(E1000_SWSM, swsm);
+}
+
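+/*
+ * Sketch of the intended acquire/release pairing for the two helpers
+ * above (illustrative only):
+ *
+ *	if (igb_get_hw_semaphore(hw))
+ *		return -E1000_ERR_NVM;
+ *	... access PHY or NVM registers ...
+ *	igb_put_hw_semaphore(hw);
+ */
+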
+/**
+ *  igb_get_auto_rd_done - Check for auto read completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Check EEPROM for Auto Read done bit.
+ **/
+s32 igb_get_auto_rd_done(struct e1000_hw *hw)
+{
+	s32 i = 0;
+	s32 ret_val = 0;
+
+
+	while (i < AUTO_READ_DONE_TIMEOUT) {
+		if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
+			break;
+		usleep_range(1000, 2000);
+		i++;
+	}
+
+	if (i == AUTO_READ_DONE_TIMEOUT) {
+		hw_dbg("Auto read by HW from NVM has not completed.\n");
+		ret_val = -E1000_ERR_RESET;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_valid_led_default - Verify a valid default LED config
+ *  @hw: pointer to the HW structure
+ *  @data: pointer to the NVM (EEPROM)
+ *
+ *  Read the EEPROM for the current default LED configuration.  If the
+ *  LED configuration is not valid, set to a valid LED configuration.
+ **/
+static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
+		switch (hw->phy.media_type) {
+		case e1000_media_type_internal_serdes:
+			*data = ID_LED_DEFAULT_82575_SERDES;
+			break;
+		case e1000_media_type_copper:
+		default:
+			*data = ID_LED_DEFAULT;
+			break;
+		}
+	}
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_id_led_init - Initialize ID LED configuration
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the default LED configuration from the NVM and derives the
+ *  LEDCTL values (ledctl_mode1/ledctl_mode2) used for the two LED modes.
+ **/
+s32 igb_id_led_init(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	const u32 ledctl_mask = 0x000000FF;
+	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+	u16 data, i, temp;
+	const u16 led_mask = 0x0F;
+
+	/* i210 and i211 devices have different LED mechanism */
+	if ((hw->mac.type == e1000_i210) ||
+	    (hw->mac.type == e1000_i211))
+		ret_val = igb_valid_led_default_i210(hw, &data);
+	else
+		ret_val = igb_valid_led_default(hw, &data);
+
+	if (ret_val)
+		goto out;
+
+	mac->ledctl_default = rd32(E1000_LEDCTL);
+	mac->ledctl_mode1 = mac->ledctl_default;
+	mac->ledctl_mode2 = mac->ledctl_default;
+
+	for (i = 0; i < 4; i++) {
+		temp = (data >> (i << 2)) & led_mask;
+		switch (temp) {
+		case ID_LED_ON1_DEF2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_ON1_OFF2:
+			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode1 |= ledctl_on << (i << 3);
+			break;
+		case ID_LED_OFF1_DEF2:
+		case ID_LED_OFF1_ON2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode1 |= ledctl_off << (i << 3);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		switch (temp) {
+		case ID_LED_DEF1_ON2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_OFF1_ON2:
+			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode2 |= ledctl_on << (i << 3);
+			break;
+		case ID_LED_DEF1_OFF2:
+		case ID_LED_ON1_OFF2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode2 |= ledctl_off << (i << 3);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
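+/*
+ * Layout decoded by the loop above: the NVM word carries one 4-bit
+ * field per LED (picked out by data >> (i << 2)), each encoding that
+ * LED's behaviour for the two alternate modes.  The matching 8-bit LED
+ * field of LEDCTL (the i << 3 shifts) is then forced to "on" or "off"
+ * in ledctl_mode1/ledctl_mode2, while a default code leaves the value
+ * read from LEDCTL untouched.
+ */
+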
+/**
+ *  igb_cleanup_led - Set LED config to default operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Remove the current LED configuration and set the LED configuration
+ *  to the default value, saved from the EEPROM.
+ **/
+s32 igb_cleanup_led(struct e1000_hw *hw)
+{
+	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
+	return 0;
+}
+
+/**
+ *  igb_blink_led - Blink LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Blink the LEDs which are set to be on.
+ **/
+s32 igb_blink_led(struct e1000_hw *hw)
+{
+	u32 ledctl_blink = 0;
+	u32 i;
+
+	if (hw->phy.media_type == e1000_media_type_fiber) {
+		/* always blink LED0 for PCI-E fiber */
+		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+	} else {
+		/* Set the blink bit for each LED that's "on" (0x0E)
+		 * (or "off" if inverted) in ledctl_mode2.  The blink
+		 * logic in hardware only works when mode is set to "on"
+		 * so it must be changed accordingly when the mode is
+		 * "off" and inverted.
+		 */
+		ledctl_blink = hw->mac.ledctl_mode2;
+		for (i = 0; i < 32; i += 8) {
+			u32 mode = (hw->mac.ledctl_mode2 >> i) &
+			    E1000_LEDCTL_LED0_MODE_MASK;
+			u32 led_default = hw->mac.ledctl_default >> i;
+
+			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
+			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
+			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
+			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
+				ledctl_blink &=
+				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
+				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
+						 E1000_LEDCTL_MODE_LED_ON) << i;
+			}
+		}
+	}
+
+	wr32(E1000_LEDCTL, ledctl_blink);
+
+	return 0;
+}
+
+/**
+ *  igb_led_off - Turn LED off
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED off.
+ **/
+s32 igb_led_off(struct e1000_hw *hw)
+{
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ *  igb_disable_pcie_master - Disables PCI-express master access
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns 0 if successful, else returns -E1000_ERR_MASTER_REQUESTS_PENDING
+ *  (-10) if the master disable bit has not caused the master requests to be
+ *  disabled.
+ *
+ *  Disables PCI-Express master access and verifies there are no pending
+ *  requests.
+ **/
+s32 igb_disable_pcie_master(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 timeout = MASTER_DISABLE_TIMEOUT;
+	s32 ret_val = 0;
+
+	if (hw->bus.type != e1000_bus_type_pci_express)
+		goto out;
+
+	ctrl = rd32(E1000_CTRL);
+	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+	wr32(E1000_CTRL, ctrl);
+
+	while (timeout) {
+		if (!(rd32(E1000_STATUS) &
+		      E1000_STATUS_GIO_MASTER_ENABLE))
+			break;
+		udelay(100);
+		timeout--;
+	}
+
+	if (!timeout) {
+		hw_dbg("Master requests are pending.\n");
+		ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_validate_mdi_setting - Verify MDI/MDIx settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Verify that when auto-negotiation is not used, MDI/MDIx is correctly
+ *  set, which is forced to MDI mode only.
+ **/
+s32 igb_validate_mdi_setting(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	/* All MDI settings are supported on 82580 and newer. */
+	if (hw->mac.type >= e1000_82580)
+		goto out;
+
+	if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
+		hw_dbg("Invalid MDI setting detected\n");
+		hw->phy.mdix = 1;
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_8bit_ctrl_reg - Write an 8-bit CTRL register
+ *  @hw: pointer to the HW structure
+ *  @reg: 32bit register offset such as E1000_SCTL
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes an address/data control type register.  There are several of these
+ *  and they all have the format address << 8 | data and bit 31 is polled for
+ *  completion.
+ **/
+s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
+			      u32 offset, u8 data)
+{
+	u32 i, regvalue = 0;
+	s32 ret_val = 0;
+
+	/* Set up the address and data */
+	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
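+	/*
+	 * Per the layout described above: address in bits 15:8, data in
+	 * bits 7:0.  E.g. offset 0x01 with data 0xAB would be written as
+	 * 0x000001AB (values purely illustrative).
+	 */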
+	wr32(reg, regvalue);
+
+	/* Poll the ready bit to see if the write completed */
+	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
+		udelay(5);
+		regvalue = rd32(reg);
+		if (regvalue & E1000_GEN_CTL_READY)
+			break;
+	}
+	if (!(regvalue & E1000_GEN_CTL_READY)) {
+		hw_dbg("Reg %08x did not indicate ready\n", reg);
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_enable_mng_pass_thru - Enable processing of ARPs
+ *  @hw: pointer to the HW structure
+ *
+ *  Verifies the hardware needs to leave interface enabled so that frames can
+ *  be directed to and from the management interface.
+ **/
+bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+	u32 manc;
+	u32 fwsm, factps;
+	bool ret_val = false;
+
+	if (!hw->mac.asf_firmware_present)
+		goto out;
+
+	manc = rd32(E1000_MANC);
+
+	if (!(manc & E1000_MANC_RCV_TCO_EN))
+		goto out;
+
+	if (hw->mac.arc_subsystem_valid) {
+		fwsm = rd32(E1000_FWSM);
+		factps = rd32(E1000_FACTPS);
+
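+		/*
+		 * Pass-through is reported only when manageability clock
+		 * gating is off (FACTPS.MNGCG clear) and the firmware mode
+		 * field in FWSM indicates pass-through.
+		 */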
+		if (!(factps & E1000_FACTPS_MNGCG) &&
+		    ((fwsm & E1000_FWSM_MODE_MASK) ==
+		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
+			ret_val = true;
+			goto out;
+		}
+	} else {
+		if ((manc & E1000_MANC_SMBUS_EN) &&
+		    !(manc & E1000_MANC_ASF_EN)) {
+			ret_val = true;
+			goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+++ linux-patched/drivers/xenomai/net/drivers/igb/e1000_i210.c	2022-03-21 12:58:29.362889225 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/net/drivers/igb/e1000_mbx.h	1970-01-01 01:00:00.000000000 +0100
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+/* e1000_i210
+ * e1000_i211
+ */
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#include "e1000_hw.h"
+#include "e1000_i210.h"
+
+static s32 igb_update_flash_i210(struct e1000_hw *hw);
+
+/**
+ * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM
+ */
+static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
+{
+	u32 swsm;
+	s32 timeout = hw->nvm.word_size + 1;
+	s32 i = 0;
+
+	/* Get the SW semaphore */
+	while (i < timeout) {
+		swsm = rd32(E1000_SWSM);
+		if (!(swsm & E1000_SWSM_SMBI))
+			break;
+
+		udelay(50);
+		i++;
+	}
+
+	if (i == timeout) {
+		/* In rare circumstances, the SW semaphore may already be held
+		 * unintentionally. Clear the semaphore once before giving up.
+		 */
+		if (hw->dev_spec._82575.clear_semaphore_once) {
+			hw->dev_spec._82575.clear_semaphore_once = false;
+			igb_put_hw_semaphore(hw);
+			for (i = 0; i < timeout; i++) {
+				swsm = rd32(E1000_SWSM);
+				if (!(swsm & E1000_SWSM_SMBI))
+					break;
+
+				udelay(50);
+			}
+		}
+
+		/* If we do not have the semaphore here, we have to give up. */
+		if (i == timeout) {
+			hw_dbg("Driver can't access device - SMBI bit is set.\n");
+			return -E1000_ERR_NVM;
+		}
+	}
+
+	/* Get the FW semaphore. */
+	for (i = 0; i < timeout; i++) {
+		swsm = rd32(E1000_SWSM);
+		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+		/* Semaphore acquired if bit latched */
+		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
+			break;
+
+		udelay(50);
+	}
+
+	if (i == timeout) {
+		/* Release semaphores */
+		igb_put_hw_semaphore(hw);
+		hw_dbg("Driver can't access the NVM\n");
+		return -E1000_ERR_NVM;
+	}
+
+	return 0;
+}
+
+/**
+ *  igb_acquire_nvm_i210 - Request for access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the necessary semaphores for exclusive access to the EEPROM.
+ *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ *  Return successful if access grant bit set, else clear the request for
+ *  EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
+{
+	return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ *  igb_release_nvm_i210 - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ *  then release the semaphores acquired.
+ **/
+static void igb_release_nvm_i210(struct e1000_hw *hw)
+{
+	igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ *  igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
+ *  will also specify which port we're acquiring the lock for.
+ **/
+s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+	u32 swmask = mask;
+	u32 fwmask = mask << 16;
+	s32 ret_val = 0;
+	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+
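+	/*
+	 * SW_FW_SYNC holds the software flags in its low 16 bits and the
+	 * firmware flags in the upper 16 bits; the resource is free only
+	 * when neither side has its bit set.
+	 */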
+	while (i < timeout) {
+		if (igb_get_hw_semaphore_i210(hw)) {
+			ret_val = -E1000_ERR_SWFW_SYNC;
+			goto out;
+		}
+
+		swfw_sync = rd32(E1000_SW_FW_SYNC);
+		if (!(swfw_sync & (fwmask | swmask)))
+			break;
+
+		/* Firmware currently using resource (fwmask) */
+		igb_put_hw_semaphore(hw);
+		mdelay(5);
+		i++;
+	}
+
+	if (i == timeout) {
+		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
+		ret_val = -E1000_ERR_SWFW_SYNC;
+		goto out;
+	}
+
+	swfw_sync |= swmask;
+	wr32(E1000_SW_FW_SYNC, swfw_sync);
+
+	igb_put_hw_semaphore(hw);
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_release_swfw_sync_i210 - Release SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
+ *  will also specify which port we're releasing the lock for.
+ **/
+void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+
+	while (igb_get_hw_semaphore_i210(hw))
+		; /* Empty */
+
+	swfw_sync = rd32(E1000_SW_FW_SYNC);
+	swfw_sync &= ~mask;
+	wr32(E1000_SW_FW_SYNC, swfw_sync);
+
+	igb_put_hw_semaphore(hw);
+}
+
+/**
+ *  igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the Shadow Ram to read
+ *  @words: number of words to read
+ *  @data: word read from the Shadow Ram
+ *
+ *  Reads a 16 bit word from the Shadow Ram using the EERD register.
+ *  Uses necessary synchronization semaphores.
+ **/
+static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+				  u16 *data)
+{
+	s32 status = 0;
+	u16 i, count;
+
+	/* We cannot hold synchronization semaphores for too long,
+	 * because of forceful takeover procedure. However it is more efficient
+	 * to read in bursts than synchronizing access for each word.
+	 */
+	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
+		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+			E1000_EERD_EEWR_MAX_COUNT : (words - i);
+		if (!(hw->nvm.ops.acquire(hw))) {
+			status = igb_read_nvm_eerd(hw, offset, count,
+						     data + i);
+			hw->nvm.ops.release(hw);
+		} else {
+			status = E1000_ERR_SWFW_SYNC;
+		}
+
+		if (status)
+			break;
+	}
+
+	return status;
+}
+
+/**
+ *  igb_write_nvm_srwr - Write to Shadow Ram using EEWR
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the Shadow Ram to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the Shadow Ram
+ *
+ *  Writes data to Shadow Ram at offset using EEWR register.
+ *
+ *  If igb_update_nvm_checksum is not called after this function, the
+ *  Shadow Ram will most likely contain an invalid checksum.
+ **/
+static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
+				u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i, k, eewr = 0;
+	u32 attempts = 100000;
+	s32 ret_val = 0;
+
+	/* A check for invalid values:  offset too large, too many words,
+	 * too many words for the offset, and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		hw_dbg("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
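+	/*
+	 * Each EEWR cycle encodes the word address and data in a single
+	 * register write and is kicked off with E1000_NVM_RW_REG_START;
+	 * completion is signalled by E1000_NVM_RW_REG_DONE.
+	 */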
+	for (i = 0; i < words; i++) {
+		eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
+			(data[i] << E1000_NVM_RW_REG_DATA) |
+			E1000_NVM_RW_REG_START;
+
+		wr32(E1000_SRWR, eewr);
+
+		for (k = 0; k < attempts; k++) {
+			if (E1000_NVM_RW_REG_DONE &
+			    rd32(E1000_SRWR)) {
+				ret_val = 0;
+				break;
+			}
+			udelay(5);
+		}
+
+		if (ret_val) {
+			hw_dbg("Shadow RAM write EEWR timed out\n");
+			break;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the Shadow RAM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the Shadow RAM
+ *
+ *  Writes data to Shadow RAM at offset using EEWR register.
+ *
+ *  If e1000_update_nvm_checksum is not called after this function, the
+ *  data will not be committed to FLASH and also Shadow RAM will most likely
+ *  contain an invalid checksum.
+ *
+ *  If error code is returned, data and Shadow RAM may be inconsistent - buffer
+ *  partially written.
+ **/
+static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+				   u16 *data)
+{
+	s32 status = 0;
+	u16 i, count;
+
+	/* We cannot hold synchronization semaphores for too long,
+	 * because of forceful takeover procedure. However it is more efficient
+	 * to write in bursts than synchronizing access for each word.
+	 */
+	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
+		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+			E1000_EERD_EEWR_MAX_COUNT : (words - i);
+		if (!(hw->nvm.ops.acquire(hw))) {
+			status = igb_write_nvm_srwr(hw, offset, count,
+						      data + i);
+			hw->nvm.ops.release(hw);
+		} else {
+			status = E1000_ERR_SWFW_SYNC;
+		}
+
+		if (status)
+			break;
+	}
+
+	return status;
+}
+
+/**
+ *  igb_read_invm_word_i210 - Reads OTP
+ *  @hw: pointer to the HW structure
+ *  @address: the word address (aka eeprom offset) to read
+ *  @data: pointer to the data read
+ *
+ *  Reads 16-bit words from the OTP. Return error when the word is not
+ *  stored in OTP.
+ **/
+static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
+{
+	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+	u32 invm_dword;
+	u16 i;
+	u8 record_type, word_address;
+
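+	/*
+	 * Walk the OTP records; autoload structures span several dwords,
+	 * so the index is advanced past them when they are encountered.
+	 */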
+	for (i = 0; i < E1000_INVM_SIZE; i++) {
+		invm_dword = rd32(E1000_INVM_DATA_REG(i));
+		/* Get record type */
+		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
+		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
+			break;
+		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
+			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
+		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
+			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
+		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
+			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
+			if (word_address == address) {
+				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
+				hw_dbg("Read INVM Word 0x%02x = %x\n",
+					  address, *data);
+				status = 0;
+				break;
+			}
+		}
+	}
+	if (status)
+		hw_dbg("Requested word 0x%02x not found in OTP\n", address);
+	return status;
+}
+
+/**
+ * igb_read_invm_i210 - Read invm wrapper function for I210/I211
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of the word to read
+ *  @words: number of words to read
+ *  @data: pointer to the data read
+ *
+ *  Wrapper function to return data formerly found in the NVM.
+ **/
+static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
+				u16 words __always_unused, u16 *data)
+{
+	s32 ret_val = 0;
+
+	/* Only the MAC addr is required to be present in the iNVM */
+	switch (offset) {
+	case NVM_MAC_ADDR:
+		ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
+		ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1,
+						     &data[1]);
+		ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
+						     &data[2]);
+		if (ret_val)
+			hw_dbg("MAC Addr not found in iNVM\n");
+		break;
+	case NVM_INIT_CTRL_2:
+		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
+		if (ret_val) {
+			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
+			ret_val = 0;
+		}
+		break;
+	case NVM_INIT_CTRL_4:
+		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
+		if (ret_val) {
+			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
+			ret_val = 0;
+		}
+		break;
+	case NVM_LED_1_CFG:
+		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
+		if (ret_val) {
+			*data = NVM_LED_1_CFG_DEFAULT_I211;
+			ret_val = 0;
+		}
+		break;
+	case NVM_LED_0_2_CFG:
+		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
+		if (ret_val) {
+			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
+			ret_val = 0;
+		}
+		break;
+	case NVM_ID_LED_SETTINGS:
+		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
+		if (ret_val) {
+			*data = ID_LED_RESERVED_FFFF;
+			ret_val = 0;
+		}
+		break;
+	case NVM_SUB_DEV_ID:
+		*data = hw->subsystem_device_id;
+		break;
+	case NVM_SUB_VEN_ID:
+		*data = hw->subsystem_vendor_id;
+		break;
+	case NVM_DEV_ID:
+		*data = hw->device_id;
+		break;
+	case NVM_VEN_ID:
+		*data = hw->vendor_id;
+		break;
+	default:
+		hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
+		*data = NVM_RESERVED_WORD;
+		break;
+	}
+	return ret_val;
+}
+
+/**
+ *  igb_read_invm_version - Reads iNVM version and image type
+ *  @hw: pointer to the HW structure
+ *  @invm_ver: version structure for the version read
+ *
+ *  Reads iNVM version and image type.
+ **/
+s32 igb_read_invm_version(struct e1000_hw *hw,
+			  struct e1000_fw_version *invm_ver)
+{
+	u32 *record = NULL;
+	u32 *next_record = NULL;
+	u32 i = 0;
+	u32 invm_dword = 0;
+	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
+					     E1000_INVM_RECORD_SIZE_IN_BYTES);
+	u32 buffer[E1000_INVM_SIZE];
+	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+	u16 version = 0;
+
+	/* Read iNVM memory */
+	for (i = 0; i < E1000_INVM_SIZE; i++) {
+		invm_dword = rd32(E1000_INVM_DATA_REG(i));
+		buffer[i] = invm_dword;
+	}
+
+	/* Read version number */
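+	/*
+	 * Version records are searched from the end of the iNVM image
+	 * backwards; the value may sit in either the "field one" or
+	 * "field two" position of a record, hence the checks below.
+	 */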
+	for (i = 1; i < invm_blocks; i++) {
+		record = &buffer[invm_blocks - i];
+		next_record = &buffer[invm_blocks - i + 1];
+
+		/* Check if we have first version location used */
+		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
+			version = 0;
+			status = 0;
+			break;
+		}
+		/* Check if we have second version location used */
+		else if ((i == 1) &&
+			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
+			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+			status = 0;
+			break;
+		}
+		/* Check if we have odd version location
+		 * used and it is the last one used
+		 */
+		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
+			 ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
+			 (i != 1))) {
+			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
+				  >> 13;
+			status = 0;
+			break;
+		}
+		/* Check if we have even version location
+		 * used and it is the last one used
+		 */
+		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
+			 ((*record & 0x3) == 0)) {
+			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+			status = 0;
+			break;
+		}
+	}
+
+	if (!status) {
+		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
+					>> E1000_INVM_MAJOR_SHIFT;
+		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
+	}
+	/* Read Image Type */
+	for (i = 1; i < invm_blocks; i++) {
+		record = &buffer[invm_blocks - i];
+		next_record = &buffer[invm_blocks - i + 1];
+
+		/* Check if we have image type in first location used */
+		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
+			invm_ver->invm_img_type = 0;
+			status = 0;
+			break;
+		}
+		/* Check if we have image type in the last location used */
+		else if ((((*record & 0x3) == 0) &&
+			 ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
+			 ((((*record & 0x3) != 0) && (i != 1)))) {
+			invm_ver->invm_img_type =
+				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
+			status = 0;
+			break;
+		}
+	}
+	return status;
+}
+
+/**
+ *  igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
+{
+	s32 status = 0;
+	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
+
+	if (!(hw->nvm.ops.acquire(hw))) {
+
+		/* Replace the read function with semaphore grabbing with
+		 * the one that skips this for a while.
+		 * We have semaphore taken already here.
+		 */
+		read_op_ptr = hw->nvm.ops.read;
+		hw->nvm.ops.read = igb_read_nvm_eerd;
+
+		status = igb_validate_nvm_checksum(hw);
+
+		/* Revert original read operation. */
+		hw->nvm.ops.read = read_op_ptr;
+
+		hw->nvm.ops.release(hw);
+	} else {
+		status = E1000_ERR_SWFW_SYNC;
+	}
+
+	return status;
+}
+
+/**
+ *  igb_update_nvm_checksum_i210 - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM. Next commit EEPROM data onto the Flash.
+ **/
+static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	/* Read the first word from the EEPROM. If this times out or fails, do
+	 * not continue or we could be in for a very long wait while every
+	 * EEPROM read fails
+	 */
+	ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
+	if (ret_val) {
+		hw_dbg("EEPROM read failed\n");
+		goto out;
+	}
+
+	if (!(hw->nvm.ops.acquire(hw))) {
+		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
+		 * because we do not want to take the synchronization
+		 * semaphores twice here.
+		 */
+
+		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+			ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
+			if (ret_val) {
+				hw->nvm.ops.release(hw);
+				hw_dbg("NVM Read Error while updating checksum.\n");
+				goto out;
+			}
+			checksum += nvm_data;
+		}
+		checksum = (u16) NVM_SUM - checksum;
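+		/*
+		 * The checksum word is chosen so that the sum of words
+		 * 0..NVM_CHECKSUM_REG equals NVM_SUM (0xBABA), matching
+		 * what igb_validate_nvm_checksum() verifies.
+		 */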
+		ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
+						&checksum);
+		if (ret_val) {
+			hw->nvm.ops.release(hw);
+			hw_dbg("NVM Write Error while updating checksum.\n");
+			goto out;
+		}
+
+		hw->nvm.ops.release(hw);
+
+		ret_val = igb_update_flash_i210(hw);
+	} else {
+		ret_val = -E1000_ERR_SWFW_SYNC;
+	}
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_pool_flash_update_done_i210 - Poll FLUDONE status.
+ *  @hw: pointer to the HW structure
+ *
+ **/
+static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
+{
+	s32 ret_val = -E1000_ERR_NVM;
+	u32 i, reg;
+
+	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
+		reg = rd32(E1000_EECD);
+		if (reg & E1000_EECD_FLUDONE_I210) {
+			ret_val = 0;
+			break;
+		}
+		udelay(5);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  igb_get_flash_presence_i210 - Check if flash device is detected.
+ *  @hw: pointer to the HW structure
+ *
+ **/
+bool igb_get_flash_presence_i210(struct e1000_hw *hw)
+{
+	u32 eec = 0;
+	bool ret_val = false;
+
+	eec = rd32(E1000_EECD);
+	if (eec & E1000_EECD_FLASH_DETECTED_I210)
+		ret_val = true;
+
+	return ret_val;
+}
+
+/**
+ *  igb_update_flash_i210 - Commit EEPROM to the flash
+ *  @hw: pointer to the HW structure
+ *
+ **/
+static s32 igb_update_flash_i210(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u32 flup;
+
+	ret_val = igb_pool_flash_update_done_i210(hw);
+	if (ret_val == -E1000_ERR_NVM) {
+		hw_dbg("Flash update time out\n");
+		goto out;
+	}
+
+	flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
+	wr32(E1000_EECD, flup);
+
+	ret_val = igb_pool_flash_update_done_i210(hw);
+	if (ret_val)
+		hw_dbg("Flash update time out\n");
+	else
+		hw_dbg("Flash update complete\n");
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_valid_led_default_i210 - Verify a valid default LED config
+ *  @hw: pointer to the HW structure
+ *  @data: pointer to the NVM (EEPROM)
+ *
+ *  Read the EEPROM for the current default LED configuration.  If the
+ *  LED configuration is not valid, set to a valid LED configuration.
+ **/
+s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
+		switch (hw->phy.media_type) {
+		case e1000_media_type_internal_serdes:
+			*data = ID_LED_DEFAULT_I210_SERDES;
+			break;
+		case e1000_media_type_copper:
+		default:
+			*data = ID_LED_DEFAULT_I210;
+			break;
+		}
+	}
+out:
+	return ret_val;
+}
+
+/**
+ *  __igb_access_xmdio_reg - Read/write XMDIO register
+ *  @hw: pointer to the HW structure
+ *  @address: XMDIO address to program
+ *  @dev_addr: device address to program
+ *  @data: pointer to value to read/write from/to the XMDIO address
+ *  @read: boolean flag to indicate read or write
+ **/
+static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
+				  u8 dev_addr, u16 *data, bool read)
+{
+	s32 ret_val = 0;
+
+	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
+							 dev_addr);
+	if (ret_val)
+		return ret_val;
+
+	if (read)
+		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
+	else
+		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
+	if (ret_val)
+		return ret_val;
+
+	/* Recalibrate the device back to 0 */
+	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
+	if (ret_val)
+		return ret_val;
+
+	return ret_val;
+}
+
+/**
+ *  igb_read_xmdio_reg - Read XMDIO register
+ *  @hw: pointer to the HW structure
+ *  @addr: XMDIO address to program
+ *  @dev_addr: device address to program
+ *  @data: value to be read from the EMI address
+ **/
+s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
+{
+	return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
+}
+
+/**
+ *  igb_write_xmdio_reg - Write XMDIO register
+ *  @hw: pointer to the HW structure
+ *  @addr: XMDIO address to program
+ *  @dev_addr: device address to program
+ *  @data: value to be written to the XMDIO address
+ **/
+s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
+{
+	return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
+}
+
+/**
+ *  igb_init_nvm_params_i210 - Init NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	struct e1000_nvm_info *nvm = &hw->nvm;
+
+	nvm->ops.acquire = igb_acquire_nvm_i210;
+	nvm->ops.release = igb_release_nvm_i210;
+	nvm->ops.valid_led_default = igb_valid_led_default_i210;
+
+	/* NVM Function Pointers */
+	if (igb_get_flash_presence_i210(hw)) {
+		hw->nvm.type = e1000_nvm_flash_hw;
+		nvm->ops.read    = igb_read_nvm_srrd_i210;
+		nvm->ops.write   = igb_write_nvm_srwr_i210;
+		nvm->ops.validate = igb_validate_nvm_checksum_i210;
+		nvm->ops.update   = igb_update_nvm_checksum_i210;
+	} else {
+		hw->nvm.type = e1000_nvm_invm;
+		nvm->ops.read     = igb_read_invm_i210;
+		nvm->ops.write    = NULL;
+		nvm->ops.validate = NULL;
+		nvm->ops.update   = NULL;
+	}
+	return ret_val;
+}
+
+/**
+ * igb_pll_workaround_i210
+ * @hw: pointer to the HW structure
+ *
+ * Works around an erratum in the PLL circuit where it occasionally
+ * provides the wrong clock frequency after power up.
+ **/
+s32 igb_pll_workaround_i210(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
+	u16 nvm_word, phy_word, pci_word, tmp_nvm;
+	int i;
+
+	/* Get and set needed register values */
+	wuc = rd32(E1000_WUC);
+	mdicnfg = rd32(E1000_MDICNFG);
+	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
+	wr32(E1000_MDICNFG, reg_val);
+
+	/* Get data from NVM, or set default */
+	ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
+					  &nvm_word);
+	if (ret_val)
+		nvm_word = E1000_INVM_DEFAULT_AL;
+	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
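+	/*
+	 * Retry loop: check whether the PHY PLL came up at the right
+	 * frequency; if not, reset the PHY, force the autoload word and
+	 * bounce the function through D3, then check again.
+	 */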
+	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
+		/* check current state directly from internal PHY */
+		igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
+					 E1000_PHY_PLL_FREQ_REG), &phy_word);
+		if ((phy_word & E1000_PHY_PLL_UNCONF)
+		    != E1000_PHY_PLL_UNCONF) {
+			ret_val = 0;
+			break;
+		} else {
+			ret_val = -E1000_ERR_PHY;
+		}
+		/* directly reset the internal PHY */
+		ctrl = rd32(E1000_CTRL);
+		wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
+
+		ctrl_ext = rd32(E1000_CTRL_EXT);
+		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
+		wr32(E1000_CTRL_EXT, ctrl_ext);
+
+		wr32(E1000_WUC, 0);
+		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
+		wr32(E1000_EEARBC_I210, reg_val);
+
+		igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+		pci_word |= E1000_PCI_PMCSR_D3;
+		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+		usleep_range(1000, 2000);
+		pci_word &= ~E1000_PCI_PMCSR_D3;
+		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
+		wr32(E1000_EEARBC_I210, reg_val);
+
+		/* restore WUC register */
+		wr32(E1000_WUC, wuc);
+	}
+	/* restore MDICNFG setting */
+	wr32(E1000_MDICNFG, mdicnfg);
+	return ret_val;
+}
+++ linux-patched/drivers/xenomai/net/drivers/igb/e1000_mbx.h	2022-03-21 12:58:29.357889273 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/sja1000/rtcan_adv_pci.c	1970-01-01 01:00:00.000000000 +0100
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_MBX_H_
+#define _E1000_MBX_H_
+
+#include "e1000_hw.h"
+
+#define E1000_P2VMAILBOX_STS	0x00000001 /* Initiate message send to VF */
+#define E1000_P2VMAILBOX_ACK	0x00000002 /* Ack message recv'd from VF */
+#define E1000_P2VMAILBOX_VFU	0x00000004 /* VF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_PFU	0x00000008 /* PF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_RVFU	0x00000010 /* Reset VFU - used when VF stuck */
+
+#define E1000_MBVFICR_VFREQ_MASK	0x000000FF /* bits for VF messages */
+#define E1000_MBVFICR_VFREQ_VF1		0x00000001 /* bit for VF 1 message */
+#define E1000_MBVFICR_VFACK_MASK	0x00FF0000 /* bits for VF acks */
+#define E1000_MBVFICR_VFACK_VF1		0x00010000 /* bit for VF 1 ack */
+
+#define E1000_VFMAILBOX_SIZE	16 /* 16 32 bit words - 64 bytes */
+
+/* If it's an E1000_VF_* msg then it originates in the VF and is sent to the
+ * PF.  The reverse is true if it is E1000_PF_*.
+ * Message ACK's are the value or'd with 0xF0000000
+ */
+/* Messages below or'd with this are the ACK */
+#define E1000_VT_MSGTYPE_ACK	0x80000000
+/* Messages below or'd with this are the NACK */
+#define E1000_VT_MSGTYPE_NACK	0x40000000
+/* Indicates that VF is still clear to send requests */
+#define E1000_VT_MSGTYPE_CTS	0x20000000
+#define E1000_VT_MSGINFO_SHIFT	16
+/* bits 23:16 are used for extra info for certain messages */
+#define E1000_VT_MSGINFO_MASK	(0xFF << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_VF_RESET		0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR	0x02 /* VF requests to set MAC addr */
+#define E1000_VF_SET_MULTICAST	0x03 /* VF requests to set MC addr */
+#define E1000_VF_SET_VLAN	0x04 /* VF requests to set VLAN */
+#define E1000_VF_SET_LPE	0x05 /* VF requests to set VMOLR.LPE */
+#define E1000_VF_SET_PROMISC	0x06 /* VF requests to clear VMOLR.ROPE/MPME */
+#define E1000_VF_SET_PROMISC_MULTICAST	(0x02 << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_PF_CONTROL_MSG	0x0100 /* PF control message */
+
+s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 igb_check_for_msg(struct e1000_hw *, u16);
+s32 igb_check_for_ack(struct e1000_hw *, u16);
+s32 igb_check_for_rst(struct e1000_hw *, u16);
+s32 igb_init_mbx_params_pf(struct e1000_hw *);
+
+#endif /* _E1000_MBX_H_ */
+++ linux-patched/drivers/xenomai/can/sja1000/rtcan_adv_pci.c	2022-03-21 12:58:29.353889312 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/sja1000/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Copyright (C) 2012 Thierry Bultel <thierry.bultel@basystemes.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+
+#include <rtdm/driver.h>
+
+#define ADV_PCI_BASE_SIZE	0x80
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME "rtcan%d"
+#define RTCAN_DRV_NAME "ADV-PCI-CAN"
+
+static char *adv_pci_board_name = "ADV-PCI";
+
+MODULE_AUTHOR("Thierry Bultel <thierry.bultel@basystemes.fr>");
+MODULE_DESCRIPTION("RTCAN board driver for Advantech PCI cards");
+MODULE_LICENSE("GPL");
+
+struct rtcan_adv_pci {
+	struct pci_dev *pci_dev;
+	struct rtcan_device *slave_dev;
+	void __iomem *conf_addr;
+	void __iomem *base_addr;
+};
+
+/*
+ * According to the datasheet,
+ * internal clock is 1/2 of the external oscillator frequency
+ * which is 16 MHz
+ */
+#define ADV_PCI_CAN_CLOCK (16000000 / 2)
+
+/*
+ * Output control register
+ * Depends on the board configuration
+ */
+
+#define ADV_PCI_OCR (SJA_OCR_MODE_NORMAL	|\
+		     SJA_OCR_TX0_PUSHPULL	|\
+		     SJA_OCR_TX1_PUSHPULL	|\
+		     SJA_OCR_TX1_INVERT)
+
+/*
+ * In the CDR register, you should set CBP to 1.
+ */
+#define ADV_PCI_CDR (SJA_CDR_CBP | SJA_CDR_CAN_MODE)
+
+#define ADV_PCI_VENDOR_ID 0x13fe
+
+#define CHANNEL_SINGLE 0 /* this is a single channel device */
+#define CHANNEL_MASTER 1 /* multi channel device, this device is master */
+#define CHANNEL_SLAVE  2 /* multi channel device, this is slave */
+
+#define ADV_PCI_DEVICE(device_id)\
+	{ ADV_PCI_VENDOR_ID, device_id, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }
+
+static const struct pci_device_id adv_pci_tbl[] = {
+	ADV_PCI_DEVICE(0x1680),
+	ADV_PCI_DEVICE(0x3680),
+	ADV_PCI_DEVICE(0x2052),
+	ADV_PCI_DEVICE(0x1681),
+	ADV_PCI_DEVICE(0xc001),
+	ADV_PCI_DEVICE(0xc002),
+	ADV_PCI_DEVICE(0xc004),
+	ADV_PCI_DEVICE(0xc101),
+	ADV_PCI_DEVICE(0xc102),
+	ADV_PCI_DEVICE(0xc104),
+	/* required last entry */
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, adv_pci_tbl);
+
+static u8 rtcan_adv_pci_read_reg(struct rtcan_device *dev, int port)
+{
+	struct rtcan_adv_pci *board = (struct rtcan_adv_pci *)dev->board_priv;
+
+	return ioread8(board->base_addr + port);
+}
+
+static void rtcan_adv_pci_write_reg(struct rtcan_device *dev, int port, u8 data)
+{
+	struct rtcan_adv_pci *board = (struct rtcan_adv_pci *)dev->board_priv;
+
+	iowrite8(data, board->base_addr + port);
+}
+
+static void rtcan_adv_pci_del_chan(struct pci_dev *pdev,
+				   struct rtcan_device *dev)
+{
+	struct rtcan_adv_pci *board;
+
+	if (!dev)
+		return;
+
+	board = (struct rtcan_adv_pci *)dev->board_priv;
+
+	rtcan_sja1000_unregister(dev);
+
+	pci_iounmap(pdev, board->base_addr);
+
+	rtcan_dev_free(dev);
+}
+
+
+static int rtcan_adv_pci_add_chan(struct pci_dev *pdev,
+				  int channel,
+				  unsigned int bar,
+				  unsigned int offset,
+				  struct rtcan_device **master_dev)
+{
+	struct rtcan_device *dev;
+	struct rtcan_sja1000 *chip;
+	struct rtcan_adv_pci *board;
+	void __iomem *base_addr;
+	int ret;
+
+	dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000),
+			      sizeof(struct rtcan_adv_pci));
+	if (dev == NULL)
+		return -ENOMEM;
+
+	chip = (struct rtcan_sja1000 *)dev->priv;
+	board = (struct rtcan_adv_pci *)dev->board_priv;
+
+	if (channel == CHANNEL_SLAVE) {
+		struct rtcan_adv_pci *master_board =
+			(struct rtcan_adv_pci *)(*master_dev)->board_priv;
+		master_board->slave_dev = dev;
+
+		if (offset) {
+			base_addr = master_board->base_addr+offset;
+		} else {
+			base_addr = pci_iomap(pdev, bar, ADV_PCI_BASE_SIZE);
+			if (!base_addr) {
+				ret = -EIO;
+				goto failure;
+			}
+		}
+	} else {
+		/* Check the mapping before applying the channel offset so a
+		 * NULL return from pci_iomap() is still caught.
+		 */
+		base_addr = pci_iomap(pdev, bar, ADV_PCI_BASE_SIZE);
+		if (!base_addr) {
+			ret = -EIO;
+			goto failure;
+		}
+		base_addr += offset;
+	}
+
+	board->pci_dev = pdev;
+	board->conf_addr = NULL;
+	board->base_addr = base_addr;
+
+	dev->board_name = adv_pci_board_name;
+
+	chip->read_reg = rtcan_adv_pci_read_reg;
+	chip->write_reg = rtcan_adv_pci_write_reg;
+
+	/* Clock frequency in Hz */
+	dev->can_sys_clock = ADV_PCI_CAN_CLOCK;
+
+	/* Output control register */
+	chip->ocr = ADV_PCI_OCR;
+
+	/* Clock divider register */
+	chip->cdr = ADV_PCI_CDR;
+
+	strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+	/* Make sure SJA1000 is in reset mode */
+	chip->write_reg(dev, SJA_MOD, SJA_MOD_RM);
+	/* Set PeliCAN mode */
+	chip->write_reg(dev, SJA_CDR, SJA_CDR_CAN_MODE);
+
+	/* check if mode is set */
+	ret = chip->read_reg(dev, SJA_CDR);
+	if (ret != SJA_CDR_CAN_MODE) {
+		ret = -EIO;
+		goto failure_iounmap;
+	}
+
+	/* Register and setup interrupt handling */
+	chip->irq_flags = RTDM_IRQTYPE_SHARED;
+	chip->irq_num = pdev->irq;
+
+	RTCAN_DBG("%s: base_addr=%p conf_addr=%p irq=%d ocr=%#x cdr=%#x\n",
+		   RTCAN_DRV_NAME, board->base_addr, board->conf_addr,
+		   chip->irq_num, chip->ocr, chip->cdr);
+
+	/* Register SJA1000 device */
+	ret = rtcan_sja1000_register(dev);
+	if (ret) {
+		printk(KERN_ERR "ERROR %d while trying to register SJA1000 device!\n",
+		       ret);
+		goto failure_iounmap;
+	}
+
+	if (channel != CHANNEL_SLAVE)
+		*master_dev = dev;
+
+	return 0;
+
+failure_iounmap:
+	if (channel != CHANNEL_SLAVE || !offset)
+		pci_iounmap(pdev, base_addr);
+failure:
+	rtcan_dev_free(dev);
+
+	return ret;
+}
+
+static int adv_pci_init_one(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	int ret, channel;
+	unsigned int nb_ports = 0;
+	unsigned int bar = 0;
+	unsigned int bar_flag = 0;
+	unsigned int offset = 0;
+	unsigned int ix;
+
+	struct rtcan_device *master_dev = NULL;
+
+	if (!rtdm_available())
+		return -ENODEV;
+
+	dev_info(&pdev->dev, "RTCAN Registering card\n");
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		goto failure;
+
+	dev_info(&pdev->dev, "RTCAN detected Advantech PCI card at slot #%i\n",
+		 PCI_SLOT(pdev->devfn));
+
+	ret = pci_request_regions(pdev, RTCAN_DRV_NAME);
+	if (ret)
+		goto failure_device;
+
+	switch (pdev->device) {
+	case 0xc001:
+	case 0xc002:
+	case 0xc004:
+	case 0xc101:
+	case 0xc102:
+	case 0xc104:
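+		/*
+		 * For this family the low 3 bits of the device ID give the
+		 * channel count; channels sit at 0x100-byte offsets within
+		 * BAR 0.
+		 */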
+		nb_ports = pdev->device & 0x7;
+		offset = 0x100;
+		bar = 0;
+		break;
+	case 0x1680:
+	case 0x2052:
+		nb_ports = 2;
+		bar = 2;
+		bar_flag = 1;
+		break;
+	case 0x1681:
+		nb_ports = 1;
+		bar = 2;
+		bar_flag = 1;
+		break;
+	default:
+		goto failure_regions;
+	}
+
+	if (nb_ports > 1)
+		channel = CHANNEL_MASTER;
+	else
+		channel = CHANNEL_SINGLE;
+
+	RTCAN_DBG("%s: Initializing device %04x:%04x:%04x\n",
+		   RTCAN_DRV_NAME,
+		   pdev->vendor,
+		   pdev->device,
+		   pdev->subsystem_device);
+
+	ret = rtcan_adv_pci_add_chan(pdev, channel, bar, offset, &master_dev);
+	if (ret)
+		goto failure_iounmap;
+
+	/* register slave channel, if any */
+
+	for (ix = 1; ix < nb_ports; ix++) {
+		ret = rtcan_adv_pci_add_chan(pdev,
+					     CHANNEL_SLAVE,
+					     bar + (bar_flag ? ix : 0),
+					     offset * ix,
+					     &master_dev);
+		if (ret)
+			goto failure_iounmap;
+	}
+
+	pci_set_drvdata(pdev, master_dev);
+
+	return 0;
+
+failure_iounmap:
+	if (master_dev)
+		rtcan_adv_pci_del_chan(pdev, master_dev);
+
+failure_regions:
+	pci_release_regions(pdev);
+
+failure_device:
+	pci_disable_device(pdev);
+
+failure:
+	return ret;
+}
+
+static void adv_pci_remove_one(struct pci_dev *pdev)
+{
+	struct rtcan_device *dev = pci_get_drvdata(pdev);
+	struct rtcan_adv_pci *board = (struct rtcan_adv_pci *)dev->board_priv;
+
+	if (board->slave_dev)
+		rtcan_adv_pci_del_chan(pdev, board->slave_dev);
+
+	rtcan_adv_pci_del_chan(pdev, dev);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver rtcan_adv_pci_driver = {
+	.name = RTCAN_DRV_NAME,
+	.id_table = adv_pci_tbl,
+	.probe = adv_pci_init_one,
+	.remove = adv_pci_remove_one,
+};
+
+module_pci_driver(rtcan_adv_pci_driver);
+++ linux-patched/drivers/xenomai/can/sja1000/Kconfig	2022-03-21 12:58:29.348889361 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/sja1000/rtcan_isa.c	1970-01-01 01:00:00.000000000 +0100
+config XENO_DRIVERS_CAN_SJA1000
+	depends on XENO_DRIVERS_CAN
+	tristate "Philips SJA1000 CAN controller"
+	select XENO_DRIVERS_CAN_BUS_ERR
+
+config XENO_DRIVERS_CAN_SJA1000_ISA
+	depends on XENO_DRIVERS_CAN_SJA1000
+	tristate "Standard ISA controllers"
+	help
+
+	This driver is for CAN devices connected to the ISA bus of a PC
+	or a PC/104 system. The I/O port, interrupt number and a few other
+	hardware specific parameters can be defined via module parameters.
+
+config XENO_DRIVERS_CAN_SJA1000_MEM
+	depends on XENO_DRIVERS_CAN_SJA1000
+	tristate "Memory mapped controllers"
+	help
+
+	This driver is for memory mapped CAN devices. The memory address,
+	interrupt number and a few other hardware specific parameters can
+	be defined via module parameters.
+
+config XENO_DRIVERS_CAN_SJA1000_PEAK_PCI
+	depends on XENO_DRIVERS_CAN_SJA1000 && PCI
+	tristate "PEAK PCI Card"
+	help
+
+	This driver is for the PCAN PCI, the PC-PCI CAN plug-in card (1 or
+	2 channel) from PEAK Systems (http://www.peak-system.com). To get
+	the second channel working, Xenomai's shared interrupt support
+	must be enabled.
+
+config XENO_DRIVERS_CAN_SJA1000_IXXAT_PCI
+	depends on XENO_DRIVERS_CAN_SJA1000 && PCI
+	tristate "IXXAT PCI Card"
+	help
+
+	This driver is for the IXXAT PC-I 04/PCI card (1 or 2 channel)
+	from the IXXAT Automation GmbH (http://www.ixxat.de). To get
+	the second channel working, Xenomai's shared interrupt support
+	must be enabled.
+
+config XENO_DRIVERS_CAN_SJA1000_ADV_PCI
+	depends on XENO_DRIVERS_CAN_SJA1000 && PCI
+	tristate "ADVANTECH PCI Cards"
+	help
+
+	This driver is for the ADVANTECH PCI cards (1 or more channels).
+	It supports the 1680U and several other models.
+
+
+config XENO_DRIVERS_CAN_SJA1000_PLX_PCI
+	depends on XENO_DRIVERS_CAN_SJA1000 && PCI
+	tristate "PLX90xx PCI-bridge based Cards"
+	help
+
+	This driver is for CAN interface cards based on
+	the PLX90xx PCI bridge.
+	The driver currently supports:
+	 - Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/)
+	 - Adlink PCI-7841/cPCI-7841 SE card
+	 - esd CAN-PCI/CPCI/PCI104/200 (http://www.esd.eu/)
+	 - esd CAN-PCI/PMC/266
+	 - esd CAN-PCIe/2000
+	 - Marathon CAN-bus-PCI card (http://www.marathon.ru/)
+	 - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
+
+config XENO_DRIVERS_CAN_SJA1000_EMS_PCI
+	depends on XENO_DRIVERS_CAN_SJA1000 && PCI
+	tristate "EMS CPC PCI Card"
+	help
+
+	This driver is for the 2 channel CPC PCI card from EMS Dr. Thomas
+	Wünsche (http://www.ems-wuensche.de). To get the second channel
+	working, Xenomai's shared interrupt support must be enabled.
+
+config XENO_DRIVERS_CAN_SJA1000_ESD_PCI
+	depends on XENO_DRIVERS_CAN_SJA1000 && PCI
+	tristate "ESD PCI Cards (DEPRECATED)"
+	help
+
+	This driver supports the esd PCI CAN cards CAN-PCI/200,
+	CAN-PCI/266, CAN-PMC/266 (PMC), CAN-CPCI/200 (CompactPCI),
+	CAN-PCIe2000 (PCI Express) and CAN-PCI104/200 (PCI104)
+	from the esd electronic system design gmbh (http://www.esd.eu).
+
+	This driver is deprecated. Its functionality is now provided by
+	"PLX90xx PCI-bridge based Cards" driver.
+
+config XENO_DRIVERS_CAN_SJA1000_PEAK_DNG
+	depends on XENO_DRIVERS_CAN_SJA1000 && !PARPORT
+	tristate "PEAK Parallel Port Dongle"
+	help
+
+	This driver is for the PCAN Dongle, the PC parallel port to CAN
+	converter from PEAK Systems (http://www.peak-system.com). You need
+	to disable parallel port support in the kernel (CONFIG_PARPORT) for
+	proper operation. The interface type (sp or epp), I/O port and
+	interrupt number should be defined via module parameters.
+++ linux-patched/drivers/xenomai/can/sja1000/rtcan_isa.c	2022-03-21 12:58:29.344889400 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/sja1000/rtcan_peak_pci.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Copyright (C) 2005, 2006, 2009 Sebastian Smolorz
+ *                               <smolorz@rts.uni-hannover.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+
+#include <rtdm/driver.h>
+
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME    "rtcan%d"
+#define RTCAN_DRV_NAME    "sja1000-isa"
+
+#define RTCAN_ISA_MAX_DEV 4
+
+static char *isa_board_name = "ISA-Board";
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("RTCAN board driver for standard ISA boards");
+MODULE_LICENSE("GPL");
+
+static u16 io[RTCAN_ISA_MAX_DEV];
+static int irq[RTCAN_ISA_MAX_DEV];
+static u32 can_clock[RTCAN_ISA_MAX_DEV];
+static u8 ocr[RTCAN_ISA_MAX_DEV];
+static u8 cdr[RTCAN_ISA_MAX_DEV];
+
+module_param_array(io, ushort, NULL, 0444);
+module_param_array(irq, int, NULL, 0444);
+module_param_array(can_clock, uint, NULL, 0444);
+module_param_array(ocr, byte, NULL, 0444);
+module_param_array(cdr, byte, NULL, 0444);
+
+MODULE_PARM_DESC(io, "The io-port address");
+MODULE_PARM_DESC(irq, "The interrupt number");
+MODULE_PARM_DESC(can_clock, "External clock frequency (default 16 MHz)");
+MODULE_PARM_DESC(ocr, "Value of output control register (default 0x1a)");
+MODULE_PARM_DESC(cdr, "Value of clock divider register (default 0xc8)");
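+
+/*
+ * Illustrative usage (values are assumptions): loading the module with e.g.
+ * io=0x300,0x320 irq=10,11 sets up one controller per given I/O port.
+ */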
+
+#define RTCAN_ISA_PORT_SIZE 32
+
+struct rtcan_isa
+{
+	u16 io;
+};
+
+static struct rtcan_device *rtcan_isa_devs[RTCAN_ISA_MAX_DEV];
+
+static u8 rtcan_isa_readreg(struct rtcan_device *dev, int port)
+{
+	struct rtcan_isa *board = (struct rtcan_isa *)dev->board_priv;
+	return inb(board->io + port);
+}
+
+static void rtcan_isa_writereg(struct rtcan_device *dev, int port, u8 val)
+{
+	struct rtcan_isa *board = (struct rtcan_isa *)dev->board_priv;
+	outb(val, board->io + port);
+}
+
+
+int __init rtcan_isa_init_one(int idx)
+{
+	struct rtcan_device *dev;
+	struct rtcan_sja1000 *chip;
+	struct rtcan_isa *board;
+	int ret;
+
+	if ((dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000),
+				   sizeof(struct rtcan_isa))) == NULL)
+		return -ENOMEM;
+
+	chip = (struct rtcan_sja1000 *)dev->priv;
+	board = (struct rtcan_isa *)dev->board_priv;
+
+	dev->board_name = isa_board_name;
+
+	board->io = io[idx];
+
+	chip->irq_num = irq[idx];
+	chip->irq_flags = RTDM_IRQTYPE_SHARED | RTDM_IRQTYPE_EDGE;
+
+	chip->read_reg = rtcan_isa_readreg;
+	chip->write_reg = rtcan_isa_writereg;
+
+	/* Check and request I/O ports */
+	if (!request_region(board->io, RTCAN_ISA_PORT_SIZE, RTCAN_DRV_NAME)) {
+		ret = -EBUSY;
+		goto out_dev_free;
+	}
+
+	/* Clock frequency in Hz */
+	if (can_clock[idx])
+		dev->can_sys_clock = can_clock[idx] / 2;
+	else
+		dev->can_sys_clock = 8000000; /* 16/2 MHz */
+
+	/* Output control register */
+	if (ocr[idx])
+		chip->ocr = ocr[idx];
+	else
+		chip->ocr = SJA_OCR_MODE_NORMAL | SJA_OCR_TX0_PUSHPULL;
+
+	if (cdr[idx])
+		chip->cdr = cdr[idx];
+	else
+		chip->cdr = SJA_CDR_CAN_MODE | SJA_CDR_CLK_OFF | SJA_CDR_CBP;
+
+	strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+	ret = rtcan_sja1000_register(dev);
+	if (ret) {
+		printk(KERN_ERR "ERROR %d while trying to register SJA1000 "
+		       "device!\n", ret);
+		goto out_free_region;
+	}
+
+	rtcan_isa_devs[idx] = dev;
+	return 0;
+
+ out_free_region:
+	release_region(board->io, RTCAN_ISA_PORT_SIZE);
+
+ out_dev_free:
+	rtcan_dev_free(dev);
+
+	return ret;
+}
+
+static void rtcan_isa_exit(void);
+
+/** Init module */
+static int __init rtcan_isa_init(void)
+{
+	int i, err;
+	int devices = 0;
+
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	for (i = 0; i < RTCAN_ISA_MAX_DEV && io[i] != 0; i++) {
+		err = rtcan_isa_init_one(i);
+		if (err) {
+			rtcan_isa_exit();
+			return err;
+		}
+		devices++;
+	}
+	if (devices)
+		return 0;
+
+	printk(KERN_ERR "ERROR! No devices specified! "
+	       "Use io=<port1>[,...] irq=<irq1>[,...]\n");
+	return -EINVAL;
+}
+
+
+/** Cleanup module */
+static void rtcan_isa_exit(void)
+{
+	int i;
+	struct rtcan_device *dev;
+
+	for (i = 0; i < RTCAN_ISA_MAX_DEV; i++) {
+		dev = rtcan_isa_devs[i];
+		if (!dev)
+			continue;
+		rtcan_sja1000_unregister(dev);
+		release_region(io[i], RTCAN_ISA_PORT_SIZE);
+		rtcan_dev_free(dev);
+	}
+}
+
+module_init(rtcan_isa_init);
+module_exit(rtcan_isa_exit);
+++ linux-patched/drivers/xenomai/can/sja1000/rtcan_peak_pci.c	2022-03-21 12:58:29.339889449 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/sja1000/rtcan_ixxat_pci.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Derived from the PCAN project file driver/src/pcan_pci.c:
+ *
+ * Copyright (C) 2001-2006  PEAK System-Technik GmbH
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME    "rtcan%d"
+#define RTCAN_DRV_NAME    "PEAK-PCI-CAN"
+
+static char *peak_pci_board_name = "PEAK-PCI";
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("RTCAN board driver for PEAK-PCI cards");
+MODULE_LICENSE("GPL");
+
+struct rtcan_peak_pci
+{
+    struct pci_dev *pci_dev;
+    struct rtcan_device *slave_dev;
+    int channel;
+    volatile void __iomem *base_addr;
+    volatile void __iomem *conf_addr;
+};
+
+#define PEAK_PCI_CAN_SYS_CLOCK (16000000 / 2)
+
+#define PELICAN_SINGLE  (SJA_CDR_CAN_MODE | SJA_CDR_CBP | 0x07 | SJA_CDR_CLK_OFF)
+#define PELICAN_MASTER  (SJA_CDR_CAN_MODE | SJA_CDR_CBP | 0x07            )
+#define PELICAN_DEFAULT (SJA_CDR_CAN_MODE                                 )
+
+#define CHANNEL_SINGLE 0 /* this is a single channel device */
+#define CHANNEL_MASTER 1 /* multi channel device, this device is master */
+#define CHANNEL_SLAVE  2 /* multi channel device, this is slave */
+
+// important PITA registers
+#define PITA_ICR         0x00        // interrupt control register
+#define PITA_GPIOICR     0x18        // general purpose IO interface control register
+#define PITA_MISC        0x1C        // miscellaneous register
+
+#define PEAK_PCI_VENDOR_ID      0x001C  // the PCI device and vendor IDs
+#define PEAK_PCI_DEVICE_ID      0x0001  // Device ID for PCI and older PCIe cards
+#define PEAK_PCIE_DEVICE_ID     0x0003  // Device ID for newer PCIe cards (IPEH-003027)
+#define PEAK_CPCI_DEVICE_ID     0x0004  // for nextgen cPCI slot cards
+#define PEAK_MPCI_DEVICE_ID     0x0005  // for nextgen miniPCI slot cards
+#define PEAK_PC_104P_DEVICE_ID  0x0006  // PCAN-PC/104+ cards
+#define PEAK_PCI_104E_DEVICE_ID 0x0007  // PCAN-PCI/104 Express cards
+#define PEAK_MPCIE_DEVICE_ID    0x0008  // The miniPCIe slot cards
+#define PEAK_PCIE_OEM_ID        0x0009  // PCAN-PCI Express OEM
+
+#define PCI_CONFIG_PORT_SIZE 0x1000  // size of the config io-memory
+#define PCI_PORT_SIZE        0x0400  // size of a channel io-memory
+
+static struct pci_device_id peak_pci_tbl[] = {
+	{PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PEAK_MPCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{ }
+};
+MODULE_DEVICE_TABLE (pci, peak_pci_tbl);
+
+
+static u8 rtcan_peak_pci_read_reg(struct rtcan_device *dev, int port)
+{
+    struct rtcan_peak_pci *board = (struct rtcan_peak_pci *)dev->board_priv;
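+    /*
+     * SJA1000 registers on this board are spaced on 32-bit boundaries,
+     * hence the register index is shifted left by 2.
+     */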
+    return readb(board->base_addr + ((unsigned long)port << 2));
+}
+
+static void rtcan_peak_pci_write_reg(struct rtcan_device *dev, int port, u8 data)
+{
+    struct rtcan_peak_pci *board = (struct rtcan_peak_pci *)dev->board_priv;
+    writeb(data, board->base_addr + ((unsigned long)port << 2));
+}
+
+static void rtcan_peak_pci_irq_ack(struct rtcan_device *dev)
+{
+    struct rtcan_peak_pci *board = (struct rtcan_peak_pci *)dev->board_priv;
+    u16 pita_icr_low;
+
+    /* Select and clear the interrupt stored in the PITA */
+    pita_icr_low = readw(board->conf_addr + PITA_ICR);
+    if (board->channel == CHANNEL_SLAVE) {
+	if (pita_icr_low & 0x0001)
+	    writew(0x0001, board->conf_addr + PITA_ICR);
+    }
+    else {
+	if (pita_icr_low & 0x0002)
+	    writew(0x0002, board->conf_addr + PITA_ICR);
+    }
+}
+
+static void rtcan_peak_pci_del_chan(struct rtcan_device *dev,
+				    int init_step)
+{
+    struct rtcan_peak_pci *board;
+    u16 pita_icr_high;
+
+    if (!dev)
+	return;
+
+    board = (struct rtcan_peak_pci *)dev->board_priv;
+
+    switch (init_step) {
+    case 0:			/* Full cleanup */
+	printk("Removing %s %s device %s\n",
+	       peak_pci_board_name, dev->ctrl_name, dev->name);
+	rtcan_sja1000_unregister(dev);
+	fallthrough;
+    case 5:
+	pita_icr_high = readw(board->conf_addr + PITA_ICR + 2);
+	if (board->channel == CHANNEL_SLAVE) {
+	    pita_icr_high &= ~0x0001;
+	} else {
+	    pita_icr_high &= ~0x0002;
+	}
+	writew(pita_icr_high, board->conf_addr + PITA_ICR + 2);
+	fallthrough;
+    case 4:
+	iounmap((void *)board->base_addr);
+	fallthrough;
+    case 3:
+	if (board->channel != CHANNEL_SLAVE)
+	    iounmap((void *)board->conf_addr);
+	fallthrough;
+    case 2:
+	rtcan_dev_free(dev);
+	fallthrough;
+    case 1:
+	break;
+    }
+
+}
+
+static int rtcan_peak_pci_add_chan(struct pci_dev *pdev, int channel,
+				   struct rtcan_device **master_dev)
+{
+    struct rtcan_device *dev;
+    struct rtcan_sja1000 *chip;
+    struct rtcan_peak_pci *board;
+    u16 pita_icr_high;
+    unsigned long addr;
+    int ret, init_step = 1;
+
+    dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000),
+			  sizeof(struct rtcan_peak_pci));
+    if (dev == NULL)
+	return -ENOMEM;
+    init_step = 2;
+
+    chip = (struct rtcan_sja1000 *)dev->priv;
+    board = (struct rtcan_peak_pci *)dev->board_priv;
+
+    board->pci_dev = pdev;
+    board->channel = channel;
+
+    if (channel != CHANNEL_SLAVE) {
+
+	addr = pci_resource_start(pdev, 0);
+	board->conf_addr = ioremap(addr, PCI_CONFIG_PORT_SIZE);
+	if (!board->conf_addr) {
+	    ret = -ENODEV;
+	    goto failure;
+	}
+	init_step = 3;
+
+	/* Set GPIO control register */
+	writew(0x0005, board->conf_addr + PITA_GPIOICR + 2);
+
+	if (channel == CHANNEL_MASTER)
+	    writeb(0x00, board->conf_addr + PITA_GPIOICR); /* enable both */
+	else
+	    writeb(0x04, board->conf_addr + PITA_GPIOICR); /* enable single */
+
+	writeb(0x05, board->conf_addr + PITA_MISC + 3);  /* toggle reset */
+	mdelay(5);
+	writeb(0x04, board->conf_addr + PITA_MISC + 3);  /* leave parport mux mode */
+    } else {
+	struct rtcan_peak_pci *master_board =
+	    (struct rtcan_peak_pci *)(*master_dev)->board_priv;
+	master_board->slave_dev = dev;
+	board->conf_addr = master_board->conf_addr;
+    }
+
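+    /*
+     * Both SJA1000 controllers live in BAR 1; the slave channel's register
+     * window starts PCI_PORT_SIZE (0x400) bytes above the master's.
+     */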
+    addr = pci_resource_start(pdev, 1);
+    if (channel == CHANNEL_SLAVE)
+	addr += 0x400;
+
+    board->base_addr = ioremap(addr, PCI_PORT_SIZE);
+    if (!board->base_addr) {
+	ret = -ENODEV;
+	goto failure;
+    }
+    init_step = 4;
+
+    dev->board_name = peak_pci_board_name;
+
+    chip->read_reg = rtcan_peak_pci_read_reg;
+    chip->write_reg = rtcan_peak_pci_write_reg;
+    chip->irq_ack = rtcan_peak_pci_irq_ack;
+
+    /* Clock frequency in Hz */
+    dev->can_sys_clock = PEAK_PCI_CAN_SYS_CLOCK;
+
+    /* Output control register */
+    chip->ocr = SJA_OCR_MODE_NORMAL | SJA_OCR_TX0_PUSHPULL;
+
+    /* Clock divider register */
+    if (channel == CHANNEL_MASTER)
+	chip->cdr = PELICAN_MASTER;
+    else
+	chip->cdr = PELICAN_SINGLE;
+
+    strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+    /* Register and setup interrupt handling */
+    chip->irq_flags = RTDM_IRQTYPE_SHARED;
+    chip->irq_num = pdev->irq;
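+    /* Unmask this channel's interrupt in the PITA ICR (bit 0: slave, bit 1: master) */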
+    pita_icr_high = readw(board->conf_addr + PITA_ICR + 2);
+    if (channel == CHANNEL_SLAVE) {
+	pita_icr_high |= 0x0001;
+    } else {
+	pita_icr_high |= 0x0002;
+    }
+    writew(pita_icr_high, board->conf_addr + PITA_ICR + 2);
+    init_step = 5;
+
+    printk("%s: base_addr=%p conf_addr=%p irq=%d\n", RTCAN_DRV_NAME,
+	   board->base_addr, board->conf_addr, chip->irq_num);
+
+    /* Register SJA1000 device */
+    ret = rtcan_sja1000_register(dev);
+    if (ret) {
+	printk(KERN_ERR
+	       "ERROR %d while trying to register SJA1000 device!\n", ret);
+	goto failure;
+    }
+
+    if (channel != CHANNEL_SLAVE)
+	*master_dev = dev;
+
+    return 0;
+
+ failure:
+    rtcan_peak_pci_del_chan(dev, init_step);
+    return ret;
+}
+
+static int peak_pci_init_one(struct pci_dev *pdev,
+			     const struct pci_device_id *ent)
+{
+    int ret;
+    u16 sub_sys_id;
+    struct rtcan_device *master_dev = NULL;
+
+    if (!rtdm_available())
+	return -ENODEV;
+
+    printk("%s: initializing device %04x:%04x\n",
+	   RTCAN_DRV_NAME,  pdev->vendor, pdev->device);
+
+    if ((ret = pci_enable_device (pdev)))
+	goto failure;
+
+    if ((ret = pci_request_regions(pdev, RTCAN_DRV_NAME)))
+	goto failure;
+
+    if ((ret = pci_read_config_word(pdev, 0x2e, &sub_sys_id)))
+	goto failure_cleanup;
+
+    /* Enable memory space */
+    if ((ret = pci_write_config_word(pdev, 0x04, 2)))
+	goto failure_cleanup;
+
+    if ((ret = pci_write_config_word(pdev, 0x44, 0)))
+	goto failure_cleanup;
+
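+    /*
+     * A subsystem ID above 3 denotes a dual-channel card: register the
+     * master channel first, then the slave channel which shares its
+     * configuration space.
+     */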
+    if (sub_sys_id > 3) {
+	if ((ret = rtcan_peak_pci_add_chan(pdev, CHANNEL_MASTER,
+					   &master_dev)))
+	    goto failure_cleanup;
+	if ((ret = rtcan_peak_pci_add_chan(pdev, CHANNEL_SLAVE,
+					   &master_dev)))
+	    goto failure_cleanup;
+    } else {
+	if ((ret = rtcan_peak_pci_add_chan(pdev, CHANNEL_SINGLE,
+					   &master_dev)))
+	    goto failure_cleanup;
+    }
+
+    pci_set_drvdata(pdev, master_dev);
+    return 0;
+
+ failure_cleanup:
+    if (master_dev)
+	rtcan_peak_pci_del_chan(master_dev, 0);
+
+    pci_release_regions(pdev);
+
+ failure:
+    return ret;
+
+}
+
+static void peak_pci_remove_one(struct pci_dev *pdev)
+{
+    struct rtcan_device *dev = pci_get_drvdata(pdev);
+    struct rtcan_peak_pci *board = (struct rtcan_peak_pci *)dev->board_priv;
+
+    if (board->slave_dev)
+	rtcan_peak_pci_del_chan(board->slave_dev, 0);
+    rtcan_peak_pci_del_chan(dev, 0);
+
+    pci_release_regions(pdev);
+    pci_disable_device(pdev);
+    pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver rtcan_peak_pci_driver = {
+	.name		= RTCAN_DRV_NAME,
+	.id_table	= peak_pci_tbl,
+	.probe		= peak_pci_init_one,
+	.remove		= peak_pci_remove_one,
+};
+
+module_pci_driver(rtcan_peak_pci_driver);
+++ linux-patched/drivers/xenomai/can/sja1000/rtcan_ixxat_pci.c	2022-03-21 12:58:29.335889488 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/sja1000/rtcan_plx_pci.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME "rtcan%d"
+#define RTCAN_DRV_NAME "IXXAT-PCI-CAN"
+
+static char *ixxat_pci_board_name = "IXXAT-PCI";
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("RTCAN board driver for IXXAT-PCI cards");
+MODULE_LICENSE("GPL");
+
+struct rtcan_ixxat_pci
+{
+    struct pci_dev *pci_dev;
+    struct rtcan_device *slave_dev;
+    int conf_addr;
+    void __iomem *base_addr;
+};
+
+#define IXXAT_PCI_CAN_SYS_CLOCK (16000000 / 2)
+
+#define CHANNEL_SINGLE 0 /* this is a single channel device */
+#define CHANNEL_MASTER 1 /* multi channel device, this device is master */
+#define CHANNEL_SLAVE  2 /* multi channel device, this is slave */
+
+#define CHANNEL_OFFSET       0x200
+#define CHANNEL_MASTER_RESET 0x110
+#define CHANNEL_SLAVE_RESET  (CHANNEL_MASTER_RESET + CHANNEL_OFFSET)
+
+#define IXXAT_INTCSR_OFFSET  0x4c /* Offset in PLX9050 conf registers */
+#define IXXAT_INTCSR_SLAVE   0x41 /* LINT1 and PCI interrupt enabled */
+#define IXXAT_INTCSR_MASTER  0x08 /* LINT2 enabled */
+#define IXXAT_SJA_MOD_MASK   0xa1 /* Mask for reading dual/single channel */
+
+/* PCI vendor, device and subsystem IDs */
+#define IXXAT_PCI_VENDOR_ID  0x10b5
+#define IXXAT_PCI_DEVICE_ID  0x9050
+#define IXXAT_PCI_SUB_SYS_ID 0x2540
+
+#define IXXAT_CONF_PORT_SIZE 0x0080
+#define IXXAT_BASE_PORT_SIZE 0x0400
+
+static struct pci_device_id ixxat_pci_tbl[] = {
+	{IXXAT_PCI_VENDOR_ID, IXXAT_PCI_DEVICE_ID,
+	 IXXAT_PCI_VENDOR_ID, IXXAT_PCI_SUB_SYS_ID, 0, 0, 0},
+	{ }
+};
+MODULE_DEVICE_TABLE(pci, ixxat_pci_tbl);
+
+
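+/*
+ * Unlike the PEAK boards above, the IXXAT cards map the SJA1000 registers
+ * contiguously, so the register offset is used unmodified.
+ */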
+static u8 rtcan_ixxat_pci_read_reg(struct rtcan_device *dev, int port)
+{
+    struct rtcan_ixxat_pci *board = (struct rtcan_ixxat_pci *)dev->board_priv;
+    return readb(board->base_addr + port);
+}
+
+static void rtcan_ixxat_pci_write_reg(struct rtcan_device *dev, int port, u8 data)
+{
+    struct rtcan_ixxat_pci *board = (struct rtcan_ixxat_pci *)dev->board_priv;
+    writeb(data, board->base_addr + port);
+}
+
+static void rtcan_ixxat_pci_del_chan(struct rtcan_device *dev)
+{
+    struct rtcan_ixxat_pci *board;
+    u8 intcsr;
+
+    if (!dev)
+	return;
+
+    board = (struct rtcan_ixxat_pci *)dev->board_priv;
+
+    printk("Removing %s %s device %s\n",
+	   ixxat_pci_board_name, dev->ctrl_name, dev->name);
+
+    rtcan_sja1000_unregister(dev);
+
+    /* Disable PCI interrupts */
+    intcsr = inb(board->conf_addr + IXXAT_INTCSR_OFFSET);
+    if (board->slave_dev) {
+	intcsr &= ~IXXAT_INTCSR_MASTER;
+	outb(intcsr, board->conf_addr + IXXAT_INTCSR_OFFSET);
+	writeb(0x1, board->base_addr + CHANNEL_MASTER_RESET);
+	iounmap(board->base_addr);
+    } else {
+	intcsr &= ~IXXAT_INTCSR_SLAVE;
+	outb(intcsr, board->conf_addr + IXXAT_INTCSR_OFFSET);
+	writeb(0x1, board->base_addr + CHANNEL_SLAVE_RESET);
+    }
+    rtcan_dev_free(dev);
+}
+
+static int rtcan_ixxat_pci_add_chan(struct pci_dev *pdev,
+				    int channel,
+				    struct rtcan_device **master_dev,
+				    int conf_addr,
+				    void __iomem *base_addr)
+{
+    struct rtcan_device *dev;
+    struct rtcan_sja1000 *chip;
+    struct rtcan_ixxat_pci *board;
+    u8 intcsr;
+    int ret;
+
+    dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000),
+			  sizeof(struct rtcan_ixxat_pci));
+    if (dev == NULL)
+	return -ENOMEM;
+
+    chip = (struct rtcan_sja1000 *)dev->priv;
+    board = (struct rtcan_ixxat_pci *)dev->board_priv;
+
+    board->pci_dev = pdev;
+    board->conf_addr = conf_addr;
+    board->base_addr = base_addr;
+
+    if (channel == CHANNEL_SLAVE) {
+	struct rtcan_ixxat_pci *master_board =
+	    (struct rtcan_ixxat_pci *)(*master_dev)->board_priv;
+	master_board->slave_dev = dev;
+    }
+
+    dev->board_name = ixxat_pci_board_name;
+
+    chip->read_reg = rtcan_ixxat_pci_read_reg;
+    chip->write_reg = rtcan_ixxat_pci_write_reg;
+
+    /* Clock frequency in Hz */
+    dev->can_sys_clock = IXXAT_PCI_CAN_SYS_CLOCK;
+
+    /* Output control register */
+    chip->ocr = (SJA_OCR_MODE_NORMAL | SJA_OCR_TX0_INVERT |
+		 SJA_OCR_TX0_PUSHPULL | SJA_OCR_TX1_PUSHPULL);
+
+    /* Clock divider register */
+    chip->cdr = SJA_CDR_CAN_MODE;
+
+    strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+    /* Enable PCI interrupts */
+    intcsr = inb(board->conf_addr + IXXAT_INTCSR_OFFSET);
+    if (channel == CHANNEL_SLAVE)
+	intcsr |= IXXAT_INTCSR_SLAVE;
+    else
+	intcsr |= IXXAT_INTCSR_MASTER;
+    outb(intcsr, board->conf_addr + IXXAT_INTCSR_OFFSET);
+
+    /* Register and setup interrupt handling */
+    chip->irq_flags = RTDM_IRQTYPE_SHARED;
+    chip->irq_num = pdev->irq;
+
+    RTCAN_DBG("%s: base_addr=0x%p conf_addr=%#x irq=%d ocr=%#x cdr=%#x\n",
+	      RTCAN_DRV_NAME, board->base_addr, board->conf_addr,
+	      chip->irq_num, chip->ocr, chip->cdr);
+
+    /* Register SJA1000 device */
+    ret = rtcan_sja1000_register(dev);
+    if (ret) {
+	printk(KERN_ERR "ERROR %d while trying to register SJA1000 device!\n",
+	       ret);
+	goto failure;
+    }
+
+    if (channel != CHANNEL_SLAVE)
+	*master_dev = dev;
+
+    return 0;
+
+ failure:
+    rtcan_dev_free(dev);
+    return ret;
+}
+
+static int ixxat_pci_init_one(struct pci_dev *pdev,
+			      const struct pci_device_id *ent)
+{
+    int ret, channel, conf_addr;
+    unsigned long addr;
+    void __iomem *base_addr;
+    struct rtcan_device *master_dev = NULL;
+
+    if (!rtdm_available())
+	return -ENODEV;
+
+    if ((ret = pci_enable_device (pdev)))
+	goto failure;
+
+    if ((ret = pci_request_regions(pdev, RTCAN_DRV_NAME)))
+	goto failure;
+
+    RTCAN_DBG("%s: Initializing device %04x:%04x:%04x\n",
+	      RTCAN_DRV_NAME, pdev->vendor, pdev->device,
+	      pdev->subsystem_device);
+
+    /* Enable memory and I/O space */
+    if ((ret = pci_write_config_word(pdev, 0x04, 0x3)))
+	goto failure_release_pci;
+
+    conf_addr = pci_resource_start(pdev, 1);
+
+    addr = pci_resource_start(pdev, 2);
+    base_addr = ioremap(addr, IXXAT_BASE_PORT_SIZE);
+    if (!base_addr) {
+	ret = -ENODEV;
+	goto failure_release_pci;
+    }
+
+    /* Check if second channel is available after reset */
+    writeb(0x1, base_addr + CHANNEL_MASTER_RESET);
+    writeb(0x1, base_addr + CHANNEL_SLAVE_RESET);
+    udelay(100);
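+    /*
+     * The values checked below are the SJA1000 BasicCAN reset defaults
+     * (MOD=0x21, SR=0x0c, IR=0xe0); if the second controller does not
+     * report them, the card only provides a single channel.
+     */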
+    if ((readb(base_addr + CHANNEL_OFFSET + SJA_MOD) & IXXAT_SJA_MOD_MASK) != 0x21 ||
+	readb(base_addr + CHANNEL_OFFSET + SJA_SR) != 0x0c ||
+	readb(base_addr + CHANNEL_OFFSET + SJA_IR) != 0xe0)
+	channel = CHANNEL_SINGLE;
+    else
+	channel = CHANNEL_MASTER;
+
+    if ((ret = rtcan_ixxat_pci_add_chan(pdev, channel, &master_dev,
+					conf_addr, base_addr)))
+	goto failure_iounmap;
+
+    if (channel != CHANNEL_SINGLE) {
+	channel = CHANNEL_SLAVE;
+	if ((ret = rtcan_ixxat_pci_add_chan(pdev, channel,
+					    &master_dev, conf_addr,
+					    base_addr + CHANNEL_OFFSET)))
+	    goto failure_iounmap;
+    }
+
+    pci_set_drvdata(pdev, master_dev);
+    return 0;
+
+failure_iounmap:
+    if (master_dev)
+	rtcan_ixxat_pci_del_chan(master_dev);
+    iounmap(base_addr);
+
+failure_release_pci:
+    pci_release_regions(pdev);
+
+failure:
+    return ret;
+}
+
+static void ixxat_pci_remove_one(struct pci_dev *pdev)
+{
+    struct rtcan_device *dev = pci_get_drvdata(pdev);
+    struct rtcan_ixxat_pci *board = (struct rtcan_ixxat_pci *)dev->board_priv;
+
+    if (board->slave_dev)
+	rtcan_ixxat_pci_del_chan(board->slave_dev);
+    rtcan_ixxat_pci_del_chan(dev);
+
+    pci_release_regions(pdev);
+    pci_disable_device(pdev);
+    pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver rtcan_ixxat_pci_driver = {
+	.name = RTCAN_DRV_NAME,
+	.id_table = ixxat_pci_tbl,
+	.probe = ixxat_pci_init_one,
+	.remove = ixxat_pci_remove_one,
+};
+
+module_pci_driver(rtcan_ixxat_pci_driver);
+++ linux-patched/drivers/xenomai/can/sja1000/rtcan_plx_pci.c	2022-03-21 12:58:29.330889537 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/sja1000/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008-2010 Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>
+ *
+ * Derived from the ems_pci.c driver:
+ *	Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
+ *	Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com>
+ *	Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DRV_NAME "rt_sja1000_plx_pci"
+#define RTCAN_DEV_NAME "rtcan%d"
+
+MODULE_AUTHOR("Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>");
+MODULE_DESCRIPTION("RTCAN driver for PLX90xx PCI-bridge cards with "
+		   "the SJA1000 chips");
+MODULE_LICENSE("GPL v2");
+
+#define PLX_PCI_MAX_CHAN 2
+
+struct plx_pci_card {
+	int channels;			/* detected channels count */
+	struct rtcan_device *rtcan_dev[PLX_PCI_MAX_CHAN];
+	void __iomem *conf_addr;
+
+	/* Pointer to device-dependent reset function */
+	void (*reset_func)(struct pci_dev *pdev);
+};
+
+#define PLX_PCI_CAN_CLOCK (16000000 / 2)
+
+/* PLX9030/9050/9052 registers */
+#define PLX_INTCSR	0x4c		/* Interrupt Control/Status */
+#define PLX_CNTRL	0x50		/* User I/O, Direct Slave Response,
+					 * Serial EEPROM, and Initialization
+					 * Control register
+					 */
+
+#define PLX_LINT1_EN	0x1		/* Local interrupt 1 enable */
+#define PLX_LINT2_EN	(1 << 3)	/* Local interrupt 2 enable */
+#define PLX_PCI_INT_EN	(1 << 6)	/* PCI Interrupt Enable */
+#define PLX_PCI_RESET	(1 << 30)	/* PCI Adapter Software Reset */
+
+/* PLX9056 registers */
+#define PLX9056_INTCSR	0x68		/* Interrupt Control/Status */
+#define PLX9056_CNTRL	0x6c		/* Control / Software Reset */
+
+#define PLX9056_LINTI	(1 << 11)
+#define PLX9056_PCI_INT_EN (1 << 8)
+#define PLX9056_PCI_RCR	(1 << 29)	/* Read Configuration Registers */
+
+/*
+ * The board configuration is probably as follows:
+ * RX1 is connected to ground.
+ * TX1 is not connected.
+ * CLKO is not connected.
+ * Setting the OCR register to 0xDA is a good idea.
+ * This means normal output mode, push-pull and the correct polarity.
+ */
+#define PLX_PCI_OCR	(SJA_OCR_MODE_NORMAL | SJA_OCR_TX0_PUSHPULL | SJA_OCR_TX1_PUSHPULL)
+
+/*
+ * In the CDR register, you should set CBP to 1.
+ * You will probably also want to set the clock divider value to 7
+ * (meaning direct oscillator output) because the second SJA1000 chip
+ * is driven by the first one's CLKOUT output.
+ */
+#define PLX_PCI_CDR			(SJA_CDR_CBP | SJA_CDR_CAN_MODE)
+
+/* SJA1000 Control Register in the BasicCAN Mode */
+#define SJA_CR				0x00
+
+/* States of some SJA1000 registers after hardware reset in the BasicCAN mode*/
+#define REG_CR_BASICCAN_INITIAL		0x21
+#define REG_CR_BASICCAN_INITIAL_MASK	0xa1
+#define REG_SR_BASICCAN_INITIAL		0x0c
+#define REG_IR_BASICCAN_INITIAL		0xe0
+
+/* States of some SJA1000 registers after hardware reset in the PeliCAN mode*/
+#define REG_MOD_PELICAN_INITIAL		0x01
+#define REG_SR_PELICAN_INITIAL		0x3c
+#define REG_IR_PELICAN_INITIAL		0x00
+
+#define ADLINK_PCI_VENDOR_ID		0x144A
+#define ADLINK_PCI_DEVICE_ID		0x7841
+
+#define ESD_PCI_SUB_SYS_ID_PCI200	0x0004
+#define ESD_PCI_SUB_SYS_ID_PCI266	0x0009
+#define ESD_PCI_SUB_SYS_ID_PMC266	0x000e
+#define ESD_PCI_SUB_SYS_ID_CPCI200	0x010b
+#define ESD_PCI_SUB_SYS_ID_PCIE2000	0x0200
+#define ESD_PCI_SUB_SYS_ID_PCI104200	0x0501
+
+#define MARATHON_PCI_DEVICE_ID		0x2715
+
+#define TEWS_PCI_VENDOR_ID		0x1498
+#define TEWS_PCI_DEVICE_ID_TMPC810	0x032A
+
+static void plx_pci_reset_common(struct pci_dev *pdev);
+static void plx_pci_reset_marathon(struct pci_dev *pdev);
+static void plx9056_pci_reset_common(struct pci_dev *pdev);
+
+struct plx_pci_channel_map {
+	u32 bar;
+	u32 offset;
+	u32 size;		/* 0x00 - auto, i.e. the length of the entire BAR */
+};
+
+struct plx_pci_card_info {
+	const char *name;
+	int channel_count;
+	u32 can_clock;
+	u8 ocr;			/* output control register */
+	u8 cdr;			/* clock divider register */
+
+	/* Parameters for mapping local configuration space */
+	struct plx_pci_channel_map conf_map;
+
+	/* Parameters for mapping the SJA1000 chips */
+	struct plx_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CHAN];
+
+	/* Pointer to device-dependent reset function */
+	void (*reset_func)(struct pci_dev *pdev);
+};
+
+static struct plx_pci_card_info plx_pci_card_info_adlink = {
+	"Adlink PCI-7841/cPCI-7841", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{1, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
+	&plx_pci_reset_common
+	/* based on PLX9052 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_adlink_se = {
+	"Adlink PCI-7841/cPCI-7841 SE", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
+	&plx_pci_reset_common
+	/* based on PLX9052 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_esd200 = {
+	"esd CAN-PCI/CPCI/PCI104/200", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
+	&plx_pci_reset_common
+	/* based on PLX9030/9050 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_esd266 = {
+	"esd CAN-PCI/PMC/266", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
+	&plx9056_pci_reset_common
+	/* based on PLX9056 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_esd2000 = {
+	"esd CAN-PCIe/2000", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
+	&plx9056_pci_reset_common
+	/* based on PEX8311 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_marathon = {
+	"Marathon CAN-bus-PCI", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} },
+	&plx_pci_reset_marathon
+	/* based on PLX9052 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_tews = {
+	"TEWS TECHNOLOGIES TPMC810", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} },
+	&plx_pci_reset_common
+	/* based on PLX9030 */
+};
+
+static const struct pci_device_id plx_pci_tbl[] = {
+	{
+		/* Adlink PCI-7841/cPCI-7841 */
+		ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID,
+		PCI_ANY_ID, PCI_ANY_ID,
+		PCI_CLASS_NETWORK_OTHER << 8, ~0,
+		(kernel_ulong_t)&plx_pci_card_info_adlink
+	},
+	{
+		/* Adlink PCI-7841/cPCI-7841 SE */
+		ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID,
+		PCI_ANY_ID, PCI_ANY_ID,
+		PCI_CLASS_COMMUNICATION_OTHER << 8, ~0,
+		(kernel_ulong_t)&plx_pci_card_info_adlink_se
+	},
+	{
+		/* esd CAN-PCI/200 */
+		PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+		PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI200,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_esd200
+	},
+	{
+		/* esd CAN-CPCI/200 */
+		PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+		PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_CPCI200,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_esd200
+	},
+	{
+		/* esd CAN-PCI104/200 */
+		PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+		PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI104200,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_esd200
+	},
+	{
+		/* esd CAN-PCI/266 */
+		PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+		PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI266,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_esd266
+	},
+	{
+		/* esd CAN-PMC/266 */
+		PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+		PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PMC266,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_esd266
+	},
+	{
+		/* esd CAN-PCIE/2000 */
+		PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+		PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCIE2000,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_esd2000
+	},
+	{
+		/* Marathon CAN-bus-PCI card */
+		PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_marathon
+	},
+	{
+		/* TEWS TECHNOLOGIES TPMC810 card */
+		TEWS_PCI_VENDOR_ID, TEWS_PCI_DEVICE_ID_TMPC810,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_tews
+	},
+	{ 0,}
+};
+MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
+
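+/*
+ * dev->base_addr holds the already iomapped (and channel-offset) SJA1000
+ * address stored as an unsigned long, so the accessors below cast it back
+ * to an __iomem pointer.
+ */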
+static u8 plx_pci_read_reg(struct rtcan_device *dev, int port)
+{
+	return ioread8((void __iomem *)dev->base_addr + port);
+}
+
+static void plx_pci_write_reg(struct rtcan_device *dev, int port, u8 val)
+{
+	iowrite8(val, (void __iomem *)dev->base_addr + port);
+}
+
+/*
+ * Check if a CAN controller is present at the specified location
+ * by trying to switch 'em from the Basic mode into the PeliCAN mode.
+ * Also check states of some registers in reset mode.
+ */
+static inline int plx_pci_check_sja1000(struct rtcan_device *dev)
+{
+	int flag = 0;
+
+	struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv;
+
+	/*
+	 * Check registers after hardware reset (the Basic mode)
+	 * See states on p. 10 of the Datasheet.
+	 */
+	if ((chip->read_reg(dev, SJA_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
+	    REG_CR_BASICCAN_INITIAL &&
+	    (chip->read_reg(dev, SJA_SR) == REG_SR_BASICCAN_INITIAL) &&
+	    (chip->read_reg(dev, SJA_IR) == REG_IR_BASICCAN_INITIAL))
+		flag = 1;
+
+	/* Bring the SJA1000 into the PeliCAN mode*/
+	chip->write_reg(dev, SJA_CDR, SJA_CDR_CAN_MODE);
+
+	/*
+	 * Check registers after reset in the PeliCAN mode.
+	 * See states on p. 23 of the Datasheet.
+	 */
+	if (chip->read_reg(dev, SJA_MOD) == REG_MOD_PELICAN_INITIAL &&
+	    chip->read_reg(dev, SJA_SR) == REG_SR_PELICAN_INITIAL &&
+	    chip->read_reg(dev, SJA_IR) == REG_IR_PELICAN_INITIAL)
+		return flag;
+
+	return 0;
+}
+
+/*
+ * PLX9030/50/52 software reset
+ * Also asserts LRESET#, which resets the device(s) on the Local Bus (if wired).
+ * For most cards this is enough to reset the SJA1000 chips.
+ */
+static void plx_pci_reset_common(struct pci_dev *pdev)
+{
+	struct plx_pci_card *card = pci_get_drvdata(pdev);
+	u32 cntrl;
+
+	cntrl = ioread32(card->conf_addr + PLX_CNTRL);
+	cntrl |= PLX_PCI_RESET;
+	iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
+	udelay(100);
+	cntrl ^= PLX_PCI_RESET;
+	iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
+}
+
+/*
+ * PLX9056 software reset
+ * Assert LRESET# and reset device(s) on the Local Bus (if wired).
+ */
+static void plx9056_pci_reset_common(struct pci_dev *pdev)
+{
+	struct plx_pci_card *card = pci_get_drvdata(pdev);
+	u32 cntrl;
+
+	/* issue a local bus reset */
+	cntrl = ioread32(card->conf_addr + PLX9056_CNTRL);
+	cntrl |= PLX_PCI_RESET;
+	iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+	udelay(100);
+	cntrl ^= PLX_PCI_RESET;
+	iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+
+	/* reload local configuration from EEPROM */
+	cntrl |= PLX9056_PCI_RCR;
+	iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+
+	/*
+	 * There is no safe way to poll for the end
+	 * of reconfiguration process. Waiting for 10ms
+	 * is safe.
+	 */
+	mdelay(10);
+
+	cntrl ^= PLX9056_PCI_RCR;
+	iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+}
+
+/* Special reset function for Marathon card */
+static void plx_pci_reset_marathon(struct pci_dev *pdev)
+{
+	void __iomem *reset_addr;
+	int i;
+	int reset_bar[2] = {3, 5};
+
+	plx_pci_reset_common(pdev);
+
+	for (i = 0; i < 2; i++) {
+		reset_addr = pci_iomap(pdev, reset_bar[i], 0);
+		if (!reset_addr) {
+			dev_err(&pdev->dev, "Failed to remap reset "
+				"space %d (BAR%d)\n", i, reset_bar[i]);
+		} else {
+			/* reset the SJA1000 chip */
+			iowrite8(0x1, reset_addr);
+			udelay(100);
+			pci_iounmap(pdev, reset_addr);
+		}
+	}
+}
+
+static void plx_pci_del_card(struct pci_dev *pdev)
+{
+	struct plx_pci_card *card = pci_get_drvdata(pdev);
+	struct rtcan_device *dev;
+	int i = 0;
+
+	for (i = 0; i < card->channels; i++) {
+		dev = card->rtcan_dev[i];
+		if (!dev)
+			continue;
+
+		dev_info(&pdev->dev, "Removing %s\n", dev->name);
+		rtcan_sja1000_unregister(dev);
+		if (dev->base_addr)
+			pci_iounmap(pdev, (void __iomem *)dev->base_addr);
+		rtcan_dev_free(dev);
+	}
+
+	card->reset_func(pdev);
+
+	/*
+	 * Disable interrupts from PCI-card and disable local
+	 * interrupts
+	 */
+	if (pdev->device != PCI_DEVICE_ID_PLX_9056)
+		iowrite32(0x0, card->conf_addr + PLX_INTCSR);
+	else
+		iowrite32(0x0, card->conf_addr + PLX9056_INTCSR);
+
+	if (card->conf_addr)
+		pci_iounmap(pdev, card->conf_addr);
+
+	kfree(card);
+
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+/*
+ * Probe PLX90xx based device for the SJA1000 chips and register each
+ * available CAN channel to SJA1000 Socket-CAN subsystem.
+ */
+static int plx_pci_add_card(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	struct rtcan_sja1000 *chip;
+	struct rtcan_device *dev;
+	struct plx_pci_card *card;
+	struct plx_pci_card_info *ci;
+	int err, i;
+	u32 val;
+	void __iomem *addr;
+
+	if (!rtdm_available())
+		return -ENODEV;
+
+	ci = (struct plx_pci_card_info *)ent->driver_data;
+
+	if (pci_enable_device(pdev) < 0) {
+		dev_err(&pdev->dev, "Failed to enable PCI device\n");
+		return -ENODEV;
+	}
+
+	dev_info(&pdev->dev, "Detected \"%s\" card at slot #%i\n",
+		 ci->name, PCI_SLOT(pdev->devfn));
+
+	/* Allocate card structures to hold addresses, ... */
+	card = kzalloc(sizeof(*card), GFP_KERNEL);
+	if (!card) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		pci_disable_device(pdev);
+		return -ENOMEM;
+	}
+
+	pci_set_drvdata(pdev, card);
+
+	card->channels = 0;
+
+	/* Remap PLX90xx configuration space */
+	addr = pci_iomap(pdev, ci->conf_map.bar, ci->conf_map.size);
+	if (!addr) {
+		err = -ENOMEM;
+		dev_err(&pdev->dev, "Failed to remap configuration space "
+			"(BAR%d)\n", ci->conf_map.bar);
+		goto failure_cleanup;
+	}
+	card->conf_addr = addr + ci->conf_map.offset;
+
+	ci->reset_func(pdev);
+	card->reset_func = ci->reset_func;
+
+	/* Detect available channels */
+	for (i = 0; i < ci->channel_count; i++) {
+		struct plx_pci_channel_map *cm = &ci->chan_map_tbl[i];
+
+		dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000),
+				      sizeof(struct plx_pci_card));
+		if (!dev) {
+			err = -ENOMEM;
+			goto failure_cleanup;
+		}
+
+		strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+		dev->board_name = (char *)ci->name;
+
+		card->rtcan_dev[i] = dev;
+		chip = card->rtcan_dev[i]->priv;
+		chip->irq_flags = RTDM_IRQTYPE_SHARED;
+		chip->irq_num = pdev->irq;
+
+		/*
+		 * Remap IO space of the SJA1000 chips
+		 * This is device-dependent mapping
+		 */
+		addr = pci_iomap(pdev, cm->bar, cm->size);
+		if (!addr) {
+			err = -ENOMEM;
+			dev_err(&pdev->dev, "Failed to remap BAR%d\n", cm->bar);
+			goto failure_cleanup;
+		}
+
+		dev->base_addr = (unsigned long)(addr + cm->offset);
+		chip->read_reg = plx_pci_read_reg;
+		chip->write_reg = plx_pci_write_reg;
+
+		/* Check if channel is present */
+		if (plx_pci_check_sja1000(dev)) {
+			dev->can_sys_clock = ci->can_clock;
+			chip->ocr = ci->ocr;
+			chip->cdr = ci->cdr;
+
+			/* Register SJA1000 device */
+			err = rtcan_sja1000_register(dev);
+			if (err) {
+				dev_err(&pdev->dev, "Registering device failed "
+					"(err=%d)\n", err);
+				rtcan_dev_free(dev);
+				goto failure_cleanup;
+			}
+
+			card->channels++;
+
+			dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d "
+				 "registered as %s\n", i + 1,
+				 (void __iomem *)dev->base_addr, chip->irq_num,
+				 dev->name);
+		} else {
+			dev_err(&pdev->dev, "Channel #%d not detected\n",
+				i + 1);
+			rtcan_dev_free(dev);
+		}
+	}
+
+	if (!card->channels) {
+		err = -ENODEV;
+		goto failure_cleanup;
+	}
+
+	/*
+	 * Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
+	 * Local_2 interrupts from the SJA1000 chips
+	 */
+	if (pdev->device != PCI_DEVICE_ID_PLX_9056) {
+		val = ioread32(card->conf_addr + PLX_INTCSR);
+		if (pdev->subsystem_vendor == PCI_VENDOR_ID_ESDGMBH)
+			val |= PLX_LINT1_EN | PLX_PCI_INT_EN;
+		else
+			val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
+		iowrite32(val, card->conf_addr + PLX_INTCSR);
+	} else {
+		iowrite32(PLX9056_LINTI | PLX9056_PCI_INT_EN,
+			  card->conf_addr + PLX9056_INTCSR);
+	}
+	return 0;
+
+failure_cleanup:
+	dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err);
+
+	plx_pci_del_card(pdev);
+
+	return err;
+}
+
+static struct pci_driver plx_pci_driver = {
+	.name = RTCAN_DRV_NAME,
+	.id_table = plx_pci_tbl,
+	.probe = plx_pci_add_card,
+	.remove = plx_pci_del_card,
+};
+
+module_pci_driver(plx_pci_driver);
+++ linux-patched/drivers/xenomai/can/sja1000/Makefile	2022-03-21 12:58:29.326889576 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/sja1000/rtcan_esd_pci.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/drivers/xenomai/can -I$(srctree)/drivers/xenomai/can/sja1000
+
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000) += xeno_can_sja1000.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_PEAK_PCI) += xeno_can_peak_pci.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_PEAK_DNG) += xeno_can_peak_dng.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_PLX_PCI) += xeno_can_plx_pci.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_IXXAT_PCI) += xeno_can_ixxat_pci.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_ADV_PCI) += xeno_can_adv_pci.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_EMS_PCI) += xeno_can_ems_pci.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_ESD_PCI) += xeno_can_esd_pci.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_ISA) += xeno_can_isa.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_MEM) += xeno_can_mem.o
+
+xeno_can_sja1000-y := rtcan_sja1000.o
+xeno_can_sja1000-$(CONFIG_FS_PROCFS) += rtcan_sja1000_proc.o
+xeno_can_peak_pci-y := rtcan_peak_pci.o
+xeno_can_peak_dng-y := rtcan_peak_dng.o
+xeno_can_plx_pci-y := rtcan_plx_pci.o
+xeno_can_ixxat_pci-y := rtcan_ixxat_pci.o
+xeno_can_adv_pci-y := rtcan_adv_pci.o
+xeno_can_ems_pci-y := rtcan_ems_pci.o
+xeno_can_esd_pci-y := rtcan_esd_pci.o
+xeno_can_isa-y := rtcan_isa.o
+xeno_can_mem-y := rtcan_mem.o
+++ linux-patched/drivers/xenomai/can/sja1000/rtcan_esd_pci.c	2022-03-21 12:58:29.322889615 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/sja1000/rtcan_peak_dng.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Sebastian Smolorz <sesmo@gmx.net>
+ *
+ * This driver is based on the Socket-CAN driver esd_pci.c,
+ * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ * Copyright (C) 2009 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME "rtcan%d"
+#define RTCAN_DRV_NAME "ESD-PCI-CAN"
+
+static char *esd_pci_board_name = "ESD-PCI";
+
+MODULE_AUTHOR("Sebastian Smolorz <sesmo@gmx.net>");
+MODULE_DESCRIPTION("RTCAN board driver for esd PCI/PMC/CPCI/PCIe/PCI104 " \
+		   "CAN cards");
+MODULE_LICENSE("GPL v2");
+
+struct rtcan_esd_pci {
+	struct pci_dev *pci_dev;
+	struct rtcan_device *slave_dev;
+	void __iomem *conf_addr;
+	void __iomem *base_addr;
+};
+
+#define ESD_PCI_CAN_CLOCK	(16000000 / 2)
+
+#define ESD_PCI_OCR		(SJA_OCR_TX0_PUSHPULL | SJA_OCR_TX1_PUSHPULL | \
+				 SJA_OCR_TX1_INVERT | SJA_OCR_MODE_CLOCK)
+#define ESD_PCI_CDR		(SJA_CDR_CLK_OFF | SJA_CDR_CBP | \
+				 SJA_CDR_CAN_MODE)
+
+#define CHANNEL_SINGLE 0 /* this is a single channel device */
+#define CHANNEL_MASTER 1 /* multi channel device, this device is master */
+#define CHANNEL_SLAVE  2 /* multi channel device, this is slave */
+
+#define CHANNEL_OFFSET		0x100
+
+#define INTCSR_OFFSET		0x4c /* Offset in PLX9050 conf registers */
+#define INTCSR_LINTI1		(1 << 0)
+#define INTCSR_PCI		(1 << 6)
+
+#define INTCSR9056_OFFSET	0x68 /* Offset in PLX9056 conf registers */
+#define INTCSR9056_LINTI	(1 << 11)
+#define INTCSR9056_PCI		(1 << 8)
+
+#ifndef PCI_DEVICE_ID_PLX_9056
+# define PCI_DEVICE_ID_PLX_9056 0x9056
+#endif
+
+/* PCI subsystem IDs of esd's SJA1000 based CAN cards */
+
+/* CAN-PCI/200: PCI, 33MHz only, bridge: PLX9050 */
+#define ESD_PCI_SUB_SYS_ID_PCI200	0x0004
+
+/* CAN-PCI/266: PCI, 33/66MHz, bridge: PLX9056 */
+#define ESD_PCI_SUB_SYS_ID_PCI266	0x0009
+
+/* CAN-PMC/266: PMC module, 33/66MHz, bridge: PLX9056 */
+#define ESD_PCI_SUB_SYS_ID_PMC266	0x000e
+
+/* CAN-CPCI/200: Compact PCI, 33MHz only, bridge: PLX9030 */
+#define ESD_PCI_SUB_SYS_ID_CPCI200	0x010b
+
+/* CAN-PCIE/2000: PCI Express 1x, bridge: PEX8311 = PEX8111 + PLX9056 */
+#define ESD_PCI_SUB_SYS_ID_PCIE2000	0x0200
+
+/* CAN-PCI/104: PCI104 module, 33MHz only, bridge: PLX9030 */
+#define ESD_PCI_SUB_SYS_ID_PCI104200	0x0501
+
+static struct pci_device_id esd_pci_tbl[] = {
+	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+	 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI200},
+	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+	 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI266},
+	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+	 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PMC266},
+	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+	 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_CPCI200},
+	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+	 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCIE2000},
+	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+	 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI104200},
+	{0,}
+};
+
+#define ESD_PCI_BASE_SIZE  0x200
+
+MODULE_DEVICE_TABLE(pci, esd_pci_tbl);
+
+
+static u8 rtcan_esd_pci_read_reg(struct rtcan_device *dev, int port)
+{
+	struct rtcan_esd_pci *board = (struct rtcan_esd_pci *)dev->board_priv;
+	return readb(board->base_addr + port);
+}
+
+static void rtcan_esd_pci_write_reg(struct rtcan_device *dev, int port, u8 val)
+{
+	struct rtcan_esd_pci *board = (struct rtcan_esd_pci *)dev->board_priv;
+	writeb(val, board->base_addr + port);
+}
+
+static void rtcan_esd_pci_del_chan(struct rtcan_device *dev)
+{
+	struct rtcan_esd_pci *board;
+
+	if (!dev)
+		return;
+
+	board = (struct rtcan_esd_pci *)dev->board_priv;
+
+	printk("Removing %s %s device %s\n",
+		esd_pci_board_name, dev->ctrl_name, dev->name);
+
+	rtcan_sja1000_unregister(dev);
+
+	rtcan_dev_free(dev);
+}
+
+static int rtcan_esd_pci_add_chan(struct pci_dev *pdev, int channel,
+				  struct rtcan_device **master_dev,
+				  void __iomem *conf_addr,
+				  void __iomem *base_addr)
+{
+	struct rtcan_device *dev;
+	struct rtcan_sja1000 *chip;
+	struct rtcan_esd_pci *board;
+	int ret;
+
+	dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000),
+			      sizeof(struct rtcan_esd_pci));
+	if (dev == NULL)
+		return -ENOMEM;
+
+	chip = (struct rtcan_sja1000 *)dev->priv;
+	board = (struct rtcan_esd_pci *)dev->board_priv;
+
+	board->pci_dev = pdev;
+	board->conf_addr = conf_addr;
+	board->base_addr = base_addr;
+
+	if (channel == CHANNEL_SLAVE) {
+		struct rtcan_esd_pci *master_board =
+			(struct rtcan_esd_pci *)(*master_dev)->board_priv;
+		master_board->slave_dev = dev;
+	}
+
+	dev->board_name = esd_pci_board_name;
+
+	chip->read_reg = rtcan_esd_pci_read_reg;
+	chip->write_reg = rtcan_esd_pci_write_reg;
+
+	dev->can_sys_clock = ESD_PCI_CAN_CLOCK;
+
+	chip->ocr = ESD_PCI_OCR;
+	chip->cdr = ESD_PCI_CDR;
+
+	strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+	chip->irq_flags = RTDM_IRQTYPE_SHARED;
+	chip->irq_num = pdev->irq;
+
+	RTCAN_DBG("%s: base_addr=0x%p conf_addr=0x%p irq=%d ocr=%#x cdr=%#x\n",
+		  RTCAN_DRV_NAME, board->base_addr, board->conf_addr,
+		  chip->irq_num, chip->ocr, chip->cdr);
+
+	/* Register SJA1000 device */
+	ret = rtcan_sja1000_register(dev);
+	if (ret) {
+		printk(KERN_ERR "ERROR %d while trying to register SJA1000 "
+				"device!\n", ret);
+		goto failure;
+	}
+
+	if (channel != CHANNEL_SLAVE)
+		*master_dev = dev;
+
+	return 0;
+
+
+failure:
+	rtcan_dev_free(dev);
+	return ret;
+}
+
+static int esd_pci_init_one(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	int ret, channel;
+	void __iomem *base_addr;
+	void __iomem *conf_addr;
+	struct rtcan_device *master_dev = NULL;
+
+	if (!rtdm_available())
+		return -ENODEV;
+
+	if ((ret = pci_enable_device (pdev)))
+		goto failure;
+
+	if ((ret = pci_request_regions(pdev, RTCAN_DRV_NAME)))
+		goto failure;
+
+	RTCAN_DBG("%s: Initializing device %04x:%04x %04x:%04x\n",
+		 RTCAN_DRV_NAME, pdev->vendor, pdev->device,
+		 pdev->subsystem_vendor, pdev->subsystem_device);
+
+	conf_addr = pci_iomap(pdev, 0, ESD_PCI_BASE_SIZE);
+	if (conf_addr == NULL) {
+		ret = -ENODEV;
+		goto failure_release_pci;
+	}
+
+	base_addr = pci_iomap(pdev, 2, ESD_PCI_BASE_SIZE);
+	if (base_addr == NULL) {
+		ret = -ENODEV;
+		goto failure_iounmap_conf;
+	}
+
+	/*
+	 * Check if a second channel is available: put the would-be second
+	 * controller into reset and verify that the mode bits written below
+	 * read back as expected; otherwise treat the card as single-channel.
+	 */
+	writeb(SJA_MOD_RM, base_addr + CHANNEL_OFFSET + SJA_MOD);
+	writeb(SJA_CDR_CBP, base_addr + CHANNEL_OFFSET + SJA_CDR);
+	writeb(SJA_MOD_RM, base_addr + CHANNEL_OFFSET + SJA_MOD);
+	if (readb(base_addr + CHANNEL_OFFSET + SJA_MOD) == 0x21) {
+		writeb(SJA_MOD_SM | SJA_MOD_AFM | SJA_MOD_STM | SJA_MOD_LOM |
+		       SJA_MOD_RM, base_addr + CHANNEL_OFFSET + SJA_MOD);
+		if (readb(base_addr + CHANNEL_OFFSET + SJA_MOD) == 0x3f)
+			channel = CHANNEL_MASTER;
+		else {
+			writeb(SJA_MOD_RM,
+				base_addr + CHANNEL_OFFSET + SJA_MOD);
+			channel = CHANNEL_SINGLE;
+		}
+	} else {
+		writeb(SJA_MOD_RM, base_addr + CHANNEL_OFFSET + SJA_MOD);
+		channel = CHANNEL_SINGLE;
+	}
+
+	if ((ret = rtcan_esd_pci_add_chan(pdev, channel, &master_dev,
+						conf_addr, base_addr)))
+		goto failure_iounmap_base;
+
+	if (channel != CHANNEL_SINGLE) {
+		channel = CHANNEL_SLAVE;
+		if ((ret = rtcan_esd_pci_add_chan(pdev, channel, &master_dev,
+				      conf_addr, base_addr + CHANNEL_OFFSET)))
+			goto failure_iounmap_base;
+	}
+
+	if ((pdev->device == PCI_DEVICE_ID_PLX_9050) ||
+	    (pdev->device == PCI_DEVICE_ID_PLX_9030)) {
+		/* Enable interrupts in PLX9050 */
+		writel(INTCSR_LINTI1 | INTCSR_PCI, conf_addr + INTCSR_OFFSET);
+	} else {
+		/* Enable interrupts in PLX9056*/
+		writel(INTCSR9056_LINTI | INTCSR9056_PCI,
+					conf_addr + INTCSR9056_OFFSET);
+	}
+
+	pci_set_drvdata(pdev, master_dev);
+
+	return 0;
+
+
+failure_iounmap_base:
+	if (master_dev)
+		rtcan_esd_pci_del_chan(master_dev);
+	pci_iounmap(pdev, base_addr);
+
+failure_iounmap_conf:
+	pci_iounmap(pdev, conf_addr);
+
+failure_release_pci:
+	pci_release_regions(pdev);
+
+failure:
+	return ret;
+}
+
+static void esd_pci_remove_one(struct pci_dev *pdev)
+{
+	struct rtcan_device *dev = pci_get_drvdata(pdev);
+	struct rtcan_esd_pci *board = (struct rtcan_esd_pci *)dev->board_priv;
+
+	if ((pdev->device == PCI_DEVICE_ID_PLX_9050) ||
+	    (pdev->device == PCI_DEVICE_ID_PLX_9030)) {
+		/* Disable interrupts in PLX9050*/
+		writel(0, board->conf_addr + INTCSR_OFFSET);
+	} else {
+		/* Disable interrupts in PLX9056*/
+		writel(0, board->conf_addr + INTCSR9056_OFFSET);
+	}
+
+	if (board->slave_dev)
+		rtcan_esd_pci_del_chan(board->slave_dev);
+	rtcan_esd_pci_del_chan(dev);
+
+
+	pci_iounmap(pdev, board->base_addr);
+	pci_iounmap(pdev, board->conf_addr);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver rtcan_esd_pci_driver = {
+	.name = RTCAN_DRV_NAME,
+	.id_table = esd_pci_tbl,
+	.probe = esd_pci_init_one,
+	.remove = esd_pci_remove_one,
+};
+
+module_pci_driver(rtcan_esd_pci_driver);
+++ linux-patched/drivers/xenomai/can/sja1000/rtcan_peak_dng.c	2022-03-21 12:58:29.317889663 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/sja1000/rtcan_ems_pci.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Derived from the PCAN project file driver/src/pcan_dongle.c:
+ *
+ * Copyright (C) 2001-2006  PEAK System-Technik GmbH
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <linux/pnp.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME    "rtcan%d"
+#define RTCAN_DRV_NAME    "PEAK-Dongle"
+
+#define RTCAN_PEAK_DNG_MAX_DEV 1
+
+static char *dongle_board_name = "PEAK-Dongle";
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("RTCAN board driver for PEAK-Dongle");
+MODULE_LICENSE("GPL");
+
+static char   *type[RTCAN_PEAK_DNG_MAX_DEV];
+static ushort io[RTCAN_PEAK_DNG_MAX_DEV];
+static char   irq[RTCAN_PEAK_DNG_MAX_DEV];
+
+module_param_array(type, charp,  NULL, 0444);
+module_param_array(io,   ushort, NULL, 0444);
+module_param_array(irq,  byte,   NULL, 0444);
+
+MODULE_PARM_DESC(type, "The type of interface (sp, epp)");
+MODULE_PARM_DESC(io,   "The io-port address");
+MODULE_PARM_DESC(irq,  "The interrupt number");
+
+#define DONGLE_TYPE_SP  0
+#define DONGLE_TYPE_EPP 1
+
+#define DNG_PORT_SIZE            4  /* the address range of the dongle-port */
+#define ECR_PORT_SIZE            1  /* size of the associated ECR register */
+
+struct rtcan_peak_dng
+{
+    u16  ioport;
+    u16  ecr;      /* ECR register in case of EPP */
+    u8   old_data; /* the overwritten contents of the port registers */
+    u8   old_ctrl;
+    u8   old_ecr;
+    u8   type;
+};
+
+static struct rtcan_device *rtcan_peak_dng_devs[RTCAN_PEAK_DNG_MAX_DEV];
+
+static u16 dng_ports[] = {0x378, 0x278, 0x3bc, 0x2bc};
+static u8  dng_irqs[]  = {7, 5, 7, 5};
+
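+/*
+ * Decode the nibble read back on the parallel port status lines (bits 3-7).
+ * The upper and lower halves of the table differ only in the top bit, which
+ * compensates for the hardware-inverted BUSY line; status bit 6 is ignored,
+ * hence the repeated rows.
+ */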
+static unsigned char nibble_decode[32] =
+{
+    0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf,
+    0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf,
+    0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
+    0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
+};
+
+/* Enable and disable irqs */
+static inline void rtcan_parport_disable_irq(u32 port)
+{
+    u32 pc = port + 2;
+    outb(inb(pc) & ~0x10, pc);
+}
+
+static inline void rtcan_parport_enable_irq(u32 port)
+{
+    u32 pc = port + 2;
+    outb(inb(pc) | 0x10, pc);
+}
+
+/* Functions for SP port */
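+/*
+ * 0x0B is the mask of control lines inverted by the parallel port hardware
+ * (STROBE, AUTOFEED, SELECT-IN), so "0x0B ^ value" drives the logical value
+ * onto the dongle while the saved IRQ enable bit is preserved.
+ */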
+static u8 rtcan_peak_dng_sp_readreg(struct rtcan_device *dev, int port)
+{
+    struct rtcan_peak_dng *dng = (struct rtcan_peak_dng *)dev->board_priv;
+    u32 pa = dng->ioport;
+    u32 pb = pa + 1;
+    u32 pc = pb + 1;
+    u8  b0, b1;
+    u8  irq_enable = inb(pc) & 0x10; /* don't influence irq_enable */
+
+    outb((0x0B ^ 0x0D) | irq_enable, pc);
+    outb((port & 0x1F) | 0x80, pa);
+    outb((0x0B ^ 0x0C) | irq_enable, pc);
+    b1=nibble_decode[inb(pb)>>3];
+    outb(0x40, pa);
+    b0=nibble_decode[inb(pb)>>3];
+    outb((0x0B ^ 0x0D) | irq_enable, pc);
+
+    return  (b1 << 4) | b0 ;
+}
+
+static void rtcan_peak_dng_writereg(struct rtcan_device *dev, int port, u8 data)
+{
+    struct rtcan_peak_dng *dng = (struct rtcan_peak_dng *)dev->board_priv;
+    u32 pa = dng->ioport;
+    u32 pc = pa + 2;
+    u8  irq_enable = inb(pc) & 0x10; /* don't influence irq_enable */
+
+    outb((0x0B ^ 0x0D) | irq_enable, pc);
+    outb(port & 0x1F, pa);
+    outb((0x0B ^ 0x0C) | irq_enable, pc);
+    outb(data, pa);
+    outb((0x0B ^ 0x0D) | irq_enable, pc);
+}
+
+/* Functions for EPP port */
+static u8 rtcan_peak_dng_epp_readreg(struct rtcan_device *dev, int port)
+{
+    struct rtcan_peak_dng *dng = (struct rtcan_peak_dng *)dev->board_priv;
+    u32 pa = dng->ioport;
+    u32 pc = pa + 2;
+    u8  val;
+    u8  irq_enable = inb(pc) & 0x10; /* don't influence irq_enable */
+
+    outb((0x0B ^ 0x0F) | irq_enable, pc);
+    outb((port & 0x1F) | 0x80, pa);
+    outb((0x0B ^ 0x2E) | irq_enable, pc);
+    val = inb(pa);
+    outb((0x0B ^ 0x0F) | irq_enable, pc);
+
+    return val;
+}
+
+
+/* Switch EPP mode on, or restore the ECR register */
+static void dongle_set_ecr(u16 port, struct rtcan_peak_dng *dng)
+{
+    u32 ecr = dng->ecr;
+
+    dng->old_ecr = inb(ecr);
+    outb((dng->old_ecr & 0x1F) | 0x20, ecr);
+
+    if (dng->old_ecr == 0xff)
+	printk(KERN_DEBUG "%s: is ECP mode really configured?\n", RTCAN_DRV_NAME);
+}
+
+static void dongle_restore_ecr(u16 port, struct rtcan_peak_dng *dng)
+{
+    u32 ecr = dng->ecr;
+
+    outb(dng->old_ecr, ecr);
+
+    printk(KERN_DEBUG "%s: restore ECR\n", RTCAN_DRV_NAME);
+}
+
+static inline void rtcan_peak_dng_enable(struct rtcan_device *dev)
+{
+    struct rtcan_peak_dng *dng = (struct rtcan_peak_dng *)dev->board_priv;
+    u32 port = dng->ioport;
+
+    /* save old port contents */
+    dng->old_data = inb(port);
+    dng->old_ctrl = inb(port + 2);
+
+    /* switch to epp mode if possible */
+    if (dng->type == DONGLE_TYPE_EPP)
+	dongle_set_ecr(port, dng);
+
+    rtcan_parport_enable_irq(port);
+}
+
+static inline void rtcan_peak_dng_disable(struct rtcan_device *dev)
+{
+    struct rtcan_peak_dng *dng = (struct rtcan_peak_dng *)dev->board_priv;
+    u32 port = dng->ioport;
+
+    rtcan_parport_disable_irq(port);
+
+    if (dng->type == DONGLE_TYPE_EPP)
+	dongle_restore_ecr(port, dng);
+
+    /* restore port state */
+    outb(dng->old_data, port);
+    outb(dng->old_ctrl, port + 2);
+}
+
+/** Init module */
+int __init rtcan_peak_dng_init_one(int idx)
+{
+    int ret, dtype;
+    struct rtcan_device *dev;
+    struct rtcan_sja1000 *sja;
+    struct rtcan_peak_dng *dng;
+
+    if (strncmp(type[idx], "sp", 2) == 0)
+	dtype = DONGLE_TYPE_SP;
+    else if (strncmp(type[idx], "epp", 3) == 0)
+	dtype = DONGLE_TYPE_EPP;
+    else {
+	printk("%s: type %s is invalid, use \"sp\" or \"epp\".\n",
+	       RTCAN_DRV_NAME, type[idx]);
+	return -EINVAL;
+    }
+
+    if ((dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000),
+			       sizeof(struct rtcan_peak_dng))) == NULL)
+	return -ENOMEM;
+
+    sja = (struct rtcan_sja1000 *)dev->priv;
+    dng = (struct rtcan_peak_dng *)dev->board_priv;
+
+    dev->board_name = dongle_board_name;
+
+    if (io[idx])
+	dng->ioport = io[idx];
+    else
+	dng->ioport = dng_ports[idx];
+
+    if (irq[idx])
+	sja->irq_num = irq[idx];
+    else
+	sja->irq_num = dng_irqs[idx];
+    sja->irq_flags = 0;
+
+    /* Remember the interface type; it is checked again below and on cleanup */
+    dng->type = dtype;
+
+    if (dtype == DONGLE_TYPE_SP) {
+	sja->read_reg = rtcan_peak_dng_sp_readreg;
+	sja->write_reg = rtcan_peak_dng_writereg;
+	dng->ecr = 0; /* set to anything */
+    } else {
+	sja->read_reg = rtcan_peak_dng_epp_readreg;
+	sja->write_reg = rtcan_peak_dng_writereg;
+	dng->ecr = dng->ioport + 0x402;
+    }
+
+    /* Check and request I/O ports */
+    if (!request_region(dng->ioport, DNG_PORT_SIZE, RTCAN_DRV_NAME)) {
+	ret = -EBUSY;
+	goto out_dev_free;
+    }
+
+    if (dng->type == DONGLE_TYPE_EPP) {
+	if (!request_region(dng->ecr, ECR_PORT_SIZE, RTCAN_DRV_NAME)) {
+	    ret = -EBUSY;
+	    goto out_free_region;
+	}
+    }
+
+    /* Clock frequency in Hz */
+    dev->can_sys_clock = 8000000;	/* 16/2 MHz */
+
+    /* Output control register */
+    sja->ocr = SJA_OCR_MODE_NORMAL | SJA_OCR_TX0_PUSHPULL;
+
+    sja->cdr = SJA_CDR_CAN_MODE;
+
+    strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+    rtcan_peak_dng_enable(dev);
+
+    /* Register RTDM device */
+    ret = rtcan_sja1000_register(dev);
+    if (ret) {
+	printk(KERN_ERR "ERROR while trying to register SJA1000 device %d!\n",
+	       ret);
+	goto out_free_region2;
+    }
+
+    rtcan_peak_dng_devs[idx] = dev;
+    return 0;
+
+ out_free_region2:
+    if (dng->type == DONGLE_TYPE_EPP)
+	release_region(dng->ecr, ECR_PORT_SIZE);
+
+ out_free_region:
+    release_region(dng->ioport, DNG_PORT_SIZE);
+
+ out_dev_free:
+    rtcan_dev_free(dev);
+
+    return ret;
+}
+
+void rtcan_peak_dng_exit_one(struct rtcan_device *dev)
+{
+    struct rtcan_peak_dng *dng = (struct rtcan_peak_dng *)dev->board_priv;
+
+    rtcan_sja1000_unregister(dev);
+    rtcan_peak_dng_disable(dev);
+    if (dng->type == DONGLE_TYPE_EPP)
+	release_region(dng->ecr, ECR_PORT_SIZE);
+    release_region(dng->ioport, DNG_PORT_SIZE);
+    rtcan_dev_free(dev);
+}
+
+static const struct pnp_device_id rtcan_peak_dng_pnp_tbl[] = {
+    /* Standard LPT Printer Port */
+    {.id = "PNP0400", .driver_data = 0},
+    /* ECP Printer Port */
+    {.id = "PNP0401", .driver_data = 0},
+    { }
+};
+
+static int rtcan_peak_dng_pnp_probe(struct pnp_dev *dev,
+				    const struct pnp_device_id *id)
+{
+    return 0;
+}
+
+static struct pnp_driver rtcan_peak_dng_pnp_driver = {
+    .name     = RTCAN_DRV_NAME,
+    .id_table = rtcan_peak_dng_pnp_tbl,
+    .probe    = rtcan_peak_dng_pnp_probe,
+};
+
+static int pnp_registered;
+
+/** Cleanup module */
+static void rtcan_peak_dng_exit(void)
+{
+    int i;
+    struct rtcan_device *dev;
+
+    for (i = 0, dev = rtcan_peak_dng_devs[i];
+	 i < RTCAN_PEAK_DNG_MAX_DEV && dev != NULL;
+	 i++)
+	rtcan_peak_dng_exit_one(dev);
+
+    if (pnp_registered)
+	pnp_unregister_driver(&rtcan_peak_dng_pnp_driver);
+}
+
+/** Init module */
+static int __init rtcan_peak_dng_init(void)
+{
+    int i, ret = -EINVAL, done = 0;
+
+    if (!rtdm_available())
+	return -ENOSYS;
+
+    if (pnp_register_driver(&rtcan_peak_dng_pnp_driver) == 0)
+	pnp_registered = 1;
+
+    for (i = 0;
+	 i < RTCAN_PEAK_DNG_MAX_DEV && type[i] != 0;
+	 i++) {
+
+	if ((ret = rtcan_peak_dng_init_one(i)) != 0) {
+	    printk(KERN_ERR "%s: Init failed with %d\n", RTCAN_DRV_NAME, ret);
+	    goto cleanup;
+	}
+	done++;
+    }
+    if (done)
+	return 0;
+
+    printk(KERN_ERR "%s: Please specify type=epp or type=sp\n",
+	   RTCAN_DRV_NAME);
+
+cleanup:
+    rtcan_peak_dng_exit();
+    return ret;
+}
+
+module_init(rtcan_peak_dng_init);
+module_exit(rtcan_peak_dng_exit);
+++ linux-patched/drivers/xenomai/can/sja1000/rtcan_ems_pci.c	2022-03-21 12:58:29.313889702 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/sja1000/rtcan_sja1000_regs.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2007, 2016 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com>
+ * Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com>
+ *
+ * Derived from Linux CAN SJA1000 PCI driver "ems_pci".
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME    "rtcan%d"
+#define RTCAN_DRV_NAME    "EMS-CPC-PCI-CAN"
+
+static char *ems_pci_board_name = "EMS-CPC-PCI";
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("RTCAN board driver for EMS CPC-PCI/PCIe/104P CAN cards");
+MODULE_LICENSE("GPL v2");
+
+#define EMS_PCI_V1_MAX_CHAN 2
+#define EMS_PCI_V2_MAX_CHAN 4
+#define EMS_PCI_MAX_CHAN    EMS_PCI_V2_MAX_CHAN
+
+struct ems_pci_card {
+	int version;
+	int channels;
+
+	struct pci_dev *pci_dev;
+	struct rtcan_device *rtcan_dev[EMS_PCI_MAX_CHAN];
+
+	void __iomem *conf_addr;
+	void __iomem *base_addr;
+};
+
+#define EMS_PCI_CAN_CLOCK (16000000 / 2)
+
+/*
+ * Register definitions and descriptions are from LinCAN 0.3.3.
+ *
+ * PSB4610 PITA-2 bridge control registers
+ */
+#define PITA2_ICR           0x00	/* Interrupt Control Register */
+#define PITA2_ICR_INT0      0x00000002	/* [RC] INT0 Active/Clear */
+#define PITA2_ICR_INT0_EN   0x00020000	/* [RW] Enable INT0 */
+
+#define PITA2_MISC          0x1c	/* Miscellaneous Register */
+#define PITA2_MISC_CONFIG   0x04000000	/* Multiplexed parallel interface */
+
+/*
+ * Register definitions for the PLX 9030
+ */
+#define PLX_ICSR            0x4c   /* Interrupt Control/Status register */
+#define PLX_ICSR_LINTI1_ENA 0x0001 /* LINTi1 Enable */
+#define PLX_ICSR_PCIINT_ENA 0x0040 /* PCI Interrupt Enable */
+#define PLX_ICSR_LINTI1_CLR 0x0400 /* Local Edge Triggerable Interrupt Clear */
+#define PLX_ICSR_ENA_CLR    (PLX_ICSR_LINTI1_ENA | PLX_ICSR_PCIINT_ENA | \
+			     PLX_ICSR_LINTI1_CLR)
+
+/*
+ * The board configuration is probably as follows:
+ * RX1 is connected to ground.
+ * TX1 is not connected.
+ * CLKO is not connected.
+ * Setting the OCR register to 0xDA is a good idea.
+ * This means normal output mode, push-pull and the correct polarity.
+ */
+#define EMS_PCI_OCR         (SJA_OCR_TX0_PUSHPULL | SJA_OCR_TX1_PUSHPULL)
+
+/*
+ * In the CDR register, you should set CBP to 1.
+ * You will probably also want to set the clock divider value to 7
+ * (meaning direct oscillator output) because the second SJA1000 chip
+ * is driven by the first one's CLKOUT output.
+ */
+#define EMS_PCI_CDR             (SJA_CDR_CBP | SJA_CDR_CLKOUT_MASK)
+
+#define EMS_PCI_V1_BASE_BAR     1
+#define EMS_PCI_V1_CONF_SIZE    4096 /* size of PITA control area */
+#define EMS_PCI_V2_BASE_BAR     2
+#define EMS_PCI_V2_CONF_SIZE    128 /* size of PLX control area */
+#define EMS_PCI_CAN_BASE_OFFSET 0x400 /* offset where the controllers start */
+#define EMS_PCI_CAN_CTRL_SIZE   0x200 /* memory size for each controller */
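+/*
+ * Controller #i is thus mapped at base + 0x400 + i * 0x200, i.e. channel 0
+ * at offset 0x400, channel 1 at offset 0x600, and so on.
+ */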
+
+#define EMS_PCI_BASE_SIZE  4096 /* size of controller area */
+
+static const struct pci_device_id ems_pci_tbl[] = {
+	/* CPC-PCI v1 */
+	{PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
+	/* CPC-PCI v2 */
+	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4000},
+	/* CPC-104P v2 */
+	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4002},
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, ems_pci_tbl);
+
+/*
+ * Helper to read internal registers from card logic (not CAN)
+ */
+static u8 ems_pci_v1_readb(struct ems_pci_card *card, unsigned int port)
+{
+	return readb((void __iomem *)card->base_addr + (port * 4));
+}
+
+static u8 ems_pci_v1_read_reg(struct rtcan_device *dev, int port)
+{
+	return readb((void __iomem *)dev->base_addr + (port * 4));
+}
+
+static void ems_pci_v1_write_reg(struct rtcan_device *dev,
+				 int port, u8 val)
+{
+	writeb(val, (void __iomem *)dev->base_addr + (port * 4));
+}
+
+static void ems_pci_v1_post_irq(struct rtcan_device *dev)
+{
+	struct ems_pci_card *card = (struct ems_pci_card *)dev->board_priv;
+
+	/* reset int flag of pita */
+	writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0,
+	       card->conf_addr + PITA2_ICR);
+}
+
+static u8 ems_pci_v2_read_reg(struct rtcan_device *dev, int port)
+{
+	return readb((void __iomem *)dev->base_addr + port);
+}
+
+static void ems_pci_v2_write_reg(struct rtcan_device *dev,
+				 int port, u8 val)
+{
+	writeb(val, (void __iomem *)dev->base_addr + port);
+}
+
+static void ems_pci_v2_post_irq(struct rtcan_device *dev)
+{
+	struct ems_pci_card *card = (struct ems_pci_card *)dev->board_priv;
+
+	writel(PLX_ICSR_ENA_CLR, card->conf_addr + PLX_ICSR);
+}
+
+/*
+ * Check if a CAN controller is present at the specified location
+ * by trying to switch it into PeliCAN mode
+ */
+static inline int ems_pci_check_chan(struct rtcan_device *dev)
+{
+	struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv;
+	unsigned char res;
+
+	/* Make sure SJA1000 is in reset mode */
+	chip->write_reg(dev, SJA_MOD, 1);
+
+	chip->write_reg(dev, SJA_CDR, SJA_CDR_CAN_MODE);
+
+	/* read reset-values */
+	res = chip->read_reg(dev, SJA_CDR);
+
+	if (res == SJA_CDR_CAN_MODE)
+		return 1;
+
+	return 0;
+}
+
+static void ems_pci_del_card(struct pci_dev *pdev)
+{
+	struct ems_pci_card *card = pci_get_drvdata(pdev);
+	struct rtcan_device *dev;
+	int i = 0;
+
+	for (i = 0; i < card->channels; i++) {
+		dev = card->rtcan_dev[i];
+
+		if (!dev)
+			continue;
+
+		dev_info(&pdev->dev, "Removing %s.\n", dev->name);
+		rtcan_sja1000_unregister(dev);
+		rtcan_dev_free(dev);
+	}
+
+	if (card->base_addr != NULL)
+		pci_iounmap(card->pci_dev, card->base_addr);
+
+	if (card->conf_addr != NULL)
+		pci_iounmap(card->pci_dev, card->conf_addr);
+
+	kfree(card);
+
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static void ems_pci_card_reset(struct ems_pci_card *card)
+{
+	/* Request board reset */
+	writeb(0, card->base_addr);
+}
+
+/*
+ * Probe PCI device for EMS CAN signature and register each available
+ * CAN channel with the RTCAN subsystem.
+ */
+static int ems_pci_add_card(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	struct rtcan_sja1000 *chip;
+	struct rtcan_device *dev;
+	struct ems_pci_card *card;
+	int max_chan, conf_size, base_bar;
+	int err, i;
+
+	if (!rtdm_available())
+		return -ENODEV;
+
+	/* Enabling PCI device */
+	if (pci_enable_device(pdev) < 0) {
+		dev_err(&pdev->dev, "Enabling PCI device failed\n");
+		return -ENODEV;
+	}
+
+	/* Allocating card structures to hold addresses, ... */
+	card = kzalloc(sizeof(*card), GFP_KERNEL);
+	if (card == NULL) {
+		pci_disable_device(pdev);
+		return -ENOMEM;
+	}
+
+	pci_set_drvdata(pdev, card);
+
+	card->pci_dev = pdev;
+
+	card->channels = 0;
+
+	if (pdev->vendor == PCI_VENDOR_ID_PLX) {
+		card->version = 2; /* CPC-PCI v2 */
+		max_chan = EMS_PCI_V2_MAX_CHAN;
+		base_bar = EMS_PCI_V2_BASE_BAR;
+		conf_size = EMS_PCI_V2_CONF_SIZE;
+	} else {
+		card->version = 1; /* CPC-PCI v1 */
+		max_chan = EMS_PCI_V1_MAX_CHAN;
+		base_bar = EMS_PCI_V1_BASE_BAR;
+		conf_size = EMS_PCI_V1_CONF_SIZE;
+	}
+
+	/* Remap configuration space and controller memory area */
+	card->conf_addr = pci_iomap(pdev, 0, conf_size);
+	if (card->conf_addr == NULL) {
+		err = -ENOMEM;
+		goto failure_cleanup;
+	}
+
+	card->base_addr = pci_iomap(pdev, base_bar, EMS_PCI_BASE_SIZE);
+	if (card->base_addr == NULL) {
+		err = -ENOMEM;
+		goto failure_cleanup;
+	}
+
+	if (card->version == 1) {
+		/* Configure PITA-2 parallel interface (enable MUX) */
+		writel(PITA2_MISC_CONFIG, card->conf_addr + PITA2_MISC);
+
+		/* Check for unique EMS CAN signature */
+		if (ems_pci_v1_readb(card, 0) != 0x55 ||
+		    ems_pci_v1_readb(card, 1) != 0xAA ||
+		    ems_pci_v1_readb(card, 2) != 0x01 ||
+		    ems_pci_v1_readb(card, 3) != 0xCB ||
+		    ems_pci_v1_readb(card, 4) != 0x11) {
+			dev_err(&pdev->dev,
+				"Not EMS Dr. Thomas Wuensche interface\n");
+			err = -ENODEV;
+			goto failure_cleanup;
+		}
+	}
+
+	ems_pci_card_reset(card);
+
+	for (i = 0; i < max_chan; i++) {
+		dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000), 0);
+		if (!dev) {
+			err = -ENOMEM;
+			goto failure_cleanup;
+		}
+
+		strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+		dev->board_name = ems_pci_board_name;
+		dev->board_priv = card;
+
+		card->rtcan_dev[i] = dev;
+		chip = card->rtcan_dev[i]->priv;
+		chip->irq_flags = RTDM_IRQTYPE_SHARED;
+		chip->irq_num = pdev->irq;
+
+		dev->base_addr = (unsigned long)card->base_addr +
+			EMS_PCI_CAN_BASE_OFFSET + (i * EMS_PCI_CAN_CTRL_SIZE);
+		if (card->version == 1) {
+			chip->read_reg  = ems_pci_v1_read_reg;
+			chip->write_reg = ems_pci_v1_write_reg;
+			chip->irq_ack = ems_pci_v1_post_irq;
+		} else {
+			chip->read_reg  = ems_pci_v2_read_reg;
+			chip->write_reg = ems_pci_v2_write_reg;
+			chip->irq_ack = ems_pci_v2_post_irq;
+		}
+
+		/* Check if channel is present */
+		if (ems_pci_check_chan(dev)) {
+			dev->can_sys_clock = EMS_PCI_CAN_CLOCK;
+			chip->ocr = EMS_PCI_OCR | SJA_OCR_MODE_NORMAL;
+			chip->cdr = EMS_PCI_CDR | SJA_CDR_CAN_MODE;
+
+			if (card->version == 1)
+				/* reset int flag of pita */
+				writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0,
+				       card->conf_addr + PITA2_ICR);
+			else
+				/* enable IRQ in PLX 9030 */
+				writel(PLX_ICSR_ENA_CLR,
+				       card->conf_addr + PLX_ICSR);
+
+			/* Register SJA1000 device */
+			err = rtcan_sja1000_register(dev);
+			if (err) {
+				dev_err(&pdev->dev, "Registering device failed "
+					"(err=%d)\n", err);
+				rtcan_dev_free(dev);
+				goto failure_cleanup;
+			}
+
+			card->channels++;
+
+			dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d "
+				 "registered as %s\n", i + 1,
+				 (void __iomem *)dev->base_addr, chip->irq_num,
+				 dev->name);
+		} else {
+			dev_err(&pdev->dev, "Channel #%d not detected\n",
+				i + 1);
+			rtcan_dev_free(dev);
+		}
+	}
+
+	if (!card->channels) {
+		err = -ENODEV;
+		goto failure_cleanup;
+	}
+
+	return 0;
+
+failure_cleanup:
+	dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err);
+
+	ems_pci_del_card(pdev);
+
+	return err;
+}
+
+static struct pci_driver ems_pci_driver = {
+	.name = RTCAN_DRV_NAME,
+	.id_table = ems_pci_tbl,
+	.probe = ems_pci_add_card,
+	.remove = ems_pci_del_card,
+};
+
+module_pci_driver(ems_pci_driver);
+++ linux-patched/drivers/xenomai/can/sja1000/rtcan_sja1000_regs.h	2022-03-21 12:58:29.308889751 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/sja1000/rtcan_mem.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005,2006 Sebastian Smolorz
+ *                        <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * Based on drivers/can/sja1000.h in linux-can.patch, a CAN socket
+ * framework for Linux:
+ *
+ * Copyright (C) 2005, Sascha Hauer, Pengutronix
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __SJA1000_REGS_H_
+#define __SJA1000_REGS_H_
+
+
+/* PeliCAN mode address map */
+
+/* reset and operating mode */
+#define SJA_MOD          0       /* Mode register                   */
+#define SJA_CMR          1       /* Command register                */
+#define SJA_SR           2       /* Status register                 */
+#define SJA_IR           3       /* Interrupt register              */
+#define SJA_IER          4       /* Interrupt enable register       */
+#define SJA_BTR0         6       /* Bus timing register 0           */
+#define SJA_BTR1         7       /* Bus timing register 1           */
+#define SJA_OCR          8       /* Output control register         */
+#define SJA_ALC         11       /* Arbitration lost capture        */
+#define SJA_ECC         12       /* Error code capture register     */
+#define SJA_RXERR       14       /* Receive error counter           */
+#define SJA_TXERR       15       /* Transmit error counter          */
+#define SJA_CDR         31       /* Clock divider register          */
+
+/* reset mode */
+#define SJA_ACR0        16       /* Acceptance code register 0      */
+#define SJA_ACR1        17       /* Acceptance code register 1      */
+#define SJA_ACR2        18       /* Acceptance code register 2      */
+#define SJA_ACR3        19       /* Acceptance code register 3      */
+#define SJA_AMR0        20       /* Acceptance mask register 0      */
+#define SJA_AMR1        21       /* Acceptance mask register 1      */
+#define SJA_AMR2        22       /* Acceptance mask register 2      */
+#define SJA_AMR3        23       /* Acceptance mask register 3      */
+
+/* operating mode */
+#define SJA_FIR         16       /* Frame information register      */
+#define SJA_ID1         17       /* Identifier 1                    */
+#define SJA_ID2         18       /* Identifier 2                    */
+#define SJA_ID3         19       /* Identifier 3 (EFF only)         */
+#define SJA_ID4         20       /* Identifier 4 (EFF only)         */
+
+#define SJA_DATA_SFF(x) (19 + (x)) /* Data registers in case of standard
+				    * frame format; 0 <= x <= 7 */
+#define SJA_DATA_EFF(x) (21 + (x)) /* Data registers in case of extended
+				    * frame format; 0 <= x <= 7 */
+
+/* Mode register */
+enum SJA1000_PELI_MOD {
+    SJA_MOD_RM           = 1,    /* Reset Mode                          */
+    SJA_MOD_LOM          = 1<<1, /* Listen Only Mode                    */
+    SJA_MOD_STM          = 1<<2, /* Self Test Mode                      */
+    SJA_MOD_AFM          = 1<<3, /* Acceptance Filter Mode              */
+    SJA_MOD_SM           = 1<<4  /* Sleep Mode                          */
+};
+
+/* Command register */
+enum SJA1000_PELI_CMR {
+    SJA_CMR_TR  = 1,             /* Transmission request                */
+    SJA_CMR_AT  = 1<<1,          /* Abort Transmission                  */
+    SJA_CMR_RRB = 1<<2,          /* Release Receive Buffer              */
+    SJA_CMR_CDO = 1<<3,          /* Clear Data Overrun                  */
+    SJA_CMR_SRR = 1<<4           /* Self reception request              */
+};
+
+/* Status register */
+enum SJA1000_PELI_SR {
+    SJA_SR_RBS           = 1,    /* Receive Buffer Status               */
+    SJA_SR_DOS           = 1<<1, /* Data Overrun Status                 */
+    SJA_SR_TBS           = 1<<2, /* Transmit Buffer Status              */
+    SJA_SR_ES            = 1<<6, /* Error Status                        */
+    SJA_SR_BS            = 1<<7  /* Bus Status                          */
+};
+
+/* Interrupt register */
+enum SJA1000_PELI_IR {
+    SJA_IR_RI           = 1,     /* Receive Interrupt                   */
+    SJA_IR_TI           = 1<<1,  /* Transmit Interrupt                  */
+    SJA_IR_EI           = 1<<2,  /* Error Warning Interrupt             */
+    SJA_IR_DOI          = 1<<3,  /* Data Overrun Interrupt              */
+    SJA_IR_WUI          = 1<<4,  /* Wake-Up Interrupt                   */
+    SJA_IR_EPI          = 1<<5,  /* Error Passive Interrupt             */
+    SJA_IR_ALI          = 1<<6,  /* Arbitration Lost Interrupt          */
+    SJA_IR_BEI          = 1<<7,  /* Bus Error Interrupt                 */
+};
+
+/* Interrupt enable register */
+enum SJA1000_PELI_IER {
+    SJA_IER_RIE         = 1,     /* Receive Interrupt Enable            */
+    SJA_IER_TIE         = 1<<1,  /* Transmit Interrupt Enable           */
+    SJA_IER_EIE         = 1<<2,  /* Error Warning Interrupt Enable      */
+    SJA_IER_DOIE        = 1<<3,  /* Data Overrun Interrupt Enable       */
+    SJA_IER_WUIE        = 1<<4,  /* Wake-Up Interrupt Enable            */
+    SJA_IER_EPIE        = 1<<5,  /* Error Passive Interrupt Enable      */
+    SJA_IER_ALIE        = 1<<6,  /* Arbitration Lost Interrupt Enable   */
+    SJA_IER_BEIE        = 1<<7,  /* Bus Error Interrupt Enable          */
+};
+
+/* Bus timing register 0 */
+enum SJA1000_PELI_BTR0 {
+    /* Period of the CAN system clock t_SCl
+     * (t_CLK = time period of XTAL frequency) */
+    SJA_BTR0_T_SCL_2_T_CLK  = 0,    /* t_SCl = 2 x t_CLK                 */
+    SJA_BTR0_T_SCL_4_T_CLK  = 1,    /* t_SCl = 4 x t_CLK                 */
+    SJA_BTR0_T_SCL_6_T_CLK  = 2,    /* t_SCl = 6 x t_CLK                 */
+    SJA_BTR0_T_SCL_8_T_CLK  = 3,    /* t_SCl = 8 x t_CLK                 */
+    SJA_BTR0_T_SCL_10_T_CLK = 4,    /* t_SCl = 10 x t_CLK                */
+    SJA_BTR0_T_SCL_12_T_CLK = 5,    /* t_SCl = 12 x t_CLK                */
+    SJA_BTR0_T_SCL_14_T_CLK = 6,    /* t_SCl = 14 x t_CLK                */
+    SJA_BTR0_T_SCL_16_T_CLK = 7,    /* t_SCl = 16 x t_CLK                */
+    SJA_BTR0_T_SCL_20_T_CLK = 9,    /* t_SCl = 20 x t_CLK                */
+    SJA_BTR0_T_SCL_40_T_CLK = 19,   /* t_SCl = 40 x t_CLK                */
+    SJA_BTR0_T_SCL_100_T_CLK = 49,  /* t_SCl = 100 x t_CLK               */
+
+};
+
+/* Bus timing register 1 */
+enum SJA1000_PELI_BTR1 {
+    /* Time segment 1 */
+    SJA_BTR1_T_SEG1_1_T_SCL = 0,    /* t_SEG1 = 1 x t_SCl               */
+    SJA_BTR1_T_SEG1_2_T_SCL = 1,    /* t_SEG1 = 2 x t_SCl               */
+    SJA_BTR1_T_SEG1_3_T_SCL = 2,    /* t_SEG1 = 3 x t_SCl               */
+    SJA_BTR1_T_SEG1_4_T_SCL = 3,    /* t_SEG1 = 4 x t_SCl               */
+    SJA_BTR1_T_SEG1_5_T_SCL = 4,    /* t_SEG1 = 5 x t_SCl               */
+    SJA_BTR1_T_SEG1_6_T_SCL = 5,    /* t_SEG1 = 6 x t_SCl               */
+    SJA_BTR1_T_SEG1_7_T_SCL = 6,    /* t_SEG1 = 7 x t_SCl               */
+    SJA_BTR1_T_SEG1_8_T_SCL = 7,    /* t_SEG1 = 8 x t_SCl               */
+    /* Time segment 2 */
+    SJA_BTR1_T_SEG2_1_T_SCL = 0<<4, /* t_SEG2 = 1 x t_SCl               */
+    SJA_BTR1_T_SEG2_2_T_SCL = 1<<4, /* t_SEG2 = 2 x t_SCl               */
+    SJA_BTR1_T_SEG2_3_T_SCL = 2<<4, /* t_SEG2 = 3 x t_SCl               */
+    SJA_BTR1_T_SEG2_4_T_SCL = 3<<4, /* t_SEG2 = 4 x t_SCl               */
+    SJA_BTR1_T_SEG2_5_T_SCL = 4<<4, /* t_SEG2 = 5 x t_SCl               */
+    SJA_BTR1_T_SEG2_6_T_SCL = 5<<4, /* t_SEG2 = 6 x t_SCl               */
+    SJA_BTR1_T_SEG2_7_T_SCL = 6<<4, /* t_SEG2 = 7 x t_SCl               */
+    SJA_BTR1_T_SEG2_8_T_SCL = 7<<4, /* t_SEG2 = 8 x t_SCl               */
+};
+
+/* One bit time = t_SCl + t_SEG1 + t_SEG2 */
+
+
+/* Output control register */
+enum SJA1000_PELI_OCR {
+    SJA_OCR_MODE_BIPHASE = 0,
+    SJA_OCR_MODE_TEST    = 1,
+    SJA_OCR_MODE_NORMAL  = 2,
+    SJA_OCR_MODE_CLOCK   = 3,
+    SJA_OCR_TX0_INVERT   = 1<<2,
+    SJA_OCR_TX0_PULLDOWN = 1<<3,
+    SJA_OCR_TX0_PULLUP   = 2<<3,
+    SJA_OCR_TX0_PUSHPULL = 3<<3,
+    SJA_OCR_TX1_INVERT   = 1<<5,
+    SJA_OCR_TX1_PULLDOWN = 1<<6,
+    SJA_OCR_TX1_PULLUP   = 2<<6,
+    SJA_OCR_TX1_PUSHPULL = 3<<6
+};
+
+/* Error code capture register */
+enum SJA1000_PELI_ECC {
+    /* The segmentation field gives information about the location of
+     * errors on the bus */
+    SJA_ECC_SEG_MASK     = 31,   /* Segmentation field mask             */
+    SJA_ECC_DIR          = 1<<5, /* Transfer direction                  */
+    SJA_ECC_ERR_BIT      = 0<<6,
+    SJA_ECC_ERR_FORM     = 1<<6,
+    SJA_ECC_ERR_STUFF    = 2<<6,
+    SJA_ECC_ERR_MASK     = 3<<6  /* Error code mask                     */
+};
+
+/* Frame information register */
+enum SJA1000_PELI_FIR {
+    SJA_FIR_DLC_MASK     = 15,   /* Data length code mask               */
+    SJA_FIR_RTR          = 1<<6, /* Remote transmission request         */
+    SJA_FIR_EFF          = 1<<7  /* Extended frame format               */
+};
+
+/* Clock divider register */
+enum SJA1000_PELI_CDR {
+    SJA_CDR_CLKOUT_MASK  = 0x07,
+    SJA_CDR_CLK_OFF      = 1<<3, /* Clock off (CLKOUT pin)              */
+    SJA_CDR_CBP          = 1<<6, /* CAN input comparator bypass         */
+    SJA_CDR_CAN_MODE     = 1<<7  /* CAN mode: 1 = PeliCAN               */
+};
+
+#endif  /* __SJA1000_REGS_H_ */
+++ linux-patched/drivers/xenomai/can/sja1000/rtcan_mem.c	2022-03-21 12:58:29.304889790 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/sja1000/rtcan_sja1000.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Matthias Fuchs <matthias.fuchs@esd-electronics.com>,
+ *                    Jan Kiszka <jan.kiszka@web.de>
+ *
+ * RTCAN driver for memory mapped SJA1000 CAN controller
+ * This code has been tested on esd's CPCI405/EPPC405 PPC405 systems.
+ *
+ * This driver is derived from the rtcan-isa driver by
+ * Wolfgang Grandegger and Sebastian Smolorz.
+ *
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                          <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <rtdm/driver.h>
+
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME    "rtcan%d"
+#define RTCAN_DRV_NAME    "sja1000-mem"
+
+#define RTCAN_MEM_MAX_DEV 4
+
+static char *mem_board_name = "mem mapped";
+
+MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd-electronics.com>");
+MODULE_DESCRIPTION("RTCAN driver for memory mapped SJA1000 controller");
+MODULE_LICENSE("GPL");
+
+static u32 mem[RTCAN_MEM_MAX_DEV];
+static int irq[RTCAN_MEM_MAX_DEV];
+static u32 can_clock[RTCAN_MEM_MAX_DEV];
+static u8 ocr[RTCAN_MEM_MAX_DEV];
+static u8 cdr[RTCAN_MEM_MAX_DEV];
+
+module_param_array(mem, uint, NULL, 0444);
+module_param_array(irq, int, NULL, 0444);
+module_param_array(can_clock, uint, NULL, 0444);
+module_param_array(ocr, byte, NULL, 0444);
+module_param_array(cdr, byte, NULL, 0444);
+
+MODULE_PARM_DESC(mem, "The io-memory address");
+MODULE_PARM_DESC(irq, "The interrupt number");
+MODULE_PARM_DESC(can_clock, "External clock frequency (default 16 MHz)");
+MODULE_PARM_DESC(ocr, "Value of output control register (default 0x1a)");
+MODULE_PARM_DESC(cdr, "Value of clock divider register (default 0xc8)");
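+/*
+ * Example invocation (addresses and IRQ lines are purely illustrative):
+ *   insmod <this module>.ko mem=0xd0000000,0xd0001000 irq=5,9
+ */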
+
+#define RTCAN_MEM_RANGE 0x80
+
+struct rtcan_mem
+{
+	volatile void __iomem *vmem;
+};
+
+static struct rtcan_device *rtcan_mem_devs[RTCAN_MEM_MAX_DEV];
+
+static u8 rtcan_mem_readreg(struct rtcan_device *dev, int reg)
+{
+	struct rtcan_mem *board = (struct rtcan_mem *)dev->board_priv;
+	return readb(board->vmem + reg);
+}
+
+static void rtcan_mem_writereg(struct rtcan_device *dev, int reg, u8 val)
+{
+	struct rtcan_mem *board = (struct rtcan_mem *)dev->board_priv;
+	writeb(val, board->vmem + reg);
+}
+
+int __init rtcan_mem_init_one(int idx)
+{
+	struct rtcan_device *dev;
+	struct rtcan_sja1000 *chip;
+	struct rtcan_mem *board;
+	int ret;
+
+	if ((dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000),
+				   sizeof(struct rtcan_mem))) == NULL)
+		return -ENOMEM;
+
+	chip = (struct rtcan_sja1000 *)dev->priv;
+	board = (struct rtcan_mem *)dev->board_priv;
+
+	dev->board_name = mem_board_name;
+
+	chip->irq_num = irq[idx];
+	chip->irq_flags = RTDM_IRQTYPE_SHARED;
+	chip->read_reg = rtcan_mem_readreg;
+	chip->write_reg = rtcan_mem_writereg;
+
+	if (!request_mem_region(mem[idx], RTCAN_MEM_RANGE, RTCAN_DRV_NAME)) {
+		ret = -EBUSY;
+		goto out_dev_free;
+	}
+
+	/* ioremap io memory */
+	if (!(board->vmem = ioremap(mem[idx], RTCAN_MEM_RANGE))) {
+		ret = -EBUSY;
+		goto out_release_mem;
+	}
+
+	/* Clock frequency in Hz */
+	if (can_clock[idx])
+		dev->can_sys_clock = can_clock[idx] / 2;
+	else
+		dev->can_sys_clock = 8000000; /* 16/2 MHz */
+
+	/* Output control register */
+	if (ocr[idx])
+		chip->ocr = ocr[idx];
+	else
+		chip->ocr = SJA_OCR_MODE_NORMAL | SJA_OCR_TX0_PUSHPULL;
+
+	if (cdr[idx])
+		chip->cdr = cdr[idx];
+	else
+		chip->cdr = SJA_CDR_CAN_MODE | SJA_CDR_CLK_OFF | SJA_CDR_CBP;
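+	/* With no module parameters given, the defaults above evaluate to
+	 * ocr = 0x1a and cdr = 0xc8, matching the parameter descriptions. */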
+
+	strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+	ret = rtcan_sja1000_register(dev);
+	if (ret) {
+		printk(KERN_ERR "ERROR %d while trying to register SJA1000 "
+		       "device!\n", ret);
+		goto out_iounmap;
+	}
+
+	rtcan_mem_devs[idx] = dev;
+	return 0;
+
+ out_iounmap:
+	iounmap((void *)board->vmem);
+
+ out_release_mem:
+	release_mem_region(mem[idx], RTCAN_MEM_RANGE);
+
+ out_dev_free:
+	rtcan_dev_free(dev);
+
+	return ret;
+}
+
+static void rtcan_mem_exit(void);
+
+/** Init module */
+static int __init rtcan_mem_init(void)
+{
+	int i, err;
+	int devices = 0;
+
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	for (i = 0; i < RTCAN_MEM_MAX_DEV && mem[i] != 0; i++) {
+		err = rtcan_mem_init_one(i);
+		if (err) {
+			rtcan_mem_exit();
+			return err;
+		}
+		devices++;
+	}
+	if (devices)
+		return 0;
+
+	printk(KERN_ERR "ERROR! No devices specified! "
+	       "Use mem=<port1>[,...] irq=<irq1>[,...]\n");
+	return -EINVAL;
+}
+
+
+/** Cleanup module */
+static void rtcan_mem_exit(void)
+{
+	int i;
+	struct rtcan_device *dev;
+	volatile void __iomem *vmem;
+
+	for (i = 0; i < RTCAN_MEM_MAX_DEV; i++) {
+		dev = rtcan_mem_devs[i];
+		if (!dev)
+			continue;
+		vmem = ((struct rtcan_mem *)dev->board_priv)->vmem;
+		rtcan_sja1000_unregister(dev);
+		iounmap((void *)vmem);
+		release_mem_region(mem[i], RTCAN_MEM_RANGE);
+		rtcan_dev_free(dev);
+	}
+}
+
+module_init(rtcan_mem_init);
+module_exit(rtcan_mem_exit);
+++ linux-patched/drivers/xenomai/can/sja1000/rtcan_sja1000.c	2022-03-21 12:58:29.299889839 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/sja1000/rtcan_sja1000.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                          <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * Parts of this software are based on the following:
+ *
+ * - RTAI CAN device driver for SJA1000 controllers by Jan Kiszka
+ *
+ * - linux-can.patch, a CAN socket framework for Linux,
+ *   Copyright (C) 2004, 2005, Robert Schwebel, Benedikt Spranger,
+ *   Marc Kleine-Budde, Sascha Hauer, Pengutronix
+ *
+ * - RTnet (www.rtnet.org)
+ *
+ * - serial device driver and profile included in Xenomai (RTDM),
+ *   Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+
+#include <rtdm/driver.h>
+#include <rtdm/can.h>
+
+#include <rtcan_socket.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_list.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+
+#define BTR0_BRP_MASK	0x3f
+#define BTR0_SJW_SHIFT	6
+#define BTR0_SJW_MASK	(0x3 << BTR0_SJW_SHIFT)
+
+#define BTR1_TSEG1_MASK  0xf
+#define BTR1_TSEG2_SHIFT 4
+#define BTR1_TSEG2_MASK  (0x7 << BTR1_TSEG2_SHIFT)
+#define BTR1_SAM_SHIFT   7
+
+#define BTR0_SET_BRP(brp)     (((brp) - 1) & BTR0_BRP_MASK)
+#define BTR0_SET_SJW(sjw)     ((((sjw) - 1) << BTR0_SJW_SHIFT) & BTR0_SJW_MASK)
+
+#define BTR1_SET_TSEG1(tseg1) (((tseg1) - 1) & BTR1_TSEG1_MASK)
+#define BTR1_SET_TSEG2(tseg2) ((((tseg2) - 1) << BTR1_TSEG2_SHIFT) & BTR1_TSEG2_MASK)
+#define BTR1_SET_SAM(sam)     (((sam) & 1) << BTR1_SAM_SHIFT)
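+/*
+ * Example: with an 8 MHz CAN system clock, brp=4, sjw=1, tseg1=13 and
+ * tseg2=2 give 1 + 13 + 2 = 16 time quanta per bit, i.e. 125 kbit/s:
+ *   BTR0_SET_BRP(4) | BTR0_SET_SJW(1)                        = 0x03
+ *   BTR1_SET_TSEG1(13) | BTR1_SET_TSEG2(2) | BTR1_SET_SAM(0) = 0x1c
+ */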
+
+/* Value for the interrupt enable register */
+#define SJA1000_IER                 (SJA_IER_RIE | SJA_IER_TIE | \
+				     SJA_IER_EIE | SJA_IER_WUIE | \
+				     SJA_IER_EPIE | SJA_IER_BEIE | \
+				     SJA_IER_ALIE | SJA_IER_DOIE)
+
+static char *sja_ctrl_name = "SJA1000";
+
+#define STATE_OPERATING(state) \
+    ((state) != CAN_STATE_STOPPED && (state) != CAN_STATE_BUS_OFF)
+
+#define STATE_RESET(state) \
+    ((state) == CAN_STATE_STOPPED || (state) == CAN_STATE_BUS_OFF)
+
+
+MODULE_AUTHOR("Sebastian.Smolorz@stud.uni-hannover.de");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RT-Socket-CAN driver for SJA1000");
+
+#ifndef CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+static struct can_bittiming_const sja1000_bittiming_const = {
+	.name = "sja1000",
+	.tseg1_min = 1,
+	.tseg1_max = 16,
+	.tseg2_min = 1,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 64,
+	.brp_inc = 1,
+};
+#endif
+
+static inline void rtcan_sja_rx_interrupt(struct rtcan_device *dev,
+					  struct rtcan_skb *skb)
+{
+    int i;
+    /* "Real" size of the payload */
+    u8 size;
+    /* Content of frame information register */
+    u8 fir;
+    /* Ring buffer frame within skb */
+    struct rtcan_rb_frame *frame = &skb->rb_frame;
+    struct rtcan_sja1000 *chip = dev->priv;
+
+    /* Read out frame information register */
+    fir = chip->read_reg(dev, SJA_FIR);
+
+    /* Extract data length code */
+    frame->can_dlc = fir & SJA_FIR_DLC_MASK;
+
+    /* If DLC exceeds 8 bytes adjust it to 8 (for the payload size) */
+    size = (frame->can_dlc > 8) ? 8 : frame->can_dlc;
+
+
+    if (fir & SJA_FIR_EFF) {
+	/* Extended frame */
+	frame->can_id = CAN_EFF_FLAG;
+
+	/* Read ID */
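+	/*
+	 * The 29-bit extended identifier is spread over ID1..ID4:
+	 * ID1 = bits 28..21, ID2 = bits 20..13, ID3 = bits 12..5,
+	 * and the upper five bits of ID4 = bits 4..0.
+	 */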
+	frame->can_id |= chip->read_reg(dev, SJA_ID1) << 21;
+	frame->can_id |= chip->read_reg(dev, SJA_ID2) << 13;
+	frame->can_id |= chip->read_reg(dev, SJA_ID3) << 5;
+	frame->can_id |= chip->read_reg(dev, SJA_ID4) >> 3;
+
+	if (!(fir & SJA_FIR_RTR)) {
+	    /* No RTR, read data bytes */
+	    for (i = 0; i < size; i++)
+		frame->data[i] = chip->read_reg(dev,
+						SJA_DATA_EFF(i));
+	}
+
+    } else {
+	/* Standard frame */
+
+	/* Read ID */
+	frame->can_id  = chip->read_reg(dev, SJA_ID1) << 3;
+	frame->can_id |= chip->read_reg(dev, SJA_ID2) >> 5;
+
+	if (!(fir & SJA_FIR_RTR)) {
+	    /* No RTR, read data bytes */
+	    for (i = 0; i < size; i++)
+		frame->data[i] = chip->read_reg(dev, SJA_DATA_SFF(i));
+	}
+    }
+
+    /* Release Receive Buffer */
+    chip->write_reg(dev, SJA_CMR, SJA_CMR_RRB);
+
+
+    /* RTR? */
+    if (fir & SJA_FIR_RTR) {
+	frame->can_id |= CAN_RTR_FLAG;
+	skb->rb_frame_size = EMPTY_RB_FRAME_SIZE;
+    } else
+	skb->rb_frame_size = EMPTY_RB_FRAME_SIZE + size;
+
+    /* Store the interface index */
+    frame->can_ifindex = dev->ifindex;
+}
+
+
+static inline void rtcan_sja_err_interrupt(struct rtcan_device *dev,
+					   struct rtcan_sja1000 *chip,
+					   struct rtcan_skb *skb,
+					   u8 irq_source)
+{
+    struct rtcan_rb_frame *frame = &skb->rb_frame;
+    can_state_t state = dev->state;
+    u8 status, txerr, rxerr;
+
+    status = chip->read_reg(dev, SJA_SR);
+    txerr = chip->read_reg(dev, SJA_TXERR);
+    rxerr = chip->read_reg(dev, SJA_RXERR);
+
+    skb->rb_frame_size = EMPTY_RB_FRAME_SIZE + CAN_ERR_DLC;
+
+    frame->can_id = CAN_ERR_FLAG;
+    frame->can_dlc = CAN_ERR_DLC;
+
+    memset(&frame->data[0], 0, frame->can_dlc);
+
+    /* Data overrun interrupt? */
+    if (irq_source & SJA_IR_DOI) {
+	frame->can_id |= CAN_ERR_CRTL;
+	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+    }
+
+    /* Arbitration lost interrupt? */
+    if (irq_source & SJA_IR_ALI) {
+	frame->can_id |= CAN_ERR_LOSTARB;
+	frame->data[0] = chip->read_reg(dev, SJA_ALC)  & 0x1f;
+    }
+
+    /* Bus error interrupt? */
+    if (irq_source & SJA_IR_BEI) {
+	u8 ecc = chip->read_reg(dev, SJA_ECC);
+
+	frame->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+	switch (ecc & SJA_ECC_ERR_MASK) {
+	case SJA_ECC_ERR_BIT:
+	    frame->data[2] |= CAN_ERR_PROT_BIT;
+	    break;
+	case SJA_ECC_ERR_FORM:
+	    frame->data[2] |= CAN_ERR_PROT_FORM;
+	    break;
+	case SJA_ECC_ERR_STUFF:
+	    frame->data[2] |= CAN_ERR_PROT_STUFF;
+	    break;
+	default:
+	    frame->data[2] |= CAN_ERR_PROT_UNSPEC;
+	    frame->data[3] = ecc & SJA_ECC_SEG_MASK;
+	    break;
+	}
+	/* Error occurred during transmission? */
+	if ((ecc & SJA_ECC_DIR) == 0)
+	    frame->data[2] |= CAN_ERR_PROT_TX;
+    }
+
+    /* Error passive interrupt? */
+    if (unlikely(irq_source & SJA_IR_EPI)) {
+	if (state == CAN_STATE_BUS_WARNING) {
+	    state = CAN_STATE_BUS_PASSIVE;
+	} else {
+	    state = CAN_STATE_BUS_WARNING;
+	}
+    }
+
+    /* Error warning interrupt? */
+    if (irq_source & SJA_IR_EI) {
+
+	/* Test bus status (bus-off condition) */
+	if (status & SJA_SR_BS) {
+	    /* Bus-off */
+	    state = CAN_STATE_BUS_OFF;
+	    frame->can_id |= CAN_ERR_BUSOFF;
+	    /* Only allow error warning interrupts
+	       (otherwise an EPI would arise during bus-off
+	       recovery) */
+	    chip->write_reg(dev, SJA_IER, SJA_IER_EIE);
+	    /* Wake up waiting senders */
+	    rtdm_sem_destroy(&dev->tx_sem);
+	}
+
+	/* Test error status (error warning limit) */
+	else if (status & SJA_SR_ES)
+	    /* error warning limit reached */
+	    state = CAN_STATE_BUS_WARNING;
+
+	/* Re-entrance into error active state from bus-warn? */
+	else if (state == CAN_STATE_BUS_WARNING)
+	    state = CAN_STATE_ACTIVE;
+
+	else
+	    /* Bus-off recovery complete, enable all interrupts again */
+	    chip->write_reg(dev, SJA_IER, SJA1000_IER);
+    }
+
+    if (state != dev->state &&
+	(state == CAN_STATE_BUS_WARNING || state == CAN_STATE_BUS_PASSIVE)) {
+	frame->can_id |= CAN_ERR_PROT;
+	if (txerr > rxerr)
+	    frame->data[1] = CAN_ERR_CRTL_TX_WARNING;
+	else
+	    frame->data[1] = CAN_ERR_CRTL_RX_WARNING;
+    }
+
+    dev->state = state;
+    frame->can_ifindex = dev->ifindex;
+}
+
+static int rtcan_sja_interrupt(rtdm_irq_t *irq_handle)
+{
+    struct rtcan_device *dev;
+    struct rtcan_sja1000 *chip;
+    struct rtcan_skb skb;
+    int recv_lock_free = 1;
+    int irq_count = 0;
+    int ret = RTDM_IRQ_NONE;
+    u8 irq_source;
+
+
+    /* Get the device which registered this IRQ. */
+    dev = (struct rtcan_device *)rtdm_irq_get_arg(irq_handle, void);
+    chip = (struct rtcan_sja1000 *)dev->priv;
+
+    /* Take spinlock protecting HW register access and device structures. */
+    rtdm_lock_get(&dev->device_lock);
+
+    /* Loop as long as the device reports an event */
+    while ((irq_source = chip->read_reg(dev, SJA_IR))) {
+	ret = RTDM_IRQ_HANDLED;
+	irq_count++;
+
+	/* Now look up which interrupts appeared */
+
+	/* Wake-up interrupt? */
+	if (irq_source & SJA_IR_WUI)
+	    dev->state = dev->state_before_sleep;
+
+	/* Error Interrupt? */
+	if (irq_source & (SJA_IR_EI | SJA_IR_DOI | SJA_IR_EPI |
+			  SJA_IR_ALI | SJA_IR_BEI)) {
+
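+	    /*
+	     * Deliver a bus error frame only if bus error reporting was
+	     * (re-)enabled via rtcan_sja_enable_bus_err(); bus_err_on is
+	     * decremented here so that only one frame is passed on per
+	     * enable, throttling bus error interrupt storms.
+	     */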
+	    /* Check error condition and fill error frame */
+	    if (!((irq_source & SJA_IR_BEI) && (chip->bus_err_on-- < 2))) {
+		rtcan_sja_err_interrupt(dev, chip, &skb, irq_source);
+
+		if (recv_lock_free) {
+		    recv_lock_free = 0;
+		    rtdm_lock_get(&rtcan_recv_list_lock);
+		    rtdm_lock_get(&rtcan_socket_lock);
+		}
+		/* Pass error frame out to the sockets */
+		rtcan_rcv(dev, &skb);
+	    }
+	}
+
+	/* Transmit Interrupt? */
+	if (irq_source & SJA_IR_TI) {
+	    /* Wake up a sender */
+	    rtdm_sem_up(&dev->tx_sem);
+
+	    if (rtcan_loopback_pending(dev)) {
+
+		if (recv_lock_free) {
+		    recv_lock_free = 0;
+		    rtdm_lock_get(&rtcan_recv_list_lock);
+		    rtdm_lock_get(&rtcan_socket_lock);
+		}
+
+		rtcan_loopback(dev);
+	    }
+	}
+
+	/* Receive Interrupt? */
+	if (irq_source & SJA_IR_RI) {
+
+	    /* Read out HW registers */
+	    rtcan_sja_rx_interrupt(dev, &skb);
+
+	    /* Take more locks. Ensure that they are taken and
+	     * released only once in the IRQ handler. */
+	    /* WARNING: Nested locks are dangerous! But they are
+	     * nested only in this routine so a deadlock should
+	     * not be possible. */
+	    if (recv_lock_free) {
+		recv_lock_free = 0;
+		rtdm_lock_get(&rtcan_recv_list_lock);
+		rtdm_lock_get(&rtcan_socket_lock);
+	    }
+
+	    /* Pass received frame out to the sockets */
+	    rtcan_rcv(dev, &skb);
+	}
+    }
+
+    if (chip->irq_ack)
+	chip->irq_ack(dev);
+
+    /* Release spinlocks */
+    if (!recv_lock_free) {
+	rtdm_lock_put(&rtcan_socket_lock);
+	rtdm_lock_put(&rtcan_recv_list_lock);
+    }
+    rtdm_lock_put(&dev->device_lock);
+
+    return ret;
+}
+
+
+
+/*
+ * Inline function to decide if controller is operating
+ *
+ * Catch the very unlikely case that a previous attempt to set stop mode
+ * returned without success, but the controller went into reset mode in
+ * the meantime.
+ */
+static inline int rtcan_sja_is_operating(struct rtcan_device *dev,
+					 can_state_t *state)
+{
+    int is_operating = STATE_OPERATING(*state);
+    struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv;
+
+    if (unlikely(is_operating && chip->read_reg(dev, SJA_MOD) & SJA_MOD_RM)) {
+	*state = CAN_STATE_STOPPED;
+	is_operating = 0;
+	/* Disable the controller's interrupts */
+	chip->write_reg(dev, SJA_IER, 0x00);
+	/* Wake up waiting senders */
+	rtdm_sem_destroy(&dev->tx_sem);
+    }
+
+    return is_operating;
+}
+
+
+/*
+ * Set controller into reset mode.
+ *
+ * According to the SJA1000 specification, it is necessary to check the
+ * reset mode bit in PeliCAN mode after having set it. So we do. But if
+ * using an ISA card like the PHYTEC eNET card, this should not be necessary
+ * because the CAN controller clock of this card (16 MHz) is twice as high
+ * as the ISA bus clock.
+ */
+static int rtcan_sja_mode_stop(struct rtcan_device *dev,
+			       rtdm_lockctx_t *lock_ctx)
+{
+    int ret = 0;
+    /* Max. 50 loops busy sleep. If the controller is stopped while in
+     * sleep mode 20-40 loops are needed (tested on PHYTEC eNET). */
+    int wait_loop = 50;
+    can_state_t state;
+    struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv;
+
+    state = dev->state;
+    /* If controller is not operating anyway, go out */
+    if (STATE_RESET(state))
+	goto out;
+
+    /* Disable the controller's interrupts */
+    chip->write_reg(dev, SJA_IER, 0x00);
+
+    /* Set reset mode bit */
+    chip->write_reg(dev, SJA_MOD, SJA_MOD_RM);
+
+    /* Read reset mode bit, multiple tests */
+    do {
+	if (chip->read_reg(dev, SJA_MOD) & SJA_MOD_RM)
+	    break;
+
+	if (lock_ctx)
+	    rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+	/* Busy sleep 1 microsecond */
+	rtdm_task_busy_sleep(1000);
+	if (lock_ctx)
+	    rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+    } while(--wait_loop);
+
+
+    if (wait_loop) {
+	/* Volatile state could have changed while we slept busy. */
+	dev->state = CAN_STATE_STOPPED;
+	/* Wake up waiting senders */
+	rtdm_sem_destroy(&dev->tx_sem);
+    } else {
+	ret = -EAGAIN;
+	/* Enable interrupts again as we did not succeed */
+	chip->write_reg(dev, SJA_IER, SJA1000_IER);
+    }
+
+ out:
+    return ret;
+}
+
+
+
+/*
+ * Set controller into operating mode.
+ *
+ * If coming from CAN_STATE_SLEEPING, the controller must wait
+ * some time to avoid bus errors. Measured on a PHYTEC eNET card,
+ * this time was 110 microseconds.
+ */
+static int rtcan_sja_mode_start(struct rtcan_device *dev,
+				rtdm_lockctx_t *lock_ctx)
+{
+    int ret = 0;
+    u8 mod_reg;
+    struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv;
+
+    /* Keep in mind that the state field in the device structure is volatile,
+     * so access to it will not be optimized away by the compiler. */
+
+    mod_reg = 0;
+    if (dev->ctrl_mode & CAN_CTRLMODE_LISTENONLY)
+	mod_reg |= SJA_MOD_LOM;
+    if (dev->ctrl_mode & CAN_CTRLMODE_LOOPBACK)
+	mod_reg |= SJA_MOD_STM;
+
+    switch (dev->state) {
+
+    case CAN_STATE_ACTIVE:
+    case CAN_STATE_BUS_WARNING:
+    case CAN_STATE_BUS_PASSIVE:
+	break;
+
+    case CAN_STATE_STOPPED:
+	/* Clear error counters */
+	chip->write_reg(dev, SJA_RXERR , 0);
+	chip->write_reg(dev, SJA_TXERR , 0);
+	/* Clear error code capture (i.e. read it) */
+	chip->read_reg(dev, SJA_ECC);
+	/* Set error active state */
+	dev->state = CAN_STATE_ACTIVE;
+	/* Set up sender "mutex" */
+	rtdm_sem_init(&dev->tx_sem, 1);
+	/* Enable interrupts */
+	chip->write_reg(dev, SJA_IER, SJA1000_IER);
+
+	/* Clear reset mode bit in SJA1000 */
+	chip->write_reg(dev, SJA_MOD, mod_reg);
+
+	break;
+
+    case CAN_STATE_SLEEPING:
+	/* Trigger Wake-up interrupt */
+	chip->write_reg(dev, SJA_MOD, mod_reg);
+
+	/* Ok, coming from sleep mode is problematic. We have to wait
+	 * for the SJA1000 to get on both feet again. */
+	rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+	rtdm_task_busy_sleep(110000);
+	rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+
+	/* Meanwhile, the Wake-up interrupt was serviced and has set the
+	 * right state. As we don't want to set it back, jump out. */
+	goto out;
+
+	break;
+
+    case CAN_STATE_BUS_OFF:
+	/* Trigger bus-off recovery */
+	chip->write_reg(dev, SJA_MOD, mod_reg);
+	/* Set up sender "mutex" */
+	rtdm_sem_init(&dev->tx_sem, 1);
+	/* Set error active state */
+	dev->state = CAN_STATE_ACTIVE;
+
+	break;
+
+    default:
+	/* Never reached, but we don't want nasty compiler warnings ... */
+	break;
+    }
+
+ out:
+    return ret;
+}
+
+can_state_t rtcan_sja_get_state(struct rtcan_device *dev)
+{
+    can_state_t state = dev->state;
+    rtcan_sja_is_operating(dev, &state);
+    return state;
+}
+
+int rtcan_sja_set_mode(struct rtcan_device *dev,
+		       can_mode_t mode,
+		       rtdm_lockctx_t *lock_ctx)
+{
+    int ret = 0;
+    can_state_t state;
+    struct rtcan_sja1000 *chip = (struct rtcan_sja1000*)dev->priv;
+
+    switch (mode) {
+
+    case CAN_MODE_STOP:
+	ret = rtcan_sja_mode_stop(dev, lock_ctx);
+	break;
+
+    case CAN_MODE_START:
+	ret = rtcan_sja_mode_start(dev, lock_ctx);
+	break;
+
+    case CAN_MODE_SLEEP:
+
+	state = dev->state;
+
+	/* Controller must operate, otherwise go out */
+	if (!rtcan_sja_is_operating(dev, &state)) {
+	    ret = -ENETDOWN;
+	    goto mode_sleep_out;
+	}
+
+	/* Is the controller already sleeping? If so, go out */
+	if (state == CAN_STATE_SLEEPING)
+	    goto mode_sleep_out;
+
+	/* Remember into which state to return when we
+	 * wake up */
+	dev->state_before_sleep = state;
+
+	/* Let's take a nap. (Now I REALLY understand
+	 * the meaning of interrupts ...) */
+	state = CAN_STATE_SLEEPING;
+	chip->write_reg(dev, SJA_MOD,
+			chip->read_reg(dev, SJA_MOD) | SJA_MOD_SM);
+
+    mode_sleep_out:
+	dev->state = state;
+	break;
+
+    default:
+	ret = -EOPNOTSUPP;
+	break;
+    }
+
+    return ret;
+}
+
+int rtcan_sja_set_bit_time(struct rtcan_device *dev,
+			   struct can_bittime *bit_time,
+			   rtdm_lockctx_t *lock_ctx)
+{
+    struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv;
+    u8 btr0, btr1;
+
+    switch (bit_time->type) {
+    case CAN_BITTIME_BTR:
+	btr0 = bit_time->btr.btr0;
+	btr1 = bit_time->btr.btr1;
+	break;
+
+    case CAN_BITTIME_STD:
+	btr0 = (BTR0_SET_BRP(bit_time->std.brp) |
+		BTR0_SET_SJW(bit_time->std.sjw));
+	btr1 = (BTR1_SET_TSEG1(bit_time->std.prop_seg +
+			       bit_time->std.phase_seg1) |
+		BTR1_SET_TSEG2(bit_time->std.phase_seg2) |
+		BTR1_SET_SAM(bit_time->std.sam));
+
+	break;
+
+    default:
+	return -EINVAL;
+    }
+
+    printk("%s: btr0=%#x btr1=%#x\n", __func__, btr0, btr1);
+    chip->write_reg(dev, SJA_BTR0, btr0);
+    chip->write_reg(dev, SJA_BTR1, btr1);
+
+    return 0;
+}
+
+void rtcan_sja_enable_bus_err(struct rtcan_device *dev)
+{
+    struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv;
+
+    if (chip->bus_err_on < 2) {
+	if (chip->bus_err_on < 1)
+	    chip->read_reg(dev, SJA_ECC);
+	chip->bus_err_on = 2;
+    }
+}
+
+/*
+ *  Start a transmission to a SJA1000 device
+ */
+static int rtcan_sja_start_xmit(struct rtcan_device *dev,
+				can_frame_t *frame)
+{
+    int             i;
+    /* "Real" size of the payload */
+    u8   size;
+    /* Content of frame information register */
+    u8   fir;
+    struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv;
+
+    /* Get DLC */
+    fir  = frame->can_dlc;
+
+    /* If DLC exceeds 8 bytes adjust it to 8 (for the payload) */
+    size = (fir > 8) ? 8 : fir;
+
+
+    if (frame->can_id & CAN_EFF_FLAG) {
+	/* Send extended frame */
+	fir |= SJA_FIR_EFF;
+
+	/* Write ID */
+	chip->write_reg(dev, SJA_ID1, frame->can_id >> 21);
+	chip->write_reg(dev, SJA_ID2, frame->can_id >> 13);
+	chip->write_reg(dev, SJA_ID3, frame->can_id >> 5);
+	chip->write_reg(dev, SJA_ID4, frame->can_id << 3);
+
+	/* RTR? */
+	if (frame->can_id & CAN_RTR_FLAG)
+	    fir |= SJA_FIR_RTR;
+
+	else {
+	    /* No RTR, write data bytes */
+	    for (i = 0; i < size; i++)
+		chip->write_reg(dev, SJA_DATA_EFF(i),
+				frame->data[i]);
+	}
+
+    } else {
+	/* Send standard frame */
+
+	/* Write ID */
+	chip->write_reg(dev, SJA_ID1, frame->can_id >> 3);
+	chip->write_reg(dev, SJA_ID2, frame->can_id << 5);
+
+	/* RTR? */
+	if (frame->can_id & CAN_RTR_FLAG)
+	    fir |= SJA_FIR_RTR;
+
+	else {
+	    /* No RTR, write data bytes */
+	    for (i = 0; i < size; i++)
+		chip->write_reg(dev, SJA_DATA_SFF(i),
+				frame->data[i]);
+	}
+    }
+
+
+    /* Write frame information register */
+    chip->write_reg(dev, SJA_FIR, fir);
+
+    /* Push the 'send' button */
+    if (dev->ctrl_mode & CAN_CTRLMODE_LOOPBACK)
+	chip->write_reg(dev, SJA_CMR, SJA_CMR_SRR);
+    else
+	chip->write_reg(dev, SJA_CMR, SJA_CMR_TR);
+
+    return 0;
+}
+
+
+
+/*
+ *  SJA1000 chip configuration
+ */
+static void sja1000_chip_config(struct rtcan_device *dev)
+{
+    struct rtcan_sja1000 *chip = (struct rtcan_sja1000* )dev->priv;
+
+    chip->write_reg(dev, SJA_CDR, chip->cdr);
+    chip->write_reg(dev, SJA_OCR, chip->ocr);
+
+    chip->write_reg(dev, SJA_AMR0, 0xFF);
+    chip->write_reg(dev, SJA_AMR1, 0xFF);
+    chip->write_reg(dev, SJA_AMR2, 0xFF);
+    chip->write_reg(dev, SJA_AMR3, 0xFF);
+}
+
+
+int rtcan_sja1000_register(struct rtcan_device *dev)
+{
+    int                         ret;
+    struct rtcan_sja1000 *chip = dev->priv;
+
+    if (chip == NULL)
+	return -EINVAL;
+
+    /* Set dummy state for following call */
+    dev->state = CAN_STATE_ACTIVE;
+    /* Enter reset mode */
+    rtcan_sja_mode_stop(dev, NULL);
+
+    if ((chip->read_reg(dev, SJA_SR) &
+	 (SJA_SR_RBS | SJA_SR_DOS | SJA_SR_TBS)) != SJA_SR_TBS) {
+	printk("ERROR! No SJA1000 device found!\n");
+	return -ENODEV;
+    }
+
+    dev->ctrl_name = sja_ctrl_name;
+
+    dev->hard_start_xmit = rtcan_sja_start_xmit;
+    dev->do_set_mode = rtcan_sja_set_mode;
+    dev->do_get_state = rtcan_sja_get_state;
+    dev->do_set_bit_time = rtcan_sja_set_bit_time;
+    dev->do_enable_bus_err = rtcan_sja_enable_bus_err;
+#ifndef CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+    dev->bittiming_const = &sja1000_bittiming_const;
+#endif
+
+    chip->bus_err_on = 1;
+
+    ret = rtdm_irq_request(&dev->irq_handle,
+			   chip->irq_num, rtcan_sja_interrupt,
+			   chip->irq_flags, sja_ctrl_name, dev);
+    if (ret) {
+	printk(KERN_ERR "ERROR %d: IRQ %d is %s!\n",
+	       ret, chip->irq_num, ret == -EBUSY ?
+	       "busy, check shared interrupt support" : "invalid");
+	return ret;
+    }
+
+    sja1000_chip_config(dev);
+
+    /* Register RTDM device */
+    ret = rtcan_dev_register(dev);
+    if (ret) {
+	    printk(KERN_ERR
+		   "ERROR %d while trying to register RTCAN device!\n", ret);
+	goto out_irq_free;
+    }
+
+    rtcan_sja_create_proc(dev);
+
+    return 0;
+
+ out_irq_free:
+    rtdm_irq_free(&dev->irq_handle);
+
+    return ret;
+}
+
+
+/* Cleanup module */
+void rtcan_sja1000_unregister(struct rtcan_device *dev)
+{
+    printk("Unregistering SJA1000 device %s\n", dev->name);
+
+    rtdm_irq_disable(&dev->irq_handle);
+    rtcan_sja_mode_stop(dev, NULL);
+    rtdm_irq_free(&dev->irq_handle);
+    rtcan_sja_remove_proc(dev);
+    rtcan_dev_unregister(dev);
+}
+
+int __init rtcan_sja_init(void)
+{
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	printk("RTCAN SJA1000 driver initialized\n");
+	return 0;
+}
+
+
+void __exit rtcan_sja_exit(void)
+{
+	printk("%s removed\n", sja_ctrl_name);
+}
+
+module_init(rtcan_sja_init);
+module_exit(rtcan_sja_exit);
+
+EXPORT_SYMBOL_GPL(rtcan_sja1000_register);
+EXPORT_SYMBOL_GPL(rtcan_sja1000_unregister);
+++ linux-patched/drivers/xenomai/can/sja1000/rtcan_sja1000.h	2022-03-21 12:58:29.295889878 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/sja1000/rtcan_sja1000_proc.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006, Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __SJA1000_H_
+#define __SJA1000_H_
+
+#include <rtcan_dev.h>
+
+struct rtcan_sja1000 {
+    unsigned char (*read_reg)(struct rtcan_device *dev, int off);
+    void (*write_reg)(struct rtcan_device *dev, int off, unsigned char val);
+    void (*irq_ack)(struct rtcan_device *dev);
+    unsigned short irq_num;
+    unsigned short irq_flags;
+    unsigned char ocr;
+    unsigned char cdr;
+    char bus_err_on;
+};
+
+#ifdef CONFIG_FS_PROCFS
+int rtcan_sja_create_proc(struct rtcan_device* dev);
+void rtcan_sja_remove_proc(struct rtcan_device* dev);
+#else
+static inline int rtcan_sja_create_proc(struct rtcan_device* dev)
+{ return 0; }
+static inline void rtcan_sja_remove_proc(struct rtcan_device* dev) { }
+#endif
+int rtcan_sja1000_register(struct rtcan_device *dev);
+void rtcan_sja1000_unregister(struct rtcan_device *dev);
+
+
+#endif  /* __SJA1000_H_ */
+++ linux-patched/drivers/xenomai/can/sja1000/rtcan_sja1000_proc.c	2022-03-21 12:58:29.290889927 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/rtcan_raw_dev.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+
+#include <rtdm/driver.h>
+
+#include <rtcan_dev.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_DEBUG
+
+static int rtcan_sja_proc_regs(struct seq_file *p, void *data)
+{
+    struct rtcan_device *dev = (struct rtcan_device *)data;
+    struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv;
+    int i;
+
+    seq_printf(p, "SJA1000 registers");
+    for (i = 0; i < 0x20; i++) {
+	if ((i % 0x10) == 0)
+	    seq_printf(p, "\n%02x:", i);
+	seq_printf(p, " %02x", chip->read_reg(dev, i));
+    }
+    seq_printf(p, "\n");
+    return 0;
+}
+
+static int rtcan_sja_proc_regs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtcan_sja_proc_regs, PDE_DATA(inode));
+}
+
+static const DEFINE_PROC_OPS(rtcan_sja_proc_regs_ops,
+			rtcan_sja_proc_regs_open,
+			single_release,
+			seq_read,
+			NULL);
+
+int rtcan_sja_create_proc(struct rtcan_device* dev)
+{
+    if (!dev->proc_root)
+	return -EINVAL;
+
+    proc_create_data("registers", S_IFREG | S_IRUGO | S_IWUSR, dev->proc_root,
+		     &rtcan_sja_proc_regs_ops, dev);
+    return 0;
+}
+
+void rtcan_sja_remove_proc(struct rtcan_device* dev)
+{
+    if (!dev->proc_root)
+	return;
+
+    remove_proc_entry("registers", dev->proc_root);
+}
+
+#else /* !CONFIG_XENO_DRIVERS_CAN_DEBUG */
+
+void rtcan_sja_remove_proc(struct rtcan_device* dev)
+{
+}
+
+int rtcan_sja_create_proc(struct rtcan_device* dev)
+{
+    return 0;
+}
+#endif	/* CONFIG_XENO_DRIVERS_CAN_DEBUG */
+++ linux-patched/drivers/xenomai/can/rtcan_raw_dev.c	2022-03-21 12:58:29.286889966 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/rtcan_socket.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger, <wg@grandegger.com>
+ * Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+
+#include <rtdm/driver.h>
+
+#include <rtdm/can.h>
+#include "rtcan_dev.h"
+#include "rtcan_raw.h"
+#include "rtcan_internal.h"
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+
+#define RTCAN_MAX_TSEG1  15
+#define RTCAN_MAX_TSEG2  7
+
+/*
+ * Calculate standard bit-time values for odd bitrates.
+ * Most of this code is from Arnaud Westenberg <arnaud@wanadoo.nl>
+ */
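+/*
+ * The resulting bitrate is clock / (brp * (1 + tseg1 + tseg2)), with
+ * 1 + tseg1 + tseg2 time quanta per bit.
+ */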
+static int rtcan_calc_bit_time(struct rtcan_device *dev,
+			       can_baudrate_t rate,
+			       struct can_bittime_std *bit_time)
+{
+    int best_error = 1000000000;
+    int error;
+    int best_tseg=0, best_brp=0, best_rate=0, brp=0;
+    int tseg=0, tseg1=0, tseg2=0;
+    int clock = dev->can_sys_clock;
+    int sjw = 0;
+    int sampl_pt = 90;
+
+    /* some heuristic specials */
+    if (rate > ((1000000 + 500000) / 2))
+	sampl_pt = 75;
+
+    if (rate < ((12500 + 10000) / 2))
+	sampl_pt = 75;
+
+    if (rate < ((100000 + 125000) / 2))
+	sjw = 1;
+
+    /* tseg even = round down, odd = round up */
+    for (tseg = (0 + 0 + 2) * 2;
+	 tseg <= (RTCAN_MAX_TSEG2 + RTCAN_MAX_TSEG1 + 2) * 2 + 1;
+	 tseg++) {
+	brp = clock / ((1 + tseg / 2) * rate) + tseg % 2;
+	if ((brp == 0) || (brp > 64))
+	    continue;
+
+	error = rate - clock / (brp * (1 + tseg / 2));
+	if (error < 0)
+	    error = -error;
+
+	if (error <= best_error) {
+	    best_error = error;
+	    best_tseg = tseg/2;
+	    best_brp = brp - 1;
+	    best_rate = clock / (brp * (1 + tseg / 2));
+	}
+    }
+
+    if (best_error && (rate / best_error < 10)) {
+	RTCAN_RTDM_DBG("%s: bitrate %d is not possible with %d Hz clock\n",
+		       dev->name, rate, clock);
+	return -EDOM;
+    }
+
+    tseg2 = best_tseg - (sampl_pt * (best_tseg + 1)) / 100;
+
+    if (tseg2 < 0)
+	tseg2 = 0;
+
+    if (tseg2 > RTCAN_MAX_TSEG2)
+	tseg2 = RTCAN_MAX_TSEG2;
+
+    tseg1 = best_tseg - tseg2 - 2;
+
+    if (tseg1 > RTCAN_MAX_TSEG1)  {
+	tseg1 = RTCAN_MAX_TSEG1;
+	tseg2 = best_tseg-tseg1-2;
+    }
+
+    bit_time->brp = best_brp + 1;
+    bit_time->prop_seg = 0;
+    bit_time->phase_seg1 = tseg1 + 1;
+    bit_time->phase_seg2 = tseg2 + 1;
+    bit_time->sjw = sjw + 1;
+    bit_time->sam = 0;
+
+    return 0;
+}
+
+#else /* !CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD */
+
+/* This is the bit-time calculation method from the Linux kernel */
+
+#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+
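+/*
+ * Distribute tseg (= tseg1 + tseg2) so that the sample point comes as close
+ * as possible to sampl_pt, given in thousandths (e.g. 875 = 87.5%), and
+ * return the sample point actually achieved.
+ */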
+static int can_update_spt(const struct can_bittiming_const *btc,
+			  unsigned int sampl_pt, unsigned int tseg,
+			  unsigned int *tseg1, unsigned int *tseg2)
+{
+    *tseg2 = tseg + 1 - (sampl_pt * (tseg + 1)) / 1000;
+    *tseg2 = clamp(*tseg2, btc->tseg2_min, btc->tseg2_max);
+    *tseg1 = tseg - *tseg2;
+    if (*tseg1 > btc->tseg1_max) {
+	*tseg1 = btc->tseg1_max;
+	*tseg2 = tseg - *tseg1;
+    }
+
+    return 1000 * (tseg + 1 - *tseg2) / (tseg + 1);
+}
+
+static int rtcan_calc_bit_time(struct rtcan_device *dev,
+			       can_baudrate_t bitrate,
+			       struct can_bittime_std *bt)
+{
+    const struct can_bittiming_const *btc = dev->bittiming_const;
+    long rate;	/* current bitrate */
+    long rate_error;/* difference between current and target value */
+    long best_rate_error = 1000000000;
+    int spt;	/* current sample point in thousandth */
+    int spt_error;	/* difference between current and target value */
+    int best_spt_error = 1000;
+    int sampl_pt;	/* target sample point */
+    int best_tseg = 0, best_brp = 0;	/* current best values for tseg and brp */
+    unsigned int brp, tsegall, tseg, tseg1, tseg2;
+    u64 v64;
+
+    if (!dev->bittiming_const)
+	return -ENOTSUPP;
+
+    /* Use CiA recommended sample points */
+    if (bitrate > 800000)
+	sampl_pt = 750;
+    else if (bitrate > 500000)
+	sampl_pt = 800;
+    else
+	sampl_pt = 875;
+
+    /* tseg even = round down, odd = round up */
+    for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
+	 tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
+	tsegall = 1 + tseg / 2;
+
+	/* Compute all possible tseg choices (tseg=tseg1+tseg2) */
+	brp = dev->can_sys_clock / (tsegall * bitrate) + tseg % 2;
+
+	/* choose a brp step which is possible in the system */
+	brp = (brp / btc->brp_inc) * btc->brp_inc;
+	if ((brp < btc->brp_min) || (brp > btc->brp_max))
+	    continue;
+
+	rate = dev->can_sys_clock / (brp * tsegall);
+	rate_error = abs((long)(bitrate - rate));
+
+	/* tseg brp biterror */
+	if (rate_error > best_rate_error)
+	    continue;
+
+	/* reset sample point error if we have a better bitrate */
+	if (rate_error < best_rate_error)
+	    best_spt_error = 1000;
+
+	spt = can_update_spt(btc, sampl_pt, tseg / 2, &tseg1, &tseg2);
+	spt_error = abs((long)(sampl_pt - spt));
+	if (spt_error > best_spt_error)
+	    continue;
+
+	best_spt_error = spt_error;
+	best_rate_error = rate_error;
+	best_tseg = tseg / 2;
+	best_brp = brp;
+
+	if (rate_error == 0 && spt_error == 0)
+	    break;
+    }
+
+    if (best_rate_error) {
+	/* Error in one-tenth of a percent */
+	rate_error = (best_rate_error * 1000) / bitrate;
+	if (rate_error > CAN_CALC_MAX_ERROR) {
+	    rtcandev_err(dev,
+			 "bitrate error %ld.%ld%% too high\n",
+			 rate_error / 10, rate_error % 10);
+	    return -EDOM;
+	} else {
+	    rtcandev_warn(dev, "bitrate error %ld.%ld%%\n",
+			  rate_error / 10, rate_error % 10);
+	}
+    }
+
+    /* real sample point */
+    sampl_pt = can_update_spt(btc, sampl_pt, best_tseg, &tseg1, &tseg2);
+
+    v64 = (u64)best_brp * 1000000000UL;
+    do_div(v64, dev->can_sys_clock);
+    bt->prop_seg = tseg1 / 2;
+    bt->phase_seg1 = tseg1 - bt->prop_seg;
+    bt->phase_seg2 = tseg2;
+    bt->sjw = 1;
+    bt->sam = 0;
+    bt->brp = best_brp;
+
+    /* real bit-rate */
+    rate = dev->can_sys_clock / (bt->brp * (tseg1 + tseg2 + 1));
+
+    rtcandev_dbg(dev, "real bitrate %ld, sampling point %d.%d%%\n",
+		 rate, sampl_pt/10, sampl_pt%10);
+
+    return 0;
+}
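+
+/*
+ * Illustrative example (not compiled in), assuming a hypothetical 8 MHz
+ * CAN clock (dev->can_sys_clock) and a requested bitrate of 125 kbit/s:
+ * the search above converges on brp = 4 and tseg = 15 (tseg1 = 13,
+ * tseg2 = 2), which is returned as
+ *
+ *	bt->brp = 4, bt->prop_seg = 6, bt->phase_seg1 = 7,
+ *	bt->phase_seg2 = 2, bt->sjw = 1
+ *
+ * i.e. 8 MHz / (4 * 16) = 125 kbit/s with an 87.5% sample point, so both
+ * error terms are zero and the loop terminates early.
+ */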
+
+#endif /* CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD */
+
+static inline int rtcan_raw_ioctl_dev_get(struct rtcan_device *dev,
+					  int request, struct can_ifreq *ifr)
+{
+    rtdm_lockctx_t lock_ctx;
+
+    switch (request) {
+
+    case SIOCGIFINDEX:
+	ifr->ifr_ifindex = dev->ifindex;
+	break;
+
+    case SIOCGCANSTATE:
+	rtdm_lock_get_irqsave(&dev->device_lock, lock_ctx);
+	if (dev->do_get_state)
+	    dev->state = dev->do_get_state(dev);
+	ifr->ifr_ifru.state = dev->state;
+	rtdm_lock_put_irqrestore(&dev->device_lock, lock_ctx);
+	break;
+
+    case SIOCGCANCTRLMODE:
+	ifr->ifr_ifru.ctrlmode = dev->ctrl_mode;
+	break;
+
+    case SIOCGCANBAUDRATE:
+	ifr->ifr_ifru.baudrate = dev->baudrate;
+	break;
+
+    case SIOCGCANCUSTOMBITTIME:
+	ifr->ifr_ifru.bittime = dev->bit_time;
+	break;
+    }
+
+    return 0;
+}
+
+static inline int rtcan_raw_ioctl_dev_set(struct rtcan_device *dev,
+					  int request, struct can_ifreq *ifr)
+{
+    rtdm_lockctx_t lock_ctx;
+    int ret = 0, started = 0;
+    struct can_bittime bit_time, *bt;
+
+    switch (request) {
+    case SIOCSCANBAUDRATE:
+	if (!dev->do_set_bit_time)
+	    return 0;
+	ret = rtcan_calc_bit_time(dev, ifr->ifr_ifru.baudrate, &bit_time.std);
+	if (ret)
+	    break;
+	bit_time.type = CAN_BITTIME_STD;
+	break;
+    }
+
+    rtdm_lock_get_irqsave(&dev->device_lock, lock_ctx);
+
+    if (dev->do_get_state)
+	dev->state = dev->do_get_state(dev);
+
+    switch (request) {
+    case SIOCSCANCTRLMODE:
+    case SIOCSCANBAUDRATE:
+    case SIOCSCANCUSTOMBITTIME:
+	if ((started = CAN_STATE_OPERATING(dev->state))) {
+	    if ((ret = dev->do_set_mode(dev, CAN_MODE_STOP, &lock_ctx)))
+		goto out;
+	}
+	break;
+    }
+
+    switch (request) {
+    case SIOCSCANMODE:
+	if (dev->do_set_mode &&
+	    !(ifr->ifr_ifru.mode == CAN_MODE_START &&
+	      CAN_STATE_OPERATING(dev->state)))
+	    ret = dev->do_set_mode(dev, ifr->ifr_ifru.mode, &lock_ctx);
+	break;
+
+    case SIOCSCANCTRLMODE:
+	dev->ctrl_mode = ifr->ifr_ifru.ctrlmode;
+	break;
+
+    case SIOCSCANBAUDRATE:
+	ret = dev->do_set_bit_time(dev, &bit_time, &lock_ctx);
+	if (!ret) {
+	    dev->baudrate = ifr->ifr_ifru.baudrate;
+	    dev->bit_time = bit_time;
+	}
+	break;
+
+    case SIOCSCANCUSTOMBITTIME:
+	bt = &ifr->ifr_ifru.bittime;
+	ret = dev->do_set_bit_time(dev, bt, &lock_ctx);
+	if (!ret) {
+	    dev->bit_time = *bt;
+	    if (bt->type == CAN_BITTIME_STD && bt->std.brp)
+		dev->baudrate = (dev->can_sys_clock /
+				 (bt->std.brp * (1 + bt->std.prop_seg +
+						 bt->std.phase_seg1 +
+						 bt->std.phase_seg2)));
+	    else
+		dev->baudrate = CAN_BAUDRATE_UNKNOWN;
+	}
+	break;
+
+    default:
+	ret = -EOPNOTSUPP;
+	break;
+    }
+
+ out:
+    if (started)
+	dev->do_set_mode(dev, CAN_MODE_START, &lock_ctx);
+
+    rtdm_lock_put_irqrestore(&dev->device_lock, lock_ctx);
+
+    return ret;
+}
+
+int rtcan_raw_ioctl_dev(struct rtdm_fd *fd, int request, void *arg)
+{
+    struct can_ifreq *ifr;
+    int ret = 0, get = 0;
+    union {
+	    /*
+	     * We need to deal with callers still passing struct ifreq
+	     * instead of can_ifreq; the former may have a larger memory
+	     * footprint (but can never be smaller). The field offsets
+	     * are the same in either case.
+	     */
+	    struct ifreq ifr_legacy;
+	    struct can_ifreq ifr_can;
+    } ifr_buf;
+    struct rtcan_device *dev;
+
+    switch (request) {
+
+    case SIOCGIFINDEX:
+    case SIOCGCANSTATE:
+    case SIOCGCANBAUDRATE:
+    case SIOCGCANCUSTOMBITTIME:
+	    get = 1;
+	    fallthrough;
+    case SIOCSCANMODE:
+    case SIOCSCANCTRLMODE:
+    case SIOCSCANBAUDRATE:
+    case SIOCSCANCUSTOMBITTIME:
+
+	if (rtdm_fd_is_user(fd)) {
+	    /* Copy struct can_ifreq from userspace */
+	    if (!rtdm_read_user_ok(fd, arg,
+				   sizeof(struct can_ifreq)) ||
+		rtdm_copy_from_user(fd, &ifr_buf, arg,
+				    sizeof(struct can_ifreq)))
+		return -EFAULT;
+
+	    ifr = &ifr_buf.ifr_can;
+	} else
+	    ifr = (struct can_ifreq *)arg;
+
+	/* Get interface index and data */
+	dev = rtcan_dev_get_by_name(ifr->ifr_name);
+	if (dev == NULL)
+	    return -ENODEV;
+
+	if (get) {
+		ret = rtcan_raw_ioctl_dev_get(dev, request, ifr);
+		rtcan_dev_dereference(dev);
+		if (ret == 0 && rtdm_fd_is_user(fd)) {
+		    /*
+		     * Since we have already checked that the user memory is
+		     * safe to read and write, we can copy to user space directly.
+		     */
+		    if (rtdm_copy_to_user(fd, arg, ifr,
+					  sizeof(struct can_ifreq)))
+			    return -EFAULT;
+		}
+	} else {
+		ret = rtcan_raw_ioctl_dev_set(dev, request, ifr);
+		rtcan_dev_dereference(dev);
+	}
+	break;
+
+    default:
+	ret = -EOPNOTSUPP;
+	break;
+
+    }
+
+    return ret;
+}
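+
+/*
+ * Illustrative userspace sketch (not part of this driver), assuming a raw
+ * CAN socket "s" created with socket(PF_CAN, SOCK_RAW, CAN_RAW) and the
+ * hypothetical interface name "rtcan0"; it switches the device to
+ * 500 kbit/s and (re)starts the controller via the ioctls handled above:
+ *
+ *	struct can_ifreq ifr;
+ *
+ *	strncpy(ifr.ifr_name, "rtcan0", IFNAMSIZ);
+ *	ifr.ifr_ifru.baudrate = 500000;
+ *	if (ioctl(s, SIOCSCANBAUDRATE, &ifr) < 0)
+ *		return -errno;
+ *	ifr.ifr_ifru.mode = CAN_MODE_START;
+ *	if (ioctl(s, SIOCSCANMODE, &ifr) < 0)
+ *		return -errno;
+ */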
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_BUS_ERR
+void __rtcan_raw_enable_bus_err(struct rtcan_socket *sock)
+{
+    int i, begin, end;
+    struct rtcan_device *dev;
+    rtdm_lockctx_t lock_ctx;
+    int ifindex = atomic_read(&sock->ifindex);
+
+    if (ifindex) {
+	begin = ifindex;
+	end   = ifindex;
+    } else {
+	begin = 1;
+	end = RTCAN_MAX_DEVICES;
+    }
+
+    for (i = begin; i <= end; i++) {
+	if ((dev = rtcan_dev_get_by_index(i)) == NULL)
+	    continue;
+
+	if (dev->do_enable_bus_err) {
+	    rtdm_lock_get_irqsave(&dev->device_lock, lock_ctx);
+	    dev->do_enable_bus_err(dev);
+	    rtdm_lock_put_irqrestore(&dev->device_lock, lock_ctx);
+	}
+	rtcan_dev_dereference(dev);
+    }
+}
+#endif /* CONFIG_XENO_DRIVERS_CAN_BUS_ERR*/
+++ linux-patched/drivers/xenomai/can/rtcan_socket.c	2022-03-21 12:58:29.282890005 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005,2006 Sebastian Smolorz
+ *                         <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * Based on stack/socket.c - sockets implementation for RTnet
+ *
+ * Copyright (C) 1999       Lineo, Inc
+ *               1999, 2002 David A. Schleef <ds@schleef.org>
+ *               2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *               2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "rtcan_socket.h"
+#include "rtcan_list.h"
+
+
+LIST_HEAD(rtcan_socket_list);
+
+void rtcan_socket_init(struct rtdm_fd *fd)
+{
+    struct rtcan_socket *sock = rtdm_fd_to_private(fd);
+    rtdm_lockctx_t lock_ctx;
+
+
+    rtdm_sem_init(&sock->recv_sem, 0);
+
+    sock->recv_head = 0;
+    sock->recv_tail = 0;
+    atomic_set(&sock->ifindex, 0);
+    sock->flistlen = RTCAN_SOCK_UNBOUND;
+    sock->flist = NULL;
+    sock->err_mask = 0;
+    sock->rx_buf_full = 0;
+    sock->flags = 0;
+#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK
+    sock->loopback = 1;
+#endif
+
+    sock->tx_timeout = RTDM_TIMEOUT_INFINITE;
+    sock->rx_timeout = RTDM_TIMEOUT_INFINITE;
+
+    INIT_LIST_HEAD(&sock->tx_wait_head);
+
+    rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
+    list_add(&sock->socket_list, &rtcan_socket_list);
+    rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
+}
+
+
+void rtcan_socket_cleanup(struct rtdm_fd *fd)
+{
+    struct rtcan_socket *sock = rtdm_fd_to_private(fd);
+    struct tx_wait_queue *tx_waiting;
+    rtdm_lockctx_t lock_ctx;
+    int tx_list_empty;
+
+    /* Wake up sleeping senders. This is re-entrant-safe. */
+    do {
+	cobalt_atomic_enter(lock_ctx);
+	/* Is someone there? */
+	if (list_empty(&sock->tx_wait_head))
+		tx_list_empty = 1;
+	else {
+		tx_list_empty = 0;
+
+		/* Get next entry pointing to a waiting task */
+		tx_waiting = list_entry(sock->tx_wait_head.next,
+					struct tx_wait_queue, tx_wait_list);
+
+		/* Remove it from list */
+		list_del_init(&tx_waiting->tx_wait_list);
+
+		/* Wake task up (atomic section is left implicitly) */
+		rtdm_task_unblock(tx_waiting->rt_task);
+	}
+	cobalt_atomic_leave(lock_ctx);
+    } while (!tx_list_empty);
+
+    rtdm_sem_destroy(&sock->recv_sem);
+
+    rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
+    if (sock->socket_list.next) {
+	list_del(&sock->socket_list);
+	sock->socket_list.next = NULL;
+    }
+    rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
+}
+++ linux-patched/drivers/xenomai/can/Kconfig	2022-03-21 12:58:29.277890054 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/mscan/rtcan_mscan_mpc5xxx.c	1970-01-01 01:00:00.000000000 +0100
+menu "CAN drivers"
+
+config XENO_DRIVERS_CAN
+	tristate "RT-Socket-CAN, CAN raw socket interface"
+	help
+	RT-Socket-CAN is a real-time socket interface for CAN controllers.
+
+config XENO_DRIVERS_CAN_DEBUG
+	depends on XENO_DRIVERS_CAN && PROC_FS
+	bool "Enable debug output"
+	default y
+	help
+
+	This option activates debugging checks and enhanced output for the
+	RT-Socket-CAN driver. It also allows listing the hardware registers
+	of the registered CAN controllers. It is recommended for getting
+	started and for analysing potential problems. For production
+	purposes, it should be switched off (for the sake of latency).
+
+config XENO_DRIVERS_CAN_LOOPBACK
+	depends on XENO_DRIVERS_CAN
+	bool "Enable TX loopback to local sockets"
+	default n
+	help
+
+	This option adds support for TX loopback to local sockets. Normally,
+	messages sent to the CAN bus are not visible to sockets listening on
+	the same local device. When this option is enabled, TX messages are
+	by default looped back locally once they have been transmitted. This
+	behaviour can be deactivated or reactivated with "setsockopt". Enable
+	this option if you want "net-alike" behaviour.
+
+config XENO_DRIVERS_CAN_RXBUF_SIZE
+	depends on XENO_DRIVERS_CAN
+	int "Size of receive ring buffers (must be 2^N)"
+	default 1024
+
+config XENO_DRIVERS_CAN_MAX_DEVICES
+	depends on XENO_DRIVERS_CAN
+	int "Maximum number of devices"
+	default 4
+
+config XENO_DRIVERS_CAN_MAX_RECEIVERS
+	depends on XENO_DRIVERS_CAN
+	int "Maximum number of receive filters per device"
+	default 16
+	help
+
+	The driver maintains a receive filter list per device for fast access.
+
+config XENO_DRIVERS_CAN_BUS_ERR
+	depends on XENO_DRIVERS_CAN
+	bool
+	default n
+	help
+
+	To avoid unnecessary bus error interrupt flooding, this option enables
+	bus error interrupts when an application is calling a receive function
+	on a socket listening for bus errors. After one bus error has occurred,
+	the interrupt will be disabled to give the application time for error
+	processing. This option is automatically selected for CAN controllers
+	supporting bus error interrupts like the SJA1000.
+
+config XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+	depends on XENO_DRIVERS_CAN
+	bool "Old bit-time calculation algorithm (deprecated)"
+	default n
+	help
+
+	This option enables the old algorithm for calculating the CAN
+	bit-timing parameters. It is kept for backward compatibility only.
+
+config XENO_DRIVERS_CAN_VIRT
+	depends on XENO_DRIVERS_CAN
+	tristate "Virtual CAN bus driver"
+	help
+
+	This driver provides two CAN ports that are virtually interconnected.
+	More ports can be enabled with the module parameter "devices".
+
+config XENO_DRIVERS_CAN_FLEXCAN
+	depends on XENO_DRIVERS_CAN && OF && !XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+	tristate "Freescale FLEXCAN based chips"
+	help
+
+	Say Y here if you want support for the Freescale FlexCAN controller.
+
+source "drivers/xenomai/can/mscan/Kconfig"
+source "drivers/xenomai/can/peak_canfd/Kconfig"
+source "drivers/xenomai/can/sja1000/Kconfig"
+
+endmenu
+++ linux-patched/drivers/xenomai/can/mscan/rtcan_mscan_mpc5xxx.c	2022-03-21 12:58:29.273890092 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/mscan/rtcan_mscan_proc.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * CAN bus driver for the Freescale MPC5xxx embedded CPU.
+ *
+ * Copyright (C) 2004-2005 Andrey Volkov <avolkov@varma-el.com>,
+ *                         Varma Electronics Oy
+ * Copyright (C) 2008-2010 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2009 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/of_platform.h>
+#include <sysdev/fsl_soc.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <asm/mpc52xx.h>
+
+#include "rtcan_dev.h"
+#include "rtcan_mscan_regs.h"
+#include "rtcan_mscan.h"
+
+#define of_device platform_device
+#define of_platform_driver platform_driver
+#define of_register_platform_driver platform_driver_register
+#define of_unregister_platform_driver platform_driver_unregister
+
+static char mscan_ctrl_name_mpc5200[] = "MSCAN-MPC5200";
+static char mscan_ctrl_name_mpc512x[] = "MSCAN-MPC512x";
+static char mscan_board_name[] = "unknown";
+
+struct mpc5xxx_can_data {
+	unsigned int type;
+	u32 (*get_clock)(struct of_device *ofdev, const char *clock_name,
+			 int *mscan_clksrc);
+};
+
+#ifdef CONFIG_PPC_MPC52xx
+static struct of_device_id mpc52xx_cdm_ids[] = {
+	{ .compatible = "fsl,mpc5200-cdm", },
+	{}
+};
+
+static u32 mpc52xx_can_get_clock(struct of_device *ofdev,
+				 const char *clock_name,
+				 int *mscan_clksrc)
+{
+	unsigned int pvr;
+	struct mpc52xx_cdm  __iomem *cdm;
+	struct device_node *np_cdm;
+	unsigned int freq;
+	u32 val;
+
+	pvr = mfspr(SPRN_PVR);
+
+	/*
+	 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
+	 * (IP_CLK) can be selected as MSCAN clock source. According to
+	 * the MPC5200 user's manual, the oscillator clock is the better
+	 * choice as it has less jitter. For this reason, it is selected
+	 * by default. Unfortunately, it can not be selected for the old
+	 * MPC5200 Rev. A chips due to a hardware bug (check errata).
+	 */
+	if (clock_name && strcmp(clock_name, "ip") == 0)
+		*mscan_clksrc = MSCAN_CLKSRC_BUS;
+	else
+		*mscan_clksrc = MSCAN_CLKSRC_XTAL;
+
+	freq = mpc5xxx_get_bus_frequency(mpc5xxx_get_of_node(ofdev));
+	if (!freq)
+		return 0;
+
+	if (*mscan_clksrc == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
+		return freq;
+
+	/* Determine SYS_XTAL_IN frequency from the clock domain settings */
+	np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids);
+	if (!np_cdm) {
+		dev_err(&ofdev->dev, "can't get clock node!\n");
+		return 0;
+	}
+	cdm = of_iomap(np_cdm, 0);
+
+	if (in_8(&cdm->ipb_clk_sel) & 0x1)
+		freq *= 2;
+	val = in_be32(&cdm->rstcfg);
+
+	freq *= (val & (1 << 5)) ? 8 : 4;
+	freq /= (val & (1 << 6)) ? 12 : 16;
+
+	of_node_put(np_cdm);
+	iounmap(cdm);
+
+	return freq;
+}
+#else /* !CONFIG_PPC_MPC52xx */
+static u32 mpc52xx_can_get_clock(struct of_device *ofdev,
+				 const char *clock_name,
+				 int *mscan_clksrc)
+{
+	return 0;
+}
+#endif /* CONFIG_PPC_MPC52xx */
+
+#ifdef CONFIG_PPC_MPC512x
+struct mpc512x_clockctl {
+	u32 spmr;		/* System PLL Mode Reg */
+	u32 sccr[2];		/* System Clk Ctrl Reg 1 & 2 */
+	u32 scfr1;		/* System Clk Freq Reg 1 */
+	u32 scfr2;		/* System Clk Freq Reg 2 */
+	u32 reserved;
+	u32 bcr;		/* Bread Crumb Reg */
+	u32 pccr[12];		/* PSC Clk Ctrl Reg 0-11 */
+	u32 spccr;		/* SPDIF Clk Ctrl Reg */
+	u32 cccr;		/* CFM Clk Ctrl Reg */
+	u32 dccr;		/* DIU Clk Cnfg Reg */
+	u32 mccr[4];		/* MSCAN Clk Ctrl Reg 1-3 */
+};
+
+static struct of_device_id mpc512x_clock_ids[] = {
+	{ .compatible = "fsl,mpc5121-clock", },
+	{}
+};
+
+static u32 mpc512x_can_get_clock(struct of_device *ofdev,
+				 const char *clock_name,
+				 int *mscan_clksrc)
+{
+	struct mpc512x_clockctl __iomem *clockctl;
+	struct device_node *np_clock;
+	struct clk *sys_clk, *ref_clk;
+	int plen, clockidx, clocksrc = -1;
+	u32 sys_freq, val, clockdiv = 1, freq = 0;
+	const u32 *pval;
+
+	np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
+	if (!np_clock) {
+		dev_err(&ofdev->dev, "couldn't find clock node\n");
+		return 0;	/* the caller treats a zero frequency as failure */
+	}
+	clockctl = of_iomap(np_clock, 0);
+	if (!clockctl) {
+		dev_err(&ofdev->dev, "couldn't map clock registers\n");
+		return 0;
+	}
+
+	/* Determine the MSCAN device index from the physical address */
+	pval = of_get_property(mpc5xxx_get_of_node(ofdev), "reg", &plen);
+	BUG_ON(!pval || plen < sizeof(*pval));
+	clockidx = (*pval & 0x80) ? 1 : 0;
+	if (*pval & 0x2000)
+		clockidx += 2;
+
+	/*
+	 * Clock source and divider selection: 3 different clock sources
+	 * can be selected: "ip", "ref" or "sys". For the latter two, a
+	 * clock divider can be defined as well. If the clock source is
+	 * not specified by the device tree, we first try to find an
+	 * optimal CAN source clock based on the system clock. If that
+	 * is not possible, the reference clock will be used.
+	 */
+	if (clock_name && !strcmp(clock_name, "ip")) {
+		*mscan_clksrc = MSCAN_CLKSRC_IPS;
+		freq = mpc5xxx_get_bus_frequency(mpc5xxx_get_of_node(ofdev));
+	} else {
+		*mscan_clksrc = MSCAN_CLKSRC_BUS;
+
+		pval = of_get_property(mpc5xxx_get_of_node(ofdev),
+				       "fsl,mscan-clock-divider", &plen);
+		if (pval && plen == sizeof(*pval))
+			clockdiv = *pval;
+		if (!clockdiv)
+			clockdiv = 1;
+
+		if (!clock_name || !strcmp(clock_name, "sys")) {
+			sys_clk = clk_get(&ofdev->dev, "sys_clk");
+			if (!sys_clk) {
+				dev_err(&ofdev->dev, "couldn't get sys_clk\n");
+				goto exit_unmap;
+			}
+			/* Get and round up/down sys clock rate */
+			sys_freq = 1000000 *
+				((clk_get_rate(sys_clk) + 499999) / 1000000);
+
+			if (!clock_name) {
+				/* A multiple of 16 MHz would be optimal */
+				if ((sys_freq % 16000000) == 0) {
+					clocksrc = 0;
+					clockdiv = sys_freq / 16000000;
+					freq = sys_freq / clockdiv;
+				}
+			} else {
+				clocksrc = 0;
+				freq = sys_freq / clockdiv;
+			}
+		}
+
+		if (clocksrc < 0) {
+			ref_clk = clk_get(&ofdev->dev, "ref_clk");
+			if (!ref_clk) {
+				dev_err(&ofdev->dev, "couldn't get ref_clk\n");
+				goto exit_unmap;
+			}
+			clocksrc = 1;
+			freq = clk_get_rate(ref_clk) / clockdiv;
+		}
+	}
+
+	/* Disable clock */
+	out_be32(&clockctl->mccr[clockidx], 0x0);
+	if (clocksrc >= 0) {
+		/* Set source and divider */
+		val = (clocksrc << 14) | ((clockdiv - 1) << 17);
+		out_be32(&clockctl->mccr[clockidx], val);
+		/* Enable clock */
+		out_be32(&clockctl->mccr[clockidx], val | 0x10000);
+	}
+
+	/* Enable MSCAN clock domain */
+	val = in_be32(&clockctl->sccr[1]);
+	if (!(val & (1 << 25)))
+		out_be32(&clockctl->sccr[1], val | (1 << 25));
+
+	dev_dbg(&ofdev->dev, "using '%s' with frequency divider %d\n",
+		*mscan_clksrc == MSCAN_CLKSRC_IPS ? "ips_clk" :
+		clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
+
+exit_unmap:
+	of_node_put(np_clock);
+	iounmap(clockctl);
+
+	return freq;
+}
+#else /* !CONFIG_PPC_MPC512x */
+static u32 mpc512x_can_get_clock(struct of_device *ofdev,
+				 const char *clock_name,
+				 int *mscan_clksrc)
+{
+	return 0;
+}
+#endif /* CONFIG_PPC_MPC512x */
+
+static struct of_device_id mpc5xxx_can_table[];
+static int mpc5xxx_can_probe(struct of_device *ofdev)
+{
+	struct device_node *np = mpc5xxx_get_of_node(ofdev);
+	struct mpc5xxx_can_data *data;
+	struct rtcan_device *dev;
+	void __iomem *base;
+	const char *clock_name = NULL;
+	int irq, mscan_clksrc = 0;
+	int err = -ENOMEM;
+
+	const struct of_device_id *id;
+
+	id = of_match_device(mpc5xxx_can_table, &ofdev->dev);
+	if (!id)
+		return -EINVAL;
+
+	data = (struct mpc5xxx_can_data *)id->data;
+
+	base = of_iomap(np, 0);
+	if (!base) {
+		dev_err(&ofdev->dev, "couldn't ioremap\n");
+		return err;
+	}
+
+	irq = irq_of_parse_and_map(np, 0);
+	if (!irq) {
+		dev_err(&ofdev->dev, "no irq found\n");
+		err = -ENODEV;
+		goto exit_unmap_mem;
+	}
+
+	dev = rtcan_dev_alloc(0, 0);
+	if (!dev)
+		goto exit_dispose_irq;
+
+	clock_name = of_get_property(np, "fsl,mscan-clock-source", NULL);
+
+	BUG_ON(!data);
+	dev->can_sys_clock = data->get_clock(ofdev, clock_name,
+					     &mscan_clksrc);
+	if (!dev->can_sys_clock) {
+		dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
+		goto exit_free_mscan;
+	}
+
+	if (data->type == MSCAN_TYPE_MPC5121)
+		dev->ctrl_name = mscan_ctrl_name_mpc512x;
+	else
+		dev->ctrl_name = mscan_ctrl_name_mpc5200;
+	dev->board_name = mscan_board_name;
+	dev->base_addr = (unsigned long)base;
+
+	err = rtcan_mscan_register(dev, irq, mscan_clksrc);
+	if (err) {
+		dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
+			RTCAN_DRV_NAME, err);
+		goto exit_free_mscan;
+	}
+
+	dev_set_drvdata(&ofdev->dev, dev);
+
+	dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
+		 base, irq, dev->can_sys_clock);
+
+	return 0;
+
+exit_free_mscan:
+	rtcan_dev_free(dev);
+exit_dispose_irq:
+	irq_dispose_mapping(irq);
+exit_unmap_mem:
+	iounmap(base);
+
+	return err;
+}
+
+static int mpc5xxx_can_remove(struct of_device *ofdev)
+{
+	struct rtcan_device *dev = dev_get_drvdata(&ofdev->dev);
+
+	dev_set_drvdata(&ofdev->dev, NULL);
+
+	rtcan_mscan_unregister(dev);
+	iounmap((void *)dev->base_addr);
+	rtcan_dev_free(dev);
+
+	return 0;
+}
+
+static struct mpc5xxx_can_data mpc5200_can_data = {
+	.type = MSCAN_TYPE_MPC5200,
+	.get_clock = mpc52xx_can_get_clock,
+};
+
+static struct mpc5xxx_can_data mpc5121_can_data = {
+	.type = MSCAN_TYPE_MPC5121,
+	.get_clock = mpc512x_can_get_clock,
+};
+
+static struct of_device_id mpc5xxx_can_table[] = {
+	{ .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, },
+	/* Note that only MPC5121 Rev. 2 (and later) is supported */
+	{ .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, },
+	{},
+};
+
+static struct of_platform_driver mpc5xxx_can_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = RTCAN_DRV_NAME,
+		.of_match_table = mpc5xxx_can_table,
+	},
+	.probe = mpc5xxx_can_probe,
+	.remove = mpc5xxx_can_remove,
+};
+
+static int __init mpc5xxx_can_init(void)
+{
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	return of_register_platform_driver(&mpc5xxx_can_driver);
+}
+module_init(mpc5xxx_can_init);
+
+static void __exit mpc5xxx_can_exit(void)
+{
+	of_unregister_platform_driver(&mpc5xxx_can_driver);
+}
+module_exit(mpc5xxx_can_exit);
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("RT-Socket-CAN driver for MPC5200 and MPC512x");
+MODULE_LICENSE("GPL v2");
+++ linux-patched/drivers/xenomai/can/mscan/rtcan_mscan_proc.c	2022-03-21 12:58:29.269890132 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/mscan/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include "rtcan_dev.h"
+#include "rtcan_internal.h"
+#include "rtcan_mscan_regs.h"
+
+#define MSCAN_REG_ARGS(reg) \
+	"%-8s 0x%02x\n", #reg, (int)(in_8(&regs->reg)) & 0xff
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_DEBUG
+
+static int rtcan_mscan_proc_regs(struct seq_file *p, void *data)
+{
+	struct rtcan_device *dev = (struct rtcan_device *)data;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+#ifdef MPC5xxx_GPIO
+	struct mpc5xxx_gpio *gpio = (struct mpc5xxx_gpio *)MPC5xxx_GPIO;
+	u32 port_config;
+#endif
+	u8 canctl0, canctl1;
+
+	seq_printf(p, "MSCAN registers at %p\n", regs);
+
+	canctl0 = in_8(&regs->canctl0);
+	seq_printf(p, "canctl0  0x%02x%s%s%s%s%s%s%s%s\n",
+		   canctl0,
+		   (canctl0 & MSCAN_RXFRM) ? " rxfrm" :"",
+		   (canctl0 & MSCAN_RXACT) ? " rxact" :"",
+		   (canctl0 & MSCAN_CSWAI) ? " cswai" :"",
+		   (canctl0 & MSCAN_SYNCH) ? " synch" :"",
+		   (canctl0 & MSCAN_TIME)  ? " time"  :"",
+		   (canctl0 & MSCAN_WUPE)  ? " wupe"  :"",
+		   (canctl0 & MSCAN_SLPRQ) ? " slprq" :"",
+		   (canctl0 & MSCAN_INITRQ)? " initrq":"" );
+	canctl1 = in_8(&regs->canctl1);
+	seq_printf(p, "canctl1  0x%02x%s%s%s%s%s%s%s\n",
+		   canctl1,
+		   (canctl1 & MSCAN_CANE)  ? " cane"  :"",
+		   (canctl1 & MSCAN_CLKSRC)? " clksrc":"",
+		   (canctl1 & MSCAN_LOOPB) ? " loopb" :"",
+		   (canctl1 & MSCAN_LISTEN)? " listen":"",
+		   (canctl1 & MSCAN_WUPM)  ? " wump"  :"",
+		   (canctl1 & MSCAN_SLPAK) ? " slpak" :"",
+		   (canctl1 & MSCAN_INITAK)? " initak":"");
+	seq_printf(p, MSCAN_REG_ARGS(canbtr0 ));
+	seq_printf(p, MSCAN_REG_ARGS(canbtr1 ));
+	seq_printf(p, MSCAN_REG_ARGS(canrflg ));
+	seq_printf(p, MSCAN_REG_ARGS(canrier ));
+	seq_printf(p, MSCAN_REG_ARGS(cantflg ));
+	seq_printf(p, MSCAN_REG_ARGS(cantier ));
+	seq_printf(p, MSCAN_REG_ARGS(cantarq ));
+	seq_printf(p, MSCAN_REG_ARGS(cantaak ));
+	seq_printf(p, MSCAN_REG_ARGS(cantbsel));
+	seq_printf(p, MSCAN_REG_ARGS(canidac ));
+	seq_printf(p, MSCAN_REG_ARGS(canrxerr));
+	seq_printf(p, MSCAN_REG_ARGS(cantxerr));
+	seq_printf(p, MSCAN_REG_ARGS(canidar0));
+	seq_printf(p, MSCAN_REG_ARGS(canidar1));
+	seq_printf(p, MSCAN_REG_ARGS(canidar2));
+	seq_printf(p, MSCAN_REG_ARGS(canidar3));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr0));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr1));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr2));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr3));
+	seq_printf(p, MSCAN_REG_ARGS(canidar4));
+	seq_printf(p, MSCAN_REG_ARGS(canidar5));
+	seq_printf(p, MSCAN_REG_ARGS(canidar6));
+	seq_printf(p, MSCAN_REG_ARGS(canidar7));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr4));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr5));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr6));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr7));
+
+#ifdef MPC5xxx_GPIO
+	seq_printf(p, "GPIO registers\n");
+	port_config = in_be32(&gpio->port_config);
+	seq_printf(p, "port_config 0x%08x %s\n", port_config,
+		   (port_config & 0x10000000 ?
+			"CAN1 on I2C1, CAN2 on TMR0/1 pins" :
+			(port_config & 0x70) == 0x10 ?
+				"CAN1/2 on PSC2 pins" :
+				"MSCAN1/2 not routed"));
+#endif
+
+	return 0;
+}
+
+static int rtcan_mscan_proc_regs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtcan_mscan_proc_regs, PDE_DATA(inode));
+}
+
+static const DEFINE_PROC_OPS(rtcan_mscan_proc_regs_ops,
+			rtcan_mscan_proc_regs_open,
+			single_release,
+			seq_read,
+			NULL);
+
+int rtcan_mscan_create_proc(struct rtcan_device* dev)
+{
+	if (!dev->proc_root)
+		return -EINVAL;
+
+	proc_create_data("registers", S_IFREG | S_IRUGO | S_IWUSR,
+			 dev->proc_root, &rtcan_mscan_proc_regs_ops, dev);
+	return 0;
+}
+
+void rtcan_mscan_remove_proc(struct rtcan_device* dev)
+{
+	if (!dev->proc_root)
+		return;
+
+	remove_proc_entry("registers", dev->proc_root);
+}
+
+#else /* !CONFIG_XENO_DRIVERS_CAN_DEBUG */
+
+void rtcan_mscan_remove_proc(struct rtcan_device* dev)
+{
+}
+
+int rtcan_mscan_create_proc(struct rtcan_device* dev)
+{
+	return 0;
+}
+#endif	/* CONFIG_XENO_DRIVERS_CAN_DEBUG */
+++ linux-patched/drivers/xenomai/can/mscan/Kconfig	2022-03-21 12:58:29.265890170 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/mscan/rtcan_mscan_regs.h	1970-01-01 01:00:00.000000000 +0100
+config XENO_DRIVERS_CAN_MSCAN
+	depends on XENO_DRIVERS_CAN && (PPC_MPC52xx || PPC_MPC512x)
+	tristate "MSCAN driver for MPC52xx and MPC512x"
+	default n
+	help
+
+	This driver is for the MSCAN on the MPC5200 and MPC512x processors
+	from Freescale.
+++ linux-patched/drivers/xenomai/can/mscan/rtcan_mscan_regs.h	2022-03-21 12:58:29.261890210 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/mscan/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Based on linux-2.4.25/include/asm-ppc/mpc5xxx.h
+ * Prototypes, etc. for the Motorola MPC5xxx embedded cpu chips
+ *
+ * Author: Dale Farnsworth <dfarnsworth@mvista.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __RTCAN_MSCAN_REGS_H_
+#define __RTCAN_MSCAN_REGS_H_
+
+#include <linux/version.h>
+#include <linux/of_platform.h>
+#include <asm/mpc52xx.h>
+
+static inline void __iomem *mpc5xxx_gpio_find_and_map(void)
+{
+	struct device_node *ofn;
+	ofn = of_find_compatible_node(NULL, NULL, "mpc5200-gpio");
+	if (!ofn)
+		ofn = of_find_compatible_node(NULL, NULL, "fsl,mpc5200-gpio");
+	return ofn ? of_iomap(ofn, 0) : NULL;
+}
+
+#define MPC5xxx_GPIO	mpc5xxx_gpio_find_and_map()
+#define mpc5xxx_gpio	mpc52xx_gpio
+
+#define mpc5xxx_get_of_node(ofdev) (ofdev)->dev.of_node
+
+#define MSCAN_CAN1_ADDR	(MSCAN_MBAR + 0x0900) /* MSCAN Module 1 */
+#define MSCAN_CAN2_ADDR	(MSCAN_MBAR + 0x0980) /* MSCAN Module 2 */
+#define MSCAN_SIZE	0x80
+
+/* MSCAN control register 0 (CANCTL0) bits */
+#define MSCAN_RXFRM	0x80
+#define MSCAN_RXACT	0x40
+#define MSCAN_CSWAI	0x20
+#define MSCAN_SYNCH	0x10
+#define MSCAN_TIME	0x08
+#define MSCAN_WUPE	0x04
+#define MSCAN_SLPRQ	0x02
+#define MSCAN_INITRQ	0x01
+
+/* MSCAN control register 1 (CANCTL1) bits */
+#define MSCAN_CANE	0x80
+#define MSCAN_CLKSRC	0x40
+#define MSCAN_LOOPB	0x20
+#define MSCAN_LISTEN	0x10
+#define MSCAN_WUPM	0x04
+#define MSCAN_SLPAK	0x02
+#define MSCAN_INITAK	0x01
+
+/* MSCAN receiver flag register (CANRFLG) bits */
+#define MSCAN_WUPIF	0x80
+#define MSCAN_CSCIF	0x40
+#define MSCAN_RSTAT1	0x20
+#define MSCAN_RSTAT0	0x10
+#define MSCAN_TSTAT1	0x08
+#define MSCAN_TSTAT0	0x04
+#define MSCAN_OVRIF	0x02
+#define MSCAN_RXF	0x01
+
+/* MSCAN receiver interrupt enable register (CANRIER) bits */
+#define MSCAN_WUPIE	0x80
+#define MSCAN_CSCIE	0x40
+#define MSCAN_RSTATE1	0x20
+#define MSCAN_RSTATE0	0x10
+#define MSCAN_TSTATE1	0x08
+#define MSCAN_TSTATE0	0x04
+#define MSCAN_OVRIE	0x02
+#define MSCAN_RXFIE	0x01
+
+/* MSCAN transmitter flag register (CANTFLG) bits */
+#define MSCAN_TXE2	0x04
+#define MSCAN_TXE1	0x02
+#define MSCAN_TXE0	0x01
+#define MSCAN_TXE	(MSCAN_TXE2 | MSCAN_TXE1 | MSCAN_TXE0)
+
+/* MSCAN transmitter interrupt enable register (CANTIER) bits */
+#define MSCAN_TXIE2	0x04
+#define MSCAN_TXIE1	0x02
+#define MSCAN_TXIE0	0x01
+#define MSCAN_TXIE	(MSCAN_TXIE2 | MSCAN_TXIE1 | MSCAN_TXIE0)
+
+/* MSCAN transmitter message abort request (CANTARQ) bits */
+#define MSCAN_ABTRQ2	0x04
+#define MSCAN_ABTRQ1	0x02
+#define MSCAN_ABTRQ0	0x01
+
+/* MSCAN transmitter message abort ack (CANTAAK) bits */
+#define MSCAN_ABTAK2	0x04
+#define MSCAN_ABTAK1	0x02
+#define MSCAN_ABTAK0	0x01
+
+/* MSCAN transmit buffer selection (CANTBSEL) bits */
+#define MSCAN_TX2	0x04
+#define MSCAN_TX1	0x02
+#define MSCAN_TX0	0x01
+
+/* MSCAN ID acceptance control register (CANIDAC) bits */
+#define MSCAN_IDAM1	0x20
+#define MSCAN_IDAM0	0x10
+#define MSCAN_IDHIT2	0x04
+#define MSCAN_IDHIT1	0x02
+#define MSCAN_IDHIT0	0x01
+
+struct mscan_msgbuf {
+	volatile u8  idr[0x8];		/* 0x00 */
+	volatile u8  dsr[0x10];		/* 0x08 */
+	volatile u8  dlr;		/* 0x18 */
+	volatile u8  tbpr;		/* 0x19 */	/* This register is not applicable for receive buffers */
+	volatile u16 rsrv1;		/* 0x1A */
+	volatile u8  tsrh;		/* 0x1C */
+	volatile u8  tsrl;		/* 0x1D */
+	volatile u16 rsrv2;		/* 0x1E */
+};
+
+struct mscan_regs {
+	volatile u8  canctl0;		/* MSCAN + 0x00 */
+	volatile u8  canctl1;		/* MSCAN + 0x01 */
+	volatile u16 rsrv1;		/* MSCAN + 0x02 */
+	volatile u8  canbtr0;		/* MSCAN + 0x04 */
+	volatile u8  canbtr1;		/* MSCAN + 0x05 */
+	volatile u16 rsrv2;		/* MSCAN + 0x06 */
+	volatile u8  canrflg;		/* MSCAN + 0x08 */
+	volatile u8  canrier;		/* MSCAN + 0x09 */
+	volatile u16 rsrv3;		/* MSCAN + 0x0A */
+	volatile u8  cantflg;		/* MSCAN + 0x0C */
+	volatile u8  cantier;		/* MSCAN + 0x0D */
+	volatile u16 rsrv4;		/* MSCAN + 0x0E */
+	volatile u8  cantarq;		/* MSCAN + 0x10 */
+	volatile u8  cantaak;		/* MSCAN + 0x11 */
+	volatile u16 rsrv5;		/* MSCAN + 0x12 */
+	volatile u8  cantbsel;		/* MSCAN + 0x14 */
+	volatile u8  canidac;		/* MSCAN + 0x15 */
+	volatile u16 rsrv6[3];		/* MSCAN + 0x16 */
+	volatile u8  canrxerr;		/* MSCAN + 0x1C */
+	volatile u8  cantxerr;		/* MSCAN + 0x1D */
+	volatile u16 rsrv7;		/* MSCAN + 0x1E */
+	volatile u8  canidar0;		/* MSCAN + 0x20 */
+	volatile u8  canidar1;		/* MSCAN + 0x21 */
+	volatile u16 rsrv8;		/* MSCAN + 0x22 */
+	volatile u8  canidar2;		/* MSCAN + 0x24 */
+	volatile u8  canidar3;		/* MSCAN + 0x25 */
+	volatile u16 rsrv9;		/* MSCAN + 0x26 */
+	volatile u8  canidmr0;		/* MSCAN + 0x28 */
+	volatile u8  canidmr1;		/* MSCAN + 0x29 */
+	volatile u16 rsrv10;		/* MSCAN + 0x2A */
+	volatile u8  canidmr2;		/* MSCAN + 0x2C */
+	volatile u8  canidmr3;		/* MSCAN + 0x2D */
+	volatile u16 rsrv11;		/* MSCAN + 0x2E */
+	volatile u8  canidar4;		/* MSCAN + 0x30 */
+	volatile u8  canidar5;		/* MSCAN + 0x31 */
+	volatile u16 rsrv12;		/* MSCAN + 0x32 */
+	volatile u8  canidar6;		/* MSCAN + 0x34 */
+	volatile u8  canidar7;		/* MSCAN + 0x35 */
+	volatile u16 rsrv13;		/* MSCAN + 0x36 */
+	volatile u8  canidmr4;		/* MSCAN + 0x38 */
+	volatile u8  canidmr5;		/* MSCAN + 0x39 */
+	volatile u16 rsrv14;		/* MSCAN + 0x3A */
+	volatile u8  canidmr6;		/* MSCAN + 0x3C */
+	volatile u8  canidmr7;		/* MSCAN + 0x3D */
+	volatile u16 rsrv15;		/* MSCAN + 0x3E */
+
+	struct mscan_msgbuf canrxfg;	/* MSCAN + 0x40 */    /* Foreground receive buffer */
+	struct mscan_msgbuf cantxfg;	/* MSCAN + 0x60 */    /* Foreground transmit buffer */
+};
+
+/* Clock source selection
+ */
+#define MSCAN_CLKSRC_BUS	0
+#define MSCAN_CLKSRC_XTAL	MSCAN_CLKSRC
+#define MSCAN_CLKSRC_IPS	MSCAN_CLKSRC
+
+/* Message type access macros.
+ */
+#define MSCAN_BUF_STD_RTR	0x10
+#define MSCAN_BUF_EXT_RTR	0x01
+#define MSCAN_BUF_EXTENDED	0x08
+
+#define MSCAN_IDAM1		0x20
+/* Value for the interrupt enable register */
+#define MSCAN_RIER		(MSCAN_OVRIE |		\
+				 MSCAN_RXFIE |		\
+				 MSCAN_WUPIF |		\
+				 MSCAN_CSCIE |		\
+				 MSCAN_RSTATE0 |	\
+				 MSCAN_RSTATE1 |	\
+				 MSCAN_TSTATE0 |	\
+				 MSCAN_TSTATE1)
+
+#define BTR0_BRP_MASK		0x3f
+#define BTR0_SJW_SHIFT		6
+#define BTR0_SJW_MASK		(0x3 << BTR0_SJW_SHIFT)
+
+#define BTR1_TSEG1_MASK		0xf
+#define BTR1_TSEG2_SHIFT	4
+#define BTR1_TSEG2_MASK		(0x7 << BTR1_TSEG2_SHIFT)
+#define BTR1_SAM_SHIFT		7
+
+#define BTR0_SET_BRP(brp)	(((brp) - 1) & BTR0_BRP_MASK)
+#define BTR0_SET_SJW(sjw)	((((sjw) - 1) << BTR0_SJW_SHIFT) & \
+				 BTR0_SJW_MASK)
+
+#define BTR1_SET_TSEG1(tseg1)	(((tseg1) - 1) & BTR1_TSEG1_MASK)
+#define BTR1_SET_TSEG2(tseg2)	((((tseg2) - 1) << BTR1_TSEG2_SHIFT) & \
+				 BTR1_TSEG2_MASK)
+#define BTR1_SET_SAM(sam)	(((sam) & 1) << BTR1_SAM_SHIFT)
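+
+/*
+ * Illustrative example (not compiled in), assuming a hypothetical 32 MHz
+ * CAN clock: a 500 kbit/s configuration with brp = 4, sjw = 1, tseg1 = 13
+ * and tseg2 = 2 (16 time quanta per bit, 87.5% sample point) encodes as
+ *
+ *	btr0 = BTR0_SET_BRP(4) | BTR0_SET_SJW(1);		-> 0x03
+ *	btr1 = BTR1_SET_TSEG1(13) | BTR1_SET_TSEG2(2) |
+ *	       BTR1_SET_SAM(0);					-> 0x1c
+ *
+ * which is what rtcan_mscan_set_bit_time() writes to CANBTR0/CANBTR1 for a
+ * CAN_BITTIME_STD request carrying these values.
+ */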
+
+#endif /* __RTCAN_MSCAN_REGS_H_ */
+++ linux-patched/drivers/xenomai/can/mscan/Makefile	2022-03-21 12:58:29.257890249 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:3 @
--- linux/drivers/xenomai/can/mscan/rtcan_mscan.c	1970-01-01 01:00:00.000000000 +0100
+
+ccflags-y += -I$(srctree)/drivers/xenomai/can -I$(srctree)/drivers/xenomai/can/mscan
+
+obj-$(CONFIG_XENO_DRIVERS_CAN_MSCAN) += xeno_can_mscan.o
+
+xeno_can_mscan-y := rtcan_mscan.o rtcan_mscan_proc.o rtcan_mscan_mpc5xxx.o
+++ linux-patched/drivers/xenomai/can/mscan/rtcan_mscan.c	2022-03-21 12:58:29.253890287 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/mscan/rtcan_mscan.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006-2010 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                          <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * Derived from the PCAN project file driver/src/pcan_mpc5200.c:
+ *
+ * Copyright (c) 2003 Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+ *
+ * Copyright (c) 2005 Felix Daners, Plugit AG, felix.daners@plugit.ch
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include "rtcan_dev.h"
+#include "rtcan_raw.h"
+#include "rtcan_internal.h"
+#include "rtcan_mscan_regs.h"
+#include "rtcan_mscan.h"
+
+#define MSCAN_SET_MODE_RETRIES	255
+
+#ifndef CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+static struct can_bittiming_const mscan_bittiming_const = {
+	.name = "mscan",
+	.tseg1_min = 4,
+	.tseg1_max = 16,
+	.tseg2_min = 2,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 64,
+	.brp_inc = 1,
+};
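+
+/*
+ * Note: these limits mirror the MSCAN bit-timing register fields (see the
+ * BTR0/BTR1 macros in rtcan_mscan_regs.h): BRP is a 6-bit field (1..64),
+ * SJW a 2-bit field (1..4), TSEG1 a 4-bit field (up to 16 tq) and TSEG2 a
+ * 3-bit field (up to 8 tq), so rtcan_calc_bit_time() can only produce
+ * values this hardware accepts.
+ */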
+#endif
+
+/**
+ *  Reception Interrupt handler
+ *
+ *  Inline function first called within @ref rtcan_mscan_interrupt when an RX
+ *  interrupt is detected. Here the HW registers are read out and composed
+ *  into a struct rtcan_skb.
+ *
+ *  @param[out] skb  Pointer to an instance of struct rtcan_skb which will be
+ *                   filled with the received CAN message
+ *  @param[in]  dev  Device structure
+ */
+static inline void rtcan_mscan_rx_interrupt(struct rtcan_device *dev,
+					    struct rtcan_skb *skb)
+{
+	int i;
+	unsigned char size;
+	struct rtcan_rb_frame *frame = &skb->rb_frame;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+
+	skb->rb_frame_size = EMPTY_RB_FRAME_SIZE;
+
+	frame->can_dlc = in_8(&regs->canrxfg.dlr) & 0x0F;
+
+	/* If DLC exceeds 8 bytes adjust it to 8 (for the payload size) */
+	size = (frame->can_dlc > 8) ? 8 : frame->can_dlc;
+
+	if (in_8(&regs->canrxfg.idr[1]) & MSCAN_BUF_EXTENDED) {
+		frame->can_id = ((in_8(&regs->canrxfg.idr[0]) << 21) |
+				 ((in_8(&regs->canrxfg.idr[1]) & 0xE0) << 13) |
+				 ((in_8(&regs->canrxfg.idr[1]) & 0x07) << 15) |
+				 (in_8(&regs->canrxfg.idr[4]) << 7) |
+				 (in_8(&regs->canrxfg.idr[5]) >> 1));
+
+		frame->can_id |= CAN_EFF_FLAG;
+
+		if ((in_8(&regs->canrxfg.idr[5]) & MSCAN_BUF_EXT_RTR)) {
+			frame->can_id |= CAN_RTR_FLAG;
+		} else {
+			for (i = 0; i < size; i++)
+				frame->data[i] =
+					in_8(&regs->canrxfg.dsr[i +
+								(i / 2) * 2]);
+			skb->rb_frame_size += size;
+		}
+
+	} else {
+		frame->can_id = ((in_8(&regs->canrxfg.idr[0]) << 3) |
+				 (in_8(&regs->canrxfg.idr[1]) >> 5));
+
+		if ((in_8(&regs->canrxfg.idr[1]) & MSCAN_BUF_STD_RTR)) {
+			frame->can_id |= CAN_RTR_FLAG;
+		} else {
+			for (i = 0; i < size; i++)
+				frame->data[i] =
+					in_8(&regs->canrxfg.dsr[i +
+								(i / 2) * 2]);
+			skb->rb_frame_size += size;
+		}
+	}
+
+
+	/* Store the interface index */
+	frame->can_ifindex = dev->ifindex;
+}
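+
+/*
+ * For reference, the IDR byte layout decoded above (and mirrored in
+ * rtcan_mscan_start_xmit() for transmission) is:
+ *
+ *	standard frames: ID10..3 in IDR0, ID2..0 in IDR1[7:5], RTR in IDR1[4]
+ *	extended frames: ID28..21 in IDR0, ID20..18 in IDR1[7:5],
+ *			 SRR/IDE in IDR1[4:3], ID17..15 in IDR1[2:0],
+ *			 ID14..7 in IDR4, ID6..0 in IDR5[7:1], RTR in IDR5[0]
+ *
+ * e.g. the standard ID 0x123 is stored as IDR0 = 0x24, IDR1 = 0x60.
+ */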
+
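+/*
+ * Index corresponds to the two-bit RSTAT/TSTAT value extracted from
+ * CANRFLG in rtcan_mscan_err_interrupt() below:
+ * 0 = error active, 1 = bus warning, 2 = bus passive, 3 = bus-off.
+ */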
+static can_state_t mscan_stat_map[4] = {
+	CAN_STATE_ACTIVE,
+	CAN_STATE_BUS_WARNING,
+	CAN_STATE_BUS_PASSIVE,
+	CAN_STATE_BUS_OFF
+};
+
+static inline void rtcan_mscan_err_interrupt(struct rtcan_device *dev,
+					     struct rtcan_skb *skb,
+					     int r_status)
+{
+	u8 rstat, tstat;
+	struct rtcan_rb_frame *frame = &skb->rb_frame;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+
+	skb->rb_frame_size = EMPTY_RB_FRAME_SIZE + CAN_ERR_DLC;
+
+	frame->can_id = CAN_ERR_FLAG;
+	frame->can_dlc = CAN_ERR_DLC;
+
+	memset(&frame->data[0], 0, frame->can_dlc);
+
+	if ((r_status & MSCAN_OVRIF)) {
+		frame->can_id |= CAN_ERR_CRTL;
+		frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+	} else if ((r_status & (MSCAN_CSCIF))) {
+
+		rstat = (r_status & (MSCAN_TSTAT0 |
+				     MSCAN_TSTAT1)) >> 2 & 0x3;
+		tstat = (r_status & (MSCAN_RSTAT0 |
+				     MSCAN_RSTAT1)) >> 4 & 0x3;
+		dev->state = mscan_stat_map[max(rstat, tstat)];
+
+		switch (dev->state) {
+		case CAN_STATE_BUS_OFF:
+			/* Bus-off condition */
+			frame->can_id |= CAN_ERR_BUSOFF;
+			dev->state = CAN_STATE_BUS_OFF;
+			/* Disable receiver interrupts */
+			out_8(&regs->canrier, 0);
+			/* Wake up waiting senders */
+			rtdm_sem_destroy(&dev->tx_sem);
+			break;
+
+		case CAN_STATE_BUS_PASSIVE:
+			frame->can_id |= CAN_ERR_CRTL;
+			if (tstat > rstat)
+				frame->data[1] = CAN_ERR_CRTL_TX_PASSIVE;
+			else
+				frame->data[1] = CAN_ERR_CRTL_RX_PASSIVE;
+			break;
+
+		case CAN_STATE_BUS_WARNING:
+			frame->can_id |= CAN_ERR_CRTL;
+			if (tstat > rstat)
+				frame->data[1] = CAN_ERR_CRTL_TX_WARNING;
+			else
+				frame->data[1] = CAN_ERR_CRTL_RX_WARNING;
+			break;
+
+		default:
+			break;
+
+		}
+	}
+	/* Store the interface index */
+	frame->can_ifindex = dev->ifindex;
+}
+
+/** Interrupt handler */
+static int rtcan_mscan_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtcan_skb skb;
+	struct rtcan_device *dev;
+	struct mscan_regs *regs;
+	u8 canrflg;
+	int recv_lock_free = 1;
+	int ret = RTDM_IRQ_NONE;
+
+
+	dev = (struct rtcan_device *)rtdm_irq_get_arg(irq_handle, void);
+	regs = (struct mscan_regs *)dev->base_addr;
+
+	rtdm_lock_get(&dev->device_lock);
+
+	canrflg = in_8(&regs->canrflg);
+
+	ret = RTDM_IRQ_HANDLED;
+
+	/* Transmit Interrupt? */
+	if ((in_8(&regs->cantier) & MSCAN_TXIE0) &&
+	    (in_8(&regs->cantflg) & MSCAN_TXE0)) {
+		out_8(&regs->cantier, 0);
+		/* Wake up a sender */
+		rtdm_sem_up(&dev->tx_sem);
+
+		if (rtcan_loopback_pending(dev)) {
+
+			if (recv_lock_free) {
+				recv_lock_free = 0;
+				rtdm_lock_get(&rtcan_recv_list_lock);
+				rtdm_lock_get(&rtcan_socket_lock);
+			}
+
+			rtcan_loopback(dev);
+		}
+	}
+
+	/* Wakeup interrupt?  */
+	if ((canrflg & MSCAN_WUPIF)) {
+		rtdm_printk("WUPIF interrupt\n");
+	}
+
+	/* Receive Interrupt? */
+	if ((canrflg & MSCAN_RXF)) {
+
+		/* Read out HW registers */
+		rtcan_mscan_rx_interrupt(dev, &skb);
+
+		/* Take more locks. Ensure that they are taken and
+		 * released only once in the IRQ handler. */
+		/* WARNING: Nested locks are dangerous! But they are
+		 * nested only in this routine so a deadlock should
+		 * not be possible. */
+		if (recv_lock_free) {
+			recv_lock_free = 0;
+			rtdm_lock_get(&rtcan_recv_list_lock);
+			rtdm_lock_get(&rtcan_socket_lock);
+		}
+
+		/* Pass received frame out to the sockets */
+		rtcan_rcv(dev, &skb);
+	}
+
+	/* Error Interrupt? */
+	if ((canrflg & (MSCAN_CSCIF | MSCAN_OVRIF))) {
+		/* Check error condition and fill error frame */
+		rtcan_mscan_err_interrupt(dev, &skb, canrflg);
+
+		if (recv_lock_free) {
+			recv_lock_free = 0;
+			rtdm_lock_get(&rtcan_recv_list_lock);
+			rtdm_lock_get(&rtcan_socket_lock);
+		}
+
+		/* Pass error frame out to the sockets */
+		rtcan_rcv(dev, &skb);
+	}
+
+	/* Acknowledge the handled interrupt within the controller.
+	 * Only do so for the receiver interrupts.
+	 */
+	if (canrflg)
+		out_8(&regs->canrflg, canrflg);
+
+	if (!recv_lock_free) {
+		rtdm_lock_put(&rtcan_socket_lock);
+		rtdm_lock_put(&rtcan_recv_list_lock);
+	}
+	rtdm_lock_put(&dev->device_lock);
+
+	return ret;
+}
+
+/**
+ *   Set controller into reset mode. Called from @ref rtcan_mscan_ioctl
+ *   (main usage), init_module and cleanup_module.
+ *
+ *   @param dev      Device structure
+ *   @param lock_ctx Pointer to the saved IRQ context if the device lock was
+ *                   taken before calling this function, NULL otherwise. When
+ *                   non-NULL, the lock is dropped and re-acquired around the
+ *                   busy-sleep periods.
+ *
+ *   @return 0 on success, otherwise:
+ *   - -ENODEV: Reset mode bit could not be verified after setting it.
+ *              See also note.
+ *
+ *   @note According to the MSCAN specification, it is necessary to check
+ *   the reset mode bit in PeliCAN mode after having set it. So we do. But if
+ *   using an ISA card like the PHYTEC eNET card this should not be necessary
+ *   because the CAN controller clock of this card (16 MHz) is twice as high
+ *   as the ISA bus clock.
+ */
+static int rtcan_mscan_mode_stop(struct rtcan_device *dev,
+				 rtdm_lockctx_t *lock_ctx)
+{
+	int ret = 0;
+	int rinit = 0;
+	can_state_t state;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+	u8 reg;
+
+	state = dev->state;
+	/* If controller is not operating anyway, go out */
+	if (!CAN_STATE_OPERATING(state))
+		goto out;
+
+	/* Switch to sleep mode */
+	setbits8(&regs->canctl0, MSCAN_SLPRQ);
+	reg = in_8(&regs->canctl1);
+	while (!(reg & MSCAN_SLPAK) &&
+	        (rinit < MSCAN_SET_MODE_RETRIES)) {
+		if (likely(lock_ctx != NULL))
+			rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+		/* Busy sleep 1 microsecond */
+		rtdm_task_busy_sleep(1000);
+		if (likely(lock_ctx != NULL))
+			rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+		rinit++;
+		reg = in_8(&regs->canctl1);
+	}
+	/*
+	 * The MSCAN controller will fail to enter sleep mode while
+	 * there is irregular activity on the bus, e.g. when some node
+	 * keeps retransmitting. This behavior is undocumented and seems
+	 * to differ between the MSCAN built into the MPC5200B and the
+	 * MPC5200. We proceed in that case, since otherwise the SLPRQ
+	 * bit would be kept set and the controller would get stuck.
+	 * NOTE: INITRQ or CSWAI will immediately abort any transmit
+	 * actions that are still active.
+	 */
+	if (rinit >= MSCAN_SET_MODE_RETRIES)
+		rtdm_printk("rtcan_mscan: device failed to enter sleep mode. "
+				"We proceed anyhow.\n");
+	else
+		dev->state = CAN_STATE_SLEEPING;
+
+	rinit = 0;
+	setbits8(&regs->canctl0, MSCAN_INITRQ);
+
+	reg = in_8(&regs->canctl1);
+	while (!(reg & MSCAN_INITAK) &&
+	        (rinit < MSCAN_SET_MODE_RETRIES)) {
+		if (likely(lock_ctx != NULL))
+			rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+		/* Busy sleep 1 microsecond */
+		rtdm_task_busy_sleep(1000);
+		if (likely(lock_ctx != NULL))
+			rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+		rinit++;
+		reg = in_8(&regs->canctl1);
+	}
+	if (rinit >= MSCAN_SET_MODE_RETRIES)
+		ret = -ENODEV;
+
+	/* The volatile state could have changed while we busy-slept. */
+	dev->state = CAN_STATE_STOPPED;
+	/* Wake up waiting senders */
+	rtdm_sem_destroy(&dev->tx_sem);
+
+out:
+	return ret;
+}
+
+/**
+ *   Set controller into operating mode.
+ *
+ *   Called from @ref rtcan_mscan_ioctl in spin locked and IRQ disabled
+ *   context.
+ *
+ *   @param dev      Device structure
+ *   @param lock_ctx Pointer to saved IRQ context (only used when coming
+ *                   from @ref CAN_STATE_SLEEPING, see also note)
+ *
+ *   @return 0 on success, otherwise:
+ *   - -EINVAL: No Baud rate set before request to set start mode
+ *
+ *   @note If coming from @c CAN_STATE_SLEEPING, the controller must wait
+ *         some time to avoid bus errors. Measured on a PHYTEC eNET card,
+ *         this time was 110 microseconds.
+ */
+static int rtcan_mscan_mode_start(struct rtcan_device *dev,
+				  rtdm_lockctx_t *lock_ctx)
+{
+	int ret = 0, retries = 0;
+	can_state_t state;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+
+	/* We won't forget that state in the device structure is volatile and
+	 * access to it will not be optimized by the compiler. So ... */
+	state = dev->state;
+
+	switch (state) {
+	case CAN_STATE_ACTIVE:
+	case CAN_STATE_BUS_WARNING:
+	case CAN_STATE_BUS_PASSIVE:
+		break;
+
+	case CAN_STATE_SLEEPING:
+	case CAN_STATE_STOPPED:
+		/* Set error active state */
+		state = CAN_STATE_ACTIVE;
+		/* Set up sender "mutex" */
+		rtdm_sem_init(&dev->tx_sem, 1);
+
+		if ((dev->ctrl_mode & CAN_CTRLMODE_LISTENONLY)) {
+			setbits8(&regs->canctl1, MSCAN_LISTEN);
+		} else {
+			clrbits8(&regs->canctl1, MSCAN_LISTEN);
+		}
+		if ((dev->ctrl_mode & CAN_CTRLMODE_LOOPBACK)) {
+			setbits8(&regs->canctl1, MSCAN_LOOPB);
+		} else {
+			clrbits8(&regs->canctl1, MSCAN_LOOPB);
+		}
+
+		/* Switch to normal mode */
+		clrbits8(&regs->canctl0, MSCAN_INITRQ);
+		clrbits8(&regs->canctl0, MSCAN_SLPRQ);
+		while ((in_8(&regs->canctl1) & MSCAN_INITAK) ||
+		       (in_8(&regs->canctl1) & MSCAN_SLPAK)) {
+			if (likely(lock_ctx != NULL))
+				rtdm_lock_put_irqrestore(&dev->device_lock,
+							 *lock_ctx);
+			/* Busy sleep 1 microsecond */
+			rtdm_task_busy_sleep(1000);
+			if (likely(lock_ctx != NULL))
+				rtdm_lock_get_irqsave(&dev->device_lock,
+						      *lock_ctx);
+			retries++;
+		}
+		/* Enable interrupts */
+		setbits8(&regs->canrier, MSCAN_RIER);
+
+		break;
+
+	case CAN_STATE_BUS_OFF:
+		/* Trigger bus-off recovery */
+		out_8(&regs->canrier, MSCAN_RIER);
+		/* Set up sender "mutex" */
+		rtdm_sem_init(&dev->tx_sem, 1);
+		/* Set error active state */
+		state = CAN_STATE_ACTIVE;
+
+		break;
+
+	default:
+		/* Never reached, but we don't want nasty compiler warnings */
+		break;
+	}
+	/* Store new state in device structure (or old state) */
+	dev->state = state;
+
+	return ret;
+}
+
+static int rtcan_mscan_set_bit_time(struct rtcan_device *dev,
+				    struct can_bittime *bit_time,
+				    rtdm_lockctx_t *lock_ctx)
+{
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+	u8 btr0, btr1;
+
+	switch (bit_time->type) {
+	case CAN_BITTIME_BTR:
+		btr0 = bit_time->btr.btr0;
+		btr1 = bit_time->btr.btr1;
+		break;
+
+	case CAN_BITTIME_STD:
+		btr0 = (BTR0_SET_BRP(bit_time->std.brp) |
+			BTR0_SET_SJW(bit_time->std.sjw));
+		btr1 = (BTR1_SET_TSEG1(bit_time->std.prop_seg +
+				       bit_time->std.phase_seg1) |
+			BTR1_SET_TSEG2(bit_time->std.phase_seg2) |
+			BTR1_SET_SAM(bit_time->std.sam));
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	out_8(&regs->canbtr0, btr0);
+	out_8(&regs->canbtr1, btr1);
+
+	rtdm_printk("%s: btr0=0x%02x btr1=0x%02x\n", dev->name, btr0, btr1);
+
+	return 0;
+}
+
+static int rtcan_mscan_set_mode(struct rtcan_device *dev,
+				can_mode_t mode,
+				rtdm_lockctx_t *lock_ctx)
+{
+	int ret = 0, retries = 0;
+	can_state_t state;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+
+	switch (mode) {
+
+	case CAN_MODE_STOP:
+		ret = rtcan_mscan_mode_stop(dev, lock_ctx);
+		break;
+
+	case CAN_MODE_START:
+		ret = rtcan_mscan_mode_start(dev, lock_ctx);
+		break;
+
+	case CAN_MODE_SLEEP:
+
+		state = dev->state;
+
+		/* Controller must operate, otherwise go out */
+		if (!CAN_STATE_OPERATING(state)) {
+			ret = -ENETDOWN;
+			goto mode_sleep_out;
+		}
+
+		/* Is controller sleeping yet? If yes, go out */
+		if (state == CAN_STATE_SLEEPING)
+			goto mode_sleep_out;
+
+		/* Remember into which state to return when we
+		 * wake up */
+		dev->state_before_sleep = state;
+		state = CAN_STATE_SLEEPING;
+
+		/* Let's take a nap. (Now I REALLY understand
+		 * the meaning of interrupts ...) */
+		out_8(&regs->canrier, 0);
+		out_8(&regs->cantier, 0);
+		setbits8(&regs->canctl0,
+			 MSCAN_SLPRQ /*| MSCAN_INITRQ*/ | MSCAN_WUPE);
+		while (!(in_8(&regs->canctl1) & MSCAN_SLPAK)) {
+			rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+			/* Busy sleep 1 microsecond */
+			rtdm_task_busy_sleep(1000);
+			rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+			if (retries++ >= 1000)
+				break;
+		}
+		rtdm_printk("Fallen asleep after %d tries.\n", retries);
+		clrbits8(&regs->canctl0, MSCAN_INITRQ);
+		while ((in_8(&regs->canctl1) & MSCAN_INITAK)) {
+			rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+			/* Busy sleep 1 microsecond */
+			rtdm_task_busy_sleep(1000);
+			rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+			if (retries++ >= 1000)
+				break;
+		}
+		rtdm_printk("Back to normal after %d tries.\n", retries);
+		out_8(&regs->canrier, MSCAN_WUPIE);
+
+	mode_sleep_out:
+		dev->state = state;
+		break;
+
+	default:
+		ret = -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
+/**
+ *  Start a transmission to a MSCAN
+ *
+ *  Inline function called within @ref rtcan_mscan_sendmsg.
+ *  This is the completion of a send call when hardware access is granted.
+ *  Spinlock is taken before calling this function.
+ *
+ *  @param[in] frame  Pointer to CAN frame which is about to be sent
+ *  @param[in] dev Device ID
+ */
+static int rtcan_mscan_start_xmit(struct rtcan_device *dev, can_frame_t *frame)
+{
+	int             i, id;
+	/* "Real" size of the payload */
+	unsigned char   size;
+	/* Content of frame information register */
+	unsigned char   dlc;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+
+	/* Is TX buffer empty? */
+	if (!(in_8(&regs->cantflg) & MSCAN_TXE0)) {
+		rtdm_printk("rtcan_mscan_start_xmit: TX buffer not empty");
+		return -EIO;
+	}
+	/* Select the buffer we've found. */
+	out_8(&regs->cantbsel, MSCAN_TXE0);
+
+	/* Get DLC and ID */
+	dlc = frame->can_dlc;
+
+	/* If DLC exceeds 8 bytes adjust it to 8 (for the payload) */
+	size = (dlc > 8) ? 8 : dlc;
+
+	id = frame->can_id;
+	if (frame->can_id & CAN_EFF_FLAG) {
+		out_8(&regs->cantxfg.idr[0], (id & 0x1fe00000) >> 21);
+		out_8(&regs->cantxfg.idr[1], ((id & 0x001c0000) >> 13) |
+		      ((id & 0x00038000) >> 15) |
+		      0x18); /* set SRR and IDE bits */
+
+		out_8(&regs->cantxfg.idr[4], (id & 0x00007f80) >> 7);
+		out_8(&regs->cantxfg.idr[5], (id & 0x0000007f) << 1);
+
+		/* RTR? */
+		if (frame->can_id & CAN_RTR_FLAG)
+			setbits8(&regs->cantxfg.idr[5], 0x1);
+		else {
+			clrbits8(&regs->cantxfg.idr[5], 0x1);
+			/* No RTR, write data bytes */
+			for (i = 0; i < size; i++)
+				out_8(&regs->cantxfg.dsr[i + (i / 2) * 2],
+				      frame->data[i]);
+		}
+
+	} else {
+		/* Send standard frame */
+
+		out_8(&regs->cantxfg.idr[0], (id & 0x000007f8) >> 3);
+		out_8(&regs->cantxfg.idr[1], (id & 0x00000007) << 5);
+
+		/* RTR? */
+		if (frame->can_id & CAN_RTR_FLAG)
+			setbits8(&regs->cantxfg.idr[1], 0x10);
+		else {
+			clrbits8(&regs->cantxfg.idr[1], 0x10);
+			/* No RTR, write data bytes */
+			for (i = 0; i < size; i++)
+				out_8(&regs->cantxfg.dsr[i + (i / 2) * 2],
+				      frame->data[i]);
+		}
+	}
+
+	out_8(&regs->cantxfg.dlr, frame->can_dlc);
+	out_8(&regs->cantxfg.tbpr, 0);	/* all messages have the same prio */
+
+	/* Trigger transmission. */
+	out_8(&regs->cantflg, MSCAN_TXE0);
+
+	/* Enable interrupt. */
+	setbits8(&regs->cantier, MSCAN_TXIE0);
+
+	return 0;
+}
+
+/**
+ *  MSCAN Chip configuration
+ *
+ *  Called during @ref init_module. Here, the configuration registers which
+ *  must be set only once are written with the right values. The controller
+ *  is left in reset mode and does not go into operating mode until the
+ *  start IOCTL is issued.
+ *
+ *  @param[in] regs Register base of the controller to be configured
+ *  @param[in] mscan_clksrc Nonzero to select the IP bus clock as clock source
+ */
+static inline void __init mscan_chip_config(struct mscan_regs *regs,
+					    int mscan_clksrc)
+{
+	/* Choose IP bus as clock source.
+	 */
+	if (mscan_clksrc)
+		setbits8(&regs->canctl1, MSCAN_CLKSRC);
+	clrbits8(&regs->canctl1, MSCAN_LISTEN);
+
+	/* Configure MSCAN to accept all incoming messages.
+	 */
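+	/*
+	 * All acceptance mask bits are set (don't care), so every CAN ID
+	 * matches regardless of the acceptance code registers.
+	 */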
+	out_8(&regs->canidar0, 0x00);
+	out_8(&regs->canidar1, 0x00);
+	out_8(&regs->canidar2, 0x00);
+	out_8(&regs->canidar3, 0x00);
+	out_8(&regs->canidmr0, 0xFF);
+	out_8(&regs->canidmr1, 0xFF);
+	out_8(&regs->canidmr2, 0xFF);
+	out_8(&regs->canidmr3, 0xFF);
+	out_8(&regs->canidar4, 0x00);
+	out_8(&regs->canidar5, 0x00);
+	out_8(&regs->canidar6, 0x00);
+	out_8(&regs->canidar7, 0x00);
+	out_8(&regs->canidmr4, 0xFF);
+	out_8(&regs->canidmr5, 0xFF);
+	out_8(&regs->canidmr6, 0xFF);
+	out_8(&regs->canidmr7, 0xFF);
+	clrbits8(&regs->canidac, MSCAN_IDAM0 | MSCAN_IDAM1);
+}
+
+/**
+ *  MSCAN Chip registration
+ *
+ *  Called during @ref init_module.
+ *
+ *  @param[in] dev Device ID of the controller to be registered
+ *  @param[in] irq Interrupt line assigned to the controller
+ *  @param[in] mscan_clksrc Clock source to be used
+ */
+int rtcan_mscan_register(struct rtcan_device *dev, int irq, int mscan_clksrc)
+{
+	int ret;
+	struct mscan_regs *regs;
+
+	regs = (struct mscan_regs *)dev->base_addr;
+
+	/* Enable MSCAN module. */
+	setbits8(&regs->canctl1, MSCAN_CANE);
+	udelay(100);
+
+	/* Set dummy state for following call */
+	dev->state = CAN_STATE_ACTIVE;
+
+	/* Enter reset mode */
+	rtcan_mscan_mode_stop(dev, NULL);
+
+	/* Give device an interface name (so that programs using this driver
+	   don't need to know the device ID) */
+
+	strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+	dev->hard_start_xmit = rtcan_mscan_start_xmit;
+	dev->do_set_mode = rtcan_mscan_set_mode;
+	dev->do_set_bit_time = rtcan_mscan_set_bit_time;
+#ifndef CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+	dev->bittiming_const = &mscan_bittiming_const;
+#endif
+
+	/* Register IRQ handler and pass device structure as arg */
+	ret = rtdm_irq_request(&dev->irq_handle, irq, rtcan_mscan_interrupt,
+			       0, RTCAN_DRV_NAME, (void *)dev);
+	if (ret) {
+		printk("ERROR! rtdm_irq_request for IRQ %d failed\n", irq);
+		goto out_can_disable;
+	}
+
+	mscan_chip_config(regs, mscan_clksrc);
+
+	/* Register RTDM device */
+	ret = rtcan_dev_register(dev);
+	if (ret) {
+		printk(KERN_ERR
+		       "ERROR while trying to register RTCAN device!\n");
+		goto out_irq_free;
+	}
+
+	rtcan_mscan_create_proc(dev);
+
+	return 0;
+
+out_irq_free:
+	rtdm_irq_free(&dev->irq_handle);
+
+out_can_disable:
+	/* Disable MSCAN module. */
+	clrbits8(&regs->canctl1, MSCAN_CANE);
+
+	return ret;
+}
+
+/**
+ *  MSCAN Chip deregistration
+ *
+ *  Called during @ref cleanup_module.
+ *
+ *  @param[in] dev Device ID of the controller to be unregistered
+ */
+int rtcan_mscan_unregister(struct rtcan_device *dev)
+{
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+
+	printk("Unregistering %s device %s\n", RTCAN_DRV_NAME, dev->name);
+
+	rtcan_mscan_mode_stop(dev, NULL);
+	rtdm_irq_free(&dev->irq_handle);
+	rtcan_mscan_remove_proc(dev);
+	rtcan_dev_unregister(dev);
+
+	/* Disable MSCAN module. */
+	clrbits8(&regs->canctl1, MSCAN_CANE);
+
+	return 0;
+}
+++ linux-patched/drivers/xenomai/can/mscan/rtcan_mscan.h	2022-03-21 12:58:29.249890327 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/rtcan_flexcan.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Wolfgang Grandegger <wg@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __RTCAN_MSCAN_H_
+#define __RTCAN_MSCAN_H_
+
+#define RTCAN_DEV_NAME    "rtcan%d"
+#define RTCAN_DRV_NAME    "rtcan_mscan"
+
+/* MSCAN type variants */
+enum {
+	MSCAN_TYPE_MPC5200,
+	MSCAN_TYPE_MPC5121
+};
+
+extern int rtcan_mscan_register(struct rtcan_device *dev, int irq,
+				       int mscan_clksrc);
+extern int rtcan_mscan_unregister(struct rtcan_device *dev);
+
+extern int rtcan_mscan_create_proc(struct rtcan_device* dev);
+extern void rtcan_mscan_remove_proc(struct rtcan_device* dev);
+
+#endif /* __RTCAN_MSCAN_H_ */
+++ linux-patched/drivers/xenomai/can/rtcan_flexcan.c	2022-03-21 12:58:29.245890366 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/rtcan_raw_filter.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * RTDM-based FLEXCAN CAN controller driver
+ *
+ * Rebased on linux 4.14.58 flexcan driver:
+ * Copyright (c) 2018 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Original port to RTDM:
+ * Copyright (c) 2012 Wolfgang Grandegger <wg@denx.de>
+ *
+ * Copyright (c) 2005-2006 Varma Electronics Oy
+ * Copyright (c) 2009 Sascha Hauer, Pengutronix
+ * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2014 David Jander, Protonic Holland
+ *
+ * Based on code originally by Andrey Volkov <avolkov@varma-el.com>
+ *
+ * LICENCE:
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <rtdm/driver.h>
+#include <rtdm/can.h>
+#include "rtcan_dev.h"
+#include "rtcan_raw.h"
+#include "rtcan_internal.h"
+#include <asm/unaligned.h>
+
+#define DRV_NAME	"flexcan"
+#define DEV_NAME	"rtcan%d"
+
+#define CAN_MAX_DLC 8
+#define get_can_dlc(i)		(min_t(__u8, (i), CAN_MAX_DLC))
+
+/* 8 for RX FIFO and 2 for error handling */
+#define FLEXCAN_NAPI_WEIGHT		(8 + 2)
+
+/* FLEXCAN module configuration register (CANMCR) bits */
+#define FLEXCAN_MCR_MDIS		BIT(31)
+#define FLEXCAN_MCR_FRZ			BIT(30)
+#define FLEXCAN_MCR_FEN			BIT(29)
+#define FLEXCAN_MCR_HALT		BIT(28)
+#define FLEXCAN_MCR_NOT_RDY		BIT(27)
+#define FLEXCAN_MCR_WAK_MSK		BIT(26)
+#define FLEXCAN_MCR_SOFTRST		BIT(25)
+#define FLEXCAN_MCR_FRZ_ACK		BIT(24)
+#define FLEXCAN_MCR_SUPV		BIT(23)
+#define FLEXCAN_MCR_SLF_WAK		BIT(22)
+#define FLEXCAN_MCR_WRN_EN		BIT(21)
+#define FLEXCAN_MCR_LPM_ACK		BIT(20)
+#define FLEXCAN_MCR_WAK_SRC		BIT(19)
+#define FLEXCAN_MCR_DOZE		BIT(18)
+#define FLEXCAN_MCR_SRX_DIS		BIT(17)
+#define FLEXCAN_MCR_IRMQ		BIT(16)
+#define FLEXCAN_MCR_LPRIO_EN		BIT(13)
+#define FLEXCAN_MCR_AEN			BIT(12)
+/* MCR_MAXMB: maximum used MBs is MAXMB + 1 */
+#define FLEXCAN_MCR_MAXMB(x)		((x) & 0x7f)
+#define FLEXCAN_MCR_IDAM_A		(0x0 << 8)
+#define FLEXCAN_MCR_IDAM_B		(0x1 << 8)
+#define FLEXCAN_MCR_IDAM_C		(0x2 << 8)
+#define FLEXCAN_MCR_IDAM_D		(0x3 << 8)
+
+/* FLEXCAN control register (CANCTRL) bits */
+#define FLEXCAN_CTRL_PRESDIV(x)		(((x) & 0xff) << 24)
+#define FLEXCAN_CTRL_RJW(x)		(((x) & 0x03) << 22)
+#define FLEXCAN_CTRL_PSEG1(x)		(((x) & 0x07) << 19)
+#define FLEXCAN_CTRL_PSEG2(x)		(((x) & 0x07) << 16)
+#define FLEXCAN_CTRL_BOFF_MSK		BIT(15)
+#define FLEXCAN_CTRL_ERR_MSK		BIT(14)
+#define FLEXCAN_CTRL_CLK_SRC		BIT(13)
+#define FLEXCAN_CTRL_LPB		BIT(12)
+#define FLEXCAN_CTRL_TWRN_MSK		BIT(11)
+#define FLEXCAN_CTRL_RWRN_MSK		BIT(10)
+#define FLEXCAN_CTRL_SMP		BIT(7)
+#define FLEXCAN_CTRL_BOFF_REC		BIT(6)
+#define FLEXCAN_CTRL_TSYN		BIT(5)
+#define FLEXCAN_CTRL_LBUF		BIT(4)
+#define FLEXCAN_CTRL_LOM		BIT(3)
+#define FLEXCAN_CTRL_PROPSEG(x)		((x) & 0x07)
+#define FLEXCAN_CTRL_ERR_BUS		(FLEXCAN_CTRL_ERR_MSK)
+#define FLEXCAN_CTRL_ERR_STATE \
+	(FLEXCAN_CTRL_TWRN_MSK | FLEXCAN_CTRL_RWRN_MSK | \
+	 FLEXCAN_CTRL_BOFF_MSK)
+#define FLEXCAN_CTRL_ERR_ALL \
+	(FLEXCAN_CTRL_ERR_BUS | FLEXCAN_CTRL_ERR_STATE)
+
+/* FLEXCAN control register 2 (CTRL2) bits */
+#define FLEXCAN_CTRL2_ECRWRE		BIT(29)
+#define FLEXCAN_CTRL2_WRMFRZ		BIT(28)
+#define FLEXCAN_CTRL2_RFFN(x)		(((x) & 0x0f) << 24)
+#define FLEXCAN_CTRL2_TASD(x)		(((x) & 0x1f) << 19)
+#define FLEXCAN_CTRL2_MRP		BIT(18)
+#define FLEXCAN_CTRL2_RRS		BIT(17)
+#define FLEXCAN_CTRL2_EACEN		BIT(16)
+
+/* FLEXCAN memory error control register (MECR) bits */
+#define FLEXCAN_MECR_ECRWRDIS		BIT(31)
+#define FLEXCAN_MECR_HANCEI_MSK		BIT(19)
+#define FLEXCAN_MECR_FANCEI_MSK		BIT(18)
+#define FLEXCAN_MECR_CEI_MSK		BIT(16)
+#define FLEXCAN_MECR_HAERRIE		BIT(15)
+#define FLEXCAN_MECR_FAERRIE		BIT(14)
+#define FLEXCAN_MECR_EXTERRIE		BIT(13)
+#define FLEXCAN_MECR_RERRDIS		BIT(9)
+#define FLEXCAN_MECR_ECCDIS		BIT(8)
+#define FLEXCAN_MECR_NCEFAFRZ		BIT(7)
+
+/* FLEXCAN error and status register (ESR) bits */
+#define FLEXCAN_ESR_TWRN_INT		BIT(17)
+#define FLEXCAN_ESR_RWRN_INT		BIT(16)
+#define FLEXCAN_ESR_BIT1_ERR		BIT(15)
+#define FLEXCAN_ESR_BIT0_ERR		BIT(14)
+#define FLEXCAN_ESR_ACK_ERR		BIT(13)
+#define FLEXCAN_ESR_CRC_ERR		BIT(12)
+#define FLEXCAN_ESR_FRM_ERR		BIT(11)
+#define FLEXCAN_ESR_STF_ERR		BIT(10)
+#define FLEXCAN_ESR_TX_WRN		BIT(9)
+#define FLEXCAN_ESR_RX_WRN		BIT(8)
+#define FLEXCAN_ESR_IDLE		BIT(7)
+#define FLEXCAN_ESR_TXRX		BIT(6)
+#define FLEXCAN_EST_FLT_CONF_SHIFT	(4)
+#define FLEXCAN_ESR_FLT_CONF_MASK	(0x3 << FLEXCAN_EST_FLT_CONF_SHIFT)
+#define FLEXCAN_ESR_FLT_CONF_ACTIVE	(0x0 << FLEXCAN_EST_FLT_CONF_SHIFT)
+#define FLEXCAN_ESR_FLT_CONF_PASSIVE	(0x1 << FLEXCAN_EST_FLT_CONF_SHIFT)
+#define FLEXCAN_ESR_BOFF_INT		BIT(2)
+#define FLEXCAN_ESR_ERR_INT		BIT(1)
+#define FLEXCAN_ESR_WAK_INT		BIT(0)
+#define FLEXCAN_ESR_ERR_BUS \
+	(FLEXCAN_ESR_BIT1_ERR | FLEXCAN_ESR_BIT0_ERR | \
+	 FLEXCAN_ESR_ACK_ERR | FLEXCAN_ESR_CRC_ERR | \
+	 FLEXCAN_ESR_FRM_ERR | FLEXCAN_ESR_STF_ERR)
+#define FLEXCAN_ESR_ERR_STATE \
+	(FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | FLEXCAN_ESR_BOFF_INT)
+#define FLEXCAN_ESR_ERR_ALL \
+	(FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE)
+#define FLEXCAN_ESR_ALL_INT \
+	(FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | \
+	 FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT)
+
+/* FLEXCAN interrupt flag register (IFLAG) bits */
+/* Errata ERR005829 step7: Reserve first valid MB */
+#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO	8
+#define FLEXCAN_TX_MB_OFF_FIFO		9
+#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP	0
+#define FLEXCAN_TX_MB_OFF_TIMESTAMP		1
+#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST	(FLEXCAN_TX_MB_OFF_TIMESTAMP + 1)
+#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST	63
+#define FLEXCAN_RX_MB_TIMESTAMP_COUNT	(FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST -	\
+					 FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST + 1)
+#define FLEXCAN_IFLAG_MB(x)		BIT(x)
+#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW	BIT(7)
+#define FLEXCAN_IFLAG_RX_FIFO_WARN	BIT(6)
+#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE	BIT(5)
+
+/* FLEXCAN message buffers */
+#define FLEXCAN_MB_CODE_MASK		(0xf << 24)
+#define FLEXCAN_MB_CODE_RX_BUSY_BIT	(0x1 << 24)
+#define FLEXCAN_MB_CODE_RX_INACTIVE	(0x0 << 24)
+#define FLEXCAN_MB_CODE_RX_EMPTY	(0x4 << 24)
+#define FLEXCAN_MB_CODE_RX_FULL		(0x2 << 24)
+#define FLEXCAN_MB_CODE_RX_OVERRUN	(0x6 << 24)
+#define FLEXCAN_MB_CODE_RX_RANSWER	(0xa << 24)
+
+#define FLEXCAN_MB_CODE_TX_INACTIVE	(0x8 << 24)
+#define FLEXCAN_MB_CODE_TX_ABORT	(0x9 << 24)
+#define FLEXCAN_MB_CODE_TX_DATA		(0xc << 24)
+#define FLEXCAN_MB_CODE_TX_TANSWER	(0xe << 24)
+
+#define FLEXCAN_MB_CNT_SRR		BIT(22)
+#define FLEXCAN_MB_CNT_IDE		BIT(21)
+#define FLEXCAN_MB_CNT_RTR		BIT(20)
+#define FLEXCAN_MB_CNT_LENGTH(x)	(((x) & 0xf) << 16)
+#define FLEXCAN_MB_CNT_TIMESTAMP(x)	((x) & 0xffff)
+
+#define FLEXCAN_TIMEOUT_US		(50)
+
+/* FLEXCAN hardware feature flags
+ *
+ * Below is some version info we got:
+ *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
+ *                                Filter? connected?  Passive detection  ception in MB
+ *   MX25  FlexCAN2  03.00.00.00     no        no         ?       no        no
+ *   MX28  FlexCAN2  03.00.04.00    yes       yes        no       no        no
+ *   MX35  FlexCAN2  03.00.00.00     no        no         ?       no        no
+ *   MX53  FlexCAN2  03.00.00.00    yes        no        no       no        no
+ *   MX6s  FlexCAN3  10.00.12.00    yes       yes        no       no       yes
+ *   VF610 FlexCAN3  ?               no       yes        no      yes       yes?
+ *
+ * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
+ */
+#define FLEXCAN_QUIRK_BROKEN_WERR_STATE	BIT(1) /* [TR]WRN_INT not connected */
+#define FLEXCAN_QUIRK_DISABLE_RXFG	BIT(2) /* Disable RX FIFO Global mask */
+#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS	BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
+#define FLEXCAN_QUIRK_DISABLE_MECR	BIT(4) /* Disable Memory error detection */
+#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP	BIT(5) /* Use timestamp based offloading */
+#define FLEXCAN_QUIRK_BROKEN_PERR_STATE	BIT(6) /* No interrupt for error passive */
+
+/* Structure of the message buffer */
+struct flexcan_mb {
+	u32 can_ctrl;
+	u32 can_id;
+	u32 data[2];
+};
+
+/* Structure of the hardware registers */
+struct flexcan_regs {
+	u32 mcr;		/* 0x00 */
+	u32 ctrl;		/* 0x04 */
+	u32 timer;		/* 0x08 */
+	u32 _reserved1;		/* 0x0c */
+	u32 rxgmask;		/* 0x10 */
+	u32 rx14mask;		/* 0x14 */
+	u32 rx15mask;		/* 0x18 */
+	u32 ecr;		/* 0x1c */
+	u32 esr;		/* 0x20 */
+	u32 imask2;		/* 0x24 */
+	u32 imask1;		/* 0x28 */
+	u32 iflag2;		/* 0x2c */
+	u32 iflag1;		/* 0x30 */
+	union {			/* 0x34 */
+		u32 gfwr_mx28;	/* MX28, MX53 */
+		u32 ctrl2;	/* MX6, VF610 */
+	};
+	u32 esr2;		/* 0x38 */
+	u32 imeur;		/* 0x3c */
+	u32 lrfr;		/* 0x40 */
+	u32 crcr;		/* 0x44 */
+	u32 rxfgmask;		/* 0x48 */
+	u32 rxfir;		/* 0x4c */
+	u32 _reserved3[12];	/* 0x50 */
+	struct flexcan_mb mb[64];	/* 0x80 */
+	/* FIFO-mode:
+	 *			MB
+	 * 0x080...0x08f	0	RX message buffer
+	 * 0x090...0x0df	1-5	reserved
+	 * 0x0e0...0x0ff	6-7	8 entry ID table
+	 *				(mx25, mx28, mx35, mx53)
+	 * 0x0e0...0x2df	6-7..37	8..128 entry ID table
+	 *				size conf'ed via ctrl2::RFFN
+	 *				(mx6, vf610)
+	 */
+	u32 _reserved4[256];	/* 0x480 */
+	u32 rximr[64];		/* 0x880 */
+	u32 _reserved5[24];	/* 0x980 */
+	u32 gfwr_mx6;		/* 0x9e0 - MX6 */
+	u32 _reserved6[63];	/* 0x9e4 */
+	u32 mecr;		/* 0xae0 */
+	u32 erriar;		/* 0xae4 */
+	u32 erridpr;		/* 0xae8 */
+	u32 errippr;		/* 0xaec */
+	u32 rerrar;		/* 0xaf0 */
+	u32 rerrdr;		/* 0xaf4 */
+	u32 rerrsynr;		/* 0xaf8 */
+	u32 errsr;		/* 0xafc */
+};
+
+struct flexcan_devtype_data {
+	u32 quirks;		/* quirks needed for different IP cores */
+};
+
+struct flexcan_timestamped_frame {
+	struct rtcan_skb skb;
+	u32 timestamp;
+	struct list_head next;
+};
+
+struct flexcan_priv {
+	unsigned int irq;
+	unsigned int mb_first;
+	unsigned int mb_last;
+	struct can_bittime bittiming;
+	struct flexcan_timestamped_frame *ts_frames;
+
+	struct flexcan_regs __iomem *regs;
+	struct flexcan_mb __iomem *tx_mb;
+	struct flexcan_mb __iomem *tx_mb_reserved;
+	u8 tx_mb_idx;
+	u32 reg_ctrl_default;
+	u32 reg_imask1_default;
+	u32 reg_imask2_default;
+
+	struct clk *clk_ipg;
+	struct clk *clk_per;
+	const struct flexcan_devtype_data *devtype_data;
+	struct regulator *reg_xceiver;
+
+	unsigned long bus_errors;
+};
+
+static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
+	.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+		FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+};
+
+static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
+	.quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+};
+
+static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
+	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+	FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+};
+
+static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
+	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+		FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
+		FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+};
+
+static const struct can_bittiming_const flexcan_bittiming_const = {
+	.name = DRV_NAME,
+	.tseg1_min = 4,
+	.tseg1_max = 16,
+	.tseg2_min = 2,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 256,
+	.brp_inc = 1,
+};
+
+/* Abstract off the read/write for arm versus ppc. This
+ * assumes that PPC uses big-endian registers and everything
+ * else uses little-endian registers, independent of CPU
+ * endianness.
+ */
+#if defined(CONFIG_PPC)
+static inline u32 flexcan_read(void __iomem *addr)
+{
+	return in_be32(addr);
+}
+
+static inline void flexcan_write(u32 val, void __iomem *addr)
+{
+	out_be32(addr, val);
+}
+#else
+static inline u32 flexcan_read(void __iomem *addr)
+{
+	return readl(addr);
+}
+
+static inline void flexcan_write(u32 val, void __iomem *addr)
+{
+	writel(val, addr);
+}
+#endif
+
+static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK);
+
+	flexcan_write(reg_ctrl, &regs->ctrl);
+}
+
+static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK);
+
+	flexcan_write(reg_ctrl, &regs->ctrl);
+}
+
+static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
+{
+	if (!priv->reg_xceiver)
+		return 0;
+
+	return regulator_enable(priv->reg_xceiver);
+}
+
+static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
+{
+	if (!priv->reg_xceiver)
+		return 0;
+
+	return regulator_disable(priv->reg_xceiver);
+}
+
+static int flexcan_chip_enable(struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+	u32 reg;
+
+	reg = flexcan_read(&regs->mcr);
+	reg &= ~FLEXCAN_MCR_MDIS;
+	flexcan_write(reg, &regs->mcr);
+
+	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+		udelay(10);
+
+	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int flexcan_chip_disable(struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+	u32 reg;
+
+	reg = flexcan_read(&regs->mcr);
+	reg |= FLEXCAN_MCR_MDIS;
+	flexcan_write(reg, &regs->mcr);
+
+	while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+		udelay(10);
+
+	if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int flexcan_chip_freeze(struct rtcan_device *dev)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
+	unsigned int timeout = 1000 * 1000 * 10 / dev->baudrate;
+	u32 reg;
+
+	reg = flexcan_read(&regs->mcr);
+	reg |= FLEXCAN_MCR_HALT;
+	flexcan_write(reg, &regs->mcr);
+
+	while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+		udelay(100);
+
+	if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int flexcan_chip_unfreeze(struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+	u32 reg;
+
+	reg = flexcan_read(&regs->mcr);
+	reg &= ~FLEXCAN_MCR_HALT;
+	flexcan_write(reg, &regs->mcr);
+
+	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+		udelay(10);
+
+	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int flexcan_chip_softreset(struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+
+	flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
+	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
+		udelay(10);
+
+	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int flexcan_start_xmit(struct rtcan_device *dev, struct can_frame *cf)
+{
+	const struct flexcan_priv *priv = rtcan_priv(dev);
+	u32 can_id, data, ctrl;
+
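+	/*
+	 * Build the mailbox control word: TX_DATA code in bits 27..24 and
+	 * the DLC in bits 19..16. Standard identifiers are placed in bits
+	 * 28..18 of the ID word; extended identifiers use the full 29 bits
+	 * of the ID word with IDE/SRR set in the control word.
+	 */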
+	ctrl = FLEXCAN_MB_CODE_TX_DATA | (cf->can_dlc << 16);
+	if (cf->can_id & CAN_EFF_FLAG) {
+		can_id = cf->can_id & CAN_EFF_MASK;
+		ctrl |= FLEXCAN_MB_CNT_IDE | FLEXCAN_MB_CNT_SRR;
+	} else {
+		can_id = (cf->can_id & CAN_SFF_MASK) << 18;
+	}
+
+	if (cf->can_id & CAN_RTR_FLAG)
+		ctrl |= FLEXCAN_MB_CNT_RTR;
+
+	if (cf->can_dlc > CAN_MAX_DLC)
+		cf->can_dlc = CAN_MAX_DLC;
+
+	if (cf->can_dlc > 0) {
+		data = be32_to_cpup((__be32 *)&cf->data[0]);
+		flexcan_write(data, &priv->tx_mb->data[0]);
+	}
+	if (cf->can_dlc > 4) {
+		data = be32_to_cpup((__be32 *)&cf->data[4]);
+		flexcan_write(data, &priv->tx_mb->data[1]);
+	}
+
+	flexcan_write(can_id, &priv->tx_mb->can_id);
+	flexcan_write(ctrl, &priv->tx_mb->can_ctrl);
+
+	/* Errata ERR005829 step8:
+	 * Write twice INACTIVE(0x8) code to first MB.
+	 */
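+	/*
+	 * The reserved first mailbox stays INACTIVE; per the errata
+	 * workaround this double write after queueing a frame guards
+	 * against the transmit arbitration hazard on affected cores.
+	 */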
+	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+		      &priv->tx_mb_reserved->can_ctrl);
+	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+		      &priv->tx_mb_reserved->can_ctrl);
+
+	return 0;
+}
+
+static void init_err_skb(struct rtcan_skb *skb)
+{
+	struct rtcan_rb_frame *cf = &skb->rb_frame;
+
+	skb->rb_frame_size = EMPTY_RB_FRAME_SIZE + CAN_ERR_DLC;
+	cf->can_id = CAN_ERR_FLAG;
+	cf->can_dlc = CAN_ERR_DLC;
+	memset(&cf->data[0], 0, cf->can_dlc);
+}
+
+static void flexcan_irq_bus_err(struct rtcan_device *dev,
+				u32 reg_esr, struct rtcan_skb *skb)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct rtcan_rb_frame *cf = &skb->rb_frame;
+
+	init_err_skb(skb);
+
+	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+	if (reg_esr & FLEXCAN_ESR_BIT1_ERR) {
+		rtcandev_dbg(dev, "BIT1_ERR irq\n");
+		cf->data[2] |= CAN_ERR_PROT_BIT1;
+	}
+	if (reg_esr & FLEXCAN_ESR_BIT0_ERR) {
+		rtcandev_dbg(dev, "BIT0_ERR irq\n");
+		cf->data[2] |= CAN_ERR_PROT_BIT0;
+	}
+	if (reg_esr & FLEXCAN_ESR_ACK_ERR) {
+		rtcandev_dbg(dev, "ACK_ERR irq\n");
+		cf->can_id |= CAN_ERR_ACK;
+		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+	}
+	if (reg_esr & FLEXCAN_ESR_CRC_ERR) {
+		rtcandev_dbg(dev, "CRC_ERR irq\n");
+		cf->data[2] |= CAN_ERR_PROT_BIT;
+		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+	}
+	if (reg_esr & FLEXCAN_ESR_FRM_ERR) {
+		rtcandev_dbg(dev, "FRM_ERR irq\n");
+		cf->data[2] |= CAN_ERR_PROT_FORM;
+	}
+	if (reg_esr & FLEXCAN_ESR_STF_ERR) {
+		rtcandev_dbg(dev, "STF_ERR irq\n");
+		cf->data[2] |= CAN_ERR_PROT_STUFF;
+	}
+
+	priv->bus_errors++;
+}
+
+struct berr_counter {
+	u16 txerr;
+	u16 rxerr;
+};
+
+static void flexcan_change_state(struct rtcan_device *dev,
+				 struct rtcan_rb_frame *cf,
+				 struct berr_counter *bec,
+				 can_state_t new_state)
+{
+	switch (dev->state) {
+	case CAN_STATE_ERROR_ACTIVE:
+		/*
+		 * from: ERROR_ACTIVE
+		 * to  : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
+		 * =>  : there was a warning int
+		 */
+		if (new_state >= CAN_STATE_ERROR_WARNING &&
+		    new_state <= CAN_STATE_BUS_OFF) {
+			rtcandev_dbg(dev, "Error Warning IRQ\n");
+
+			cf->can_id |= CAN_ERR_CRTL;
+			cf->data[1] = (bec->txerr > bec->rxerr) ?
+				CAN_ERR_CRTL_TX_WARNING :
+				CAN_ERR_CRTL_RX_WARNING;
+		}
+		fallthrough;
+	case CAN_STATE_ERROR_WARNING:
+		/*
+		 * from: ERROR_ACTIVE, ERROR_WARNING
+		 * to  : ERROR_PASSIVE, BUS_OFF
+		 * =>  : error passive int
+		 */
+		if (new_state >= CAN_STATE_ERROR_PASSIVE &&
+		    new_state <= CAN_STATE_BUS_OFF) {
+			rtcandev_dbg(dev, "Error Passive IRQ\n");
+
+			cf->can_id |= CAN_ERR_CRTL;
+			cf->data[1] = (bec->txerr > bec->rxerr) ?
+				CAN_ERR_CRTL_TX_PASSIVE :
+				CAN_ERR_CRTL_RX_PASSIVE;
+		}
+		break;
+	case CAN_STATE_BUS_OFF:
+		rtcandev_err(dev, "BUG! "
+			     "hardware recovered automatically from BUS_OFF\n");
+		break;
+	default:
+		break;
+	}
+
+	/* process state changes depending on the new state */
+	switch (new_state) {
+	case CAN_STATE_ERROR_ACTIVE:
+		rtcandev_dbg(dev, "Error Active\n");
+		cf->can_id |= CAN_ERR_PROT;
+		cf->data[2] = CAN_ERR_PROT_ACTIVE;
+		break;
+	case CAN_STATE_BUS_OFF:
+		cf->can_id |= CAN_ERR_BUSOFF;
+		/* Wake up waiting senders */
+		rtdm_sem_destroy(&dev->tx_sem);
+		break;
+	default:
+		break;
+	}
+
+	dev->state = new_state;
+}
+
+static bool flexcan_irq_state(struct rtcan_device *dev, u32 reg_esr,
+			      struct rtcan_skb *skb)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
+	enum CAN_STATE new_state, rx_state, tx_state;
+	struct rtcan_rb_frame *cf = &skb->rb_frame;
+	struct berr_counter bec;
+	u32 reg;
+	int flt;
+
+	reg = flexcan_read(&regs->ecr);
+	bec.txerr = (reg >> 0) & 0xff;
+	bec.rxerr = (reg >> 8) & 0xff;
+
+	flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK;
+	if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) {
+		tx_state = unlikely(reg_esr & FLEXCAN_ESR_TX_WRN) ?
+			CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
+		rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ?
+			CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
+		new_state = max(tx_state, rx_state);
+	} else
+		new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ?
+			CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF;
+
+	/* state hasn't changed */
+	if (likely(new_state == dev->state))
+		return false;
+
+	init_err_skb(skb);
+
+	flexcan_change_state(dev, cf, &bec, new_state);
+
+	return true;
+}
+
+static unsigned int flexcan_mailbox_read(struct rtcan_device *dev,
+					 struct rtcan_skb *skb,
+					 u32 *timestamp, unsigned int n)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
+	struct flexcan_mb __iomem *mb = &regs->mb[n];
+	u32 reg_ctrl, reg_id, reg_iflag1, code;
+	struct rtcan_rb_frame *cf = &skb->rb_frame;
+
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+		do {
+			reg_ctrl = flexcan_read(&mb->can_ctrl);
+		} while (reg_ctrl & FLEXCAN_MB_CODE_RX_BUSY_BIT);
+
+		/* is this MB empty? */
+		code = reg_ctrl & FLEXCAN_MB_CODE_MASK;
+		if ((code != FLEXCAN_MB_CODE_RX_FULL) &&
+		    (code != FLEXCAN_MB_CODE_RX_OVERRUN))
+			return 0;
+	} else {
+		reg_iflag1 = flexcan_read(&regs->iflag1);
+		if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE))
+			return 0;
+
+		reg_ctrl = flexcan_read(&mb->can_ctrl);
+	}
+
+	/* expand the 16-bit mailbox timestamp to full 32 bit */
+	*timestamp = reg_ctrl << 16;
+
+	cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);
+	reg_id = flexcan_read(&mb->can_id);
+	if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
+		cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
+	else
+		cf->can_id = (reg_id >> 18) & CAN_SFF_MASK;
+
+	skb->rb_frame_size = EMPTY_RB_FRAME_SIZE;
+
+	if (reg_ctrl & FLEXCAN_MB_CNT_RTR)
+		cf->can_id |= CAN_RTR_FLAG;
+	else
+		skb->rb_frame_size += cf->can_dlc;
+
+	put_unaligned_be32(flexcan_read(&mb->data[0]), cf->data + 0);
+	put_unaligned_be32(flexcan_read(&mb->data[1]), cf->data + 4);
+
+	cf->can_ifindex = dev->ifindex;
+
+	/* mark as read */
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+		/* Clear IRQ */
+		if (n < 32)
+			flexcan_write(BIT(n), &regs->iflag1);
+		else
+			flexcan_write(BIT(n - 32), &regs->iflag2);
+	} else {
+		flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
+		flexcan_read(&regs->timer);
+	}
+
+	return 1;
+}
+
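+/*
+ * Helpers for walking the RX mailbox range in either direction; with
+ * the mb_first/mb_last values set in flexcan_probe() the scan runs
+ * upward, but the helpers also cope with a descending range.
+ */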
+static inline bool flexcan_rx_le(struct flexcan_priv *priv, unsigned int a, unsigned int b)
+{
+	if (priv->mb_first < priv->mb_last)
+		return a <= b;
+
+	return a >= b;
+}
+
+static inline unsigned int flexcan_rx_inc(struct flexcan_priv *priv, unsigned int *val)
+{
+	if (priv->mb_first < priv->mb_last)
+		return (*val)++;
+
+	return (*val)--;
+}
+
+static int flexcan_mailbox_read_timestamp(struct rtcan_device *dev, u64 pending)
+{
+	struct flexcan_timestamped_frame *new, *pos, *tmp;
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct list_head q, *head;
+	int i, count = 0;
+
+	INIT_LIST_HEAD(&q);
+
+	for (i = priv->mb_first;
+	     flexcan_rx_le(priv, i, priv->mb_last);
+	     flexcan_rx_inc(priv, &i)) {
+		if (!(pending & BIT_ULL(i)))
+			continue;
+
+		new = priv->ts_frames + (i - priv->mb_first);
+		if (!flexcan_mailbox_read(dev, &new->skb, &new->timestamp, i))
+			break;
+
+		head = &q;
+		if (list_empty(&q))
+			goto add;
+
+		list_for_each_entry_reverse(pos, &q, next) {
+			/*
+			 * Subtract two u32 and return result as int,
+			 * to keep difference steady around the u32
+			 * overflow.
+			 */
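+			/*
+			 * e.g. new = 0x00000005, pos = 0xfffffff0:
+			 * (int)(new - pos) == 21 > 0, so the wrapped
+			 * timestamp still sorts after the older one.
+			 */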
+			if (((int)(new->timestamp - pos->timestamp)) >= 0) {
+				head = &pos->next;
+				break;
+			}
+		}
+	add:
+		list_add(&new->next, head);
+		count++;
+	}
+
+	if (list_empty(&q))
+		return 0;
+
+	list_for_each_entry_safe(pos, tmp, &q, next)
+		rtcan_rcv(dev, &pos->skb);
+
+	return count;
+}
+
+static void flexcan_mailbox_read_fifo(struct rtcan_device *dev)
+{
+	struct rtcan_skb skb;
+	u32 timestamp;
+
+	for (;;) {
+		if (!flexcan_mailbox_read(dev, &skb, &timestamp, 0))
+			break;
+		rtcan_rcv(dev, &skb);
+	}
+}
+
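+/*
+ * Collect the pending RX interrupt flags: drop the TX mailbox bit and
+ * anything outside the default interrupt masks, then merge IFLAG2/IFLAG1
+ * into a single 64-bit value where bit n corresponds to mailbox n.
+ */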
+static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 iflag1, iflag2;
+
+	iflag2 = flexcan_read(&regs->iflag2) & priv->reg_imask2_default;
+	iflag1 = flexcan_read(&regs->iflag1) & priv->reg_imask1_default &
+		~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
+
+	return (u64)iflag2 << 32 | iflag1;
+}
+
+static int flexcan_do_rx(struct rtcan_device *dev, u32 reg_iflag1)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
+	struct rtcan_skb skb;
+	struct rtcan_rb_frame *cf = &skb.rb_frame;
+	bool input = false;
+	u64 reg;
+	int ret;
+
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+		while ((reg = flexcan_read_reg_iflag_rx(priv))) {
+			input = true;
+			ret = flexcan_mailbox_read_timestamp(dev, reg);
+			if (!ret)
+				break;
+		}
+	} else {
+		if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
+			flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
+			init_err_skb(&skb);
+			cf->can_id |= CAN_ERR_CRTL;
+			cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+			input = true;
+		} else if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) {
+			flexcan_mailbox_read_fifo(dev);
+			input = true;
+		}
+	}
+
+	return input;
+}
+
+static int flexcan_irq(rtdm_irq_t *irq_handle)
+{
+	struct rtcan_device *dev = rtdm_irq_get_arg(irq_handle, void);
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg_iflag1, reg_esr;
+	struct rtcan_skb skb;
+	int handled = RTDM_IRQ_NONE;
+
+	rtdm_lock_get(&dev->device_lock);
+	rtdm_lock_get(&rtcan_recv_list_lock);
+	rtdm_lock_get(&rtcan_socket_lock);
+
+	reg_iflag1 = flexcan_read(&regs->iflag1);
+
+	/* reception interrupt */
+	if (flexcan_do_rx(dev, reg_iflag1))
+		handled = RTDM_IRQ_HANDLED;
+
+	/* transmission complete interrupt */
+	if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) {
+		/* after sending a RTR frame MB is in RX mode */
+		flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+			      &priv->tx_mb->can_ctrl);
+		flexcan_write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
+		rtdm_sem_up(&dev->tx_sem);
+		if (rtcan_loopback_pending(dev))
+			rtcan_loopback(dev);
+		handled = RTDM_IRQ_HANDLED;
+	}
+
+	reg_esr = flexcan_read(&regs->esr);
+
+	/* ACK all bus error and state change IRQ sources */
+	if (reg_esr & FLEXCAN_ESR_ALL_INT) {
+		flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
+		handled = RTDM_IRQ_HANDLED;
+	}
+
+	/* state change interrupt or broken error state quirk fix is enabled */
+	if (reg_esr & FLEXCAN_ESR_ERR_STATE)
+		handled = RTDM_IRQ_HANDLED;
+	else if (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+					       FLEXCAN_QUIRK_BROKEN_PERR_STATE))
+		goto esr_err;
+
+	if (reg_esr & FLEXCAN_ESR_ERR_STATE) {
+	esr_err:
+		if (flexcan_irq_state(dev, reg_esr, &skb)) {
+			rtcan_rcv(dev, &skb);
+		}
+	}
+
+	/* bus error IRQ - report unconditionally */
+	if (reg_esr & FLEXCAN_ESR_ERR_BUS) {
+		flexcan_irq_bus_err(dev, reg_esr, &skb);
+		rtcan_rcv(dev, &skb);
+		handled = RTDM_IRQ_HANDLED;
+	}
+
+	rtdm_lock_put(&rtcan_socket_lock);
+	rtdm_lock_put(&rtcan_recv_list_lock);
+	rtdm_lock_put(&dev->device_lock);
+
+	return handled;
+}
+
+static void flexcan_set_bittiming(struct rtcan_device *dev)
+{
+	const struct flexcan_priv *priv = rtcan_priv(dev);
+	const struct can_bittime *bt = &priv->bittiming;
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg;
+
+	reg = flexcan_read(&regs->ctrl);
+	reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) |
+		 FLEXCAN_CTRL_RJW(0x3) |
+		 FLEXCAN_CTRL_PSEG1(0x7) |
+		 FLEXCAN_CTRL_PSEG2(0x7) |
+		 FLEXCAN_CTRL_PROPSEG(0x7) |
+		 FLEXCAN_CTRL_LPB |
+		 FLEXCAN_CTRL_SMP |
+		 FLEXCAN_CTRL_LOM);
+
+	reg |= FLEXCAN_CTRL_PRESDIV(bt->std.brp - 1) |
+		FLEXCAN_CTRL_PSEG1(bt->std.phase_seg1 - 1) |
+		FLEXCAN_CTRL_PSEG2(bt->std.phase_seg2 - 1) |
+		FLEXCAN_CTRL_RJW(bt->std.sjw - 1) |
+		FLEXCAN_CTRL_PROPSEG(bt->std.prop_seg - 1);
+
+	if (dev->ctrl_mode & CAN_CTRLMODE_LOOPBACK)
+		reg |= FLEXCAN_CTRL_LPB;
+	if (dev->ctrl_mode & CAN_CTRLMODE_LISTENONLY)
+		reg |= FLEXCAN_CTRL_LOM;
+	if (dev->ctrl_mode & CAN_CTRLMODE_3_SAMPLES)
+		reg |= FLEXCAN_CTRL_SMP;
+
+	rtcandev_dbg(dev, "writing ctrl=0x%08x\n", reg);
+	flexcan_write(reg, &regs->ctrl);
+
+	/* print chip status */
+	rtcandev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
+		   flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
+}
+
+/* flexcan_chip_start
+ *
+ * this function is entered with clocks enabled
+ *
+ */
+static int flexcan_chip_start(struct rtcan_device *dev)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg_mcr, reg_ctrl, reg_ctrl2, reg_mecr;
+	int err, i;
+
+	err = clk_prepare_enable(priv->clk_ipg);
+	if (err)
+		return err;
+
+	err = clk_prepare_enable(priv->clk_per);
+	if (err)
+		goto out_disable_ipg;
+
+	/* enable module */
+	err = flexcan_chip_enable(priv);
+	if (err)
+		goto out_disable_per;
+
+	/* soft reset */
+	err = flexcan_chip_softreset(priv);
+	if (err)
+		goto out_chip_disable;
+
+	flexcan_set_bittiming(dev);
+
+	/* MCR
+	 *
+	 * enable freeze
+	 * enable fifo
+	 * halt now
+	 * only supervisor access
+	 * enable warning int
+	 * disable local echo
+	 * enable individual RX masking
+	 * choose format C
+	 * set max mailbox number
+	 */
+	reg_mcr = flexcan_read(&regs->mcr);
+	reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
+	reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
+		FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
+		FLEXCAN_MCR_IDAM_C;
+
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+		reg_mcr &= ~FLEXCAN_MCR_FEN;
+		reg_mcr |= FLEXCAN_MCR_MAXMB(priv->mb_last);
+	} else {
+		reg_mcr |= FLEXCAN_MCR_FEN |
+			FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
+	}
+	rtcandev_dbg(dev, "%s: writing mcr=0x%08x\n", __func__, reg_mcr);
+	flexcan_write(reg_mcr, &regs->mcr);
+
+	/* CTRL
+	 *
+	 * disable timer sync feature
+	 *
+	 * disable auto busoff recovery
+	 * transmit lowest buffer first
+	 *
+	 * enable tx and rx warning interrupt
+	 * enable bus off interrupt
+	 * (== FLEXCAN_CTRL_ERR_STATE)
+	 */
+	reg_ctrl = flexcan_read(&regs->ctrl);
+	reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
+	reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
+		FLEXCAN_CTRL_ERR_STATE;
+
+	/* enable the "error interrupt" (FLEXCAN_CTRL_ERR_MSK),
+	 * on most Flexcan cores, too. Otherwise we don't get
+	 * any error warning or passive interrupts.
+	 */
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE)
+		reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
+	else
+		reg_ctrl &= ~FLEXCAN_CTRL_ERR_MSK;
+
+	/* save for later use */
+	priv->reg_ctrl_default = reg_ctrl;
+	/* leave interrupts disabled for now */
+	reg_ctrl &= ~FLEXCAN_CTRL_ERR_ALL;
+	rtcandev_dbg(dev, "%s: writing ctrl=0x%08x\n", __func__, reg_ctrl);
+	flexcan_write(reg_ctrl, &regs->ctrl);
+
+	if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) {
+		reg_ctrl2 = flexcan_read(&regs->ctrl2);
+		reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS;
+		flexcan_write(reg_ctrl2, &regs->ctrl2);
+	}
+
+	/* clear and invalidate all mailboxes first */
+	for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) {
+		flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE,
+			      &regs->mb[i].can_ctrl);
+	}
+
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+		for (i = priv->mb_first; i <= priv->mb_last; i++)
+			flexcan_write(FLEXCAN_MB_CODE_RX_EMPTY,
+				      &regs->mb[i].can_ctrl);
+	}
+
+	/* Errata ERR005829: mark first TX mailbox as INACTIVE */
+	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+		      &priv->tx_mb_reserved->can_ctrl);
+
+	/* mark TX mailbox as INACTIVE */
+	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+		      &priv->tx_mb->can_ctrl);
+
+	/* acceptance mask/acceptance code (accept everything) */
+	flexcan_write(0x0, &regs->rxgmask);
+	flexcan_write(0x0, &regs->rx14mask);
+	flexcan_write(0x0, &regs->rx15mask);
+
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
+		flexcan_write(0x0, &regs->rxfgmask);
+
+	/* clear acceptance filters */
+	for (i = 0; i < ARRAY_SIZE(regs->mb); i++)
+		flexcan_write(0, &regs->rximr[i]);
+
+	/* On Vybrid, disable memory error detection interrupts
+	 * and freeze mode.
+	 * This also works around errata e5295 which generates
+	 * false positive memory errors and put the device in
+	 * false positive memory errors and puts the device in
+	 */
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_MECR) {
+		/* Follow the protocol as described in "Detection
+		 * and Correction of Memory Errors" to write to
+		 * MECR register
+		 */
+		reg_ctrl2 = flexcan_read(&regs->ctrl2);
+		reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE;
+		flexcan_write(reg_ctrl2, &regs->ctrl2);
+
+		reg_mecr = flexcan_read(&regs->mecr);
+		reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
+		flexcan_write(reg_mecr, &regs->mecr);
+		reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
+			      FLEXCAN_MECR_FANCEI_MSK);
+		flexcan_write(reg_mecr, &regs->mecr);
+	}
+
+	err = flexcan_transceiver_enable(priv);
+	if (err)
+		goto out_chip_disable;
+
+	/* synchronize with the can bus */
+	err = flexcan_chip_unfreeze(priv);
+	if (err)
+		goto out_transceiver_disable;
+
+	dev->state = CAN_STATE_ERROR_ACTIVE;
+
+	/* enable interrupts atomically */
+	rtdm_irq_disable(&dev->irq_handle);
+	flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
+	flexcan_write(priv->reg_imask1_default, &regs->imask1);
+	flexcan_write(priv->reg_imask2_default, &regs->imask2);
+	rtdm_irq_enable(&dev->irq_handle);
+
+	/* print chip status */
+	rtcandev_dbg(dev, "%s: reading mcr=0x%08x ctrl=0x%08x\n", __func__,
+		   flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
+
+	return 0;
+
+ out_transceiver_disable:
+	flexcan_transceiver_disable(priv);
+ out_chip_disable:
+	flexcan_chip_disable(priv);
+ out_disable_per:
+	clk_disable_unprepare(priv->clk_per);
+ out_disable_ipg:
+	clk_disable_unprepare(priv->clk_ipg);
+
+	return err;
+}
+
+/* flexcan_chip_stop
+ *
+ * this function is entered with clocks enabled
+ */
+static void flexcan_chip_stop(struct rtcan_device *dev)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
+
+	/* freeze + disable module */
+	flexcan_chip_freeze(dev);
+	flexcan_chip_disable(priv);
+
+	/* Disable all interrupts */
+	flexcan_write(0, &regs->imask2);
+	flexcan_write(0, &regs->imask1);
+	flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+		      &regs->ctrl);
+
+	flexcan_transceiver_disable(priv);
+
+	clk_disable_unprepare(priv->clk_per);
+	clk_disable_unprepare(priv->clk_ipg);
+}
+
+static int flexcan_mode_start(struct rtcan_device *dev,
+			      rtdm_lockctx_t *lock_ctx)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	int err = 0;
+
+	rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+
+	switch (dev->state) {
+
+	case CAN_STATE_ACTIVE:
+	case CAN_STATE_BUS_WARNING:
+	case CAN_STATE_BUS_PASSIVE:
+		break;
+
+	case CAN_STATE_STOPPED:
+		/* Register IRQ handler and pass device structure as arg */
+		err = rtdm_irq_request(&dev->irq_handle, priv->irq,
+				       flexcan_irq, 0, DRV_NAME,
+				       dev);
+		if (err) {
+			rtcandev_err(dev, "couldn't request irq %d\n",
+				     priv->irq);
+			goto out;
+		}
+
+		/* Set up sender "mutex" */
+		rtdm_sem_init(&dev->tx_sem, 1);
+
+		/* start chip and queuing */
+		err = flexcan_chip_start(dev);
+		if (err) {
+			rtdm_irq_free(&dev->irq_handle);
+			rtdm_sem_destroy(&dev->tx_sem);
+			goto out;
+		}
+		break;
+
+	case CAN_STATE_BUS_OFF:
+		/* Set up sender "mutex" */
+		rtdm_sem_init(&dev->tx_sem, 1);
+		/* start chip and queuing */
+		err = flexcan_chip_start(dev);
+		if (err) {
+			rtdm_sem_destroy(&dev->tx_sem);
+			goto out;
+		}
+		break;
+
+	case CAN_STATE_SLEEPING:
+	default:
+		err = 0;
+		break;
+	}
+
+out:
+	rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+
+	return err;
+}
+
+static int flexcan_mode_stop(struct rtcan_device *dev,
+			     rtdm_lockctx_t *lock_ctx)
+{
+	if (!CAN_STATE_OPERATING(dev->state))
+		return 0;
+
+	dev->state = CAN_STATE_STOPPED;
+
+	rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+
+	flexcan_chip_stop(dev);
+	rtdm_irq_free(&dev->irq_handle);
+	rtdm_sem_destroy(&dev->tx_sem);
+
+	rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+
+	return 0;
+}
+
+static int flexcan_set_mode(struct rtcan_device *dev, can_mode_t mode,
+			    rtdm_lockctx_t *lock_ctx)
+{
+	if (mode == CAN_MODE_START)
+		return flexcan_mode_start(dev, lock_ctx);
+
+	if (mode == CAN_MODE_STOP)
+		return flexcan_mode_stop(dev, lock_ctx);
+
+	return -EOPNOTSUPP;
+}
+
+static int flexcan_copy_bittiming(struct rtcan_device *dev,
+				  struct can_bittime *bt,
+				  rtdm_lockctx_t *lock_ctx)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+
+	memcpy(&priv->bittiming, bt, sizeof(*bt));
+
+	return 0;
+}
+
+static int register_flexcandev(struct rtcan_device *dev)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg;
+	int err;
+
+	err = clk_prepare_enable(priv->clk_ipg);
+	if (err)
+		return err;
+
+	err = clk_prepare_enable(priv->clk_per);
+	if (err)
+		goto out_disable_ipg;
+
+	/* select "bus clock", chip must be disabled */
+	err = flexcan_chip_disable(priv);
+	if (err)
+		goto out_disable_per;
+	reg = flexcan_read(&regs->ctrl);
+	reg |= FLEXCAN_CTRL_CLK_SRC;
+	flexcan_write(reg, &regs->ctrl);
+
+	err = flexcan_chip_enable(priv);
+	if (err)
+		goto out_chip_disable;
+
+	/* set freeze, halt and activate FIFO, restrict register access */
+	reg = flexcan_read(&regs->mcr);
+	reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT |
+		FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
+	flexcan_write(reg, &regs->mcr);
+
+	/* Currently we only support newer versions of this core
+	 * featuring a RX hardware FIFO (although this driver doesn't
+	 * make use of it on some cores). Older cores, found on some
+	 * ColdFire derivatives are not tested.
+	 */
+	reg = flexcan_read(&regs->mcr);
+	if (!(reg & FLEXCAN_MCR_FEN)) {
+		rtcandev_err(dev, "Could not enable RX FIFO, unsupported core\n");
+		err = -ENODEV;
+		goto out_chip_disable;
+	}
+
+	err = rtcan_dev_register(dev);
+
+	/* disable core and turn off clocks */
+ out_chip_disable:
+	flexcan_chip_disable(priv);
+ out_disable_per:
+	clk_disable_unprepare(priv->clk_per);
+ out_disable_ipg:
+	clk_disable_unprepare(priv->clk_ipg);
+
+	return err;
+}
+
+static void unregister_flexcandev(struct rtcan_device *dev)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+
+	rtcan_dev_unregister(dev);
+	if (priv->ts_frames)
+		kfree(priv->ts_frames);
+}
+
+static const struct of_device_id flexcan_of_match[] = {
+	{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
+	{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
+	{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
+	{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, flexcan_of_match);
+
+static const struct platform_device_id flexcan_id_table[] = {
+	{ .name = "flexcan", .driver_data = (kernel_ulong_t)&fsl_p1010_devtype_data, },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, flexcan_id_table);
+
+static int flexcan_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *of_id;
+	const struct flexcan_devtype_data *devtype_data;
+	struct rtcan_device *dev;
+	struct flexcan_priv *priv;
+	struct regulator *reg_xceiver;
+	struct resource *mem;
+	struct clk *clk_ipg = NULL, *clk_per = NULL;
+	struct flexcan_regs __iomem *regs;
+	int err, irq;
+	u32 clock_freq = 0;
+
+	reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
+	if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
+	else if (IS_ERR(reg_xceiver))
+		reg_xceiver = NULL;
+
+	if (pdev->dev.of_node)
+		of_property_read_u32(pdev->dev.of_node,
+				     "clock-frequency", &clock_freq);
+
+	if (!clock_freq) {
+		clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+		if (IS_ERR(clk_ipg)) {
+			dev_err(&pdev->dev, "no ipg clock defined\n");
+			return PTR_ERR(clk_ipg);
+		}
+
+		clk_per = devm_clk_get(&pdev->dev, "per");
+		if (IS_ERR(clk_per)) {
+			dev_err(&pdev->dev, "no per clock defined\n");
+			return PTR_ERR(clk_per);
+		}
+		clock_freq = clk_get_rate(clk_per);
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0)
+		return -ENODEV;
+
+	regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
+
+	of_id = of_match_device(flexcan_of_match, &pdev->dev);
+	if (of_id) {
+		devtype_data = of_id->data;
+	} else if (platform_get_device_id(pdev)->driver_data) {
+		devtype_data = (struct flexcan_devtype_data *)
+			platform_get_device_id(pdev)->driver_data;
+	} else {
+		return -ENODEV;
+	}
+
+	dev = rtcan_dev_alloc(sizeof(struct flexcan_priv), 0);
+	if (!dev)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, dev);
+
+	priv = rtcan_priv(dev);
+	priv->regs = regs;
+	priv->irq = irq;
+	priv->clk_ipg = clk_ipg;
+	priv->clk_per = clk_per;
+	priv->devtype_data = devtype_data;
+	priv->reg_xceiver = reg_xceiver;
+
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+		priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP;
+		priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP];
+	} else {
+		priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO;
+		priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO];
+	}
+	priv->tx_mb = &regs->mb[priv->tx_mb_idx];
+
+	priv->reg_imask1_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
+	priv->reg_imask2_default = 0;
+
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+		u64 imask;
+
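+		/*
+		 * Timestamp mode receives into mailboxes mb_first..mb_last;
+		 * GENMASK_ULL() builds one 64-bit interrupt mask over that
+		 * range, which is then split across IMASK1/IMASK2.
+		 */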
+		priv->mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST;
+		priv->mb_last = FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST;
+		priv->ts_frames = kzalloc(sizeof(*priv->ts_frames) *
+					  FLEXCAN_RX_MB_TIMESTAMP_COUNT, GFP_KERNEL);
+		if (priv->ts_frames == NULL) {
+			err = -ENOMEM;
+			goto failed_fralloc;
+		}
+
+		imask = GENMASK_ULL(priv->mb_last, priv->mb_first);
+		priv->reg_imask1_default |= imask;
+		priv->reg_imask2_default |= imask >> 32;
+	} else {
+		priv->reg_imask1_default |= FLEXCAN_IFLAG_RX_FIFO_OVERFLOW |
+			FLEXCAN_IFLAG_RX_FIFO_AVAILABLE;
+		priv->ts_frames = NULL;
+	}
+
+	dev->ctrl_name = "FLEXCAN";
+	dev->board_name = "FLEXCAN";
+	dev->base_addr = (unsigned long)regs;
+	dev->can_sys_clock = clock_freq;
+	dev->hard_start_xmit = flexcan_start_xmit;
+	dev->do_set_mode = flexcan_set_mode;
+	dev->do_set_bit_time = flexcan_copy_bittiming;
+	dev->bittiming_const = &flexcan_bittiming_const;
+	dev->state = CAN_STATE_STOPPED;
+	strncpy(dev->name, DEV_NAME, IFNAMSIZ);
+
+	err = register_flexcandev(dev);
+	if (err) {
+		dev_err(&pdev->dev, "registering netdev failed\n");
+		goto failed_register;
+	}
+
+	dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
+		 priv->regs, priv->irq);
+
+	return 0;
+
+ failed_register:
+	if (priv->ts_frames)
+		kfree(priv->ts_frames);
+ failed_fralloc:
+	rtcan_dev_free(dev);
+	return err;
+}
+
+static int flexcan_remove(struct platform_device *pdev)
+{
+	struct rtcan_device *dev = platform_get_drvdata(pdev);
+
+	unregister_flexcandev(dev);
+	rtcan_dev_free(dev);
+
+	return 0;
+}
+
+static struct platform_driver flexcan_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = flexcan_of_match,
+	},
+	.probe = flexcan_probe,
+	.remove = flexcan_remove,
+	.id_table = flexcan_id_table,
+};
+
+module_platform_driver(flexcan_driver);
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@denx.de>, "
+	      "Sascha Hauer <kernel@pengutronix.de>, "
+	      "Marc Kleine-Budde <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("RT-CAN port driver for flexcan-based chips");
+++ linux-patched/drivers/xenomai/can/rtcan_raw_filter.c	2022-03-21 12:58:29.241890405 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/rtcan_dev.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                          <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+
+#include <rtdm/driver.h>
+
+#include <rtdm/can.h>
+#include "rtcan_internal.h"
+#include "rtcan_socket.h"
+#include "rtcan_list.h"
+#include "rtcan_dev.h"
+#include "rtcan_raw.h"
+
+
+#if 0
+void rtcan_raw_print_filter(struct rtcan_device *dev)
+{
+    int i;
+    struct rtcan_recv *r = dev->receivers;
+
+    rtdm_printk("%s: recv_list=%p empty_list=%p free_entries=%d\n",
+		dev->name, dev->recv_list, dev->empty_list, dev->free_entries);
+    for (i = 0; i < RTCAN_MAX_RECEIVERS; i++, r++) {
+	rtdm_printk("%2d %p sock=%p next=%p id=%x mask=%x\n",
+		    i, r, r->sock, r->next,
+		    r->can_filter.can_id, r->can_filter.can_mask);
+    }
+}
+#else
+#define rtcan_raw_print_filter(dev)
+#endif
+
+
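+/*
+ * Prepare a filter for the receive path: for inverted filters the
+ * CAN_INV_FILTER flag is moved from can_id into can_mask, and the ID is
+ * pre-masked so the per-frame match check stays cheap.
+ */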
+static inline void rtcan_raw_mount_filter(can_filter_t *recv_filter,
+					  can_filter_t *filter)
+{
+    if (filter->can_id & CAN_INV_FILTER) {
+	recv_filter->can_id = filter->can_id & ~CAN_INV_FILTER;
+	recv_filter->can_mask = filter->can_mask | CAN_INV_FILTER;
+    } else {
+	recv_filter->can_id = filter->can_id;
+	recv_filter->can_mask = filter->can_mask & ~CAN_INV_FILTER;
+    }
+
+    /* Apply mask for fast filter check */
+    recv_filter->can_id &= recv_filter->can_mask;
+}
+
+
+int rtcan_raw_check_filter(struct rtcan_socket *sock, int ifindex,
+			   struct rtcan_filter_list *flist)
+{
+    int old_ifindex = 0, old_flistlen_all = 0;
+    int free_entries, i, begin, end;
+    struct rtcan_device *dev;
+    int flistlen;
+
+    if (rtcan_flist_no_filter(flist))
+	return 0;
+
+    /* Check if filter list has been defined by user */
+    flistlen = (flist) ? flist->flistlen : 1;
+
+    /* Now we check if a reception list would overflow. This takes some
+     * preparation, so let's go ... */
+
+    /* Check current bind status */
+    if (rtcan_sock_has_filter(sock)) {
+	/* Socket is bound */
+	i = atomic_read(&sock->ifindex);
+
+	if (i == 0)
+	    /* Socket was bound to ALL interfaces */
+	    old_flistlen_all = sock->flistlen;
+	else    /* Socket was bound to only one interface */
+	    old_ifindex = i;
+    }
+
+    if (ifindex) {
+	/* We bind the socket to only one interface. */
+	begin = ifindex;
+	end   = ifindex;
+    } else {
+	/* Socket must be bound to all interfaces. */
+	begin = 1;
+	end = RTCAN_MAX_DEVICES;
+    }
+
+    /* Check if there is space for the new binding */
+    for (i = begin; i <= end; i++) {
+	if ((dev = rtcan_dev_get_by_index(i)) == NULL)
+	    continue;
+	free_entries = dev->free_entries + old_flistlen_all;
+	rtcan_dev_dereference(dev);
+	if (i == old_ifindex)
+	    free_entries += sock->flistlen;
+	/* Compare free list space to new filter list length */
+	if (free_entries < flistlen)
+	    return -ENOSPC;
+    }
+
+    return 0;
+}
+
+
+int rtcan_raw_add_filter(struct rtcan_socket *sock, int ifindex)
+{
+    int i, j, begin, end;
+    struct rtcan_recv *first, *last;
+    struct rtcan_device *dev;
+    /* Check if filter list has been defined by user */
+    int flistlen;
+
+    if (rtcan_flist_no_filter(sock->flist)) {
+	return 0;
+    }
+
+    flistlen = (sock->flist) ? sock->flist->flistlen : 0;
+
+    if (ifindex) {
+	/* We bind the socket to only one interface. */
+	begin = ifindex;
+	end   = ifindex;
+    } else {
+	/* Socket must be bound to all interfaces. */
+	begin = 1;
+	end = RTCAN_MAX_DEVICES;
+    }
+
+    for (i = begin; i <= end; i++) {
+	if ((dev = rtcan_dev_get_by_index(i)) == NULL)
+	    continue;
+
+	/* Take first entry of empty list */
+	first = last = dev->empty_list;
+	/* Check if filter list is empty */
+	if (flistlen) {
+	    /* Filter list is not empty */
+	    /* Register first filter */
+	    rtcan_raw_mount_filter(&last->can_filter,
+				   &sock->flist->flist[0]);
+	    last->match_count = 0;
+	    last->sock = sock;
+	    for (j = 1; j < flistlen; j++) {
+		/* Register remaining filters */
+		last = last->next;
+		rtcan_raw_mount_filter(&last->can_filter,
+				       &sock->flist->flist[j]);
+		last->sock = sock;
+		last->match_count = 0;
+	    }
+	    /* Decrease free entries counter by length of filter list */
+	    dev->free_entries -= flistlen;
+
+	} else {
+	    /* Filter list is empty. Socket must be bound to all CAN IDs. */
+	    /* Fill list entry members */
+	    last->can_filter.can_id = last->can_filter.can_mask = 0;
+	    last->sock = sock;
+	    last->match_count = 0;
+	    /* Decrease free entries counter by 1
+	     * (one filter for all CAN frames) */
+	    dev->free_entries--;
+	}
+
+	/* Set new empty list header */
+	dev->empty_list = last->next;
+	/* Add new partial recv list to the head of reception list */
+	last->next = dev->recv_list;
+	/* Adjust reception list pointer */
+	dev->recv_list = first;
+
+	rtcan_raw_print_filter(dev);
+	rtcan_dev_dereference(dev);
+    }
+
+    return (flistlen) ? flistlen : 1;
+}
+
+
+void rtcan_raw_remove_filter(struct rtcan_socket *sock)
+{
+    int i, j, begin, end;
+    struct rtcan_recv *first, *next, *last;
+    int ifindex = atomic_read(&sock->ifindex);
+    struct rtcan_device *dev;
+
+    if (!rtcan_sock_has_filter(sock)) /* nothing to do */
+	return;
+
+    if (ifindex) {
+	/* Socket was bound to one interface only. */
+	begin = ifindex;
+	end   = ifindex;
+    } else {
+	/* Socket was bound to all interfaces */
+	begin = 1;
+	end = RTCAN_MAX_DEVICES;
+    }
+
+    for (i = begin; i <= end; i++) {
+
+	if ((dev = rtcan_dev_get_by_index(i)) == NULL)
+	    continue;
+
+	/* Search for first list entry pointing to this socket */
+	first = NULL;
+	next = dev->recv_list;
+	while (next->sock != sock) {
+	    first = next;
+	    next = first->next;
+	}
+
+	/* Now go to the end of the old filter list */
+	last = next;
+	for (j = 1; j < sock->flistlen; j++)
+	    last = last->next;
+
+	/* Detach found first list entry from reception list */
+	if (first)
+	    first->next = last->next;
+	else
+	    dev->recv_list = last->next;
+	/* Add partial list to the head of empty list */
+	last->next = dev->empty_list;
+	/* Adjust empty list pointer */
+	dev->empty_list = next;
+
+	/* Increase free entries counter by length of old filter list */
+	dev->free_entries += sock->flistlen;
+
+	rtcan_raw_print_filter(dev);
+	rtcan_dev_dereference(dev);
+    }
+}
+++ linux-patched/drivers/xenomai/can/rtcan_dev.h	2022-03-21 12:58:29.238890434 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/rtcan_internal.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Derived from RTnet project file stack/include/rtdev.h:
+ *
+ * Copyright (C) 1999       Lineo, Inc
+ *               1999, 2002 David A. Schleef <ds@schleef.org>
+ *               2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __RTCAN_DEV_H_
+#define __RTCAN_DEV_H_
+
+
+#ifdef __KERNEL__
+
+#include <asm/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/semaphore.h>
+
+#include "rtcan_list.h"
+
+
+/* Maximum number of CAN devices the driver can handle */
+#define RTCAN_MAX_DEVICES    CONFIG_XENO_DRIVERS_CAN_MAX_DEVICES
+
+/* Maximum number of single filters per controller which can be registered
+ * for reception at the same time using Bind */
+#define RTCAN_MAX_RECEIVERS  CONFIG_XENO_DRIVERS_CAN_MAX_RECEIVERS
+
+/* Suppress handling of refcount if module support is not enabled
+ * or modules cannot be unloaded */
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_MODULE_UNLOAD)
+#define RTCAN_USE_REFCOUNT
+#endif
+
+/*
+ * CAN hardware-dependent bit-timing constant
+ *
+ * Used for calculating and checking bit-timing parameters
+ */
+struct can_bittiming_const {
+	char name[16];		/* Name of the CAN controller hardware */
+	__u32 tseg1_min;	/* Time segment 1 = prop_seg + phase_seg1 */
+	__u32 tseg1_max;
+	__u32 tseg2_min;	/* Time segment 2 = phase_seg2 */
+	__u32 tseg2_max;
+	__u32 sjw_max;		/* Synchronisation jump width */
+	__u32 brp_min;		/* Bit-rate prescaler */
+	__u32 brp_max;
+	__u32 brp_inc;
+};
+
+struct rtcan_device {
+    unsigned int        version;
+
+    char                name[IFNAMSIZ];
+
+    char                *ctrl_name; /* Name of CAN controller */
+    char                *board_name;/* Name of CAN board */
+
+    unsigned long       base_addr;  /* device I/O address   */
+    rtdm_irq_t          irq_handle; /* RTDM IRQ handle */
+
+    int                 ifindex;
+#ifdef RTCAN_USE_REFCOUNT
+    atomic_t            refcount;
+#endif
+
+    void                *priv;      /* pointer to chip private data */
+
+    void                *board_priv;/* pointer to board private data*/
+
+    struct semaphore    nrt_lock;   /* non-real-time locking        */
+
+    /* Spinlock for all devices (but not for all attributes) and also for HW
+     * access to all CAN controllers
+     */
+    rtdm_lock_t         device_lock;
+
+    /* Acts as a mutex allowing only one sender to write to the controller
+     * at a time. Created when the controller goes into operating mode,
+     * destroyed when it goes into reset mode. */
+    rtdm_sem_t          tx_sem;
+
+    /* CAN system clock frequency of this device. Protected by device_lock
+     * in all device structures. */
+    unsigned int        can_sys_clock;
+
+
+    /* Baudrate of this device. Protected by device_lock in all device
+     * structures. */
+    can_baudrate_t      baudrate;
+
+    struct can_bittime  bit_time;
+    const struct can_bittiming_const *bittiming_const;
+
+    /* State which the controller is in. Protected by device_lock in all
+     * device structures. */
+    can_state_t state;
+
+    /* State which the controller was before sleeping. Protected by
+     * device_lock in all device structures. */
+    can_state_t          state_before_sleep;
+
+    /* Controller specific settings. Protected by device_lock in all
+     * device structures. */
+    can_ctrlmode_t       ctrl_mode;
+
+    /* Device operations */
+    int                 (*hard_start_xmit)(struct rtcan_device *dev,
+					   struct can_frame *frame);
+    int                 (*do_set_mode)(struct rtcan_device *dev,
+				       can_mode_t mode,
+				       rtdm_lockctx_t *lock_ctx);
+    can_state_t         (*do_get_state)(struct rtcan_device *dev);
+    int                 (*do_set_bit_time)(struct rtcan_device *dev,
+					   struct can_bittime *bit_time,
+					   rtdm_lockctx_t *lock_ctx);
+#ifdef CONFIG_XENO_DRIVERS_CAN_BUS_ERR
+    void                (*do_enable_bus_err)(struct rtcan_device *dev);
+#endif
+
+    /* Reception list head. This list contains all filters which have been
+     * registered via a bind call. */
+    struct rtcan_recv               *recv_list;
+
+    /* Empty list head. This list contains all empty entries not needed
+     * by the reception list and is therefore disjoint from it. */
+    struct rtcan_recv               *empty_list;
+
+    /* Preallocated array for the list entries. To increase cache
+     * locality all list elements are kept in this array. */
+    struct rtcan_recv               receivers[RTCAN_MAX_RECEIVERS];
+
+    /* Indicates the length of the empty list */
+    int                             free_entries;
+
+    /* A few statistics counters */
+    unsigned int tx_count;
+    unsigned int rx_count;
+    unsigned int err_count;
+
+#ifdef CONFIG_PROC_FS
+    struct proc_dir_entry *proc_root;
+#endif
+#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK
+    struct rtcan_skb tx_skb;
+    struct rtcan_socket *tx_socket;
+#endif /* CONFIG_XENO_DRIVERS_CAN_LOOPBACK */
+};
+
+
+extern struct semaphore rtcan_devices_nrt_lock;
+
+
+void rtcan_dev_free(struct rtcan_device *dev);
+
+int rtcan_dev_register(struct rtcan_device *dev);
+int rtcan_dev_unregister(struct rtcan_device *dev);
+
+struct rtcan_device *rtcan_dev_alloc(int sizeof_priv, int sizeof_board_priv);
+void rtcan_dev_alloc_name (struct rtcan_device *dev, const char *name_mask);
+
+struct rtcan_device *rtcan_dev_get_by_name(const char *if_name);
+struct rtcan_device *rtcan_dev_get_by_index(int ifindex);
+
+#ifdef RTCAN_USE_REFCOUNT
+#define rtcan_dev_reference(dev)      atomic_inc(&(dev)->refcount)
+#define rtcan_dev_dereference(dev)    atomic_dec(&(dev)->refcount)
+#else
+#define rtcan_dev_reference(dev)      do {} while(0)
+#define rtcan_dev_dereference(dev)    do {} while(0)
+#endif
+
+#ifdef CONFIG_PROC_FS
+int rtcan_dev_create_proc(struct rtcan_device* dev);
+void rtcan_dev_remove_proc(struct rtcan_device* dev);
+#else /* !CONFIG_PROC_FS */
+static inline int rtcan_dev_create_proc(struct rtcan_device* dev)
+{
+	return 0;
+}
+static inline void rtcan_dev_remove_proc(struct rtcan_device* dev) { }
+#endif /* !CONFIG_PROC_FS */
+
+#endif  /* __KERNEL__ */
+
+#endif  /* __RTCAN_DEV_H_ */
+++ linux-patched/drivers/xenomai/can/rtcan_internal.h	2022-03-21 12:58:29.234890473 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/rtcan_socket.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Derived from RTnet project file stack/include/rtnet_internal.h:
+ *
+ * Copyright (C) 1999       Lineo, Inc
+ *               1999, 2002 David A. Schleef <ds@schleef.org>
+ *               2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *               2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __RTCAN_INTERNAL_H_
+#define __RTCAN_INTERNAL_H_
+
+#include <linux/module.h>
+#include <rtdm/driver.h>
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_DEBUG
+#define RTCAN_ASSERT(expr, func) \
+    if (!(expr)) { \
+	rtdm_printk("Assertion failed! %s:%s:%d %s\n", \
+	__FILE__, __FUNCTION__, __LINE__, (#expr)); \
+	func \
+    }
+#else
+#define RTCAN_ASSERT(expr, func)
+#endif /* CONFIG_XENO_DRIVERS_CAN_DEBUG */
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_DEBUG
+# define RTCAN_DBG(fmt,args...) do { printk(fmt ,##args); } while (0)
+# define RTCAN_RTDM_DBG(fmt,args...) do { rtdm_printk(fmt ,##args); } while (0)
+#else
+# define RTCAN_DBG(fmt,args...) do {} while (0)
+# define RTCAN_RTDM_DBG(fmt,args...) do {} while (0)
+#endif
+
+#define rtcan_priv(dev)			(dev)->priv
+#define rtcandev_dbg(dev, fmt, args...)				\
+	printk(KERN_DEBUG "%s: " fmt, (dev)->name, ##args)
+#define rtcandev_info(dev, fmt, args...)			\
+	printk(KERN_INFO "%s: " fmt, (dev)->name, ##args)
+#define rtcandev_warn(dev, fmt, args...)			\
+	printk(KERN_WARNING "%s: " fmt, (dev)->name, ##args)
+#define rtcandev_err(dev, fmt, args...)				\
+	printk(KERN_ERR "%s: " fmt, (dev)->name, ##args)
+
+#endif /* __RTCAN_INTERNAL_H_ */
+++ linux-patched/drivers/xenomai/can/rtcan_socket.h	2022-03-21 12:58:29.231890502 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005,2006 Sebastian Smolorz
+ *                         <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * Derived from RTnet project file include/stack/socket.h:
+ *
+ * Copyright (C) 1999       Lineo, Inc
+ *               1999, 2002 David A. Schleef <ds@schleef.org>
+ *               2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *               2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __RTCAN_SOCKET_H_
+#define __RTCAN_SOCKET_H_
+
+#include <rtdm/driver.h>
+
+#include <rtdm/can.h>
+
+
+
+/* This MUST BE 2^N */
+#define RTCAN_RXBUF_SIZE          CONFIG_XENO_DRIVERS_CAN_RXBUF_SIZE
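+
+/*
+ * The power-of-two requirement allows ring buffer indices to wrap with a
+ * simple mask, as done throughout the driver:
+ *
+ *   index = (index + len) & (RTCAN_RXBUF_SIZE - 1);
+ *
+ * e.g. with a (hypothetical) buffer size of 1024, index 1020 + 8 wraps
+ * around to 4.
+ */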
+
+/* Size of timestamp */
+#define RTCAN_TIMESTAMP_SIZE      sizeof(nanosecs_abs_t)
+
+/* Bit in the can_dlc member of struct rtcan_rb_frame used to indicate
+ * whether a frame carries a timestamp or not */
+#define RTCAN_HAS_TIMESTAMP       0x80
+
+/* Mask for clearing bit RTCAN_HAS_TIMESTAMP */
+#define RTCAN_HAS_NO_TIMESTAMP    0x7F
+
+#define RTCAN_SOCK_UNBOUND        -1
+#define RTCAN_FLIST_NO_FILTER     (struct rtcan_filter_list *)-1
+#define rtcan_flist_no_filter(f)  ((f) == RTCAN_FLIST_NO_FILTER)
+#define rtcan_sock_has_filter(s)  ((s)->flistlen > 0)
+#define rtcan_sock_is_bound(s)    ((s)->flistlen >= 0)
+
+/*
+ *  Internal frame representation within the ring buffer of a
+ *  struct rtcan_socket.
+ *
+ *  The data array is of arbitrary size when the frame is actually
+ *  stored in a socket's ring buffer. The timestamp member exists if the
+ *  socket was set to take timestamps (then it follows directly after the
+ *  arbitrary-sized data array), otherwise it does not exist.
+ */
+struct rtcan_rb_frame {
+
+    /* CAN ID representation equal to struct can_frame */
+    uint32_t            can_id;
+
+    /* Interface index from which the frame originates */
+    unsigned char       can_ifindex;
+
+    /* DLC (between 0 and 15) and a mark whether the frame carries a
+     * timestamp. The presence of a timestamp is indicated by the
+     * RTCAN_HAS_TIMESTAMP bit. */
+    unsigned char       can_dlc;
+
+    /* Data bytes */
+    uint8_t             data[8];
+
+    /* High precision timestamp indicating when the frame was received.
+     * Exists when RTCAN_HAS_TIMESTAMP bit in can_dlc is set. */
+    nanosecs_abs_t      timestamp;
+
+} __attribute__ ((packed));
+
+
+/* Size of struct rtcan_rb_frame without any data bytes and timestamp */
+#define EMPTY_RB_FRAME_SIZE \
+    sizeof(struct rtcan_rb_frame) - 8 - RTCAN_TIMESTAMP_SIZE
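+
+/*
+ * A frame therefore occupies EMPTY_RB_FRAME_SIZE plus its payload bytes in
+ * a socket's ring buffer, plus RTCAN_TIMESTAMP_SIZE when it is delivered
+ * with a timestamp. E.g. a data frame with can_dlc = 3 received on a
+ * timestamping socket takes up EMPTY_RB_FRAME_SIZE + 3 + RTCAN_TIMESTAMP_SIZE
+ * bytes.
+ */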
+
+
+/*
+ *  Wrapper structure around a struct rtcan_rb_frame with actual size
+ *  of the frame.
+ *
+ *  This isn't really a socket buffer, merely something resembling one.
+ *  It is constructed within the interrupt routine when a CAN frame is
+ *  read from the controller and then passed to the reception handler,
+ *  where only rb_frame finds its way into the sockets' ring buffers.
+ */
+struct rtcan_skb {
+    /* Actual size of following rb_frame (without timestamp) */
+    size_t                rb_frame_size;
+    /* Frame to be stored in the sockets' ring buffers (as is) */
+    struct rtcan_rb_frame rb_frame;
+};
+
+struct rtcan_filter_list {
+    int flistlen;
+    struct can_filter flist[1];
+};
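+
+/*
+ * flist[] is effectively a variable-length array: the CAN_RAW_FILTER
+ * setsockopt handler allocates the structure with room for flistlen
+ * struct can_filter entries and copies the user-supplied filters into it.
+ */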
+
+/*
+ * Internal CAN socket structure.
+ *
+ * Every socket has an internal ring buffer for incoming messages. A message
+ * is not stored as a struct can_frame (in order to save buffer space)
+ * but as struct rtcan_rb_frame of arbitrary length depending on the
+ * actual payload.
+ */
+struct rtcan_socket {
+
+    struct list_head    socket_list;
+
+    unsigned long	flags;
+
+    /* Transmission timeout in ns. Protected by rtcan_socket_lock
+     * in all socket structures. */
+    nanosecs_rel_t      tx_timeout;
+
+    /* Reception timeout in ns. Protected by rtcan_socket_lock
+     * in all socket structures. */
+    nanosecs_rel_t      rx_timeout;
+
+
+    /* Begin of first frame data in the ring buffer. Protected by
+     * rtcan_socket_lock in all socket structures. */
+    int                 recv_head;
+
+    /* End of last frame data in the ring buffer. I.e. position of first
+     * free byte in the ring buffer. Protected by
+     * rtcan_socket_lock in all socket structures. */
+    int                 recv_tail;
+
+    /* Ring buffer for incoming CAN frames. Protected by
+     * rtcan_socket_lock in all socket structures. */
+    unsigned char       recv_buf[RTCAN_RXBUF_SIZE];
+
+    /* Semaphore for receivers and incoming messages */
+    rtdm_sem_t          recv_sem;
+
+
+    /* All senders waiting to be able to send
+     * via this socket are queued here */
+    struct list_head    tx_wait_head;
+
+
+    /* Interface index the socket is bound to. Protected by
+     * rtcan_recv_list_lock in all socket structures. */
+    atomic_t            ifindex;
+
+    /* Length of the filter list, i.e. how many entries this socket occupies
+     * in the reception list. RTCAN_SOCK_UNBOUND (-1) if unbound. Protected by
+     * rtcan_recv_list_lock in all socket structures. */
+    int                 flistlen;
+
+    uint32_t            err_mask;
+
+    uint32_t            rx_buf_full;
+
+    struct rtcan_filter_list *flist;
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK
+    int loopback;
+#endif
+};
+
+
+
+/*
+ *  Get the RTDM context from a struct rtcan_socket
+ *
+ *  @param[in] sock Pointer to socket structure
+ *
+ *  @return Pointer to the struct rtdm_fd file descriptor this socket
+ *          belongs to
+ */
+/* FIXME: to be replaced with container_of */
+static inline struct rtdm_fd *rtcan_socket_to_fd(struct rtcan_socket *sock)
+{
+    return rtdm_private_to_fd(sock);
+}
+
+/* Spinlock protecting the ring buffers and the timeouts of all
+ * rtcan_sockets */
+extern rtdm_lock_t rtcan_socket_lock;
+extern struct list_head rtcan_socket_list;
+
+extern void rtcan_socket_init(struct rtdm_fd *fd);
+extern void rtcan_socket_cleanup(struct rtdm_fd *fd);
+
+
+#endif  /* __RTCAN_SOCKET_H_ */
+++ linux-patched/drivers/xenomai/can/Makefile	2022-03-21 12:58:29.227890541 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/rtcan_raw.c	1970-01-01 01:00:00.000000000 +0100
+
+ccflags-y += -I$(srctree)/drivers/xenomai/can
+
+obj-$(CONFIG_XENO_DRIVERS_CAN) += xeno_can.o mscan/ sja1000/ peak_canfd/
+obj-$(CONFIG_XENO_DRIVERS_CAN_FLEXCAN) += xeno_can_flexcan.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_VIRT) += xeno_can_virt.o
+
+xeno_can-y := rtcan_dev.o rtcan_socket.o rtcan_module.o rtcan_raw.o rtcan_raw_dev.o rtcan_raw_filter.o
+xeno_can_virt-y := rtcan_virt.o
+xeno_can_flexcan-y := rtcan_flexcan.o
+++ linux-patched/drivers/xenomai/can/rtcan_raw.c	2022-03-21 12:58:29.224890570 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd_user.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                          <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * Parts of this software are based on the following:
+ *
+ * - RTAI CAN device driver for SJA1000 controllers by Jan Kiszka
+ *
+ * - linux-can.patch, a CAN socket framework for Linux,
+ *   Copyright (C) 2004, 2005, Robert Schwebel, Benedikt Spranger,
+ *   Marc Kleine-Budde, Sascha Hauer, Pengutronix
+ *
+ * - RTnet (www.rtnet.org)
+ *
+ * - serial device driver and profile included in Xenomai (RTDM),
+ *   Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/stringify.h>
+
+#include <rtdm/driver.h>
+
+#include <rtdm/can.h>
+#include "rtcan_version.h"
+#include "rtcan_socket.h"
+#include "rtcan_list.h"
+#include "rtcan_dev.h"
+#include "rtcan_raw.h"
+#include "rtcan_internal.h"
+
+
+/*
+ * Set if socket wants to receive a high precision timestamp together with
+ * CAN frames
+ */
+#define RTCAN_GET_TIMESTAMP         0
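+
+/*
+ * RTCAN_GET_TIMESTAMP is a bit index into sock->flags; it is set and
+ * cleared by the RTCAN_RTIOC_TAKE_TIMESTAMP ioctl handled below. From
+ * user space, timestamping would typically be enabled with something
+ * like (illustrative only, fd being an open CAN_RAW socket):
+ *
+ *   ioctl(fd, RTCAN_RTIOC_TAKE_TIMESTAMP, RTCAN_TAKE_TIMESTAMPS);
+ */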
+
+
+MODULE_AUTHOR("RT-Socket-CAN Development Team");
+MODULE_DESCRIPTION("RTDM CAN raw socket device driver");
+MODULE_VERSION(__stringify(RTCAN_MAJOR_VER)
+	       __stringify(RTCAN_MINOR_VER)
+	       __stringify(RTCAN_BUGFIX_VER));
+MODULE_LICENSE("GPL");
+
+void rtcan_tx_push(struct rtcan_device *dev, struct rtcan_socket *sock,
+		   can_frame_t *frame);
+
+static inline int rtcan_accept_msg(uint32_t can_id, can_filter_t *filter)
+{
+    if ((filter->can_mask & CAN_INV_FILTER))
+	return ((can_id & filter->can_mask) != filter->can_id);
+    else
+	return ((can_id & filter->can_mask) == filter->can_id);
+}
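+
+/*
+ * A frame passes a filter when (can_id & can_mask) equals the filter's
+ * can_id; if CAN_INV_FILTER is set in the mask the test is inverted.
+ * Illustrative example: a filter with can_id = 0x123 and can_mask = 0x7ff
+ * accepts only frames whose low 11 ID bits are 0x123.
+ */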
+
+
+static void rtcan_rcv_deliver(struct rtcan_recv *recv_listener,
+			      struct rtcan_skb *skb)
+{
+    int size_free;
+    size_t cpy_size, first_part_size;
+    struct rtcan_rb_frame *frame = &skb->rb_frame;
+    struct rtdm_fd *fd = rtdm_private_to_fd(recv_listener->sock);
+    struct rtcan_socket *sock;
+
+    if (rtdm_fd_lock(fd) < 0)
+	return;
+
+    sock = recv_listener->sock;
+
+    cpy_size = skb->rb_frame_size;
+    /* Check if socket wants to receive a timestamp */
+    if (test_bit(RTCAN_GET_TIMESTAMP, &sock->flags)) {
+	cpy_size += RTCAN_TIMESTAMP_SIZE;
+	frame->can_dlc |= RTCAN_HAS_TIMESTAMP;
+    } else
+	frame->can_dlc &= RTCAN_HAS_NO_TIMESTAMP;
+
+    /* Calculate free size in the ring buffer */
+    size_free = sock->recv_head - sock->recv_tail;
+    if (size_free <= 0)
+	size_free += RTCAN_RXBUF_SIZE;
+
+    /* Test if ring buffer has enough space. */
+    if (size_free > cpy_size) {
+	/* Check if we must wrap around the end of buffer */
+	if ((sock->recv_tail + cpy_size) > RTCAN_RXBUF_SIZE) {
+	    /* Wrap around: Two memcpy operations */
+
+	    first_part_size = RTCAN_RXBUF_SIZE - sock->recv_tail;
+
+	    memcpy(&sock->recv_buf[sock->recv_tail], (void *)frame,
+		   first_part_size);
+	    memcpy(&sock->recv_buf[0], (void *)frame +
+		   first_part_size, cpy_size - first_part_size);
+	} else
+	    memcpy(&sock->recv_buf[sock->recv_tail], (void *)frame,
+		   cpy_size);
+
+	/* Adjust tail */
+	sock->recv_tail = (sock->recv_tail + cpy_size) &
+	    (RTCAN_RXBUF_SIZE - 1);
+
+	/* Notify the delivery of the message */
+	rtdm_sem_up(&sock->recv_sem);
+
+    } else {
+	/* Overflow of socket's ring buffer! */
+	sock->rx_buf_full++;
+	RTCAN_RTDM_DBG("rtcan: socket buffer overflow, message discarded\n");
+    }
+
+    rtdm_fd_unlock(fd);
+}
+
+
+void rtcan_rcv(struct rtcan_device *dev, struct rtcan_skb *skb)
+{
+    nanosecs_abs_t timestamp = rtdm_clock_read();
+    /* Entry in reception list, begin with head */
+    struct rtcan_recv *recv_listener = dev->recv_list;
+    struct rtcan_rb_frame *frame = &skb->rb_frame;
+
+    /* Copy timestamp to skb */
+    memcpy((void *)&skb->rb_frame + skb->rb_frame_size,
+	   &timestamp, RTCAN_TIMESTAMP_SIZE);
+
+    if ((frame->can_id & CAN_ERR_FLAG)) {
+	dev->err_count++;
+	while (recv_listener != NULL) {
+	    if ((frame->can_id & recv_listener->sock->err_mask)) {
+		recv_listener->match_count++;
+		rtcan_rcv_deliver(recv_listener, skb);
+	    }
+	    recv_listener = recv_listener->next;
+	}
+    } else {
+	dev->rx_count++;
+	while (recv_listener != NULL) {
+	    if (rtcan_accept_msg(frame->can_id, &recv_listener->can_filter)) {
+		recv_listener->match_count++;
+		rtcan_rcv_deliver(recv_listener, skb);
+	    }
+	    recv_listener = recv_listener->next;
+	}
+    }
+}
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK
+
+void rtcan_tx_push(struct rtcan_device *dev, struct rtcan_socket *sock,
+		   can_frame_t *frame)
+{
+    struct rtcan_rb_frame *rb_frame = &dev->tx_skb.rb_frame;
+
+    RTCAN_ASSERT(dev->tx_socket == 0,
+		 rtdm_printk("(%d) TX skb still in use", dev->ifindex););
+
+    rb_frame->can_id = frame->can_id;
+    rb_frame->can_dlc = frame->can_dlc;
+    dev->tx_skb.rb_frame_size = EMPTY_RB_FRAME_SIZE;
+    if (frame->can_dlc && !(frame->can_id & CAN_RTR_FLAG)) {
+	memcpy(rb_frame->data, frame->data, frame->can_dlc);
+	dev->tx_skb.rb_frame_size += frame->can_dlc;
+    }
+    rb_frame->can_ifindex = dev->ifindex;
+    dev->tx_socket = sock;
+}
+
+void rtcan_loopback(struct rtcan_device *dev)
+{
+    nanosecs_abs_t timestamp = rtdm_clock_read();
+    /* Entry in reception list, begin with head */
+    struct rtcan_recv *recv_listener = dev->recv_list;
+    struct rtcan_rb_frame *frame = &dev->tx_skb.rb_frame;
+
+    memcpy((void *)&dev->tx_skb.rb_frame + dev->tx_skb.rb_frame_size,
+	   &timestamp, RTCAN_TIMESTAMP_SIZE);
+
+    while (recv_listener != NULL) {
+	dev->rx_count++;
+	if ((dev->tx_socket != recv_listener->sock) &&
+	    rtcan_accept_msg(frame->can_id, &recv_listener->can_filter)) {
+	    recv_listener->match_count++;
+	    rtcan_rcv_deliver(recv_listener, &dev->tx_skb);
+	}
+	recv_listener = recv_listener->next;
+    }
+    dev->tx_socket = NULL;
+}
+
+EXPORT_SYMBOL_GPL(rtcan_loopback);
+
+#endif /* CONFIG_XENO_DRIVERS_CAN_LOOPBACK */
+
+
+int rtcan_raw_socket(struct rtdm_fd *fd, int protocol)
+{
+    /* Only protocol CAN_RAW is supported */
+    if (protocol != CAN_RAW && protocol != 0)
+	return -EPROTONOSUPPORT;
+
+    rtcan_socket_init(fd);
+
+    return 0;
+}
+
+
+static inline void rtcan_raw_unbind(struct rtcan_socket *sock)
+{
+    rtcan_raw_remove_filter(sock);
+    if (!rtcan_flist_no_filter(sock->flist) && sock->flist)
+	rtdm_free(sock->flist);
+    sock->flist = NULL;
+    sock->flistlen = RTCAN_SOCK_UNBOUND;
+    atomic_set(&sock->ifindex, 0);
+}
+
+
+static void rtcan_raw_close(struct rtdm_fd *fd)
+{
+    struct rtcan_socket *sock = rtdm_fd_to_private(fd);
+    rtdm_lockctx_t lock_ctx;
+
+    /* Get lock for reception lists */
+    rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
+
+    /* Check if socket is bound */
+    if (rtcan_sock_is_bound(sock))
+	rtcan_raw_unbind(sock);
+
+    rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
+
+
+    rtcan_socket_cleanup(fd);
+}
+
+
+int rtcan_raw_bind(struct rtdm_fd *fd,
+		   struct sockaddr_can *scan)
+{
+    struct rtcan_socket *sock = rtdm_fd_to_private(fd);
+    rtdm_lockctx_t lock_ctx;
+    int ret = 0;
+
+    /* Check address family and
+       check if given length of filter list is plausible */
+    if (scan->can_family != AF_CAN)
+	return -EINVAL;
+    /* Check range of ifindex, must be between 0 and RTCAN_MAX_DEVICES */
+    if (scan->can_ifindex < 0 || scan->can_ifindex > RTCAN_MAX_DEVICES)
+	return -ENODEV;
+
+    /* Get lock for reception lists */
+    rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
+
+    if ((ret = rtcan_raw_check_filter(sock, scan->can_ifindex,
+				      sock->flist)))
+	goto out;
+    rtcan_raw_remove_filter(sock);
+    /* Add filter and mark socket as bound */
+    sock->flistlen = rtcan_raw_add_filter(sock, scan->can_ifindex);
+
+    /* Set new interface index the socket is now bound to */
+    atomic_set(&sock->ifindex, scan->can_ifindex);
+
+ out:
+    rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
+
+    return ret;
+}
+
+
+static int rtcan_raw_setsockopt(struct rtdm_fd *fd,
+				struct _rtdm_setsockopt_args *so)
+{
+    struct rtcan_socket *sock = rtdm_fd_to_private(fd);
+    struct rtcan_filter_list *flist;
+    int ifindex = atomic_read(&sock->ifindex);
+    rtdm_lockctx_t lock_ctx;
+    can_err_mask_t err_mask;
+    int val, ret = 0;
+
+    if (so->level != SOL_CAN_RAW)
+	return -ENOPROTOOPT;
+
+    switch (so->optname) {
+
+    case CAN_RAW_FILTER:
+	if (so->optlen == 0) {
+	    flist = RTCAN_FLIST_NO_FILTER;
+	} else {
+	    int flistlen;
+	    flistlen = so->optlen / sizeof(struct can_filter);
+	    if (flistlen < 1 || flistlen > RTCAN_MAX_RECEIVERS ||
+		so->optlen % sizeof(struct can_filter) != 0)
+		return -EINVAL;
+
+	    flist = (struct rtcan_filter_list *)rtdm_malloc(so->optlen + sizeof(int));
+	    if (flist == NULL)
+		return -ENOMEM;
+	    if (rtdm_fd_is_user(fd)) {
+		if (!rtdm_read_user_ok(fd, so->optval, so->optlen) ||
+		    rtdm_copy_from_user(fd, flist->flist,
+					so->optval, so->optlen)) {
+		    rtdm_free(flist);
+		    return -EFAULT;
+		}
+	    } else
+		memcpy(flist->flist, so->optval, so->optlen);
+	    flist->flistlen = flistlen;
+	}
+
+	/* Get lock for reception lists */
+	rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
+
+	/* Check if there is space for the filter list if already bound */
+	if (rtcan_sock_is_bound(sock)) {
+	    if (!rtcan_flist_no_filter(flist) &&
+		(ret = rtcan_raw_check_filter(sock, ifindex, flist))) {
+		rtdm_free(flist);
+		goto out_filter;
+	    }
+	    rtcan_raw_remove_filter(sock);
+	}
+
+	/* Remove previous list and attach the new one */
+	if (!rtcan_flist_no_filter(flist) && sock->flist)
+	    rtdm_free(sock->flist);
+	sock->flist = flist;
+
+	if (rtcan_sock_is_bound(sock))
+	    sock->flistlen = rtcan_raw_add_filter(sock, ifindex);
+
+    out_filter:
+	/* Release lock for reception lists */
+	rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
+	break;
+
+    case CAN_RAW_ERR_FILTER:
+
+	if (so->optlen != sizeof(can_err_mask_t))
+	    return -EINVAL;
+
+	if (rtdm_fd_is_user(fd)) {
+	    if (!rtdm_read_user_ok(fd, so->optval, so->optlen) ||
+		rtdm_copy_from_user(fd, &err_mask, so->optval, so->optlen))
+		return -EFAULT;
+	} else
+	    memcpy(&err_mask, so->optval, so->optlen);
+
+	/* Get lock for reception lists */
+	rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
+	sock->err_mask = err_mask;
+	rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
+
+	break;
+
+    case CAN_RAW_LOOPBACK:
+
+	if (so->optlen != sizeof(int))
+	    return -EINVAL;
+
+	if (rtdm_fd_is_user(fd)) {
+	    if (!rtdm_read_user_ok(fd, so->optval, so->optlen) ||
+		rtdm_copy_from_user(fd, &val, so->optval, so->optlen))
+		return -EFAULT;
+	} else
+	    memcpy(&val, so->optval, so->optlen);
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK
+	sock->loopback = val;
+#else
+	if (val)
+	    return -EOPNOTSUPP;
+#endif
+	break;
+
+    default:
+	ret = -ENOPROTOOPT;
+    }
+
+    return ret;
+}
+
+
+int rtcan_raw_ioctl(struct rtdm_fd *fd,
+		    unsigned int request, void *arg)
+{
+    int ret = 0;
+
+    switch (request) {
+    case _RTIOC_BIND: {
+	struct _rtdm_setsockaddr_args *setaddr, setaddr_buf;
+	struct sockaddr_can *sockaddr, sockaddr_buf;
+
+	if (rtdm_fd_is_user(fd)) {
+	    /* Copy argument structure from userspace */
+	    if (!rtdm_read_user_ok(fd, arg,
+				   sizeof(struct _rtdm_setsockaddr_args)) ||
+		rtdm_copy_from_user(fd, &setaddr_buf, arg,
+				    sizeof(struct _rtdm_setsockaddr_args)))
+		return -EFAULT;
+
+	    setaddr = &setaddr_buf;
+
+	    /* Check size */
+	    if (setaddr->addrlen != sizeof(struct sockaddr_can))
+		return -EINVAL;
+
+	    /* Copy argument structure from userspace */
+	    if (!rtdm_read_user_ok(fd, arg,
+				   sizeof(struct sockaddr_can)) ||
+		rtdm_copy_from_user(fd, &sockaddr_buf, setaddr->addr,
+				    sizeof(struct sockaddr_can)))
+		return -EFAULT;
+	    sockaddr = &sockaddr_buf;
+	} else {
+	    setaddr = (struct _rtdm_setsockaddr_args *)arg;
+	    sockaddr = (struct sockaddr_can *)setaddr->addr;
+	}
+
+	/* Now, all required data are in kernel space */
+	ret = rtcan_raw_bind(fd, sockaddr);
+
+	break;
+    }
+
+    case _RTIOC_SETSOCKOPT: {
+	struct _rtdm_setsockopt_args *setopt;
+	struct _rtdm_setsockopt_args setopt_buf;
+
+	if (rtdm_fd_is_user(fd)) {
+	    if (!rtdm_read_user_ok(fd, arg,
+				   sizeof(struct _rtdm_setsockopt_args)) ||
+		rtdm_copy_from_user(fd, &setopt_buf, arg,
+				    sizeof(struct _rtdm_setsockopt_args)))
+		return -EFAULT;
+
+	    setopt = &setopt_buf;
+	} else
+	    setopt = (struct _rtdm_setsockopt_args *)arg;
+
+	return rtcan_raw_setsockopt(fd, setopt);
+    }
+
+    case RTCAN_RTIOC_TAKE_TIMESTAMP: {
+	struct rtcan_socket *sock = rtdm_fd_to_private(fd);
+	long timestamp_switch = (long)arg;
+
+	if (timestamp_switch == RTCAN_TAKE_TIMESTAMPS)
+	    set_bit(RTCAN_GET_TIMESTAMP, &sock->flags);
+	else
+	    clear_bit(RTCAN_GET_TIMESTAMP, &sock->flags);
+	break;
+    }
+
+    case RTCAN_RTIOC_RCV_TIMEOUT:
+    case RTCAN_RTIOC_SND_TIMEOUT: {
+	/* Do some work these requests have in common. */
+	struct rtcan_socket *sock = rtdm_fd_to_private(fd);
+
+	nanosecs_rel_t *timeout = (nanosecs_rel_t *)arg;
+	nanosecs_rel_t timeo_buf;
+
+	if (rtdm_fd_is_user(fd)) {
+	    /* Copy 64 bit timeout value from userspace */
+	    if (!rtdm_read_user_ok(fd, arg,
+				   sizeof(nanosecs_rel_t)) ||
+		rtdm_copy_from_user(fd, &timeo_buf,
+				    arg, sizeof(nanosecs_rel_t)))
+		return -EFAULT;
+
+	    timeout = &timeo_buf;
+	}
+
+	/* Now the differences begin between the requests. */
+	if (request == RTCAN_RTIOC_RCV_TIMEOUT)
+	    sock->rx_timeout = *timeout;
+	else
+	    sock->tx_timeout = *timeout;
+
+	break;
+    }
+
+    default:
+	ret = rtcan_raw_ioctl_dev(fd, request, arg);
+	break;
+    }
+
+    return ret;
+}
+
+
+#define MEMCPY_FROM_RING_BUF(to, len)					\
+do {									\
+	if (unlikely((recv_buf_index + len) > RTCAN_RXBUF_SIZE)) { 	\
+		/* Wrap around end of buffer */				\
+		first_part_size = RTCAN_RXBUF_SIZE - recv_buf_index; 	\
+		memcpy(to, &recv_buf[recv_buf_index], first_part_size);	\
+		memcpy((void *)to + first_part_size, recv_buf,		\
+		       len - first_part_size);				\
+	} else								\
+		memcpy(to, &recv_buf[recv_buf_index], len);		\
+	recv_buf_index = (recv_buf_index + len) & (RTCAN_RXBUF_SIZE - 1); \
+} while (0)
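+
+/*
+ * MEMCPY_FROM_RING_BUF() copies len bytes out of the socket's ring buffer
+ * starting at recv_buf_index, transparently handling the wrap-around at the
+ * buffer end, and advances recv_buf_index. It relies on the local variables
+ * recv_buf, recv_buf_index and first_part_size of rtcan_raw_recvmsg() below.
+ */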
+
+ssize_t rtcan_raw_recvmsg(struct rtdm_fd *fd,
+			  struct user_msghdr *msg, int flags)
+{
+    struct rtcan_socket *sock = rtdm_fd_to_private(fd);
+    struct sockaddr_can scan;
+    nanosecs_rel_t timeout;
+    struct iovec *iov = (struct iovec *)msg->msg_iov;
+    struct iovec iov_buf;
+    can_frame_t frame;
+    nanosecs_abs_t timestamp = 0;
+    unsigned char ifindex;
+    unsigned char can_dlc;
+    unsigned char *recv_buf;
+    int recv_buf_index;
+    size_t first_part_size;
+    size_t payload_size;
+    rtdm_lockctx_t lock_ctx;
+    int ret;
+
+    /* Clear frame memory location */
+    memset(&frame, 0, sizeof(can_frame_t));
+
+    /* Check flags */
+    if (flags & ~(MSG_DONTWAIT | MSG_PEEK))
+	return -EINVAL;
+
+
+    /* Check if msghdr entries are sane */
+
+    if (msg->msg_name != NULL) {
+	if (msg->msg_namelen < sizeof(struct sockaddr_can))
+	    return -EINVAL;
+
+	if (rtdm_fd_is_user(fd)) {
+	    if (!rtdm_rw_user_ok(fd, msg->msg_name, msg->msg_namelen))
+		return -EFAULT;
+	}
+
+    } else {
+	if (msg->msg_namelen != 0)
+	    return -EINVAL;
+    }
+
+    /* Check msg_iovlen, only one buffer allowed */
+    if (msg->msg_iovlen != 1)
+	return -EMSGSIZE;
+
+    if (rtdm_fd_is_user(fd)) {
+	/* Copy IO vector from userspace */
+	if (!rtdm_rw_user_ok(fd, msg->msg_iov,
+			     sizeof(struct iovec)) ||
+	    rtdm_copy_from_user(fd, &iov_buf, msg->msg_iov,
+				sizeof(struct iovec)))
+	    return -EFAULT;
+
+	iov = &iov_buf;
+    }
+
+    /* Check size of buffer */
+    if (iov->iov_len < sizeof(can_frame_t))
+	return -EMSGSIZE;
+
+    /* Check buffer if in user space */
+    if (rtdm_fd_is_user(fd)) {
+	if (!rtdm_rw_user_ok(fd, iov->iov_base, iov->iov_len))
+	    return -EFAULT;
+    }
+
+    if (msg->msg_control != NULL) {
+	if (msg->msg_controllen < sizeof(nanosecs_abs_t))
+	    return -EINVAL;
+
+	if (rtdm_fd_is_user(fd)) {
+	    if (!rtdm_rw_user_ok(fd, msg->msg_control,
+				 msg->msg_controllen))
+		return -EFAULT;
+	}
+
+    } else {
+	if (msg->msg_controllen != 0)
+	    return -EINVAL;
+    }
+
+    rtcan_raw_enable_bus_err(sock);
+
+    /* Set RX timeout */
+    timeout = (flags & MSG_DONTWAIT) ? RTDM_TIMEOUT_NONE : sock->rx_timeout;
+
+    /* Fetch message (ok, try it ...) */
+    ret = rtdm_sem_timeddown(&sock->recv_sem, timeout, NULL);
+
+    /* Error code returned? */
+    if (unlikely(ret)) {
+	/* Which error code? */
+
+	if (ret == -EIDRM)
+	    /* Socket was closed */
+	    return -EBADF;
+
+	else if (ret == -EWOULDBLOCK)
+	    /* We would block but don't want to */
+	    return -EAGAIN;
+
+	else
+	    /* Return all other error codes unmodified. */
+	    return ret;
+    }
+
+
+    /* OK, we've got mail. */
+
+    rtdm_lock_get_irqsave(&rtcan_socket_lock, lock_ctx);
+
+
+    /* Construct a struct can_frame with data from socket's ring buffer */
+    recv_buf_index = sock->recv_head;
+    recv_buf = sock->recv_buf;
+
+
+    /* Begin with CAN ID */
+    MEMCPY_FROM_RING_BUF(&frame.can_id, sizeof(uint32_t));
+
+
+    /* Fetch interface index */
+    ifindex = recv_buf[recv_buf_index];
+    recv_buf_index = (recv_buf_index + 1) & (RTCAN_RXBUF_SIZE - 1);
+
+
+    /* Fetch DLC (with indicator if a timestamp exists) */
+    can_dlc = recv_buf[recv_buf_index];
+    recv_buf_index = (recv_buf_index + 1) & (RTCAN_RXBUF_SIZE - 1);
+
+    frame.can_dlc = can_dlc & RTCAN_HAS_NO_TIMESTAMP;
+    payload_size = (frame.can_dlc > 8) ? 8 : frame.can_dlc;
+
+
+    /* If the frame is an RTR frame or has no payload, there is no need
+     * to copy the data bytes. */
+    if (!(frame.can_id & CAN_RTR_FLAG) && payload_size)
+	/* Copy data bytes */
+	MEMCPY_FROM_RING_BUF(frame.data, payload_size);
+
+    /* Is a timestamp available and is the caller actually interested? */
+    if (msg->msg_controllen && (can_dlc & RTCAN_HAS_TIMESTAMP))
+	/* Copy timestamp */
+	MEMCPY_FROM_RING_BUF(&timestamp, RTCAN_TIMESTAMP_SIZE);
+
+    /* Message completely read from the socket's ring buffer. Now check if
+     * caller is just peeking. */
+    if (flags & MSG_PEEK)
+	/* Next one, please! */
+	rtdm_sem_up(&sock->recv_sem);
+    else
+	/* Adjust begin of first message in the ring buffer. */
+	sock->recv_head = recv_buf_index;
+
+
+    /* Release lock */
+    rtdm_lock_put_irqrestore(&rtcan_socket_lock, lock_ctx);
+
+
+    /* Create CAN socket address to give back */
+    if (msg->msg_namelen) {
+	scan.can_family = AF_CAN;
+	scan.can_ifindex = ifindex;
+    }
+
+
+    /* Last duty: Copy all back to the caller's buffers. */
+
+    if (rtdm_fd_is_user(fd)) {
+	/* Copy to user space */
+
+	/* Copy socket address */
+	if (msg->msg_namelen) {
+	    if (rtdm_copy_to_user(fd, msg->msg_name, &scan,
+				  sizeof(struct sockaddr_can)))
+		return -EFAULT;
+
+	    msg->msg_namelen = sizeof(struct sockaddr_can);
+	}
+
+	/* Copy CAN frame */
+	if (rtdm_copy_to_user(fd, iov->iov_base, &frame,
+			      sizeof(can_frame_t)))
+	    return -EFAULT;
+	/* Adjust iovec in the common way */
+	iov->iov_base += sizeof(can_frame_t);
+	iov->iov_len -= sizeof(can_frame_t);
+	/* ... and copy it, too. */
+	if (rtdm_copy_to_user(fd, msg->msg_iov, iov,
+			      sizeof(struct iovec)))
+	    return -EFAULT;
+
+	/* Copy timestamp if existent and wanted */
+	if (msg->msg_controllen) {
+	    if (can_dlc & RTCAN_HAS_TIMESTAMP) {
+		if (rtdm_copy_to_user(fd, msg->msg_control,
+				      &timestamp, RTCAN_TIMESTAMP_SIZE))
+		    return -EFAULT;
+
+		msg->msg_controllen = RTCAN_TIMESTAMP_SIZE;
+	    } else
+		msg->msg_controllen = 0;
+	}
+
+    } else {
+	/* Kernel space */
+
+	/* Copy socket address */
+	if (msg->msg_namelen) {
+	    memcpy(msg->msg_name, &scan, sizeof(struct sockaddr_can));
+	    msg->msg_namelen = sizeof(struct sockaddr_can);
+	}
+
+	/* Copy CAN frame */
+	memcpy(iov->iov_base, &frame, sizeof(can_frame_t));
+	/* Adjust iovec in the common way */
+	iov->iov_base += sizeof(can_frame_t);
+	iov->iov_len -= sizeof(can_frame_t);
+
+	/* Copy timestamp if existent and wanted */
+	if (msg->msg_controllen) {
+	    if (can_dlc & RTCAN_HAS_TIMESTAMP) {
+		memcpy(msg->msg_control, &timestamp, RTCAN_TIMESTAMP_SIZE);
+		msg->msg_controllen = RTCAN_TIMESTAMP_SIZE;
+	    } else
+		msg->msg_controllen = 0;
+	}
+    }
+
+
+    return sizeof(can_frame_t);
+}
+
+
+ssize_t rtcan_raw_sendmsg(struct rtdm_fd *fd,
+			  const struct user_msghdr *msg, int flags)
+{
+    struct rtcan_socket *sock = rtdm_fd_to_private(fd);
+    struct sockaddr_can *scan = (struct sockaddr_can *)msg->msg_name;
+    struct sockaddr_can scan_buf;
+    struct iovec *iov = (struct iovec *)msg->msg_iov;
+    struct iovec iov_buf;
+    can_frame_t *frame;
+    can_frame_t frame_buf;
+    rtdm_lockctx_t lock_ctx;
+    nanosecs_rel_t timeout = 0;
+    struct tx_wait_queue tx_wait;
+    struct rtcan_device *dev;
+    int ifindex = 0;
+    int ret  = 0;
+    spl_t s;
+
+
+    if (flags & MSG_OOB)   /* Mirror BSD error message compatibility */
+	return -EOPNOTSUPP;
+
+    /* Only MSG_DONTWAIT is a valid flag. */
+    if (flags & ~MSG_DONTWAIT)
+	return -EINVAL;
+
+    /* Check msg_iovlen, only one buffer allowed */
+    if (msg->msg_iovlen != 1)
+	return -EMSGSIZE;
+
+    if (scan == NULL) {
+	/* No socket address. Will use bound interface for sending */
+
+	if (msg->msg_namelen != 0)
+	    return -EINVAL;
+
+
+	/* We only want a consistent value here, a spin lock would be
+	 * overkill. Nevertheless, the binding could change before we get
+	 * the chance to send. Blame the user, though. */
+	ifindex = atomic_read(&sock->ifindex);
+
+	if (!ifindex)
+	    /* Socket isn't bound, or is bound to all interfaces. Bail out. */
+	    return -ENXIO;
+    } else {
+	/* Socket address given */
+	if (msg->msg_namelen < sizeof(struct sockaddr_can))
+	    return -EINVAL;
+
+	if (rtdm_fd_is_user(fd)) {
+	    /* Copy socket address from userspace */
+	    if (!rtdm_read_user_ok(fd, msg->msg_name,
+				   sizeof(struct sockaddr_can)) ||
+		rtdm_copy_from_user(fd, &scan_buf, msg->msg_name,
+				    sizeof(struct sockaddr_can)))
+		return -EFAULT;
+
+	    scan = &scan_buf;
+	}
+
+	/* Check address family */
+	if (scan->can_family != AF_CAN)
+	    return -EINVAL;
+
+	ifindex = scan->can_ifindex;
+    }
+
+    if (rtdm_fd_is_user(fd)) {
+	/* Copy IO vector from userspace */
+	if (!rtdm_rw_user_ok(fd, msg->msg_iov,
+			     sizeof(struct iovec)) ||
+	    rtdm_copy_from_user(fd, &iov_buf, msg->msg_iov,
+				sizeof(struct iovec)))
+	    return -EFAULT;
+
+	iov = &iov_buf;
+    }
+
+    /* Check size of buffer */
+    if (iov->iov_len != sizeof(can_frame_t))
+	return -EMSGSIZE;
+
+    frame = (can_frame_t *)iov->iov_base;
+
+    if (rtdm_fd_is_user(fd)) {
+	/* Copy CAN frame from userspace */
+	if (!rtdm_read_user_ok(fd, iov->iov_base,
+			       sizeof(can_frame_t)) ||
+	    rtdm_copy_from_user(fd, &frame_buf, iov->iov_base,
+				sizeof(can_frame_t)))
+	    return -EFAULT;
+
+	frame = &frame_buf;
+    }
+
+    /* Adjust iovec in the common way */
+    iov->iov_base += sizeof(can_frame_t);
+    iov->iov_len -= sizeof(can_frame_t);
+    /* ... and copy it back to userspace if necessary */
+    if (rtdm_fd_is_user(fd)) {
+	if (rtdm_copy_to_user(fd, msg->msg_iov, iov,
+			      sizeof(struct iovec)))
+	    return -EFAULT;
+    }
+
+    /* At last, we've got the frame ... */
+
+    /* Check if DLC between 0 and 15 */
+    if (frame->can_dlc > 15)
+	return -EINVAL;
+
+    /* Check if it is a standard frame and the ID is between 0 and 2031 */
+    if (!(frame->can_id & CAN_EFF_FLAG)) {
+	u32 id = frame->can_id & CAN_EFF_MASK;
+	if (id > (CAN_SFF_MASK - 16))
+	    return -EINVAL;
+    }
+
+    if ((dev = rtcan_dev_get_by_index(ifindex)) == NULL)
+	return -ENXIO;
+
+    timeout = (flags & MSG_DONTWAIT) ? RTDM_TIMEOUT_NONE : sock->tx_timeout;
+
+    tx_wait.rt_task = rtdm_task_current();
+
+    /* Register the task at the socket's TX wait queue and decrement
+     * the TX semaphore. This must be atomic. Finally, the task must
+     * be deregistered again (also atomic). */
+    cobalt_atomic_enter(s);
+
+    list_add(&tx_wait.tx_wait_list, &sock->tx_wait_head);
+
+    /* Try to pass the guard in order to access the controller */
+    ret = rtdm_sem_timeddown(&dev->tx_sem, timeout, NULL);
+
+    /* Only dequeue task again if socket isn't being closed i.e. if
+     * this task was not unblocked within the close() function. */
+    if (likely(!list_empty(&tx_wait.tx_wait_list)))
+	/* Dequeue this task from the TX wait queue */
+	list_del_init(&tx_wait.tx_wait_list);
+    else
+	/* The socket was closed. */
+	ret = -EBADF;
+
+    cobalt_atomic_leave(s);
+
+    /* Error code returned? */
+    if (ret != 0) {
+	/* Which error code? */
+	switch (ret) {
+	case -EIDRM:
+	    /* Controller is stopped or bus-off */
+	    ret = -ENETDOWN;
+	    goto send_out1;
+
+	case -EWOULDBLOCK:
+	    /* We would block but don't want to */
+	    ret = -EAGAIN;
+	    goto send_out1;
+
+	default:
+	    /* Return all other error codes unmodified. */
+	    goto send_out1;
+	}
+    }
+
+    /* We got access */
+
+
+    /* Push message onto stack for loopback when TX done */
+    if (rtcan_loopback_enabled(sock))
+	rtcan_tx_push(dev, sock, frame);
+
+    rtdm_lock_get_irqsave(&dev->device_lock, lock_ctx);
+
+    /* Controller should be operating */
+    if (!CAN_STATE_OPERATING(dev->state)) {
+	if (dev->state == CAN_STATE_SLEEPING) {
+	    ret = -ECOMM;
+	    rtdm_lock_put_irqrestore(&dev->device_lock, lock_ctx);
+	    rtdm_sem_up(&dev->tx_sem);
+	    goto send_out1;
+	}
+	ret = -ENETDOWN;
+	goto send_out2;
+    }
+
+    dev->tx_count++;
+    ret = dev->hard_start_xmit(dev, frame);
+
+    /* Return number of bytes sent upon successful completion */
+    if (ret == 0)
+	ret = sizeof(can_frame_t);
+
+ send_out2:
+    rtdm_lock_put_irqrestore(&dev->device_lock, lock_ctx);
+ send_out1:
+    rtcan_dev_dereference(dev);
+    return ret;
+}
+
+
+static struct rtdm_driver rtcan_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(rtcan,
+						    RTDM_CLASS_CAN,
+						    RTDM_SUBCLASS_GENERIC,
+						    RTCAN_PROFILE_VER),
+	.device_flags		= RTDM_PROTOCOL_DEVICE,
+	.device_count		= 1,
+	.context_size		= sizeof(struct rtcan_socket),
+	.protocol_family	= PF_CAN,
+	.socket_type		= SOCK_RAW,
+	.ops = {
+		.socket		= rtcan_raw_socket,
+		.close		= rtcan_raw_close,
+		.ioctl_nrt	= rtcan_raw_ioctl,
+		.recvmsg_rt	= rtcan_raw_recvmsg,
+		.sendmsg_rt	= rtcan_raw_sendmsg,
+	},
+};
+
+static struct rtdm_device rtcan_device = {
+	.driver = &rtcan_driver,
+	.label = "rtcan",
+};
+
+int __init rtcan_raw_proto_register(void)
+{
+    return rtdm_dev_register(&rtcan_device);
+}
+
+void __exit rtcan_raw_proto_unregister(void)
+{
+    rtdm_dev_unregister(&rtcan_device);
+}
+
+
+EXPORT_SYMBOL_GPL(rtcan_rcv);
+++ linux-patched/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd_user.h	2022-03-21 12:58:29.220890609 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/peak_canfd/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * CAN driver for PEAK System micro-CAN based adapters.
+ *
+ * Copyright (C) 2001-2021 PEAK System-Technik GmbH
+ * Copyright (C) 2019-2021 Stephane Grosjean <s.grosjean@peak-system.com>
+ */
+#ifndef PEAK_CANFD_USER_H
+#define PEAK_CANFD_USER_H
+
+#include <linux/can/dev/peak_canfd.h>
+
+#define CAN_MAX_DLC		8
+#define get_can_dlc(i)		(min_t(__u8, (i), CAN_MAX_DLC))
+
+struct peak_berr_counter {
+	__u16 txerr;
+	__u16 rxerr;
+};
+
+/* data structure private to each uCAN interface */
+struct peak_canfd_priv {
+	struct rtcan_device *rdev;	/* RTCAN device */
+	int index;			/* channel index */
+
+	struct peak_berr_counter bec;
+
+	int cmd_len;
+	void *cmd_buffer;
+	int cmd_maxlen;
+
+	int (*pre_cmd)(struct peak_canfd_priv *priv);
+	int (*write_cmd)(struct peak_canfd_priv *priv);
+	int (*post_cmd)(struct peak_canfd_priv *priv);
+
+	int (*enable_tx_path)(struct peak_canfd_priv *priv);
+	void *(*alloc_tx_msg)(struct peak_canfd_priv *priv, u16 msg_size,
+			      int *room_left);
+	int (*write_tx_msg)(struct peak_canfd_priv *priv,
+			    struct pucan_tx_msg *msg);
+};
+
+struct rtcan_device *alloc_peak_canfd_dev(int sizeof_priv, int index);
+void rtcan_peak_pciefd_remove_proc(struct rtcan_device *rdev);
+int rtcan_peak_pciefd_create_proc(struct rtcan_device *rdev);
+
+int peak_canfd_handle_msg(struct peak_canfd_priv *priv,
+			  struct pucan_rx_msg *msg);
+int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
+				struct pucan_rx_msg *rx_msg, int rx_count);
+#endif
+++ linux-patched/drivers/xenomai/can/peak_canfd/Kconfig	2022-03-21 12:58:29.217890639 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:3 @
--- linux/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd.c	1970-01-01 01:00:00.000000000 +0100
+config XENO_DRIVERS_CAN_PEAK_CANFD
+	depends on XENO_DRIVERS_CAN && PCI && !XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+	tristate "PEAK driver for PCAN-PCIe FD family"
+	help
+
+	This driver supports the PCAN-PCIe FD board family from PEAK-System.
+++ linux-patched/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd.c	2022-03-21 12:58:29.213890678 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/peak_canfd/Makefile	1970-01-01 01:00:00.000000000 +0100
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CANFD firmware interface.
+ *
+ * Copyright (C) 2001-2021 PEAK System-Technik GmbH
+ * Copyright (C) 2019-2021 Stephane Grosjean <s.grosjean@peak-system.com>
+ */
+#include "rtcan_dev.h"
+#include "rtcan_raw.h"
+#include "rtcan_peak_canfd_user.h"
+
+#define DRV_NAME		"xeno_peak_canfd"
+
+#define RTCAN_DEV_NAME		"rtcan%d"
+#define RTCAN_CTRLR_NAME	"peak_canfd"
+
+/* bittiming ranges of the PEAK-System PC CAN-FD interfaces */
+static const struct can_bittiming_const peak_canfd_nominal_const = {
+	.name = RTCAN_CTRLR_NAME,
+	.tseg1_min = 1,
+	.tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
+	.tseg2_min = 1,
+	.tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
+	.sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
+	.brp_min = 1,
+	.brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
+	.brp_inc = 1,
+};
+
+/* initialize the command area */
+static struct peak_canfd_priv *pucan_init_cmd(struct peak_canfd_priv *priv)
+{
+	priv->cmd_len = 0;
+	return priv;
+}
+
+/* add command 'cmd_op' to the command area */
+static void *pucan_add_cmd(struct peak_canfd_priv *priv, int cmd_op)
+{
+	struct pucan_command *cmd;
+
+	if (priv->cmd_len + sizeof(*cmd) > priv->cmd_maxlen)
+		return NULL;
+
+	cmd = priv->cmd_buffer + priv->cmd_len;
+
+	/* reset all unused bits to default */
+	memset(cmd, 0, sizeof(*cmd));
+
+	cmd->opcode_channel = pucan_cmd_opcode_channel(priv->index, cmd_op);
+	priv->cmd_len += sizeof(*cmd);
+
+	return cmd;
+}
+
+/* send the command(s) to the IP core through the host-device interface */
+static int pucan_write_cmd(struct peak_canfd_priv *priv)
+{
+	int err;
+
+	/* prepare environment before writing the command */
+	if (priv->pre_cmd) {
+		err = priv->pre_cmd(priv);
+		if (err)
+			return err;
+	}
+
+	err = priv->write_cmd(priv);
+	if (err)
+		return err;
+
+	/* update environment after writing the command */
+	if (priv->post_cmd)
+		err = priv->post_cmd(priv);
+
+	return err;
+}
+
+/* set the device in RESET mode */
+static int pucan_set_reset_mode(struct peak_canfd_priv *priv)
+{
+	int err;
+
+	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RESET_MODE);
+	err = pucan_write_cmd(priv);
+	if (!err)
+		priv->rdev->state = CAN_STATE_STOPPED;
+
+	return err;
+}
+
+/* set the device in NORMAL mode */
+static int pucan_set_normal_mode(struct peak_canfd_priv *priv)
+{
+	int err;
+
+	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_NORMAL_MODE);
+	err = pucan_write_cmd(priv);
+	if (!err)
+		priv->rdev->state = CAN_STATE_ERROR_ACTIVE;
+
+	return err;
+}
+
+/* set the device in LISTEN_ONLY mode */
+static int pucan_set_listen_only_mode(struct peak_canfd_priv *priv)
+{
+	int err;
+
+	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_LISTEN_ONLY_MODE);
+	err = pucan_write_cmd(priv);
+	if (!err)
+		priv->rdev->state = CAN_STATE_ERROR_ACTIVE;
+
+	return err;
+}
+
+/* set acceptance filters */
+static int pucan_set_std_filter(struct peak_canfd_priv *priv, u8 row, u32 mask)
+{
+	struct pucan_std_filter *cmd;
+
+	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_STD_FILTER);
+
+	/* Each of the 11-bit CAN ID values is represented by one bit in a
+	 * 64-row array of 32 columns: the upper 6 bits of the CAN ID select
+	 * the row while the lowest 5 bits select the column within that row.
+	 *
+	 * bit  filter
+	 * 1    passed
+	 * 0    discarded
+	 */
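+
+	/*
+	 * Worked example (illustrative only): for standard ID 0x123,
+	 * row = 0x123 >> 5 = 9 and column = 0x123 & 0x1f = 3, so letting
+	 * only that ID pass means writing a mask with bit 3 set to row 9
+	 * and a zero mask to every other row.
+	 */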
+
+	/* select the row */
+	cmd->idx = row;
+
+	/* set/unset bits in the row */
+	cmd->mask = cpu_to_le32(mask);
+
+	return pucan_write_cmd(priv);
+}
+
+/* request the device to stop transmission */
+static int pucan_tx_abort(struct peak_canfd_priv *priv, u16 flags)
+{
+	struct pucan_tx_abort *cmd;
+
+	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TX_ABORT);
+
+	cmd->flags = cpu_to_le16(flags);
+
+	return pucan_write_cmd(priv);
+}
+
+/* request the device to clear rx/tx error counters */
+static int pucan_clr_err_counters(struct peak_canfd_priv *priv)
+{
+	struct pucan_wr_err_cnt *cmd;
+
+	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_WR_ERR_CNT);
+
+	cmd->sel_mask = cpu_to_le16(PUCAN_WRERRCNT_TE | PUCAN_WRERRCNT_RE);
+
+	/* write the counters new value */
+	cmd->tx_counter = 0;
+	cmd->rx_counter = 0;
+
+	return pucan_write_cmd(priv);
+}
+
+/* set options to the device */
+static int pucan_set_options(struct peak_canfd_priv *priv, u16 opt_mask)
+{
+	struct pucan_options *cmd;
+
+	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_EN_OPTION);
+
+	cmd->options = cpu_to_le16(opt_mask);
+
+	return pucan_write_cmd(priv);
+}
+
+/* request the device to notify the driver when Tx path is ready */
+static int pucan_setup_rx_barrier(struct peak_canfd_priv *priv)
+{
+	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RX_BARRIER);
+
+	return pucan_write_cmd(priv);
+}
+
+/* handle the reception of one CAN frame */
+static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
+			       struct pucan_rx_msg *msg)
+{
+	struct rtcan_skb skb = { .rb_frame_size = EMPTY_RB_FRAME_SIZE, };
+	struct rtcan_rb_frame *cf = &skb.rb_frame;
+	struct rtcan_device *rdev = priv->rdev;
+	const u16 rx_msg_flags = le16_to_cpu(msg->flags);
+
+	if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
+		/* CAN-FD frames are silently discarded */
+		return 0;
+	}
+
+	cf->can_id = le32_to_cpu(msg->can_id);
+	cf->can_dlc = get_can_dlc(pucan_msg_get_dlc(msg));
+
+	if (rx_msg_flags & PUCAN_MSG_EXT_ID)
+		cf->can_id |= CAN_EFF_FLAG;
+
+	if (rx_msg_flags & PUCAN_MSG_RTR)
+		cf->can_id |= CAN_RTR_FLAG;
+	else {
+		memcpy(cf->data, msg->d, cf->can_dlc);
+		skb.rb_frame_size += cf->can_dlc;
+	}
+
+	cf->can_ifindex = rdev->ifindex;
+
+	/* Pass received frame out to the sockets */
+	rtcan_rcv(rdev, &skb);
+
+	return 0;
+}
+
+/* handle rx/tx error counters notification */
+static int pucan_handle_error(struct peak_canfd_priv *priv,
+			      struct pucan_error_msg *msg)
+{
+	priv->bec.txerr = msg->tx_err_cnt;
+	priv->bec.rxerr = msg->rx_err_cnt;
+
+	return 0;
+}
+
+/* handle status notification */
+static int pucan_handle_status(struct peak_canfd_priv *priv,
+			       struct pucan_status_msg *msg)
+{
+	struct rtcan_skb skb = { .rb_frame_size = EMPTY_RB_FRAME_SIZE, };
+	struct rtcan_rb_frame *cf = &skb.rb_frame;
+	struct rtcan_device *rdev = priv->rdev;
+
+	/* this STATUS is the CNF of the RX_BARRIER: Tx path can be set up */
+	if (pucan_status_is_rx_barrier(msg)) {
+		if (priv->enable_tx_path) {
+			int err = priv->enable_tx_path(priv);
+
+			if (err)
+				return err;
+		}
+
+		/* unlock senders */
+		rtdm_sem_up(&rdev->tx_sem);
+		return 0;
+	}
+
+	/* otherwise, it's a BUS status */
+	cf->can_id = CAN_ERR_FLAG;
+	cf->can_dlc = CAN_ERR_DLC;
+
+	/* test state error bits according to their priority */
+	if (pucan_status_is_busoff(msg)) {
+		rtdm_printk(DRV_NAME " CAN%u: Bus-off entry status\n",
+			    priv->index+1);
+		rdev->state = CAN_STATE_BUS_OFF;
+		cf->can_id |= CAN_ERR_BUSOFF;
+
+		/* wakeup waiting senders */
+		rtdm_sem_destroy(&rdev->tx_sem);
+
+	} else if (pucan_status_is_passive(msg)) {
+		rtdm_printk(DRV_NAME " CAN%u: Error passive status\n",
+			    priv->index+1);
+		rdev->state = CAN_STATE_ERROR_PASSIVE;
+		cf->can_id |= CAN_ERR_CRTL;
+		cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
+					CAN_ERR_CRTL_TX_PASSIVE :
+					CAN_ERR_CRTL_RX_PASSIVE;
+		cf->data[6] = priv->bec.txerr;
+		cf->data[7] = priv->bec.rxerr;
+
+	} else if (pucan_status_is_warning(msg)) {
+		rtdm_printk(DRV_NAME " CAN%u: Error warning status\n",
+			    priv->index+1);
+		rdev->state = CAN_STATE_ERROR_WARNING;
+
+		cf->can_id |= CAN_ERR_CRTL;
+		cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
+					CAN_ERR_CRTL_TX_WARNING :
+					CAN_ERR_CRTL_RX_WARNING;
+		cf->data[6] = priv->bec.txerr;
+		cf->data[7] = priv->bec.rxerr;
+
+	} else if (rdev->state != CAN_STATE_ERROR_ACTIVE) {
+		/* back to ERROR_ACTIVE */
+		rtdm_printk(DRV_NAME " CAN%u: Error active status\n",
+			    priv->index+1);
+		rdev->state = CAN_STATE_ERROR_ACTIVE;
+	}
+
+	skb.rb_frame_size += cf->can_dlc;
+	cf->can_ifindex = rdev->ifindex;
+
+	/* Pass received frame out to the sockets */
+	rtcan_rcv(rdev, &skb);
+
+	return 0;
+}
+
+/* handle IP core Rx overflow notification */
+static int pucan_handle_cache_critical(struct peak_canfd_priv *priv)
+{
+	struct rtcan_skb skb = { .rb_frame_size = EMPTY_RB_FRAME_SIZE, };
+	struct rtcan_rb_frame *cf = &skb.rb_frame;
+	struct rtcan_device *rdev = priv->rdev;
+
+	cf->can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
+	cf->can_dlc = CAN_ERR_DLC;
+
+	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+	cf->data[6] = priv->bec.txerr;
+	cf->data[7] = priv->bec.rxerr;
+
+	skb.rb_frame_size += cf->can_dlc;
+	cf->can_ifindex = rdev->ifindex;
+
+	/* Pass received frame out to the sockets */
+	rtcan_rcv(rdev, &skb);
+
+	return 0;
+}
+
+/* handle a single uCAN message */
+int peak_canfd_handle_msg(struct peak_canfd_priv *priv,
+			  struct pucan_rx_msg *msg)
+{
+	u16 msg_type = le16_to_cpu(msg->type);
+	int msg_size = le16_to_cpu(msg->size);
+	int err;
+
+	if (!msg_size || !msg_type) {
+		/* null packet found: end of list */
+		goto exit;
+	}
+
+	switch (msg_type) {
+	case PUCAN_MSG_CAN_RX:
+		err = pucan_handle_can_rx(priv, (struct pucan_rx_msg *)msg);
+		break;
+	case PUCAN_MSG_ERROR:
+		err = pucan_handle_error(priv, (struct pucan_error_msg *)msg);
+		break;
+	case PUCAN_MSG_STATUS:
+		err = pucan_handle_status(priv,
+					  (struct pucan_status_msg *)msg);
+		break;
+	case PUCAN_MSG_CACHE_CRITICAL:
+		err = pucan_handle_cache_critical(priv);
+		break;
+	default:
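+		/* silently ignore unsupported message types */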
+		err = 0;
+	}
+
+	if (err < 0)
+		return err;
+
+exit:
+	return msg_size;
+}
+
+/* handle a list of up to msg_count messages starting at msg_list; return the
+ * number of messages handled, or a negative error code
+ */
+int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
+				struct pucan_rx_msg *msg_list, int msg_count)
+{
+	void *msg_ptr = msg_list;
+	int i, msg_size = 0;
+
+	for (i = 0; i < msg_count; i++) {
+		msg_size = peak_canfd_handle_msg(priv, msg_ptr);
+
+		/* a null packet can be found at the end of a list */
+		if (msg_size <= 0)
+			break;
+
+		msg_ptr += ALIGN(msg_size, 4);
+	}
+
+	if (msg_size < 0)
+		return msg_size;
+
+	return i;
+}
+
+/* start the device (set the IP core in NORMAL or LISTEN-ONLY mode) */
+static int peak_canfd_start(struct rtcan_device *rdev,
+			    rtdm_lockctx_t *lock_ctx)
+{
+	struct peak_canfd_priv *priv = rdev->priv;
+	int i, err = 0;
+
+	switch (rdev->state) {
+	case CAN_STATE_BUS_OFF:
+	case CAN_STATE_STOPPED:
+		err = pucan_set_reset_mode(priv);
+		if (err)
+			break;
+
+		/* set needed option: get rx/tx error counters */
+		err = pucan_set_options(priv, PUCAN_OPTION_ERROR);
+		if (err)
+			break;
+
+		/* accept all standard CAN ID */
+		for (i = 0; i <= PUCAN_FLTSTD_ROW_IDX_MAX; i++)
+			pucan_set_std_filter(priv, i, 0xffffffff);
+
+		/* clear device rx/tx error counters */
+		err = pucan_clr_err_counters(priv);
+		if (err)
+			break;
+
+		/* set requested mode */
+		if (priv->rdev->ctrl_mode & CAN_CTRLMODE_LISTENONLY)
+			err = pucan_set_listen_only_mode(priv);
+		else
+			err = pucan_set_normal_mode(priv);
+
+		rtdm_sem_init(&rdev->tx_sem, 1);
+
+		/* receiving the RX_BARRIER status says when the Tx path is ready */
+		err = pucan_setup_rx_barrier(priv);
+		break;
+
+	default:
+		break;
+	}
+
+	return err;
+}
+
+/* stop the device (set the IP core in RESET mode) */
+static int peak_canfd_stop(struct rtcan_device *rdev,
+			   rtdm_lockctx_t *lock_ctx)
+{
+	struct peak_canfd_priv *priv = rdev->priv;
+	int err = 0;
+
+	switch (rdev->state) {
+	case CAN_STATE_BUS_OFF:
+	case CAN_STATE_STOPPED:
+		break;
+
+	default:
+		/* go back to RESET mode */
+		err = pucan_set_reset_mode(priv);
+		if (err) {
+			rtdm_printk(DRV_NAME " CAN%u: reset failed\n",
+				    priv->index+1);
+			break;
+		}
+
+		/* abort last Tx (MUST be done in RESET mode only!) */
+		pucan_tx_abort(priv, PUCAN_TX_ABORT_FLUSH);
+
+		rtdm_sem_destroy(&rdev->tx_sem);
+		break;
+	}
+
+	return err;
+}
+
+/* RT-Socket-CAN driver interface */
+static int peak_canfd_set_mode(struct rtcan_device *rdev, can_mode_t mode,
+			       rtdm_lockctx_t *lock_ctx)
+{
+	int err = 0;
+
+	switch (mode) {
+	case CAN_MODE_STOP:
+		err = peak_canfd_stop(rdev, lock_ctx);
+		break;
+	case CAN_MODE_START:
+		err = peak_canfd_start(rdev, lock_ctx);
+		break;
+	case CAN_MODE_SLEEP:
+		/* Controller must operate, otherwise go out */
+		if (!CAN_STATE_OPERATING(rdev->state)) {
+			err = -ENETDOWN;
+			break;
+		}
+		if (rdev->state == CAN_STATE_SLEEPING)
+			break;
+
+		fallthrough;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static int peak_canfd_set_bittiming(struct rtcan_device *rdev,
+				    struct can_bittime *pbt,
+				    rtdm_lockctx_t *lock_ctx)
+{
+	struct peak_canfd_priv *priv = rdev->priv;
+	struct pucan_timing_slow *cmd;
+
+	/* can't support BTR0BTR1 mode with clock greater than 8 MHz */
+	if (pbt->type != CAN_BITTIME_STD) {
+		rtdm_printk(DRV_NAME
+			    " CAN%u: unsupported bittiming mode %u\n",
+			    priv->index+1, pbt->type);
+		return -EINVAL;
+	}
+
+	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_SLOW);
+
+	cmd->sjw_t = PUCAN_TSLOW_SJW_T(pbt->std.sjw - 1,
+				       priv->rdev->ctrl_mode &
+						CAN_CTRLMODE_3_SAMPLES);
+
+	cmd->tseg1 = PUCAN_TSLOW_TSEG1(pbt->std.prop_seg +
+				       pbt->std.phase_seg1 - 1);
+	cmd->tseg2 = PUCAN_TSLOW_TSEG2(pbt->std.phase_seg2 - 1);
+	cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(pbt->std.brp - 1));
+
+	cmd->ewl = 96;	/* default */
+
+	rtdm_printk(DRV_NAME ": nominal: brp=%u tseg1=%u tseg2=%u sjw=%u\n",
+		    le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw_t);
+
+	return pucan_write_cmd(priv);
+}
+
+/* hard transmit callback: write the CAN frame to the device */
+static netdev_tx_t peak_canfd_start_xmit(struct rtcan_device *rdev,
+					 can_frame_t *cf)
+{
+	struct peak_canfd_priv *priv = rdev->priv;
+	struct pucan_tx_msg *msg;
+	u16 msg_size, msg_flags;
+	int room_left;
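+	/* clamp the DLC to the classical CAN maximum of 8 data bytes */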
+	const u8 dlc = (cf->can_dlc > CAN_MAX_DLC) ? CAN_MAX_DLC : cf->can_dlc;
+
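+	/* Tx records are padded to a 32-bit boundary in the Tx DMA area */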
+	msg_size = ALIGN(sizeof(*msg) + dlc, 4);
+	msg = priv->alloc_tx_msg(priv, msg_size, &room_left);
+
+	/* should never happen except under bus-off condition and
+	 * (auto-)restart mechanism
+	 */
+	if (!msg) {
+		rtdm_printk(DRV_NAME
+			    " CAN%u: skb lost (No room left in tx buffer)\n",
+			    priv->index+1);
+		return 0;
+	}
+
+	msg->size = cpu_to_le16(msg_size);
+	msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);
+	msg_flags = 0;
+	if (cf->can_id & CAN_EFF_FLAG) {
+		msg_flags |= PUCAN_MSG_EXT_ID;
+		msg->can_id = cpu_to_le32(cf->can_id & CAN_EFF_MASK);
+	} else {
+		msg->can_id = cpu_to_le32(cf->can_id & CAN_SFF_MASK);
+	}
+
+	if (cf->can_id & CAN_RTR_FLAG)
+		msg_flags |= PUCAN_MSG_RTR;
+
+	/* set driver specific bit to differentiate from application
+	 * loopback
+	 */
+	if (rdev->ctrl_mode & CAN_CTRLMODE_LOOPBACK)
+		msg_flags |= PUCAN_MSG_LOOPED_BACK;
+
+	msg->flags = cpu_to_le16(msg_flags);
+	msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(priv->index, dlc);
+	memcpy(msg->d, cf->data, dlc);
+
+	/* write the skb on the interface */
+	priv->write_tx_msg(priv, msg);
+
+	/* flow control: only wake a sender if a full-size frame still fits */
+	if (room_left > (sizeof(*msg) + CAN_MAX_DLC))
+		rtdm_sem_up(&rdev->tx_sem);
+
+	return 0;
+}
+
+/* allocate an rtcan device for channel #index, with enough space to store
+ * private information.
+ */
+struct rtcan_device *alloc_peak_canfd_dev(int sizeof_priv, int index)
+{
+	struct rtcan_device *rdev;
+	struct peak_canfd_priv *priv;
+
+	/* allocate the candev object */
+	rdev = rtcan_dev_alloc(sizeof_priv, 0);
+	if (!rdev)
+		return NULL;
+
+	/* RTCAN part initialization */
+	strncpy(rdev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+	rdev->ctrl_name = RTCAN_CTRLR_NAME;
+	rdev->can_sys_clock = 80*1000*1000;	/* default */
+	rdev->state = CAN_STATE_STOPPED;
+	rdev->hard_start_xmit = peak_canfd_start_xmit;
+	rdev->do_set_mode = peak_canfd_set_mode;
+	rdev->do_set_bit_time = peak_canfd_set_bittiming;
+	rdev->bittiming_const = &peak_canfd_nominal_const;
+
+	priv = rdev->priv;
+
+	/* private part initialization */
+	priv->rdev = rdev;
+	priv->index = index;
+	priv->cmd_len = 0;
+	priv->bec.txerr = 0;
+	priv->bec.rxerr = 0;
+
+	return rdev;
+}
+++ linux-patched/drivers/xenomai/can/peak_canfd/Makefile	2022-03-21 12:58:29.209890717 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/peak_canfd/rtcan_peak_pciefd.c	1970-01-01 01:00:00.000000000 +0100
+#
+# Makefile for the PEAK-System CAN-FD IP module drivers
+#
+ccflags-y += -I$(srctree)/drivers/xenomai/can
+
+obj-$(CONFIG_XENO_DRIVERS_CAN_PEAK_CANFD) += xeno_can_peak_pciefd.o
+
+xeno_can_peak_pciefd-y := rtcan_peak_pciefd.o rtcan_peak_canfd.o
+++ linux-patched/drivers/xenomai/can/peak_canfd/rtcan_peak_pciefd.c	2022-03-21 12:58:29.206890746 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/rtcan_module.c	1970-01-01 01:00:00.000000000 +0100
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CAN driver PCI interface.
+ *
+ * Copyright (C) 2001-2021 PEAK System-Technik GmbH
+ * Copyright (C) 2019-2021 Stephane Grosjean <s.grosjean@peak-system.com>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+
+#include <rtdm/driver.h>
+
+#include <rtdm/can.h>
+
+#include "rtcan_dev.h"
+#include "rtcan_raw.h"
+#include "rtcan_peak_canfd_user.h"
+
+#ifdef CONFIG_PCI_MSI
+#define PCIEFD_USES_MSI
+#endif
+
+#ifndef struct_size
+#define struct_size(p, member, n)	((n)*sizeof(*(p)->member) + \
+					 sizeof(*(p)))
+#endif
+
+#define DRV_NAME			"xeno_peak_pciefd"
+
+static char *pciefd_board_name = "PEAK-PCIe FD";
+
+MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_DESCRIPTION("RTCAN driver for PEAK PCAN PCIe/M.2 FD family cards");
+MODULE_LICENSE("GPL v2");
+
+#define PEAK_PCI_VENDOR_ID	0x001c	/* The PCI device and vendor IDs */
+#define PEAK_PCIEFD_ID		0x0013	/* for PCIe slot cards */
+#define PCAN_CPCIEFD_ID		0x0014	/* for Compact-PCI Serial slot cards */
+#define PCAN_PCIE104FD_ID	0x0017	/* for PCIe-104 Express slot cards */
+#define PCAN_MINIPCIEFD_ID	0x0018	/* for mini-PCIe slot cards */
+#define PCAN_PCIEFD_OEM_ID	0x0019	/* for PCIe slot OEM cards */
+#define PCAN_M2_ID		0x001a	/* for M2 slot cards */
+
+/* supported device ids. */
+static const struct pci_device_id peak_pciefd_tbl[] = {
+	{PEAK_PCI_VENDOR_ID, PEAK_PCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_CPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_PCIE104FD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_MINIPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_PCIEFD_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_M2_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, peak_pciefd_tbl);
+
+/* PEAK PCIe board access description */
+#define PCIEFD_BAR0_SIZE		(64 * 1024)
+#define PCIEFD_RX_DMA_SIZE		(4 * 1024)
+#define PCIEFD_TX_DMA_SIZE		(4 * 1024)
+
+#define PCIEFD_TX_PAGE_SIZE		(2 * 1024)
+
+/* System Control Registers */
+#define PCIEFD_REG_SYS_CTL_SET		0x0000	/* set bits */
+#define PCIEFD_REG_SYS_CTL_CLR		0x0004	/* clear bits */
+
+/* Version info registers */
+#define PCIEFD_REG_SYS_VER1		0x0040	/* version reg #1 */
+#define PCIEFD_REG_SYS_VER2		0x0044	/* version reg #2 */
+
+#define PCIEFD_FW_VERSION(x, y, z)	(((u32)(x) << 24) | \
+					 ((u32)(y) << 16) | \
+					 ((u32)(z) << 8))
+
+/* System Control Registers Bits */
+#define PCIEFD_SYS_CTL_TS_RST		0x00000001	/* timestamp clock */
+#define PCIEFD_SYS_CTL_CLK_EN		0x00000002	/* system clock */
+
+/* CAN-FD channel addresses */
+#define PCIEFD_CANX_OFF(c)		(((c) + 1) * 0x1000)
+
+#define PCIEFD_ECHO_SKB_MAX		PCANFD_ECHO_SKB_DEF
+
+/* CAN-FD channel registers */
+#define PCIEFD_REG_CAN_MISC		0x0000	/* Misc. control */
+#define PCIEFD_REG_CAN_CLK_SEL		0x0008	/* Clock selector */
+#define PCIEFD_REG_CAN_CMD_PORT_L	0x0010	/* 64-bits command port */
+#define PCIEFD_REG_CAN_CMD_PORT_H	0x0014
+#define PCIEFD_REG_CAN_TX_REQ_ACC	0x0020	/* Tx request accumulator */
+#define PCIEFD_REG_CAN_TX_CTL_SET	0x0030	/* Tx control set register */
+#define PCIEFD_REG_CAN_TX_CTL_CLR	0x0038	/* Tx control clear register */
+#define PCIEFD_REG_CAN_TX_DMA_ADDR_L	0x0040	/* 64-bits addr for Tx DMA */
+#define PCIEFD_REG_CAN_TX_DMA_ADDR_H	0x0044
+#define PCIEFD_REG_CAN_RX_CTL_SET	0x0050	/* Rx control set register */
+#define PCIEFD_REG_CAN_RX_CTL_CLR	0x0058	/* Rx control clear register */
+#define PCIEFD_REG_CAN_RX_CTL_WRT	0x0060	/* Rx control write register */
+#define PCIEFD_REG_CAN_RX_CTL_ACK	0x0068	/* Rx control ACK register */
+#define PCIEFD_REG_CAN_RX_DMA_ADDR_L	0x0070	/* 64-bits addr for Rx DMA */
+#define PCIEFD_REG_CAN_RX_DMA_ADDR_H	0x0074
+
+/* CAN-FD channel misc register bits */
+#define CANFD_MISC_TS_RST		0x00000001	/* timestamp cnt rst */
+
+/* CAN-FD channel Clock SELector Source & DIVider */
+#define CANFD_CLK_SEL_DIV_MASK		0x00000007
+#define CANFD_CLK_SEL_DIV_60MHZ		0x00000000	/* SRC=240MHz only */
+#define CANFD_CLK_SEL_DIV_40MHZ		0x00000001	/* SRC=240MHz only */
+#define CANFD_CLK_SEL_DIV_30MHZ		0x00000002	/* SRC=240MHz only */
+#define CANFD_CLK_SEL_DIV_24MHZ		0x00000003	/* SRC=240MHz only */
+#define CANFD_CLK_SEL_DIV_20MHZ		0x00000004	/* SRC=240MHz only */
+
+#define CANFD_CLK_SEL_SRC_MASK		0x00000008	/* 0=80MHz, 1=240MHz */
+#define CANFD_CLK_SEL_SRC_240MHZ	0x00000008
+#define CANFD_CLK_SEL_SRC_80MHZ		(~CANFD_CLK_SEL_SRC_240MHZ & \
+					 CANFD_CLK_SEL_SRC_MASK)
+
+#define CANFD_CLK_SEL_20MHZ		(CANFD_CLK_SEL_SRC_240MHZ |\
+					 CANFD_CLK_SEL_DIV_20MHZ)
+#define CANFD_CLK_SEL_24MHZ		(CANFD_CLK_SEL_SRC_240MHZ |\
+					 CANFD_CLK_SEL_DIV_24MHZ)
+#define CANFD_CLK_SEL_30MHZ		(CANFD_CLK_SEL_SRC_240MHZ |\
+					 CANFD_CLK_SEL_DIV_30MHZ)
+#define CANFD_CLK_SEL_40MHZ		(CANFD_CLK_SEL_SRC_240MHZ |\
+					 CANFD_CLK_SEL_DIV_40MHZ)
+#define CANFD_CLK_SEL_60MHZ		(CANFD_CLK_SEL_SRC_240MHZ |\
+					 CANFD_CLK_SEL_DIV_60MHZ)
+#define CANFD_CLK_SEL_80MHZ		(CANFD_CLK_SEL_SRC_80MHZ)
+
+/* CAN-FD channel Rx/Tx control register bits */
+#define CANFD_CTL_UNC_BIT		0x00010000	/* Uncached DMA mem */
+#define CANFD_CTL_RST_BIT		0x00020000	/* reset DMA action */
+#define CANFD_CTL_IEN_BIT		0x00040000	/* IRQ enable */
+
+/* Rx IRQ Count and Time Limits */
+#define CANFD_CTL_IRQ_CL_DEF	8	/* Rx msg max nb per IRQ in Rx DMA */
+#define CANFD_CTL_IRQ_TL_DEF	5	/* Time before IRQ if < CL (x100 us) */
+
+#define CANFD_OPTIONS_SET	(CANFD_OPTION_ERROR | CANFD_OPTION_BUSLOAD)
+
+/* Tx anticipation window (link logical address should be aligned on a 2K
+ * boundary)
+ */
+#define PCIEFD_TX_PAGE_COUNT	(PCIEFD_TX_DMA_SIZE / PCIEFD_TX_PAGE_SIZE)
+
+#define CANFD_MSG_LNK_TX	0x1001	/* Tx msgs link */
+
+/* 32-bit IRQ status fields, heading the Rx DMA area: bits 0-3 carry the IRQ
+ * tag, bits 4-10 the count of Rx messages available in the DMA area and
+ * bit 16 signals a Tx link interrupt (one Tx page has been consumed)
+ */
+static inline int pciefd_irq_tag(u32 irq_status)
+{
+	return irq_status & 0x0000000f;
+}
+
+static inline int pciefd_irq_rx_cnt(u32 irq_status)
+{
+	return (irq_status & 0x000007f0) >> 4;
+}
+
+static inline int pciefd_irq_is_lnk(u32 irq_status)
+{
+	return irq_status & 0x00010000;
+}
+
+/* Rx record */
+struct pciefd_rx_dma {
+	__le32 irq_status;
+	__le32 sys_time_low;
+	__le32 sys_time_high;
+	struct pucan_rx_msg msg[0];
+} __packed __aligned(4);
+
+/* Tx Link record */
+struct pciefd_tx_link {
+	__le16 size;
+	__le16 type;
+	__le32 laddr_lo;
+	__le32 laddr_hi;
+} __packed __aligned(4);
+
+/* Tx page descriptor */
+struct pciefd_page {
+	void *vbase;			/* page virtual address */
+	dma_addr_t lbase;		/* page logical address */
+	u32 offset;
+	u32 size;
+};
+
+/* CAN channel object */
+struct pciefd_board;
+struct pciefd_can {
+	struct peak_canfd_priv ucan;	/* must be the first member */
+	void __iomem *reg_base;		/* channel config base addr */
+	struct pciefd_board *board;	/* reverse link */
+
+	struct pucan_command pucan_cmd;	/* command buffer */
+
+	dma_addr_t rx_dma_laddr;	/* DMA virtual and logical addr */
+	void *rx_dma_vaddr;		/* for Rx and Tx areas */
+	dma_addr_t tx_dma_laddr;
+	void *tx_dma_vaddr;
+
+	struct pciefd_page tx_pages[PCIEFD_TX_PAGE_COUNT];
+	u16 tx_pages_free;		/* free Tx pages counter */
+	u16 tx_page_index;		/* current page used for Tx */
+	rtdm_lock_t tx_lock;
+	u32 irq_status;
+	u32 irq_tag;			/* next irq tag */
+	int irq;
+
+	u32 flags;
+};
+
+/* PEAK-PCIe FD board object */
+struct pciefd_board {
+	void __iomem *reg_base;
+	struct pci_dev *pci_dev;
+	int can_count;
+	int irq_flags;			/* RTDM_IRQTYPE_SHARED or 0 */
+	rtdm_lock_t cmd_lock;		/* 64-bits cmds must be atomic */
+	struct pciefd_can *can[0];	/* array of network devices */
+};
+
+#define CANFD_CTL_IRQ_CL_MIN	1
+#define CANFD_CTL_IRQ_CL_MAX	127	/* 7-bit field */
+
+#define CANFD_CTL_IRQ_TL_MIN	1
+#define CANFD_CTL_IRQ_TL_MAX	15	/* 4-bit field */
+
+static uint irqcl = CANFD_CTL_IRQ_CL_DEF;
+module_param(irqcl, uint, 0644);
+MODULE_PARM_DESC(irqcl,
+" PCIe FD IRQ Count Limit (default=" __stringify(CANFD_CTL_IRQ_CL_DEF) ")");
+
+static uint irqtl = CANFD_CTL_IRQ_TL_DEF;
+module_param(irqtl, uint, 0644);
+MODULE_PARM_DESC(irqtl,
+" PCIe FD IRQ Time Limit (default=" __stringify(CANFD_CTL_IRQ_TL_DEF) ")");
+
+#ifdef PCIEFD_USES_MSI
+
+#ifdef CONFIG_XENO_OPT_SHIRQ
+/* default behaviour: run as mainline driver in INTx mode */
+#define PCIEFD_USEMSI_DEFAULT	0
+#else
+/* default behaviour: run in MSI mode (one IRQ per channel) */
+#define PCIEFD_USEMSI_DEFAULT	1
+#endif
+
+static uint usemsi = PCIEFD_USEMSI_DEFAULT;
+module_param(usemsi, uint, 0644);
+MODULE_PARM_DESC(usemsi,
+" 0=INTA; 1=MSI (def=" __stringify(PCIEFD_USEMSI_DEFAULT) ")");
+#endif
+
+/* read a 32 bit value from a SYS block register */
+static inline u32 pciefd_sys_readreg(const struct pciefd_board *priv, u16 reg)
+{
+	return readl(priv->reg_base + reg);
+}
+
+/* write a 32 bit value into a SYS block register */
+static inline void pciefd_sys_writereg(const struct pciefd_board *priv,
+				       u32 val, u16 reg)
+{
+	writel(val, priv->reg_base + reg);
+}
+
+/* read a 32-bit value from a CAN-FD block register */
+static inline u32 pciefd_can_readreg(const struct pciefd_can *priv, u16 reg)
+{
+	return readl(priv->reg_base + reg);
+}
+
+/* write a 32-bit value into a CAN-FD block register */
+static inline void pciefd_can_writereg(const struct pciefd_can *priv,
+				       u32 val, u16 reg)
+{
+	writel(val, priv->reg_base + reg);
+}
+
+/* give a channel logical Rx DMA address to the board */
+static void pciefd_can_setup_rx_dma(struct pciefd_can *priv)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	const u32 dma_addr_h = (u32)(priv->rx_dma_laddr >> 32);
+#else
+	const u32 dma_addr_h = 0;
+#endif
+
+	/* (DMA must be reset for Rx) */
+	pciefd_can_writereg(priv, CANFD_CTL_RST_BIT,
+			    PCIEFD_REG_CAN_RX_CTL_SET);
+
+	/* write the logical address of the Rx DMA area for this channel */
+	pciefd_can_writereg(priv, (u32)priv->rx_dma_laddr,
+			    PCIEFD_REG_CAN_RX_DMA_ADDR_L);
+	pciefd_can_writereg(priv, dma_addr_h, PCIEFD_REG_CAN_RX_DMA_ADDR_H);
+
+	/* also indicates that Rx DMA is cacheable */
+	pciefd_can_writereg(priv, CANFD_CTL_UNC_BIT,
+			    PCIEFD_REG_CAN_RX_CTL_CLR);
+}
+
+/* clear channel logical Rx DMA address from the board */
+static void pciefd_can_clear_rx_dma(struct pciefd_can *priv)
+{
+	/* DMA must be reset for Rx */
+	pciefd_can_writereg(priv, CANFD_CTL_RST_BIT,
+			    PCIEFD_REG_CAN_RX_CTL_SET);
+
+	/* clear the logical address of the Rx DMA area for this channel */
+	pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_RX_DMA_ADDR_L);
+	pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_RX_DMA_ADDR_H);
+}
+
+/* give a channel logical Tx DMA address to the board */
+static void pciefd_can_setup_tx_dma(struct pciefd_can *priv)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	const u32 dma_addr_h = (u32)(priv->tx_dma_laddr >> 32);
+#else
+	const u32 dma_addr_h = 0;
+#endif
+
+	/* (DMA must be reset for Tx) */
+	pciefd_can_writereg(priv, CANFD_CTL_RST_BIT,
+			    PCIEFD_REG_CAN_TX_CTL_SET);
+
+	/* write the logical address of the Tx DMA area for this channel */
+	pciefd_can_writereg(priv, (u32)priv->tx_dma_laddr,
+			    PCIEFD_REG_CAN_TX_DMA_ADDR_L);
+	pciefd_can_writereg(priv, dma_addr_h, PCIEFD_REG_CAN_TX_DMA_ADDR_H);
+
+	/* also indicates that Tx DMA is cacheable */
+	pciefd_can_writereg(priv, CANFD_CTL_UNC_BIT,
+			    PCIEFD_REG_CAN_TX_CTL_CLR);
+}
+
+/* clear channel logical Tx DMA address from the board */
+static void pciefd_can_clear_tx_dma(struct pciefd_can *priv)
+{
+	/* DMA must be reset for Tx */
+	pciefd_can_writereg(priv, CANFD_CTL_RST_BIT,
+			    PCIEFD_REG_CAN_TX_CTL_SET);
+
+	/* clear the logical address of the Tx DMA area for this channel */
+	pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_TX_DMA_ADDR_L);
+	pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_TX_DMA_ADDR_H);
+}
+
+/* acknowledge interrupt to the device */
+static void pciefd_can_ack_rx_dma(struct pciefd_can *priv)
+{
+	/* read value of current IRQ tag and inc it for next one */
+	priv->irq_tag = le32_to_cpu(*(__le32 *)priv->rx_dma_vaddr);
+	priv->irq_tag++;
+	priv->irq_tag &= 0xf;
+
+	/* write the next IRQ tag for this CAN */
+	pciefd_can_writereg(priv, priv->irq_tag, PCIEFD_REG_CAN_RX_CTL_ACK);
+}
+
+/* IRQ handler */
+static int pciefd_irq_handler(rtdm_irq_t *irq_handle)
+{
+	struct pciefd_can *priv = rtdm_irq_get_arg(irq_handle, void);
+	struct pciefd_rx_dma *rx_dma = priv->rx_dma_vaddr;
+
+	/* INTA mode only, dummy read to sync with PCIe transaction */
+	if (!pci_dev_msi_enabled(priv->board->pci_dev))
+		(void)pciefd_sys_readreg(priv->board, PCIEFD_REG_SYS_VER1);
+
+	/* read IRQ status from the first 32-bit of the Rx DMA area */
+	priv->irq_status = le32_to_cpu(rx_dma->irq_status);
+
+	/* check if this (shared) IRQ is for this CAN */
+	if (pciefd_irq_tag(priv->irq_status) != priv->irq_tag)
+		return RTDM_IRQ_NONE;
+
+	/* handle rx messages (if any) */
+	peak_canfd_handle_msgs_list(&priv->ucan,
+				    rx_dma->msg,
+				    pciefd_irq_rx_cnt(priv->irq_status));
+
+	/* handle tx link interrupt (if any) */
+	if (pciefd_irq_is_lnk(priv->irq_status)) {
+		rtdm_lock_get(&priv->tx_lock);
+		priv->tx_pages_free++;
+		rtdm_lock_put(&priv->tx_lock);
+
+		/* Wake up a sender */
+		rtdm_sem_up(&priv->ucan.rdev->tx_sem);
+	}
+
+	/* re-enable Rx DMA transfer for this CAN */
+	pciefd_can_ack_rx_dma(priv);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+/* initialize structures used for sending CAN frames */
+static int pciefd_enable_tx_path(struct peak_canfd_priv *ucan)
+{
+	struct pciefd_can *priv = (struct pciefd_can *)ucan;
+	int i;
+
+	/* initialize the Tx pages descriptors */
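+	/* the current page is not counted as free, hence COUNT - 1 */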
+	priv->tx_pages_free = PCIEFD_TX_PAGE_COUNT - 1;
+	priv->tx_page_index = 0;
+
+	priv->tx_pages[0].vbase = priv->tx_dma_vaddr;
+	priv->tx_pages[0].lbase = priv->tx_dma_laddr;
+
+	for (i = 0; i < PCIEFD_TX_PAGE_COUNT; i++) {
+		priv->tx_pages[i].offset = 0;
+		priv->tx_pages[i].size = PCIEFD_TX_PAGE_SIZE -
+					 sizeof(struct pciefd_tx_link);
+		if (i) {
+			priv->tx_pages[i].vbase =
+					  priv->tx_pages[i - 1].vbase +
+					  PCIEFD_TX_PAGE_SIZE;
+			priv->tx_pages[i].lbase =
+					  priv->tx_pages[i - 1].lbase +
+					  PCIEFD_TX_PAGE_SIZE;
+		}
+	}
+
+	/* setup Tx DMA addresses into IP core */
+	pciefd_can_setup_tx_dma(priv);
+
+	/* start (TX_RST=0) Tx Path */
+	pciefd_can_writereg(priv, CANFD_CTL_RST_BIT,
+			    PCIEFD_REG_CAN_TX_CTL_CLR);
+
+	return 0;
+}
+
+/* board specific command pre-processing */
+static int pciefd_pre_cmd(struct peak_canfd_priv *ucan)
+{
+	struct pciefd_can *priv = (struct pciefd_can *)ucan;
+	u16 cmd = pucan_cmd_get_opcode(&priv->pucan_cmd);
+
+	/* pre-process command */
+	switch (cmd) {
+	case PUCAN_CMD_NORMAL_MODE:
+	case PUCAN_CMD_LISTEN_ONLY_MODE:
+
+		if (ucan->rdev->state == CAN_STATE_BUS_OFF)
+			break;
+
+		/* setup Rx DMA address */
+		pciefd_can_setup_rx_dma(priv);
+
+		/* setup Rx IRQ count limit and time limit */
+		pciefd_can_writereg(priv, (irqtl << 8) | irqcl,
+				    PCIEFD_REG_CAN_RX_CTL_WRT);
+
+		/* clear DMA RST for Rx (Rx start) */
+		pciefd_can_writereg(priv, CANFD_CTL_RST_BIT,
+				    PCIEFD_REG_CAN_RX_CTL_CLR);
+
+		/* reset timestamps */
+		pciefd_can_writereg(priv, !CANFD_MISC_TS_RST,
+				    PCIEFD_REG_CAN_MISC);
+
+		/* do an initial ACK */
+		pciefd_can_ack_rx_dma(priv);
+
+		/* enable IRQ for this CAN after having set next irq_tag */
+		pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT,
+				    PCIEFD_REG_CAN_RX_CTL_SET);
+
+		/* Tx path will be set up as soon as RX_BARRIER is received */
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* write a command */
+static int pciefd_write_cmd(struct peak_canfd_priv *ucan)
+{
+	struct pciefd_can *priv = (struct pciefd_can *)ucan;
+	unsigned long flags;
+
+	/* 64-bit command must be atomic */
+	rtdm_lock_get_irqsave(&priv->board->cmd_lock, flags);
+
+	pciefd_can_writereg(priv, *(u32 *)ucan->cmd_buffer,
+			    PCIEFD_REG_CAN_CMD_PORT_L);
+	pciefd_can_writereg(priv, *(u32 *)(ucan->cmd_buffer + 4),
+			    PCIEFD_REG_CAN_CMD_PORT_H);
+
+	rtdm_lock_put_irqrestore(&priv->board->cmd_lock, flags);
+
+	return 0;
+}
+
+/* board specific command post-processing */
+static int pciefd_post_cmd(struct peak_canfd_priv *ucan)
+{
+	struct pciefd_can *priv = (struct pciefd_can *)ucan;
+	u16 cmd = pucan_cmd_get_opcode(&priv->pucan_cmd);
+
+	switch (cmd) {
+	case PUCAN_CMD_RESET_MODE:
+
+		if (ucan->rdev->state == CAN_STATE_STOPPED)
+			break;
+
+		/* controller now in reset mode: disable IRQ for this CAN */
+		pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT,
+				    PCIEFD_REG_CAN_RX_CTL_CLR);
+
+		/* stop and reset DMA addresses in Tx/Rx engines */
+		pciefd_can_clear_tx_dma(priv);
+		pciefd_can_clear_rx_dma(priv);
+
+		/* wait for above commands to complete (read cycle) */
+		(void)pciefd_sys_readreg(priv->board, PCIEFD_REG_SYS_VER1);
+
+		ucan->rdev->state = CAN_STATE_STOPPED;
+
+		break;
+	}
+
+	return 0;
+}
+
+/* allocate enough room in the Tx DMA area to store a CAN message */
+static void *pciefd_alloc_tx_msg(struct peak_canfd_priv *ucan, u16 msg_size,
+				 int *room_left)
+{
+	struct pciefd_can *priv = (struct pciefd_can *)ucan;
+	struct pciefd_page *page = priv->tx_pages + priv->tx_page_index;
+	unsigned long flags;
+	void *msg;
+
+	rtdm_lock_get_irqsave(&priv->tx_lock, flags);
+
+	if (page->offset + msg_size > page->size) {
+		struct pciefd_tx_link *lk;
+
+		/* not enough space in this page: try another one */
+		if (!priv->tx_pages_free) {
+			rtdm_lock_put_irqrestore(&priv->tx_lock, flags);
+
+			/* Tx overflow */
+			return NULL;
+		}
+
+		priv->tx_pages_free--;
+
+		/* keep address of the very last free slot of current page */
+		lk = page->vbase + page->offset;
+
+		/* next, move on a new free page */
+		priv->tx_page_index = (priv->tx_page_index + 1) %
+				      PCIEFD_TX_PAGE_COUNT;
+		page = priv->tx_pages + priv->tx_page_index;
+
+		/* put link record to this new page at the end of prev one */
+		lk->size = cpu_to_le16(sizeof(*lk));
+		lk->type = cpu_to_le16(CANFD_MSG_LNK_TX);
+		lk->laddr_lo = cpu_to_le32(page->lbase);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		lk->laddr_hi = cpu_to_le32(page->lbase >> 32);
+#else
+		lk->laddr_hi = 0;
+#endif
+		/* next msgs will be put from the beginning of this new page */
+		page->offset = 0;
+	}
+
+	*room_left = priv->tx_pages_free * page->size;
+
+	rtdm_lock_put_irqrestore(&priv->tx_lock, flags);
+
+	msg = page->vbase + page->offset;
+
+	/* give back room left in the tx ring */
+	*room_left += page->size - (page->offset + msg_size);
+
+	return msg;
+}
+
+/* tell the IP core that a frame has been written into the Tx DMA area */
+static int pciefd_write_tx_msg(struct peak_canfd_priv *ucan,
+			       struct pucan_tx_msg *msg)
+{
+	struct pciefd_can *priv = (struct pciefd_can *)ucan;
+	struct pciefd_page *page = priv->tx_pages + priv->tx_page_index;
+
+	/* this slot is now reserved for writing the frame */
+	page->offset += le16_to_cpu(msg->size);
+
+	/* tell the board a frame has been written in Tx DMA area */
+	pciefd_can_writereg(priv, 1, PCIEFD_REG_CAN_TX_REQ_ACC);
+
+	return 0;
+}
+
+/* probe for CAN channel number #pciefd_board->can_count */
+static int pciefd_can_probe(struct pciefd_board *pciefd)
+{
+	struct rtcan_device *rdev;
+	struct pciefd_can *priv;
+	u32 clk;
+	int err;
+
+	/* allocate the RTCAN object */
+	rdev = alloc_peak_canfd_dev(sizeof(*priv), pciefd->can_count);
+	if (!rdev) {
+		dev_err(&pciefd->pci_dev->dev,
+			"failed to alloc RTCAN device object\n");
+		goto failure;
+	}
+
+	/* fill-in board specific parts */
+	rdev->board_name = pciefd_board_name;
+
+	/* fill-in rtcan private object */
+	priv = rdev->priv;
+
+	/* setup PCIe-FD own callbacks */
+	priv->ucan.pre_cmd = pciefd_pre_cmd;
+	priv->ucan.write_cmd = pciefd_write_cmd;
+	priv->ucan.post_cmd = pciefd_post_cmd;
+	priv->ucan.enable_tx_path = pciefd_enable_tx_path;
+	priv->ucan.alloc_tx_msg = pciefd_alloc_tx_msg;
+	priv->ucan.write_tx_msg = pciefd_write_tx_msg;
+
+	/* setup PCIe-FD own command buffer */
+	priv->ucan.cmd_buffer = &priv->pucan_cmd;
+	priv->ucan.cmd_maxlen = sizeof(priv->pucan_cmd);
+
+	priv->board = pciefd;
+
+	/* CAN config regs block address */
+	priv->reg_base = pciefd->reg_base + PCIEFD_CANX_OFF(priv->ucan.index);
+	rdev->base_addr = (unsigned long)priv->reg_base;
+
+	/* allocate non-cacheable DMA'able 4KB memory area for Rx */
+	priv->rx_dma_vaddr = dmam_alloc_coherent(&pciefd->pci_dev->dev,
+						 PCIEFD_RX_DMA_SIZE,
+						 &priv->rx_dma_laddr,
+						 GFP_KERNEL);
+	if (!priv->rx_dma_vaddr) {
+		dev_err(&pciefd->pci_dev->dev,
+			"Rx dmam_alloc_coherent(%u) failure\n",
+			PCIEFD_RX_DMA_SIZE);
+		goto err_free_rtdev;
+	}
+
+	/* allocate non-cacheable DMA'able 4KB memory area for Tx */
+	priv->tx_dma_vaddr = dmam_alloc_coherent(&pciefd->pci_dev->dev,
+						 PCIEFD_TX_DMA_SIZE,
+						 &priv->tx_dma_laddr,
+						 GFP_KERNEL);
+	if (!priv->tx_dma_vaddr) {
+		dev_err(&pciefd->pci_dev->dev,
+			"Tx dmam_alloc_coherent(%u) failure\n",
+			PCIEFD_TX_DMA_SIZE);
+		goto err_free_rtdev;
+	}
+
+	/* CAN clock in RST mode */
+	pciefd_can_writereg(priv, CANFD_MISC_TS_RST, PCIEFD_REG_CAN_MISC);
+
+	/* read current clock value */
+	clk = pciefd_can_readreg(priv, PCIEFD_REG_CAN_CLK_SEL);
+	switch (clk) {
+	case CANFD_CLK_SEL_20MHZ:
+		priv->ucan.rdev->can_sys_clock = 20 * 1000 * 1000;
+		break;
+	case CANFD_CLK_SEL_24MHZ:
+		priv->ucan.rdev->can_sys_clock = 24 * 1000 * 1000;
+		break;
+	case CANFD_CLK_SEL_30MHZ:
+		priv->ucan.rdev->can_sys_clock = 30 * 1000 * 1000;
+		break;
+	case CANFD_CLK_SEL_40MHZ:
+		priv->ucan.rdev->can_sys_clock = 40 * 1000 * 1000;
+		break;
+	case CANFD_CLK_SEL_60MHZ:
+		priv->ucan.rdev->can_sys_clock = 60 * 1000 * 1000;
+		break;
+	default:
+		pciefd_can_writereg(priv, CANFD_CLK_SEL_80MHZ,
+				    PCIEFD_REG_CAN_CLK_SEL);
+
+		fallthrough;
+	case CANFD_CLK_SEL_80MHZ:
+		priv->ucan.rdev->can_sys_clock = 80 * 1000 * 1000;
+		break;
+	}
+
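+	/* in shared INTx mode all channels use the PCI device IRQ; in MSI
+	 * mode each channel gets its own vector
+	 */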
+#ifdef PCIEFD_USES_MSI
+	priv->irq = (pciefd->irq_flags & RTDM_IRQTYPE_SHARED) ?
+		    pciefd->pci_dev->irq :
+		    pci_irq_vector(pciefd->pci_dev, priv->ucan.index);
+#else
+	priv->irq = pciefd->pci_dev->irq;
+#endif
+
+	/* setup irq handler */
+	err = rtdm_irq_request(&rdev->irq_handle,
+			       priv->irq,
+			       pciefd_irq_handler,
+			       pciefd->irq_flags,
+			       DRV_NAME,
+			       priv);
+	if (err) {
+		dev_err(&pciefd->pci_dev->dev,
+			"rtdm_irq_request(IRQ%u) failure err %d\n",
+			priv->irq, err);
+		goto err_free_rtdev;
+	}
+
+	err = rtcan_dev_register(rdev);
+	if (err) {
+		dev_err(&pciefd->pci_dev->dev,
+			"couldn't register RTCAN device: %d\n", err);
+		goto err_free_irq;
+	}
+
+	rtdm_lock_init(&priv->tx_lock);
+
+	/* save the object address in the board structure */
+	pciefd->can[pciefd->can_count] = priv;
+
+	dev_info(&pciefd->pci_dev->dev, "%s at reg_base=0x%p irq=%d\n",
+		 rdev->name, priv->reg_base, priv->irq);
+
+	return 0;
+
+err_free_irq:
+	rtdm_irq_free(&rdev->irq_handle);
+
+err_free_rtdev:
+	rtcan_dev_free(rdev);
+
+failure:
+	return -ENOMEM;
+}
+
+/* wakeup all RT tasks that are blocked on read */
+static void pciefd_can_unlock_recv_tasks(struct rtcan_device *rdev)
+{
+	struct rtcan_recv *recv_listener = rdev->recv_list;
+
+	while (recv_listener) {
+		struct rtcan_socket *sock = recv_listener->sock;
+
+		/* wakeup any rx task */
+		rtdm_sem_destroy(&sock->recv_sem);
+
+		recv_listener = recv_listener->next;
+	}
+}
+
+/* remove a CAN-FD channel by releasing all of its resources */
+static void pciefd_can_remove(struct pciefd_can *priv)
+{
+	struct rtcan_device *rdev = priv->ucan.rdev;
+
+	/* unlock any tasks that wait for read on a socket bound to this CAN */
+	pciefd_can_unlock_recv_tasks(rdev);
+
+	/* in case the driver is removed when the interface is UP
+	 * (device MUST be closed before being unregistered)
+	 */
+	rdev->do_set_mode(rdev, CAN_MODE_STOP, NULL);
+
+	rtcan_dev_unregister(rdev);
+	rtdm_irq_disable(&rdev->irq_handle);
+	rtdm_irq_free(&rdev->irq_handle);
+	rtcan_dev_free(rdev);
+}
+
+/* remove all CAN-FD channels by releasing their own resources */
+static void pciefd_can_remove_all(struct pciefd_board *pciefd)
+{
+	while (pciefd->can_count > 0)
+		pciefd_can_remove(pciefd->can[--pciefd->can_count]);
+}
+
+/* probe for the entire device */
+static int peak_pciefd_probe(struct pci_dev *pdev,
+			     const struct pci_device_id *ent)
+{
+	struct pciefd_board *pciefd;
+	int err, can_count;
+	u16 sub_sys_id;
+	u8 hw_ver_major;
+	u8 hw_ver_minor;
+	u8 hw_ver_sub;
+	u32 v2;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err)
+		goto err_disable_pci;
+
+	/* the number of channels depends on sub-system id */
+	err = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sub_sys_id);
+	if (err)
+		goto err_release_regions;
+
+	dev_dbg(&pdev->dev, "probing device %04x:%04x:%04x\n",
+		pdev->vendor, pdev->device, sub_sys_id);
+
+	if (sub_sys_id >= 0x0012)
+		can_count = 4;
+	else if (sub_sys_id >= 0x0010)
+		can_count = 3;
+	else if (sub_sys_id >= 0x0004)
+		can_count = 2;
+	else
+		can_count = 1;
+
+	/* allocate board structure object */
+	pciefd = devm_kzalloc(&pdev->dev, struct_size(pciefd, can, can_count),
+			      GFP_KERNEL);
+	if (!pciefd) {
+		err = -ENOMEM;
+		goto err_release_regions;
+	}
+
+	/* initialize the board structure */
+	pciefd->pci_dev = pdev;
+	rtdm_lock_init(&pciefd->cmd_lock);
+
+	/* save the PCI BAR0 virtual address for further system regs access */
+	pciefd->reg_base = pci_iomap(pdev, 0, PCIEFD_BAR0_SIZE);
+	if (!pciefd->reg_base) {
+		dev_err(&pdev->dev, "failed to map PCI resource #0\n");
+		err = -ENOMEM;
+		goto err_release_regions;
+	}
+
+	/* read the firmware version number */
+	v2 = pciefd_sys_readreg(pciefd, PCIEFD_REG_SYS_VER2);
+
+	hw_ver_major = (v2 & 0x0000f000) >> 12;
+	hw_ver_minor = (v2 & 0x00000f00) >> 8;
+	hw_ver_sub = (v2 & 0x000000f0) >> 4;
+
+	dev_info(&pdev->dev,
+		 "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
+		 hw_ver_major, hw_ver_minor, hw_ver_sub);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	/* DMA logic doesn't handle mix of 32-bit and 64-bit logical addresses
+	 * in fw <= 3.2.x
+	 */
+	if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) <
+		PCIEFD_FW_VERSION(3, 3, 0)) {
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (err)
+			dev_warn(&pdev->dev,
+				"warning: can't set DMA mask %llxh (err %d)\n",
+				DMA_BIT_MASK(32), err);
+	}
+#endif
+
+	/* default interrupt mode is: shared INTx */
+	pciefd->irq_flags = RTDM_IRQTYPE_SHARED;
+
+#ifdef PCIEFD_USES_MSI
+	if (usemsi) {
+		err = pci_msi_vec_count(pdev);
+		if (err > 0) {
+			int msi_maxvec = err;
+
+			err = pci_alloc_irq_vectors_affinity(pdev, can_count,
+							     msi_maxvec,
+							     PCI_IRQ_MSI,
+							     NULL);
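+			/* err: number of allocated vectors or negative errno */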
+			dev_info(&pdev->dev,
+				 "MSI[%u..%u] enabling status: %d\n",
+				 can_count, msi_maxvec, err);
+
+			/* if we didn't get the requested count of MSI
+			 * vectors, fall back to INTx
+			 */
+			if (err >= can_count)
+				pciefd->irq_flags &= ~RTDM_IRQTYPE_SHARED;
+			else if (err >= 0)
+				pci_free_irq_vectors(pdev);
+		}
+	}
+#endif
+
+	/* stop system clock */
+	pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
+			    PCIEFD_REG_SYS_CTL_CLR);
+
+	pci_set_master(pdev);
+
+	/* create now the corresponding channels objects */
+	while (pciefd->can_count < can_count) {
+		err = pciefd_can_probe(pciefd);
+		if (err)
+			goto err_free_canfd;
+
+		pciefd->can_count++;
+	}
+
+	/* set system timestamps counter in RST mode */
+	pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_TS_RST,
+			    PCIEFD_REG_SYS_CTL_SET);
+
+	/* wait a bit (read cycle) */
+	(void)pciefd_sys_readreg(pciefd, PCIEFD_REG_SYS_VER1);
+
+	/* free all clocks */
+	pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_TS_RST,
+			    PCIEFD_REG_SYS_CTL_CLR);
+
+	/* start system clock */
+	pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
+			    PCIEFD_REG_SYS_CTL_SET);
+
+	/* remember the board structure address in the device user data */
+	pci_set_drvdata(pdev, pciefd);
+
+	return 0;
+
+err_free_canfd:
+	pciefd_can_remove_all(pciefd);
+
+#ifdef PCIEFD_USES_MSI
+	pci_free_irq_vectors(pdev);
+#endif
+	pci_iounmap(pdev, pciefd->reg_base);
+
+err_release_regions:
+	pci_release_regions(pdev);
+
+err_disable_pci:
+	pci_disable_device(pdev);
+
+	/* pci_xxx_config_word() returns positive PCIBIOS_xxx error codes while
+	 * the probe() function must return a negative errno in case of failure
+	 * (err is unchanged if negative)
+	 */
+	return pcibios_err_to_errno(err);
+}
+
+/* free the board structure object, as well as its resources: */
+static void peak_pciefd_remove(struct pci_dev *pdev)
+{
+	struct pciefd_board *pciefd = pci_get_drvdata(pdev);
+
+	/* release CAN-FD channels resources */
+	pciefd_can_remove_all(pciefd);
+
+#ifdef PCIEFD_USES_MSI
+	pci_free_irq_vectors(pdev);
+#endif
+	pci_iounmap(pdev, pciefd->reg_base);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver rtcan_peak_pciefd_driver = {
+	.name = DRV_NAME,
+	.id_table = peak_pciefd_tbl,
+	.probe = peak_pciefd_probe,
+	.remove = peak_pciefd_remove,
+};
+
+static int __init rtcan_peak_pciefd_init(void)
+{
+	if (!realtime_core_enabled())
+		return 0;
+
+	return pci_register_driver(&rtcan_peak_pciefd_driver);
+}
+
+static void __exit rtcan_peak_pciefd_exit(void)
+{
+	if (realtime_core_enabled())
+		pci_unregister_driver(&rtcan_peak_pciefd_driver);
+}
+
+module_init(rtcan_peak_pciefd_init);
+module_exit(rtcan_peak_pciefd_exit);
+++ linux-patched/drivers/xenomai/can/rtcan_module.c	2022-03-21 12:58:29.202890785 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/rtcan_list.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Derived from RTnet project file stack/rtcan_module.c:
+ *
+ * Copyright (C) 2002      Ulrich Marx <marx@kammer.uni-hannover.de>
+ *               2003-2006 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <rtdm/driver.h>
+#include <rtdm/can.h>
+#include <rtcan_version.h>
+#include <rtcan_internal.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+
+MODULE_LICENSE("GPL");
+
+
+const char rtcan_rtdm_provider_name[] =
+    "(C) 2006 RT-Socket-CAN Development Team";
+
+
+#ifdef CONFIG_PROC_FS
+
+struct proc_dir_entry *rtcan_proc_root;
+
+static void rtcan_dev_get_ctrlmode_name(can_ctrlmode_t ctrlmode,
+					char* name, int max_len)
+{
+    snprintf(name, max_len, "%s%s",
+	     ctrlmode & CAN_CTRLMODE_LISTENONLY ? "listen-only " : "",
+	     ctrlmode & CAN_CTRLMODE_LOOPBACK ? "loopback " : "");
+}
+
+static char *rtcan_state_names[] = {
+    "active", "warning", "passive" , "bus-off",
+    "scanning", "stopped", "sleeping"
+};
+
+static void rtcan_dev_get_state_name(can_state_t state,
+				     char* name, int max_len)
+{
+    if (state >= CAN_STATE_ACTIVE &&
+	state <= CAN_STATE_SLEEPING)
+	strncpy(name, rtcan_state_names[state], max_len);
+    else
+	strncpy(name, "unknown", max_len);
+}
+
+static void rtcan_dev_get_baudrate_name(can_baudrate_t baudrate,
+					char* name, int max_len)
+{
+    switch (baudrate) {
+    case CAN_BAUDRATE_UNCONFIGURED:
+	strncpy(name, "undefined", max_len);
+	break;
+    case CAN_BAUDRATE_UNKNOWN:
+	strncpy(name, "unknown", max_len);
+	break;
+    default:
+	ksformat(name, max_len, "%d", baudrate);
+	break;
+    }
+}
+
+static void rtcan_dev_get_bittime_name(struct can_bittime *bit_time,
+				       char* name, int max_len)
+{
+    switch (bit_time->type) {
+    case CAN_BITTIME_STD:
+	ksformat(name, max_len,
+		 "brp=%d prop_seg=%d phase_seg1=%d "
+		 "phase_seg2=%d sjw=%d sam=%d",
+		 bit_time->std.brp,
+		 bit_time->std.prop_seg,
+		 bit_time->std.phase_seg1,
+		 bit_time->std.phase_seg2,
+		 bit_time->std.sjw,
+		 bit_time->std.sam);
+	break;
+    case CAN_BITTIME_BTR:
+	ksformat(name, max_len, "btr0=0x%02x btr1=0x%02x",
+		 bit_time->btr.btr0, bit_time->btr.btr1);
+	break;
+    default:
+	strncpy(name, "unknown", max_len);
+	break;
+    }
+}
+
+static void rtcan_get_timeout_name(nanosecs_rel_t timeout,
+				   char* name, int max_len)
+{
+    if (timeout == RTDM_TIMEOUT_INFINITE)
+	strncpy(name, "infinite", max_len);
+    else
+	ksformat(name, max_len, "%lld", (long long)timeout);
+}
+
+static int rtcan_read_proc_devices(struct seq_file *p, void *data)
+{
+    int i;
+    struct rtcan_device *dev;
+    char state_name[20], baudrate_name[20];
+
+    if (down_interruptible(&rtcan_devices_nrt_lock))
+	return -ERESTARTSYS;
+
+    /* Name___________ _Baudrate State___ TX_Counter RX_Counter ____Errors
+     * rtcan0             125000 stopped  1234567890 1234567890 1234567890
+     * rtcan1          undefined warning  1234567890 1234567890 1234567890
+     * rtcan2          undefined scanning 1234567890 1234567890 1234567890
+     */
+    seq_printf(p, "Name___________ _Baudrate State___ TX_Counter RX_Counter "
+		  "____Errors\n");
+
+    for (i = 1; i <= RTCAN_MAX_DEVICES; i++) {
+	if ((dev = rtcan_dev_get_by_index(i)) != NULL) {
+	    rtcan_dev_get_state_name(dev->state,
+				     state_name, sizeof(state_name));
+	    rtcan_dev_get_baudrate_name(dev->baudrate,
+					baudrate_name, sizeof(baudrate_name));
+	    seq_printf(p, "%-15s %9s %-8s %10d %10d %10d\n",
+		       dev->name, baudrate_name, state_name, dev->tx_count,
+		       dev->rx_count, dev->err_count);
+	    rtcan_dev_dereference(dev);
+	}
+    }
+
+    up(&rtcan_devices_nrt_lock);
+
+    return 0;
+}
+
+static int rtcan_proc_devices_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtcan_read_proc_devices, NULL);
+}
+
+static const DEFINE_PROC_OPS(rtcan_proc_devices_ops,
+			rtcan_proc_devices_open,
+			single_release,
+			seq_read,
+			NULL);
+
+static int rtcan_read_proc_sockets(struct seq_file *p, void *data)
+{
+    struct rtcan_socket *sock;
+    struct rtdm_fd *fd;
+    struct rtcan_device *dev;
+    char name[IFNAMSIZ] = "not-bound";
+    char rx_timeout[20], tx_timeout[20];
+    rtdm_lockctx_t lock_ctx;
+    int ifindex;
+
+    if (down_interruptible(&rtcan_devices_nrt_lock))
+	return -ERESTARTSYS;
+
+    /* Name___________ Filter ErrMask RX_Timeout TX_Timeout RX_BufFull TX_Lo
+     * rtcan0               1 0x00010 1234567890 1234567890 1234567890 12345
+     */
+    seq_printf(p, "Name___________ Filter ErrMask RX_Timeout_ns "
+		  "TX_Timeout_ns RX_BufFull TX_Lo\n");
+
+    rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
+
+    list_for_each_entry(sock, &rtcan_socket_list, socket_list) {
+	fd = rtcan_socket_to_fd(sock);
+	if (rtcan_sock_is_bound(sock)) {
+	    ifindex = atomic_read(&sock->ifindex);
+	    if (ifindex) {
+		dev = rtcan_dev_get_by_index(ifindex);
+		if (dev) {
+		    strncpy(name, dev->name, IFNAMSIZ);
+		    rtcan_dev_dereference(dev);
+		}
+	    } else
+		ksformat(name, sizeof(name), "%d", ifindex);
+	}
+	rtcan_get_timeout_name(sock->tx_timeout,
+			       tx_timeout, sizeof(tx_timeout));
+	rtcan_get_timeout_name(sock->rx_timeout,
+			       rx_timeout, sizeof(rx_timeout));
+	seq_printf(p, "%-15s %6d 0x%05x %13s %13s %10d %5d\n",
+		   name, sock->flistlen, sock->err_mask,
+		   rx_timeout, tx_timeout, sock->rx_buf_full,
+		   rtcan_loopback_enabled(sock));
+    }
+
+    rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
+
+    up(&rtcan_devices_nrt_lock);
+
+    return 0;
+}
+
+static int rtcan_proc_sockets_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtcan_read_proc_sockets, NULL);
+}
+
+static const DEFINE_PROC_OPS(rtcan_proc_sockets_ops,
+			rtcan_proc_sockets_open,
+			single_release,
+			seq_read,
+			NULL);
+
+static int rtcan_read_proc_info(struct seq_file *p, void *data)
+{
+    struct rtcan_device *dev = p->private;
+    char state_name[20], baudrate_name[20];
+    char ctrlmode_name[80], bittime_name[80];
+
+    if (down_interruptible(&rtcan_devices_nrt_lock))
+	return -ERESTARTSYS;
+
+    rtcan_dev_get_state_name(dev->state,
+			     state_name, sizeof(state_name));
+    rtcan_dev_get_ctrlmode_name(dev->ctrl_mode,
+				ctrlmode_name, sizeof(ctrlmode_name));
+    rtcan_dev_get_baudrate_name(dev->baudrate,
+				baudrate_name, sizeof(baudrate_name));
+    rtcan_dev_get_bittime_name(&dev->bit_time,
+			       bittime_name, sizeof(bittime_name));
+
+    seq_printf(p, "Device     %s\n", dev->name);
+    seq_printf(p, "Controller %s\n", dev->ctrl_name);
+    seq_printf(p, "Board      %s\n", dev->board_name);
+    seq_printf(p, "Clock-Hz   %d\n", dev->can_sys_clock);
+    seq_printf(p, "Baudrate   %s\n", baudrate_name);
+    seq_printf(p, "Bit-time   %s\n", bittime_name);
+    seq_printf(p, "Ctrl-Mode  %s\n", ctrlmode_name);
+    seq_printf(p, "State      %s\n", state_name);
+    seq_printf(p, "TX-Counter %d\n", dev->tx_count);
+    seq_printf(p, "RX-Counter %d\n", dev->rx_count);
+    seq_printf(p, "Errors     %d\n", dev->err_count);
+#ifdef RTCAN_USE_REFCOUNT
+    seq_printf(p, "Refcount   %d\n", atomic_read(&dev->refcount));
+#endif
+
+    up(&rtcan_devices_nrt_lock);
+
+    return 0;
+}
+
+static int rtcan_proc_info_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtcan_read_proc_info, PDE_DATA(inode));
+}
+
+static const DEFINE_PROC_OPS(rtcan_proc_info_ops,
+			rtcan_proc_info_open,
+			single_release,
+			seq_read,
+			NULL);
+
+static int rtcan_read_proc_filter(struct seq_file *p, void *data)
+{
+    struct rtcan_device *dev = p->private;
+    struct rtcan_recv *recv_listener = dev->recv_list;
+    struct rtdm_fd *fd;
+    rtdm_lockctx_t lock_ctx;
+
+    /*  __CAN_ID__ _CAN_Mask_ Inv MatchCount
+     *  0x12345678 0x12345678  no 1234567890
+     */
+
+    seq_printf(p, "__CAN_ID__ _CAN_Mask_ Inv MatchCount\n");
+
+    rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
+
+    /* Loop over the reception list of the device */
+    while (recv_listener != NULL) {
+	fd = rtcan_socket_to_fd(recv_listener->sock);
+
+	seq_printf(p, "0x%08x 0x%08x %s %10d\n",
+		   recv_listener->can_filter.can_id,
+		   recv_listener->can_filter.can_mask & ~CAN_INV_FILTER,
+		   (recv_listener->can_filter.can_mask & CAN_INV_FILTER) ?
+			"yes" : " no",
+		   recv_listener->match_count);
+
+	recv_listener = recv_listener->next;
+    }
+
+    rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
+
+    return 0;
+}
+
+static int rtcan_proc_filter_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtcan_read_proc_filter, PDE_DATA(inode));
+}
+
+static const DEFINE_PROC_OPS(rtcan_proc_filter_ops,
+			rtcan_proc_filter_open,
+			single_release,
+			seq_read,
+			NULL);
+
+static int rtcan_read_proc_version(struct seq_file *p, void *data)
+{
+	seq_printf(p, "RT-Socket-CAN %d.%d.%d\n",
+		   RTCAN_MAJOR_VER, RTCAN_MINOR_VER, RTCAN_BUGFIX_VER);
+
+	return 0;
+}
+
+static int rtcan_proc_version_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtcan_read_proc_version, NULL);
+}
+
+static const DEFINE_PROC_OPS(rtcan_proc_version_ops,
+			rtcan_proc_version_open,
+			single_release,
+			seq_read,
+			NULL);
+
+void rtcan_dev_remove_proc(struct rtcan_device* dev)
+{
+    if (!dev->proc_root)
+	return;
+
+    remove_proc_entry("info", dev->proc_root);
+    remove_proc_entry("filters", dev->proc_root);
+    remove_proc_entry(dev->name, rtcan_proc_root);
+
+    dev->proc_root = NULL;
+}
+
+int rtcan_dev_create_proc(struct rtcan_device* dev)
+{
+    if (!rtcan_proc_root)
+	return -EINVAL;
+
+    dev->proc_root = proc_mkdir(dev->name, rtcan_proc_root);
+    if (!dev->proc_root) {
+	printk("%s: unable to create /proc device entries\n", dev->name);
+	return -1;
+    }
+
+    proc_create_data("info", S_IFREG | S_IRUGO | S_IWUSR, dev->proc_root,
+		     &rtcan_proc_info_ops, dev);
+    proc_create_data("filters", S_IFREG | S_IRUGO | S_IWUSR, dev->proc_root,
+		     &rtcan_proc_filter_ops, dev);
+    return 0;
+
+}
+
+
+static int rtcan_proc_register(void)
+{
+    rtcan_proc_root = proc_mkdir("rtcan", NULL);
+    if (!rtcan_proc_root) {
+	printk("rtcan: unable to initialize /proc entries\n");
+	return -1;
+    }
+
+    proc_create("devices", S_IFREG | S_IRUGO | S_IWUSR, rtcan_proc_root,
+		&rtcan_proc_devices_ops);
+    proc_create("version", S_IFREG | S_IRUGO | S_IWUSR, rtcan_proc_root,
+		&rtcan_proc_version_ops);
+    proc_create("sockets", S_IFREG | S_IRUGO | S_IWUSR, rtcan_proc_root,
+		&rtcan_proc_sockets_ops);
+    return 0;
+}
+
+
+
+static void rtcan_proc_unregister(void)
+{
+    remove_proc_entry("devices", rtcan_proc_root);
+    remove_proc_entry("version", rtcan_proc_root);
+    remove_proc_entry("sockets", rtcan_proc_root);
+    remove_proc_entry("rtcan", 0);
+}
+#endif  /* CONFIG_PROC_FS */
+
+
+
+int __init rtcan_init(void)
+{
+    int err = 0;
+
+    if (!rtdm_available())
+	return -ENOSYS;
+
+    printk("RT-Socket-CAN %d.%d.%d - %s\n",
+	   RTCAN_MAJOR_VER, RTCAN_MINOR_VER, RTCAN_BUGFIX_VER,
+	   rtcan_rtdm_provider_name);
+
+    if ((err = rtcan_raw_proto_register()) != 0)
+	goto out;
+
+#ifdef CONFIG_PROC_FS
+    if ((err = rtcan_proc_register()) != 0)
+	goto out;
+#endif
+
+ out:
+    return err;
+}
+
+
+void __exit rtcan_exit(void)
+{
+    rtcan_raw_proto_unregister();
+#ifdef CONFIG_PROC_FS
+    rtcan_proc_unregister();
+#endif
+
+    printk("rtcan: unloaded\n");
+}
+
+
+module_init(rtcan_init);
+module_exit(rtcan_exit);
+++ linux-patched/drivers/xenomai/can/rtcan_list.h	2022-03-21 12:58:29.199890814 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/rtcan_dev.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * List management for the RTDM RTCAN device driver
+ *
+ * Copyright (C) 2005,2006 Sebastian Smolorz
+ *                         <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __RTCAN_LIST_H_
+#define __RTCAN_LIST_H_
+
+#include "rtcan_socket.h"
+
+
+/*
+ * List element in a singly linked list used for registering reception sockets.
+ * Every struct can_filter bound to a socket gets such a list entry. There is
+ * no member for the CAN interface because each CAN controller owns its own
+ * reception list: when a CAN message is received, the interface it came from
+ * is already known, which keeps the search time short.
+ */
+struct rtcan_recv {
+    can_filter_t            can_filter;     /* filter used for deciding if
+					     *   a socket wants to get a CAN
+					     *   message */
+    unsigned int            match_count;    /* count accepted messages */
+    struct rtcan_socket     *sock;          /* pointer to registered socket
+					     */
+    struct rtcan_recv       *next;          /* pointer to next list element
+					     */
+};
+
+
+/*
+ *  Element in a TX wait queue.
+ *
+ *  Every socket holds a TX wait queue where all RT tasks are queued when they
+ *  are blocked while waiting to be able to transmit a message via this socket.
+ *
+ *  Every sender holds its own element.
+ */
+struct tx_wait_queue {
+    struct list_head        tx_wait_list;   /* List pointers */
+    rtdm_task_t             *rt_task;       /* Pointer to task handle */
+};
+
+
+/* Spinlock for all reception lists and also for some members in
+ * struct rtcan_socket */
+extern rtdm_lock_t rtcan_recv_list_lock;
+
+
+#endif  /* __RTCAN_LIST_H_ */
+++ linux-patched/drivers/xenomai/can/rtcan_dev.c	2022-03-21 12:58:29.196890843 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/rtcan_virt.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Derived from RTnet project file stack/rtdev.c:
+ *
+ * Copyright (C) 1999       Lineo, Inc
+ *               1999, 2002 David A. Schleef <ds@schleef.org>
+ *               2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *               2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/if.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+
+#include "rtcan_internal.h"
+#include "rtcan_dev.h"
+
+
+static struct rtcan_device *rtcan_devices[RTCAN_MAX_DEVICES];
+static DEFINE_RTDM_LOCK(rtcan_devices_rt_lock);
+
+static int rtcan_global_init_done;
+
+DEFINE_SEMAPHORE(rtcan_devices_nrt_lock);
+
+/* Spinlock for all reception lists and also for some members in
+ * struct rtcan_socket */
+rtdm_lock_t rtcan_socket_lock;
+
+/* Spinlock for all reception lists and also for some members in
+ * struct rtcan_socket */
+rtdm_lock_t rtcan_recv_list_lock;
+
+
+
+static inline void rtcan_global_init(void)
+{
+    if (!rtcan_global_init_done) {
+	rtdm_lock_init(&rtcan_socket_lock);
+	rtdm_lock_init(&rtcan_recv_list_lock);
+	rtcan_global_init_done = 1;
+    }
+}
+
+
+static inline struct rtcan_device *__rtcan_dev_get_by_name(const char *name)
+{
+    int i;
+    struct rtcan_device *dev;
+
+
+    for (i = 0; i < RTCAN_MAX_DEVICES; i++) {
+	dev = rtcan_devices[i];
+	if ((dev != NULL) && (strncmp(dev->name, name, IFNAMSIZ) == 0))
+	    return dev;
+    }
+    return NULL;
+}
+
+
+struct rtcan_device *rtcan_dev_get_by_name(const char *name)
+{
+    struct rtcan_device *dev;
+#ifdef RTCAN_USE_REFCOUNT
+    rtdm_lockctx_t context;
+#endif
+
+
+#ifdef RTCAN_USE_REFCOUNT
+    rtdm_lock_get_irqsave(&rtcan_devices_rt_lock, context);
+#endif
+
+    dev = __rtcan_dev_get_by_name(name);
+
+#ifdef RTCAN_USE_REFCOUNT
+    if (dev != NULL)
+	atomic_inc(&dev->refcount);
+    rtdm_lock_put_irqrestore(&rtcan_devices_rt_lock, context);
+#endif
+
+    return dev;
+}
+
+
+static inline struct rtcan_device *__rtcan_dev_get_by_index(int ifindex)
+{
+    return rtcan_devices[ifindex - 1];
+}
+
+
+struct rtcan_device *rtcan_dev_get_by_index(int ifindex)
+{
+    struct rtcan_device *dev;
+#ifdef RTCAN_USE_REFCOUNT
+    rtdm_lockctx_t context;
+#endif
+
+
+    if ((ifindex <= 0) || (ifindex > RTCAN_MAX_DEVICES))
+	return NULL;
+
+#ifdef RTCAN_USE_REFCOUNT
+    rtdm_lock_get_irqsave(&rtcan_devices_rt_lock, context);
+#endif
+
+    dev = __rtcan_dev_get_by_index(ifindex);
+
+#ifdef RTCAN_USE_REFCOUNT
+    if (dev != NULL)
+	atomic_inc(&dev->refcount);
+    rtdm_lock_put_irqrestore(&rtcan_devices_rt_lock, context);
+#endif
+
+    return dev;
+}
+
+
+void rtcan_dev_alloc_name(struct rtcan_device *dev, const char *mask)
+{
+    char buf[IFNAMSIZ];
+    struct rtcan_device *tmp;
+    int i;
+
+
+    for (i = 0; i < RTCAN_MAX_DEVICES; i++) {
+	ksformat(buf, IFNAMSIZ, mask, i);
+	if ((tmp = rtcan_dev_get_by_name(buf)) == NULL) {
+	    strncpy(dev->name, buf, IFNAMSIZ);
+	    break;
+	}
+#ifdef RTCAN_USE_REFCOUNT
+	else
+	    rtcan_dev_dereference(tmp);
+#endif
+    }
+}
+
+
+struct rtcan_device *rtcan_dev_alloc(int sizeof_priv, int sizeof_board_priv)
+{
+    struct rtcan_device *dev;
+    struct rtcan_recv *recv_list_elem;
+    int alloc_size;
+    int j;
+
+
+    alloc_size = sizeof(*dev) + sizeof_priv + sizeof_board_priv;
+
+    dev = (struct rtcan_device *)kmalloc(alloc_size, GFP_KERNEL);
+    if (dev == NULL) {
+	printk(KERN_ERR "rtcan: cannot allocate rtcan device\n");
+	return NULL;
+    }
+
+    memset(dev, 0, alloc_size);
+
+    sema_init(&dev->nrt_lock, 1);
+
+    rtdm_lock_init(&dev->device_lock);
+
+    /* Init TX semaphore; it is destroyed again as soon as
+     * stop mode is set */
+    rtdm_sem_init(&dev->tx_sem, 0);
+#ifdef RTCAN_USE_REFCOUNT
+    atomic_set(&dev->refcount, 0);
+#endif
+
+    /* Initialize receive list */
+    dev->empty_list = recv_list_elem = dev->receivers;
+    for (j = 0; j < RTCAN_MAX_RECEIVERS - 1; j++, recv_list_elem++)
+	recv_list_elem->next = recv_list_elem + 1;
+    recv_list_elem->next = NULL;
+    dev->free_entries = RTCAN_MAX_RECEIVERS;
+
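+    /*
+     * The controller-private and board-private areas live in the same
+     * allocation, immediately behind struct rtcan_device.
+     */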
+    if (sizeof_priv)
+	dev->priv = (void *)((unsigned long)dev + sizeof(*dev));
+    if (sizeof_board_priv)
+	dev->board_priv = (void *)((unsigned long)dev + sizeof(*dev) + sizeof_priv);
+
+    return dev;
+}
+
+void rtcan_dev_free (struct rtcan_device *dev)
+{
+    if (dev != NULL) {
+	rtdm_sem_destroy(&dev->tx_sem);
+	kfree(dev);
+    }
+}
+
+
+static inline int __rtcan_dev_new_index(void)
+{
+    int i;
+
+
+    for (i = 0; i < RTCAN_MAX_DEVICES; i++)
+	if (rtcan_devices[i] == NULL)
+	     return i+1;
+
+    return -ENOMEM;
+}
+
+
+int rtcan_dev_register(struct rtcan_device *dev)
+{
+    rtdm_lockctx_t context;
+    int ret;
+
+    down(&rtcan_devices_nrt_lock);
+
+    rtcan_global_init();
+
+    if ((ret = __rtcan_dev_new_index()) < 0) {
+	up(&rtcan_devices_nrt_lock);
+	return ret;
+    }
+    dev->ifindex = ret;
+
+    if (strchr(dev->name,'%') != NULL)
+	rtcan_dev_alloc_name(dev, dev->name);
+
+    if (__rtcan_dev_get_by_name(dev->name) != NULL) {
+	up(&rtcan_devices_nrt_lock);
+	return -EEXIST;
+    }
+
+    rtdm_lock_get_irqsave(&rtcan_devices_rt_lock, context);
+
+    rtcan_devices[dev->ifindex - 1] = dev;
+
+    rtdm_lock_put_irqrestore(&rtcan_devices_rt_lock, context);
+    rtcan_dev_create_proc(dev);
+
+    up(&rtcan_devices_nrt_lock);
+
+    printk("rtcan: registered %s\n", dev->name);
+
+    return 0;
+}
+
+
+int rtcan_dev_unregister(struct rtcan_device *dev)
+{
+    rtdm_lockctx_t context;
+
+
+    RTCAN_ASSERT(dev->ifindex != 0,
+		 printk("RTCAN: device %s/%p was not registered\n",
+			dev->name, dev); return -ENODEV;);
+
+    /* A running device cannot be unregistered; the caller must close it first. */
+    if (CAN_STATE_OPERATING(dev->state))
+	return -EBUSY;
+
+    down(&rtcan_devices_nrt_lock);
+
+    rtcan_dev_remove_proc(dev);
+
+    rtdm_lock_get_irqsave(&rtcan_devices_rt_lock, context);
+
+#ifdef RTCAN_USE_REFCOUNT
+    while (atomic_read(&dev->refcount) > 0) {
+	rtdm_lock_put_irqrestore(&rtcan_devices_rt_lock, context);
+	up(&rtcan_devices_nrt_lock);
+
+	RTCAN_DBG("RTCAN: unregistering %s deferred (refcount = %d)\n",
+		  dev->name, atomic_read(&dev->refcount));
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(1*HZ); /* wait a second */
+
+	down(&rtcan_devices_nrt_lock);
+	rtdm_lock_get_irqsave(&rtcan_devices_rt_lock, context);
+    }
+#endif
+    rtcan_devices[dev->ifindex - 1] = NULL;
+
+    rtdm_lock_put_irqrestore(&rtcan_devices_rt_lock, context);
+    up(&rtcan_devices_nrt_lock);
+
+#ifdef RTCAN_USE_REFCOUNT
+    RTCAN_ASSERT(atomic_read(&dev->refcount) == 0,
+		 printk("RTCAN: dev reference counter < 0!\n"););
+#endif
+
+    printk("RTCAN: unregistered %s\n", dev->name);
+
+    return 0;
+}
+
+
+EXPORT_SYMBOL_GPL(rtcan_socket_lock);
+EXPORT_SYMBOL_GPL(rtcan_recv_list_lock);
+
+EXPORT_SYMBOL_GPL(rtcan_dev_free);
+
+EXPORT_SYMBOL_GPL(rtcan_dev_alloc);
+EXPORT_SYMBOL_GPL(rtcan_dev_alloc_name);
+
+EXPORT_SYMBOL_GPL(rtcan_dev_register);
+EXPORT_SYMBOL_GPL(rtcan_dev_unregister);
+
+EXPORT_SYMBOL_GPL(rtcan_dev_get_by_name);
+EXPORT_SYMBOL_GPL(rtcan_dev_get_by_index);
+++ linux-patched/drivers/xenomai/can/rtcan_virt.c	2022-03-21 12:58:29.192890882 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/rtcan_version.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+
+#include <linux/module.h>
+#include <rtdm/driver.h>
+#include <rtdm/can.h>
+#include "rtcan_dev.h"
+#include "rtcan_raw.h"
+
+#define RTCAN_DEV_NAME          "rtcan%d"
+#define RTCAN_DRV_NAME          "VIRT"
+#define RTCAN_MAX_VIRT_DEVS     8
+
+#define VIRT_TX_BUFS            1
+
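+/*
+ * Each registered device is one port on a single virtual CAN bus: a
+ * frame transmitted on one port is delivered to every other active
+ * port, and looped back to the sender when loopback is enabled (see
+ * rtcan_virt_start_xmit()).
+ */
+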
+static char *virt_ctlr_name  = "<virtual>";
+static char *virt_board_name = "<virtual>";
+
+MODULE_AUTHOR("Jan Kiszka <jan.kiszka@web.de>");
+MODULE_DESCRIPTION("Virtual RT-Socket-CAN driver");
+MODULE_LICENSE("GPL");
+
+static unsigned int devices = 2;
+
+module_param(devices, uint, 0400);
+MODULE_PARM_DESC(devices, "Number of devices on the virtual bus");
+
+static struct rtcan_device *rtcan_virt_devs[RTCAN_MAX_VIRT_DEVS];
+
+
+static int rtcan_virt_start_xmit(struct rtcan_device *tx_dev,
+				 can_frame_t *tx_frame)
+{
+	int i;
+	struct rtcan_device *rx_dev;
+	struct rtcan_skb skb;
+	struct rtcan_rb_frame *rx_frame = &skb.rb_frame;
+	rtdm_lockctx_t lock_ctx;
+
+	/* we can transmit immediately again */
+	rtdm_sem_up(&tx_dev->tx_sem);
+
+	skb.rb_frame_size = EMPTY_RB_FRAME_SIZE;
+
+	rx_frame->can_dlc = tx_frame->can_dlc;
+	rx_frame->can_id  = tx_frame->can_id;
+
+	if (!(tx_frame->can_id & CAN_RTR_FLAG)) {
+		memcpy(rx_frame->data, tx_frame->data, tx_frame->can_dlc);
+		skb.rb_frame_size += tx_frame->can_dlc;
+	}
+
+	rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
+	rtdm_lock_get(&rtcan_socket_lock);
+
+
+	/* Deliver to all other devices on the virtual bus */
+	for (i = 0; i < devices; i++) {
+		rx_dev = rtcan_virt_devs[i];
+		if (rx_dev->state == CAN_STATE_ACTIVE) {
+			if (tx_dev != rx_dev) {
+				rx_frame->can_ifindex = rx_dev->ifindex;
+				rtcan_rcv(rx_dev, &skb);
+			} else if (rtcan_loopback_pending(tx_dev))
+				rtcan_loopback(tx_dev);
+		}
+	}
+	rtdm_lock_put(&rtcan_socket_lock);
+	rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
+
+	return 0;
+}
+
+
+static int rtcan_virt_set_mode(struct rtcan_device *dev, can_mode_t mode,
+			       rtdm_lockctx_t *lock_ctx)
+{
+	int err = 0;
+
+	switch (mode) {
+	case CAN_MODE_STOP:
+		dev->state = CAN_STATE_STOPPED;
+		/* Wake up waiting senders */
+		rtdm_sem_destroy(&dev->tx_sem);
+		break;
+
+	case CAN_MODE_START:
+		rtdm_sem_init(&dev->tx_sem, VIRT_TX_BUFS);
+		dev->state = CAN_STATE_ACTIVE;
+		break;
+
+	default:
+		err = -EOPNOTSUPP;
+	}
+
+	return err;
+}
+
+
+static int __init rtcan_virt_init_one(int idx)
+{
+	struct rtcan_device *dev;
+	int err;
+
+	if ((dev = rtcan_dev_alloc(0, 0)) == NULL)
+		return -ENOMEM;
+
+	dev->ctrl_name = virt_ctlr_name;
+	dev->board_name = virt_board_name;
+
+	rtcan_virt_set_mode(dev, CAN_MODE_STOP, NULL);
+
+	strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+	dev->hard_start_xmit = rtcan_virt_start_xmit;
+	dev->do_set_mode = rtcan_virt_set_mode;
+
+	/* Register RTDM device */
+	err = rtcan_dev_register(dev);
+	if (err) {
+		printk(KERN_ERR "ERROR %d while trying to register RTCAN device!\n", err);
+		goto error_out;
+	}
+
+	/* Remember initialized devices */
+	rtcan_virt_devs[idx] = dev;
+
+	printk("%s: %s driver loaded\n", dev->name, RTCAN_DRV_NAME);
+
+	return 0;
+
+ error_out:
+	rtcan_dev_free(dev);
+	return err;
+}
+
+
+/** Init module */
+static int __init rtcan_virt_init(void)
+{
+	int i, err = 0;
+
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	for (i = 0; i < devices; i++) {
+		err = rtcan_virt_init_one(i);
+		if (err) {
+			while (--i >= 0) {
+				struct rtcan_device *dev = rtcan_virt_devs[i];
+
+				rtcan_dev_unregister(dev);
+				rtcan_dev_free(dev);
+			}
+			break;
+		}
+	}
+
+	return err;
+}
+
+
+/** Cleanup module */
+static void __exit rtcan_virt_exit(void)
+{
+	int i;
+	struct rtcan_device *dev;
+
+	for (i = 0; i < devices; i++) {
+		dev = rtcan_virt_devs[i];
+
+		printk("Unloading %s device %s\n", RTCAN_DRV_NAME, dev->name);
+
+		rtcan_virt_set_mode(dev, CAN_MODE_STOP, NULL);
+		rtcan_dev_unregister(dev);
+		rtcan_dev_free(dev);
+	}
+}
+
+module_init(rtcan_virt_init);
+module_exit(rtcan_virt_exit);
+++ linux-patched/drivers/xenomai/can/rtcan_version.h	2022-03-21 12:58:29.189890912 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/can/rtcan_raw.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __RTCAN_VERSION_H_
+#define __RTCAN_VERSION_H_
+
+#define RTCAN_MAJOR_VER    0
+#define RTCAN_MINOR_VER   90
+#define RTCAN_BUGFIX_VER   2
+
+#endif /* __RTCAN_VERSION_H_ */
+++ linux-patched/drivers/xenomai/can/rtcan_raw.h	2022-03-21 12:58:29.185890951 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpiopwm/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __RTCAN_RAW_H_
+#define __RTCAN_RAW_H_
+
+#ifdef __KERNEL__
+
+int rtcan_raw_ioctl_dev(struct rtdm_fd *fd, int request, void *arg);
+
+int rtcan_raw_check_filter(struct rtcan_socket *sock,
+			   int ifindex, struct rtcan_filter_list *flist);
+int rtcan_raw_add_filter(struct rtcan_socket *sock, int ifindex);
+void rtcan_raw_remove_filter(struct rtcan_socket *sock);
+
+void rtcan_rcv(struct rtcan_device *rtcandev, struct rtcan_skb *skb);
+
+void rtcan_loopback(struct rtcan_device *rtcandev);
+#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK
+#define rtcan_loopback_enabled(sock) (sock->loopback)
+#define rtcan_loopback_pending(dev) (dev->tx_socket)
+#else /* !CONFIG_XENO_DRIVERS_CAN_LOOPBACK */
+#define rtcan_loopback_enabled(sock) (0)
+#define rtcan_loopback_pending(dev) (0)
+#endif /* CONFIG_XENO_DRIVERS_CAN_LOOPBACK */
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_BUS_ERR
+void __rtcan_raw_enable_bus_err(struct rtcan_socket *sock);
+static inline void rtcan_raw_enable_bus_err(struct rtcan_socket *sock)
+{
+    if ((sock->err_mask & CAN_ERR_BUSERROR))
+	__rtcan_raw_enable_bus_err(sock);
+}
+#else
+#define rtcan_raw_enable_bus_err(sock)
+#endif
+
+int __init rtcan_raw_proto_register(void);
+void __exit rtcan_raw_proto_unregister(void);
+
+#endif  /* __KERNEL__ */
+
+#endif  /* __RTCAN_RAW_H_ */
+++ linux-patched/drivers/xenomai/gpiopwm/Kconfig	2022-03-21 12:58:29.182890980 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpiopwm/gpiopwm.c	1970-01-01 01:00:00.000000000 +0100
+menu "GPIOPWM support"
+
+config XENO_DRIVERS_GPIOPWM
+	tristate "GPIOPWM driver"
+	help
+
+	An RTDM-based GPIO PWM generator driver
+
+endmenu
+++ linux-patched/drivers/xenomai/gpiopwm/gpiopwm.c	2022-03-21 12:58:29.178891019 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpiopwm/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2015 Jorge Ramirez <jro@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <rtdm/driver.h>
+#include <rtdm/gpiopwm.h>
+
+MODULE_AUTHOR("Jorge Ramirez <jro@xenomai.org>");
+MODULE_DESCRIPTION("PWM driver");
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL");
+
+#define MAX_DUTY_CYCLE		100
+#define MAX_SAMPLES		(MAX_DUTY_CYCLE + 1)
+
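+/*
+ * Illustrative usage sketch (not part of the driver): assuming the
+ * device shows up under the usual RTDM naming scheme, an application
+ * would typically do
+ *
+ *   int fd = open("/dev/rtdm/gpiopwm0", O_RDWR);
+ *   struct gpiopwm conf = { .gpio = ..., .period = ...,
+ *                           .range_min = ..., .range_max = ...,
+ *                           .duty_cycle = 50 };
+ *   ioctl(fd, GPIOPWM_RTIOC_SET_CONFIG, &conf);       // non-RT setup
+ *   ioctl(fd, GPIOPWM_RTIOC_START);
+ *   ioctl(fd, GPIOPWM_RTIOC_CHANGE_DUTY_CYCLE, 75);   // from RT context
+ *   ioctl(fd, GPIOPWM_RTIOC_STOP);
+ */
+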
+struct gpiopwm_base_signal {
+	unsigned long period;
+};
+
+struct gpiopwm_duty_signal {
+	unsigned int range_min;
+	unsigned int range_max;
+	unsigned long period;
+	unsigned int cycle;
+};
+
+struct gpiopwm_control {
+	struct gpiopwm_duty_signal duty;
+	unsigned int configured;
+	unsigned int update;
+};
+
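+/*
+ * Per-device state: base_timer ticks at the PWM period and drives the
+ * GPIO high, then arms duty_timer as a one-shot which pulls the GPIO
+ * low again once the active part of the cycle has elapsed.
+ */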
+struct gpiopwm_priv {
+	struct gpiopwm_base_signal base;
+	struct gpiopwm_duty_signal duty;
+	struct gpiopwm_control ctrl;
+
+	rtdm_timer_t base_timer;
+	rtdm_timer_t duty_timer;
+
+	int gpio;
+};
+
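+/*
+ * Division by 100 without a division instruction: 0x28f5c29 is the
+ * closest 32-bit approximation of 2^32 / 100, so multiplying and
+ * shifting right by 32 yields dividend / 100 for the small operands
+ * used here.
+ */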
+static inline int div100(long long dividend)
+{
+	const long long divisor = 0x28f5c29;
+	return ((divisor * dividend) >> 32) & 0xffffffff;
+}
+
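+/*
+ * Interpolate the active time linearly between range_min and range_max
+ * according to the duty cycle percentage; the final scaling by 1000
+ * presumably converts the configured range into the nanosecond
+ * resolution expected by the RTDM timers.
+ */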
+static inline unsigned long duty_period(struct gpiopwm_duty_signal *p)
+{
+	unsigned long period;
+
+	period = p->range_min + div100((p->range_max - p->range_min) * p->cycle);
+	return period * 1000;
+}
+
+static void gpiopwm_handle_base_timer(rtdm_timer_t *timer)
+{
+	struct gpiopwm_priv *ctx = container_of(timer, struct gpiopwm_priv,
+						base_timer);
+	gpio_set_value(ctx->gpio, 1);
+
+	/* one shot timer to avoid carrying over errors */
+	rtdm_timer_start_in_handler(&ctx->duty_timer, ctx->duty.period, 0,
+		RTDM_TIMERMODE_RELATIVE);
+
+	if (ctx->ctrl.update) {
+		ctx->duty.period = ctx->ctrl.duty.period;
+		ctx->duty.cycle = ctx->ctrl.duty.cycle;
+		ctx->ctrl.update = 0;
+	}
+}
+
+static void gpiopwm_handle_duty_timer(rtdm_timer_t *timer)
+{
+	struct gpiopwm_priv *ctx = container_of(timer, struct gpiopwm_priv,
+						duty_timer);
+	gpio_set_value(ctx->gpio, 0);
+}
+
+static inline int gpiopwm_config(struct rtdm_fd *fd, struct gpiopwm *conf)
+{
+	struct rtdm_dev_context *dev_ctx = rtdm_fd_to_context(fd);
+	struct gpiopwm_priv *ctx = rtdm_fd_to_private(fd);
+	int ret;
+
+	if (ctx->ctrl.configured)
+		return -EINVAL;
+
+	if (conf->duty_cycle > MAX_DUTY_CYCLE)
+		return -EINVAL;
+
+	ret = gpio_request(conf->gpio, dev_ctx->device->name);
+	if (ret < 0) {
+		ctx->gpio = -1;
+		return ret;
+	}
+
+	ret = gpio_direction_output(conf->gpio, 0);
+	if (ret < 0)
+		return ret;
+
+	gpio_set_value(conf->gpio, 0);
+
+	ctx->duty.range_min = ctx->ctrl.duty.range_min = conf->range_min;
+	ctx->duty.range_max = ctx->ctrl.duty.range_max = conf->range_max;
+	ctx->duty.cycle = conf->duty_cycle;
+	ctx->base.period = conf->period;
+	ctx->gpio = conf->gpio;
+	ctx->duty.period = duty_period(&ctx->duty);
+
+	rtdm_timer_init(&ctx->base_timer, gpiopwm_handle_base_timer, "base_timer");
+	rtdm_timer_init(&ctx->duty_timer, gpiopwm_handle_duty_timer, "duty_timer");
+
+	ctx->ctrl.configured = 1;
+
+	return 0;
+}
+
+static inline int gpiopwm_change_duty_cycle(struct gpiopwm_priv *ctx, unsigned int cycle)
+{
+	if (cycle > MAX_DUTY_CYCLE)
+		return -EINVAL;
+
+	/* prepare the new data on the calling thread */
+	ctx->ctrl.duty.cycle = cycle;
+	ctx->ctrl.duty.period = duty_period(&ctx->ctrl.duty);
+
+	/* update data on the next base signal timeout */
+	ctx->ctrl.update = 1;
+
+	return 0;
+}
+
+static inline int gpiopwm_stop(struct rtdm_fd *fd)
+{
+	struct gpiopwm_priv *ctx = rtdm_fd_to_private(fd);
+
+	if (!ctx->ctrl.configured)
+		return -EINVAL;
+
+	gpio_set_value(ctx->gpio, 0);
+
+	rtdm_timer_stop(&ctx->base_timer);
+	rtdm_timer_stop(&ctx->duty_timer);
+
+	return 0;
+}
+
+static inline int gpiopwm_start(struct rtdm_fd *fd)
+{
+	struct gpiopwm_priv *ctx = rtdm_fd_to_private(fd);
+
+	if (!ctx->ctrl.configured)
+		return -EINVAL;
+
+	/* update duty cycle on next timeout */
+	ctx->ctrl.update = 1;
+
+	/* start the base signal tick */
+	rtdm_timer_start(&ctx->base_timer, ctx->base.period, ctx->base.period,
+			 RTDM_TIMERMODE_RELATIVE);
+
+	return 0;
+}
+
+static int gpiopwm_ioctl_rt(struct rtdm_fd *fd, unsigned int request, void __user *arg)
+{
+	struct gpiopwm_priv *ctx = rtdm_fd_to_private(fd);
+
+	switch (request) {
+	case GPIOPWM_RTIOC_SET_CONFIG:
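+		/*
+		 * Configuration requests a GPIO and initializes timers,
+		 * which cannot be done from real-time context; return
+		 * -ENOSYS so the request is served by the non-RT ioctl
+		 * handler instead.
+		 */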
+		return -ENOSYS;
+	case GPIOPWM_RTIOC_CHANGE_DUTY_CYCLE:
+		return gpiopwm_change_duty_cycle(ctx, (unsigned long) arg);
+	case GPIOPWM_RTIOC_START:
+		return gpiopwm_start(fd);
+	case GPIOPWM_RTIOC_STOP:
+		return gpiopwm_stop(fd);
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int gpiopwm_ioctl_nrt(struct rtdm_fd *fd, unsigned int request, void __user *arg)
+{
+	struct gpiopwm conf;
+
+	switch (request) {
+	case GPIOPWM_RTIOC_SET_CONFIG:
+		if (!rtdm_rw_user_ok(fd, arg, sizeof(conf)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd, &conf, arg, sizeof(conf));
+		return gpiopwm_config(fd, &conf);
+	case GPIOPWM_RTIOC_GET_CONFIG:
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int gpiopwm_open(struct rtdm_fd *fd, int oflags)
+{
+	struct gpiopwm_priv *ctx = rtdm_fd_to_private(fd);
+
+	ctx->ctrl.configured = 0;
+	ctx->gpio = -1;
+
+	return 0;
+}
+
+static void gpiopwm_close(struct rtdm_fd *fd)
+{
+	struct gpiopwm_priv *ctx = rtdm_fd_to_private(fd);
+
+	if (ctx->gpio >= 0)
+		gpio_free(ctx->gpio);
+
+	if (!ctx->ctrl.configured)
+		return;
+
+	rtdm_timer_destroy(&ctx->base_timer);
+	rtdm_timer_destroy(&ctx->duty_timer);
+}
+
+static struct rtdm_driver gpiopwm_driver = {
+	.profile_info           = RTDM_PROFILE_INFO(gpiopwm,
+						    RTDM_CLASS_PWM,
+						    RTDM_SUBCLASS_GENERIC,
+						    RTPWM_PROFILE_VER),
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.device_count		= 8,
+	.context_size		= sizeof(struct gpiopwm_priv),
+	.ops = {
+		.open		= gpiopwm_open,
+		.close		= gpiopwm_close,
+		.ioctl_rt	= gpiopwm_ioctl_rt,
+		.ioctl_nrt	= gpiopwm_ioctl_nrt,
+	},
+};
+
+static struct rtdm_device device[8] = {
+	[0 ... 7] = {
+		.driver = &gpiopwm_driver,
+		.label = "gpiopwm%d",
+	}
+};
+
+static int __init __gpiopwm_init(void)
+{
+	int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(device); i++) {
+		ret = rtdm_dev_register(device + i);
+		if (ret)
+			goto fail;
+	}
+
+	return 0;
+fail:
+	while (i-- > 0)
+		rtdm_dev_unregister(device + i);
+
+	return ret;
+}
+
+static void __exit __gpiopwm_exit(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(device); i++)
+		rtdm_dev_unregister(device + i);
+}
+
+module_init(__gpiopwm_init);
+module_exit(__gpiopwm_exit);
+++ linux-patched/drivers/xenomai/gpiopwm/Makefile	2022-03-21 12:58:29.174891058 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:2 @
--- linux/drivers/xenomai/ipc/internal.h	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/kernel -I$(srctree)/include/xenomai/
+
+obj-$(CONFIG_XENO_DRIVERS_GPIOPWM) += xeno_gpiopwm.o
+
+xeno_gpiopwm-y := gpiopwm.o
+++ linux-patched/drivers/xenomai/ipc/internal.h	2022-03-21 12:58:29.171891087 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/ipc/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/**
+ * This file is part of the Xenomai project.
+ *
+ * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTIPC_INTERNAL_H
+#define _RTIPC_INTERNAL_H
+
+#include <linux/uio.h>
+#include <linux/time.h>
+#include <cobalt/kernel/registry.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/select.h>
+#include <rtdm/rtdm.h>
+#include <rtdm/compat.h>
+#include <rtdm/driver.h>
+
+struct rtipc_protocol;
+
+struct rtipc_private {
+	struct rtipc_protocol *proto;
+	DECLARE_XNSELECT(send_block);
+	DECLARE_XNSELECT(recv_block);
+	void *state;
+};
+
+struct rtipc_protocol {
+	const char *proto_name;
+	int proto_statesz;
+	int (*proto_init)(void);
+	void (*proto_exit)(void);
+	struct {
+		int (*socket)(struct rtdm_fd *fd);
+		void (*close)(struct rtdm_fd *fd);
+		ssize_t (*recvmsg)(struct rtdm_fd *fd,
+				   struct user_msghdr *msg, int flags);
+		ssize_t (*sendmsg)(struct rtdm_fd *fd,
+				   const struct user_msghdr *msg, int flags);
+		ssize_t (*read)(struct rtdm_fd *fd,
+				void *buf, size_t len);
+		ssize_t (*write)(struct rtdm_fd *fd,
+				 const void *buf, size_t len);
+		int (*ioctl)(struct rtdm_fd *fd,
+			     unsigned int request, void *arg);
+		unsigned int (*pollstate)(struct rtdm_fd *fd);
+	} proto_ops;
+};
+
+static inline void *rtipc_fd_to_state(struct rtdm_fd *fd)
+{
+	struct rtipc_private *p = rtdm_fd_to_private(fd);
+	return p->state;
+}
+
+static inline nanosecs_rel_t rtipc_timeval_to_ns(const struct __kernel_old_timeval *tv)
+{
+	nanosecs_rel_t ns = tv->tv_usec * 1000;
+
+	if (tv->tv_sec)
+		ns += (nanosecs_rel_t)tv->tv_sec * 1000000000UL;
+
+	return ns;
+}
+
+static inline void rtipc_ns_to_timeval(struct __kernel_old_timeval *tv, nanosecs_rel_t ns)
+{
+	unsigned long nsecs;
+
+	tv->tv_sec = xnclock_divrem_billion(ns, &nsecs);
+	tv->tv_usec = nsecs / 1000;
+}
+
+int rtipc_get_sockaddr(struct rtdm_fd *fd,
+		       struct sockaddr_ipc **saddrp,
+		       const void *arg);
+
+int rtipc_put_sockaddr(struct rtdm_fd *fd, void *arg,
+		       const struct sockaddr_ipc *saddr);
+
+int rtipc_get_sockoptout(struct rtdm_fd *fd,
+			 struct _rtdm_getsockopt_args *sopt,
+			 const void *arg);
+
+int rtipc_put_sockoptout(struct rtdm_fd *fd, void *arg,
+			 const struct _rtdm_getsockopt_args *sopt);
+
+int rtipc_get_sockoptin(struct rtdm_fd *fd,
+			struct _rtdm_setsockopt_args *sopt,
+			const void *arg);
+
+int rtipc_get_timeval(struct rtdm_fd *fd, struct __kernel_old_timeval *tv,
+		      const void *arg, size_t arglen);
+
+int rtipc_put_timeval(struct rtdm_fd *fd, void *arg,
+		      const struct __kernel_old_timeval *tv, size_t arglen);
+
+int rtipc_get_length(struct rtdm_fd *fd, size_t *lenp,
+		     const void *arg, size_t arglen);
+
+int rtipc_get_arg(struct rtdm_fd *fd, void *dst, const void *src,
+		  size_t len);
+
+int rtipc_put_arg(struct rtdm_fd *fd, void *dst, const void *src,
+		  size_t len);
+
+extern struct rtipc_protocol xddp_proto_driver;
+
+extern struct rtipc_protocol iddp_proto_driver;
+
+extern struct rtipc_protocol bufp_proto_driver;
+
+extern struct xnptree rtipc_ptree;
+
+#define rtipc_wait_context		xnthread_wait_context
+#define rtipc_prepare_wait		xnthread_prepare_wait
+#define rtipc_get_wait_context		xnthread_get_wait_context
+#define rtipc_peek_wait_head(obj)	xnsynch_peek_pendq(&(obj)->synch_base)
+
+#define COMPAT_CASE(__op)	case __op __COMPAT_CASE(__op  ## _COMPAT)
+
+#endif /* !_RTIPC_INTERNAL_H */
+++ linux-patched/drivers/xenomai/ipc/Kconfig	2022-03-21 12:58:29.167891126 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/ipc/rtipc.c	1970-01-01 01:00:00.000000000 +0100
+menu "Real-time IPC drivers"
+
+config XENO_DRIVERS_RTIPC
+	tristate "RTIPC protocol family"
+	help
+
+	This driver provides the real-time IPC protocol family
+	(PF_RTIPC) over RTDM.
+
+config XENO_DRIVERS_RTIPC_XDDP
+	depends on XENO_DRIVERS_RTIPC
+	select XENO_OPT_PIPE
+	default y
+	bool "XDDP cross-domain datagram protocol"
+	help
+
+	Xenomai's XDDP protocol enables threads to exchange datagrams
+	across the Xenomai/Linux domain boundary, using "message
+	pipes".
+
+	Message pipes are bi-directional FIFO communication channels
+	allowing data exchange between real-time Xenomai threads and
+	regular (i.e. non real-time) user-space processes. Message
+	pipes are datagram-based and thus natively preserve message
+	boundaries, but they can also be used in byte stream mode when
+	sending from the real-time to the non real-time domain.
+
+	The maximum number of communication ports available in the
+	system can be configured using the XENO_OPT_PIPE_NRDEV option
+	from the Nucleus menu.
+
+config XENO_DRIVERS_RTIPC_IDDP
+	depends on XENO_DRIVERS_RTIPC
+	select XENO_OPT_MAP
+	default y
+	bool "IDDP intra-domain datagram protocol"
+	help
+
+	Xenomai's IDDP protocol enables real-time threads to exchange
+	datagrams within the Xenomai domain.
+
+config XENO_OPT_IDDP_NRPORT
+	depends on XENO_DRIVERS_RTIPC_IDDP
+	int "Number of IDDP communication ports"
+	default 32
+	help
+
+	This parameter defines the number of IDDP ports available in
+	the system for creating receiver endpoints. Port numbers range
+	from 0 to CONFIG_XENO_OPT_IDDP_NRPORT - 1.
+
+config XENO_DRIVERS_RTIPC_BUFP
+	depends on XENO_DRIVERS_RTIPC
+	select XENO_OPT_MAP
+	default y
+	bool "Buffer protocol"
+	help
+
+	The buffer protocol implements a byte-oriented, one-way
+	Producer-Consumer data path, which makes it a bit faster than
+	datagram-oriented protocols. All messages written are buffered
+	into a single memory area in strict FIFO order, until read by
+	the consumer.
+
+	This protocol prevents short writes, and only allows short
+	reads when a potential deadlock situation arises (i.e. readers
+	and writers waiting for each other indefinitely), which
+	usually means that the buffer size does not fit the use peer
+	threads are making from the protocol.
+
+config XENO_OPT_BUFP_NRPORT
+	depends on XENO_DRIVERS_RTIPC_BUFP
+	int "Number of BUFP communication ports"
+	default 32
+	help
+
+	This parameter defines the number of BUFP ports available in
+	the system for creating receiver endpoints. Port numbers range
+	from 0 to CONFIG_XENO_OPT_BUFP_NRPORT - 1.
+
+endmenu
+++ linux-patched/drivers/xenomai/ipc/rtipc.c	2022-03-21 12:58:29.164891155 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/ipc/bufp.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * This file is part of the Xenomai project.
+ *
+ * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/time.h>
+#include <rtdm/ipc.h>
+#include <rtdm/compat.h>
+#include "internal.h"
+
+MODULE_DESCRIPTION("Real-time IPC interface");
+MODULE_AUTHOR("Philippe Gerum <rpm@xenomai.org>");
+MODULE_LICENSE("GPL");
+
+static struct rtipc_protocol *protocols[IPCPROTO_MAX] = {
+#ifdef CONFIG_XENO_DRIVERS_RTIPC_XDDP
+	[IPCPROTO_XDDP - 1] = &xddp_proto_driver,
+#endif
+#ifdef CONFIG_XENO_DRIVERS_RTIPC_IDDP
+	[IPCPROTO_IDDP - 1] = &iddp_proto_driver,
+#endif
+#ifdef CONFIG_XENO_DRIVERS_RTIPC_BUFP
+	[IPCPROTO_BUFP - 1] = &bufp_proto_driver,
+#endif
+};
+
+DEFINE_XNPTREE(rtipc_ptree, "rtipc");
+
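+/*
+ * Illustrative sketch (not part of this driver): applications reach
+ * these protocols through the regular socket interface, e.g.
+ *
+ *   int s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_BUFP);
+ *
+ * with the protocol-specific setup (port binding, buffer sizing, ...)
+ * done via bind()/setsockopt() and dispatched to the proto_ops of the
+ * selected protocol below.
+ */
+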
+int rtipc_get_arg(struct rtdm_fd *fd, void *dst, const void *src, size_t len)
+{
+	if (!rtdm_fd_is_user(fd)) {
+		memcpy(dst, src, len);
+		return 0;
+	}
+
+	return rtdm_copy_from_user(fd, dst, src, len);
+}
+
+int rtipc_put_arg(struct rtdm_fd *fd, void *dst, const void *src, size_t len)
+{
+	if (!rtdm_fd_is_user(fd)) {
+		memcpy(dst, src, len);
+		return 0;
+	}
+
+	return rtdm_copy_to_user(fd, dst, src, len);
+}
+
+int rtipc_get_sockaddr(struct rtdm_fd *fd, struct sockaddr_ipc **saddrp,
+		       const void *arg)
+{
+	const struct _rtdm_setsockaddr_args *p;
+	struct _rtdm_setsockaddr_args sreq;
+	int ret;
+
+	if (!rtdm_fd_is_user(fd)) {
+		p = arg;
+		if (p->addrlen > 0) {
+			if (p->addrlen != sizeof(**saddrp))
+				return -EINVAL;
+			memcpy(*saddrp, p->addr, sizeof(**saddrp));
+		} else {
+			if (p->addr)
+				return -EINVAL;
+			*saddrp = NULL;
+		}
+		return 0;
+	}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd)) {
+		struct compat_rtdm_setsockaddr_args csreq;
+		ret = rtdm_safe_copy_from_user(fd, &csreq, arg, sizeof(csreq));
+		if (ret)
+			return ret;
+		if (csreq.addrlen > 0) {
+			if (csreq.addrlen != sizeof(**saddrp))
+				return -EINVAL;
+			return rtdm_safe_copy_from_user(fd, *saddrp,
+							compat_ptr(csreq.addr),
+							sizeof(**saddrp));
+		}
+		if (csreq.addr)
+			return -EINVAL;
+
+		*saddrp = NULL;
+
+		return 0;
+	}
+#endif
+
+	ret = rtdm_safe_copy_from_user(fd, &sreq, arg, sizeof(sreq));
+	if (ret)
+		return ret;
+	if (sreq.addrlen > 0) {
+		if (sreq.addrlen != sizeof(**saddrp))
+			return -EINVAL;
+		return rtdm_safe_copy_from_user(fd, *saddrp,
+						sreq.addr, sizeof(**saddrp));
+	}
+	if (sreq.addr)
+		return -EINVAL;
+
+	*saddrp = NULL;
+
+	return 0;
+}
+
+int rtipc_put_sockaddr(struct rtdm_fd *fd, void *arg,
+		       const struct sockaddr_ipc *saddr)
+{
+	const struct _rtdm_getsockaddr_args *p;
+	struct _rtdm_getsockaddr_args sreq;
+	socklen_t len;
+	int ret;
+
+	if (!rtdm_fd_is_user(fd)) {
+		p = arg;
+		if (*p->addrlen < sizeof(*saddr))
+			return -EINVAL;
+		memcpy(p->addr, saddr, sizeof(*saddr));
+		*p->addrlen = sizeof(*saddr);
+		return 0;
+	}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd)) {
+		struct compat_rtdm_getsockaddr_args csreq;
+		ret = rtdm_safe_copy_from_user(fd, &csreq, arg, sizeof(csreq));
+		if (ret)
+			return ret;
+
+		ret = rtdm_safe_copy_from_user(fd, &len,
+					       compat_ptr(csreq.addrlen),
+					       sizeof(len));
+		if (ret)
+			return ret;
+
+		if (len < sizeof(*saddr))
+			return -EINVAL;
+
+		ret = rtdm_safe_copy_to_user(fd, compat_ptr(csreq.addr),
+					     saddr, sizeof(*saddr));
+		if (ret)
+			return ret;
+
+		len = sizeof(*saddr);
+		return rtdm_safe_copy_to_user(fd, compat_ptr(csreq.addrlen),
+					      &len, sizeof(len));
+	}
+#endif
+
+	sreq.addr = NULL;
+	sreq.addrlen = NULL;
+	ret = rtdm_safe_copy_from_user(fd, &sreq, arg, sizeof(sreq));
+	if (ret)
+		return ret;
+
+	ret = rtdm_safe_copy_from_user(fd, &len, sreq.addrlen, sizeof(len));
+	if (ret)
+		return ret;
+
+	if (len < sizeof(*saddr))
+		return -EINVAL;
+
+	ret = rtdm_safe_copy_to_user(fd, sreq.addr, saddr, sizeof(*saddr));
+	if (ret)
+		return ret;
+
+	len = sizeof(*saddr);
+
+	return rtdm_safe_copy_to_user(fd, sreq.addrlen, &len, sizeof(len));
+}
+
+int rtipc_get_sockoptout(struct rtdm_fd *fd, struct _rtdm_getsockopt_args *sopt,
+			 const void *arg)
+{
+	if (!rtdm_fd_is_user(fd)) {
+		*sopt = *(struct _rtdm_getsockopt_args *)arg;
+		return 0;
+	}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd)) {
+		struct compat_rtdm_getsockopt_args csopt;
+		int ret;
+		ret = rtdm_safe_copy_from_user(fd, &csopt, arg, sizeof(csopt));
+		if (ret)
+			return ret;
+		sopt->level = csopt.level;
+		sopt->optname = csopt.optname;
+		sopt->optval = compat_ptr(csopt.optval);
+		sopt->optlen = compat_ptr(csopt.optlen);
+		return 0;
+	}
+#endif
+
+	return rtdm_safe_copy_from_user(fd, sopt, arg, sizeof(*sopt));
+}
+
+int rtipc_put_sockoptout(struct rtdm_fd *fd, void *arg,
+			 const struct _rtdm_getsockopt_args *sopt)
+{
+	if (!rtdm_fd_is_user(fd)) {
+		*(struct _rtdm_getsockopt_args *)arg = *sopt;
+		return 0;
+	}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd)) {
+		struct compat_rtdm_getsockopt_args csopt;
+		int ret;
+		csopt.level = sopt->level;
+		csopt.optname = sopt->optname;
+		csopt.optval = ptr_to_compat(sopt->optval);
+		csopt.optlen = ptr_to_compat(sopt->optlen);
+		ret = rtdm_safe_copy_to_user(fd, arg, &csopt, sizeof(csopt));
+		if (ret)
+			return ret;
+		return 0;
+	}
+#endif
+
+	return rtdm_safe_copy_to_user(fd, arg, sopt, sizeof(*sopt));
+}
+
+int rtipc_get_sockoptin(struct rtdm_fd *fd, struct _rtdm_setsockopt_args *sopt,
+			const void *arg)
+{
+	if (!rtdm_fd_is_user(fd)) {
+		*sopt = *(struct _rtdm_setsockopt_args *)arg;
+		return 0;
+	}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd)) {
+		struct compat_rtdm_setsockopt_args csopt;
+		int ret;
+		ret = rtdm_safe_copy_from_user(fd, &csopt, arg, sizeof(csopt));
+		if (ret)
+			return ret;
+		sopt->level = csopt.level;
+		sopt->optname = csopt.optname;
+		sopt->optval = compat_ptr(csopt.optval);
+		sopt->optlen = csopt.optlen;
+		return 0;
+	}
+#endif
+
+	return rtdm_safe_copy_from_user(fd, sopt, arg, sizeof(*sopt));
+}
+
+int rtipc_get_timeval(struct rtdm_fd *fd, struct __kernel_old_timeval *tv,
+		      const void *arg, size_t arglen)
+{
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd)) {
+		if (arglen != sizeof(struct old_timeval32))
+			return -EINVAL;
+		return sys32_get_timeval(tv, arg);
+	}
+#endif
+
+	if (arglen != sizeof(*tv))
+		return -EINVAL;
+
+	if (!rtdm_fd_is_user(fd)) {
+		*tv = *(struct __kernel_old_timeval *)arg;
+		return 0;
+	}
+
+	return rtdm_safe_copy_from_user(fd, tv, arg, sizeof(*tv));
+}
+
+int rtipc_put_timeval(struct rtdm_fd *fd, void *arg,
+		      const struct __kernel_old_timeval *tv, size_t arglen)
+{
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd)) {
+		if (arglen != sizeof(struct old_timeval32))
+			return -EINVAL;
+		return sys32_put_timeval(arg, tv);
+	}
+#endif
+
+	if (arglen != sizeof(*tv))
+		return -EINVAL;
+
+	if (!rtdm_fd_is_user(fd)) {
+		*(struct __kernel_old_timeval *)arg = *tv;
+		return 0;
+	}
+
+	return rtdm_safe_copy_to_user(fd, arg, tv, sizeof(*tv));
+}
+
+int rtipc_get_length(struct rtdm_fd *fd, size_t *lenp,
+		     const void *arg, size_t arglen)
+{
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd)) {
+		const compat_size_t *csz;
+		if (arglen != sizeof(*csz))
+			return -EINVAL;
+		csz = arg;
+		return csz == NULL ||
+			!access_rok(csz, sizeof(*csz)) ||
+			__xn_get_user(*lenp, csz) ? -EFAULT : 0;
+	}
+#endif
+
+	if (arglen != sizeof(size_t))
+		return -EINVAL;
+
+	if (!rtdm_fd_is_user(fd)) {
+		*lenp = *(size_t *)arg;
+		return 0;
+	}
+
+	return rtdm_safe_copy_from_user(fd, lenp, arg, sizeof(*lenp));
+}
+
+static int rtipc_socket(struct rtdm_fd *fd, int protocol)
+{
+	struct rtipc_protocol *proto;
+	struct rtipc_private *priv;
+	int ret;
+
+	if (protocol < 0 || protocol >= IPCPROTO_MAX)
+		return -EPROTONOSUPPORT;
+
+	if (protocol == IPCPROTO_IPC)
+		/* Default protocol is IDDP */
+		protocol = IPCPROTO_IDDP;
+
+	proto = protocols[protocol - 1];
+	if (proto == NULL)	/* Not compiled in? */
+		return -ENOPROTOOPT;
+
+	priv = rtdm_fd_to_private(fd);
+	priv->proto = proto;
+	priv->state = kmalloc(proto->proto_statesz, GFP_KERNEL);
+	if (priv->state == NULL)
+		return -ENOMEM;
+
+	xnselect_init(&priv->send_block);
+	xnselect_init(&priv->recv_block);
+
+	ret = proto->proto_ops.socket(fd);
+	if (ret)
+		kfree(priv->state);
+
+	return ret;
+}
+
+static void rtipc_close(struct rtdm_fd *fd)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	/*
+	 * CAUTION: priv->state shall be released by the
+	 * proto_ops.close() handler when appropriate (which may be
+	 * done asynchronously later, see XDDP).
+	 */
+	priv->proto->proto_ops.close(fd);
+	xnselect_destroy(&priv->recv_block);
+	xnselect_destroy(&priv->send_block);
+}
+
+static ssize_t rtipc_recvmsg(struct rtdm_fd *fd,
+			     struct user_msghdr *msg, int flags)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	return priv->proto->proto_ops.recvmsg(fd, msg, flags);
+}
+
+static ssize_t rtipc_sendmsg(struct rtdm_fd *fd,
+			     const struct user_msghdr *msg, int flags)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	return priv->proto->proto_ops.sendmsg(fd, msg, flags);
+}
+
+static ssize_t rtipc_read(struct rtdm_fd *fd,
+			  void *buf, size_t len)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	return priv->proto->proto_ops.read(fd, buf, len);
+}
+
+static ssize_t rtipc_write(struct rtdm_fd *fd,
+			   const void *buf, size_t len)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	return priv->proto->proto_ops.write(fd, buf, len);
+}
+
+static int rtipc_ioctl(struct rtdm_fd *fd,
+		       unsigned int request, void *arg)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	return priv->proto->proto_ops.ioctl(fd, request, arg);
+}
+
+static int rtipc_select(struct rtdm_fd *fd, struct xnselector *selector,
+			unsigned int type, unsigned int index)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct xnselect_binding *binding;
+	unsigned int pollstate, mask;
+	struct xnselect *block;
+	spl_t s;
+	int ret;
+
+	if (type != XNSELECT_READ && type != XNSELECT_WRITE)
+		return -EINVAL;
+
+	binding = xnmalloc(sizeof(*binding));
+	if (binding == NULL)
+		return -ENOMEM;
+
+	cobalt_atomic_enter(s);
+
+	pollstate = priv->proto->proto_ops.pollstate(fd);
+
+	if (type == XNSELECT_READ) {
+		mask = pollstate & POLLIN;
+		block = &priv->recv_block;
+	} else {
+		mask = pollstate & POLLOUT;
+		block = &priv->send_block;
+	}
+
+	ret = xnselect_bind(block, binding, selector, type, index, mask);
+
+	cobalt_atomic_leave(s);
+
+	if (ret)
+		xnfree(binding);
+
+	return ret;
+}
+
+static struct rtdm_driver rtipc_driver = {
+	.profile_info		=	RTDM_PROFILE_INFO(rtipc,
+							  RTDM_CLASS_RTIPC,
+							  RTDM_SUBCLASS_GENERIC,
+							  1),
+	.device_flags		=	RTDM_PROTOCOL_DEVICE,
+	.device_count		=	1,
+	.context_size		=	sizeof(struct rtipc_private),
+	.protocol_family	=	PF_RTIPC,
+	.socket_type		=	SOCK_DGRAM,
+	.ops = {
+		.socket		=	rtipc_socket,
+		.close		=	rtipc_close,
+		.recvmsg_rt	=	rtipc_recvmsg,
+		.recvmsg_nrt	=	NULL,
+		.sendmsg_rt	=	rtipc_sendmsg,
+		.sendmsg_nrt	=	NULL,
+		.ioctl_rt	=	rtipc_ioctl,
+		.ioctl_nrt	=	rtipc_ioctl,
+		.read_rt	=	rtipc_read,
+		.read_nrt	=	NULL,
+		.write_rt	=	rtipc_write,
+		.write_nrt	=	NULL,
+		.select		=	rtipc_select,
+	},
+};
+
+static struct rtdm_device device = {
+	.driver = &rtipc_driver,
+	.label = "rtipc",
+};
+
+int __init __rtipc_init(void)
+{
+	int ret, n;
+
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	for (n = 0; n < IPCPROTO_MAX; n++) {
+		if (protocols[n] && protocols[n]->proto_init) {
+			ret = protocols[n]->proto_init();
+			if (ret)
+				return ret;
+		}
+	}
+
+	return rtdm_dev_register(&device);
+}
+
+void __exit __rtipc_exit(void)
+{
+	int n;
+
+	rtdm_dev_unregister(&device);
+
+	for (n = 0; n < IPCPROTO_MAX; n++) {
+		if (protocols[n] && protocols[n]->proto_exit)
+			protocols[n]->proto_exit();
+	}
+}
+
+module_init(__rtipc_init);
+module_exit(__rtipc_exit);
+++ linux-patched/drivers/xenomai/ipc/bufp.c	2022-03-21 12:58:29.160891194 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/ipc/Makefile	1970-01-01 01:00:00.000000000 +0100
+/**
+ * This file is part of the Xenomai project.
+ *
+ * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/time.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/map.h>
+#include <cobalt/kernel/bufd.h>
+#include <rtdm/ipc.h>
+#include "internal.h"
+
+#define BUFP_SOCKET_MAGIC 0xa61a61a6
+
+struct bufp_socket {
+	int magic;
+	struct sockaddr_ipc name;
+	struct sockaddr_ipc peer;
+
+	void *bufmem;
+	size_t bufsz;
+	u_long status;
+	xnhandle_t handle;
+	char label[XNOBJECT_NAME_LEN];
+
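+	/*
+	 * Ring buffer bookkeeping: rdoff/wroff are the current read and
+	 * write positions, rdrsvd/wrrsvd the amount of space reserved by
+	 * in-flight readers/writers, and rdsem/wrsem count those readers
+	 * and writers so that fillsz is only folded back once the last
+	 * of them has completed its copy.
+	 */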
+	off_t rdoff;
+	off_t rdrsvd;
+	int rdsem;
+	off_t wroff;
+	off_t wrrsvd;
+	int wrsem;
+	size_t fillsz;
+	rtdm_event_t i_event;
+	rtdm_event_t o_event;
+
+	nanosecs_rel_t rx_timeout;
+	nanosecs_rel_t tx_timeout;
+
+	struct rtipc_private *priv;
+};
+
+struct bufp_wait_context {
+	struct rtipc_wait_context wc;
+	size_t len;
+	struct bufp_socket *sk;
+};
+
+static struct sockaddr_ipc nullsa = {
+	.sipc_family = AF_RTIPC,
+	.sipc_port = -1
+};
+
+static struct xnmap *portmap;
+
+#define _BUFP_BINDING   0
+#define _BUFP_BOUND     1
+#define _BUFP_CONNECTED 2
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static char *__bufp_link_target(void *obj)
+{
+	struct bufp_socket *sk = obj;
+
+	return kasformat("%d", sk->name.sipc_port);
+}
+
+extern struct xnptree rtipc_ptree;
+
+static struct xnpnode_link __bufp_pnode = {
+	.node = {
+		.dirname = "bufp",
+		.root = &rtipc_ptree,
+		.ops = &xnregistry_vlink_ops,
+	},
+	.target = __bufp_link_target,
+};
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+static struct xnpnode_link __bufp_pnode = {
+	.node = {
+		.dirname = "bufp",
+	},
+};
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+static int bufp_socket(struct rtdm_fd *fd)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct bufp_socket *sk = priv->state;
+
+	sk->magic = BUFP_SOCKET_MAGIC;
+	sk->name = nullsa;	/* Unbound */
+	sk->peer = nullsa;
+	sk->bufmem = NULL;
+	sk->bufsz = 0;
+	sk->rdoff = 0;
+	sk->wroff = 0;
+	sk->fillsz = 0;
+	sk->rdrsvd = 0;
+	sk->wrrsvd = 0;
+	sk->rdsem = 0;
+	sk->wrsem = 0;
+	sk->status = 0;
+	sk->handle = 0;
+	sk->rx_timeout = RTDM_TIMEOUT_INFINITE;
+	sk->tx_timeout = RTDM_TIMEOUT_INFINITE;
+	*sk->label = 0;
+	rtdm_event_init(&sk->i_event, 0);
+	rtdm_event_init(&sk->o_event, 0);
+	sk->priv = priv;
+
+	return 0;
+}
+
+static void bufp_close(struct rtdm_fd *fd)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct bufp_socket *sk = priv->state;
+	rtdm_lockctx_t s;
+
+	rtdm_event_destroy(&sk->i_event);
+	rtdm_event_destroy(&sk->o_event);
+
+	if (test_bit(_BUFP_BOUND, &sk->status)) {
+		if (sk->name.sipc_port > -1) {
+			cobalt_atomic_enter(s);
+			xnmap_remove(portmap, sk->name.sipc_port);
+			cobalt_atomic_leave(s);
+		}
+
+		if (sk->handle)
+			xnregistry_remove(sk->handle);
+
+		if (sk->bufmem)
+			xnheap_vfree(sk->bufmem);
+	}
+
+	kfree(sk);
+}
+
+static ssize_t __bufp_readbuf(struct bufp_socket *sk,
+			      struct xnbufd *bufd,
+			      int flags)
+{
+	struct bufp_wait_context wait, *bufwc;
+	struct rtipc_wait_context *wc;
+	struct xnthread *waiter;
+	size_t rbytes, n, avail;
+	ssize_t len, ret, xret;
+	rtdm_toseq_t toseq;
+	rtdm_lockctx_t s;
+	off_t rdoff;
+	int resched;
+
+	len = bufd->b_len;
+
+	rtdm_toseq_init(&toseq, sk->rx_timeout);
+
+	cobalt_atomic_enter(s);
+redo:
+	for (;;) {
+		/*
+		 * We should be able to read a complete message of the
+		 * requested length, or block.
+		 */
+		avail = sk->fillsz - sk->rdrsvd;
+		if (avail < len)
+			goto wait;
+
+		/* Reserve a read slot into the circular buffer. */
+		rdoff = sk->rdoff;
+		sk->rdoff = (rdoff + len) % sk->bufsz;
+		sk->rdrsvd += len;
+		sk->rdsem++;
+		rbytes = ret = len;
+
+		do {
+			if (rdoff + rbytes > sk->bufsz)
+				n = sk->bufsz - rdoff;
+			else
+				n = rbytes;
+			/*
+			 * Drop the lock before copying data to
+			 * user. The read slot is consumed in any
+			 * case: the non-copied portion of the message
+			 * is lost on bad write.
+			 */
+			cobalt_atomic_leave(s);
+			xret = xnbufd_copy_from_kmem(bufd, sk->bufmem + rdoff, n);
+			cobalt_atomic_enter(s);
+			if (xret < 0) {
+				ret = -EFAULT;
+				break;
+			}
+
+			rbytes -= n;
+			rdoff = (rdoff + n) % sk->bufsz;
+		} while (rbytes > 0);
+
+		if (--sk->rdsem > 0)
+			goto out;
+
+		resched = 0;
+		if (sk->fillsz == sk->bufsz) /* -> becomes writable */
+			resched |= xnselect_signal(&sk->priv->send_block, POLLOUT);
+
+		sk->fillsz -= sk->rdrsvd;
+		sk->rdrsvd = 0;
+
+		if (sk->fillsz == 0) /* -> becomes non-readable */
+			resched |= xnselect_signal(&sk->priv->recv_block, 0);
+
+		/*
+		 * Wake up all threads pending on the output wait
+		 * queue, if we freed enough room for the leading one
+		 * to post its message.
+		 */
+		waiter = rtipc_peek_wait_head(&sk->o_event);
+		if (waiter == NULL)
+			goto out;
+
+		wc = rtipc_get_wait_context(waiter);
+		XENO_BUG_ON(COBALT, wc == NULL);
+		bufwc = container_of(wc, struct bufp_wait_context, wc);
+		if (bufwc->len + sk->fillsz <= sk->bufsz)
+			/* This call rescheds internally. */
+			rtdm_event_pulse(&sk->o_event);
+		else if (resched)
+			xnsched_run();
+		/*
+		 * We cannot fail anymore once some data has been
+		 * copied via the buffer descriptor, so no need to
+		 * check for any reason to invalidate the latter.
+		 */
+		goto out;
+
+	wait:
+		if (flags & MSG_DONTWAIT) {
+			ret = -EWOULDBLOCK;
+			break;
+		}
+
+		/*
+		 * Check whether writers are already waiting for
+		 * sending data, while we are about to wait for
+		 * receiving some. In such a case, we have a
+		 * pathological use of the buffer. We must allow for a
+		 * short read to prevent a deadlock.
+		 */
+		if (sk->fillsz > 0 && rtipc_peek_wait_head(&sk->o_event)) {
+			len = sk->fillsz;
+			goto redo;
+		}
+
+		wait.len = len;
+		wait.sk = sk;
+		rtipc_prepare_wait(&wait.wc);
+		/*
+		 * Keep the nucleus lock across the wait call, so that
+		 * we don't miss a pulse.
+		 */
+		ret = rtdm_event_timedwait(&sk->i_event,
+					   sk->rx_timeout, &toseq);
+		if (unlikely(ret))
+			break;
+	}
+out:
+	cobalt_atomic_leave(s);
+
+	return ret;
+}
+
+static ssize_t __bufp_recvmsg(struct rtdm_fd *fd,
+			      struct iovec *iov, int iovlen, int flags,
+			      struct sockaddr_ipc *saddr)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct bufp_socket *sk = priv->state;
+	ssize_t len, wrlen, vlen, ret;
+	struct xnbufd bufd;
+	int nvec;
+
+	if (!test_bit(_BUFP_BOUND, &sk->status))
+		return -EAGAIN;
+
+	len = rtdm_get_iov_flatlen(iov, iovlen);
+	if (len == 0)
+		return 0;
+	/*
+	 * We may only return complete messages to readers, so there
+	 * is no point in waiting for messages which are larger than
+	 * what the buffer can hold.
+	 */
+	if (len > sk->bufsz)
+		return -EINVAL;
+
+	/*
+	 * Write "len" bytes from the buffer to the vector cells. Each
+	 * cell is handled as a separate message.
+	 */
+	for (nvec = 0, wrlen = len; nvec < iovlen && wrlen > 0; nvec++) {
+		if (iov[nvec].iov_len == 0)
+			continue;
+		vlen = wrlen >= iov[nvec].iov_len ? iov[nvec].iov_len : wrlen;
+		if (rtdm_fd_is_user(fd)) {
+			xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen);
+			ret = __bufp_readbuf(sk, &bufd, flags);
+			xnbufd_unmap_uread(&bufd);
+		} else {
+			xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen);
+			ret = __bufp_readbuf(sk, &bufd, flags);
+			xnbufd_unmap_kread(&bufd);
+		}
+		if (ret < 0)
+			return ret;
+		iov[nvec].iov_base += vlen;
+		iov[nvec].iov_len -= vlen;
+		wrlen -= vlen;
+		if (ret < vlen)
+			/* Short reads may happen in rare cases. */
+			break;
+	}
+
+	/*
+	 * There is no way to determine who the sender was since we
+	 * process data in byte-oriented mode, so we just copy our own
+	 * sockaddr to send back a valid address.
+	 */
+	if (saddr)
+		*saddr = sk->name;
+
+	return len - wrlen;
+}
+
+static ssize_t bufp_recvmsg(struct rtdm_fd *fd,
+			    struct user_msghdr *msg, int flags)
+{
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+	struct sockaddr_ipc saddr;
+	ssize_t ret;
+
+	if (flags & ~MSG_DONTWAIT)
+		return -EINVAL;
+
+	if (msg->msg_name) {
+		if (msg->msg_namelen < sizeof(struct sockaddr_ipc))
+			return -EINVAL;
+	} else if (msg->msg_namelen != 0)
+		return -EINVAL;
+
+	if (msg->msg_iovlen >= UIO_MAXIOV)
+		return -EINVAL;
+
+	/* Copy I/O vector in */
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	ret = __bufp_recvmsg(fd, iov, msg->msg_iovlen, flags, &saddr);
+	if (ret <= 0) {
+		rtdm_drop_iovec(iov, iov_fast);
+		return ret;
+	}
+
+	/* Copy the updated I/O vector back */
+	if (rtdm_put_iovec(fd, iov, msg, iov_fast))
+		return -EFAULT;
+
+	/* Copy the source address if required. */
+	if (msg->msg_name) {
+		if (rtipc_put_arg(fd, msg->msg_name,
+				  &saddr, sizeof(saddr)))
+			return -EFAULT;
+		msg->msg_namelen = sizeof(struct sockaddr_ipc);
+	}
+
+	return ret;
+}
+
+static ssize_t bufp_read(struct rtdm_fd *fd, void *buf, size_t len)
+{
+	struct iovec iov = { .iov_base = buf, .iov_len = len };
+
+	return __bufp_recvmsg(fd, &iov, 1, 0, NULL);
+}
+
+static ssize_t __bufp_writebuf(struct bufp_socket *rsk,
+			       struct bufp_socket *sk,
+			       struct xnbufd *bufd,
+			       int flags)
+{
+	struct bufp_wait_context wait, *bufwc;
+	struct rtipc_wait_context *wc;
+	struct xnthread *waiter;
+	size_t wbytes, n, avail;
+	ssize_t len, ret, xret;
+	rtdm_toseq_t toseq;
+	rtdm_lockctx_t s;
+	off_t wroff;
+	int resched;
+
+	len = bufd->b_len;
+
+	rtdm_toseq_init(&toseq, sk->tx_timeout);
+
+	cobalt_atomic_enter(s);
+
+	for (;;) {
+		/*
+		 * No short or scattered writes: we should write the
+		 * entire message atomically or block.
+		 */
+		avail = rsk->fillsz + rsk->wrrsvd;
+		if (avail + len > rsk->bufsz)
+			goto wait;
+
+		/* Reserve a write slot into the circular buffer. */
+		wroff = rsk->wroff;
+		rsk->wroff = (wroff + len) % rsk->bufsz;
+		rsk->wrrsvd += len;
+		rsk->wrsem++;
+		wbytes = ret = len;
+
+		do {
+			if (wroff + wbytes > rsk->bufsz)
+				n = rsk->bufsz - wroff;
+			else
+				n = wbytes;
+			/*
+			 * We have to drop the lock while reading in
+			 * data, but we can't rollback on bad read
+			 * from user because some other thread might
+			 * have populated the memory ahead of our
+			 * write slot already: bluntly clear the
+			 * unavailable bytes on copy error.
+			 */
+			cobalt_atomic_leave(s);
+			xret = xnbufd_copy_to_kmem(rsk->bufmem + wroff, bufd, n);
+			cobalt_atomic_enter(s);
+			if (xret < 0) {
+				memset(rsk->bufmem + wroff, 0, n);
+				ret = -EFAULT;
+				break;
+			}
+
+			wbytes -= n;
+			wroff = (wroff + n) % rsk->bufsz;
+		} while (wbytes > 0);
+
+		if (--rsk->wrsem > 0)
+			goto out;
+
+		resched = 0;
+		if (rsk->fillsz == 0) /* -> becomes readable */
+			resched |= xnselect_signal(&rsk->priv->recv_block, POLLIN);
+
+		rsk->fillsz += rsk->wrrsvd;
+		rsk->wrrsvd = 0;
+
+		if (rsk->fillsz == rsk->bufsz) /* becomes non-writable */
+			resched |= xnselect_signal(&rsk->priv->send_block, 0);
+		/*
+		 * Wake up all threads pending on the input wait
+		 * queue, if we accumulated enough data to feed the
+		 * leading one.
+		 */
+		waiter = rtipc_peek_wait_head(&rsk->i_event);
+		if (waiter == NULL)
+			goto out;
+
+		wc = rtipc_get_wait_context(waiter);
+		XENO_BUG_ON(COBALT, wc == NULL);
+		bufwc = container_of(wc, struct bufp_wait_context, wc);
+		if (bufwc->len <= rsk->fillsz)
+			rtdm_event_pulse(&rsk->i_event);
+		else if (resched)
+			xnsched_run();
+		/*
+		 * We cannot fail anymore once some data has been
+		 * copied via the buffer descriptor, so no need to
+		 * check for any reason to invalidate the latter.
+		 */
+		goto out;
+	wait:
+		if (flags & MSG_DONTWAIT) {
+			ret = -EWOULDBLOCK;
+			break;
+		}
+
+		wait.len = len;
+		wait.sk = rsk;
+		rtipc_prepare_wait(&wait.wc);
+		/*
+		 * Keep the nucleus lock across the wait call, so that
+		 * we don't miss a pulse.
+		 */
+		ret = rtdm_event_timedwait(&rsk->o_event,
+					   sk->tx_timeout, &toseq);
+		if (unlikely(ret))
+			break;
+	}
+out:
+	cobalt_atomic_leave(s);
+
+	return ret;
+}
+
+static ssize_t __bufp_sendmsg(struct rtdm_fd *fd,
+			      struct iovec *iov, int iovlen, int flags,
+			      const struct sockaddr_ipc *daddr)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct bufp_socket *sk = priv->state, *rsk;
+	ssize_t len, rdlen, vlen, ret = 0;
+	struct rtdm_fd *rfd;
+	struct xnbufd bufd;
+	rtdm_lockctx_t s;
+	int nvec;
+
+	len = rtdm_get_iov_flatlen(iov, iovlen);
+	if (len == 0)
+		return 0;
+
+	cobalt_atomic_enter(s);
+	rfd = xnmap_fetch_nocheck(portmap, daddr->sipc_port);
+	if (rfd && rtdm_fd_lock(rfd) < 0)
+		rfd = NULL;
+	cobalt_atomic_leave(s);
+	if (rfd == NULL)
+		return -ECONNRESET;
+
+	rsk = rtipc_fd_to_state(rfd);
+	if (!test_bit(_BUFP_BOUND, &rsk->status)) {
+		rtdm_fd_unlock(rfd);
+		return -ECONNREFUSED;
+	}
+
+	/*
+	 * We may only send complete messages, so there is no point in
+	 * accepting messages which are larger than what the buffer
+	 * can hold.
+	 */
+	if (len > rsk->bufsz) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/*
+	 * Read "len" bytes to the buffer from the vector cells. Each
+	 * cell is handled as a separate message.
+	 */
+	for (nvec = 0, rdlen = len; nvec < iovlen && rdlen > 0; nvec++) {
+		if (iov[nvec].iov_len == 0)
+			continue;
+		vlen = rdlen >= iov[nvec].iov_len ? iov[nvec].iov_len : rdlen;
+		if (rtdm_fd_is_user(fd)) {
+			xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen);
+			ret = __bufp_writebuf(rsk, sk, &bufd, flags);
+			xnbufd_unmap_uread(&bufd);
+		} else {
+			xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen);
+			ret = __bufp_writebuf(rsk, sk, &bufd, flags);
+			xnbufd_unmap_kread(&bufd);
+		}
+		if (ret < 0)
+			goto fail;
+		iov[nvec].iov_base += vlen;
+		iov[nvec].iov_len -= vlen;
+		rdlen -= vlen;
+	}
+
+	rtdm_fd_unlock(rfd);
+
+	return len - rdlen;
+fail:
+	rtdm_fd_unlock(rfd);
+
+	return ret;
+}
+
+static ssize_t bufp_sendmsg(struct rtdm_fd *fd,
+			    const struct user_msghdr *msg, int flags)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+	struct bufp_socket *sk = priv->state;
+	struct sockaddr_ipc daddr;
+	ssize_t ret;
+
+	if (flags & ~MSG_DONTWAIT)
+		return -EINVAL;
+
+	if (msg->msg_name) {
+		if (msg->msg_namelen != sizeof(struct sockaddr_ipc))
+			return -EINVAL;
+
+		/* Fetch the destination address to send to. */
+		if (rtipc_get_arg(fd, &daddr, msg->msg_name, sizeof(daddr)))
+			return -EFAULT;
+
+		if (daddr.sipc_port < 0 ||
+		    daddr.sipc_port >= CONFIG_XENO_OPT_BUFP_NRPORT)
+			return -EINVAL;
+	} else {
+		if (msg->msg_namelen != 0)
+			return -EINVAL;
+		daddr = sk->peer;
+		if (daddr.sipc_port < 0)
+			return -EDESTADDRREQ;
+	}
+
+	if (msg->msg_iovlen >= UIO_MAXIOV)
+		return -EINVAL;
+
+	/* Copy I/O vector in */
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	ret = __bufp_sendmsg(fd, iov, msg->msg_iovlen, flags, &daddr);
+	if (ret <= 0) {
+		rtdm_drop_iovec(iov, iov_fast);
+		return ret;
+	}
+
+	/* Copy updated I/O vector back */
+	return rtdm_put_iovec(fd, iov, msg, iov_fast) ?: ret;
+}
+
+static ssize_t bufp_write(struct rtdm_fd *fd,
+			  const void *buf, size_t len)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
+	struct bufp_socket *sk = priv->state;
+
+	if (sk->peer.sipc_port < 0)
+		return -EDESTADDRREQ;
+
+	return __bufp_sendmsg(fd, &iov, 1, 0, &sk->peer);
+}
+
+static int __bufp_bind_socket(struct rtipc_private *priv,
+			      struct sockaddr_ipc *sa)
+{
+	struct bufp_socket *sk = priv->state;
+	int ret = 0, port;
+	struct rtdm_fd *fd;
+	rtdm_lockctx_t s;
+
+	if (sa->sipc_family != AF_RTIPC)
+		return -EINVAL;
+
+	if (sa->sipc_port < -1 ||
+	    sa->sipc_port >= CONFIG_XENO_OPT_BUFP_NRPORT)
+		return -EINVAL;
+
+	cobalt_atomic_enter(s);
+	if (test_bit(_BUFP_BOUND, &sk->status) ||
+	    __test_and_set_bit(_BUFP_BINDING, &sk->status))
+		ret = -EADDRINUSE;
+	cobalt_atomic_leave(s);
+
+	if (ret)
+		return ret;
+
+	/* Will auto-select a free port number if unspec (-1). */
+	port = sa->sipc_port;
+	fd = rtdm_private_to_fd(priv);
+	cobalt_atomic_enter(s);
+	port = xnmap_enter(portmap, port, fd);
+	cobalt_atomic_leave(s);
+	if (port < 0)
+		return port == -EEXIST ? -EADDRINUSE : -ENOMEM;
+
+	sa->sipc_port = port;
+
+	/*
+	 * The caller must have told us how much memory is needed for
+	 * buffer space via setsockopt(), before we get here.
+	 */
+	if (sk->bufsz == 0)
+		return -ENOBUFS;
+
+	sk->bufmem = xnheap_vmalloc(sk->bufsz);
+	if (sk->bufmem == NULL) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	sk->name = *sa;
+	/* Set default destination if unset at binding time. */
+	if (sk->peer.sipc_port < 0)
+		sk->peer = *sa;
+
+	if (*sk->label) {
+		ret = xnregistry_enter(sk->label, sk,
+				       &sk->handle, &__bufp_pnode.node);
+		if (ret) {
+			xnheap_vfree(sk->bufmem);
+			goto fail;
+		}
+	}
+
+	cobalt_atomic_enter(s);
+	__clear_bit(_BUFP_BINDING, &sk->status);
+	__set_bit(_BUFP_BOUND, &sk->status);
+	if (xnselect_signal(&priv->send_block, POLLOUT))
+		xnsched_run();
+	cobalt_atomic_leave(s);
+
+	return 0;
+fail:
+	xnmap_remove(portmap, port);
+	clear_bit(_BUFP_BINDING, &sk->status);
+
+	return ret;
+}
+
+static int __bufp_connect_socket(struct bufp_socket *sk,
+				 struct sockaddr_ipc *sa)
+{
+	struct sockaddr_ipc _sa;
+	struct bufp_socket *rsk;
+	int ret, resched = 0;
+	rtdm_lockctx_t s;
+	xnhandle_t h;
+
+	if (sa == NULL) {
+		_sa = nullsa;
+		sa = &_sa;
+		goto set_assoc;
+	}
+
+	if (sa->sipc_family != AF_RTIPC)
+		return -EINVAL;
+
+	if (sa->sipc_port < -1 ||
+	    sa->sipc_port >= CONFIG_XENO_OPT_BUFP_NRPORT)
+		return -EINVAL;
+	/*
+	 * - If a valid sipc_port is passed in the [0..NRPORT-1] range,
+	 * it is used verbatim and the connection succeeds
+	 * immediately, regardless of whether the destination is
+	 * bound at the time of the call.
+	 *
+	 * - If sipc_port is -1 and a label was set via BUFP_LABEL,
+	 * connect() blocks for the requested amount of time (see
+	 * SO_RCVTIMEO) until a socket is bound to the same label.
+	 *
+	 * - If sipc_port is -1 and no label is given, the default
+	 * destination address is cleared, meaning that any subsequent
+	 * write() to the socket will return -EDESTADDRREQ, until a
+	 * valid destination address is set via connect() or bind().
+	 *
+	 * - In all other cases, -EINVAL is returned.
+	 */
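+	/*
+	 * Illustrative user-space sketch (assumed usage, "s" being an
+	 * AF_RTIPC socket fd): resolving the peer port from a label
+	 * at connect() time, as described above.
+	 *
+	 *   struct rtipc_port_label pl = { .label = "bufp-demo" };
+	 *   struct sockaddr_ipc peer = {
+	 *           .sipc_family = AF_RTIPC,
+	 *           .sipc_port = -1,
+	 *   };
+	 *   setsockopt(s, SOL_BUFP, BUFP_LABEL, &pl, sizeof(pl));
+	 *   connect(s, (struct sockaddr *)&peer, sizeof(peer));
+	 */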
+	if (sa->sipc_port < 0 && *sk->label) {
+		ret = xnregistry_bind(sk->label,
+				      sk->rx_timeout, XN_RELATIVE, &h);
+		if (ret)
+			return ret;
+
+		cobalt_atomic_enter(s);
+		rsk = xnregistry_lookup(h, NULL);
+		if (rsk == NULL || rsk->magic != BUFP_SOCKET_MAGIC)
+			ret = -EINVAL;
+		else {
+			/* Fetch labeled port number. */
+			sa->sipc_port = rsk->name.sipc_port;
+			resched = xnselect_signal(&sk->priv->send_block, POLLOUT);
+		}
+		cobalt_atomic_leave(s);
+		if (ret)
+			return ret;
+	} else if (sa->sipc_port < 0)
+		sa = &nullsa;
+set_assoc:
+	cobalt_atomic_enter(s);
+	if (!test_bit(_BUFP_BOUND, &sk->status))
+		/* Set default name. */
+		sk->name = *sa;
+	/* Set default destination. */
+	sk->peer = *sa;
+	if (sa->sipc_port < 0)
+		__clear_bit(_BUFP_CONNECTED, &sk->status);
+	else
+		__set_bit(_BUFP_CONNECTED, &sk->status);
+	if (resched)
+		xnsched_run();
+	cobalt_atomic_leave(s);
+
+	return 0;
+}
+
+static int __bufp_setsockopt(struct bufp_socket *sk,
+			     struct rtdm_fd *fd,
+			     void *arg)
+{
+	struct _rtdm_setsockopt_args sopt;
+	struct rtipc_port_label plabel;
+	struct __kernel_old_timeval tv;
+	rtdm_lockctx_t s;
+	size_t len;
+	int ret;
+
+	ret = rtipc_get_sockoptin(fd, &sopt, arg);
+	if (ret)
+		return ret;
+
+	if (sopt.level == SOL_SOCKET) {
+		switch (sopt.optname) {
+
+		case SO_RCVTIMEO_OLD:
+			ret = rtipc_get_timeval(fd, &tv, sopt.optval, sopt.optlen);
+			if (ret)
+				return ret;
+			sk->rx_timeout = rtipc_timeval_to_ns(&tv);
+			break;
+
+		case SO_SNDTIMEO_OLD:
+			ret = rtipc_get_timeval(fd, &tv, sopt.optval, sopt.optlen);
+			if (ret)
+				return ret;
+			sk->tx_timeout = rtipc_timeval_to_ns(&tv);
+			break;
+
+		default:
+			ret = -EINVAL;
+		}
+
+		return ret;
+	}
+
+	if (sopt.level != SOL_BUFP)
+		return -ENOPROTOOPT;
+
+	switch (sopt.optname) {
+
+	case BUFP_BUFSZ:
+		ret = rtipc_get_length(fd, &len, sopt.optval, sopt.optlen);
+		if (ret)
+			return ret;
+		if (len == 0)
+			return -EINVAL;
+		cobalt_atomic_enter(s);
+		/*
+		 * We may not do this more than once, and we have to
+		 * do this before the first binding.
+		 */
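+		/*
+		 * Hypothetical user-space sketch: the buffer size must
+		 * be set before bind(), e.g.
+		 *
+		 *   size_t sz = 16384;
+		 *   setsockopt(s, SOL_BUFP, BUFP_BUFSZ, &sz, sizeof(sz));
+		 *   bind(s, (struct sockaddr *)&name, sizeof(name));
+		 */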
+		if (test_bit(_BUFP_BOUND, &sk->status) ||
+		    test_bit(_BUFP_BINDING, &sk->status))
+			ret = -EALREADY;
+		else
+			sk->bufsz = len;
+		cobalt_atomic_leave(s);
+		break;
+
+	case BUFP_LABEL:
+		if (sopt.optlen < sizeof(plabel))
+			return -EINVAL;
+		if (rtipc_get_arg(fd, &plabel, sopt.optval, sizeof(plabel)))
+			return -EFAULT;
+		cobalt_atomic_enter(s);
+		/*
+		 * We may attach a label to a client socket even after
+		 * it was bound to a BUFP port.
+		 */
+		if (test_bit(_BUFP_BINDING, &sk->status))
+			ret = -EALREADY;
+		else {
+			strcpy(sk->label, plabel.label);
+			sk->label[XNOBJECT_NAME_LEN-1] = 0;
+		}
+		cobalt_atomic_leave(s);
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int __bufp_getsockopt(struct bufp_socket *sk,
+			     struct rtdm_fd *fd,
+			     void *arg)
+{
+	struct _rtdm_getsockopt_args sopt;
+	struct rtipc_port_label plabel;
+	struct __kernel_old_timeval tv;
+	rtdm_lockctx_t s;
+	socklen_t len;
+	int ret;
+
+	ret = rtipc_get_sockoptout(fd, &sopt, arg);
+	if (ret)
+		return ret;
+
+	if (rtipc_get_arg(fd, &len, sopt.optlen, sizeof(len)))
+		return -EFAULT;
+
+	if (sopt.level == SOL_SOCKET) {
+		switch (sopt.optname) {
+
+		case SO_RCVTIMEO_OLD:
+			rtipc_ns_to_timeval(&tv, sk->rx_timeout);
+			ret = rtipc_put_timeval(fd, sopt.optval, &tv, len);
+			if (ret)
+				return ret;
+			break;
+
+		case SO_SNDTIMEO_OLD:
+			rtipc_ns_to_timeval(&tv, sk->tx_timeout);
+			ret = rtipc_put_timeval(fd, sopt.optval, &tv, len);
+			if (ret)
+				return ret;
+			break;
+
+		default:
+			ret = -EINVAL;
+		}
+
+		return ret;
+	}
+
+	if (sopt.level != SOL_BUFP)
+		return -ENOPROTOOPT;
+
+	switch (sopt.optname) {
+
+	case BUFP_LABEL:
+		if (len < sizeof(plabel))
+			return -EINVAL;
+		cobalt_atomic_enter(s);
+		strcpy(plabel.label, sk->label);
+		cobalt_atomic_leave(s);
+		if (rtipc_put_arg(fd, sopt.optval, &plabel, sizeof(plabel)))
+			return -EFAULT;
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int __bufp_ioctl(struct rtdm_fd *fd,
+			unsigned int request, void *arg)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct sockaddr_ipc saddr, *saddrp = &saddr;
+	struct bufp_socket *sk = priv->state;
+	int ret = 0;
+
+	switch (request) {
+
+	COMPAT_CASE(_RTIOC_CONNECT):
+		ret = rtipc_get_sockaddr(fd, &saddrp, arg);
+		if (ret)
+			return ret;
+		ret = __bufp_connect_socket(sk, saddrp);
+		break;
+
+	COMPAT_CASE(_RTIOC_BIND):
+		ret = rtipc_get_sockaddr(fd, &saddrp, arg);
+		if (ret)
+			return ret;
+		if (saddrp == NULL)
+			return -EFAULT;
+		ret = __bufp_bind_socket(priv, saddrp);
+		break;
+
+	COMPAT_CASE(_RTIOC_GETSOCKNAME):
+		ret = rtipc_put_sockaddr(fd, arg, &sk->name);
+		break;
+
+	COMPAT_CASE(_RTIOC_GETPEERNAME):
+		ret = rtipc_put_sockaddr(fd, arg, &sk->peer);
+		break;
+
+	COMPAT_CASE(_RTIOC_SETSOCKOPT):
+		ret = __bufp_setsockopt(sk, fd, arg);
+		break;
+
+	COMPAT_CASE(_RTIOC_GETSOCKOPT):
+		ret = __bufp_getsockopt(sk, fd, arg);
+		break;
+
+	case _RTIOC_LISTEN:
+	COMPAT_CASE(_RTIOC_ACCEPT):
+		ret = -EOPNOTSUPP;
+		break;
+
+	case _RTIOC_SHUTDOWN:
+		ret = -ENOTCONN;
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int bufp_ioctl(struct rtdm_fd *fd,
+		      unsigned int request, void *arg)
+{
+	int ret;
+
+	switch (request) {
+	COMPAT_CASE(_RTIOC_BIND):
+		if (rtdm_in_rt_context())
+			return -ENOSYS;	/* Try downgrading to NRT */
+		fallthrough;
+	default:
+		ret = __bufp_ioctl(fd, request, arg);
+	}
+
+	return ret;
+}
+
+static unsigned int bufp_pollstate(struct rtdm_fd *fd) /* atomic */
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct bufp_socket *sk = priv->state, *rsk;
+	unsigned int mask = 0;
+	struct rtdm_fd *rfd;
+
+	if (test_bit(_BUFP_BOUND, &sk->status) && sk->fillsz > 0)
+		mask |= POLLIN;
+
+	/*
+	 * If the socket is connected, POLLOUT means that the peer
+	 * exists, is bound and can receive data. Otherwise POLLOUT is
+	 * always set, assuming the client is likely to use explicit
+	 * addressing in send operations.
+	 */
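+	/*
+	 * Hypothetical user-space sketch relying on this state
+	 * through poll():
+	 *
+	 *   struct pollfd pfd = { .fd = s, .events = POLLIN | POLLOUT };
+	 *   poll(&pfd, 1, -1);
+	 */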
+	if (test_bit(_BUFP_CONNECTED, &sk->status)) {
+		rfd = xnmap_fetch_nocheck(portmap, sk->peer.sipc_port);
+		if (rfd) {
+			rsk = rtipc_fd_to_state(rfd);
+			if (rsk->fillsz < rsk->bufsz)
+				mask |= POLLOUT;
+		}
+	} else
+		mask |= POLLOUT;
+
+	return mask;
+}
+
+static int bufp_init(void)
+{
+	portmap = xnmap_create(CONFIG_XENO_OPT_BUFP_NRPORT, 0, 0);
+	if (portmap == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void bufp_exit(void)
+{
+	xnmap_delete(portmap);
+}
+
+struct rtipc_protocol bufp_proto_driver = {
+	.proto_name = "bufp",
+	.proto_statesz = sizeof(struct bufp_socket),
+	.proto_init = bufp_init,
+	.proto_exit = bufp_exit,
+	.proto_ops = {
+		.socket = bufp_socket,
+		.close = bufp_close,
+		.recvmsg = bufp_recvmsg,
+		.sendmsg = bufp_sendmsg,
+		.read = bufp_read,
+		.write = bufp_write,
+		.ioctl = bufp_ioctl,
+		.pollstate = bufp_pollstate,
+	}
+};
+++ linux-patched/drivers/xenomai/ipc/Makefile	2022-03-21 12:58:29.157891224 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/ipc/xddp.c	1970-01-01 01:00:00.000000000 +0100
+
+obj-$(CONFIG_XENO_DRIVERS_RTIPC) += xeno_rtipc.o
+
+xeno_rtipc-y := rtipc.o
+
+xeno_rtipc-$(CONFIG_XENO_DRIVERS_RTIPC_XDDP) += xddp.o
+xeno_rtipc-$(CONFIG_XENO_DRIVERS_RTIPC_IDDP) += iddp.o
+xeno_rtipc-$(CONFIG_XENO_DRIVERS_RTIPC_BUFP) += bufp.o
+++ linux-patched/drivers/xenomai/ipc/xddp.c	2022-03-21 12:58:29.153891263 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/ipc/iddp.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * This file is part of the Xenomai project.
+ *
+ * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/bufd.h>
+#include <cobalt/kernel/pipe.h>
+#include <rtdm/ipc.h>
+#include "internal.h"
+
+#define XDDP_SOCKET_MAGIC 0xa21a21a2
+
+struct xddp_message {
+	struct xnpipe_mh mh;
+	char data[];
+};
+
+struct xddp_socket {
+	int magic;
+	struct sockaddr_ipc name;
+	struct sockaddr_ipc peer;
+
+	int minor;
+	size_t poolsz;
+	xnhandle_t handle;
+	char label[XNOBJECT_NAME_LEN];
+	struct rtdm_fd *fd;			/* i.e. RTDM socket fd */
+
+	struct xddp_message *buffer;
+	int buffer_port;
+	struct xnheap *bufpool;
+	struct xnheap privpool;
+	size_t fillsz;
+	size_t curbufsz;	/* Current streaming buffer size */
+	u_long status;
+	rtdm_lock_t lock;
+
+	nanosecs_rel_t timeout;	/* connect()/recvmsg() timeout */
+	size_t reqbufsz;	/* Requested streaming buffer size */
+
+	int (*monitor)(struct rtdm_fd *fd, int event, long arg);
+	struct rtipc_private *priv;
+};
+
+static struct sockaddr_ipc nullsa = {
+	.sipc_family = AF_RTIPC,
+	.sipc_port = -1
+};
+
+static struct rtdm_fd *portmap[CONFIG_XENO_OPT_PIPE_NRDEV]; /* indexes RTDM fildes */
+
+#define _XDDP_SYNCWAIT  0
+#define _XDDP_ATOMIC    1
+#define _XDDP_BINDING   2
+#define _XDDP_BOUND     3
+#define _XDDP_CONNECTED 4
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static char *__xddp_link_target(void *obj)
+{
+	struct xddp_socket *sk = obj;
+
+	return kasformat("/dev/rtp%d", sk->minor);
+}
+
+extern struct xnptree rtipc_ptree;
+
+static struct xnpnode_link __xddp_pnode = {
+	.node = {
+		.dirname = "xddp",
+		.root = &rtipc_ptree,
+		.ops = &xnregistry_vlink_ops,
+	},
+	.target = __xddp_link_target,
+};
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+static struct xnpnode_link __xddp_pnode = {
+	.node = {
+		.dirname = "xddp",
+	},
+};
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+static void *__xddp_alloc_handler(size_t size, void *skarg) /* nklock free */
+{
+	struct xddp_socket *sk = skarg;
+	void *buf;
+
+	/* Try to allocate memory for the incoming message. */
+	buf = xnheap_alloc(sk->bufpool, size);
+	if (unlikely(buf == NULL)) {
+		if (sk->monitor)
+			sk->monitor(sk->fd, XDDP_EVTNOBUF, size);
+		if (size > xnheap_get_size(sk->bufpool))
+			buf = (void *)-1; /* Will never succeed. */
+	}
+
+	return buf;
+}
+
+static int __xddp_resize_streambuf(struct xddp_socket *sk) /* sk->lock held */
+{
+	if (sk->buffer)
+		xnheap_free(sk->bufpool, sk->buffer);
+
+	if (sk->reqbufsz == 0) {
+		sk->buffer = NULL;
+		sk->curbufsz = 0;
+		return 0;
+	}
+
+	sk->buffer = xnheap_alloc(sk->bufpool, sk->reqbufsz);
+	if (sk->buffer == NULL) {
+		sk->curbufsz = 0;
+		return -ENOMEM;
+	}
+
+	sk->curbufsz = sk->reqbufsz;
+
+	return 0;
+}
+
+static void __xddp_free_handler(void *buf, void *skarg) /* nklock free */
+{
+	struct xddp_socket *sk = skarg;
+	rtdm_lockctx_t s;
+
+	if (buf != sk->buffer) {
+		xnheap_free(sk->bufpool, buf);
+		return;
+	}
+
+	/* Reset the streaming buffer. */
+
+	rtdm_lock_get_irqsave(&sk->lock, s);
+
+	sk->fillsz = 0;
+	sk->buffer_port = -1;
+	__clear_bit(_XDDP_SYNCWAIT, &sk->status);
+	__clear_bit(_XDDP_ATOMIC, &sk->status);
+
+	/*
+	 * If a XDDP_BUFSZ request is pending, resize the streaming
+	 * buffer on-the-fly.
+	 */
+	if (unlikely(sk->curbufsz != sk->reqbufsz))
+		__xddp_resize_streambuf(sk);
+
+	rtdm_lock_put_irqrestore(&sk->lock, s);
+}
+
+static void __xddp_output_handler(struct xnpipe_mh *mh, void *skarg) /* nklock held */
+{
+	struct xddp_socket *sk = skarg;
+
+	if (sk->monitor)
+		sk->monitor(sk->fd, XDDP_EVTOUT, xnpipe_m_size(mh));
+}
+
+static int __xddp_input_handler(struct xnpipe_mh *mh, int retval, void *skarg) /* nklock held */
+{
+	struct xddp_socket *sk = skarg;
+
+	if (sk->monitor) {
+		if (retval == 0)
+			/* Callee may alter the return value passed to userland. */
+			retval = sk->monitor(sk->fd, XDDP_EVTIN, xnpipe_m_size(mh));
+		else if (retval == -EPIPE && mh == NULL)
+			sk->monitor(sk->fd, XDDP_EVTDOWN, 0);
+	}
+
+	if (retval == 0 &&
+	    (__xnpipe_pollstate(sk->minor) & POLLIN) != 0 &&
+	    xnselect_signal(&sk->priv->recv_block, POLLIN))
+		xnsched_run();
+
+	return retval;
+}
+
+static void __xddp_release_handler(void *skarg) /* nklock free */
+{
+	struct xddp_socket *sk = skarg;
+	void *poolmem;
+	u32 poolsz;
+
+	if (sk->bufpool == &sk->privpool) {
+		poolmem = xnheap_get_membase(&sk->privpool);
+		poolsz = xnheap_get_size(&sk->privpool);
+		xnheap_destroy(&sk->privpool);
+		xnheap_vfree(poolmem);
+	} else if (sk->buffer)
+		xnfree(sk->buffer);
+
+	kfree(sk);
+}
+
+static int xddp_socket(struct rtdm_fd *fd)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct xddp_socket *sk = priv->state;
+
+	sk->magic = XDDP_SOCKET_MAGIC;
+	sk->name = nullsa;	/* Unbound */
+	sk->peer = nullsa;
+	sk->minor = -1;
+	sk->handle = 0;
+	*sk->label = 0;
+	sk->poolsz = 0;
+	sk->buffer = NULL;
+	sk->buffer_port = -1;
+	sk->bufpool = NULL;
+	sk->fillsz = 0;
+	sk->status = 0;
+	sk->timeout = RTDM_TIMEOUT_INFINITE;
+	sk->curbufsz = 0;
+	sk->reqbufsz = 0;
+	sk->monitor = NULL;
+	rtdm_lock_init(&sk->lock);
+	sk->priv = priv;
+
+	return 0;
+}
+
+static void xddp_close(struct rtdm_fd *fd)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct xddp_socket *sk = priv->state;
+	rtdm_lockctx_t s;
+
+	sk->monitor = NULL;
+
+	if (!test_bit(_XDDP_BOUND, &sk->status))
+		return;
+
+	cobalt_atomic_enter(s);
+	portmap[sk->name.sipc_port] = NULL;
+	cobalt_atomic_leave(s);
+
+	if (sk->handle)
+		xnregistry_remove(sk->handle);
+
+	xnpipe_disconnect(sk->minor);
+}
+
+static ssize_t __xddp_recvmsg(struct rtdm_fd *fd,
+			      struct iovec *iov, int iovlen, int flags,
+			      struct sockaddr_ipc *saddr)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct xddp_message *mbuf = NULL; /* Fake GCC */
+	struct xddp_socket *sk = priv->state;
+	ssize_t maxlen, len, wrlen, vlen;
+	nanosecs_rel_t timeout;
+	struct xnpipe_mh *mh;
+	int nvec, rdoff, ret;
+	struct xnbufd bufd;
+	spl_t s;
+
+	if (!test_bit(_XDDP_BOUND, &sk->status))
+		return -EAGAIN;
+
+	maxlen = rtdm_get_iov_flatlen(iov, iovlen);
+	if (maxlen == 0)
+		return 0;
+
+	timeout = (flags & MSG_DONTWAIT) ? RTDM_TIMEOUT_NONE : sk->timeout;
+	/* Pull heading message from the input queue. */
+	len = xnpipe_recv(sk->minor, &mh, timeout);
+	if (len < 0)
+		return len == -EIDRM ? 0 : len;
+	if (len > maxlen) {
+		ret = -ENOBUFS;
+		goto out;
+	}
+
+	mbuf = container_of(mh, struct xddp_message, mh);
+
+	if (saddr)
+		*saddr = sk->name;
+
+	/* Write "len" bytes from mbuf->data to the vector cells */
+	for (ret = 0, nvec = 0, rdoff = 0, wrlen = len;
+	     nvec < iovlen && wrlen > 0; nvec++) {
+		if (iov[nvec].iov_len == 0)
+			continue;
+		vlen = wrlen >= iov[nvec].iov_len ? iov[nvec].iov_len : wrlen;
+		if (rtdm_fd_is_user(fd)) {
+			xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen);
+			ret = xnbufd_copy_from_kmem(&bufd, mbuf->data + rdoff, vlen);
+			xnbufd_unmap_uread(&bufd);
+		} else {
+			xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen);
+			ret = xnbufd_copy_from_kmem(&bufd, mbuf->data + rdoff, vlen);
+			xnbufd_unmap_kread(&bufd);
+		}
+		if (ret < 0)
+			goto out;
+		iov[nvec].iov_base += vlen;
+		iov[nvec].iov_len -= vlen;
+		wrlen -= vlen;
+		rdoff += vlen;
+	}
+out:
+	xnheap_free(sk->bufpool, mbuf);
+	cobalt_atomic_enter(s);
+	if ((__xnpipe_pollstate(sk->minor) & POLLIN) == 0 &&
+	    xnselect_signal(&priv->recv_block, 0))
+		xnsched_run();
+	cobalt_atomic_leave(s);
+
+	return ret ?: len;
+}
+
+static ssize_t xddp_recvmsg(struct rtdm_fd *fd,
+			    struct user_msghdr *msg, int flags)
+{
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+	struct sockaddr_ipc saddr;
+	ssize_t ret;
+
+	if (flags & ~MSG_DONTWAIT)
+		return -EINVAL;
+
+	if (msg->msg_name) {
+		if (msg->msg_namelen < sizeof(struct sockaddr_ipc))
+			return -EINVAL;
+	} else if (msg->msg_namelen != 0)
+		return -EINVAL;
+
+	if (msg->msg_iovlen >= UIO_MAXIOV)
+		return -EINVAL;
+
+	/* Copy I/O vector in */
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	ret = __xddp_recvmsg(fd, iov, msg->msg_iovlen, flags, &saddr);
+	if (ret <= 0) {
+		rtdm_drop_iovec(iov, iov_fast);
+		return ret;
+	}
+
+	/* Copy the updated I/O vector back */
+	if (rtdm_put_iovec(fd, iov, msg, iov_fast))
+		return -EFAULT;
+
+	/* Copy the source address if required. */
+	if (msg->msg_name) {
+		if (rtipc_put_arg(fd, msg->msg_name, &saddr, sizeof(saddr)))
+			return -EFAULT;
+		msg->msg_namelen = sizeof(struct sockaddr_ipc);
+	}
+
+	return ret;
+}
+
+static ssize_t xddp_read(struct rtdm_fd *fd, void *buf, size_t len)
+{
+	struct iovec iov = { .iov_base = buf, .iov_len = len };
+
+	return __xddp_recvmsg(fd, &iov, 1, 0, NULL);
+}
+
+static ssize_t __xddp_stream(struct xddp_socket *sk,
+			     int from, struct xnbufd *bufd)
+{
+	struct xddp_message *mbuf;
+	size_t fillptr, rembytes;
+	rtdm_lockctx_t s;
+	ssize_t outbytes;
+	int ret;
+
+	/*
+	 * xnpipe_msend() and xnpipe_mfixup() routines will only grab
+	 * the nklock directly or indirectly, so holding our socket
+	 * lock across those calls is fine.
+	 */
+	rtdm_lock_get_irqsave(&sk->lock, s);
+
+	/*
+	 * There are two cases in which we must remove the cork
+	 * unconditionally and send the incoming data as a standalone
+	 * datagram: the destination port does not support streaming,
+	 * or its streaming buffer is already filled with data issued
+	 * from another port.
+	 */
+	if (sk->curbufsz == 0 ||
+	    (sk->buffer_port >= 0 && sk->buffer_port != from)) {
+		/* This will end up into a standalone datagram. */
+		/* This will end up as a standalone datagram. */
+		goto out;
+	}
+
+	mbuf = sk->buffer;
+	rembytes = sk->curbufsz - sizeof(*mbuf) - sk->fillsz;
+	outbytes = bufd->b_len > rembytes ? rembytes : bufd->b_len;
+	if (likely(outbytes > 0)) {
+	repeat:
+		/* Mark the beginning of a should-be-atomic section. */
+		__set_bit(_XDDP_ATOMIC, &sk->status);
+		fillptr = sk->fillsz;
+		sk->fillsz += outbytes;
+
+		rtdm_lock_put_irqrestore(&sk->lock, s);
+		ret = xnbufd_copy_to_kmem(mbuf->data + fillptr,
+					  bufd, outbytes);
+		rtdm_lock_get_irqsave(&sk->lock, s);
+
+		if (ret < 0) {
+			outbytes = ret;
+			__clear_bit(_XDDP_ATOMIC, &sk->status);
+			goto out;
+		}
+
+		/* We haven't been atomic, let's try again. */
+		if (!__test_and_clear_bit(_XDDP_ATOMIC, &sk->status))
+			goto repeat;
+
+		if (__test_and_set_bit(_XDDP_SYNCWAIT, &sk->status))
+			outbytes = xnpipe_mfixup(sk->minor,
+						 &mbuf->mh, outbytes);
+		else {
+			sk->buffer_port = from;
+			outbytes = xnpipe_send(sk->minor, &mbuf->mh,
+					       outbytes + sizeof(*mbuf),
+					       XNPIPE_NORMAL);
+			if (outbytes > 0)
+				outbytes -= sizeof(*mbuf);
+		}
+	}
+
+out:
+	rtdm_lock_put_irqrestore(&sk->lock, s);
+
+	return outbytes;
+}
+
+static ssize_t __xddp_sendmsg(struct rtdm_fd *fd,
+			      struct iovec *iov, int iovlen, int flags,
+			      const struct sockaddr_ipc *daddr)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	ssize_t len, rdlen, wrlen, vlen, ret, sublen;
+	struct xddp_socket *sk = priv->state;
+	struct xddp_message *mbuf;
+	struct xddp_socket *rsk;
+	struct rtdm_fd *rfd;
+	int nvec, to, from;
+	struct xnbufd bufd;
+	rtdm_lockctx_t s;
+
+	len = rtdm_get_iov_flatlen(iov, iovlen);
+	if (len == 0)
+		return 0;
+
+	from = sk->name.sipc_port;
+	to = daddr->sipc_port;
+
+	cobalt_atomic_enter(s);
+	rfd = portmap[to];
+	if (rfd && rtdm_fd_lock(rfd) < 0)
+		rfd = NULL;
+	cobalt_atomic_leave(s);
+
+	if (rfd == NULL)
+		return -ECONNRESET;
+
+	rsk = rtipc_fd_to_state(rfd);
+	if (!test_bit(_XDDP_BOUND, &rsk->status)) {
+		rtdm_fd_unlock(rfd);
+		return -ECONNREFUSED;
+	}
+
+	sublen = len;
+	nvec = 0;
+
+	/*
+	 * If active, the streaming buffer is already pending on the
+	 * output queue, so we basically have nothing to do during a
+	 * MSG_MORE -> MSG_NONE transition. Therefore, we only have to
+	 * take care of filling that buffer when MSG_MORE is
+	 * given. Yummie.
+	 */
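+	/*
+	 * Illustrative sender-side sketch (assumed usage): chunks
+	 * sent with MSG_MORE accumulate into the peer's streaming
+	 * buffer, while a plain send always travels as a standalone
+	 * datagram.
+	 *
+	 *   send(s, hdr, sizeof(hdr), MSG_MORE);
+	 *   send(s, payload, len, MSG_MORE);
+	 *   send(s, trailer, sizeof(trailer), 0);
+	 */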
+	if (flags & MSG_MORE) {
+		for (rdlen = sublen, wrlen = 0;
+		     nvec < iovlen && rdlen > 0; nvec++) {
+			if (iov[nvec].iov_len == 0)
+				continue;
+			vlen = rdlen >= iov[nvec].iov_len ? iov[nvec].iov_len : rdlen;
+			if (rtdm_fd_is_user(fd)) {
+				xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen);
+				ret = __xddp_stream(rsk, from, &bufd);
+				xnbufd_unmap_uread(&bufd);
+			} else {
+				xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen);
+				ret = __xddp_stream(rsk, from, &bufd);
+				xnbufd_unmap_kread(&bufd);
+			}
+			if (ret < 0)
+				goto fail_unlock;
+			wrlen += ret;
+			rdlen -= ret;
+			iov[nvec].iov_base += ret;
+			iov[nvec].iov_len -= ret;
+			/*
+			 * In case of a short write to the streaming
+			 * buffer, send the unsent part as a
+			 * standalone datagram.
+			 */
+			if (ret < vlen) {
+				sublen = rdlen;
+				goto nostream;
+			}
+		}
+		len = wrlen;
+		goto done;
+	}
+
+nostream:
+	mbuf = xnheap_alloc(rsk->bufpool, sublen + sizeof(*mbuf));
+	if (unlikely(mbuf == NULL)) {
+		ret = -ENOMEM;
+		goto fail_unlock;
+	}
+
+	/*
+	 * Move "sublen" bytes to mbuf->data from the vector cells
+	 */
+	for (rdlen = sublen, wrlen = 0; nvec < iovlen && rdlen > 0; nvec++) {
+		if (iov[nvec].iov_len == 0)
+			continue;
+		vlen = rdlen >= iov[nvec].iov_len ? iov[nvec].iov_len : rdlen;
+		if (rtdm_fd_is_user(fd)) {
+			xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen);
+			ret = xnbufd_copy_to_kmem(mbuf->data + wrlen, &bufd, vlen);
+			xnbufd_unmap_uread(&bufd);
+		} else {
+			xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen);
+			ret = xnbufd_copy_to_kmem(mbuf->data + wrlen, &bufd, vlen);
+			xnbufd_unmap_kread(&bufd);
+		}
+		if (ret < 0)
+			goto fail_freebuf;
+		iov[nvec].iov_base += vlen;
+		iov[nvec].iov_len -= vlen;
+		rdlen -= vlen;
+		wrlen += vlen;
+	}
+
+	ret = xnpipe_send(rsk->minor, &mbuf->mh,
+			  sublen + sizeof(*mbuf),
+			  (flags & MSG_OOB) ?
+			  XNPIPE_URGENT : XNPIPE_NORMAL);
+
+	if (unlikely(ret < 0)) {
+	fail_freebuf:
+		xnheap_free(rsk->bufpool, mbuf);
+	fail_unlock:
+		rtdm_fd_unlock(rfd);
+		return ret;
+	}
+done:
+	rtdm_fd_unlock(rfd);
+
+	return len;
+}
+
+static ssize_t xddp_sendmsg(struct rtdm_fd *fd,
+			    const struct user_msghdr *msg, int flags)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+	struct xddp_socket *sk = priv->state;
+	struct sockaddr_ipc daddr;
+	ssize_t ret;
+
+	/*
+	 * We accept MSG_DONTWAIT, but do not care about it, since
+	 * writing to the real-time endpoint of a message pipe must be
+	 * a non-blocking operation.
+	 */
+	if (flags & ~(MSG_MORE | MSG_OOB | MSG_DONTWAIT))
+		return -EINVAL;
+
+	/*
+	 * MSG_MORE and MSG_OOB are mutually exclusive in our
+	 * implementation.
+	 */
+	if ((flags & (MSG_MORE | MSG_OOB)) == (MSG_MORE | MSG_OOB))
+		return -EINVAL;
+
+	if (msg->msg_name) {
+		if (msg->msg_namelen != sizeof(struct sockaddr_ipc))
+			return -EINVAL;
+
+		/* Fetch the destination address to send to. */
+		if (rtipc_get_arg(fd, &daddr, msg->msg_name, sizeof(daddr)))
+			return -EFAULT;
+
+		if (daddr.sipc_port < 0 ||
+		    daddr.sipc_port >= CONFIG_XENO_OPT_PIPE_NRDEV)
+			return -EINVAL;
+	} else {
+		if (msg->msg_namelen != 0)
+			return -EINVAL;
+		daddr = sk->peer;
+		if (daddr.sipc_port < 0)
+			return -EDESTADDRREQ;
+	}
+
+	if (msg->msg_iovlen >= UIO_MAXIOV)
+		return -EINVAL;
+
+	/* Copy I/O vector in */
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	ret = __xddp_sendmsg(fd, iov, msg->msg_iovlen, flags, &daddr);
+	if (ret <= 0) {
+		rtdm_drop_iovec(iov, iov_fast);
+		return ret;
+	}
+
+	/* Copy updated I/O vector back */
+	return rtdm_put_iovec(fd, iov, msg, iov_fast) ?: ret;
+}
+
+static ssize_t xddp_write(struct rtdm_fd *fd,
+			  const void *buf, size_t len)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
+	struct xddp_socket *sk = priv->state;
+
+	if (sk->peer.sipc_port < 0)
+		return -EDESTADDRREQ;
+
+	return __xddp_sendmsg(fd, &iov, 1, 0, &sk->peer);
+}
+
+static int __xddp_bind_socket(struct rtipc_private *priv,
+			      struct sockaddr_ipc *sa)
+{
+	struct xddp_socket *sk = priv->state;
+	struct xnpipe_operations ops;
+	rtdm_lockctx_t s;
+	size_t poolsz;
+	void *poolmem;
+	int ret = 0;
+
+	if (sa->sipc_family != AF_RTIPC)
+		return -EINVAL;
+
+	/* Allow special port -1 for auto-selection. */
+	if (sa->sipc_port < -1 ||
+	    sa->sipc_port >= CONFIG_XENO_OPT_PIPE_NRDEV)
+		return -EINVAL;
+
+	cobalt_atomic_enter(s);
+	if (test_bit(_XDDP_BOUND, &sk->status) ||
+	    __test_and_set_bit(_XDDP_BINDING, &sk->status))
+		ret = -EADDRINUSE;
+	cobalt_atomic_leave(s);
+	if (ret)
+		return ret;
+
+	poolsz = sk->poolsz;
+	if (poolsz > 0) {
+		poolsz = PAGE_ALIGN(poolsz);
+		poolsz += PAGE_ALIGN(sk->reqbufsz);
+		poolmem = xnheap_vmalloc(poolsz);
+		if (poolmem == NULL) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		ret = xnheap_init(&sk->privpool, poolmem, poolsz);
+		if (ret) {
+			xnheap_vfree(poolmem);
+			goto fail;
+		}
+
+		sk->bufpool = &sk->privpool;
+	} else
+		sk->bufpool = &cobalt_heap;
+
+	if (sk->reqbufsz > 0) {
+		sk->buffer = xnheap_alloc(sk->bufpool, sk->reqbufsz);
+		if (sk->buffer == NULL) {
+			ret = -ENOMEM;
+			goto fail_freeheap;
+		}
+		sk->curbufsz = sk->reqbufsz;
+	}
+
+	sk->fd = rtdm_private_to_fd(priv);
+
+	ops.output = &__xddp_output_handler;
+	ops.input = &__xddp_input_handler;
+	ops.alloc_ibuf = &__xddp_alloc_handler;
+	ops.free_ibuf = &__xddp_free_handler;
+	ops.free_obuf = &__xddp_free_handler;
+	ops.release = &__xddp_release_handler;
+
+	ret = xnpipe_connect(sa->sipc_port, &ops, sk);
+	if (ret < 0) {
+		if (ret == -EBUSY)
+			ret = -EADDRINUSE;
+	fail_freeheap:
+		if (poolsz > 0) {
+			xnheap_destroy(&sk->privpool);
+			xnheap_vfree(poolmem);
+		}
+	fail:
+		clear_bit(_XDDP_BINDING, &sk->status);
+		return ret;
+	}
+
+	sk->minor = ret;
+	sa->sipc_port = ret;
+	sk->name = *sa;
+	/* Set default destination if unset at binding time. */
+	if (sk->peer.sipc_port < 0)
+		sk->peer = *sa;
+
+	if (poolsz > 0)
+		xnheap_set_name(sk->bufpool, "xddp-pool@%d", sa->sipc_port);
+
+	if (*sk->label) {
+		ret = xnregistry_enter(sk->label, sk, &sk->handle,
+				       &__xddp_pnode.node);
+		if (ret) {
+			/* The release handler will cleanup the pool for us. */
+			xnpipe_disconnect(sk->minor);
+			return ret;
+		}
+	}
+
+	cobalt_atomic_enter(s);
+	portmap[sk->minor] = rtdm_private_to_fd(priv);
+	__clear_bit(_XDDP_BINDING, &sk->status);
+	__set_bit(_XDDP_BOUND, &sk->status);
+	if (xnselect_signal(&priv->send_block, POLLOUT))
+		xnsched_run();
+	cobalt_atomic_leave(s);
+
+	return 0;
+}
+
+static int __xddp_connect_socket(struct xddp_socket *sk,
+				 struct sockaddr_ipc *sa)
+{
+	struct sockaddr_ipc _sa;
+	struct xddp_socket *rsk;
+	int ret, resched = 0;
+	rtdm_lockctx_t s;
+	xnhandle_t h;
+
+	if (sa == NULL) {
+		_sa = nullsa;
+		sa = &_sa;
+		goto set_assoc;
+	}
+
+	if (sa->sipc_family != AF_RTIPC)
+		return -EINVAL;
+
+	if (sa->sipc_port < -1 ||
+	    sa->sipc_port >= CONFIG_XENO_OPT_PIPE_NRDEV)
+		return -EINVAL;
+	/*
+	 * - If a valid sipc_port is passed in the [0..NRDEV-1] range,
+	 * it is used verbatim and the connection succeeds
+	 * immediately, regardless of whether the destination is
+	 * bound at the time of the call.
+	 *
+	 * - If sipc_port is -1 and a label was set via XDDP_LABEL,
+	 * connect() blocks for the requested amount of time (see
+	 * SO_RCVTIMEO) until a socket is bound to the same label.
+	 *
+	 * - If sipc_port is -1 and no label is given, the default
+	 * destination address is cleared, meaning that any subsequent
+	 * write() to the socket will return -EDESTADDRREQ, until a
+	 * valid destination address is set via connect() or bind().
+	 *
+	 * - In all other cases, -EINVAL is returned.
+	 */
+	if (sa->sipc_port < 0 && *sk->label) {
+		ret = xnregistry_bind(sk->label,
+				      sk->timeout, XN_RELATIVE, &h);
+		if (ret)
+			return ret;
+
+		cobalt_atomic_enter(s);
+		rsk = xnregistry_lookup(h, NULL);
+		if (rsk == NULL || rsk->magic != XDDP_SOCKET_MAGIC)
+			ret = -EINVAL;
+		else {
+			/* Fetch labeled port number. */
+			sa->sipc_port = rsk->minor;
+			resched = xnselect_signal(&sk->priv->send_block, POLLOUT);
+		}
+		cobalt_atomic_leave(s);
+		if (ret)
+			return ret;
+	} else if (sa->sipc_port < 0)
+		sa = &nullsa;
+set_assoc:
+	cobalt_atomic_enter(s);
+	if (!test_bit(_XDDP_BOUND, &sk->status))
+		/* Set default name. */
+		sk->name = *sa;
+	/* Set default destination. */
+	sk->peer = *sa;
+	if (sa->sipc_port < 0)
+		__clear_bit(_XDDP_CONNECTED, &sk->status);
+	else
+		__set_bit(_XDDP_CONNECTED, &sk->status);
+	if (resched)
+		xnsched_run();
+	cobalt_atomic_leave(s);
+
+	return 0;
+}
+
+static int __xddp_setsockopt(struct xddp_socket *sk,
+			     struct rtdm_fd *fd,
+			     void *arg)
+{
+	int (*monitor)(struct rtdm_fd *fd, int event, long arg);
+	struct _rtdm_setsockopt_args sopt;
+	struct rtipc_port_label plabel;
+	struct __kernel_old_timeval tv;
+	rtdm_lockctx_t s;
+	size_t len;
+	int ret;
+
+	ret = rtipc_get_sockoptin(fd, &sopt, arg);
+	if (ret)
+		return ret;
+
+	if (sopt.level == SOL_SOCKET) {
+		switch (sopt.optname) {
+
+		case SO_RCVTIMEO_OLD:
+			ret = rtipc_get_timeval(fd, &tv, sopt.optval, sopt.optlen);
+			if (ret)
+				return ret;
+			sk->timeout = rtipc_timeval_to_ns(&tv);
+			break;
+
+		default:
+			ret = -EINVAL;
+		}
+
+		return ret;
+	}
+
+	if (sopt.level != SOL_XDDP)
+		return -ENOPROTOOPT;
+
+	switch (sopt.optname) {
+
+	case XDDP_BUFSZ:
+		ret = rtipc_get_length(fd, &len, sopt.optval, sopt.optlen);
+		if (ret)
+			return ret;
+		if (len > 0) {
+			len += sizeof(struct xddp_message);
+			if (sk->bufpool &&
+			    len > xnheap_get_size(sk->bufpool)) {
+				return -EINVAL;
+			}
+		}
+		rtdm_lock_get_irqsave(&sk->lock, s);
+		sk->reqbufsz = len;
+		if (len != sk->curbufsz &&
+		    !test_bit(_XDDP_SYNCWAIT, &sk->status) &&
+		    test_bit(_XDDP_BOUND, &sk->status))
+			ret = __xddp_resize_streambuf(sk);
+		rtdm_lock_put_irqrestore(&sk->lock, s);
+		break;
+
+	case XDDP_POOLSZ:
+		ret = rtipc_get_length(fd, &len, sopt.optval, sopt.optlen);
+		if (ret)
+			return ret;
+		if (len == 0)
+			return -EINVAL;
+		cobalt_atomic_enter(s);
+		if (test_bit(_XDDP_BOUND, &sk->status) ||
+		    test_bit(_XDDP_BINDING, &sk->status))
+			ret = -EALREADY;
+		else
+			sk->poolsz = len;
+		cobalt_atomic_leave(s);
+		break;
+
+	case XDDP_MONITOR:
+		/* Monitoring is available from kernel-space only. */
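+		/*
+		 * Hypothetical kernel-space sketch of a monitor
+		 * matching the expected prototype; it would receive
+		 * XDDP_EVTIN, XDDP_EVTOUT, XDDP_EVTDOWN and
+		 * XDDP_EVTNOBUF notifications.
+		 *
+		 *   static int my_monitor(struct rtdm_fd *fd,
+		 *                         int event, long arg)
+		 *   {
+		 *           if (event == XDDP_EVTNOBUF)
+		 *                   pr_warn("no buffer for %ld bytes\n", arg);
+		 *           return 0;
+		 *   }
+		 */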
+		if (rtdm_fd_is_user(fd))
+			return -EPERM;
+		if (sopt.optlen != sizeof(monitor))
+			return -EINVAL;
+		if (rtipc_get_arg(NULL, &monitor, sopt.optval, sizeof(monitor)))
+			return -EFAULT;
+		sk->monitor = monitor;
+		break;
+
+	case XDDP_LABEL:
+		if (sopt.optlen < sizeof(plabel))
+			return -EINVAL;
+		if (rtipc_get_arg(fd, &plabel, sopt.optval, sizeof(plabel)))
+			return -EFAULT;
+		cobalt_atomic_enter(s);
+		if (test_bit(_XDDP_BOUND, &sk->status) ||
+		    test_bit(_XDDP_BINDING, &sk->status))
+			ret = -EALREADY;
+		else {
+			strcpy(sk->label, plabel.label);
+			sk->label[XNOBJECT_NAME_LEN-1] = 0;
+		}
+		cobalt_atomic_leave(s);
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int __xddp_getsockopt(struct xddp_socket *sk,
+			     struct rtdm_fd *fd,
+			     void *arg)
+{
+	struct _rtdm_getsockopt_args sopt;
+	struct rtipc_port_label plabel;
+	struct __kernel_old_timeval tv;
+	rtdm_lockctx_t s;
+	socklen_t len;
+	int ret;
+
+	ret = rtipc_get_sockoptout(fd, &sopt, arg);
+	if (ret)
+		return ret;
+
+	if (rtipc_get_arg(fd, &len, sopt.optlen, sizeof(len)))
+		return -EFAULT;
+
+	if (sopt.level == SOL_SOCKET) {
+		switch (sopt.optname) {
+
+		case SO_RCVTIMEO_OLD:
+			rtipc_ns_to_timeval(&tv, sk->timeout);
+			ret = rtipc_put_timeval(fd, sopt.optval, &tv, len);
+			if (ret)
+				return ret;
+			break;
+
+		default:
+			ret = -EINVAL;
+		}
+
+		return ret;
+	}
+
+	if (sopt.level != SOL_XDDP)
+		return -ENOPROTOOPT;
+
+	switch (sopt.optname) {
+
+	case XDDP_LABEL:
+		if (len < sizeof(plabel))
+			return -EINVAL;
+		cobalt_atomic_enter(s);
+		strcpy(plabel.label, sk->label);
+		cobalt_atomic_leave(s);
+		if (rtipc_put_arg(fd, sopt.optval, &plabel, sizeof(plabel)))
+			return -EFAULT;
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int __xddp_ioctl(struct rtdm_fd *fd,
+			unsigned int request, void *arg)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct sockaddr_ipc saddr, *saddrp = &saddr;
+	struct xddp_socket *sk = priv->state;
+	int ret = 0;
+
+	switch (request) {
+
+	COMPAT_CASE(_RTIOC_CONNECT):
+		ret = rtipc_get_sockaddr(fd, &saddrp, arg);
+		if (ret == 0)
+			ret = __xddp_connect_socket(sk, saddrp);
+		break;
+
+	COMPAT_CASE(_RTIOC_BIND):
+		ret = rtipc_get_sockaddr(fd, &saddrp, arg);
+		if (ret)
+			return ret;
+		if (saddrp == NULL)
+			return -EFAULT;
+		ret = __xddp_bind_socket(priv, saddrp);
+		break;
+
+	COMPAT_CASE(_RTIOC_GETSOCKNAME):
+		ret = rtipc_put_sockaddr(fd, arg, &sk->name);
+		break;
+
+	COMPAT_CASE(_RTIOC_GETPEERNAME):
+		ret = rtipc_put_sockaddr(fd, arg, &sk->peer);
+		break;
+
+	COMPAT_CASE(_RTIOC_SETSOCKOPT):
+		ret = __xddp_setsockopt(sk, fd, arg);
+		break;
+
+	COMPAT_CASE(_RTIOC_GETSOCKOPT):
+		ret = __xddp_getsockopt(sk, fd, arg);
+		break;
+
+	case _RTIOC_LISTEN:
+	COMPAT_CASE(_RTIOC_ACCEPT):
+		ret = -EOPNOTSUPP;
+		break;
+
+	case _RTIOC_SHUTDOWN:
+		ret = -ENOTCONN;
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int xddp_ioctl(struct rtdm_fd *fd,
+		      unsigned int request, void *arg)
+{
+	int ret;
+
+	switch (request) {
+	COMPAT_CASE(_RTIOC_BIND):
+		if (rtdm_in_rt_context())
+			return -ENOSYS;	/* Try downgrading to NRT */
+		fallthrough;
+	default:
+		ret = __xddp_ioctl(fd, request, arg);
+	}
+
+	return ret;
+}
+
+static unsigned int xddp_pollstate(struct rtdm_fd *fd) /* atomic */
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct xddp_socket *sk = priv->state, *rsk;
+	unsigned int mask = 0, pollstate;
+	struct rtdm_fd *rfd;
+
+	pollstate = __xnpipe_pollstate(sk->minor);
+	if (test_bit(_XDDP_BOUND, &sk->status))
+		mask |= (pollstate & POLLIN);
+
+	/*
+	 * If the socket is connected, POLLOUT means that the peer
+	 * exists, is bound and can receive data. Otherwise POLLOUT is
+	 * always set, assuming the client is likely to use explicit
+	 * addressing in send operations.
+	 */
+	if (test_bit(_XDDP_CONNECTED, &sk->status)) {
+		rfd = portmap[sk->peer.sipc_port];
+		if (rfd) {
+			rsk = rtipc_fd_to_state(rfd);
+			mask |= (pollstate & POLLOUT);
+		}
+	} else
+		mask |= POLLOUT;
+
+	return mask;
+}
+
+struct rtipc_protocol xddp_proto_driver = {
+	.proto_name = "xddp",
+	.proto_statesz = sizeof(struct xddp_socket),
+	.proto_ops = {
+		.socket = xddp_socket,
+		.close = xddp_close,
+		.recvmsg = xddp_recvmsg,
+		.sendmsg = xddp_sendmsg,
+		.read = xddp_read,
+		.write = xddp_write,
+		.ioctl = xddp_ioctl,
+		.pollstate = xddp_pollstate,
+	}
+};
+++ linux-patched/drivers/xenomai/ipc/iddp.c	2022-03-21 12:58:29.150891292 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/Makefile	2022-03-02 11:48:10.000000000 +0100
+/**
+ * This file is part of the Xenomai project.
+ *
+ * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/time.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/bufd.h>
+#include <cobalt/kernel/map.h>
+#include <rtdm/ipc.h>
+#include "internal.h"
+
+#define IDDP_SOCKET_MAGIC 0xa37a37a8
+
+struct iddp_message {
+	struct list_head next;
+	int from;
+	size_t rdoff;
+	size_t len;
+	char data[];
+};
+
+struct iddp_socket {
+	int magic;
+	struct sockaddr_ipc name;
+	struct sockaddr_ipc peer;
+	struct xnheap *bufpool;
+	struct xnheap privpool;
+	rtdm_waitqueue_t *poolwaitq;
+	rtdm_waitqueue_t privwaitq;
+	size_t poolsz;
+	rtdm_sem_t insem;
+	struct list_head inq;
+	u_long status;
+	xnhandle_t handle;
+	char label[XNOBJECT_NAME_LEN];
+	nanosecs_rel_t rx_timeout;
+	nanosecs_rel_t tx_timeout;
+	unsigned long stalls;	/* Buffer stall counter. */
+	struct rtipc_private *priv;
+};
+
+static struct sockaddr_ipc nullsa = {
+	.sipc_family = AF_RTIPC,
+	.sipc_port = -1
+};
+
+static struct xnmap *portmap;
+
+static rtdm_waitqueue_t poolwaitq;
+
+#define _IDDP_BINDING   0
+#define _IDDP_BOUND     1
+#define _IDDP_CONNECTED 2
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static char *__iddp_link_target(void *obj)
+{
+	struct iddp_socket *sk = obj;
+
+	return kasformat("%d", sk->name.sipc_port);
+}
+
+extern struct xnptree rtipc_ptree;
+
+static struct xnpnode_link __iddp_pnode = {
+	.node = {
+		.dirname = "iddp",
+		.root = &rtipc_ptree,
+		.ops = &xnregistry_vlink_ops,
+	},
+	.target = __iddp_link_target,
+};
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+static struct xnpnode_link __iddp_pnode = {
+	.node = {
+		.dirname = "iddp",
+	},
+};
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+static inline void __iddp_init_mbuf(struct iddp_message *mbuf, size_t len)
+{
+	mbuf->rdoff = 0;
+	mbuf->len = len;
+	INIT_LIST_HEAD(&mbuf->next);
+}
+
+static struct iddp_message *
+__iddp_alloc_mbuf(struct iddp_socket *sk, size_t len,
+		  nanosecs_rel_t timeout, int flags, int *pret)
+{
+	struct iddp_message *mbuf = NULL;
+	rtdm_toseq_t timeout_seq;
+	rtdm_lockctx_t s;
+	int ret = 0;
+
+	rtdm_toseq_init(&timeout_seq, timeout);
+
+	for (;;) {
+		mbuf = xnheap_alloc(sk->bufpool, len + sizeof(*mbuf));
+		if (mbuf) {
+			__iddp_init_mbuf(mbuf, len);
+			break;
+		}
+		if (flags & MSG_DONTWAIT) {
+			ret = -EAGAIN;
+			break;
+		}
+		/*
+		 * No luck, no buffer free. Wait for a buffer to be
+		 * released and retry. Admittedly, we might create a
+		 * thundering herd effect if many waiters put a lot of
+		 * memory pressure on the pool, but in this case, the
+		 * pool size should be adjusted.
+		 */
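+		/*
+		 * The pool size is set on the receiving socket before
+		 * bind(), e.g. (hypothetical sketch):
+		 *
+		 *   size_t sz = 65536;
+		 *   setsockopt(s, SOL_IDDP, IDDP_POOLSZ, &sz, sizeof(sz));
+		 */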
+		rtdm_waitqueue_lock(sk->poolwaitq, s);
+		++sk->stalls;
+		ret = rtdm_timedwait_locked(sk->poolwaitq, timeout, &timeout_seq);
+		rtdm_waitqueue_unlock(sk->poolwaitq, s);
+		if (unlikely(ret == -EIDRM))
+			ret = -ECONNRESET;
+		if (ret)
+			break;
+	}
+
+	*pret = ret;
+
+	return mbuf;
+}
+
+static void __iddp_free_mbuf(struct iddp_socket *sk,
+			     struct iddp_message *mbuf)
+{
+	xnheap_free(sk->bufpool, mbuf);
+	rtdm_waitqueue_broadcast(sk->poolwaitq);
+}
+
+static int iddp_socket(struct rtdm_fd *fd)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iddp_socket *sk = priv->state;
+
+	sk->magic = IDDP_SOCKET_MAGIC;
+	sk->name = nullsa;	/* Unbound */
+	sk->peer = nullsa;
+	sk->bufpool = &cobalt_heap;
+	sk->poolwaitq = &poolwaitq;
+	sk->poolsz = 0;
+	sk->status = 0;
+	sk->handle = 0;
+	sk->rx_timeout = RTDM_TIMEOUT_INFINITE;
+	sk->tx_timeout = RTDM_TIMEOUT_INFINITE;
+	sk->stalls = 0;
+	*sk->label = 0;
+	INIT_LIST_HEAD(&sk->inq);
+	rtdm_sem_init(&sk->insem, 0);
+	rtdm_waitqueue_init(&sk->privwaitq);
+	sk->priv = priv;
+
+	return 0;
+}
+
+static void iddp_close(struct rtdm_fd *fd)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iddp_socket *sk = priv->state;
+	struct iddp_message *mbuf;
+	rtdm_lockctx_t s;
+	void *poolmem;
+	u32 poolsz;
+
+	rtdm_sem_destroy(&sk->insem);
+	rtdm_waitqueue_destroy(&sk->privwaitq);
+
+	if (test_bit(_IDDP_BOUND, &sk->status)) {
+		if (sk->handle)
+			xnregistry_remove(sk->handle);
+		if (sk->name.sipc_port > -1) {
+			cobalt_atomic_enter(s);
+			xnmap_remove(portmap, sk->name.sipc_port);
+			cobalt_atomic_leave(s);
+		}
+		if (sk->bufpool != &cobalt_heap) {
+			poolmem = xnheap_get_membase(&sk->privpool);
+			poolsz = xnheap_get_size(&sk->privpool);
+			xnheap_destroy(&sk->privpool);
+			xnheap_vfree(poolmem);
+			return;
+		}
+	}
+
+	/* Send unread datagrams back to the system heap. */
+	while (!list_empty(&sk->inq)) {
+		mbuf = list_entry(sk->inq.next, struct iddp_message, next);
+		list_del(&mbuf->next);
+		xnheap_free(&cobalt_heap, mbuf);
+	}
+
+	kfree(sk);
+
+	return;
+}
+
+static ssize_t __iddp_recvmsg(struct rtdm_fd *fd,
+			      struct iovec *iov, int iovlen, int flags,
+			      struct sockaddr_ipc *saddr)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iddp_socket *sk = priv->state;
+	ssize_t maxlen, len, wrlen, vlen;
+	rtdm_toseq_t timeout_seq, *toseq;
+	int nvec, rdoff, ret, dofree;
+	struct iddp_message *mbuf;
+	nanosecs_rel_t timeout;
+	struct xnbufd bufd;
+	rtdm_lockctx_t s;
+
+	if (!test_bit(_IDDP_BOUND, &sk->status))
+		return -EAGAIN;
+
+	maxlen = rtdm_get_iov_flatlen(iov, iovlen);
+	if (maxlen == 0)
+		return 0;
+
+	if (flags & MSG_DONTWAIT) {
+		timeout = RTDM_TIMEOUT_NONE;
+		toseq = NULL;
+	} else {
+		timeout = sk->rx_timeout;
+		toseq = &timeout_seq;
+	}
+
+	/* We want to pick one buffer from the queue. */
+
+	for (;;) {
+		ret = rtdm_sem_timeddown(&sk->insem, timeout, toseq);
+		if (unlikely(ret)) {
+			if (ret == -EIDRM)
+				return -ECONNRESET;
+			return ret;
+		}
+		/* We may have spurious wakeups. */
+		cobalt_atomic_enter(s);
+		if (!list_empty(&sk->inq))
+			break;
+		cobalt_atomic_leave(s);
+	}
+
+	/* Pull heading message from input queue. */
+	mbuf = list_entry(sk->inq.next, struct iddp_message, next);
+	rdoff = mbuf->rdoff;
+	len = mbuf->len - rdoff;
+	if (saddr) {
+		saddr->sipc_family = AF_RTIPC;
+		saddr->sipc_port = mbuf->from;
+	}
+	if (maxlen >= len) {
+		list_del(&mbuf->next);
+		dofree = 1;
+		if (list_empty(&sk->inq)) /* -> non-readable */
+			xnselect_signal(&priv->recv_block, 0);
+
+	} else {
+		/* Buffer is only partially read: repost. */
+		mbuf->rdoff += maxlen;
+		len = maxlen;
+		dofree = 0;
+	}
+
+	if (!dofree)
+		rtdm_sem_up(&sk->insem);
+
+	cobalt_atomic_leave(s);
+
+	/* Now, write "len" bytes from mbuf->data to the vector cells */
+	for (nvec = 0, wrlen = len; nvec < iovlen && wrlen > 0; nvec++) {
+		if (iov[nvec].iov_len == 0)
+			continue;
+		vlen = wrlen >= iov[nvec].iov_len ? iov[nvec].iov_len : wrlen;
+		if (rtdm_fd_is_user(fd)) {
+			xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen);
+			ret = xnbufd_copy_from_kmem(&bufd, mbuf->data + rdoff, vlen);
+			xnbufd_unmap_uread(&bufd);
+		} else {
+			xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen);
+			ret = xnbufd_copy_from_kmem(&bufd, mbuf->data + rdoff, vlen);
+			xnbufd_unmap_kread(&bufd);
+		}
+		if (ret < 0)
+			break;
+		iov[nvec].iov_base += vlen;
+		iov[nvec].iov_len -= vlen;
+		wrlen -= vlen;
+		rdoff += vlen;
+	}
+
+	if (dofree)
+		__iddp_free_mbuf(sk, mbuf);
+
+	return ret ?: len;
+}
+
+static ssize_t iddp_recvmsg(struct rtdm_fd *fd,
+			    struct user_msghdr *msg, int flags)
+{
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+	struct sockaddr_ipc saddr;
+	ssize_t ret;
+
+	if (flags & ~MSG_DONTWAIT)
+		return -EINVAL;
+
+	if (msg->msg_name) {
+		if (msg->msg_namelen < sizeof(struct sockaddr_ipc))
+			return -EINVAL;
+	} else if (msg->msg_namelen != 0)
+		return -EINVAL;
+
+	if (msg->msg_iovlen >= UIO_MAXIOV)
+		return -EINVAL;
+
+	/* Copy I/O vector in */
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	ret = __iddp_recvmsg(fd, iov, msg->msg_iovlen, flags, &saddr);
+	if (ret <= 0) {
+		rtdm_drop_iovec(iov, iov_fast);
+		return ret;
+	}
+
+	/* Copy the updated I/O vector back */
+	if (rtdm_put_iovec(fd, iov, msg, iov_fast))
+		return -EFAULT;
+
+	/* Copy the source address if required. */
+	if (msg->msg_name) {
+		if (rtipc_put_arg(fd, msg->msg_name, &saddr, sizeof(saddr)))
+			return -EFAULT;
+		msg->msg_namelen = sizeof(struct sockaddr_ipc);
+	}
+
+	return ret;
+}
+
+static ssize_t iddp_read(struct rtdm_fd *fd, void *buf, size_t len)
+{
+	struct iovec iov = { .iov_base = buf, .iov_len = len };
+
+	return __iddp_recvmsg(fd, &iov, 1, 0, NULL);
+}
+
+static ssize_t __iddp_sendmsg(struct rtdm_fd *fd,
+			      struct iovec *iov, int iovlen, int flags,
+			      const struct sockaddr_ipc *daddr)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iddp_socket *sk = priv->state, *rsk;
+	struct iddp_message *mbuf;
+	ssize_t len, rdlen, vlen;
+	int nvec, wroff, ret;
+	struct rtdm_fd *rfd;
+	struct xnbufd bufd;
+	rtdm_lockctx_t s;
+
+	len = rtdm_get_iov_flatlen(iov, iovlen);
+	if (len == 0)
+		return 0;
+
+	cobalt_atomic_enter(s);
+	rfd = xnmap_fetch_nocheck(portmap, daddr->sipc_port);
+	if (rfd && rtdm_fd_lock(rfd) < 0)
+		rfd = NULL;
+	cobalt_atomic_leave(s);
+	if (rfd == NULL)
+		return -ECONNRESET;
+
+	rsk = rtipc_fd_to_state(rfd);
+	if (!test_bit(_IDDP_BOUND, &rsk->status)) {
+		rtdm_fd_unlock(rfd);
+		return -ECONNREFUSED;
+	}
+
+	mbuf = __iddp_alloc_mbuf(rsk, len, sk->tx_timeout, flags, &ret);
+	if (unlikely(ret)) {
+		rtdm_fd_unlock(rfd);
+		return ret;
+	}
+
+	/* Now, move "len" bytes to mbuf->data from the vector cells */
+	for (nvec = 0, rdlen = len, wroff = 0;
+	     nvec < iovlen && rdlen > 0; nvec++) {
+		if (iov[nvec].iov_len == 0)
+			continue;
+		vlen = rdlen >= iov[nvec].iov_len ? iov[nvec].iov_len : rdlen;
+		if (rtdm_fd_is_user(fd)) {
+			xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen);
+			ret = xnbufd_copy_to_kmem(mbuf->data + wroff, &bufd, vlen);
+			xnbufd_unmap_uread(&bufd);
+		} else {
+			xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen);
+			ret = xnbufd_copy_to_kmem(mbuf->data + wroff, &bufd, vlen);
+			xnbufd_unmap_kread(&bufd);
+		}
+		if (ret < 0)
+			goto fail;
+		iov[nvec].iov_base += vlen;
+		iov[nvec].iov_len -= vlen;
+		rdlen -= vlen;
+		wroff += vlen;
+	}
+
+	cobalt_atomic_enter(s);
+
+	/*
+	 * CAUTION: we must remain atomic from the moment we signal
+	 * POLLIN, until sem_up has happened.
+	 */
+	if (list_empty(&rsk->inq)) /* -> readable */
+		xnselect_signal(&rsk->priv->recv_block, POLLIN);
+
+	mbuf->from = sk->name.sipc_port;
+
+	if (flags & MSG_OOB)
+		list_add(&mbuf->next, &rsk->inq);
+	else
+		list_add_tail(&mbuf->next, &rsk->inq);
+
+	rtdm_sem_up(&rsk->insem); /* Will resched. */
+
+	cobalt_atomic_leave(s);
+
+	rtdm_fd_unlock(rfd);
+
+	return len;
+
+fail:
+	__iddp_free_mbuf(rsk, mbuf);
+
+	rtdm_fd_unlock(rfd);
+
+	return ret;
+}
+
+static ssize_t iddp_sendmsg(struct rtdm_fd *fd,
+			    const struct user_msghdr *msg, int flags)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+	struct iddp_socket *sk = priv->state;
+	struct sockaddr_ipc daddr;
+	ssize_t ret;
+
+	if (flags & ~(MSG_OOB | MSG_DONTWAIT))
+		return -EINVAL;
+
+	if (msg->msg_name) {
+		if (msg->msg_namelen != sizeof(struct sockaddr_ipc))
+			return -EINVAL;
+
+		/* Fetch the destination address to send to. */
+		if (rtipc_get_arg(fd, &daddr, msg->msg_name, sizeof(daddr)))
+			return -EFAULT;
+
+		if (daddr.sipc_port < 0 ||
+		    daddr.sipc_port >= CONFIG_XENO_OPT_IDDP_NRPORT)
+			return -EINVAL;
+	} else {
+		if (msg->msg_namelen != 0)
+			return -EINVAL;
+		daddr = sk->peer;
+		if (daddr.sipc_port < 0)
+			return -EDESTADDRREQ;
+	}
+
+	if (msg->msg_iovlen >= UIO_MAXIOV)
+		return -EINVAL;
+
+	/* Copy I/O vector in */
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	ret = __iddp_sendmsg(fd, iov, msg->msg_iovlen, flags, &daddr);
+	if (ret <= 0) {
+		rtdm_drop_iovec(iov, iov_fast);
+		return ret;
+	}
+
+	/* Copy updated I/O vector back */
+	return rtdm_put_iovec(fd, iov, msg, iov_fast) ?: ret;
+}
+
+static ssize_t iddp_write(struct rtdm_fd *fd,
+			  const void *buf, size_t len)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
+	struct iddp_socket *sk = priv->state;
+
+	if (sk->peer.sipc_port < 0)
+		return -EDESTADDRREQ;
+
+	return __iddp_sendmsg(fd, &iov, 1, 0, &sk->peer);
+}
+
+static int __iddp_bind_socket(struct rtdm_fd *fd,
+			      struct sockaddr_ipc *sa)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iddp_socket *sk = priv->state;
+	int ret = 0, port;
+	rtdm_lockctx_t s;
+	void *poolmem;
+	size_t poolsz;
+
+	if (sa->sipc_family != AF_RTIPC)
+		return -EINVAL;
+
+	if (sa->sipc_port < -1 ||
+	    sa->sipc_port >= CONFIG_XENO_OPT_IDDP_NRPORT)
+		return -EINVAL;
+
+	cobalt_atomic_enter(s);
+	if (test_bit(_IDDP_BOUND, &sk->status) ||
+	    __test_and_set_bit(_IDDP_BINDING, &sk->status))
+		ret = -EADDRINUSE;
+	cobalt_atomic_leave(s);
+	if (ret)
+		return ret;
+
+	/* Will auto-select a free port number if unspec (-1). */
+	port = sa->sipc_port;
+	cobalt_atomic_enter(s);
+	port = xnmap_enter(portmap, port, fd);
+	cobalt_atomic_leave(s);
+	if (port < 0)
+		return port == -EEXIST ? -EADDRINUSE : -ENOMEM;
+
+	sa->sipc_port = port;
+
+	/*
+	 * Allocate a local buffer pool if we were told to do so via
+	 * setsockopt() before we get here.
+	 */
+	poolsz = sk->poolsz;
+	if (poolsz > 0) {
+		poolsz = PAGE_ALIGN(poolsz);
+		poolmem = xnheap_vmalloc(poolsz);
+		if (poolmem == NULL) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		ret = xnheap_init(&sk->privpool, poolmem, poolsz);
+		if (ret) {
+			xnheap_vfree(poolmem);
+			goto fail;
+		}
+		xnheap_set_name(&sk->privpool, "iddp-pool@%d", port);
+		sk->poolwaitq = &sk->privwaitq;
+		sk->bufpool = &sk->privpool;
+	}
+
+	sk->name = *sa;
+	/* Set default destination if unset at binding time. */
+	if (sk->peer.sipc_port < 0)
+		sk->peer = *sa;
+
+	if (*sk->label) {
+		ret = xnregistry_enter(sk->label, sk,
+				       &sk->handle, &__iddp_pnode.node);
+		if (ret) {
+			if (poolsz > 0) {
+				xnheap_destroy(&sk->privpool);
+				xnheap_vfree(poolmem);
+			}
+			goto fail;
+		}
+	}
+
+	cobalt_atomic_enter(s);
+	__clear_bit(_IDDP_BINDING, &sk->status);
+	__set_bit(_IDDP_BOUND, &sk->status);
+	if (xnselect_signal(&priv->send_block, POLLOUT))
+		xnsched_run();
+	cobalt_atomic_leave(s);
+
+	return 0;
+fail:
+	xnmap_remove(portmap, port);
+	clear_bit(_IDDP_BINDING, &sk->status);
+
+	return ret;
+}
+
+static int __iddp_connect_socket(struct iddp_socket *sk,
+				 struct sockaddr_ipc *sa)
+{
+	struct sockaddr_ipc _sa;
+	struct iddp_socket *rsk;
+	int ret, resched = 0;
+	rtdm_lockctx_t s;
+	xnhandle_t h;
+
+	if (sa == NULL) {
+		_sa = nullsa;
+		sa = &_sa;
+		goto set_assoc;
+	}
+
+	if (sa->sipc_family != AF_RTIPC)
+		return -EINVAL;
+
+	if (sa->sipc_port < -1 ||
+	    sa->sipc_port >= CONFIG_XENO_OPT_IDDP_NRPORT)
+		return -EINVAL;
+	/*
+	 * - If a valid sipc_port is passed in the [0..NRPORT-1] range,
+	 * it is used verbatim and the connection succeeds
+	 * immediately, regardless of whether the destination is
+	 * bound at the time of the call.
+	 *
+	 * - If sipc_port is -1 and a label was set via IDDP_LABEL,
+	 * connect() blocks for the requested amount of time (see
+	 * SO_RCVTIMEO) until a socket is bound to the same label.
+	 *
+	 * - If sipc_port is -1 and no label is given, the default
+	 * destination address is cleared, meaning that any subsequent
+	 * write() to the socket will return -EDESTADDRREQ, until a
+	 * valid destination address is set via connect() or bind().
+	 *
+	 * - In all other cases, -EINVAL is returned.
+	 */
+	if (sa->sipc_port < 0 && *sk->label) {
+		ret = xnregistry_bind(sk->label,
+				      sk->rx_timeout, XN_RELATIVE, &h);
+		if (ret)
+			return ret;
+
+		cobalt_atomic_enter(s);
+		rsk = xnregistry_lookup(h, NULL);
+		if (rsk == NULL || rsk->magic != IDDP_SOCKET_MAGIC)
+			ret = -EINVAL;
+		else {
+			/* Fetch labeled port number. */
+			sa->sipc_port = rsk->name.sipc_port;
+			resched = xnselect_signal(&sk->priv->send_block, POLLOUT);
+		}
+		cobalt_atomic_leave(s);
+		if (ret)
+			return ret;
+	} else if (sa->sipc_port < 0)
+		sa = &nullsa;
+set_assoc:
+	cobalt_atomic_enter(s);
+	if (!test_bit(_IDDP_BOUND, &sk->status))
+		/* Set default name. */
+		sk->name = *sa;
+	/* Set default destination. */
+	sk->peer = *sa;
+	if (sa->sipc_port < 0)
+		__clear_bit(_IDDP_CONNECTED, &sk->status);
+	else
+		__set_bit(_IDDP_CONNECTED, &sk->status);
+	if (resched)
+		xnsched_run();
+	cobalt_atomic_leave(s);
+
+	return 0;
+}
+
+static int __iddp_setsockopt(struct iddp_socket *sk,
+			     struct rtdm_fd *fd,
+			     void *arg)
+{
+	struct _rtdm_setsockopt_args sopt;
+	struct rtipc_port_label plabel;
+	struct __kernel_old_timeval tv;
+	rtdm_lockctx_t s;
+	size_t len;
+	int ret;
+
+	ret = rtipc_get_sockoptin(fd, &sopt, arg);
+	if (ret)
+		return ret;
+
+	if (sopt.level == SOL_SOCKET) {
+		switch (sopt.optname) {
+
+		case SO_RCVTIMEO_OLD:
+			ret = rtipc_get_timeval(fd, &tv, sopt.optval, sopt.optlen);
+			if (ret)
+				return ret;
+			sk->rx_timeout = rtipc_timeval_to_ns(&tv);
+			break;
+
+		case SO_SNDTIMEO_OLD:
+			ret = rtipc_get_timeval(fd, &tv, sopt.optval, sopt.optlen);
+			if (ret)
+				return ret;
+			sk->tx_timeout = rtipc_timeval_to_ns(&tv);
+			break;
+
+		default:
+			ret = -EINVAL;
+		}
+
+		return ret;
+	}
+
+	if (sopt.level != SOL_IDDP)
+		return -ENOPROTOOPT;
+
+	switch (sopt.optname) {
+
+	case IDDP_POOLSZ:
+		ret = rtipc_get_length(fd, &len, sopt.optval, sopt.optlen);
+		if (ret)
+			return ret;
+		if (len == 0)
+			return -EINVAL;
+		cobalt_atomic_enter(s);
+		/*
+		 * We may not do this more than once, and we have to
+		 * do this before the first binding.
+		 */
+		if (test_bit(_IDDP_BOUND, &sk->status) ||
+		    test_bit(_IDDP_BINDING, &sk->status))
+			ret = -EALREADY;
+		else
+			sk->poolsz = len;
+		cobalt_atomic_leave(s);
+		break;
+
+	case IDDP_LABEL:
+		if (sopt.optlen < sizeof(plabel))
+			return -EINVAL;
+		if (rtipc_get_arg(fd, &plabel, sopt.optval, sizeof(plabel)))
+			return -EFAULT;
+		cobalt_atomic_enter(s);
+		/*
+		 * We may attach a label to a client socket which was
+		 * previously bound in IDDP.
+		 */
+		if (test_bit(_IDDP_BINDING, &sk->status))
+			ret = -EALREADY;
+		else {
+			strcpy(sk->label, plabel.label);
+			sk->label[XNOBJECT_NAME_LEN-1] = 0;
+		}
+		cobalt_atomic_leave(s);
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
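+
+/*
+ * Hypothetical server-side counterpart of the sketch above: the pool
+ * size must be set before binding, and a label may be attached so
+ * that clients can connect by name:
+ *
+ *	size_t poolsz = 32768;	// arbitrary example size
+ *	struct rtipc_port_label plabel = { .label = "iddp-demo" };
+ *	struct sockaddr_ipc name = {
+ *		.sipc_family = AF_RTIPC,
+ *		.sipc_port = -1,	// let the driver pick a free port
+ *	};
+ *	setsockopt(s, SOL_IDDP, IDDP_POOLSZ, &poolsz, sizeof(poolsz));
+ *	setsockopt(s, SOL_IDDP, IDDP_LABEL, &plabel, sizeof(plabel));
+ *	bind(s, (struct sockaddr *)&name, sizeof(name));
+ */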
+
+static int __iddp_getsockopt(struct iddp_socket *sk,
+			     struct rtdm_fd *fd,
+			     void *arg)
+{
+	struct _rtdm_getsockopt_args sopt;
+	struct rtipc_port_label plabel;
+	struct __kernel_old_timeval tv;
+	rtdm_lockctx_t s;
+	socklen_t len;
+	int ret;
+
+	ret = rtipc_get_sockoptout(fd, &sopt, arg);
+	if (ret)
+		return ret;
+
+	ret = rtipc_get_arg(fd, &len, sopt.optlen, sizeof(len));
+	if (ret)
+		return ret;
+
+	if (sopt.level == SOL_SOCKET) {
+		switch (sopt.optname) {
+
+		case SO_RCVTIMEO_OLD:
+			rtipc_ns_to_timeval(&tv, sk->rx_timeout);
+			ret = rtipc_put_timeval(fd, sopt.optval, &tv, len);
+			if (ret)
+				return ret;
+			break;
+
+		case SO_SNDTIMEO_OLD:
+			rtipc_ns_to_timeval(&tv, sk->tx_timeout);
+			ret = rtipc_put_timeval(fd, sopt.optval, &tv, len);
+			if (ret)
+				return ret;
+			break;
+
+		default:
+			ret = -EINVAL;
+		}
+
+		return ret;
+	}
+
+	if (sopt.level != SOL_IDDP)
+		return -ENOPROTOOPT;
+
+	switch (sopt.optname) {
+
+	case IDDP_LABEL:
+		if (len < sizeof(plabel))
+			return -EINVAL;
+		cobalt_atomic_enter(s);
+		strcpy(plabel.label, sk->label);
+		cobalt_atomic_leave(s);
+		if (rtipc_put_arg(fd, sopt.optval, &plabel, sizeof(plabel)))
+			return -EFAULT;
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int __iddp_ioctl(struct rtdm_fd *fd,
+			unsigned int request, void *arg)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct sockaddr_ipc saddr, *saddrp = &saddr;
+	struct iddp_socket *sk = priv->state;
+	int ret = 0;
+
+	switch (request) {
+
+	COMPAT_CASE(_RTIOC_CONNECT):
+		ret = rtipc_get_sockaddr(fd, &saddrp, arg);
+		if (ret)
+			return ret;
+		ret = __iddp_connect_socket(sk, saddrp);
+		break;
+
+	COMPAT_CASE(_RTIOC_BIND):
+		ret = rtipc_get_sockaddr(fd, &saddrp, arg);
+		if (ret)
+			return ret;
+		if (saddrp == NULL)
+			return -EFAULT;
+		ret = __iddp_bind_socket(fd, saddrp);
+		break;
+
+	COMPAT_CASE(_RTIOC_GETSOCKNAME):
+		ret = rtipc_put_sockaddr(fd, arg, &sk->name);
+		break;
+
+	COMPAT_CASE(_RTIOC_GETPEERNAME):
+		ret = rtipc_put_sockaddr(fd, arg, &sk->peer);
+		break;
+
+	COMPAT_CASE(_RTIOC_SETSOCKOPT):
+		ret = __iddp_setsockopt(sk, fd, arg);
+		break;
+
+	COMPAT_CASE(_RTIOC_GETSOCKOPT):
+		ret = __iddp_getsockopt(sk, fd, arg);
+		break;
+
+	case _RTIOC_LISTEN:
+	COMPAT_CASE(_RTIOC_ACCEPT):
+		ret = -EOPNOTSUPP;
+		break;
+
+	case _RTIOC_SHUTDOWN:
+		ret = -ENOTCONN;
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int iddp_ioctl(struct rtdm_fd *fd,
+		      unsigned int request, void *arg)
+{
+	int ret;
+
+	switch (request) {
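+	/*
+	 * Binding may have to allocate the socket's local buffer
+	 * pool, which cannot be done from primary mode: returning
+	 * -ENOSYS from the RT stage causes the request to be
+	 * restarted from the non-RT one.
+	 */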
+	COMPAT_CASE(_RTIOC_BIND):
+		if (rtdm_in_rt_context())
+			return -ENOSYS;	/* Try downgrading to NRT */
+		fallthrough;
+	default:
+		ret = __iddp_ioctl(fd, request, arg);
+	}
+
+	return ret;
+}
+
+static int iddp_init(void)
+{
+	portmap = xnmap_create(CONFIG_XENO_OPT_IDDP_NRPORT, 0, 0);
+	if (portmap == NULL)
+		return -ENOMEM;
+
+	rtdm_waitqueue_init(&poolwaitq);
+
+	return 0;
+}
+
+static void iddp_exit(void)
+{
+	rtdm_waitqueue_destroy(&poolwaitq);
+	xnmap_delete(portmap);
+}
+
+static unsigned int iddp_pollstate(struct rtdm_fd *fd) /* atomic */
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iddp_socket *sk = priv->state;
+	unsigned int mask = 0;
+	struct rtdm_fd *rfd;
+
+	if (test_bit(_IDDP_BOUND, &sk->status) && !list_empty(&sk->inq))
+		mask |= POLLIN;
+
+	/*
+	 * If the socket is connected, POLLOUT means that the peer
+	 * exists. Otherwise POLLOUT is always set, assuming the
+	 * client is likely to use explicit addressing in send
+	 * operations.
+	 *
+	 * If the peer exists, we still can't really know whether
+	 * writing to the socket would block as it depends on the
+	 * message size and other highly dynamic factors, so pretend
+	 * it would not.
+	 */
+	if (test_bit(_IDDP_CONNECTED, &sk->status)) {
+		rfd = xnmap_fetch_nocheck(portmap, sk->peer.sipc_port);
+		if (rfd)
+			mask |= POLLOUT;
+	} else
+		mask |= POLLOUT;
+
+	return mask;
+}
+
+struct rtipc_protocol iddp_proto_driver = {
+	.proto_name = "iddp",
+	.proto_statesz = sizeof(struct iddp_socket),
+	.proto_init = iddp_init,
+	.proto_exit = iddp_exit,
+	.proto_ops = {
+		.socket = iddp_socket,
+		.close = iddp_close,
+		.recvmsg = iddp_recvmsg,
+		.sendmsg = iddp_sendmsg,
+		.read = iddp_read,
+		.write = iddp_write,
+		.ioctl = iddp_ioctl,
+		.pollstate = iddp_pollstate,
+	}
+};
+++ linux-patched/drivers/Makefile	2022-03-21 12:58:28.202900536 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:190 @
 obj-$(CONFIG_INTERCONNECT)	+= interconnect/
 obj-$(CONFIG_COUNTER)		+= counter/
 obj-$(CONFIG_MOST)		+= most/
--- linux/arch/arm/include/dovetail/irq.h	1970-01-01 01:00:00.000000000 +0100
+
+obj-$(CONFIG_XENOMAI) += xenomai/
+++ linux-patched/arch/arm/include/dovetail/irq.h	2022-03-21 12:58:28.695895729 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/include/dovetail/thread_info.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2017 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_DOVETAIL_IRQ_H
+#define _COBALT_DOVETAIL_IRQ_H
+
+#ifdef CONFIG_XENOMAI
+
+#include <cobalt/kernel/sched.h>
+
+/* hard irqs off. */
+static inline void irq_enter_pipeline(void)
+{
+	struct xnsched *sched = xnsched_current();
+
+	sched->lflags |= XNINIRQ;
+}
+
+/* hard irqs off. */
+static inline void irq_exit_pipeline(void)
+{
+	struct xnsched *sched = xnsched_current();
+
+	sched->lflags &= ~XNINIRQ;
+
+	/*
+	 * CAUTION: Switching stages as a result of rescheduling may
+	 * re-enable irqs, shut them off before returning if so.
+	 */
+	if ((sched->status|sched->lflags) & XNRESCHED) {
+		xnsched_run();
+		if (!hard_irqs_disabled())
+			hard_local_irq_disable();
+	}
+}
+
+#else  /* !CONFIG_XENOMAI */
+
+static inline void irq_enter_pipeline(void)
+{
+}
+
+static inline void irq_exit_pipeline(void)
+{
+}
+
+#endif	/* !CONFIG_XENOMAI */
+
+#endif /* !_COBALT_DOVETAIL_IRQ_H */
+++ linux-patched/arch/arm/include/dovetail/thread_info.h	2022-03-21 12:58:28.680895875 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/**
+ * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (c) Siemens AG, 2020
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_DOVETAIL_THREAD_INFO_H
+#define _COBALT_DOVETAIL_THREAD_INFO_H
+
+struct xnthread;
+struct cobalt_process;
+
+struct oob_thread_state {
+	/* Core thread backlink. */
+	struct xnthread *thread;
+	/* User process backlink. NULL for core threads. */
+	struct cobalt_process *process;
+};
+
+#endif /* !_COBALT_DOVETAIL_THREAD_INFO_H */
+++ linux-patched/arch/arm/xenomai/Kconfig	2022-03-21 12:58:28.659896080 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/ipipe/switch.S	1970-01-01 01:00:00.000000000 +0100
+source "kernel/xenomai/Kconfig"
+source "drivers/xenomai/Kconfig"
+
+config XENO_ARCH_FPU
+	def_bool VFP
+
+config XENO_ARCH_SYS3264
+	def_bool n
+
+config XENO_ARCH_OUTOFLINE_XNLOCK
+	bool
+	default y
+
+config XENO_ARCH_IPIPE_COMPAT
+	def_bool DOVETAIL
+	select IPIPE_COMPAT
+++ linux-patched/arch/arm/xenomai/ipipe/switch.S	2022-03-21 12:58:28.644896226 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/ipipe/thread.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Stelian Pop.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/linkage.h>
+#include <linux/version.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/tls.h>
+#ifdef CONFIG_VFP
+#include <asm/vfpmacros.h>
+#endif
+
+	.macro fpu_switch tmp
+#ifdef CONFIG_VFP
+#if __LINUX_ARM_ARCH__ <= 6
+#ifdef CONFIG_JUMP_LABEL
+9998:	nop
+	.pushsection __jump_table, "aw"
+	.word	9998b, 9999f, __xeno_vfp_key
+	.popsection
+#else
+	ldr	\tmp, =elf_hwcap
+	ldr	\tmp, [\tmp]
+	tst	\tmp, #HWCAP_VFP
+	beq	9999f
+#endif
+#endif
+	@ Always disable VFP so we can lazily save/restore the old
+	@ state. This occurs in the context of the previous thread.
+	VFPFMRX \tmp, FPEXC
+	bic     \tmp, \tmp, #FPEXC_EN
+	VFPFMXR FPEXC, \tmp
+#if __LINUX_ARM_ARCH__ <= 6
+9999:
+#endif
+#endif
+	.endm
+
+	.text
+
+#if defined(CONFIG_VFP) && defined(CONFIG_XENO_ARCH_FPU)
+/* Copied from vfp_save_state in arch/arm/vfp/vfphw.S
+ * r0 = pointer to union vfp_state, r1 = fpexc
+ */
+ENTRY(__asm_vfp_save)
+	VFPFSTMIA	r0, r2		@ save the working registers
+	VFPFMRX		r2, FPSCR	@ current status
+	tst		r1, #FPEXC_EX	@ is there additional state to save?
+	beq		1f
+	VFPFMRX		r3, FPINST	@ FPINST (only if FPEXC.EX is set)
+	tst		r1, #FPEXC_FP2V	@ is there an FPINST2 to read?
+	beq		1f
+	VFPFMRX		r12, FPINST2	@ FPINST2 if needed (and present)
+1:
+	stmia		r0, {r1, r2, r3, r12}	@ save FPEXC, FPSCR, FPINST, FPINST2
+	mov		pc, lr
+ENDPROC(__asm_vfp_save)
+
+/* Copied from no_old_VFP_process in arch/arm/vfp/vfphw.S
+ * r0 = pointer to union vfp_state
+ * r1 = current cpu
+ */
+ENTRY(__asm_vfp_load)
+#ifdef CONFIG_SMP
+	str		r1, [r0, #VFP_CPU]
+#endif
+	VFPFLDMIA	r0, r2		@ reload the working registers while
+					@ FPEXC is in a safe state
+	ldmia		r0, {r1, r2, r3, r12}	@ load FPEXC, FPSCR, FPINST, FPINST2
+	tst		r1, #FPEXC_EX	@ is there additional state to restore?
+	beq		1f
+	VFPFMXR		FPINST, r3	@ restore FPINST (only if FPEXC.EX is set)
+	tst		r1, #FPEXC_FP2V	@ is there an FPINST2 to write?
+	beq		1f
+	VFPFMXR		FPINST2, r12	@ FPINST2 if needed (and present)
+1:
+	VFPFMXR		FPSCR, r2	@ restore status
+	mov		pc, lr
+ENDPROC(__asm_vfp_load)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)
+	.macro load_tls base, tp, tpuser
+	ldr     \tp, [\base, #TI_TP_VALUE]
+	.endm
+
+	.macro switch_tls base, tp, tpuser, tmp1, tmp2
+	set_tls \tp, \tmp1, \tmp2
+	.endm
+#else
+	.macro load_tls base, tp, tpuser
+	ldr	\tp, [\base, #TI_TP_VALUE]
+	ldr	\tpuser, [\base, #TI_TP_VALUE + 4]
+	.endm
+#endif
+
+/*
+ * Switch context routine.
+ *
+ * Registers according to the ARM procedure call standard:
+ *   Reg    Description
+ *   r0-r3  argument/scratch registers
+ *   r4-r9  variable register
+ *   r10=sl stack limit/variable register
+ *   r11=fp frame pointer/variable register
+ *   r12=ip intra-procedure-call scratch register
+ *   r13=sp stack pointer (auto preserved)
+ *   r14=lr link register
+ *   r15=pc program counter (auto preserved)
+ *
+ * Copied from __switch_to, arch/arm/kernel/entry-armv.S.
+ * Right now it is identical, but who knows what the
+ * future has in store for us...
+ *
+ * XXX: All the following config options are NOT tested:
+ *      CONFIG_IWMMXT
+ *
+ *  Calling args:
+ * r0 = previous thread_info, r1 = next thread_info
+ *
+ * FIXME: this is ugly, at some point we should stop duplicating
+ * what __switch_to() does, dropping specific fpu management from
+ * Cobalt in the same move; this will have to wait until Dovetail
+ * is substituted for the I-pipe though, since the former already
+ * provides the support we need for this. --rpm
+ */
+ENTRY(__asm_thread_switch)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
+	add     ip, r0, #TI_CPU_SAVE
+ ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
+ THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
+ THUMB(	str	sp, [ip], #4		   )
+ THUMB(	str	lr, [ip], #4		   )
+	load_tls r1, r4, r5
+#ifdef CONFIG_CPU_USE_DOMAINS
+	ldr     r6, [r1, #TI_CPU_DOMAIN]
+#endif
+	switch_tls r0, r4, r5, r3, r7
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	ldr	r7, [r1, #TI_TASK]
+	ldr	r8, =__stack_chk_guard
+	ldr	r7, [r7, #TSK_STACK_CANARY]
+#endif
+#ifdef CONFIG_CPU_USE_DOMAINS
+	mcr     p15, 0, r6, c3, c0, 0           @ Set domain register
+#endif
+	fpu_switch r4
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	str	r7, [r8]
+#endif
+ ARM(	add	r4, r1, #TI_CPU_SAVE	   )
+ ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
+ THUMB(	add	ip, r1, #TI_CPU_SAVE	   )
+ THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
+ THUMB(	ldr	sp, [ip], #4		   )
+ THUMB(	ldr	pc, [ip]		   )
+ENDPROC(__asm_thread_switch)
+
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) */
+
+#include <asm/unwind.h>
+
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)
+	add	ip, r0, #TI_CPU_SAVE
+ ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
+ THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
+ THUMB(	str	sp, [ip], #4		   )
+ THUMB(	str	lr, [ip], #4		   )
+	ldr	r4, [r1, #TI_TP_VALUE]
+	ldr	r5, [r1, #TI_TP_VALUE + 4]
+#ifdef CONFIG_CPU_USE_DOMAINS
+	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
+	str	r6, [r0, #TI_CPU_DOMAIN]	@ Save old domain register
+	ldr	r6, [r1, #TI_CPU_DOMAIN]
+#endif
+	switch_tls r0, r4, r5, r3, r7
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	ldr	r7, [r1, #TI_TASK]
+	ldr	r8, =__stack_chk_guard
+	.if (TSK_STACK_CANARY > IMM12_MASK)
+	add	r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK
+	.endif
+	ldr	r7, [r7, #TSK_STACK_CANARY & IMM12_MASK]
+#endif
+#ifdef CONFIG_CPU_USE_DOMAINS
+	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
+#endif
+	mov	r5, r0
+	fpu_switch r4
+	add	r4, r1, #TI_CPU_SAVE
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	str	r7, [r8]
+#endif
+ THUMB(	mov	ip, r4			   )
+	mov	r0, r5
+ ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
+ THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
+ THUMB(	ldr	sp, [ip], #4		   )
+ THUMB(	ldr	pc, [ip]		   )
+ UNWIND(.fnend		)
+
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) */
+++ linux-patched/arch/arm/xenomai/ipipe/thread.c	2022-03-21 12:58:28.627896392 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/ipipe/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/ipipe.h>
+#include <linux/mm.h>
+#include <linux/jump_label.h>
+#include <asm/mmu_context.h>
+#include <cobalt/kernel/thread.h>
+
+struct static_key __xeno_vfp_key = STATIC_KEY_INIT_TRUE;
+
+asmlinkage void __asm_thread_switch(struct thread_info *out,
+				    struct thread_info *in);
+
+asmlinkage void __asm_thread_trampoline(void);
+
+#if defined(CONFIG_XENO_ARCH_FPU) && defined(CONFIG_VFP)
+
+static unsigned int vfp_checked;
+static DEFINE_MUTEX(vfp_check_lock);
+
+asmlinkage void __asm_vfp_save(union vfp_state *vfp, unsigned int fpexc);
+
+asmlinkage void __asm_vfp_load(union vfp_state *vfp, unsigned int cpu);
+
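+/*
+ * Raw accessors for the VFP system registers (FPEXC, FPSCR, ...):
+ * the mrc/mcr forms on coprocessor p10 encode the fmrx/fmxr
+ * instructions without requiring VFP support from the assembler.
+ */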
+#define do_vfp_fmrx(_vfp_)						\
+	({								\
+		u32 __v;						\
+		asm volatile("mrc p10, 7, %0, " __stringify(_vfp_)	\
+			     ", cr0, 0 @ fmrx %0, " #_vfp_:		\
+			     "=r" (__v));				\
+		__v;							\
+	})
+
+#define do_vfp_fmxr(_vfp_,_var_)				\
+	asm volatile("mcr p10, 7, %0, " __stringify(_vfp_)	\
+		     ", cr0, 0 @ fmxr " #_vfp_ ", %0":		\
+		     /* */ : "r" (_var_))
+
+extern union vfp_state *vfp_current_hw_state[NR_CPUS];
+
+static inline union vfp_state *get_fpu_owner(void)
+{
+	union vfp_state *vfp_owner;
+	unsigned int cpu;
+#ifdef CONFIG_SMP
+	unsigned int fpexc;
+#endif
+
+#if __LINUX_ARM_ARCH__ <= 6
+	if (!static_key_true(&__xeno_vfp_key))
+		return NULL;
+#endif
+
+#ifdef CONFIG_SMP
+	fpexc = do_vfp_fmrx(FPEXC);
+	if (!(fpexc & FPEXC_EN))
+		return NULL;
+#endif
+
+	cpu = raw_smp_processor_id();
+	vfp_owner = vfp_current_hw_state[cpu];
+	if (!vfp_owner)
+		return NULL;
+
+#ifdef CONFIG_SMP
+	if (vfp_owner->hard.cpu != cpu)
+		return NULL;
+#endif /* SMP */
+
+	return vfp_owner;
+}
+
+#define do_disable_vfp(fpexc)					\
+	do_vfp_fmxr(FPEXC, fpexc & ~FPEXC_EN)
+
+#define XNARCH_VFP_ANY_EXC						\
+	(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK)
+
+#define do_enable_vfp()							\
+	({								\
+		unsigned _fpexc = do_vfp_fmrx(FPEXC) | FPEXC_EN;	\
+		do_vfp_fmxr(FPEXC, _fpexc & ~XNARCH_VFP_ANY_EXC);	\
+		_fpexc;							\
+	})
+
+int xnarch_fault_fpu_p(struct ipipe_trap_data *d)
+{
+	/* This function decodes the faulting instruction the same way
+	   "call_fpe" does in arch/arm/entry-armv.S. */
+	static unsigned copro_to_exc[16] = {
+		IPIPE_TRAP_UNDEFINSTR,
+		/* FPE */
+		IPIPE_TRAP_FPU, IPIPE_TRAP_FPU,
+		IPIPE_TRAP_UNDEFINSTR,
+#ifdef CONFIG_CRUNCH
+		IPIPE_TRAP_FPU, IPIPE_TRAP_FPU, IPIPE_TRAP_FPU,
+#else /* !CONFIG_CRUNCH */
+		IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR,
+#endif /* !CONFIG_CRUNCH */
+		IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR,
+#ifdef CONFIG_VFP
+		IPIPE_TRAP_VFP, IPIPE_TRAP_VFP,
+#else /* !CONFIG_VFP */
+		IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR,
+#endif /* !CONFIG_VFP */
+		IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR,
+		IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR,
+	};
+	unsigned instr, exc, cp;
+	char *pc;
+
+	if (d->exception == IPIPE_TRAP_FPU)
+		return 1;
+
+	if (d->exception == IPIPE_TRAP_VFP)
+		goto trap_vfp;
+
+	if (d->exception != IPIPE_TRAP_UNDEFINSTR)
+		return 0;
+
+	pc = (char *) xnarch_fault_pc(d);
+	if (unlikely(thumb_mode(d->regs))) {
+		unsigned short thumbh, thumbl;
+
+#if defined(CONFIG_ARM_THUMB) && __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_V7)
+#if __LINUX_ARM_ARCH__ < 7
+		if (cpu_architecture() < CPU_ARCH_ARMv7)
+#else
+		if (0)
+#endif /* arch < 7 */
+#endif /* thumb && arch >= 6 && cpu_v7 */
+			return 0;
+
+		thumbh = *(unsigned short *) pc;
+		thumbl = *((unsigned short *) pc + 1);
+
+		if ((thumbh & 0x0000f800) < 0x0000e800)
+			return 0;
+		instr = (thumbh << 16) | thumbl;
+
+#ifdef CONFIG_NEON
+		if ((instr & 0xef000000) == 0xef000000
+		    || (instr & 0xff100000) == 0xf9000000)
+			goto trap_vfp;
+#endif
+	} else {
+		instr = *(unsigned *) pc;
+
+#ifdef CONFIG_NEON
+		if ((instr & 0xfe000000) == 0xf2000000
+		    || (instr & 0xff100000) == 0xf4000000)
+			goto trap_vfp;
+#endif
+	}
+
+	if ((instr & 0x0c000000) != 0x0c000000)
+		return 0;
+
+	cp = (instr & 0x00000f00) >> 8;
+#ifdef CONFIG_IWMMXT
+	/* We need something equivalent to _TIF_USING_IWMMXT for Xenomai kernel
+	   threads */
+	if (cp <= 1) {
+		d->exception = IPIPE_TRAP_FPU;
+		return 1;
+	}
+#endif
+
+	exc = copro_to_exc[cp];
+	if (exc == IPIPE_TRAP_VFP) {
+	  trap_vfp:
+		/* If an exception is pending, the VFP fault is not really an
+		   "FPU unavailable" fault, so we return undefinstr in that
+		   case and let the nucleus hand the fault over to Linux. */
+		exc = do_vfp_fmrx(FPEXC);
+		if (exc & (FPEXC_EX|FPEXC_DEX)
+		    || ((exc & FPEXC_EN) && do_vfp_fmrx(FPSCR) & FPSCR_IXE))
+			exc = IPIPE_TRAP_UNDEFINSTR;
+		else
+			exc = IPIPE_TRAP_VFP;
+	}
+
+	d->exception = exc;
+	return exc != IPIPE_TRAP_UNDEFINSTR;
+}
+
+void xnarch_leave_root(struct xnthread *root)
+{
+	struct xnarchtcb *rootcb = xnthread_archtcb(root);
+	rootcb->fpup = get_fpu_owner();
+}
+
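+/*
+ * Lazy VFP switching: when the incoming thread is a real-time one,
+ * enable the unit and load its context unless the hardware already
+ * holds it. When switching back to the root (Linux) thread, only
+ * record the last owner and let the regular kernel fault handler
+ * reload the proper context on the next VFP access.
+ */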
+void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to)
+{
+	union vfp_state *const from_fpup = from ? from->tcb.fpup : NULL;
+	unsigned cpu = raw_smp_processor_id();
+
+	if (xnthread_test_state(to, XNROOT) == 0) {
+		union vfp_state *const to_fpup = to->tcb.fpup;
+		unsigned fpexc = do_enable_vfp();
+
+		if (from_fpup == to_fpup)
+			return;
+
+		if (from_fpup)
+			__asm_vfp_save(from_fpup, fpexc);
+
+		__asm_vfp_load(to_fpup, cpu);
+	} else {
+		/*
+		 * We are restoring the Linux current thread. The FPU
+		 * can be left disabled, so that a fault will occur if
+		 * the newly switched thread uses the FPU; this lets
+		 * the kernel handler pick the correct FPU context and
+		 * save the last used RT context in the same move.
+		 */
+		vfp_current_hw_state[cpu] = from_fpup;
+#ifdef CONFIG_SMP
+		/*
+		 * On SMP, returning to FPU-disabled mode means the
+		 * FPU context has to be saved; avoid doing so if the
+		 * current FPU context already belongs to the task we
+		 * are switching to.
+		 */
+		if (from_fpup) {
+			union vfp_state *const current_task_fpup =
+				&to->tcb.core.tip->vfpstate;
+			const unsigned fpdis = do_vfp_fmrx(FPEXC);
+			const unsigned fpen = fpdis | FPEXC_EN;
+
+			do_vfp_fmxr(FPEXC, fpen & ~XNARCH_VFP_ANY_EXC);
+			if (from_fpup == current_task_fpup)
+				return;
+
+			__asm_vfp_save(from_fpup, fpen);
+			do_vfp_fmxr(FPEXC, fpdis);
+		}
+#endif
+	}
+}
+
+int xnarch_handle_fpu_fault(struct xnthread *from,
+			struct xnthread *to, struct ipipe_trap_data *d)
+{
+	if (xnthread_test_state(to, XNFPU))
+		/* FPU is already enabled, probably an exception */
+		return 0;
+
+#if __LINUX_ARM_ARCH__ <= 6
+	if (!static_key_true(&__xeno_vfp_key))
+		/* A VFP instruction was issued on a CPU without VFP:
+		   this is an error. */
+		return 0;
+#endif
+
+	xnlock_get(&nklock);
+	xnthread_set_state(to, XNFPU);
+	xnlock_put(&nklock);
+
+	xnarch_switch_fpu(from, to);
+
+	/* Retry faulting instruction */
+	d->regs->ARM_pc = xnarch_fault_pc(d);
+	return 1;
+}
+
+void xnarch_init_shadow_tcb(struct xnthread *thread)
+{
+	struct xnarchtcb *tcb = xnthread_archtcb(thread);
+
+	tcb->fpup = &task_thread_info(tcb->core.host_task)->vfpstate;
+
+	if (vfp_checked == 0) {
+		mutex_lock(&vfp_check_lock);
+		if (vfp_checked == 0) {
+			if ((elf_hwcap & HWCAP_VFP) == 0)
+				static_key_slow_dec(&__xeno_vfp_key);
+			vfp_checked = 1;
+		}
+		mutex_unlock(&vfp_check_lock);
+	}
+
+	/* XNFPU is set upon first FPU fault */
+	xnthread_clear_state(thread, XNFPU);
+}
+
+void xnarch_init_root_tcb(struct xnthread *thread)
+{
+	struct xnarchtcb *tcb = &thread->tcb;
+	tcb->fpup = NULL;
+}
+
+#endif /* CONFIG_XENO_ARCH_FPU && CONFIG_VFP*/
+
+void xnarch_switch_to(struct xnthread *out, struct xnthread *in)
+{
+	struct xnarchtcb *out_tcb = &out->tcb, *in_tcb = &in->tcb;
+	struct mm_struct *prev_mm, *next_mm;
+	struct task_struct *next;
+
+	next = in_tcb->core.host_task;
+	prev_mm = out_tcb->core.active_mm;
+
+	next_mm = in_tcb->core.mm;
+	if (next_mm == NULL) {
+		in_tcb->core.active_mm = prev_mm;
+		enter_lazy_tlb(prev_mm, next);
+	} else {
+		ipipe_switch_mm_head(prev_mm, next_mm, next);
+		/*
+		 * We might be switching back to the root thread,
+		 * which we preempted earlier, shortly after "current"
+		 * dropped its mm context in the do_exit() path
+		 * (next->mm == NULL). In that particular case, the
+		 * kernel expects a lazy TLB state for leaving the mm.
+		 */
+		if (next->mm == NULL)
+			enter_lazy_tlb(prev_mm, next);
+	}
+
+	__asm_thread_switch(out_tcb->core.tip, in_tcb->core.tip);
+}
+++ linux-patched/arch/arm/xenomai/ipipe/Makefile	2022-03-21 12:58:28.611896548 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:2 @
--- linux/arch/arm/xenomai/ipipe/machine.c	1970-01-01 01:00:00.000000000 +0100
+obj-$(CONFIG_XENOMAI) += xenomai.o
+
+xenomai-$(CONFIG_IPIPE) := machine.o thread.o switch.o syscall.o
+
+ccflags-y := -I$(srctree)/arch/arm/xenomai/include -I$(srctree)/include/xenomai
+++ linux-patched/arch/arm/xenomai/ipipe/machine.c	2022-03-21 12:58:28.596896694 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/ipipe/include/asm/xenomai/calibration.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ *   Xenomai is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License as
+ *   published by the Free Software Foundation, Inc., 675 Mass Ave,
+ *   Cambridge MA 02139, USA; either version 2 of the License, or (at
+ *   your option) any later version.
+ *
+ *   Xenomai is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ *   02111-1307, USA.
+ */
+
+#include <linux/mm.h>
+#include <asm/xenomai/machine.h>
+
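+/*
+ * Pre-commit every page of the area by simulating a fault on it, so
+ * that no minor fault is taken once the memory is later accessed
+ * from a time-critical context.
+ */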
+static void mach_arm_prefault(struct vm_area_struct *vma)
+{
+	unsigned long addr;
+	unsigned int flags;
+
+	if ((vma->vm_flags & VM_MAYREAD)) {
+		flags = (vma->vm_flags & VM_MAYWRITE) ? FAULT_FLAG_WRITE : 0;
+		for (addr = vma->vm_start;
+		     addr != vma->vm_end; addr += PAGE_SIZE)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)
+			handle_mm_fault(vma->vm_mm, vma, addr, flags);
+#else
+			handle_mm_fault(vma, addr, flags);
+#endif
+	}
+}
+
+static const char *const fault_labels[] = {
+	[IPIPE_TRAP_ACCESS] = "Data or instruction access",
+	[IPIPE_TRAP_SECTION] = "Section fault",
+	[IPIPE_TRAP_DABT] = "Generic data abort",
+	[IPIPE_TRAP_UNKNOWN] = "Unknown exception",
+	[IPIPE_TRAP_BREAK] = "Instruction breakpoint",
+	[IPIPE_TRAP_FPU] = "Floating point exception",
+	[IPIPE_TRAP_VFP] = "VFP Floating point exception",
+	[IPIPE_TRAP_UNDEFINSTR] = "Undefined instruction",
+#ifdef IPIPE_TRAP_ALIGNMENT
+	[IPIPE_TRAP_ALIGNMENT] = "Unaligned access exception",
+#endif /* IPIPE_TRAP_ALIGNMENT */
+	[IPIPE_NR_FAULTS] = NULL
+};
+
+struct cobalt_machine cobalt_machine = {
+	.name = "arm",
+	.init = NULL,
+	.late_init = NULL,
+	.cleanup = NULL,
+	.prefault = mach_arm_prefault,
+	.fault_labels = fault_labels,
+};
+++ linux-patched/arch/arm/xenomai/ipipe/include/asm/xenomai/calibration.h	2022-03-21 12:58:28.581896840 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/ipipe/include/asm/xenomai/wrappers.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003,2004,2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_CALIBRATION_H
+#define _COBALT_ARM_ASM_CALIBRATION_H
+
+unsigned int omap_rev(void);
+#define cpu_is_omap44xx() ((omap_rev() & 0xff) == 0x44)
+
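+/*
+ * Fill in the clock gravity values, i.e. the amount of time by which
+ * timer shots are anticipated: a per-SoC estimate of the user-space
+ * scheduling latency unless CONFIG_XENO_OPT_TIMING_SCHEDLAT
+ * overrides it, plus the Kconfig-provided values for kernel threads
+ * and IRQ handlers.
+ */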
+static inline void xnarch_get_latencies(struct xnclock_gravity *p)
+{
+	unsigned int ulat;
+#if CONFIG_XENO_OPT_TIMING_SCHEDLAT != 0
+	ulat = CONFIG_XENO_OPT_TIMING_SCHEDLAT;
+#elif defined(CONFIG_ARCH_AT91RM9200)
+	ulat = 8500;
+#elif defined(CONFIG_ARCH_AT91SAM9263)
+	ulat = 11000;
+#elif defined(CONFIG_SOC_IMX6Q)
+	ulat = 6000;
+#elif defined(CONFIG_ARCH_MX51)
+	ulat = 5000;
+#elif defined(CONFIG_ARCH_MX53)
+	ulat = 5000;
+#elif defined(CONFIG_ARCH_MX6)
+	ulat = 2000;
+#elif defined(CONFIG_SOC_IMX7)
+	ulat = 2000;
+#elif defined(CONFIG_SOC_LS1021A)
+	ulat = 2800;
+#elif defined(CONFIG_ARCH_OMAP)
+	ulat = cpu_is_omap44xx() ? 2500 : 5000;
+#elif defined(CONFIG_ARCH_STI)
+	ulat = 6000;
+#elif defined(CONFIG_ARCH_SOCFPGA)
+	ulat = 4500;
+#else
+	ulat = 9500;	/* XXX sane? */
+#endif
+	p->user = xnclock_ns_to_ticks(&nkclock, ulat);
+	p->kernel = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_KSCHEDLAT);
+	p->irq = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_IRQLAT);
+}
+
+#endif /* !_COBALT_ARM_ASM_CALIBRATION_H */
+++ linux-patched/arch/arm/xenomai/ipipe/include/asm/xenomai/wrappers.h	2022-03-21 12:58:28.567896977 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/ipipe/include/asm/xenomai/syscall.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_WRAPPERS_H
+#define _COBALT_ARM_ASM_WRAPPERS_H
+
+#include <asm-generic/xenomai/wrappers.h> /* Read the generic portion. */
+
+#define __put_user_inatomic __put_user
+#define __get_user_inatomic __get_user
+
+#endif /* _COBALT_ARM_ASM_WRAPPERS_H */
+++ linux-patched/arch/arm/xenomai/ipipe/include/asm/xenomai/syscall.h	2022-03-21 12:58:28.552897123 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/ipipe/include/asm/xenomai/machine.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_SYSCALL_H
+#define _COBALT_ARM_ASM_SYSCALL_H
+
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>
+#include <asm-generic/xenomai/syscall.h>
+
+#ifndef __ARM_NR_ipipe
+/* Legacy pipelines do not define this. */
+#define __ARM_NR_ipipe	(__NR_SYSCALL_BASE + XENO_ARM_SYSCALL)
+#endif
+
+/*
+ * Cobalt syscall numbers can be fetched from ARM_ORIG_r0 with ARM_r7
+ * containing the Xenomai syscall marker, Linux syscalls directly from
+ * ARM_r7 (may require the OABI tweak).
+ */
+#define __xn_reg_sys(__regs)	((__regs)->ARM_ORIG_r0)
+/* In OABI_COMPAT mode, handle both OABI and EABI userspace syscalls */
+#ifdef CONFIG_OABI_COMPAT
+#define __xn_syscall_p(__regs)	(((__regs)->ARM_r7 == __NR_OABI_SYSCALL_BASE + XENO_ARM_SYSCALL) || \
+				 ((__regs)->ARM_r7 == __ARM_NR_ipipe))
+#define __xn_abi_decode(__regs) ((__regs)->ARM_r7 - __NR_OABI_SYSCALL_BASE)
+#else /* !CONFIG_OABI_COMPAT */
+#define __xn_syscall_p(__regs)	((__regs)->ARM_r7 == __ARM_NR_ipipe)
+#define __xn_abi_decode(__regs) ((__regs)->ARM_r7)
+#endif /* !CONFIG_OABI_COMPAT */
+#define __xn_syscall(__regs)	(__xn_reg_sys(__regs) & ~__COBALT_SYSCALL_BIT)
+
+/*
+ * Root syscall number with predicate (valid only if
+ * !__xn_syscall_p(__regs)).
+ */
+#define __xn_rootcall_p(__regs, __code)					\
+	({								\
+		*(__code) = __xn_abi_decode(__regs);			\
+		*(__code) < NR_syscalls || *(__code) >= __ARM_NR_BASE;	\
+	})
+
+#define __xn_reg_rval(__regs)	((__regs)->ARM_r0)
+#define __xn_reg_arg1(__regs)	((__regs)->ARM_r1)
+#define __xn_reg_arg2(__regs)	((__regs)->ARM_r2)
+#define __xn_reg_arg3(__regs)	((__regs)->ARM_r3)
+#define __xn_reg_arg4(__regs)	((__regs)->ARM_r4)
+#define __xn_reg_arg5(__regs)	((__regs)->ARM_r5)
+#define __xn_reg_pc(__regs)	((__regs)->ARM_ip)
+#define __xn_reg_sp(__regs)	((__regs)->ARM_sp)
+
+static inline void __xn_error_return(struct pt_regs *regs, int v)
+{
+	__xn_reg_rval(regs) = v;
+}
+
+static inline void __xn_status_return(struct pt_regs *regs, long v)
+{
+	__xn_reg_rval(regs) = v;
+}
+
+static inline int __xn_interrupted_p(struct pt_regs *regs)
+{
+	return __xn_reg_rval(regs) == -EINTR;
+}
+
+int xnarch_local_syscall(unsigned long a1, unsigned long a2,
+			 unsigned long a3, unsigned long a4,
+			 unsigned long a5);
+
+#endif /* !_COBALT_ARM_ASM_SYSCALL_H */
+++ linux-patched/arch/arm/xenomai/ipipe/include/asm/xenomai/machine.h	2022-03-21 12:58:28.537897270 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/ipipe/include/asm/xenomai/syscall32.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ *   Copyright &copy; 2002-2004 Philippe Gerum.
+ *
+ *   ARM port
+ *     Copyright (C) 2005 Stelian Pop
+ *
+ *   Xenomai is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License as
+ *   published by the Free Software Foundation, Inc., 675 Mass Ave,
+ *   Cambridge MA 02139, USA; either version 2 of the License, or (at
+ *   your option) any later version.
+ *
+ *   Xenomai is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with Xenomai; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ *   02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_MACHINE_H
+#define _COBALT_ARM_ASM_MACHINE_H
+
+#include <linux/version.h>
+#include <asm/byteorder.h>
+
+#define XNARCH_HOST_TICK_IRQ __ipipe_hrtimer_irq
+
+#include <asm/barrier.h>
+#include <asm/compiler.h>
+#include <asm/cmpxchg.h>
+#include <asm/switch_to.h>
+#include <asm/system_info.h>
+#include <asm/system_misc.h>
+#include <asm/timex.h>
+#include <asm/processor.h>
+#include <asm/ipipe.h>
+#include <asm/mach/irq.h>
+#include <asm/cacheflush.h>
+
+#define xnarch_cache_aliasing() cache_is_vivt()
+
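+/*
+ * ffnz(x): index of the least significant bit set in x (only
+ * meaningful for x != 0). ARMv5+ isolates that bit with x & (-x) and
+ * derives its index from clz; older cores fall back to a plain
+ * binary search.
+ */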
+#if __LINUX_ARM_ARCH__ < 5
+static inline __attribute_const__ unsigned long ffnz(unsigned long x)
+{
+	int r = 0;
+
+	if (!x)
+		return 0;
+	if (!(x & 0xffff)) {
+		x >>= 16;
+		r += 16;
+	}
+	if (!(x & 0xff)) {
+		x >>= 8;
+		r += 8;
+	}
+	if (!(x & 0xf)) {
+		x >>= 4;
+		r += 4;
+	}
+	if (!(x & 3)) {
+		x >>= 2;
+		r += 2;
+	}
+	if (!(x & 1)) {
+		x >>= 1;
+		r += 1;
+	}
+	return r;
+}
+#else
+static inline __attribute_const__ unsigned long ffnz(unsigned long ul)
+{
+	int __r;
+	__asm__("clz\t%0, %1" : "=r" (__r) : "r"(ul & (-ul)) : "cc");
+	return 31 - __r;
+}
+#endif
+
+#include <asm-generic/xenomai/machine.h>
+
+#endif /* !_COBALT_ARM_ASM_MACHINE_H */
+++ linux-patched/arch/arm/xenomai/ipipe/include/asm/xenomai/syscall32.h	2022-03-21 12:58:28.523897406 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/ipipe/include/asm/xenomai/features.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_SYSCALL32_H
+#define _COBALT_ARM_ASM_SYSCALL32_H
+
+#include <asm-generic/xenomai/syscall32.h>
+
+#endif /* !_COBALT_ARM_ASM_SYSCALL32_H */
+++ linux-patched/arch/arm/xenomai/ipipe/include/asm/xenomai/features.h	2022-03-21 12:58:28.508897552 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/ipipe/include/asm/xenomai/fptest.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_FEATURES_H
+#define _COBALT_ARM_ASM_FEATURES_H
+
+struct cobalt_featinfo;
+static inline void collect_arch_features(struct cobalt_featinfo *p) { }
+
+#include <asm/xenomai/uapi/features.h>
+
+#endif /* !_COBALT_ARM_ASM_FEATURES_H */
+++ linux-patched/arch/arm/xenomai/ipipe/include/asm/xenomai/fptest.h	2022-03-21 12:58:28.493897699 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/ipipe/include/asm/xenomai/thread.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_FPTEST_H
+#define _COBALT_ARM_ASM_FPTEST_H
+
+#include <linux/errno.h>
+#include <asm/hwcap.h>
+
+#ifdef CONFIG_VFP
+#define have_vfp (ELF_HWCAP & HWCAP_VFP)
+#else /* !CONFIG_VFP */
+#define have_vfp 0
+#endif /* !CONFIG_VFP */
+
+#include <asm/xenomai/uapi/fptest.h>
+
+static inline int fp_kernel_supported(void)
+{
+	return 1;
+}
+
+static inline int fp_linux_begin(void)
+{
+	return -ENOSYS;
+}
+
+static inline void fp_linux_end(void)
+{
+}
+
+static inline int fp_detect(void)
+{
+	return have_vfp ? __COBALT_HAVE_VFP : 0;
+}
+
+#endif /* _COBALT_ARM_ASM_FPTEST_H */
+++ linux-patched/arch/arm/xenomai/ipipe/include/asm/xenomai/thread.h	2022-03-21 12:58:28.479897835 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/ipipe/syscall.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_THREAD_H
+#define _COBALT_ARM_ASM_THREAD_H
+
+#include <asm-generic/xenomai/ipipe/thread.h>
+
+#ifdef CONFIG_XENO_ARCH_FPU
+#ifdef CONFIG_VFP
+#include <asm/vfp.h>
+#endif /* CONFIG_VFP */
+#endif /* !CONFIG_XENO_ARCH_FPU */
+
+struct xnarchtcb {
+	struct xntcb core;
+#ifdef CONFIG_XENO_ARCH_FPU
+#ifdef CONFIG_VFP
+	union vfp_state *fpup;
+#define xnarch_fpu_ptr(tcb)     ((tcb)->fpup)
+#else
+#define xnarch_fpu_ptr(tcb)     NULL
+#endif
+#endif
+};
+
+#define xnarch_fault_regs(d)	((d)->regs)
+#define xnarch_fault_trap(d)	((d)->exception)
+#define xnarch_fault_code(d)	(0)
+#define xnarch_fault_pc(d)	((d)->regs->ARM_pc - (thumb_mode((d)->regs) ? 2 : 4)) /* XXX ? */
+
+#define xnarch_fault_pf_p(d)	((d)->exception == IPIPE_TRAP_ACCESS)
+#define xnarch_fault_bp_p(d)	((current->ptrace & PT_PTRACED) &&	\
+				 ((d)->exception == IPIPE_TRAP_BREAK ||	\
+				  (d)->exception == IPIPE_TRAP_UNDEFINSTR))
+
+#define xnarch_fault_notify(d) (!xnarch_fault_bp_p(d))
+
+static inline
+struct task_struct *xnarch_host_task(struct xnarchtcb *tcb)
+{
+	return tcb->core.host_task;
+}
+
+void xnarch_switch_to(struct xnthread *out, struct xnthread *in);
+
+static inline void xnarch_enter_root(struct xnthread *root) { }
+
+static inline int xnarch_escalate(void)
+{
+	if (ipipe_root_p) {
+		ipipe_raise_irq(cobalt_pipeline.escalate_virq);
+		return 1;
+	}
+
+	return 0;
+}
+
+#if defined(CONFIG_XENO_ARCH_FPU) && defined(CONFIG_VFP)
+
+void xnarch_init_root_tcb(struct xnthread *thread);
+
+void xnarch_init_shadow_tcb(struct xnthread *thread);
+
+int xnarch_fault_fpu_p(struct ipipe_trap_data *d);
+
+void xnarch_leave_root(struct xnthread *root);
+
+void xnarch_switch_fpu(struct xnthread *from, struct xnthread *thread);
+
+int xnarch_handle_fpu_fault(struct xnthread *from, 
+			struct xnthread *to, struct ipipe_trap_data *d);
+
+#else /* !CONFIG_XENO_ARCH_FPU || !CONFIG_VFP */
+
+static inline void xnarch_init_root_tcb(struct xnthread *thread) { }
+static inline void xnarch_init_shadow_tcb(struct xnthread *thread) { }
+
+/*
+ * Userland may raise FPU faults with FPU-enabled kernels, regardless
+ * of whether real-time threads actually use FPU, so we simply ignore
+ * these faults.
+ */
+static inline int xnarch_fault_fpu_p(struct ipipe_trap_data *d)
+{
+	return 0;
+}
+
+static inline void xnarch_leave_root(struct xnthread *root) { }
+
+static inline void xnarch_switch_fpu(struct xnthread *f, struct xnthread *t) { }
+
+static inline int xnarch_handle_fpu_fault(struct xnthread *from, 
+					struct xnthread *to, struct ipipe_trap_data *d)
+{
+	return 0;
+}
+#endif /*  !CONFIG_XENO_ARCH_FPU || !CONFIG_VFP */
+
+static inline void xnarch_enable_kfpu(void) { }
+
+static inline void xnarch_disable_kfpu(void) { }
+
+#endif /* !_COBALT_ARM_ASM_THREAD_H */
+++ linux-patched/arch/arm/xenomai/ipipe/syscall.c	2022-03-21 12:58:28.464897981 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/include/asm/xenomai/uapi/arith.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Stelian Pop
+ * Copyright (C) 2010 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#include <linux/ipipe.h>
+#include <asm/xenomai/syscall.h>
+#include <asm/xenomai/uapi/tsc.h>
+
+int xnarch_local_syscall(unsigned long a1, unsigned long a2,
+			 unsigned long a3, unsigned long a4,
+			 unsigned long a5)
+{
+	struct ipipe_sysinfo ipipe_info;
+	struct __ipipe_tscinfo *p = &ipipe_info.arch.tsc;
+	struct __xn_tscinfo info;
+	int ret;
+
+	if (a1 != XENOMAI_SYSARCH_TSCINFO)
+		return -EINVAL;
+
+	ret = ipipe_get_sysinfo(&ipipe_info);
+	if (ret)
+		return ret;
+
+	switch (p->type) {
+	case IPIPE_TSC_TYPE_DECREMENTER:
+		info.counter = p->u.dec.counter;
+		break;
+	case IPIPE_TSC_TYPE_NONE:
+		return -ENOSYS;
+	default:
+		info.counter = p->u.fr.counter;
+		break;
+	}
+
+	return cobalt_copy_to_user((void *)a2, &info, sizeof(info));
+}
+++ linux-patched/arch/arm/xenomai/include/asm/xenomai/uapi/arith.h	2022-03-21 12:58:28.449898128 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/include/asm/xenomai/uapi/tsc.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_ARM_ASM_UAPI_ARITH_H
+#define _COBALT_ARM_ASM_UAPI_ARITH_H
+
+#include <asm/xenomai/uapi/features.h>
+
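+/*
+ * Division-free scaled multiplication: the nodiv helpers below
+ * compute op * (integ + frac / 2^64) out of 32x32->64 partial
+ * products only, which is what the arithmetic used for clock
+ * conversions needs on cores without a fast 64-bit divide.
+ */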
+#if __LINUX_ARM_ARCH__ >= 4 && (!defined(CONFIG_THUMB2_KERNEL) || !defined(CONFIG_FTRACE))
+static inline __attribute__((__const__)) unsigned long long
+mach_arm_nodiv_ullimd(const unsigned long long op,
+		       const unsigned long long frac,
+		       const unsigned rhs_integ);
+
+#define xnarch_nodiv_ullimd(op, frac, integ) \
+	mach_arm_nodiv_ullimd((op), (frac), (integ))
+
+static inline __attribute__((__const__)) long long
+mach_arm_nodiv_llimd(const long long op,
+		       const unsigned long long frac,
+		       const unsigned rhs_integ);
+
+#define xnarch_nodiv_llimd(op, frac, integ) \
+	mach_arm_nodiv_llimd((op), (frac), (integ))
+#else /* arm <= v3 */
+#define xnarch_add96and64(l0, l1, l2, s0, s1)		\
+	do {						\
+		__asm__ ("adds %2, %2, %4\n\t"		\
+			 "adcs %1, %1, %3\n\t"		\
+			 "adc %0, %0, #0\n\t"		\
+			 : "+r"(l0), "+r"(l1), "+r"(l2)	\
+			 : "r"(s0), "r"(s1): "cc");	\
+	} while (0)
+#endif /* arm <= v3 */
+
+#include <cobalt/uapi/asm-generic/arith.h>
+
+#if __LINUX_ARM_ARCH__ >= 4 && (!defined(CONFIG_THUMB2_KERNEL) || !defined(CONFIG_FTRACE))
+#define mach_arm_nodiv_ullimd_str			\
+	"umull %[tl], %[rl], %[opl], %[fracl]\n\t"	\
+	"umull %[rm], %[rh], %[oph], %[frach]\n\t"	\
+	"adds %[rl], %[rl], %[tl], lsr #31\n\t"		\
+	"adcs %[rm], %[rm], #0\n\t"			\
+	"adc %[rh], %[rh], #0\n\t"			\
+	"umull %[tl], %[th], %[oph], %[fracl]\n\t"	\
+	"adds %[rl], %[rl], %[tl]\n\t"			\
+	"adcs %[rm], %[rm], %[th]\n\t"			\
+	"adc %[rh], %[rh], #0\n\t"			\
+	"umull %[tl], %[th], %[opl], %[frach]\n\t"	\
+	"adds %[rl], %[rl], %[tl]\n\t"			\
+	"adcs %[rm], %[rm], %[th]\n\t"			\
+	"adc %[rh], %[rh], #0\n\t"			\
+	"umlal %[rm], %[rh], %[opl], %[integ]\n\t"	\
+	"mla %[rh], %[oph], %[integ], %[rh]\n\t"
+
+static inline __attribute__((__const__)) unsigned long long
+mach_arm_nodiv_ullimd(const unsigned long long op,
+		       const unsigned long long frac,
+		       const unsigned rhs_integ)
+{
+	register unsigned rl __asm__("r5");
+	register unsigned rm __asm__("r0");
+	register unsigned rh __asm__("r1");
+	register unsigned fracl __asm__ ("r2");
+	register unsigned frach __asm__ ("r3");
+	register unsigned integ __asm__("r4") = rhs_integ;
+	register unsigned opl __asm__ ("r6");
+	register unsigned oph __asm__ ("r8");
+	register unsigned tl __asm__("r9");
+	register unsigned th __asm__("r10");
+
+	xnarch_u64tou32(op, oph, opl);
+	xnarch_u64tou32(frac, frach, fracl);
+
+	__asm__ (mach_arm_nodiv_ullimd_str
+		 : [rl]"=r"(rl), [rm]"=r"(rm), [rh]"=r"(rh),
+		   [tl]"=r"(tl), [th]"=r"(th)
+		 : [opl]"r"(opl), [oph]"r"(oph),
+		   [fracl]"r"(fracl), [frach]"r"(frach),
+		   [integ]"r"(integ)
+		 : "cc");
+
+	return xnarch_u64fromu32(rh, rm);
+}
+
+static inline __attribute__((__const__)) long long
+mach_arm_nodiv_llimd(const long long op,
+		       const unsigned long long frac,
+		       const unsigned rhs_integ)
+{
+	register unsigned rl __asm__("r5");
+	register unsigned rm __asm__("r0");
+	register unsigned rh __asm__("r1");
+	register unsigned fracl __asm__ ("r2");
+	register unsigned frach __asm__ ("r3");
+	register unsigned integ __asm__("r4") = rhs_integ;
+	register unsigned opl __asm__ ("r6");
+	register unsigned oph __asm__ ("r8");
+	register unsigned tl __asm__("r9");
+	register unsigned th __asm__("r10");
+	register unsigned s __asm__("r12");
+
+	xnarch_u64tou32(op, oph, opl);
+	xnarch_u64tou32(frac, frach, fracl);
+
+	__asm__ ("movs %[s], %[oph], lsr #30\n\t"
+		 "beq 1f\n\t"
+		 "rsbs  %[opl], %[opl], #0\n\t"
+		 "sbc  %[oph], %[oph], %[oph], lsl #1\n"
+		 "1:\t"
+		 mach_arm_nodiv_ullimd_str
+		 "teq %[s], #0\n\t"
+		 "beq 2f\n\t"
+		 "rsbs  %[rm], %[rm], #0\n\t"
+		 "sbc  %[rh], %[rh], %[rh], lsl #1\n"
+		 "2:\t"
+		 : [rl]"=r"(rl), [rm]"=r"(rm), [rh]"=r"(rh),
+		   [tl]"=r"(tl), [th]"=r"(th), [s]"=r"(s)
+		 : [opl]"r"(opl), [oph]"r"(oph),
+		   [fracl]"r"(fracl), [frach]"r"(frach),
+		   [integ]"r"(integ)
+		 : "cc");
+
+	return xnarch_u64fromu32(rh, rm);
+}
+#endif /* arm >= v4 */
+
+#endif /* _COBALT_ARM_ASM_UAPI_ARITH_H */
+++ linux-patched/arch/arm/xenomai/include/asm/xenomai/uapi/tsc.h	2022-03-21 12:58:28.434898274 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/include/asm/xenomai/uapi/syscall.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Gilles Chanteperdrix <gch@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_ARM_ASM_UAPI_TSC_H
+#define _COBALT_ARM_ASM_UAPI_TSC_H
+
+struct __xn_tscinfo {
+	volatile unsigned int *counter;
+};
+
+#endif /* !_COBALT_ARM_ASM_UAPI_TSC_H */
+++ linux-patched/arch/arm/xenomai/include/asm/xenomai/uapi/syscall.h	2022-03-21 12:58:28.420898410 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/include/asm/xenomai/uapi/features.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_ARM_ASM_UAPI_SYSCALL_H
+#define _COBALT_ARM_ASM_UAPI_SYSCALL_H
+
+#define __xn_syscode(__nr)	(__COBALT_SYSCALL_BIT | (__nr))
+
+#define XENO_ARM_SYSCALL        0x000F0042	/* carefully chosen... */
+
+#define XENOMAI_SYSARCH_TSCINFO      4
+
+#endif /* !_COBALT_ARM_ASM_UAPI_SYSCALL_H */
+++ linux-patched/arch/arm/xenomai/include/asm/xenomai/uapi/features.h	2022-03-21 12:58:28.405898557 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/include/asm/xenomai/uapi/fptest.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_ARM_ASM_UAPI_FEATURES_H
+#define _COBALT_ARM_ASM_UAPI_FEATURES_H
+
+/* The ABI revision level we use on this arch. */
+#define XENOMAI_ABI_REV   18UL
+
+#define XENOMAI_FEAT_DEP (__xn_feat_generic_mask)
+
+#define XENOMAI_FEAT_MAN (__xn_feat_generic_man_mask)
+
+#define XNARCH_HAVE_LLMULSHFT    1
+#define XNARCH_HAVE_NODIV_LLIMD  1
+
+struct cobalt_featinfo_archdep { /* no arch-specific feature */ };
+
+#include <cobalt/uapi/asm-generic/features.h>
+
+static inline const char *get_feature_label(unsigned int feature)
+{
+	return get_generic_feature_label(feature);
+}
+
+#endif /* !_COBALT_ARM_ASM_UAPI_FEATURES_H */
+++ linux-patched/arch/arm/xenomai/include/asm/xenomai/uapi/fptest.h	2022-03-21 12:58:28.391898693 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/dovetail/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_ARM_ASM_UAPI_FPTEST_H
+#define _COBALT_ARM_ASM_UAPI_FPTEST_H
+
+#define __COBALT_HAVE_VFP  0x1
+
+static inline void fp_regs_set(int features, unsigned int val)
+{
+	unsigned long long e[16];
+	unsigned int i;
+
+	if (features & __COBALT_HAVE_VFP) {
+		for (i = 0; i < 16; i++)
+			e[i] = val;
+
+		/* vldm %0!, {d0-d15},
+		   AKA fldmiax %0!, {d0-d15} */
+		__asm__ __volatile__("ldc p11, cr0, [%0],#32*4":
+				     "=r"(i)
+				     : "0"(&e[0])
+				     : "d0", "d1", "d2", "d3", "d4", "d5",
+				       "d6", "d7", "d8", "d9", "d10", "d11",
+				       "d12", "d13", "d14", "d15",
+				       "memory");
+	}
+}
+
+static inline unsigned int fp_regs_check(int features, unsigned int val,
+					 int (*report)(const char *fmt, ...))
+{
+	unsigned int result = val, i;
+	unsigned long long e[16];
+
+	if (features & __COBALT_HAVE_VFP) {
+		/* vstm %0!, {d0-d15},
+		   AKA fstmiax %0!, {d0-d15} */
+		__asm__ __volatile__("stc p11, cr0, [%0],#32*4":
+				     "=r"(i): "0"(&e[0]): "memory");
+
+		for (i = 0; i < 16; i++)
+			if (e[i] != val) {
+				report("d%d: %llu != %u\n", i, e[i], val);
+				result = e[i];
+			}
+	}
+
+	return result;
+}
+
+#endif /* !_COBALT_ARM_ASM_UAPI_FPTEST_H */
+++ linux-patched/arch/arm/xenomai/dovetail/Makefile	2022-03-21 12:58:28.376898839 +0100
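The two helpers above are meant to be used as a pair: fill the VFP bank with a known pattern, let something else run, then verify the bank still holds it. A minimal sketch of that pairing, assuming the fp_detect() helper defined later in this patch and using printk as the report callback; the scheduling point in the middle is whatever the test wants to exercise.

static void fp_selfcheck(unsigned int pattern)
{
	int features = fp_detect();

	/* Preload d0-d15 with the test pattern. */
	fp_regs_set(features, pattern);

	/* ... schedule away, take interrupts, switch threads ... */

	/* Read the bank back; any mismatch is reported via printk(). */
	if (fp_regs_check(features, pattern, printk) != pattern)
		printk("VFP register bank was clobbered\n");
}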
@ linux-patched/include/xenomai/linux/stdarg.h:2 @
--- linux/arch/arm/xenomai/dovetail/machine.c	1970-01-01 01:00:00.000000000 +0100
+
+obj-$(CONFIG_XENOMAI) += xenomai.o
+xenomai-y := machine.o
+
+ccflags-y := -I$(srctree)/arch/arm/xenomai/include -I$(srctree)/include/xenomai
+++ linux-patched/arch/arm/xenomai/dovetail/machine.c	2022-03-21 12:58:28.361898986 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/dovetail/include/asm/xenomai/calibration.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ * Copyright (C) 2021 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#include <linux/mm.h>
+#include <asm/xenomai/machine.h>
+
+static void mach_arm_prefault(struct vm_area_struct *vma)
+{
+	unsigned long addr;
+	unsigned int flags;
+
+	if ((vma->vm_flags & VM_MAYREAD)) {
+		flags = (vma->vm_flags & VM_MAYWRITE) ? FAULT_FLAG_WRITE : 0;
+		for (addr = vma->vm_start;
+		     addr != vma->vm_end; addr += PAGE_SIZE)
+			handle_mm_fault(vma, addr, flags, NULL);
+	}
+}
+
+static const char *const fault_labels[] = {
+	[ARM_TRAP_ACCESS] = "Data or instruction access",
+	[ARM_TRAP_SECTION] = "Section fault",
+	[ARM_TRAP_DABT] = "Generic data abort",
+	[ARM_TRAP_PABT] = "Prefetch abort",
+	[ARM_TRAP_BREAK] = "Instruction breakpoint",
+	[ARM_TRAP_FPU] = "Floating point exception",
+	[ARM_TRAP_VFP] = "VFP Floating point exception",
+	[ARM_TRAP_UNDEFINSTR] = "Undefined instruction",
+	[ARM_TRAP_ALIGNMENT] = "Unaligned access exception",
+	[31] = NULL
+};
+
+struct cobalt_machine cobalt_machine = {
+	.name = "arm",
+	.init = NULL,
+	.late_init = NULL,
+	.cleanup = NULL,
+	.prefault = mach_arm_prefault,
+	.fault_labels = fault_labels,
+};
+++ linux-patched/arch/arm/xenomai/dovetail/include/asm/xenomai/calibration.h	2022-03-21 12:58:28.347899122 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/dovetail/include/asm/xenomai/wrappers.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2021 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_DOVETAIL_CALIBRATION_H
+#define _COBALT_ARM_DOVETAIL_CALIBRATION_H
+
+static inline void xnarch_get_latencies(struct xnclock_gravity *p)
+{
+	unsigned int sched_latency;
+
+#if CONFIG_XENO_OPT_TIMING_SCHEDLAT != 0
+	sched_latency = CONFIG_XENO_OPT_TIMING_SCHEDLAT;
+#else
+	sched_latency = 5000;
+#endif
+	p->user = xnclock_ns_to_ticks(&nkclock, sched_latency);
+	p->kernel = xnclock_ns_to_ticks(&nkclock,
+					CONFIG_XENO_OPT_TIMING_KSCHEDLAT);
+	p->irq = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_IRQLAT);
+}
+
+#endif /* !_COBALT_ARM_DOVETAIL_CALIBRATION_H */
+++ linux-patched/arch/arm/xenomai/dovetail/include/asm/xenomai/wrappers.h	2022-03-21 12:58:28.332899269 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/dovetail/include/asm/xenomai/syscall.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_WRAPPERS_H
+#define _COBALT_ARM_ASM_WRAPPERS_H
+
+#include <asm-generic/xenomai/wrappers.h> /* Read the generic portion. */
+
+#define __put_user_inatomic __put_user
+#define __get_user_inatomic __get_user
+
+#endif /* _COBALT_ARM_ASM_WRAPPERS_H */
+++ linux-patched/arch/arm/xenomai/dovetail/include/asm/xenomai/syscall.h	2022-03-21 12:58:28.317899415 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/dovetail/include/asm/xenomai/machine.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_DOVETAIL_SYSCALL_H
+#define _COBALT_ARM_DOVETAIL_SYSCALL_H
+
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>
+#include <asm-generic/xenomai/syscall.h>
+
+/*
+ * Cobalt syscall numbers are fetched from ARM_ORIG_r0, with ARM_r7
+ * holding the Xenomai syscall marker; Linux syscall numbers come
+ * directly from ARM_r7. Since we have to work with Dovetail whilst
+ * remaining binary
+ * compatible with applications built for the I-pipe, we retain the
+ * old syscall signature based on receiving XENO_ARM_SYSCALL in
+ * ARM_r7, possibly ORed with __COBALT_SYSCALL_BIT by Dovetail
+ * (IPIPE_COMPAT mode).
+ *
+ * FIXME: We also have __COBALT_SYSCALL_BIT (equal to
+ * __OOB_SYSCALL_BIT) present in the actual syscall number in r0,
+ * which is pretty much useless. When support for the
+ * I-pipe is dropped, we may switch back to the regular convention
+ * Dovetail abides by, with the actual syscall number in r7 ORed
+ * with __OOB_SYSCALL_BIT, freeing r0 for passing a call argument.
+ */
+#define __xn_reg_sys(__regs)	((__regs)->ARM_ORIG_r0)
+#define __xn_syscall_p(__regs)	(((__regs)->ARM_r7 & ~__COBALT_SYSCALL_BIT) == XENO_ARM_SYSCALL)
+#define __xn_syscall(__regs)	(__xn_reg_sys(__regs) & ~__COBALT_SYSCALL_BIT)
+
+/*
+ * Root syscall number with predicate (valid only if
+ * !__xn_syscall_p(__regs)).
+ */
+#define __xn_rootcall_p(__regs, __code)					\
+	({								\
+		*(__code) = (__regs)->ARM_r7;				\
+		*(__code) < NR_syscalls || *(__code) >= __ARM_NR_BASE;	\
+	})
+
+#define __xn_reg_rval(__regs)	((__regs)->ARM_r0)
+#define __xn_reg_pc(__regs)	((__regs)->ARM_ip)
+#define __xn_reg_sp(__regs)	((__regs)->ARM_sp)
+
+static inline void __xn_error_return(struct pt_regs *regs, int v)
+{
+	__xn_reg_rval(regs) = v;
+}
+
+static inline void __xn_status_return(struct pt_regs *regs, long v)
+{
+	__xn_reg_rval(regs) = v;
+}
+
+static inline int __xn_interrupted_p(struct pt_regs *regs)
+{
+	return __xn_reg_rval(regs) == -EINTR;
+}
+
+static inline
+int xnarch_local_syscall(unsigned long a1, unsigned long a2,
+			unsigned long a3, unsigned long a4,
+			unsigned long a5)
+{
+	/* We need none of these with Dovetail. */
+	return -ENOSYS;
+}
+
+#define pipeline_get_syscall_args pipeline_get_syscall_args
+static inline void pipeline_get_syscall_args(struct task_struct *task,
+					     struct pt_regs *regs,
+					     unsigned long *args)
+{
+	args[0] = regs->ARM_r1;
+	args[1] = regs->ARM_r2;
+	args[2] = regs->ARM_r3;
+	args[3] = regs->ARM_r4;
+	args[4] = regs->ARM_r5;
+}
+
+#endif /* !_COBALT_ARM_DOVETAIL_SYSCALL_H */
+++ linux-patched/arch/arm/xenomai/dovetail/include/asm/xenomai/machine.h	2022-03-21 12:58:28.301899571 +0100
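Putting the accessors above together, a syscall entry hook could discriminate Cobalt requests from regular Linux ones roughly as sketched below. This is only an illustration of how the predicates compose; handle_head_syscall() is a placeholder name, not an interface defined by this patch.

static int arm_syscall_entry(struct pt_regs *regs)
{
	unsigned long code;
	long ret;

	if (__xn_syscall_p(regs)) {
		/* Cobalt syscall: number in ARM_ORIG_r0, marker in r7. */
		ret = handle_head_syscall(__xn_syscall(regs), regs);
		__xn_status_return(regs, ret);
		return 1;	/* handled here */
	}

	if (__xn_rootcall_p(regs, &code))
		return 0;	/* valid Linux syscall: pass it down the pipeline */

	return 0;		/* neither: let the in-band code reject it */
}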
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/dovetail/include/asm/xenomai/syscall32.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ *   Copyright &copy; 2002-2004 Philippe Gerum.
+ *
+ *   ARM port
+ *     Copyright (C) 2005 Stelian Pop
+ *
+ *   Xenomai is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License as
+ *   published by the Free Software Foundation, Inc., 675 Mass Ave,
+ *   Cambridge MA 02139, USA; either version 2 of the License, or (at
+ *   your option) any later version.
+ *
+ *   Xenomai is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with Xenomai; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ *   02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_DOVETAIL_MACHINE_H
+#define _COBALT_ARM_DOVETAIL_MACHINE_H
+
+#include <linux/version.h>
+#include <asm/byteorder.h>
+#include <asm/cacheflush.h>
+
+#define xnarch_cache_aliasing() cache_is_vivt()
+
+#if __LINUX_ARM_ARCH__ < 5
+static inline __attribute_const__ unsigned long ffnz(unsigned long x)
+{
+	int r = 0;
+
+	if (!x)
+		return 0;
+	if (!(x & 0xffff)) {
+		x >>= 16;
+		r += 16;
+	}
+	if (!(x & 0xff)) {
+		x >>= 8;
+		r += 8;
+	}
+	if (!(x & 0xf)) {
+		x >>= 4;
+		r += 4;
+	}
+	if (!(x & 3)) {
+		x >>= 2;
+		r += 2;
+	}
+	if (!(x & 1)) {
+		x >>= 1;
+		r += 1;
+	}
+	return r;
+}
+#else
+static inline __attribute_const__ unsigned long ffnz(unsigned long ul)
+{
+	int __r;
+	__asm__("clz\t%0, %1" : "=r" (__r) : "r"(ul & (-ul)) : "cc");
+	return 31 - __r;
+}
+#endif
+
+#include <asm-generic/xenomai/machine.h>
+
+#endif /* !_COBALT_ARM_DOVETAIL_MACHINE_H */
+++ linux-patched/arch/arm/xenomai/dovetail/include/asm/xenomai/syscall32.h	2022-03-21 12:58:28.286899717 +0100
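Both ffnz() variants above return the index of the least significant set bit: the pre-ARMv5 one narrows the search by halving the window, the ARMv5+ one isolates that bit with (x & -x) and counts leading zeroes. A quick self-check (not part of the patch; assumes linux/bug.h is available) showing a few agreed-upon values:

static inline void ffnz_selftest(void)
{
	BUG_ON(ffnz(0x1) != 0);
	BUG_ON(ffnz(0x90) != 4);
	BUG_ON(ffnz(1UL << 31) != 31);
	/* A zero argument is special-cased (returning 0) by the
	 * pre-ARMv5 variant only; the clz-based one expects x != 0. */
}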
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/dovetail/include/asm/xenomai/features.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_SYSCALL32_H
+#define _COBALT_ARM_ASM_SYSCALL32_H
+
+#include <asm-generic/xenomai/syscall32.h>
+
+#endif /* !_COBALT_ARM_ASM_SYSCALL32_H */
+++ linux-patched/arch/arm/xenomai/dovetail/include/asm/xenomai/features.h	2022-03-21 12:58:28.271899863 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/dovetail/include/asm/xenomai/fptest.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_DOVETAIL_FEATURES_H
+#define _COBALT_ARM_DOVETAIL_FEATURES_H
+
+struct cobalt_featinfo;
+static inline void collect_arch_features(struct cobalt_featinfo *p) { }
+
+#include <asm/xenomai/uapi/features.h>
+
+#endif /* !_COBALT_ARM_DOVETAIL_FEATURES_H */
+++ linux-patched/arch/arm/xenomai/dovetail/include/asm/xenomai/fptest.h	2022-03-21 12:58:28.256900010 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/xenomai/dovetail/include/asm/xenomai/thread.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_DOVETAIL_FPTEST_H
+#define _COBALT_ARM_DOVETAIL_FPTEST_H
+
+#include <linux/errno.h>
+#include <asm/hwcap.h>
+
+#ifdef CONFIG_VFP
+#define have_vfp (elf_hwcap & HWCAP_VFP)
+#else /* !CONFIG_VFP */
+#define have_vfp 0
+#endif /* !CONFIG_VFP */
+
+#include <asm/xenomai/uapi/fptest.h>
+
+static inline int fp_kernel_supported(void)
+{
+	return 0;
+}
+
+static inline int fp_linux_begin(void)
+{
+	return -ENOSYS;
+}
+
+static inline void fp_linux_end(void)
+{
+}
+
+static inline int fp_detect(void)
+{
+	return have_vfp ? __COBALT_HAVE_VFP : 0;
+}
+
+#endif /* _COBALT_ARM_DOVETAIL_FPTEST_H */
+++ linux-patched/arch/arm/xenomai/dovetail/include/asm/xenomai/thread.h	2022-03-21 12:58:28.241900156 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/arch/arm/Makefile	2022-03-02 11:48:10.000000000 +0100
+/*
+ * Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_DOVETAIL_THREAD_H
+#define _COBALT_ARM_DOVETAIL_THREAD_H
+
+#include <asm-generic/xenomai/dovetail/thread.h>
+#include <asm/traps.h>
+
+#define xnarch_fault_pc(__regs)	((__regs)->ARM_pc - (thumb_mode(__regs) ? 2 : 4))
+#define xnarch_fault_pf_p(__nr)	((__nr) == ARM_TRAP_ACCESS)
+#define xnarch_fault_bp_p(__nr)	((current->ptrace & PT_PTRACED) &&	\
+					((__nr) == ARM_TRAP_BREAK ||	\
+						(__nr) == ARM_TRAP_UNDEFINSTR))
+#define xnarch_fault_notify(__nr) (!xnarch_fault_bp_p(__nr))
+
+#endif /* !_COBALT_ARM_DOVETAIL_THREAD_H */
+++ linux-patched/arch/arm/Makefile	2022-03-21 12:58:28.184900712 +0100
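The fault predicates above are consumed by the low-level trap handling path. A hedged sketch of how they combine is shown below; the function name is hypothetical and pr_debug() merely stands in for whatever tracing the real handler does.

static bool head_fault_wants_notification(int trapnr, struct pt_regs *regs)
{
	pr_debug("trap %d at PC %lx\n", trapnr, xnarch_fault_pc(regs));

	if (xnarch_fault_pf_p(trapnr))
		pr_debug("data/instruction access fault\n");

	/* ptrace breakpoints and undef-instr traps are left to the debugger. */
	return xnarch_fault_notify(trapnr);
}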
@ linux-patched/include/xenomai/linux/stdarg.h:343 @
   echo  '                  install to $$(INSTALL_PATH) and run lilo'
   echo  '  vdso_install  - Install unstripped vdso.so to $$(INSTALL_MOD_PATH)/vdso'
 endef
--- linux/init/Kconfig	2022-03-21 12:57:23.886527715 +0100
+
+KBUILD_CFLAGS += -I$(srctree)/arch/$(SRCARCH)/xenomai/include -I$(srctree)/arch/$(SRCARCH)/xenomai/dovetail/include -I$(srctree)/include/xenomai
+core-$(CONFIG_XENOMAI) += arch/arm/xenomai/dovetail/
+++ linux-patched/init/Kconfig	2022-03-21 12:58:28.160900946 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:2391 @
 # <asm/syscall_wrapper.h>.
 config ARCH_HAS_SYSCALL_WRAPPER
 	def_bool n
+menuconfig XENOMAI
+	depends on X86_TSC || !X86
+	bool "Xenomai/cobalt"
+	select IPIPE if HAVE_IPIPE_SUPPORT
+	select IPIPE_WANT_APIREV_2 if IPIPE
+	select DOVETAIL if HAVE_DOVETAIL
+	select DOVETAIL_LEGACY_SYSCALL_RANGE if HAVE_DOVETAIL
+	default y
+	help
+	  Xenomai's Cobalt core is a real-time extension to the Linux
+	  kernel which delivers very short interrupt and scheduling
+	  latencies without affecting the regular kernel services.
+
+	  This option enables the set of extended kernel services
+	  required to run real-time applications in user space over
+	  the Xenomai libraries.
+
+	  Please visit http://xenomai.org for more information.
+
+if XENOMAI
+source "arch/arm/xenomai/Kconfig"
+endif
+
+if MIGRATION
+comment "WARNING! Page migration (CONFIG_MIGRATION) may increase"
+comment "latency."
+endif
+
+if APM || CPU_FREQ || ACPI_PROCESSOR || INTEL_IDLE
+comment "WARNING! At least one of APM, CPU frequency scaling, ACPI 'processor'"
+comment "or CPU idle features is enabled. Any of these options may"
+comment "cause troubles with Xenomai. You should disable them."
+endif
+
+config XENO_VERSION_MAJOR
+       int
+       default 3
+
+config XENO_VERSION_MINOR
+       int
+       default 2
+
+config XENO_REVISION_LEVEL
+       int
+       default 1
+
+config XENO_VERSION_STRING
+       string
+       default "3.2.1"