Diffstat (limited to 'target/linux/lantiq/patches-2.6.39/250-mt-vpe.patch')
-rw-r--r--  target/linux/lantiq/patches-2.6.39/250-mt-vpe.patch  1179
1 files changed, 1179 insertions, 0 deletions
diff --git a/target/linux/lantiq/patches-2.6.39/250-mt-vpe.patch b/target/linux/lantiq/patches-2.6.39/250-mt-vpe.patch
new file mode 100644
index 0000000..0a19289
--- /dev/null
+++ b/target/linux/lantiq/patches-2.6.39/250-mt-vpe.patch
@@ -0,0 +1,1179 @@
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -1871,6 +1871,28 @@
+ Includes a loader for loading an elf relocatable object
+ onto another VPE and running it.
+
++config IFX_VPE_EXT
++ bool "IFX APRP Extensions"
++ depends on MIPS_VPE_LOADER
++ default y
++ help
++ Includes the IFX extensions to APRP.
++
++config PERFCTRS
++ bool "34K Performance counters"
++ depends on MIPS_MT && PROC_FS
++ default n
++ help
++ 34K performance counters exposed through /proc
++
++config MTSCHED
++ bool "Support mtsched priority configuration for TCs"
++ depends on MIPS_MT && PROC_FS
++ default y
++ help
++ Support for mtsched priority configuration for TCs through
++ /proc/mips/mtsched
++
+ config MIPS_MT_SMTC_IM_BACKSTOP
+ bool "Use per-TC register bits as backstop for inhibited IM bits"
+ depends on MIPS_MT_SMTC
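The hunk above introduces three options: CONFIG_IFX_VPE_EXT (the Lantiq/IFX APRP extensions to the VPE loader), CONFIG_MTSCHED (a /proc/mips/mtsched interface for TC/VPE scheduling priorities) and CONFIG_PERFCTRS (a /proc interface to the 34K performance counters). With the defaults shown, a kernel configured with MIPS_VPE_LOADER, MIPS_MT and PROC_FS would typically end up with CONFIG_IFX_VPE_EXT=y and CONFIG_MTSCHED=y in its .config, while CONFIG_PERFCTRS stays disabled unless selected explicitly.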
+--- a/arch/mips/include/asm/mipsmtregs.h
++++ b/arch/mips/include/asm/mipsmtregs.h
+@@ -28,14 +28,34 @@
+ #define read_c0_vpeconf0() __read_32bit_c0_register($1, 2)
+ #define write_c0_vpeconf0(val) __write_32bit_c0_register($1, 2, val)
+
++#define read_c0_vpeconf1() __read_32bit_c0_register($1, 3)
++#define write_c0_vpeconf1(val) __write_32bit_c0_register($1, 3, val)
++
++#define read_c0_vpeschedule() __read_32bit_c0_register($1, 5)
++#define write_c0_vpeschedule(val) __write_32bit_c0_register($1, 5, val)
++
++#define read_c0_vpeschefback() __read_32bit_c0_register($1, 6)
++#define write_c0_vpeschefback(val) __write_32bit_c0_register($1, 6, val)
++
++#define read_c0_vpeopt() __read_32bit_c0_register($1, 7)
++#define write_c0_vpeopt(val) __write_32bit_c0_register($1, 7, val)
++
+ #define read_c0_tcstatus() __read_32bit_c0_register($2, 1)
+ #define write_c0_tcstatus(val) __write_32bit_c0_register($2, 1, val)
+
+ #define read_c0_tcbind() __read_32bit_c0_register($2, 2)
++#define write_c0_tcbind(val) __write_32bit_c0_register($2, 2, val)
+
+ #define read_c0_tccontext() __read_32bit_c0_register($2, 5)
+ #define write_c0_tccontext(val) __write_32bit_c0_register($2, 5, val)
+
++#define read_c0_tcschedule() __read_32bit_c0_register($2, 6)
++#define write_c0_tcschedule(val) __write_32bit_c0_register($2, 6, val)
++
++#define read_c0_tcschefback() __read_32bit_c0_register($2, 7)
++#define write_c0_tcschefback(val) __write_32bit_c0_register($2, 7, val)
++
++
+ #else /* Assembly */
+ /*
+ * Macros for use in assembly language code
+@@ -74,6 +94,8 @@
+ #define MVPCONTROL_STLB_SHIFT 2
+ #define MVPCONTROL_STLB (_ULCAST_(1) << MVPCONTROL_STLB_SHIFT)
+
++#define MVPCONTROL_CPA_SHIFT 3
++#define MVPCONTROL_CPA (_ULCAST_(1) << MVPCONTROL_CPA_SHIFT)
+
+ /* MVPConf0 fields */
+ #define MVPCONF0_PTC_SHIFT 0
+@@ -84,6 +106,8 @@
+ #define MVPCONF0_TCA ( _ULCAST_(1) << MVPCONF0_TCA_SHIFT)
+ #define MVPCONF0_PTLBE_SHIFT 16
+ #define MVPCONF0_PTLBE (_ULCAST_(0x3ff) << MVPCONF0_PTLBE_SHIFT)
++#define MVPCONF0_PCP_SHIFT 27
++#define MVPCONF0_PCP (_ULCAST_(1) << MVPCONF0_PCP_SHIFT)
+ #define MVPCONF0_TLBS_SHIFT 29
+ #define MVPCONF0_TLBS (_ULCAST_(1) << MVPCONF0_TLBS_SHIFT)
+ #define MVPCONF0_M_SHIFT 31
+@@ -121,9 +145,25 @@
+ #define VPECONF0_VPA (_ULCAST_(1) << VPECONF0_VPA_SHIFT)
+ #define VPECONF0_MVP_SHIFT 1
+ #define VPECONF0_MVP (_ULCAST_(1) << VPECONF0_MVP_SHIFT)
++#define VPECONF0_ICS_SHIFT 16
++#define VPECONF0_ICS (_ULCAST_(1) << VPECONF0_ICS_SHIFT)
++#define VPECONF0_DCS_SHIFT 17
++#define VPECONF0_DCS (_ULCAST_(1) << VPECONF0_DCS_SHIFT)
+ #define VPECONF0_XTC_SHIFT 21
+ #define VPECONF0_XTC (_ULCAST_(0xff) << VPECONF0_XTC_SHIFT)
+
++/* VPEOpt fields */
++#define VPEOPT_DWX_SHIFT 0
++#define VPEOPT_IWX_SHIFT 8
++#define VPEOPT_IWX0 ( _ULCAST_(0x1) << VPEOPT_IWX_SHIFT)
++#define VPEOPT_IWX1 ( _ULCAST_(0x2) << VPEOPT_IWX_SHIFT)
++#define VPEOPT_IWX2 ( _ULCAST_(0x4) << VPEOPT_IWX_SHIFT)
++#define VPEOPT_IWX3 ( _ULCAST_(0x8) << VPEOPT_IWX_SHIFT)
++#define VPEOPT_DWX0 ( _ULCAST_(0x1) << VPEOPT_DWX_SHIFT)
++#define VPEOPT_DWX1 ( _ULCAST_(0x2) << VPEOPT_DWX_SHIFT)
++#define VPEOPT_DWX2 ( _ULCAST_(0x4) << VPEOPT_DWX_SHIFT)
++#define VPEOPT_DWX3 ( _ULCAST_(0x8) << VPEOPT_DWX_SHIFT)
++
+ /* TCStatus fields (per TC) */
+ #define TCSTATUS_TASID (_ULCAST_(0xff))
+ #define TCSTATUS_IXMT_SHIFT 10
+@@ -350,6 +390,14 @@
+ #define write_vpe_c0_vpecontrol(val) mttc0(1, 1, val)
+ #define read_vpe_c0_vpeconf0() mftc0(1, 2)
+ #define write_vpe_c0_vpeconf0(val) mttc0(1, 2, val)
++#define read_vpe_c0_vpeschedule() mftc0(1, 5)
++#define write_vpe_c0_vpeschedule(val) mttc0(1, 5, val)
++#define read_vpe_c0_vpeschefback() mftc0(1, 6)
++#define write_vpe_c0_vpeschefback(val) mttc0(1, 6, val)
++#define read_vpe_c0_vpeopt() mftc0(1, 7)
++#define write_vpe_c0_vpeopt(val) mttc0(1, 7, val)
++#define read_vpe_c0_wired() mftc0(6, 0)
++#define write_vpe_c0_wired(val) mttc0(6, 0, val)
+ #define read_vpe_c0_count() mftc0(9, 0)
+ #define write_vpe_c0_count(val) mttc0(9, 0, val)
+ #define read_vpe_c0_status() mftc0(12, 0)
+@@ -381,6 +429,12 @@
+ #define write_tc_c0_tchalt(val) mttc0(2, 4, val)
+ #define read_tc_c0_tccontext() mftc0(2, 5)
+ #define write_tc_c0_tccontext(val) mttc0(2, 5, val)
++#define read_tc_c0_tcschedule() mftc0(2, 6)
++#define write_tc_c0_tcschedule(val) mttc0(2, 6, val)
++#define read_tc_c0_tcschefback() mftc0(2, 7)
++#define write_tc_c0_tcschefback(val) mttc0(2, 7, val)
++#define read_tc_c0_entryhi() mftc0(10, 0)
++#define write_tc_c0_entryhi(val) mttc0(10, 0, val)
+
+ /* GPR */
+ #define read_tc_gpr_sp() mftgpr(29)
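The new mipsmtregs.h accessors follow the file's existing conventions: read_c0_*/write_c0_* act on the current TC, while read_vpe_c0_*/read_tc_c0_* go through the MTTR/MFTR cross-TC path. A minimal sketch of the current-TC variants; the helper below is hypothetical and not part of the patch:

/* Hypothetical helper: snapshot the 34K scheduling state of the running
 * TC and its VPE via the accessors defined above. */
static inline void dump_current_tc_sched(void)
{
	unsigned int tcsched  = read_c0_tcschedule();
	unsigned int tcfback  = read_c0_tcschefback();
	unsigned int vpesched = read_c0_vpeschedule();

	printk(KERN_DEBUG "TCSchedule %08x TCScheFBack %08x VPESchedule %08x\n",
	       tcsched, tcfback, vpesched);
}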
+--- a/arch/mips/kernel/Makefile
++++ b/arch/mips/kernel/Makefile
+@@ -85,7 +85,8 @@
+
+ obj-$(CONFIG_KGDB) += kgdb.o
+ obj-$(CONFIG_PROC_FS) += proc.o
+-
++obj-$(CONFIG_MTSCHED) += mtsched_proc.o
++obj-$(CONFIG_PERFCTRS) += perf_proc.o
+ obj-$(CONFIG_64BIT) += cpu-bugs64.o
+
+ obj-$(CONFIG_I8253) += i8253.o
+--- a/arch/mips/kernel/mips-mt.c
++++ b/arch/mips/kernel/mips-mt.c
+@@ -21,26 +21,96 @@
+ #include <asm/cacheflush.h>
+
+ int vpelimit;
+-
+ static int __init maxvpes(char *str)
+ {
+ get_option(&str, &vpelimit);
+-
+ return 1;
+ }
+-
+ __setup("maxvpes=", maxvpes);
+
+ int tclimit;
+-
+ static int __init maxtcs(char *str)
+ {
+ get_option(&str, &tclimit);
++ return 1;
++}
++__setup("maxtcs=", maxtcs);
+
++#ifdef CONFIG_IFX_VPE_EXT
++int stlb;
++static int __init istlbshared(char *str)
++{
++ get_option(&str, &stlb);
+ return 1;
+ }
++__setup("vpe_tlb_shared=", istlbshared);
+
+-__setup("maxtcs=", maxtcs);
++int vpe0_wired;
++static int __init vpe0wired(char *str)
++{
++ get_option(&str, &vpe0_wired);
++ return 1;
++}
++__setup("vpe0_wired_tlb_entries=", vpe0wired);
++
++int vpe1_wired;
++static int __init vpe1wired(char *str)
++{
++ get_option(&str, &vpe1_wired);
++ return 1;
++}
++__setup("vpe1_wired_tlb_entries=", vpe1wired);
++
++#ifdef CONFIG_MIPS_MT_SMTC
++extern int nostlb;
++#endif
++void configure_tlb(void)
++{
++ int vpeflags, tcflags, tlbsiz;
++ unsigned int config1val;
++ vpeflags = dvpe();
++ tcflags = dmt();
++ write_c0_vpeconf0((read_c0_vpeconf0() | VPECONF0_MVP));
++ write_c0_mvpcontrol((read_c0_mvpcontrol() | MVPCONTROL_VPC));
++ mips_ihb();
++ //printk("stlb = %d, vpe0_wired = %d vpe1_wired=%d\n", stlb,vpe0_wired, vpe1_wired);
++ if (stlb) {
++ if (!(read_c0_mvpconf0() & MVPCONF0_TLBS)) {
++ emt(tcflags);
++ evpe(vpeflags);
++ return;
++ }
++
++ write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
++ write_c0_wired(vpe0_wired + vpe1_wired);
++ if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
++ config1val = read_vpe_c0_config1();
++ tlbsiz = (((config1val >> 25) & 0x3f) + 1);
++ if (tlbsiz > 64)
++ tlbsiz = 64;
++ cpu_data[0].tlbsize = tlbsiz;
++ current_cpu_data.tlbsize = tlbsiz;
++ }
++
++ }
++ else {
++ write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_STLB);
++ write_c0_wired(vpe0_wired);
++ }
++
++ ehb();
++ write_c0_mvpcontrol((read_c0_mvpcontrol() & ~MVPCONTROL_VPC));
++ ehb();
++ local_flush_tlb_all();
++
++ printk("Wired TLB entries for Linux read_c0_wired() = %d\n", read_c0_wired());
++#ifdef CONFIG_MIPS_MT_SMTC
++ nostlb = !stlb;
++#endif
++ emt(tcflags);
++ evpe(vpeflags);
++}
++#endif
+
+ /*
+ * Dump new MIPS MT state for the core. Does not leave TCs halted.
+@@ -78,18 +148,18 @@
+ if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
+ printk(" VPE %d\n", i);
+ printk(" VPEControl : %08lx\n",
+- read_vpe_c0_vpecontrol());
++ read_vpe_c0_vpecontrol());
+ printk(" VPEConf0 : %08lx\n",
+- read_vpe_c0_vpeconf0());
++ read_vpe_c0_vpeconf0());
+ printk(" VPE%d.Status : %08lx\n",
+- i, read_vpe_c0_status());
++ i, read_vpe_c0_status());
+ printk(" VPE%d.EPC : %08lx %pS\n",
+- i, read_vpe_c0_epc(),
+- (void *) read_vpe_c0_epc());
++ i, read_vpe_c0_epc(),
++ (void *) read_vpe_c0_epc());
+ printk(" VPE%d.Cause : %08lx\n",
+- i, read_vpe_c0_cause());
++ i, read_vpe_c0_cause());
+ printk(" VPE%d.Config7 : %08lx\n",
+- i, read_vpe_c0_config7());
++ i, read_vpe_c0_config7());
+ break; /* Next VPE */
+ }
+ }
+@@ -287,6 +357,9 @@
+ printk("Mapped %ld ITC cells starting at 0x%08x\n",
+ ((itcblkgrn & 0x7fe00000) >> 20), itc_base);
+ }
++#ifdef CONFIG_IFX_VPE_EXT
++ configure_tlb();
++#endif
+ }
+
+ /*
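The mips-mt.c changes above add three boot parameters consumed by configure_tlb(): vpe_tlb_shared= selects whether the TLB is shared between the VPEs, and vpe0_wired_tlb_entries= / vpe1_wired_tlb_entries= reserve wired entries for each VPE. As an illustration (values are placeholders, not defaults), a command line carrying "vpe_tlb_shared=1 vpe0_wired_tlb_entries=4 vpe1_wired_tlb_entries=4" alongside the existing maxvpes=/maxtcs= options would enable the shared TLB and wire eight entries before the VPE loader brings up the second VPE.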
+--- a/arch/mips/kernel/proc.c
++++ b/arch/mips/kernel/proc.c
+@@ -7,6 +7,7 @@
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
+ #include <linux/seq_file.h>
++#include <linux/proc_fs.h>
+ #include <asm/bootinfo.h>
+ #include <asm/cpu.h>
+ #include <asm/cpu-features.h>
+@@ -110,3 +111,19 @@
+ .stop = c_stop,
+ .show = show_cpuinfo,
+ };
++
++/*
++ * Support for MIPS/local /proc hooks in /proc/mips/
++ */
++
++static struct proc_dir_entry *mips_proc = NULL;
++
++struct proc_dir_entry *get_mips_proc_dir(void)
++{
++ /*
++ * This ought not to be preemptable.
++ */
++ if(mips_proc == NULL)
++ mips_proc = proc_mkdir("mips", NULL);
++ return(mips_proc);
++}
+--- a/arch/mips/kernel/smtc.c
++++ b/arch/mips/kernel/smtc.c
+@@ -1334,6 +1334,13 @@
+ asid = asid_cache(cpu);
+
+ do {
++#ifdef CONFIG_IFX_VPE_EXT
++ /* If TLB is shared between AP and RP (AP is running SMTC),
++ leave out max ASID i.e., ASID_MASK for RP
++ */
++ if (!nostlb && ((asid & ASID_MASK) == (ASID_MASK - 1)))
++ asid++;
++#endif
+ if (!((asid += ASID_INC) & ASID_MASK) ) {
+ if (cpu_has_vtag_icache)
+ flush_icache_all();
+--- a/arch/mips/kernel/vpe.c
++++ b/arch/mips/kernel/vpe.c
+@@ -76,6 +76,58 @@
+ static int kspd_events_reqd;
+ #endif
+
++#ifdef CONFIG_IFX_VPE_EXT
++static int is_sdepgm;
++extern int stlb;
++extern int vpe0_wired;
++extern int vpe1_wired;
++unsigned int vpe1_load_addr;
++
++static int __init load_address(char *str)
++{
++ get_option(&str, &vpe1_load_addr);
++ return 1;
++}
++__setup("vpe1_load_addr=", load_address);
++
++#include <asm/mipsmtregs.h>
++#define write_vpe_c0_wired(val) mttc0(6, 0, val)
++
++#ifndef COMMAND_LINE_SIZE
++# define COMMAND_LINE_SIZE 512
++#endif
++
++char command_line[COMMAND_LINE_SIZE * 2];
++
++static unsigned int vpe1_mem;
++static int __init vpe1mem(char *str)
++{
++ vpe1_mem = memparse(str, &str);
++ return 1;
++}
++__setup("vpe1_mem=", vpe1mem);
++
++uint32_t vpe1_wdog_ctr;
++static int __init wdog_ctr(char *str)
++{
++ get_option(&str, &vpe1_wdog_ctr);
++ return 1;
++}
++
++__setup("vpe1_wdog_ctr_addr=", wdog_ctr);
++EXPORT_SYMBOL(vpe1_wdog_ctr);
++
++uint32_t vpe1_wdog_timeout;
++static int __init wdog_timeout(char *str)
++{
++ get_option(&str, &vpe1_wdog_timeout);
++ return 1;
++}
++
++__setup("vpe1_wdog_timeout=", wdog_timeout);
++EXPORT_SYMBOL(vpe1_wdog_timeout);
++
++#endif
+ /* grab the likely amount of memory we will need. */
+ #ifdef CONFIG_MIPS_VPE_LOADER_TOM
+ #define P_SIZE (2 * 1024 * 1024)
+@@ -268,6 +320,13 @@
+ void *addr;
+
+ #ifdef CONFIG_MIPS_VPE_LOADER_TOM
++#ifdef CONFIG_IFX_VPE_EXT
++ if (vpe1_load_addr) {
++ memset((void *)vpe1_load_addr, 0, len);
++ return (void *)vpe1_load_addr;
++ }
++#endif
++
+ /*
+ * This means you must tell Linux to use less memory than you
+ * physically have, for example by passing a mem= boot argument.
+@@ -746,6 +805,12 @@
+ }
+
+ /* Write the address we want it to start running from in the TCPC register. */
++#if defined(CONFIG_IFX_VPE_EXT) && 0
++ if (stlb)
++ write_vpe_c0_wired(vpe0_wired + vpe1_wired);
++ else
++ write_vpe_c0_wired(vpe1_wired);
++#endif
+ write_tc_c0_tcrestart((unsigned long)v->__start);
+ write_tc_c0_tccontext((unsigned long)0);
+
+@@ -759,6 +824,20 @@
+
+ write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
+
++#if defined(CONFIG_IFX_VPE_EXT) && 0
++ /*
++ * $a2 & $a3 are used to pass command line parameters to VPE1. $a2
++ * points to the start of the command line string and $a3 points to
++ * the end of the string. This convention is identical to the Linux
++ * kernel boot parameter passing mechanism. Please note that $a3 is
++ * used to pass physical memory size or 0 in SDE tool kit. So, if you
++ are passing command line parameters through $a2 & $a3 SDE programs
++ * don't work as desired.
++ */
++ mttgpr(6, command_line);
++ mttgpr(7, (command_line + strlen(command_line)));
++ if (is_sdepgm)
++#endif
+ /*
+ * The sde-kit passes 'memsize' to __start in $a3, so set something
+ * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
+@@ -833,6 +912,9 @@
+ if ( (v->__start == 0) || (v->shared_ptr == NULL))
+ return -1;
+
++#ifdef CONFIG_IFX_VPE_EXT
++ is_sdepgm = 1;
++#endif
+ return 0;
+ }
+
+@@ -994,6 +1076,15 @@
+ (unsigned long)v->load_addr + v->len);
+
+ if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
++#ifdef CONFIG_IFX_VPE_EXT
++ if (vpe1_load_addr) {
++ /* Conversion to KSEG1 is required ??? */
++ v->__start = KSEG1ADDR(vpe1_load_addr);
++ is_sdepgm = 0;
++ return 0;
++ }
++#endif
++
+ if (v->__start == 0) {
+ printk(KERN_WARNING "VPE loader: program does not contain "
+ "a __start symbol\n");
+@@ -1064,6 +1155,9 @@
+ struct vpe_notifications *not;
+ struct vpe *v;
+ int ret;
++#ifdef CONFIG_IFX_VPE_EXT
++ int progsize;
++#endif
+
+ if (minor != iminor(inode)) {
+ /* assume only 1 device at the moment. */
+@@ -1089,7 +1183,12 @@
+ release_progmem(v->load_addr);
+ cleanup_tc(get_tc(tclimit));
+ }
+-
++#ifdef CONFIG_IFX_VPE_EXT
++ progsize = (vpe1_mem != 0) ? vpe1_mem : P_SIZE;
++ //printk("progsize = %x\n", progsize);
++ v->pbuffer = vmalloc(progsize);
++ v->plen = progsize;
++#else
+ /* this of-course trashes what was there before... */
+ v->pbuffer = vmalloc(P_SIZE);
+ if (!v->pbuffer) {
+@@ -1097,11 +1196,14 @@
+ return -ENOMEM;
+ }
+ v->plen = P_SIZE;
++#endif
+ v->load_addr = NULL;
+ v->len = 0;
+
++#if 0
+ v->uid = filp->f_cred->fsuid;
+ v->gid = filp->f_cred->fsgid;
++#endif
+
+ #ifdef CONFIG_MIPS_APSP_KSPD
+ /* get kspd to tell us when a syscall_exit happens */
+@@ -1349,6 +1451,133 @@
+ cleanup_tc(get_tc(sp_id));
+ }
+ #endif
++#ifdef CONFIG_IFX_VPE_EXT
++int32_t vpe1_sw_start(void* sw_start_addr, uint32_t tcmask, uint32_t flags)
++{
++ enum vpe_state state;
++ struct vpe *v = get_vpe(tclimit);
++ struct vpe_notifications *not;
++
++ if (tcmask || flags) {
++ printk(KERN_WARNING "Currently tcmask and flags should be 0.\
++ other values not supported\n");
++ return -1;
++ }
++
++ state = xchg(&v->state, VPE_STATE_INUSE);
++ if (state != VPE_STATE_UNUSED) {
++ vpe_stop(v);
++
++ list_for_each_entry(not, &v->notify, list) {
++ not->stop(tclimit);
++ }
++ }
++
++ v->__start = (unsigned long)sw_start_addr;
++ is_sdepgm = 0;
++
++ if (!vpe_run(v)) {
++ printk(KERN_DEBUG "VPE loader: VPE1 running successfully\n");
++ return 0;
++ }
++ return -1;
++}
++
++EXPORT_SYMBOL(vpe1_sw_start);
++
++int32_t vpe1_sw_stop(uint32_t flags)
++{
++ struct vpe *v = get_vpe(tclimit);
++
++ if (!vpe_free(v)) {
++ printk(KERN_DEBUG "RP Stopped\n");
++ return 0;
++ }
++ else
++ return -1;
++}
++
++EXPORT_SYMBOL(vpe1_sw_stop);
++
++uint32_t vpe1_get_load_addr (uint32_t flags)
++{
++ return vpe1_load_addr;
++}
++
++EXPORT_SYMBOL(vpe1_get_load_addr);
++
++uint32_t vpe1_get_max_mem (uint32_t flags)
++{
++ if (!vpe1_mem)
++ return P_SIZE;
++ else
++ return vpe1_mem;
++}
++
++EXPORT_SYMBOL(vpe1_get_max_mem);
++
++void* vpe1_get_cmdline_argument(void)
++{
++ return saved_command_line;
++}
++
++EXPORT_SYMBOL(vpe1_get_cmdline_argument);
++
++int32_t vpe1_set_boot_param(char *field, char *value, char flags)
++{
++ char *ptr, string[64];
++ int start_off, end_off;
++ if (!field)
++ return -1;
++ strcpy(string, field);
++ if (value) {
++ strcat(string, "=");
++ strcat(string, value);
++ strcat(command_line, " ");
++ strcat(command_line, string);
++ }
++ else {
++ ptr = strstr(command_line, string);
++ if (ptr) {
++ start_off = ptr - command_line;
++ ptr += strlen(string);
++ while ((*ptr != ' ') && (*ptr != '\0'))
++ ptr++;
++ end_off = ptr - command_line;
++ command_line[start_off] = '\0';
++ strcat (command_line, command_line+end_off);
++ }
++ }
++ return 0;
++}
++
++EXPORT_SYMBOL(vpe1_set_boot_param);
++
++int32_t vpe1_get_boot_param(char *field, char **value, char flags)
++{
++ char *ptr, string[64];
++ int i = 0;
++ if (!field)
++ return -1;
++ if ((ptr = strstr(command_line, field))) {
++ ptr += strlen(field) + 1; /* including = */
++ while ((*ptr != ' ') && (*ptr != '\0'))
++ string[i++] = *ptr++;
++ string[i] = '\0';
++ *value = kmalloc((strlen(string) + 1), GFP_KERNEL);
++ if (*value != NULL)
++ strcpy(*value, string);
++ }
++ else
++ *value = NULL;
++
++ return 0;
++}
++
++EXPORT_SYMBOL(vpe1_get_boot_param);
++
++extern void configure_tlb(void);
++#endif
+
+ static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+@@ -1430,6 +1659,18 @@
+ printk("VPE loader: not a MIPS MT capable processor\n");
+ return -ENODEV;
+ }
++#ifdef CONFIG_IFX_VPE_EXT
++#ifndef CONFIG_MIPS_MT_SMTC
++ configure_tlb();
++#endif
++#endif
++
++#ifndef CONFIG_MIPS_MT_SMTC
++ if (!vpelimit)
++ vpelimit = 1;
++ if (!tclimit)
++ tclimit = 1;
++#endif
+
+ if (vpelimit == 0) {
+ printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
+@@ -1474,10 +1715,12 @@
+ mtflags = dmt();
+ vpflags = dvpe();
+
++ back_to_back_c0_hazard();
++
+ /* Put MVPE's into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+- /* dump_mtregs(); */
++ dump_mtregs();
+
+ val = read_c0_mvpconf0();
+ hw_tcs = (val & MVPCONF0_PTC) + 1;
+@@ -1489,6 +1732,7 @@
+ * reschedule send IPIs or similar we might hang.
+ */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
++ back_to_back_c0_hazard();
+ evpe(vpflags);
+ emt(mtflags);
+ local_irq_restore(flags);
+@@ -1514,6 +1758,7 @@
+ }
+
+ v->ntcs = hw_tcs - tclimit;
++ write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
+
+ /* add the tc to the list of this vpe's tc's. */
+ list_add(&t->tc, &v->tc);
+@@ -1582,6 +1827,7 @@
+ out_reenable:
+ /* release config state */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
++ back_to_back_c0_hazard();
+
+ evpe(vpflags);
+ emt(mtflags);
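Besides the character-device loader, the vpe.c changes export a small in-kernel API for VPE1 firmware control (vpe1_sw_start, vpe1_sw_stop, vpe1_get_load_addr, vpe1_get_max_mem, vpe1_set_boot_param, vpe1_get_boot_param). A minimal sketch of a hypothetical caller, assuming the image was built to run from the vpe1_load_addr= region; the function names and error handling below are illustrative only:

/* Declarations normally come from a platform header; repeated here
 * to keep the sketch self-contained. */
extern int32_t vpe1_sw_start(void *sw_start_addr, uint32_t tcmask, uint32_t flags);
extern int32_t vpe1_sw_stop(uint32_t flags);
extern uint32_t vpe1_get_load_addr(uint32_t flags);
extern uint32_t vpe1_get_max_mem(uint32_t flags);

static int start_vpe1_firmware(const void *image, size_t len)
{
	uint32_t load_addr = vpe1_get_load_addr(0);

	if (!load_addr || len > vpe1_get_max_mem(0))
		return -EINVAL;

	/* The loader itself uses an uncached (KSEG1) view of this region. */
	memcpy((void *)KSEG1ADDR(load_addr), image, len);

	/* tcmask and flags must currently be 0, see vpe1_sw_start() above. */
	return vpe1_sw_start((void *)KSEG1ADDR(load_addr), 0, 0);
}

static void stop_vpe1_firmware(void)
{
	vpe1_sw_stop(0);
}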
+--- /dev/null
++++ b/arch/mips/kernel/mtsched_proc.c
+@@ -0,0 +1,279 @@
++/*
++ * /proc hooks for MIPS MT scheduling policy management for 34K cores
++ *
++ * This program is free software; you can distribute it and/or modify it
++ * under the terms of the GNU General Public License (Version 2) as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
++ *
++ * Copyright (C) 2006 Mips Technologies, Inc
++ */
++
++#include <linux/kernel.h>
++
++#include <asm/cpu.h>
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/mipsregs.h>
++#include <asm/mipsmtregs.h>
++#include <asm/uaccess.h>
++#include <linux/proc_fs.h>
++
++static struct proc_dir_entry *mtsched_proc;
++
++#ifndef CONFIG_MIPS_MT_SMTC
++#define NTCS 2
++#else
++#define NTCS NR_CPUS
++#endif
++#define NVPES 2
++
++int lastvpe = 1;
++int lasttc = 8;
++
++static int proc_read_mtsched(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ int totalen = 0;
++ int len;
++
++ int i;
++ int vpe;
++ int mytc;
++ unsigned long flags;
++ unsigned int mtflags;
++ unsigned int haltstate;
++ unsigned int vpes_checked[NVPES];
++ unsigned int vpeschedule[NVPES];
++ unsigned int vpeschefback[NVPES];
++ unsigned int tcschedule[NTCS];
++ unsigned int tcschefback[NTCS];
++
++ /* Dump the state of the MIPS MT scheduling policy manager */
++ /* Initialize control state */
++ for(i = 0; i < NVPES; i++) {
++ vpes_checked[i] = 0;
++ vpeschedule[i] = 0;
++ vpeschefback[i] = 0;
++ }
++ for(i = 0; i < NTCS; i++) {
++ tcschedule[i] = 0;
++ tcschefback[i] = 0;
++ }
++
++ /* Disable interrupts and multithreaded issue */
++ local_irq_save(flags);
++ mtflags = dvpe();
++
++ /* Then go through the TCs, halt 'em, and extract the values */
++ mytc = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
++ for(i = 0; i < NTCS; i++) {
++ if(i == mytc) {
++ /* No need to halt ourselves! */
++ tcschedule[i] = read_c0_tcschedule();
++ tcschefback[i] = read_c0_tcschefback();
++ /* If VPE bound to TC hasn't been checked, do it */
++ vpe = read_c0_tcbind() & TCBIND_CURVPE;
++ if(!vpes_checked[vpe]) {
++ vpeschedule[vpe] = read_c0_vpeschedule();
++ vpeschefback[vpe] = read_c0_vpeschefback();
++ vpes_checked[vpe] = 1;
++ }
++ } else {
++ settc(i);
++ haltstate = read_tc_c0_tchalt();
++ write_tc_c0_tchalt(TCHALT_H);
++ mips_ihb();
++ tcschedule[i] = read_tc_c0_tcschedule();
++ tcschefback[i] = read_tc_c0_tcschefback();
++ /* If VPE bound to TC hasn't been checked, do it */
++ vpe = read_tc_c0_tcbind() & TCBIND_CURVPE;
++ if(!vpes_checked[vpe]) {
++ vpeschedule[vpe] = read_vpe_c0_vpeschedule();
++ vpeschefback[vpe] = read_vpe_c0_vpeschefback();
++ vpes_checked[vpe] = 1;
++ }
++ if(!haltstate) write_tc_c0_tchalt(0);
++ }
++ }
++ /* Re-enable MT and interrupts */
++ evpe(mtflags);
++ local_irq_restore(flags);
++
++ for(vpe=0; vpe < NVPES; vpe++) {
++ len = sprintf(page, "VPE[%d].VPEschedule = 0x%08x\n",
++ vpe, vpeschedule[vpe]);
++ totalen += len;
++ page += len;
++ len = sprintf(page, "VPE[%d].VPEschefback = 0x%08x\n",
++ vpe, vpeschefback[vpe]);
++ totalen += len;
++ page += len;
++ }
++ for(i=0; i < NTCS; i++) {
++ len = sprintf(page, "TC[%d].TCschedule = 0x%08x\n",
++ i, tcschedule[i]);
++ totalen += len;
++ page += len;
++ len = sprintf(page, "TC[%d].TCschefback = 0x%08x\n",
++ i, tcschefback[i]);
++ totalen += len;
++ page += len;
++ }
++ return totalen;
++}
++
++/*
++ * Write to MT scheduling control registers based on text input
++ */
++
++#define TXTBUFSZ 100
++
++static int proc_write_mtsched(struct file *file, const char *buffer,
++ unsigned long count, void *data)
++{
++ int len = 0;
++ char mybuf[TXTBUFSZ];
++ /* At most, we will set up 9 TCs and 2 VPEs, 11 entries in all */
++ char entity[1]; //, entity1[1];
++ int number[1];
++ unsigned long value[1];
++ int nparsed = 0 , index = 0;
++ unsigned long flags;
++ unsigned int mtflags;
++ unsigned int haltstate;
++ unsigned int tcbindval;
++
++ if(count >= TXTBUFSZ) len = TXTBUFSZ-1;
++ else len = count;
++ memset(mybuf,0,TXTBUFSZ);
++ if(copy_from_user(mybuf, buffer, len)) return -EFAULT;
++
++ nparsed = sscanf(mybuf, "%c%d %lx",
++ &entity[0] ,&number[0], &value[0]);
++
++ /*
++ * Having acquired the inputs, which might have
++ * generated exceptions and preemptions,
++ * program the registers.
++ */
++ /* Disable interrupts and multithreaded issue */
++ local_irq_save(flags);
++ mtflags = dvpe();
++
++ if(entity[index] == 't' ) {
++ /* Set TCSchedule or TCScheFBack of specified TC */
++ if(number[index] > NTCS) goto skip;
++ /* If it's our own TC, do it direct */
++ if(number[index] ==
++ ((read_c0_tcbind() & TCBIND_CURTC)
++ >> TCBIND_CURTC_SHIFT)) {
++ if(entity[index] == 't')
++ write_c0_tcschedule(value[index]);
++ else
++ write_c0_tcschefback(value[index]);
++ } else {
++ /* Otherwise, we do it via MTTR */
++ settc(number[index]);
++ haltstate = read_tc_c0_tchalt();
++ write_tc_c0_tchalt(TCHALT_H);
++ mips_ihb();
++ if(entity[index] == 't')
++ write_tc_c0_tcschedule(value[index]);
++ else
++ write_tc_c0_tcschefback(value[index]);
++ mips_ihb();
++ if(!haltstate) write_tc_c0_tchalt(0);
++ }
++ } else if(entity[index] == 'v') {
++ /* Set VPESchedule of specified VPE */
++ if(number[index] > NVPES) goto skip;
++ tcbindval = read_c0_tcbind();
++ /* Are we doing this to our current VPE? */
++ if((tcbindval & TCBIND_CURVPE) == number[index]) {
++ /* Then life is simple */
++ write_c0_vpeschedule(value[index]);
++ } else {
++ /*
++ * Bind ourselves to the other VPE long enough
++ * to program the bind value.
++ */
++ write_c0_tcbind((tcbindval & ~TCBIND_CURVPE)
++ | number[index]);
++ mips_ihb();
++ write_c0_vpeschedule(value[index]);
++ mips_ihb();
++ /* Restore previous binding */
++ write_c0_tcbind(tcbindval);
++ mips_ihb();
++ }
++ }
++
++ else if(entity[index] == 'r') {
++ unsigned int vpes_checked[2], vpe ,i , mytc;
++ vpes_checked[0] = vpes_checked[1] = 0;
++
++ /* Then go through the TCs, halt 'em, and extract the values */
++ mytc = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
++
++ for(i = 0; i < NTCS; i++) {
++ if(i == mytc) {
++ /* No need to halt ourselves! */
++ write_c0_vpeschefback(0);
++ write_c0_tcschefback(0);
++ } else {
++ settc(i);
++ haltstate = read_tc_c0_tchalt();
++ write_tc_c0_tchalt(TCHALT_H);
++ mips_ihb();
++ write_tc_c0_tcschefback(0);
++ /* If VPE bound to TC hasn't been checked, do it */
++ vpe = read_tc_c0_tcbind() & TCBIND_CURVPE;
++ if(!vpes_checked[vpe]) {
++ write_vpe_c0_vpeschefback(0);
++ vpes_checked[vpe] = 1;
++ }
++ if(!haltstate) write_tc_c0_tchalt(0);
++ }
++ }
++ }
++ else {
++ printk ("\n Usage : <t/v><0/1> <Hex Value>\n Example : t0 0x01\n");
++ }
++
++skip:
++ /* Re-enable MT and interrupts */
++ evpe(mtflags);
++ local_irq_restore(flags);
++ return (len);
++}
++
++static int __init init_mtsched_proc(void)
++{
++ extern struct proc_dir_entry *get_mips_proc_dir(void);
++ struct proc_dir_entry *mips_proc_dir;
++
++ if (!cpu_has_mipsmt) {
++ printk("mtsched: not a MIPS MT capable processor\n");
++ return -ENODEV;
++ }
++
++ mips_proc_dir = get_mips_proc_dir();
++
++ mtsched_proc = create_proc_entry("mtsched", 0644, mips_proc_dir);
++ mtsched_proc->read_proc = proc_read_mtsched;
++ mtsched_proc->write_proc = proc_write_mtsched;
++
++ return 0;
++}
++
++/* Automagically create the entry */
++module_init(init_mtsched_proc);
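Once CONFIG_MTSCHED is enabled, the entry appears as /proc/mips/mtsched. Reading it dumps the VPESchedule/VPEScheFBack and TCSchedule/TCScheFBack registers of all VPEs and TCs; writes use the format parsed by proc_write_mtsched above (an entity letter, an index and a hex value), so for example "echo 't0 0x01' > /proc/mips/mtsched" programs TCSchedule of TC0, "v1 <value>" programs VPESchedule of VPE1, and an "r" command clears the ScheFBack feedback registers on all TCs and VPEs.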
+--- /dev/null
++++ b/arch/mips/kernel/perf_proc.c
+@@ -0,0 +1,191 @@
++/*
++ * /proc hooks for CPU performance counter support for SMTC kernel
++ * (and ultimately others)
++ * Copyright (C) 2006 Mips Technologies, Inc
++ */
++
++#include <linux/kernel.h>
++
++#include <asm/cpu.h>
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/mipsregs.h>
++#include <asm/uaccess.h>
++#include <linux/proc_fs.h>
++
++/*
++ * /proc diagnostic and statistics hooks
++ */
++
++
++/* Internal software-extended event counters */
++
++static unsigned long long extencount[4] = {0,0,0,0};
++
++static struct proc_dir_entry *perf_proc;
++
++static int proc_read_perf(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ int totalen = 0;
++ int len;
++
++ len = sprintf(page, "PerfCnt[0].Ctl : 0x%08x\n", read_c0_perfctrl0());
++ totalen += len;
++ page += len;
++ len = sprintf(page, "PerfCnt[0].Cnt : %Lu\n",
++ extencount[0] + (unsigned long long)((unsigned)read_c0_perfcntr0()));
++ totalen += len;
++ page += len;
++ len = sprintf(page, "PerfCnt[1].Ctl : 0x%08x\n", read_c0_perfctrl1());
++ totalen += len;
++ page += len;
++ len = sprintf(page, "PerfCnt[1].Cnt : %Lu\n",
++ extencount[1] + (unsigned long long)((unsigned)read_c0_perfcntr1()));
++ totalen += len;
++ page += len;
++ len = sprintf(page, "PerfCnt[2].Ctl : 0x%08x\n", read_c0_perfctrl2());
++ totalen += len;
++ page += len;
++ len = sprintf(page, "PerfCnt[2].Cnt : %Lu\n",
++ extencount[2] + (unsigned long long)((unsigned)read_c0_perfcntr2()));
++ totalen += len;
++ page += len;
++ len = sprintf(page, "PerfCnt[3].Ctl : 0x%08x\n", read_c0_perfctrl3());
++ totalen += len;
++ page += len;
++ len = sprintf(page, "PerfCnt[3].Cnt : %Lu\n",
++ extencount[3] + (unsigned long long)((unsigned)read_c0_perfcntr3()));
++ totalen += len;
++ page += len;
++
++ return totalen;
++}
++
++/*
++ * Write to perf counter registers based on text input
++ */
++
++#define TXTBUFSZ 100
++
++static int proc_write_perf(struct file *file, const char *buffer,
++ unsigned long count, void *data)
++{
++ int len;
++ int nparsed;
++ int index;
++ char mybuf[TXTBUFSZ];
++
++ int which[4];
++ unsigned long control[4];
++ long long ctrdata[4];
++
++ if(count >= TXTBUFSZ) len = TXTBUFSZ-1;
++ else len = count;
++ memset(mybuf,0,TXTBUFSZ);
++ if(copy_from_user(mybuf, buffer, len)) return -EFAULT;
++
++ nparsed = sscanf(mybuf,
++ "%d %lx %Ld %d %lx %Ld %d %lx %Ld %d %lx %Ld",
++ &which[0], &control[0], &ctrdata[0],
++ &which[1], &control[1], &ctrdata[1],
++ &which[2], &control[2], &ctrdata[2],
++ &which[3], &control[3], &ctrdata[3]);
++
++ for(index = 0; nparsed >= 3; index++) {
++ switch (which[index]) {
++ case 0:
++ write_c0_perfctrl0(control[index]);
++ if(ctrdata[index] != -1) {
++ extencount[0] = (unsigned long long)ctrdata[index];
++ write_c0_perfcntr0((unsigned long)0);
++ }
++ break;
++ case 1:
++ write_c0_perfctrl1(control[index]);
++ if(ctrdata[index] != -1) {
++ extencount[1] = (unsigned long long)ctrdata[index];
++ write_c0_perfcntr1((unsigned long)0);
++ }
++ break;
++ case 2:
++ write_c0_perfctrl2(control[index]);
++ if(ctrdata[index] != -1) {
++ extencount[2] = (unsigned long long)ctrdata[index];
++ write_c0_perfcntr2((unsigned long)0);
++ }
++ break;
++ case 3:
++ write_c0_perfctrl3(control[index]);
++ if(ctrdata[index] != -1) {
++ extencount[3] = (unsigned long long)ctrdata[index];
++ write_c0_perfcntr3((unsigned long)0);
++ }
++ break;
++ }
++ nparsed -= 3;
++ }
++ return (len);
++}
++
++extern int (*perf_irq)(void);
++
++/*
++ * Invoked when timer interrupt vector picks up a perf counter overflow
++ */
++
++static int perf_proc_irq(void)
++{
++ unsigned long snapshot;
++
++ /*
++ * It would be nice to do this as a loop, but we don't have
++ * indirect access to CP0 registers.
++ */
++ snapshot = read_c0_perfcntr0();
++ if ((long)snapshot < 0) {
++ extencount[0] +=
++ (unsigned long long)((unsigned)read_c0_perfcntr0());
++ write_c0_perfcntr0(0);
++ }
++ snapshot = read_c0_perfcntr1();
++ if ((long)snapshot < 0) {
++ extencount[1] +=
++ (unsigned long long)((unsigned)read_c0_perfcntr1());
++ write_c0_perfcntr1(0);
++ }
++ snapshot = read_c0_perfcntr2();
++ if ((long)snapshot < 0) {
++ extencount[2] +=
++ (unsigned long long)((unsigned)read_c0_perfcntr2());
++ write_c0_perfcntr2(0);
++ }
++ snapshot = read_c0_perfcntr3();
++ if ((long)snapshot < 0) {
++ extencount[3] +=
++ (unsigned long long)((unsigned)read_c0_perfcntr3());
++ write_c0_perfcntr3(0);
++ }
++ return 0;
++}
++
++static int __init init_perf_proc(void)
++{
++ extern struct proc_dir_entry *get_mips_proc_dir(void);
++
++ struct proc_dir_entry *mips_proc_dir = get_mips_proc_dir();
++
++ write_c0_perfcntr0(0);
++ write_c0_perfcntr1(0);
++ write_c0_perfcntr2(0);
++ write_c0_perfcntr3(0);
++ perf_proc = create_proc_entry("perf", 0644, mips_proc_dir);
++ perf_proc->read_proc = proc_read_perf;
++ perf_proc->write_proc = proc_write_perf;
++ perf_irq = perf_proc_irq;
++
++ return 0;
++}
++
++/* Automagically create the entry */
++module_init(init_perf_proc);
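With CONFIG_PERFCTRS enabled the counters are exposed as /proc/mips/perf. Reads print PerfCnt[0..3].Ctl and the software-extended 64-bit counts; writes take up to four "<counter> <control> <count>" triples matching the sscanf format above, where <control> is the PerfCtl value in hex and a <count> of -1 leaves the accumulated count untouched (any other value reloads the software-extended counter and zeroes the hardware register). The perf_proc_irq() hook accumulates counter overflows into the 64-bit extencount[] values, so the reported counts keep growing past the 32-bit hardware width.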