/*
 *  Startup Code for MIPS64 CPU-core
 *
 *  Copyright (c) 2003	Wolfgang Denk <wd@denx.de>
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>

#ifndef CONFIG_SYS_MIPS_CACHE_MODE
#define CONFIG_SYS_MIPS_CACHE_MODE CONF_CM_CACHABLE_NONCOHERENT
#endif

#ifdef CONFIG_SYS_LITTLE_ENDIAN
#define MIPS64_R_INFO(ssym, r_type3, r_type2, r_type) \
	(((r_type) << 24) | ((r_type2) << 16) | ((r_type3) << 8) | (ssym))
#else
#define MIPS64_R_INFO(ssym, r_type3, r_type2, r_type) \
	((r_type) | ((r_type2) << 8) | ((r_type3) << 16) | (ssym) << 24)
#endif

	/*
	 * For the moment disable interrupts, mark the kernel mode and
	 * set ST0_KX so that the CPU does not spit fire when using
	 * 64-bit addresses.
	 */
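
	/*
	 * The macro below works as follows: the OR sets ST0_CU0, the
	 * requested \set bits and, temporarily, every bit that is to be
	 * cleared; the XOR then flips the interrupt/mode bits (0x1f) and
	 * \clr back to zero.  "sll zero, 3" is the EHB (execution hazard
	 * barrier) encoding, which reads as a plain nop on CPUs that
	 * predate it.
	 */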
	.macro	setup_c0_status set clr
	.set	push
	mfc0	t0, CP0_STATUS
	or	t0, ST0_CU0 | \set | 0x1f | \clr
	xor	t0, 0x1f | \clr
	mtc0	t0, CP0_STATUS
	.set	noreorder
	sll	zero, 3				# ehb
	.set	pop
	.endm

	.set noreorder

	.globl _start
	.text
_start:
	/* U-Boot entry point */
	b	reset
	nop

	.org 0x200
	/* TLB refill, 32 bit task */
1:	b	1b
	nop

	.org 0x280
	/* XTLB refill, 64 bit task */
1:	b	1b
	nop

	.org 0x300
	/* Cache error exception */
1:	b	1b
	nop

	.org 0x380
	/* General exception */
1:	b	1b
	nop

	.org 0x400
	/* Catch interrupt exceptions */
1:	b	1b
	nop

	.org 0x480
	/* EJTAG debug exception */
1:	b	1b
	nop
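
	/*
	 * Each of the vectors above is just a placeholder that spins in a
	 * tight loop: no real exception handlers are installed at this
	 * stage, so a stray exception simply hangs the CPU here.
	 */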

	.align 4
reset:

	/* Clear watch registers */
	dmtc0	zero, CP0_WATCHLO
	dmtc0	zero, CP0_WATCHHI

	/* WP(Watch Pending), SW0/1 should be cleared */
	mtc0	zero, CP0_CAUSE

	setup_c0_status ST0_KX 0

	/* Init Timer */
	mtc0	zero, CP0_COUNT
	mtc0	zero, CP0_COMPARE

#ifndef CONFIG_SKIP_LOWLEVEL_INIT
	/* CONFIG0 register */
	dli	t0, CONF_CM_UNCACHED
	mtc0	t0, CP0_CONFIG
#endif
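
	/*
	 * The low three bits of CP0 Config form the K0 field, which selects
	 * the cacheability of kseg0; writing CONF_CM_UNCACHED therefore
	 * makes the CPU run uncached until the caches have been set up and
	 * re-enabled further down.
	 */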

	/*
	 * Initialize $gp. The bal instruction is forced to 8-byte alignment
	 * so that the assembler does not insert padding nops between bal
	 * and the .dword holding _gp; this keeps both the stored _gp value
	 * and ra 8-byte aligned.
	 */
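	/*
	 * bal sets ra to the address of the instruction 8 bytes past
	 * itself, which is exactly where the .dword below sits, so the ld
	 * picks up the link-time value of _gp no matter where this code is
	 * currently executing from.
	 */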
	.align	3
	bal	1f
	nop
	.dword	_gp
1:
	ld	gp, 0(ra)

#ifndef CONFIG_SKIP_LOWLEVEL_INIT
	/* Initialize any external memory */
	dla	t9, lowlevel_init
	jalr	t9
	nop

	/* Initialize caches... */
	dla	t9, mips_cache_reset
	jalr	t9
	nop

	/* ... and enable them */
	dli	t0, CONFIG_SYS_MIPS_CACHE_MODE
	mtc0	t0, CP0_CONFIG
#endif

	/* Set up temporary stack */
	dli	sp, CONFIG_SYS_SDRAM_BASE + CONFIG_SYS_INIT_SP_OFFSET
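	/*
	 * The stack set up above lives in SDRAM and is only used while
	 * board_init_f() runs; relocate_code() later installs the final
	 * stack that board_init_f() passes in a0.
	 */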

	dla	t9, board_init_f
	jr	t9
	nop

/*
 * void relocate_code (addr_sp, gd, addr_moni)
 *
 * This "function" does not return; instead it continues in RAM
 * after relocating the monitor code.
 *
 * a0 = addr_sp
 * a1 = gd
 * a2 = destination address
 */
	.globl	relocate_code
	.ent	relocate_code
relocate_code:
	move	sp, a0			# set new stack pointer

	move	s0, a1			# save gd in s0
	move	s2, a2			# save destination address in s2

	dli	t0, CONFIG_SYS_MONITOR_BASE
	dsub	s1, s2, t0		# s1 <-- relocation offset

	dla	t3, in_ram
	ld	t2, -24(t3)		# t2 <-- __image_copy_end
	move	t1, a2

	dadd	gp, s1			# adjust gp

	/*
	 * t0 = source address
	 * t1 = target address
	 * t2 = source end address
	 */
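	/*
	 * One 32-bit word is copied per iteration; the target-pointer
	 * increment sits in the branch delay slot, so it also executes on
	 * the final pass.  Copying stops once t0 reaches __image_copy_end.
	 */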
1:
	lw	t3, 0(t0)
	sw	t3, 0(t1)
	daddu	t0, 4
	blt	t0, t2, 1b
	daddu	t1, 4

	/* If caches were enabled, we would have to flush them here. */
	dsub	a1, t1, s2		# a1 <-- size
	dla	t9, flush_cache
	jalr	t9
	move	a0, s2			# a0 <-- destination address
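	/*
	 * The move above executes in the jalr delay slot, so flush_cache()
	 * is entered with a0 = destination address and a1 = copied size.
	 */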

	/* Jump to where we've relocated ourselves */
	daddi	t0, s2, in_ram - _start
	jr	t0
	nop

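	/*
	 * These link-time values sit directly in front of in_ram so that,
	 * after the jump above, they can be read back with negative
	 * offsets from t0, which now holds the RAM address of in_ram.
	 */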
	.dword	__rel_dyn_end
	.dword	__rel_dyn_start
	.dword	__image_copy_end
	.dword	_GLOBAL_OFFSET_TABLE_
	.dword	num_got_entries
in_ram:
	/*
	 * Now we want to update GOT.
	 *
	 * GOT[0] is reserved. GOT[1] is also reserved for the dynamic object
	 * generated by GNU ld. Skip these reserved entries from relocation.
	 */
	ld	t3, -8(t0)		# t3 <-- num_got_entries
	ld	t8, -16(t0)		# t8 <-- _GLOBAL_OFFSET_TABLE_
	dadd	t8, s1			# t8 now holds relocated _G_O_T_
	daddi	t8, t8, 16		# skipping first two entries
	dli	t2, 2
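
	/*
	 * Each GOT entry is 8 bytes on MIPS64.  Null entries are left as
	 * they are; every other entry gets the relocation offset added.
	 */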
1:
	ld	t1, 0(t8)
	beqz	t1, 2f
	dadd	t1, s1
	sd	t1, 0(t8)
2:
	daddi	t2, 1
	blt	t2, t3, 1b
	daddi	t8, 8

	/* Update dynamic relocations */
	ld	t1, -32(t0)		# t1 <-- __rel_dyn_start
	ld	t2, -40(t0)		# t2 <-- __rel_dyn_end

	b	2f			# skip first reserved entry
	daddi	t1, 16

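	/*
	 * Each .rel.dyn entry is an Elf64_Rel: an 8-byte r_offset followed
	 * by an 8-byte r_info.  t1 is advanced past an entry before that
	 * entry is processed, so its packed ssym/type bytes are read at
	 * -4(t1) and its r_offset at -16(t1).  Only entries whose type is
	 * R_MIPS_REL32 (0x03) combined with R_MIPS_64 (0x12) are adjusted;
	 * everything else is skipped.
	 */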
1:
	lw	t8, -4(t1)		# t8 <-- relocation info

	dli	t3, MIPS64_R_INFO(0x00, 0x00, 0x12, 0x03)
	bne	t8, t3, 2f		# skip non R_MIPS_REL32 entries
	nop

	ld	t3, -16(t1)		# t3 <-- location to fix up in FLASH

	ld	t8, 0(t3)		# t8 <-- original pointer
	dadd	t8, s1			# t8 <-- adjusted pointer

	dadd	t3, s1			# t3 <-- location to fix up in RAM
	sd	t8, 0(t3)

2:
	blt	t1, t2, 1b
	daddi	t1, 16			# each rel.dyn entry is 16 bytes

	/*
	 * Clear BSS
	 *
	 * GOT is now relocated. Thus __bss_start and __bss_end can be
	 * accessed directly via $gp.
	 */
	dla	t1, __bss_start		# t1 <-- __bss_start
	dla	t2, __bss_end		# t2 <-- __bss_end

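	/*
	 * The loop below stores one doubleword (8 bytes) per iteration and
	 * assumes that the linker script keeps __bss_start and __bss_end
	 * 8-byte aligned.  Since the pointer increment sits in the branch
	 * delay slot, the doubleword at __bss_end itself is cleared too.
	 */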
1:
	sd	zero, 0(t1)
	blt	t1, t2, 1b
	daddi	t1, 8

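	/*
	 * Relocation is complete: enter board_init_r() in the relocated
	 * image with a0 = gd and a1 = destination address (set up in the
	 * jr delay slot).  board_init_r() does not return.
	 */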
	move	a0, s0			# a0 <-- gd
	dla	t9, board_init_r
	jr	t9
	move	a1, s2

	.end	relocate_code