author    Stefan Reinauer <stepan@coresystems.de>  2010-04-27 06:56:47 +0000
committer Stefan Reinauer <stepan@openbios.org>    2010-04-27 06:56:47 +0000
commit    14e22779625de673569c7b950ecc2753fb915b31 (patch)
tree      14a6ed759e116e9e6e9bbd7f499b74b96d6cc072 /src/cpu/x86
parent    0e1e8065e303030c39c3f2c27e5d32ee58a16c66 (diff)
Since some people disapprove of white space cleanups mixed in regular commits
while others dislike them being extra commits, let's clean them up once and
for all for the existing code. If it's ugly, let it only be ugly once :-)

Signed-off-by: Stefan Reinauer <stepan@coresystems.de>
Acked-by: Stefan Reinauer <stepan@coresystems.de>

git-svn-id: svn://svn.coreboot.org/coreboot/trunk@5507 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
Diffstat (limited to 'src/cpu/x86')
-rw-r--r--  src/cpu/x86/16bit/entry16.inc  | 10
-rw-r--r--  src/cpu/x86/16bit/reset16.lds  |  2
-rw-r--r--  src/cpu/x86/32bit/entry32.inc  | 14
-rw-r--r--  src/cpu/x86/lapic/lapic.c      | 28
-rw-r--r--  src/cpu/x86/lapic/secondary.S  |  2
-rw-r--r--  src/cpu/x86/mtrr/earlymtrr.c   |  4
-rw-r--r--  src/cpu/x86/mtrr/mtrr.c        | 32
-rw-r--r--  src/cpu/x86/pae/pgtbl.c        |  4
-rw-r--r--  src/cpu/x86/smm/smiutil.c      |  4
-rw-r--r--  src/cpu/x86/smm/smm.ld         |  4
-rw-r--r--  src/cpu/x86/smm/smmhandler.S   | 30
-rw-r--r--  src/cpu/x86/smm/smmrelocate.S  | 12
-rw-r--r--  src/cpu/x86/sse_disable.inc    |  2
-rw-r--r--  src/cpu/x86/tsc/delay_tsc.c    | 10
14 files changed, 79 insertions, 79 deletions
diff --git a/src/cpu/x86/16bit/entry16.inc b/src/cpu/x86/16bit/entry16.inc
index 674315fbd2..1eb92c82d1 100644
--- a/src/cpu/x86/16bit/entry16.inc
+++ b/src/cpu/x86/16bit/entry16.inc
@@ -21,8 +21,8 @@ it with the version available from LANL.
*/
-/** Start code to put an i386 or later processor into 32-bit
- * protected mode.
+/** Start code to put an i386 or later processor into 32-bit
+ * protected mode.
*/
/* .section ".rom.text" */
@@ -31,7 +31,7 @@ it with the version available from LANL.
.globl _start
.type _start, @function
-_start:
+_start:
cli
/* Save the BIST result */
movl %eax, %ebp
@@ -68,13 +68,13 @@ _start:
* practical problem of being able to write code that can
* be relocated.
*
- * An lgdt call before we have memory enabled cannot be
+ * An lgdt call before we have memory enabled cannot be
* position independent, as we cannot execute a call
* instruction to get our current instruction pointer.
* So while this code is relocatable, it isn't arbitrarily
* relocatable.
*
- * The criteria for relocation have been relaxed to their
+ * The criteria for relocation have been relaxed to their
* utmost, so that we can use the same code for both
* our initial entry point and startup of the second cpu.
* The code assumes when executing at _start that:
diff --git a/src/cpu/x86/16bit/reset16.lds b/src/cpu/x86/16bit/reset16.lds
index 929740bd4c..cec03d6bc6 100644
--- a/src/cpu/x86/16bit/reset16.lds
+++ b/src/cpu/x86/16bit/reset16.lds
@@ -12,5 +12,5 @@ SECTIONS {
*(.reset)
. = 15 ;
BYTE(0x00);
- }
+ }
}
diff --git a/src/cpu/x86/32bit/entry32.inc b/src/cpu/x86/32bit/entry32.inc
index bc5e4436ae..4e0f3b953a 100644
--- a/src/cpu/x86/32bit/entry32.inc
+++ b/src/cpu/x86/32bit/entry32.inc
@@ -18,23 +18,23 @@ gdtptr:
.word 0
/* selgdt 0x08, flat code segment */
- .word 0xffff, 0x0000
- .byte 0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, So we get 4Gbytes for limit */
+ .word 0xffff, 0x0000
+ .byte 0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, So we get 4Gbytes for limit */
/* selgdt 0x10,flat data segment */
- .word 0xffff, 0x0000
+ .word 0xffff, 0x0000
.byte 0x00, 0x93, 0xcf, 0x00
gdt_end:
-
+
/*
- * When we come here we are in protected mode. We expand
+ * When we come here we are in protected mode. We expand
* the stack and copy the data segment from ROM to the
* memory.
*
* After that, we call the chipset bootstrap routine that
- * does what is left of the chipset initialization.
+ * does what is left of the chipset initialization.
*
* NOTE aligned to 4 so that we are sure that the prefetch
* cache will be reloaded.
@@ -45,7 +45,7 @@ protected_start:
lgdt %cs:gdtptr
ljmp $ROM_CODE_SEG, $__protected_start
-
+
__protected_start:
/* Save the BIST value */
movl %eax, %ebp
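
The two descriptors above are the classic flat 4GiB code and data segments. A
minimal standalone sketch (not coreboot code) that unpacks the code-segment
bytes quoted in the hunk:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* .word 0xffff, 0x0000 / .byte 0x00, 0x9b, 0xcf, 0x00 in memory order */
	const uint8_t d[8] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x9b, 0xcf, 0x00 };

	uint32_t base  = d[2] | d[3] << 8 | d[4] << 16 | (uint32_t)d[7] << 24;
	uint32_t limit = d[0] | d[1] << 8 | (d[6] & 0x0f) << 16;
	int g = d[6] >> 7;	/* G=1: limit counts 4KiB pages */

	/* 0xfffff pages * 4KiB = 4GiB -- the "4Gbytes for limit" note above */
	printf("base=0x%08x limit=0x%05x G=%d access=0x%02x\n",
	       base, limit, g, d[5]);	/* 0x9b: present, ring 0, code, readable */
	return 0;
}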
diff --git a/src/cpu/x86/lapic/lapic.c b/src/cpu/x86/lapic/lapic.c
index 555d74eecc..c8f83b0dd0 100644
--- a/src/cpu/x86/lapic/lapic.c
+++ b/src/cpu/x86/lapic/lapic.c
@@ -5,11 +5,11 @@
void setup_lapic(void)
{
- /* this is so interrupts work. This is very limited scope --
+ /* this is so interrupts work. This is very limited scope --
* linux will do better later, we hope ...
*/
- /* this is the first way we learned to do it. It fails on real SMP
- * stuff. So we have to do things differently ...
+ /* this is the first way we learned to do it. It fails on real SMP
+ * stuff. So we have to do things differently ...
* see the Intel mp1.4 spec, page A-3
*/
@@ -33,25 +33,25 @@ void setup_lapic(void)
lapic_read_around(LAPIC_TASKPRI) & ~LAPIC_TPRI_MASK);
/* Put the local apic in virtual wire mode */
- lapic_write_around(LAPIC_SPIV,
+ lapic_write_around(LAPIC_SPIV,
(lapic_read_around(LAPIC_SPIV) & ~(LAPIC_VECTOR_MASK))
| LAPIC_SPIV_ENABLE);
- lapic_write_around(LAPIC_LVT0,
- (lapic_read_around(LAPIC_LVT0) &
- ~(LAPIC_LVT_MASKED | LAPIC_LVT_LEVEL_TRIGGER |
- LAPIC_LVT_REMOTE_IRR | LAPIC_INPUT_POLARITY |
+ lapic_write_around(LAPIC_LVT0,
+ (lapic_read_around(LAPIC_LVT0) &
+ ~(LAPIC_LVT_MASKED | LAPIC_LVT_LEVEL_TRIGGER |
+ LAPIC_LVT_REMOTE_IRR | LAPIC_INPUT_POLARITY |
LAPIC_SEND_PENDING |LAPIC_LVT_RESERVED_1 |
LAPIC_DELIVERY_MODE_MASK))
- | (LAPIC_LVT_REMOTE_IRR |LAPIC_SEND_PENDING |
+ | (LAPIC_LVT_REMOTE_IRR |LAPIC_SEND_PENDING |
LAPIC_DELIVERY_MODE_EXTINT)
);
- lapic_write_around(LAPIC_LVT1,
- (lapic_read_around(LAPIC_LVT1) &
- ~(LAPIC_LVT_MASKED | LAPIC_LVT_LEVEL_TRIGGER |
- LAPIC_LVT_REMOTE_IRR | LAPIC_INPUT_POLARITY |
+ lapic_write_around(LAPIC_LVT1,
+ (lapic_read_around(LAPIC_LVT1) &
+ ~(LAPIC_LVT_MASKED | LAPIC_LVT_LEVEL_TRIGGER |
+ LAPIC_LVT_REMOTE_IRR | LAPIC_INPUT_POLARITY |
LAPIC_SEND_PENDING |LAPIC_LVT_RESERVED_1 |
LAPIC_DELIVERY_MODE_MASK))
- | (LAPIC_LVT_REMOTE_IRR |LAPIC_SEND_PENDING |
+ | (LAPIC_LVT_REMOTE_IRR |LAPIC_SEND_PENDING |
LAPIC_DELIVERY_MODE_NMI)
);
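
The read-modify-write pairs above put the local APIC into virtual wire mode:
LINT0 passes the 8259's INTR along as ExtINT, LINT1 delivers NMI. A condensed
sketch of that intent, reusing the accessors and constants visible in the hunk
(illustrative only; the real code above preserves more of the existing
register bits):

static void virtual_wire_mode_sketch(void)
{
	/* software-enable the APIC, keeping the spurious vector field */
	lapic_write_around(LAPIC_SPIV,
		(lapic_read_around(LAPIC_SPIV) & ~LAPIC_VECTOR_MASK)
		| LAPIC_SPIV_ENABLE);

	/* LINT0: unmasked, edge triggered, ExtINT delivery */
	lapic_write_around(LAPIC_LVT0, LAPIC_DELIVERY_MODE_EXTINT);

	/* LINT1: unmasked, edge triggered, NMI delivery */
	lapic_write_around(LAPIC_LVT1, LAPIC_DELIVERY_MODE_NMI);
}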
diff --git a/src/cpu/x86/lapic/secondary.S b/src/cpu/x86/lapic/secondary.S
index dafc9a561c..5c1e7607e8 100644
--- a/src/cpu/x86/lapic/secondary.S
+++ b/src/cpu/x86/lapic/secondary.S
@@ -26,7 +26,7 @@ _secondary_start:
movl %eax, %cr0
ljmpl $0x10, $1f
-1:
+1:
.code32
movw $0x18, %ax
movw %ax, %ds
diff --git a/src/cpu/x86/mtrr/earlymtrr.c b/src/cpu/x86/mtrr/earlymtrr.c
index d97cd93deb..1cbc544350 100644
--- a/src/cpu/x86/mtrr/earlymtrr.c
+++ b/src/cpu/x86/mtrr/earlymtrr.c
@@ -89,13 +89,13 @@ static void do_early_mtrr_init(const unsigned long *mtrr_msrs)
set_var_mtrr(1, REAL_XIP_ROM_BASE, CONFIG_XIP_ROM_SIZE, MTRR_TYPE_WRBACK);
#endif
- /* Set the default memory type and enable fixed and variable MTRRs
+ /* Set the default memory type and enable fixed and variable MTRRs
*/
/* Enable Variable MTRRs */
msr.hi = 0x00000000;
msr.lo = 0x00000800;
wrmsr(MTRRdefType_MSR, msr);
-
+
}
static inline void early_mtrr_init(void)
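
For reference, the wrmsr above writes IA32_MTRR_DEF_TYPE; 0x00000800 sets only
the global enable bit, with the default memory type left at uncacheable (a
sketch of the standard SDM bit layout, macro names mine):

#define MTRR_DEF_TYPE_EN	(1u << 11)	/* E: enable MTRRs globally */
#define MTRR_DEF_TYPE_FIX_EN	(1u << 10)	/* FE: enable fixed-range MTRRs */
#define MTRR_DEF_TYPE_UC	0x00		/* bits 7:0: default type, UC */
/* msr.lo = 0x00000800 == MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_UC */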
diff --git a/src/cpu/x86/mtrr/mtrr.c b/src/cpu/x86/mtrr/mtrr.c
index 94d7ca7d35..d44687a0e9 100644
--- a/src/cpu/x86/mtrr/mtrr.c
+++ b/src/cpu/x86/mtrr/mtrr.c
@@ -68,7 +68,7 @@ static void enable_var_mtrr(void)
/* setting variable mtrr, comes from linux kernel source */
static void set_var_mtrr(
- unsigned int reg, unsigned long basek, unsigned long sizek,
+ unsigned int reg, unsigned long basek, unsigned long sizek,
unsigned char type, unsigned address_bits)
{
msr_t base, mask;
@@ -81,7 +81,7 @@ static void set_var_mtrr(
// do this.
if (sizek == 0) {
disable_cache();
-
+
msr_t zero;
zero.lo = zero.hi = 0;
/* The invalid bit is kept in the mask, so we simply clear the
@@ -109,8 +109,8 @@ static void set_var_mtrr(
mask.lo = 0;
}
- // it is recommended that we disable and enable cache when we
- // do this.
+ // it is recommended that we disable and enable cache when we
+ // do this.
disable_cache();
/* Bit 32-35 of MTRRphysMask should be set to 1 */
@@ -228,7 +228,7 @@ static unsigned fixed_mtrr_index(unsigned long addrk)
return index;
}
-static unsigned int range_to_mtrr(unsigned int reg,
+static unsigned int range_to_mtrr(unsigned int reg,
unsigned long range_startk, unsigned long range_sizek,
unsigned long next_range_startk, unsigned char type, unsigned address_bits)
{
@@ -253,7 +253,7 @@ static unsigned int range_to_mtrr(unsigned int reg,
unsigned long sizek;
/* Compute the maximum size I can make a range */
max_align = fls(range_startk);
- align = fms(range_sizek);
+ align = fms(range_sizek);
if (align > max_align) {
align = max_align;
}
@@ -274,7 +274,7 @@ static unsigned int range_to_mtrr(unsigned int reg,
return reg;
}
-static unsigned long resk(uint64_t value)
+static unsigned long resk(uint64_t value)
{
unsigned long resultk;
if (value < (1ULL << 42)) {
@@ -298,7 +298,7 @@ static void set_fixed_mtrr_resource(void *gp, struct device *dev, struct resourc
printk(BIOS_DEBUG, "Setting fixed MTRRs(%d-%d) Type: WB\n",
start_mtrr, last_mtrr);
set_fixed_mtrrs(start_mtrr, last_mtrr, MTRR_TYPE_WRBACK);
-
+
}
#ifndef CONFIG_VAR_MTRR_HOLE
@@ -343,10 +343,10 @@ void set_var_mtrr_resource(void *gp, struct device *dev, struct resource *res)
return;
}
#endif
- state->reg = range_to_mtrr(state->reg, state->range_startk,
+ state->reg = range_to_mtrr(state->reg, state->range_startk,
state->range_sizek, basek, MTRR_TYPE_WRBACK, state->address_bits);
#if CONFIG_VAR_MTRR_HOLE
- state->reg = range_to_mtrr(state->reg, state->hole_startk,
+ state->reg = range_to_mtrr(state->reg, state->hole_startk,
state->hole_sizek, basek, MTRR_TYPE_UNCACHEABLE, state->address_bits);
#endif
state->range_startk = 0;
@@ -356,7 +356,7 @@ void set_var_mtrr_resource(void *gp, struct device *dev, struct resource *res)
state->hole_sizek = 0;
#endif
}
- /* Allocate an msr */
+ /* Allocate an msr */
printk(BIOS_SPEW, " Allocate an msr - basek = %08lx, sizek = %08lx,\n", basek, sizek);
state->range_startk = basek;
state->range_sizek = sizek;
@@ -365,7 +365,7 @@ void set_var_mtrr_resource(void *gp, struct device *dev, struct resource *res)
void x86_setup_fixed_mtrrs(void)
{
/* Try this the simple way of incrementally adding together
- * mtrrs. If this doesn't work out we can get smart again
+ * mtrrs. If this doesn't work out we can get smart again
* and clear out the mtrrs.
*/
@@ -390,20 +390,20 @@ void x86_setup_fixed_mtrrs(void)
void x86_setup_var_mtrrs(unsigned address_bits)
/* this routine needs to know how many address bits a given processor
- * supports. CPUs get grumpy when you set too many bits in
+ * supports. CPUs get grumpy when you set too many bits in
* their mtrr registers :( I would generically call cpuid here
* and find out how many are physically supported, but some cpus are
* buggy, and report more bits than they actually support.
*/
{
/* Try this the simple way of incrementally adding together
- * mtrrs. If this doesn't work out we can get smart again
+ * mtrrs. If this doesn't work out we can get smart again
* and clear out the mtrrs.
*/
struct var_mtrr_state var_state;
/* Cache as many memory areas as possible */
- /* FIXME is there an algorithm for computing the optimal set of mtrrs?
+ /* FIXME is there an algorithm for computing the optimal set of mtrrs?
* In some cases it is definitely possible to do better.
*/
var_state.range_startk = 0;
@@ -431,7 +431,7 @@ void x86_setup_var_mtrrs(unsigned address_bits)
}
#endif
/* Write the last range */
- var_state.reg = range_to_mtrr(var_state.reg, var_state.range_startk,
+ var_state.reg = range_to_mtrr(var_state.reg, var_state.range_startk,
var_state.range_sizek, 0, MTRR_TYPE_WRBACK, var_state.address_bits);
#if CONFIG_VAR_MTRR_HOLE
var_state.reg = range_to_mtrr(var_state.reg, var_state.hole_startk,
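
The base/mask arithmetic in set_var_mtrr follows the standard variable-MTRR
encoding. A self-contained worked example (illustrative values, assuming 36
physical address bits) that also shows why bits 32-35 of MTRRphysMask end up
set, as the comment above requires:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned address_bits = 36;		/* assumption for this example */
	const uint64_t base = 0, size = 512ull << 20;	/* 512MiB WB range at 0 */
	const uint8_t  type = 6;			/* MTRR_TYPE_WRBACK */

	uint64_t physbase = base | type;
	/* mask covers bits [address_bits-1:12]; bit 11 is the valid bit */
	uint64_t physmask = (~(size - 1) & ((1ull << address_bits) - 1))
			  | (1ull << 11);

	printf("MTRRphysBase = 0x%010llx\n", (unsigned long long)physbase);
	printf("MTRRphysMask = 0x%010llx\n", (unsigned long long)physmask);
	/* prints 0x0000000006 and 0x0fe0000800: bits 32-35 are all 1 */
	return 0;
}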
diff --git a/src/cpu/x86/pae/pgtbl.c b/src/cpu/x86/pae/pgtbl.c
index 4440d7bd8c..0bec349444 100644
--- a/src/cpu/x86/pae/pgtbl.c
+++ b/src/cpu/x86/pae/pgtbl.c
@@ -43,7 +43,7 @@ static void paging_on(void *pdp)
);
}
-void *map_2M_page(unsigned long page)
+void *map_2M_page(unsigned long page)
{
struct pde {
uint32_t addr_lo;
@@ -56,7 +56,7 @@ void *map_2M_page(unsigned long page)
#if (CONFIG_RAMTOP>0x100000) && (CONFIG_RAMBASE<0x100000) && ((CONFIG_CONSOLE_VGA==1) || (CONFIG_PCI_ROM_RUN == 1))
/*
-	pgtbl is too big, so use the last 1M below CONFIG_LB_MEM_TOP; otherwise, on an 8-way dual core with VGA support, the stack and heap get pushed across 0xa0000,
+	pgtbl is too big, so use the last 1M below CONFIG_LB_MEM_TOP; otherwise, on an 8-way dual core with VGA support, the stack and heap get pushed across 0xa0000,
	and that region needs to be used as the VGA font buffer. Please make sure to set CONFIG_RAMTOP=0x200000 in the MB config
*/
struct pg_table *pgtbl = (struct pg_table*)0x100000; //1M
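
map_2M_page builds PAE page tables out of the 64-bit pde split into
addr_lo/addr_hi above. A minimal sketch (my field names, standard PAE
encoding) of one 2MiB large-page directory entry:

#include <stdint.h>

#define PDE_P	(1u << 0)	/* present */
#define PDE_RW	(1u << 1)	/* writable */
#define PDE_PS	(1u << 7)	/* page size: maps a 2MiB page directly */

static void set_pde_2M(uint32_t *addr_lo, uint32_t *addr_hi, uint64_t phys)
{
	/* phys must be 2MiB aligned; bits 31:21 go in the low word */
	*addr_lo = ((uint32_t)phys & 0xffe00000) | PDE_PS | PDE_RW | PDE_P;
	*addr_hi = (uint32_t)(phys >> 32);	/* upper physical address bits */
}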
diff --git a/src/cpu/x86/smm/smiutil.c b/src/cpu/x86/smm/smiutil.c
index 9a2dfa599e..980ea69f51 100644
--- a/src/cpu/x86/smm/smiutil.c
+++ b/src/cpu/x86/smm/smiutil.c
@@ -72,14 +72,14 @@ static int uart_can_tx_byte(void)
static void uart_wait_to_tx_byte(void)
{
- while(!uart_can_tx_byte())
+ while(!uart_can_tx_byte())
;
}
static void uart_wait_until_sent(void)
{
while(!(inb(CONFIG_TTYS0_BASE + UART_LSR) & 0x40))
- ;
+ ;
}
static void uart_tx_byte(unsigned char data)
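
Both polling loops above watch the 16550 line status register; the bits
involved are standard UART architecture (macro names are conventional, not
from this file). The 0x40 test in uart_wait_until_sent() is TEMT; THRE (0x20)
is the usual "can accept another byte" condition behind uart_can_tx_byte():

#define UART_LSR_THRE	0x20	/* TX holding register empty: can queue a byte */
#define UART_LSR_TEMT	0x40	/* transmitter empty: FIFO and shift register drained */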
diff --git a/src/cpu/x86/smm/smm.ld b/src/cpu/x86/smm/smm.ld
index 1b25c2d2f8..d5c7127a15 100644
--- a/src/cpu/x86/smm/smm.ld
+++ b/src/cpu/x86/smm/smm.ld
@@ -4,7 +4,7 @@ CPUS = 4;
SECTIONS
{
- /* This is the actual SMM handler.
+ /* This is the actual SMM handler.
*
* We just put code, rodata, data and bss all in a row.
*/
@@ -43,7 +43,7 @@ SECTIONS
. = 0xa8000 - (( CPUS - 1) * 0x400);
.jumptable : {
*(.jumptable)
- }
+ }
/DISCARD/ : {
*(.comment)
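
The location counter assignment above places the jump table below the per-CPU
entry stubs; the arithmetic for the CPUS = 4 case defined at the top of this
script, as a worked example (not part of the file):

/* 0xa8000 - (CPUS - 1) * 0x400 with CPUS == 4:
 *   0xa8000 - 3 * 0x400 = 0xa8000 - 0xc00 = 0xa7400,
 * matching the 0xa7400 boundary in the smmhandler.S layout diagram below. */
unsigned long jumptable_base(unsigned cpus)
{
	return 0xa8000ul - (cpus - 1) * 0x400ul;
}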
diff --git a/src/cpu/x86/smm/smmhandler.S b/src/cpu/x86/smm/smmhandler.S
index b443e5c1fe..3dd0b14c5a 100644
--- a/src/cpu/x86/smm/smmhandler.S
+++ b/src/cpu/x86/smm/smmhandler.S
@@ -38,11 +38,11 @@
* | |
* | |
* +--------------------------------+ 0xa8400
- * | SMM Entry Node 0 (+ stack) |
+ * | SMM Entry Node 0 (+ stack) |
* +--------------------------------+ 0xa8000
- * | SMM Entry Node 1 (+ stack) |
- * | SMM Entry Node 2 (+ stack) |
- * | SMM Entry Node 3 (+ stack) |
+ * | SMM Entry Node 1 (+ stack) |
+ * | SMM Entry Node 2 (+ stack) |
+ * | SMM Entry Node 3 (+ stack) |
* | ... |
* +--------------------------------+ 0xa7400
* | |
@@ -56,7 +56,7 @@
/* SMM_HANDLER_OFFSET is the 16bit offset within the ASEG
* at which smm_handler_start lives. At the moment the handler
- * lives right at 0xa0000, so the offset is 0.
+ * lives right at 0xa0000, so the offset is 0.
*/
#define SMM_HANDLER_OFFSET 0x0000
@@ -101,15 +101,15 @@ smm_handler_start:
movl $LAPIC_ID, %esi
movl (%esi), %ecx
shr $24, %ecx
-
+
/* calculate stack offset by multiplying the APIC ID
* by 1024 (0x400), and save that offset in ebp.
*/
shl $10, %ecx
movl %ecx, %ebp
- /* We put the stack for each core right above
- * its SMM entry point. Core 0 starts at 0xa8000,
+ /* We put the stack for each core right above
+ * its SMM entry point. Core 0 starts at 0xa8000,
* we spare 0x10 bytes for the jump to be sure.
*/
movl $0xa8010, %eax
@@ -155,11 +155,11 @@ smm_gdt:
.long 0x00000000, 0x00000000
/* gdt selector 0x08, flat code segment */
- .word 0xffff, 0x0000
- .byte 0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, 4GB limit */
+ .word 0xffff, 0x0000
+ .byte 0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, 4GB limit */
/* gdt selector 0x10, flat data segment */
- .word 0xffff, 0x0000
+ .word 0xffff, 0x0000
.byte 0x00, 0x93, 0xcf, 0x00
smm_gdt_end:
@@ -168,7 +168,7 @@ smm_gdt_end:
.section ".jumptable", "a", @progbits
/* This is the SMM jump table. All cores use the same SMM handler
- * for simplicity. But SMM Entry needs to be different due to the
+ * for simplicity. But SMM Entry needs to be different due to the
* save state area. The jump table makes sure all CPUs jump into the
* real handler on SMM entry.
*/
@@ -185,13 +185,13 @@ smm_gdt_end:
.code16
jumptable:
/* core 3 */
- ljmp $0xa000, $SMM_HANDLER_OFFSET
+ ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
/* core 2 */
- ljmp $0xa000, $SMM_HANDLER_OFFSET
+ ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
/* core 1 */
- ljmp $0xa000, $SMM_HANDLER_OFFSET
+ ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
/* core 0 */
ljmp $0xa000, $SMM_HANDLER_OFFSET
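
In C terms, the stack setup above derives a per-core stack top from the APIC
ID (a sketch; the final subtraction against 0xa8010 is inferred from the
layout comment, not shown in the hunk):

unsigned long smm_stack_top(unsigned apic_id)
{
	unsigned long offset = (unsigned long)apic_id << 10;	/* ID * 0x400 */
	return 0xa8010ul - offset;	/* core 0: 0xa8010, core 1: 0xa7c10, ... */
}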
diff --git a/src/cpu/x86/smm/smmrelocate.S b/src/cpu/x86/smm/smmrelocate.S
index 14fdc639bc..50a8f28c3f 100644
--- a/src/cpu/x86/smm/smmrelocate.S
+++ b/src/cpu/x86/smm/smmrelocate.S
@@ -22,7 +22,7 @@
// Make sure no stage 2 code is included:
#define __PRE_RAM__
-// FIXME: Is this piece of code southbridge specific, or
+// FIXME: Is this piece of code southbridge specific, or
// can it be cleaned up so this include is not required?
// It's needed right now because we get our PM_BASE from
// here.
@@ -73,7 +73,7 @@
* 0xa0000-0xa0400 and the stub plus stack would need to go
* at 0xa8000-0xa8100 (example for core 0). That is not enough.
*
- * This means we're basically limited to 16 cpu cores before
+ * This means we're basically limited to 16 cpu cores before
* we need to use the TSEG/HSEG for the actual SMM handler plus stack.
* When we exceed 32 cores, we also need to put SMBASE to TSEG/HSEG.
*
@@ -101,7 +101,7 @@ smm_relocation_start:
addr32 mov (%ebx), %al
cmp $0x64, %al
je 1f
-
+
mov $0x38000 + 0x7ef8, %ebx
jmp smm_relocate
1:
@@ -112,8 +112,8 @@ smm_relocate:
movl $LAPIC_ID, %esi
addr32 movl (%esi), %ecx
shr $24, %ecx
-
- /* calculate offset by multiplying the
+
+ /* calculate offset by multiplying the
* apic ID by 1024 (0x400)
*/
movl %ecx, %edx
@@ -158,7 +158,7 @@ smm_relocate:
outb %al, %dx
/* calculate ascii of cpu number. More than 9 cores? -> FIXME */
movb %cl, %al
- addb $'0', %al
+ addb $'0', %al
outb %al, %dx
mov $']', %al
outb %al, %dx
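
The addb $'0', %al step converts the CPU number to its ASCII digit for the
debug print; the FIXME above exists because this only holds for 0-9:

char cpu_digit(unsigned cpu)
{
	return (char)('0' + cpu);	/* cpu 10 would yield ':' (0x3a) */
}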
diff --git a/src/cpu/x86/sse_disable.inc b/src/cpu/x86/sse_disable.inc
index a18ea18643..a42cb41259 100644
--- a/src/cpu/x86/sse_disable.inc
+++ b/src/cpu/x86/sse_disable.inc
@@ -2,7 +2,7 @@
* Put the processor back into a reset state
* with respect to the xmm registers.
*/
-
+
xorps %xmm0, %xmm0
xorps %xmm1, %xmm1
xorps %xmm2, %xmm2
diff --git a/src/cpu/x86/tsc/delay_tsc.c b/src/cpu/x86/tsc/delay_tsc.c
index 4a8fd5287e..27c89e3a94 100644
--- a/src/cpu/x86/tsc/delay_tsc.c
+++ b/src/cpu/x86/tsc/delay_tsc.c
@@ -10,7 +10,7 @@ static unsigned long clocks_per_usec;
#if (CONFIG_TSC_X86RDTSC_CALIBRATE_WITH_TIMER2 == 1)
#define CLOCK_TICK_RATE 1193180U /* Underlying HZ */
-/* ------ Calibrate the TSC -------
+/* ------ Calibrate the TSC -------
* Too much 64-bit arithmetic here to do this cleanly in C, and for
* accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
* output busy loop as low as possible. We avoid reading the CTC registers
@@ -88,13 +88,13 @@ bad_ctc:
* this is the "no timer2" version.
* to calibrate tsc, we get a TSC reading, then do 1,000,000 outbs to port 0x80
* then we read TSC again, and divide the difference by 1,000,000
- * we have found on a wide range of machines that this gives us a
+ * we have found on a wide range of machines that this gives us a
* good microsecond value
* to +- 10%. On a dual AMD 1.6 Ghz box, it gives us .97 microseconds, and on a
* 267 Mhz. p5, it gives us 1.1 microseconds.
* also, since gcc now supports long long, we use that.
* also no unsigned long long / operator, so we play games.
- * about the only thing you can do with long longs, it seems,
+ * about the only thing you can do with long longs, it seems,
*is return them and assign them.
* (and do asm on them, yuck)
* so avoid all ops on long longs.
@@ -103,7 +103,7 @@ static unsigned long long calibrate_tsc(void)
{
unsigned long long start, end, delta;
unsigned long result, count;
-
+
printk(BIOS_SPEW, "Calibrating delay loop...\n");
start = rdtscll();
// no udivdi3 because we don't like libgcc. (only in x86emu)
@@ -130,7 +130,7 @@ static unsigned long long calibrate_tsc(void)
result = delta;
printk(BIOS_SPEW, "end %llx, start %llx\n", end, start);
printk(BIOS_SPEW, "32-bit delta %ld\n", (unsigned long) delta);
-
+
printk(BIOS_SPEW, "%s 32-bit result is %ld\n",
__func__,
result);