[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <200903152128.28663.pcacjr@gmail.com>
Date: Sun, 15 Mar 2009 21:28:28 -0300
From: Paulo Cezar A Junior <pcacjr@...il.com>
To: linux-kernel@...r.kernel.org, pcacjr@...il.com
Subject: [PATCH] mtrr: cleanup and fix coding style
There are lots of things to clean up in the MTRR code, including its coding style; this patch addresses some of them.
Signed-off-by: Paulo Cezar A Junior <pcacjr@...il.com>
---
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 915ef13..0520d68 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -68,10 +68,11 @@ void set_mtrr_ops(struct mtrr_ops *ops)
}
/* Returns non-zero if we have the write-combining memory type */
-static int have_wrcomb(void)
+static int have_wrcomb(void)
{
struct pci_dev *dev;
u8 rev;
+ int ret;
if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
/* ServerWorks LE chipsets < rev 6 have problems with write-combining
@@ -94,12 +95,19 @@ static int have_wrcomb(void)
return 0;
}
pci_dev_put(dev);
- }
- return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
+ }
+
+	if (!mtrr_if->have_wrcomb)
+		return 0;
+
+	ret = mtrr_if->have_wrcomb();
+
+	return ret;
}
/* This function returns the number of variable MTRRs */
-static void __init set_num_var_ranges(void)
+static void __init set_num_var_ranges(void)
{
unsigned long config = 0, dummy;
@@ -112,7 +120,7 @@ static void __init set_num_var_ranges(void)
num_var_ranges = config & 0xff;
}
-static void __init init_table(void)
+static void __init init_table(void)
{
int i, max;
@@ -208,6 +216,7 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
* Note that the mechanism is the same for UP systems, too; all the SMP stuff
* becomes nops.
*/
+
static void set_mtrr(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type)
{
@@ -221,7 +230,7 @@ static void set_mtrr(unsigned int reg, unsigned long base,
atomic_set(&data.count, num_booting_cpus() - 1);
/* make sure data.count is visible before unleashing other CPUs */
smp_wmb();
- atomic_set(&data.gate,0);
+ atomic_set(&data.gate, 0);
/* Start the ball rolling on other CPUs */
if (smp_call_function(ipi_handler, &data, 0) != 0)
@@ -254,7 +263,7 @@ static void set_mtrr(unsigned int reg, unsigned long base,
atomic_set(&data.count, num_booting_cpus() - 1);
smp_wmb();
- atomic_set(&data.gate,0);
+ atomic_set(&data.gate, 0);
/*
* Wait here for everyone to have seen the gate change
@@ -312,7 +321,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
if (!mtrr_if)
return -ENXIO;
- if ((error = mtrr_if->validate_add_page(base,size,type)))
+ if ((error = mtrr_if->validate_add_page(base, size, type)))
return error;
if (type >= MTRR_NUM_TYPES) {
@@ -457,6 +466,7 @@ int mtrr_add(unsigned long base, unsigned long size,
{
if (mtrr_check(base, size))
return -EINVAL;
+
return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
increment);
}
@@ -475,7 +485,6 @@ int mtrr_add(unsigned long base, unsigned long size,
* On success the register is returned, on failure a negative error
* code.
*/
-
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
int i, max;
@@ -505,7 +514,6 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
goto out;
}
}
-
if (reg >= max) {
printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
goto out;
@@ -516,12 +524,10 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
goto out;
}
-
if (mtrr_usage_table[reg] < 1) {
printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
goto out;
}
-
if (--mtrr_usage_table[reg] < 1)
set_mtrr(reg, 0, 0, 0);
@@ -531,6 +537,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
put_online_cpus();
return error;
}
+
/**
* mtrr_del - delete a memory type region
* @reg: Register returned by mtrr_add
@@ -545,7 +552,6 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
* On success the register is returned, on failure a negative error
* code.
*/
-
int mtrr_del(int reg, unsigned long base, unsigned long size)
{
if (mtrr_check(base, size))
@@ -562,7 +568,7 @@ EXPORT_SYMBOL(mtrr_del);
* stuff is done...
*/
-static void __init init_ifs(void)
+static void __init init_ifs(void)
{
#ifndef CONFIG_X86_64
amd_init_mtrr();
@@ -583,7 +589,7 @@ struct mtrr_value {
static struct mtrr_value mtrr_state[MTRR_MAX_VAR_RANGES];
-static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
+static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
{
int i;
@@ -596,7 +602,7 @@ static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
return 0;
}
-static int mtrr_restore(struct sys_device * sysdev)
+static int mtrr_restore(struct sys_device *sysdev)
{
int i;
@@ -610,8 +616,6 @@ static int mtrr_restore(struct sys_device * sysdev)
return 0;
}
-
-
static struct sysdev_driver mtrr_sysdev_driver = {
.suspend = mtrr_save,
.resume = mtrr_restore,
@@ -642,8 +646,8 @@ add_range(struct res_range *range, int nr_range, unsigned long start,
}
static int __init
-add_range_with_merge(struct res_range *range, int nr_range, unsigned long start,
- unsigned long end)
+add_range_with_merge(struct res_range *range, int nr_range,
+ unsigned long start, unsigned long end)
{
int i;
@@ -673,7 +677,8 @@ add_range_with_merge(struct res_range *range, int nr_range, unsigned long start,
}
static void __init
-subtract_range(struct res_range *range, unsigned long start, unsigned long end)
+subtract_range(struct res_range *range, unsigned long start,
+ unsigned long end)
{
int i, j;
@@ -880,10 +885,10 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
base |= type;
mask |= 0x800;
- base_lo = base & ((1ULL<<32) - 1);
+ base_lo = base & ((1ULL << 32) - 1);
base_hi = base >> 32;
- mask_lo = mask & ((1ULL<<32) - 1);
+ mask_lo = mask & ((1ULL << 32) - 1);
mask_hi = mask >> 32;
fill_mtrr_var_range(reg, base_lo, base_hi, mask_lo, mask_hi);
@@ -919,10 +924,10 @@ static unsigned long to_size_factor(unsigned long sizek, char *factorp)
char factor;
unsigned long base = sizek;
- if (base & ((1<<10) - 1))
+ if (base & ((1 << 10) - 1))
/* not MB alignment */
factor = 'K';
- else if (base & ((1<<20) - 1)) {
+ else if (base & ((1 << 20) - 1)) {
factor = 'M';
base >>= 10;
} else {
@@ -939,13 +944,14 @@ static unsigned int __init
range_to_mtrr(unsigned int reg, unsigned long range_startk,
unsigned long range_sizek, unsigned char type)
{
+ char start_factor = 'K', size_factor = 'K';
+ unsigned long start_base, size_base, sizek;
+ unsigned long max_align, align;
+
if (!range_sizek || (reg >= num_var_ranges))
return reg;
while (range_sizek) {
- unsigned long max_align, align;
- unsigned long sizek;
-
/* Compute the maximum size I can make a range */
if (range_startk)
max_align = ffs(range_startk) - 1;
@@ -958,9 +964,6 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
sizek = 1 << align;
if (debug_print) {
- char start_factor = 'K', size_factor = 'K';
- unsigned long start_base, size_base;
-
start_base = to_size_factor(range_startk, &start_factor),
size_base = to_size_factor(sizek, &size_factor),
@@ -982,16 +985,14 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
return reg;
}
-static unsigned long __init
+static unsigned long __init
range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
unsigned long sizek)
{
unsigned long hole_basek, hole_sizek;
unsigned long second_basek, second_sizek;
- unsigned long range0_basek, range0_sizek;
- unsigned long range_basek, range_sizek;
- unsigned long chunk_sizek;
- unsigned long gran_sizek;
+ unsigned long range_sizek, range0_basek, range0_sizek;
+ unsigned long range_basek, chunk_sizek, gran_sizek;
hole_basek = 0;
hole_sizek = 0;
@@ -1023,8 +1024,8 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
if (range0_sizek == state->range_sizek) {
if (debug_print)
printk(KERN_DEBUG "rangeX: %016lx - %016lx\n",
- range0_basek<<10,
- (range0_basek + state->range_sizek)<<10);
+ range0_basek << 10,
+ (range0_basek + state->range_sizek) << 10);
state->reg = range_to_mtrr(state->reg, range0_basek,
state->range_sizek, MTRR_TYPE_WRBACK);
@@ -1052,7 +1053,6 @@ second_try:
second_sizek = range_basek - basek;
if (range0_sizek > state->range_sizek) {
-
/* one hole in middle or at end */
hole_sizek = range0_sizek - state->range_sizek - second_sizek;
@@ -1066,36 +1066,33 @@ second_try:
goto second_try;
}
}
-
if (range0_sizek) {
if (debug_print)
printk(KERN_DEBUG "range0: %016lx - %016lx\n",
- range0_basek<<10,
- (range0_basek + range0_sizek)<<10);
+ range0_basek << 10,
+ (range0_basek + range0_sizek) << 10);
state->reg = range_to_mtrr(state->reg, range0_basek,
range0_sizek, MTRR_TYPE_WRBACK);
}
-
if (range0_sizek < state->range_sizek) {
/* need to handle left over */
range_sizek = state->range_sizek - range0_sizek;
if (debug_print)
printk(KERN_DEBUG "range: %016lx - %016lx\n",
- range_basek<<10,
- (range_basek + range_sizek)<<10);
+ range_basek << 10,
+ (range_basek + range_sizek) << 10);
state->reg = range_to_mtrr(state->reg, range_basek,
range_sizek, MTRR_TYPE_WRBACK);
}
-
if (hole_sizek) {
hole_basek = range_basek - hole_sizek - second_sizek;
if (debug_print)
printk(KERN_DEBUG "hole: %016lx - %016lx\n",
- hole_basek<<10,
- (hole_basek + hole_sizek)<<10);
+ hole_basek << 10,
+ (hole_basek + hole_sizek) << 10);
state->reg = range_to_mtrr(state->reg, hole_basek,
hole_sizek, MTRR_TYPE_UNCACHABLE);
@@ -1133,7 +1130,7 @@ set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn,
}
/* mininum size of mtrr block that can take hole */
-static u64 mtrr_chunk_size __initdata = (256ULL<<20);
+static u64 mtrr_chunk_size __initdata = (256ULL << 20);
static int __init parse_mtrr_chunk_size_opt(char *p)
{
@@ -1224,7 +1221,7 @@ struct mtrr_cleanup_result {
static struct mtrr_cleanup_result __initdata result[NUM_RESULT];
static unsigned long __initdata min_loss_pfn[RANGE_NUM];
-static void __init print_out_mtrr_range_state(void)
+static void __init print_out_mtrr_range_state(void)
{
int i;
char start_factor = 'K', size_factor = 'K';
@@ -1232,7 +1229,6 @@ static void __init print_out_mtrr_range_state(void)
mtrr_type type;
for (i = 0; i < num_var_ranges; i++) {
-
size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10);
if (!size_base)
continue;
@@ -1252,7 +1248,7 @@ static void __init print_out_mtrr_range_state(void)
}
}
-static int __init mtrr_need_cleanup(void)
+static int __init mtrr_need_cleanup(void)
{
int i;
mtrr_type type;
@@ -1348,7 +1344,7 @@ static void __init mtrr_print_out_one_result(int i)
lose_base, lose_factor);
}
-static int __init mtrr_search_optimal_index(void)
+static int __init mtrr_search_optimal_index(void)
{
int i;
int num_reg_good;
@@ -1425,7 +1421,7 @@ static int __init mtrr_cleanup(unsigned address_bits)
* and fixed mtrrs should take effective before var mtrr for it
*/
nr_range = add_range_with_merge(range, nr_range, 0,
- (1ULL<<(20 - PAGE_SHIFT)) - 1);
+ (1ULL << (20 - PAGE_SHIFT)) - 1);
/* sort the ranges */
sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
@@ -1451,9 +1447,9 @@ static int __init mtrr_cleanup(unsigned address_bits)
i = 0;
memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn));
memset(result, 0, sizeof(result));
- for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) {
-
- for (chunk_size = gran_size; chunk_size < (1ULL<<32);
+ for (gran_size = (1ULL << 16); gran_size < (1ULL << 32);
+ gran_size <<= 1) {
+ for (chunk_size = gran_size; chunk_size < (1ULL << 32);
chunk_size <<= 1) {
if (i >= NUM_RESULT)
@@ -1468,7 +1464,6 @@ static int __init mtrr_cleanup(unsigned address_bits)
i++;
}
}
-
/* try to find the optimal index */
index_good = mtrr_search_optimal_index();
@@ -1506,7 +1501,6 @@ static int __init mtrr_cleanup(unsigned address_bits)
#endif
static int __initdata changed_by_mtrr_cleanup;
-
static int disable_mtrr_trim;
static int __init disable_mtrr_trim_setup(char *str)
@@ -1523,20 +1517,24 @@ early_param("disable_mtrr_trim", disable_mtrr_trim_setup);
* Note this won't check if the MTRRs < 4GB where the magic bit doesn't
* apply to are wrong, but so far we don't know of any such case in the wild.
*/
+
#define Tom2Enabled (1U << 21)
#define Tom2ForceMemTypeWB (1U << 22)
-int __init amd_special_default_mtrr(void)
+int __init amd_special_default_mtrr(void)
{
u32 l, h;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return 0;
+
if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
return 0;
+
/* In case some hypervisor doesn't pass SYSCFG through */
if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
return 0;
+
/*
* Memory between 4GB and top of mem is forced WB by this magic bit.
* Reserved before K8RevF, but should be zero there.
@@ -1561,6 +1559,7 @@ static u64 __init real_trim_memory(unsigned long start_pfn,
return e820_update_range(trim_start, trim_size, E820_RAM,
E820_RESERVED);
}
+
/**
* mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
* @end_pfn: ending page frame number
@@ -1572,6 +1571,7 @@ static u64 __init real_trim_memory(unsigned long start_pfn,
* memory off the end by adjusting end_pfn, removing it from the kernel's
* allocation pools, warning the user with an obnoxious message.
*/
+
int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
{
unsigned long i, base, size, highest_pfn = 0, def, dummy;
@@ -1606,6 +1606,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
type = range_state[i].type;
if (type != MTRR_TYPE_WRBACK)
continue;
+
base = range_state[i].base_pfn;
size = range_state[i].size_pfn;
if (highest_pfn < base + size)
@@ -1624,6 +1625,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
type = range_state[i].type;
if (type >= MTRR_NUM_TYPES)
continue;
+
size = range_state[i].size_pfn;
if (!size)
type = MTRR_NUM_TYPES;
@@ -1642,10 +1644,11 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
memset(range, 0, sizeof(range));
nr_range = 0;
if (mtrr_tom2) {
- range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT));
+ range[nr_range].start = (1ULL << (32 - PAGE_SHIFT));
range[nr_range].end = (mtrr_tom2 >> PAGE_SHIFT) - 1;
if (highest_pfn < range[nr_range].end + 1)
highest_pfn = range[nr_range].end + 1;
+
nr_range++;
}
nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);
@@ -1656,9 +1659,9 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
total_trim_size += real_trim_memory(0, range[0].start);
/* check the holes */
for (i = 0; i < nr_range - 1; i++) {
- if (range[i].end + 1 < range[i+1].start)
+ if (range[i].end + 1 < range[i + 1].start)
total_trim_size += real_trim_memory(range[i].end + 1,
- range[i+1].start);
+ range[i + 1].start);
}
/* check the top */
i = nr_range - 1;
@@ -1689,7 +1692,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
* initialized (i.e. before smp_init()).
*
*/
-void __init mtrr_bp_init(void)
+void __init mtrr_bp_init(void)
{
u32 phys_addr;
init_ifs();
@@ -1727,30 +1730,33 @@ void __init mtrr_bp_init(void)
}
} else {
switch (boot_cpu_data.x86_vendor) {
- case X86_VENDOR_AMD:
- if (cpu_has_k6_mtrr) {
+ case X86_VENDOR_AMD:
+ if (cpu_has_k6_mtrr) {
/* Pre-Athlon (K6) AMD CPU MTRRs */
- mtrr_if = mtrr_ops[X86_VENDOR_AMD];
- size_or_mask = 0xfff00000; /* 32 bits */
- size_and_mask = 0;
- }
- break;
- case X86_VENDOR_CENTAUR:
- if (cpu_has_centaur_mcr) {
- mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
- size_or_mask = 0xfff00000; /* 32 bits */
- size_and_mask = 0;
- }
- break;
- case X86_VENDOR_CYRIX:
- if (cpu_has_cyrix_arr) {
- mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
- size_or_mask = 0xfff00000; /* 32 bits */
- size_and_mask = 0;
- }
- break;
- default:
- break;
+ mtrr_if = mtrr_ops[X86_VENDOR_AMD];
+ size_or_mask = 0xfff00000; /* 32 bits */
+ size_and_mask = 0;
+ }
+ break;
+
+ case X86_VENDOR_CENTAUR:
+ if (cpu_has_centaur_mcr) {
+ mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
+ size_or_mask = 0xfff00000; /* 32 bits */
+ size_and_mask = 0;
+ }
+ break;
+
+ case X86_VENDOR_CYRIX:
+ if (cpu_has_cyrix_arr) {
+ mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
+ size_or_mask = 0xfff00000; /* 32 bits */
+ size_and_mask = 0;
+ }
+ break;
+
+ default:
+ break;
}
}
@@ -1769,7 +1775,7 @@ void __init mtrr_bp_init(void)
}
}
-void mtrr_ap_init(void)
+void mtrr_ap_init(void)
{
unsigned long flags;
@@ -1793,18 +1799,18 @@ void mtrr_ap_init(void)
/**
* Save current fixed-range MTRR state of the BSP
*/
-void mtrr_save_state(void)
+void mtrr_save_state(void)
{
smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
}
-static int __init mtrr_init_finialize(void)
+static int __init mtrr_init_finialize(void)
{
if (!mtrr_if)
return 0;
if (use_intel()) {
if (!changed_by_mtrr_cleanup)
- mtrr_state_warn();
+ mtrr_state_warn();
} else {
/* The CPUs haven't MTRR and seem to not support SMP. They have
* specific drivers, we use a tricky method to support
Download attachment "forwarded message" of type "message/rfc822" (323623 bytes)
Powered by blists - more mailing lists