Message-ID: <20081113185620.GB3765@kroah.com>
Date: Thu, 13 Nov 2008 10:56:20 -0800
From: Greg KH <gregkh@...e.de>
To: linux-kernel@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>,
torvalds@...ux-foundation.org, stable@...nel.org
Subject: Re: Linux 2.6.27.6
diff --git a/Documentation/cciss.txt b/Documentation/cciss.txt
index 8244c64..48d80d9 100644
--- a/Documentation/cciss.txt
+++ b/Documentation/cciss.txt
@@ -26,6 +26,8 @@ This driver is known to work with the following cards:
* SA P410i
* SA P411
* SA P812
+ * SA P712m
+ * SA P711m
Detecting drive failures:
-------------------------
diff --git a/Makefile b/Makefile
index 4ea7b3c..1ea4453 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 27
-EXTRAVERSION = .5
+EXTRAVERSION = .6
NAME = Trembling Tortoise
# *DOCUMENTATION*
diff --git a/arch/arm/mach-pxa/include/mach/reset.h b/arch/arm/mach-pxa/include/mach/reset.h
index 9489a48..7b8842c 100644
--- a/arch/arm/mach-pxa/include/mach/reset.h
+++ b/arch/arm/mach-pxa/include/mach/reset.h
@@ -10,9 +10,12 @@
extern unsigned int reset_status;
extern void clear_reset_status(unsigned int mask);
-/*
- * register GPIO as reset generator
+/**
+ * init_gpio_reset() - register GPIO as reset generator
+ *
+ * @gpio - gpio nr
+ * @output - set gpio as out/low instead of input during normal work
*/
-extern int init_gpio_reset(int gpio);
+extern int init_gpio_reset(int gpio, int output);
#endif /* __ASM_ARCH_RESET_H */
diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
index 9996c61..1b236a6 100644
--- a/arch/arm/mach-pxa/reset.c
+++ b/arch/arm/mach-pxa/reset.c
@@ -20,7 +20,7 @@ static void do_hw_reset(void);
static int reset_gpio = -1;
-int init_gpio_reset(int gpio)
+int init_gpio_reset(int gpio, int output)
{
int rc;
@@ -30,9 +30,12 @@ int init_gpio_reset(int gpio)
goto out;
}
- rc = gpio_direction_input(gpio);
+ if (output)
+ rc = gpio_direction_output(gpio, 0);
+ else
+ rc = gpio_direction_input(gpio);
if (rc) {
- printk(KERN_ERR "Can't configure reset_gpio for input\n");
+ printk(KERN_ERR "Can't configure reset_gpio\n");
gpio_free(gpio);
goto out;
}
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index b569f3b..32cee4c 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -548,7 +548,7 @@ static void spitz_restart(char mode)
static void __init common_init(void)
{
- init_gpio_reset(SPITZ_GPIO_ON_RESET);
+ init_gpio_reset(SPITZ_GPIO_ON_RESET, 1);
pm_power_off = spitz_poweroff;
arm_pm_restart = spitz_restart;
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 9f3ef9e..130e37e 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -781,7 +781,7 @@ static void __init tosa_init(void)
gpio_set_wake(MFP_PIN_GPIO1, 1);
/* We can't pass to gpio-keys since it will drop the Reset altfunc */
- init_gpio_reset(TOSA_GPIO_ON_RESET);
+ init_gpio_reset(TOSA_GPIO_ON_RESET, 0);
pm_power_off = tosa_poweroff;
arm_pm_restart = tosa_restart;
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index 158bd96..99ec030 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -97,7 +97,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
/*
* Clean and invalidate partial last cache line.
*/
- if (end & (CACHE_LINE_SIZE - 1)) {
+ if (start < end && (end & (CACHE_LINE_SIZE - 1))) {
xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1));
xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
end &= ~(CACHE_LINE_SIZE - 1);
@@ -106,7 +106,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
/*
* Invalidate all full cache lines between 'start' and 'end'.
*/
- while (start != end) {
+ while (start < end) {
xsc3_l2_inv_pa(start);
start += CACHE_LINE_SIZE;
}
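
The loop-condition change above matters when a range starts and ends inside the same cache line: once the partial-line handling rounds start up and end down, start can land beyond end, and the old "while (start != end)" would then walk almost the whole address space. A small userspace sketch of the arithmetic, illustrative only and not part of the patch:

#include <stdio.h>

#define CACHE_LINE_SIZE 32UL

/* Simplified demo of the rounding done in xsc3_l2_inv_range(); the
 * "start < end" comparisons are the fix applied above. */
static void inv_range_demo(unsigned long start, unsigned long end)
{
	if (start & (CACHE_LINE_SIZE - 1))
		start = (start | (CACHE_LINE_SIZE - 1)) + 1;	/* round start up */
	if (start < end && (end & (CACHE_LINE_SIZE - 1)))
		end &= ~(CACHE_LINE_SIZE - 1);			/* round end down */

	while (start < end) {	/* with "!=" this could spin nearly forever */
		printf("invalidate line %#lx\n", start);
		start += CACHE_LINE_SIZE;
	}
}

int main(void)
{
	inv_range_demo(0x1008, 0x1018);	/* sub-line range: no full lines left */
	inv_range_demo(0x1000, 0x1080);	/* aligned range: four full lines */
	return 0;
}
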
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ed92864..552d2b7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1059,6 +1059,26 @@ config HIGHPTE
low memory. Setting this option will put user-space page table
entries in high memory.
+config X86_RESERVE_LOW_64K
+ bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen"
+ default y
+ help
+ Reserve the first 64K of physical RAM on BIOSes that are known
+ to potentially corrupt that memory range. A number of BIOSes are
+ known to utilize this area during suspend/resume, so it must not
+ be used by the kernel.
+
+ Set this to N if you are absolutely sure that you trust the BIOS
+ to get all its memory reservations and usages right.
+
+ If you have doubts about the BIOS (e.g. suspend/resume does not
+ work or there are kernel crashes after certain hardware hotplug
+ events) and it's not AMI or Phoenix, then you might want to enable
+ X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical
+ corruption patterns.
+
+ Say Y if unsure.
+
config MATH_EMULATION
bool
prompt "Math emulation" if X86_32
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9838f25..64b5c42 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -578,6 +578,39 @@ static struct x86_quirks default_x86_quirks __initdata;
struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;
+static int __init dmi_low_memory_corruption(const struct dmi_system_id *d)
+{
+ printk(KERN_NOTICE
+ "%s detected: BIOS may corrupt low RAM, working it around.\n",
+ d->ident);
+
+ e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED);
+ sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+
+ return 0;
+}
+
+/* List of systems that have known low memory corruption BIOS problems */
+static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
+#ifdef CONFIG_X86_RESERVE_LOW_64K
+ {
+ .callback = dmi_low_memory_corruption,
+ .ident = "AMI BIOS",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
+ },
+ },
+ {
+ .callback = dmi_low_memory_corruption,
+ .ident = "Phoenix BIOS",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+ },
+ },
+#endif
+ {}
+};
+
/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
@@ -699,6 +732,10 @@ void __init setup_arch(char **cmdline_p)
finish_e820_parsing();
+ dmi_scan_machine();
+
+ dmi_check_system(bad_bios_dmi_table);
+
#ifdef CONFIG_X86_32
probe_roms();
#endif
@@ -781,8 +818,6 @@ void __init setup_arch(char **cmdline_p)
vsmp_init();
#endif
- dmi_scan_machine();
-
io_delay_init();
/*
@@ -885,3 +920,5 @@ void __init setup_arch(char **cmdline_p)
#endif
#endif
}
+
+
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 8f98e9d..de850e9 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -639,10 +639,6 @@ void __init tsc_init(void)
cpu_khz = calibrate_cpu();
#endif
- lpj = ((u64)tsc_khz * 1000);
- do_div(lpj, HZ);
- lpj_fine = lpj;
-
printk("Detected %lu.%03lu MHz processor.\n",
(unsigned long)cpu_khz / 1000,
(unsigned long)cpu_khz % 1000);
@@ -662,6 +658,10 @@ void __init tsc_init(void)
/* now allow native_sched_clock() to use rdtsc */
tsc_disabled = 0;
+ lpj = ((u64)tsc_khz * 1000);
+ do_div(lpj, HZ);
+ lpj_fine = lpj;
+
use_tsc_delay();
/* Check and install the TSC clocksource */
dmi_check_system(bad_tsc_dmi_table);
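
For reference, the relocated assignment computes the number of TSC cycles per jiffy and uses it to seed the fine-grained delay-loop calibration (lpj_fine); it is now done only after the TSC has been accepted as usable. The arithmetic, as a standalone sketch with assumed example numbers (not taken from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tsc_khz = 2000000;	/* assume a 2.0 GHz TSC */
	unsigned int hz = 250;		/* assume CONFIG_HZ=250 */
	uint64_t lpj = tsc_khz * 1000 / hz;

	/* 2e9 cycles/s divided by 250 jiffies/s = 8,000,000 cycles per jiffy */
	printf("lpj_fine = %llu\n", (unsigned long long)lpj);
	return 0;
}
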
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 7d2edf1..25d2161 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -604,14 +604,17 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
static void dock_notify(acpi_handle handle, u32 event, void *data)
{
struct dock_station *ds = data;
+ struct acpi_device *tmp;
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
- if (!dock_in_progress(ds) && dock_present(ds)) {
+ if (!dock_in_progress(ds) && acpi_bus_get_device(ds->handle,
+ &tmp)) {
begin_dock(ds);
dock(ds);
if (!dock_present(ds)) {
printk(KERN_ERR PREFIX "Unable to dock!\n");
+ complete_dock(ds);
break;
}
atomic_notifier_call_chain(&dock_notifier_list,
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c1db2f2..2c4ccec 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -604,9 +604,6 @@ void ata_scsi_error(struct Scsi_Host *host)
if (ata_ncq_enabled(dev))
ehc->saved_ncq_enabled |= 1 << devno;
}
-
- /* set last reset timestamp to some time in the past */
- ehc->last_reset = jiffies - 60 * HZ;
}
ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
@@ -2209,17 +2206,21 @@ int ata_eh_reset(struct ata_link *link, int classify,
if (link->flags & ATA_LFLAG_NO_SRST)
softreset = NULL;
- now = jiffies;
- deadline = ata_deadline(ehc->last_reset, ATA_EH_RESET_COOL_DOWN);
- if (time_before(now, deadline))
- schedule_timeout_uninterruptible(deadline - now);
+ /* make sure each reset attempt is at least COOL_DOWN apart */
+ if (ehc->i.flags & ATA_EHI_DID_RESET) {
+ now = jiffies;
+ WARN_ON(time_after(ehc->last_reset, now));
+ deadline = ata_deadline(ehc->last_reset,
+ ATA_EH_RESET_COOL_DOWN);
+ if (time_before(now, deadline))
+ schedule_timeout_uninterruptible(deadline - now);
+ }
spin_lock_irqsave(ap->lock, flags);
ap->pflags |= ATA_PFLAG_RESETTING;
spin_unlock_irqrestore(ap->lock, flags);
ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
- ehc->last_reset = jiffies;
ata_link_for_each_dev(dev, link) {
/* If we issue an SRST then an ATA drive (not ATAPI)
@@ -2285,7 +2286,6 @@ int ata_eh_reset(struct ata_link *link, int classify,
/*
* Perform reset
*/
- ehc->last_reset = jiffies;
if (ata_is_host_link(link))
ata_eh_freeze_port(ap);
@@ -2297,6 +2297,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
reset == softreset ? "soft" : "hard");
/* mark that this EH session started with reset */
+ ehc->last_reset = jiffies;
if (reset == hardreset)
ehc->i.flags |= ATA_EHI_DID_HARDRESET;
else
@@ -2404,7 +2405,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
/* reset successful, schedule revalidation */
ata_eh_done(link, NULL, ATA_EH_RESET);
- ehc->last_reset = jiffies;
+ ehc->last_reset = jiffies; /* update to completion time */
ehc->i.action |= ATA_EH_REVALIDATE;
rc = 0;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index b73116e..2ac91b8 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -96,6 +96,8 @@ static const struct pci_device_id cciss_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
@@ -133,6 +135,8 @@ static struct board_type products[] = {
{0x3245103C, "Smart Array P410i", &SA5_access},
{0x3247103C, "Smart Array P411", &SA5_access},
{0x3249103C, "Smart Array P812", &SA5_access},
+ {0x324A103C, "Smart Array P712m", &SA5_access},
+ {0x324B103C, "Smart Array P711m", &SA5_access},
{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
@@ -1365,6 +1369,7 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
disk->first_minor = drv_index << NWD_SHIFT;
disk->fops = &cciss_fops;
disk->private_data = &h->drv[drv_index];
+ disk->driverfs_dev = &h->pdev->dev;
/* Set up queue information */
blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
@@ -3403,7 +3408,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
int i;
int j = 0;
int rc;
- int dac;
+ int dac, return_code;
+ InquiryData_struct *inq_buff = NULL;
i = alloc_cciss_hba();
if (i < 0)
@@ -3509,6 +3515,25 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
/* Turn the interrupts on so we can service requests */
hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
+ /* Get the firmware version */
+ inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
+ if (inq_buff == NULL) {
+ printk(KERN_ERR "cciss: out of memory\n");
+ goto clean4;
+ }
+
+ return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff,
+ sizeof(InquiryData_struct), 0, 0 , 0, TYPE_CMD);
+ if (return_code == IO_OK) {
+ hba[i]->firm_ver[0] = inq_buff->data_byte[32];
+ hba[i]->firm_ver[1] = inq_buff->data_byte[33];
+ hba[i]->firm_ver[2] = inq_buff->data_byte[34];
+ hba[i]->firm_ver[3] = inq_buff->data_byte[35];
+ } else { /* send command failed */
+ printk(KERN_WARNING "cciss: unable to determine firmware"
+ " version of controller\n");
+ }
+
cciss_procinit(i);
hba[i]->cciss_max_sectors = 2048;
@@ -3519,6 +3544,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
return 1;
clean4:
+ kfree(inq_buff);
#ifdef CONFIG_CISS_SCSI_TAPE
kfree(hba[i]->scsi_rejects.complete);
#endif
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 09c1434..f5d2e54 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -567,7 +567,12 @@ static int __init cpqarray_init(void)
num_cntlrs_reg++;
}
- return(num_cntlrs_reg);
+ if (num_cntlrs_reg)
+ return 0;
+ else {
+ pci_unregister_driver(&cpqarray_pci_driver);
+ return -ENODEV;
+ }
}
/* Function to find the first free pointer into our hba[] array */
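
The return-value change above restores the usual convention: a module_init() routine reports success with 0 or failure with a negative errno, never a controller count. A minimal skeleton of that pattern, with illustrative names that are not taken from this driver:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/errno.h>

/* example_pci_driver and probe_all_boards() are hypothetical stand-ins */
static int __init example_init(void)
{
	int num_registered = probe_all_boards();

	if (num_registered)
		return 0;

	/* nothing claimed: undo the registration and report failure */
	pci_unregister_driver(&example_pci_driver);
	return -ENODEV;
}
module_init(example_init);
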
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index ec249d2..d883e1b 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -270,6 +270,6 @@ static void __exit dca_exit(void)
dca_sysfs_exit();
}
-module_init(dca_init);
+subsys_initcall(dca_init);
module_exit(dca_exit);
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index bc8c6e3..3f4db54 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -519,7 +519,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
}
hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
- if (new->async_tx.callback) {
+ if (first->async_tx.callback) {
hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
if (first != new) {
/* move callback into to last desc */
@@ -611,7 +611,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
}
hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
- if (new->async_tx.callback) {
+ if (first->async_tx.callback) {
hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
if (first != new) {
/* move callback into to last desc */
@@ -801,6 +801,12 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
struct ioat_desc_sw *desc, *_desc;
int in_use_descs = 0;
+ /* Before freeing channel resources first check
+ * if they have been previously allocated for this channel.
+ */
+ if (ioat_chan->desccount == 0)
+ return;
+
tasklet_disable(&ioat_chan->cleanup_task);
ioat_dma_memcpy_cleanup(ioat_chan);
@@ -863,6 +869,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
ioat_chan->last_completion = ioat_chan->completion_addr = 0;
ioat_chan->pending = 0;
ioat_chan->dmacount = 0;
+ ioat_chan->desccount = 0;
ioat_chan->watchdog_completion = 0;
ioat_chan->last_compl_desc_addr_hw = 0;
ioat_chan->watchdog_tcp_cookie =
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c
index e763d72..9f6fe46 100644
--- a/drivers/dma/iovlock.c
+++ b/drivers/dma/iovlock.c
@@ -55,7 +55,6 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
int nr_iovecs = 0;
int iovec_len_used = 0;
int iovec_pages_used = 0;
- long err;
/* don't pin down non-user-based iovecs */
if (segment_eq(get_fs(), KERNEL_DS))
@@ -72,23 +71,21 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
local_list = kmalloc(sizeof(*local_list)
+ (nr_iovecs * sizeof (struct dma_page_list))
+ (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
- if (!local_list) {
- err = -ENOMEM;
+ if (!local_list)
goto out;
- }
/* list of pages starts right after the page list array */
pages = (struct page **) &local_list->page_list[nr_iovecs];
+ local_list->nr_iovecs = 0;
+
for (i = 0; i < nr_iovecs; i++) {
struct dma_page_list *page_list = &local_list->page_list[i];
len -= iov[i].iov_len;
- if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len)) {
- err = -EFAULT;
+ if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
goto unpin;
- }
page_list->nr_pages = num_pages_spanned(&iov[i]);
page_list->base_address = iov[i].iov_base;
@@ -109,10 +106,8 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
NULL);
up_read(&current->mm->mmap_sem);
- if (ret != page_list->nr_pages) {
- err = -ENOMEM;
+ if (ret != page_list->nr_pages)
goto unpin;
- }
local_list->nr_iovecs = i + 1;
}
@@ -122,7 +117,7 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
unpin:
dma_unpin_iovec_pages(local_list);
out:
- return ERR_PTR(err);
+ return NULL;
}
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
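
With the error path reduced to returning NULL, callers check the pointer directly instead of using IS_ERR()/PTR_ERR(); a caller-side sketch, where the fallback helper is hypothetical:

	struct dma_pinned_list *pinned;

	pinned = dma_pin_iovec_pages(iov, len);
	if (!pinned)
		return fallback_copy_to_iovec(iov, buf, len);	/* hypothetical fallback */

	/* ... hand the pinned pages to the DMA engine ... */

	dma_unpin_iovec_pages(pinned);
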
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index c40f040..8c030d9 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -113,7 +113,7 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t
if (!dev->hid_output_raw_report)
return -ENODEV;
- if (count > HID_MIN_BUFFER_SIZE) {
+ if (count > HID_MAX_BUFFER_SIZE) {
printk(KERN_WARNING "hidraw: pid %d passed too large report\n",
task_pid_nr(current));
return -EINVAL;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index b1eebf8..a58a19e 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -157,6 +157,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
min_spacing = conf->array_sectors / 2;
sector_div(min_spacing, PAGE_SIZE/sizeof(struct dev_info *));
+ if (min_spacing == 0)
+ min_spacing = 1;
/* min_spacing is the minimum spacing that will fit the hash
* table in one PAGE. This may be much smaller than needed.
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e34cd0e..941576d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1132,7 +1132,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
if (!enough(conf))
return -EINVAL;
- if (rdev->raid_disk)
+ if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
if (rdev->saved_raid_disk >= 0 &&
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 044d84e..f7284b9 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -280,7 +280,11 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
(card->host->ios.clock / 1000);
if (data->flags & MMC_DATA_WRITE)
- limit_us = 250000;
+ /*
+ * The limit is really 250 ms, but that is
+ * insufficient for some crappy cards.
+ */
+ limit_us = 300000;
else
limit_us = 100000;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index a972cc6..9e7a236 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -362,19 +362,6 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
/* Set the default CFI lock/unlock addresses */
cfi->addr_unlock1 = 0x555;
cfi->addr_unlock2 = 0x2aa;
- /* Modify the unlock address if we are in compatibility mode */
- if ( /* x16 in x8 mode */
- ((cfi->device_type == CFI_DEVICETYPE_X8) &&
- (cfi->cfiq->InterfaceDesc ==
- CFI_INTERFACE_X8_BY_X16_ASYNC)) ||
- /* x32 in x16 mode */
- ((cfi->device_type == CFI_DEVICETYPE_X16) &&
- (cfi->cfiq->InterfaceDesc ==
- CFI_INTERFACE_X16_BY_X32_ASYNC)))
- {
- cfi->addr_unlock1 = 0xaaa;
- cfi->addr_unlock2 = 0x555;
- }
} /* CFI mode */
else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index f84ab61..2f3f2f7 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1808,9 +1808,7 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
* several first banks can contain 0x7f instead of actual ID
*/
do {
- uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8),
- cfi_interleave(cfi),
- cfi->device_type);
+ uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);
mask = (1 << (cfi->device_type * 8)) - 1;
result = map_read(map, base + ofs);
bank++;
@@ -1824,7 +1822,7 @@ static inline u32 jedec_read_id(struct map_info *map, uint32_t base,
{
map_word result;
unsigned long mask;
- u32 ofs = cfi_build_cmd_addr(1, cfi_interleave(cfi), cfi->device_type);
+ u32 ofs = cfi_build_cmd_addr(1, map, cfi);
mask = (1 << (cfi->device_type * 8)) -1;
result = map_read(map, base + ofs);
return result.x[0] & mask;
@@ -2067,8 +2065,8 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
}
/* Ensure the unlock addresses we try stay inside the map */
- probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, cfi_interleave(cfi), cfi->device_type);
- probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, cfi_interleave(cfi), cfi->device_type);
+ probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, map, cfi);
+ probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, map, cfi);
if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
((base + probe_offset2 + map_bankwidth(map)) >= map->size))
goto retry;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 0f6f974..39c17bb 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -370,8 +370,9 @@ struct ring_info {
};
enum features {
- RTL_FEATURE_WOL = (1 << 0),
- RTL_FEATURE_MSI = (1 << 1),
+ RTL_FEATURE_WOL = (1 << 0),
+ RTL_FEATURE_MSI = (1 << 1),
+ RTL_FEATURE_GMII = (1 << 2),
};
struct rtl8169_private {
@@ -406,13 +407,15 @@ struct rtl8169_private {
struct vlan_group *vlgrp;
#endif
int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
- void (*get_settings)(struct net_device *, struct ethtool_cmd *);
+ int (*get_settings)(struct net_device *, struct ethtool_cmd *);
void (*phy_reset_enable)(void __iomem *);
void (*hw_start)(struct net_device *);
unsigned int (*phy_reset_pending)(void __iomem *);
unsigned int (*link_ok)(void __iomem *);
struct delayed_work task;
unsigned features;
+
+ struct mii_if_info mii;
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@...r.kernel.org>");
@@ -482,6 +485,23 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
return value;
}
+static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
+ int val)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ mdio_write(ioaddr, location, val);
+}
+
+static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return mdio_read(ioaddr, location);
+}
+
static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
{
RTL_W16(IntrMask, 0x0000);
@@ -720,9 +740,13 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
- if ((tp->mac_version == RTL_GIGA_MAC_VER_12) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_17)) {
- /* Vendor specific (0x1f) and reserved (0x0e) MII registers. */
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
+ (tp->mac_version >= RTL_GIGA_MAC_VER_17)) {
+ /*
+ * Wake up the PHY.
+ * Vendor specific (0x1f) and reserved (0x0e) MII registers.
+ */
mdio_write(ioaddr, 0x1f, 0x0000);
mdio_write(ioaddr, 0x0e, 0x0000);
}
@@ -850,7 +874,7 @@ static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
#endif
-static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
@@ -867,65 +891,29 @@ static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->speed = SPEED_1000;
cmd->duplex = DUPLEX_FULL; /* Always set */
+
+ return 0;
}
-static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
- u8 status;
-
- cmd->supported = SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_TP;
-
- cmd->autoneg = 1;
- cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
-
- if (tp->phy_auto_nego_reg & ADVERTISE_10HALF)
- cmd->advertising |= ADVERTISED_10baseT_Half;
- if (tp->phy_auto_nego_reg & ADVERTISE_10FULL)
- cmd->advertising |= ADVERTISED_10baseT_Full;
- if (tp->phy_auto_nego_reg & ADVERTISE_100HALF)
- cmd->advertising |= ADVERTISED_100baseT_Half;
- if (tp->phy_auto_nego_reg & ADVERTISE_100FULL)
- cmd->advertising |= ADVERTISED_100baseT_Full;
- if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)
- cmd->advertising |= ADVERTISED_1000baseT_Full;
-
- status = RTL_R8(PHYstatus);
-
- if (status & _1000bpsF)
- cmd->speed = SPEED_1000;
- else if (status & _100bps)
- cmd->speed = SPEED_100;
- else if (status & _10bps)
- cmd->speed = SPEED_10;
-
- if (status & TxFlowCtrl)
- cmd->advertising |= ADVERTISED_Asym_Pause;
- if (status & RxFlowCtrl)
- cmd->advertising |= ADVERTISED_Pause;
-
- cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
- DUPLEX_FULL : DUPLEX_HALF;
+
+ return mii_ethtool_gset(&tp->mii, cmd);
}
static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
unsigned long flags;
+ int rc;
spin_lock_irqsave(&tp->lock, flags);
- tp->get_settings(dev, cmd);
+ rc = tp->get_settings(dev, cmd);
spin_unlock_irqrestore(&tp->lock, flags);
- return 0;
+ return rc;
}
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
@@ -1513,7 +1501,7 @@ static const struct rtl_cfg_info {
unsigned int align;
u16 intr_event;
u16 napi_event;
- unsigned msi;
+ unsigned features;
} rtl_cfg_infos [] = {
[RTL_CFG_0] = {
.hw_start = rtl_hw_start_8169,
@@ -1522,7 +1510,7 @@ static const struct rtl_cfg_info {
.intr_event = SYSErr | LinkChg | RxOverflow |
RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
.napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
- .msi = 0
+ .features = RTL_FEATURE_GMII
},
[RTL_CFG_1] = {
.hw_start = rtl_hw_start_8168,
@@ -1531,7 +1519,7 @@ static const struct rtl_cfg_info {
.intr_event = SYSErr | LinkChg | RxOverflow |
TxErr | TxOK | RxOK | RxErr,
.napi_event = TxErr | TxOK | RxOK | RxOverflow,
- .msi = RTL_FEATURE_MSI
+ .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI
},
[RTL_CFG_2] = {
.hw_start = rtl_hw_start_8101,
@@ -1540,7 +1528,7 @@ static const struct rtl_cfg_info {
.intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout |
RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
.napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
- .msi = RTL_FEATURE_MSI
+ .features = RTL_FEATURE_MSI
}
};
@@ -1552,7 +1540,7 @@ static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
u8 cfg2;
cfg2 = RTL_R8(Config2) & ~MSIEnable;
- if (cfg->msi) {
+ if (cfg->features & RTL_FEATURE_MSI) {
if (pci_enable_msi(pdev)) {
dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
} else {
@@ -1578,6 +1566,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
const unsigned int region = cfg->region;
struct rtl8169_private *tp;
+ struct mii_if_info *mii;
struct net_device *dev;
void __iomem *ioaddr;
unsigned int i;
@@ -1602,6 +1591,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->pci_dev = pdev;
tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
+ mii = &tp->mii;
+ mii->dev = dev;
+ mii->mdio_read = rtl_mdio_read;
+ mii->mdio_write = rtl_mdio_write;
+ mii->phy_id_mask = 0x1f;
+ mii->reg_num_mask = 0x1f;
+ mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
+
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pci_enable_device(pdev);
if (rc < 0) {
@@ -2099,8 +2096,6 @@ static void rtl_hw_start_8168(struct net_device *dev)
RTL_R8(IntrMask);
- RTL_W32(RxMissed, 0);
-
rtl_set_rx_mode(dev);
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -2143,8 +2138,6 @@ static void rtl_hw_start_8101(struct net_device *dev)
RTL_R8(IntrMask);
- RTL_W32(RxMissed, 0);
-
rtl_set_rx_mode(dev);
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -2922,6 +2915,17 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (tp->mac_version > RTL_GIGA_MAC_VER_06)
+ return;
+
+ dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
+ RTL_W32(RxMissed, 0);
+}
+
static void rtl8169_down(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -2939,9 +2943,7 @@ core_down:
rtl8169_asic_down(ioaddr);
- /* Update the error counts. */
- dev->stats.rx_missed_errors += RTL_R32(RxMissed);
- RTL_W32(RxMissed, 0);
+ rtl8169_rx_missed(dev, ioaddr);
spin_unlock_irq(&tp->lock);
@@ -3063,8 +3065,7 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
if (netif_running(dev)) {
spin_lock_irqsave(&tp->lock, flags);
- dev->stats.rx_missed_errors += RTL_R32(RxMissed);
- RTL_W32(RxMissed, 0);
+ rtl8169_rx_missed(dev, ioaddr);
spin_unlock_irqrestore(&tp->lock, flags);
}
@@ -3089,8 +3090,7 @@ static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
rtl8169_asic_down(ioaddr);
- dev->stats.rx_missed_errors += RTL_R32(RxMissed);
- RTL_W32(RxMissed, 0);
+ rtl8169_rx_missed(dev, ioaddr);
spin_unlock_irq(&tp->lock);
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 17d4f31..c479ee2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -129,6 +129,13 @@ struct iwl5000_shared {
__le32 padding2;
} __attribute__ ((packed));
+/* calibrations defined for 5000 */
+/* defines the order in which results should be sent to the runtime uCode */
+enum iwl5000_calib {
+ IWL5000_CALIB_LO,
+ IWL5000_CALIB_TX_IQ,
+ IWL5000_CALIB_TX_IQ_PERD,
+};
#endif /* __iwl_5000_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index b08036a..79ff288 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -445,48 +445,6 @@ static int iwl5000_send_Xtal_calib(struct iwl_priv *priv)
sizeof(cal_cmd), &cal_cmd);
}
-static int iwl5000_send_calib_results(struct iwl_priv *priv)
-{
- int ret = 0;
-
- struct iwl_host_cmd hcmd = {
- .id = REPLY_PHY_CALIBRATION_CMD,
- .meta.flags = CMD_SIZE_HUGE,
- };
-
- if (priv->calib_results.lo_res) {
- hcmd.len = priv->calib_results.lo_res_len;
- hcmd.data = priv->calib_results.lo_res;
- ret = iwl_send_cmd_sync(priv, &hcmd);
-
- if (ret)
- goto err;
- }
-
- if (priv->calib_results.tx_iq_res) {
- hcmd.len = priv->calib_results.tx_iq_res_len;
- hcmd.data = priv->calib_results.tx_iq_res;
- ret = iwl_send_cmd_sync(priv, &hcmd);
-
- if (ret)
- goto err;
- }
-
- if (priv->calib_results.tx_iq_perd_res) {
- hcmd.len = priv->calib_results.tx_iq_perd_res_len;
- hcmd.data = priv->calib_results.tx_iq_perd_res;
- ret = iwl_send_cmd_sync(priv, &hcmd);
-
- if (ret)
- goto err;
- }
-
- return 0;
-err:
- IWL_ERROR("Error %d\n", ret);
- return ret;
-}
-
static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
{
struct iwl5000_calib_cfg_cmd calib_cfg_cmd;
@@ -511,33 +469,30 @@ static void iwl5000_rx_calib_result(struct iwl_priv *priv,
struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
struct iwl5000_calib_hdr *hdr = (struct iwl5000_calib_hdr *)pkt->u.raw;
int len = le32_to_cpu(pkt->len) & FH_RSCSR_FRAME_SIZE_MSK;
-
- iwl_free_calib_results(priv);
+ int index;
/* reduce the size of the length field itself */
len -= 4;
+ /* Define the order in which the results will be sent to the runtime
+ * uCode. iwl_send_calib_results sends them in a row according to their
+ * index. We sort them here */
switch (hdr->op_code) {
case IWL5000_PHY_CALIBRATE_LO_CMD:
- priv->calib_results.lo_res = kzalloc(len, GFP_ATOMIC);
- priv->calib_results.lo_res_len = len;
- memcpy(priv->calib_results.lo_res, pkt->u.raw, len);
+ index = IWL5000_CALIB_LO;
break;
case IWL5000_PHY_CALIBRATE_TX_IQ_CMD:
- priv->calib_results.tx_iq_res = kzalloc(len, GFP_ATOMIC);
- priv->calib_results.tx_iq_res_len = len;
- memcpy(priv->calib_results.tx_iq_res, pkt->u.raw, len);
+ index = IWL5000_CALIB_TX_IQ;
break;
case IWL5000_PHY_CALIBRATE_TX_IQ_PERD_CMD:
- priv->calib_results.tx_iq_perd_res = kzalloc(len, GFP_ATOMIC);
- priv->calib_results.tx_iq_perd_res_len = len;
- memcpy(priv->calib_results.tx_iq_perd_res, pkt->u.raw, len);
+ index = IWL5000_CALIB_TX_IQ_PERD;
break;
default:
IWL_ERROR("Unknown calibration notification %d\n",
hdr->op_code);
return;
}
+ iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
}
static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
@@ -832,7 +787,7 @@ static int iwl5000_alive_notify(struct iwl_priv *priv)
iwl5000_send_Xtal_calib(priv);
if (priv->ucode_type == UCODE_RT)
- iwl5000_send_calib_results(priv);
+ iwl_send_calib_results(priv);
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index e01f048..72a6743 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2090,7 +2090,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
iwl4965_error_recovery(priv);
iwl_power_update_mode(priv, 1);
- ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
iwl4965_set_mode(priv, priv->iw_mode);
@@ -2342,6 +2341,7 @@ static void iwl_bg_alive_start(struct work_struct *data)
mutex_lock(&priv->mutex);
iwl_alive_start(priv);
mutex_unlock(&priv->mutex);
+ ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
}
static void iwl4965_bg_rf_kill(struct work_struct *work)
@@ -2486,6 +2486,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
if (!priv->vif || !priv->is_open)
return;
+ iwl_power_cancel_timeout(priv);
iwl_scan_cancel_timeout(priv, 200);
conf = ieee80211_get_hw_conf(priv->hw);
@@ -2503,8 +2504,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
- if (priv->current_ht_config.is_ht)
- iwl_set_rxon_ht(priv, &priv->current_ht_config);
+ iwl_set_rxon_ht(priv, &priv->current_ht_config);
iwl_set_rxon_chain(priv);
priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
@@ -2550,10 +2550,6 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
break;
}
- /* Enable Rx differential gain and sensitivity calibrations */
- iwl_chain_noise_reset(priv);
- priv->start_calib = 1;
-
if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
priv->assoc_station_added = 1;
@@ -2561,7 +2557,12 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
iwl_activate_qos(priv, 0);
spin_unlock_irqrestore(&priv->lock, flags);
- iwl_power_update_mode(priv, 0);
+ iwl_power_enable_management(priv);
+
+ /* Enable Rx differential gain and sensitivity calibrations */
+ iwl_chain_noise_reset(priv);
+ priv->start_calib = 1;
+
/* we have just associated, don't start scan too early */
priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
}
@@ -3212,18 +3213,26 @@ static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
goto out_unlock;
}
- /* we don't schedule scan within next_scan_jiffies period */
+ /* We don't schedule scan within next_scan_jiffies period.
+ * Avoid scanning during possible EAPOL exchange, return
+ * success immediately.
+ */
if (priv->next_scan_jiffies &&
- time_after(priv->next_scan_jiffies, jiffies)) {
- rc = -EAGAIN;
+ time_after(priv->next_scan_jiffies, jiffies)) {
+ IWL_DEBUG_SCAN("scan rejected: within next scan period\n");
+ queue_work(priv->workqueue, &priv->scan_completed);
+ rc = 0;
goto out_unlock;
}
/* if we just finished scan ask for delay */
- if (priv->last_scan_jiffies && time_after(priv->last_scan_jiffies +
- IWL_DELAY_NEXT_SCAN, jiffies)) {
- rc = -EAGAIN;
+ if (iwl_is_associated(priv) && priv->last_scan_jiffies &&
+ time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, jiffies)) {
+ IWL_DEBUG_SCAN("scan rejected: within previous scan period\n");
+ queue_work(priv->workqueue, &priv->scan_completed);
+ rc = 0;
goto out_unlock;
}
+
if (len) {
IWL_DEBUG_SCAN("direct scan for %s [%d]\n ",
iwl_escape_essid(ssid, len), (int)len);
@@ -3546,6 +3555,16 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
/* Per mac80211.h: This is only used in IBSS mode... */
if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
+ /* switch to CAM during association period.
+ * the ucode will block any association/authentication
+ * frame during the association period if it cannot hear
+ * the AP because of PM.  The timer re-enables PM if
+ * association does not complete
+ */
+ if (priv->hw->conf.channel->flags & (IEEE80211_CHAN_PASSIVE_SCAN |
+ IEEE80211_CHAN_RADAR))
+ iwl_power_disable_management(priv, 3000);
+
IWL_DEBUG_MAC80211("leave - not in IBSS\n");
mutex_unlock(&priv->mutex);
return;
@@ -4083,6 +4102,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
/* FIXME : remove when resolved PENDING */
INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
iwl_setup_scan_deferred_work(priv);
+ iwl_setup_power_deferred_work(priv);
if (priv->cfg->ops->lib->setup_deferred_work)
priv->cfg->ops->lib->setup_deferred_work(priv);
@@ -4102,6 +4122,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
cancel_delayed_work_sync(&priv->init_alive_start);
cancel_delayed_work(&priv->scan_check);
+ cancel_delayed_work_sync(&priv->set_power_save);
cancel_delayed_work(&priv->alive_start);
cancel_work_sync(&priv->beacon_update);
del_timer_sync(&priv->statistics_periodic);
@@ -4204,13 +4225,13 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
if (err) {
- err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
/* both attempts failed: */
if (err) {
printk(KERN_WARNING "%s: No suitable DMA available.\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index ef49440..35fb4a4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -66,6 +66,66 @@
#include "iwl-core.h"
#include "iwl-calib.h"
+/*****************************************************************************
+ * INIT calibrations framework
+ *****************************************************************************/
+
+ int iwl_send_calib_results(struct iwl_priv *priv)
+{
+ int ret = 0;
+ int i = 0;
+
+ struct iwl_host_cmd hcmd = {
+ .id = REPLY_PHY_CALIBRATION_CMD,
+ .meta.flags = CMD_SIZE_HUGE,
+ };
+
+ for (i = 0; i < IWL_CALIB_MAX; i++)
+ if (priv->calib_results[i].buf) {
+ hcmd.len = priv->calib_results[i].buf_len;
+ hcmd.data = priv->calib_results[i].buf;
+ ret = iwl_send_cmd_sync(priv, &hcmd);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ IWL_ERROR("Error %d iteration %d\n", ret, i);
+ return ret;
+}
+EXPORT_SYMBOL(iwl_send_calib_results);
+
+int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len)
+{
+ if (res->buf_len != len) {
+ kfree(res->buf);
+ res->buf = kzalloc(len, GFP_ATOMIC);
+ }
+ if (unlikely(res->buf == NULL))
+ return -ENOMEM;
+
+ res->buf_len = len;
+ memcpy(res->buf, buf, len);
+ return 0;
+}
+EXPORT_SYMBOL(iwl_calib_set);
+
+void iwl_calib_free_results(struct iwl_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < IWL_CALIB_MAX; i++) {
+ kfree(priv->calib_results[i].buf);
+ priv->calib_results[i].buf = NULL;
+ priv->calib_results[i].buf_len = 0;
+ }
+}
+
+/*****************************************************************************
+ * RUNTIME calibrations framework
+ *****************************************************************************/
+
/* "false alarms" are signals that our DSP tries to lock onto,
* but then determines that they are either noise, or transmissions
* from a distant wireless network (also "noise", really) that get
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 80f2f84..1383fd1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -646,8 +646,14 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
u32 val;
- if (!ht_info->is_ht)
+ if (!ht_info->is_ht) {
+ rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
+ RXON_FLG_CHANNEL_MODE_PURE_40_MSK |
+ RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
+ RXON_FLG_FAT_PROT_MSK |
+ RXON_FLG_HT_PROT_MSK);
return;
+ }
/* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */
if (iwl_is_fat_tx_allowed(priv, NULL))
@@ -950,22 +956,6 @@ err:
}
EXPORT_SYMBOL(iwl_init_drv);
-void iwl_free_calib_results(struct iwl_priv *priv)
-{
- kfree(priv->calib_results.lo_res);
- priv->calib_results.lo_res = NULL;
- priv->calib_results.lo_res_len = 0;
-
- kfree(priv->calib_results.tx_iq_res);
- priv->calib_results.tx_iq_res = NULL;
- priv->calib_results.tx_iq_res_len = 0;
-
- kfree(priv->calib_results.tx_iq_perd_res);
- priv->calib_results.tx_iq_perd_res = NULL;
- priv->calib_results.tx_iq_perd_res_len = 0;
-}
-EXPORT_SYMBOL(iwl_free_calib_results);
-
int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
int ret = 0;
@@ -993,10 +983,9 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
}
EXPORT_SYMBOL(iwl_set_tx_power);
-
void iwl_uninit_drv(struct iwl_priv *priv)
{
- iwl_free_calib_results(priv);
+ iwl_calib_free_results(priv);
iwlcore_free_geos(priv);
iwl_free_channel_map(priv);
kfree(priv->scan);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 64f139e..51b36b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -186,7 +186,6 @@ struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
void iwl_hw_detect(struct iwl_priv *priv);
void iwl_clear_stations_table(struct iwl_priv *priv);
-void iwl_free_calib_results(struct iwl_priv *priv);
void iwl_reset_qos(struct iwl_priv *priv);
void iwl_set_rxon_chain(struct iwl_priv *priv);
int iwl_set_rxon_channel(struct iwl_priv *priv,
@@ -291,6 +290,13 @@ int iwl_scan_initiate(struct iwl_priv *priv);
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
+/*******************************************************************************
+ * Calibrations - implemented in iwl-calib.c
+ ******************************************************************************/
+int iwl_send_calib_results(struct iwl_priv *priv);
+int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
+void iwl_calib_free_results(struct iwl_priv *priv);
+
/*****************************************************
* S e n d i n g H o s t C o m m a n d s *
*****************************************************/
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index cdfb343..09bdf8e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -745,13 +745,10 @@ struct statistics_general_data {
u32 beacon_energy_c;
};
-struct iwl_calib_results {
- void *tx_iq_res;
- void *tx_iq_perd_res;
- void *lo_res;
- u32 tx_iq_res_len;
- u32 tx_iq_perd_res_len;
- u32 lo_res_len;
+/* Opaque calibration results */
+struct iwl_calib_result {
+ void *buf;
+ size_t buf_len;
};
enum ucode_type {
@@ -813,6 +810,7 @@ enum {
#define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */
+#define IWL_CALIB_MAX 3
struct iwl_priv {
@@ -857,7 +855,7 @@ struct iwl_priv {
s32 last_temperature;
/* init calibration results */
- struct iwl_calib_results calib_results;
+ struct iwl_calib_result calib_results[IWL_CALIB_MAX];
/* Scan related variables */
unsigned long last_scan_jiffies;
@@ -1047,6 +1045,7 @@ struct iwl_priv {
struct tasklet_struct irq_tasklet;
+ struct delayed_work set_power_save;
struct delayed_work init_alive_start;
struct delayed_work alive_start;
struct delayed_work scan_check;
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index a099c9e..ae60bfd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -324,7 +324,7 @@ EXPORT_SYMBOL(iwl_power_update_mode);
* this will be useful for rate scale to disable PM during heavy
* Tx/Rx activities
*/
-int iwl_power_disable_management(struct iwl_priv *priv)
+int iwl_power_disable_management(struct iwl_priv *priv, u32 ms)
{
u16 prev_mode;
int ret = 0;
@@ -337,6 +337,11 @@ int iwl_power_disable_management(struct iwl_priv *priv)
ret = iwl_power_update_mode(priv, 0);
priv->power_data.power_disabled = 1;
priv->power_data.user_power_setting = prev_mode;
+ cancel_delayed_work(&priv->set_power_save);
+ if (ms)
+ queue_delayed_work(priv->workqueue, &priv->set_power_save,
+ msecs_to_jiffies(ms));
+
return ret;
}
@@ -431,3 +436,35 @@ int iwl_power_temperature_change(struct iwl_priv *priv)
return ret;
}
EXPORT_SYMBOL(iwl_power_temperature_change);
+
+static void iwl_bg_set_power_save(struct work_struct *work)
+{
+ struct iwl_priv *priv = container_of(work,
+ struct iwl_priv, set_power_save.work);
+ IWL_DEBUG(IWL_DL_STATE, "update power\n");
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+
+ /* on starting association we disable power management
+ * until association completes; if association fails, this
+ * timer will expire and enable PM again.
+ */
+ if (!iwl_is_associated(priv))
+ iwl_power_enable_management(priv);
+
+ mutex_unlock(&priv->mutex);
+}
+void iwl_setup_power_deferred_work(struct iwl_priv *priv)
+{
+ INIT_DELAYED_WORK(&priv->set_power_save, iwl_bg_set_power_save);
+}
+EXPORT_SYMBOL(iwl_setup_power_deferred_work);
+
+void iwl_power_cancel_timeout(struct iwl_priv *priv)
+{
+ cancel_delayed_work(&priv->set_power_save);
+}
+EXPORT_SYMBOL(iwl_power_cancel_timeout);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index abcbbf9..aa99f36 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -78,8 +78,10 @@ struct iwl_power_mgr {
u8 power_disabled; /* flag to disable using power saving level */
};
+void iwl_setup_power_deferred_work(struct iwl_priv *priv);
+void iwl_power_cancel_timeout(struct iwl_priv *priv);
int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh);
-int iwl_power_disable_management(struct iwl_priv *priv);
+int iwl_power_disable_management(struct iwl_priv *priv, u32 ms);
int iwl_power_enable_management(struct iwl_priv *priv);
int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode);
int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode);
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 6c8ac3a..3a90a67 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -464,11 +464,6 @@ void iwl_init_scan_params(struct iwl_priv *priv)
int iwl_scan_initiate(struct iwl_priv *priv)
{
- if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
- IWL_ERROR("APs don't scan.\n");
- return 0;
- }
-
if (!iwl_is_ready_rf(priv)) {
IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
return -EIO;
@@ -480,8 +475,7 @@ int iwl_scan_initiate(struct iwl_priv *priv)
}
if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
- IWL_DEBUG_SCAN("Scan request while abort pending. "
- "Queuing.\n");
+ IWL_DEBUG_SCAN("Scan request while abort pending\n");
return -EAGAIN;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index b775d5b..752e7f8 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -5761,7 +5761,6 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
if (priv->error_recovering)
iwl3945_error_recovery(priv);
- ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
return;
restart:
@@ -6006,6 +6005,7 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
mutex_lock(&priv->mutex);
iwl3945_alive_start(priv);
mutex_unlock(&priv->mutex);
+ ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
}
static void iwl3945_bg_rf_kill(struct work_struct *work)
@@ -6259,6 +6259,11 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
direct_mask,
(void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
+ if (scan->channel_count == 0) {
+ IWL_DEBUG_SCAN("channel count %d\n", scan->channel_count);
+ goto done;
+ }
+
cmd.len += le16_to_cpu(scan->tx_cmd.len) +
scan->channel_count * sizeof(struct iwl3945_scan_channel);
cmd.data = scan;
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index a60ae86..a3ccd8c 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -61,6 +61,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 },
/* ZD1211B */
{ USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x0ace, 0xb215), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
@@ -82,6 +83,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x0cde, 0x001a), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B },
/* "Driverless" devices that need ejecting */
{ USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
{ USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index f38a5af..810bf7c 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -2365,13 +2365,12 @@ static void ext3_write_super (struct super_block * sb)
static int ext3_sync_fs(struct super_block *sb, int wait)
{
- tid_t target;
-
sb->s_dirt = 0;
- if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) {
- if (wait)
- log_wait_commit(EXT3_SB(sb)->s_journal, target);
- }
+ if (wait)
+ ext3_force_commit(sb);
+ else
+ journal_start_commit(EXT3_SB(sb)->s_journal, NULL);
+
return 0;
}
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index ba85157..6d98f11 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -190,6 +190,10 @@ int hfs_cat_find_brec(struct super_block *sb, u32 cnid,
fd->search_key->cat.ParID = rec.thread.ParID;
len = fd->search_key->cat.CName.len = rec.thread.CName.len;
+ if (len > HFS_NAMELEN) {
+ printk(KERN_ERR "hfs: bad catalog namelength\n");
+ return -EIO;
+ }
memcpy(fd->search_key->cat.CName.name, rec.thread.CName.name, len);
return hfs_brec_find(fd);
}
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 8adebd3..0fd792b 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -85,15 +85,15 @@ static int jffs2_garbage_collect_thread(void *_c)
for (;;) {
allow_signal(SIGHUP);
again:
+ spin_lock(&c->erase_completion_lock);
if (!jffs2_thread_should_wake(c)) {
set_current_state (TASK_INTERRUPTIBLE);
+ spin_unlock(&c->erase_completion_lock);
D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
- /* Yes, there's a race here; we checked jffs2_thread_should_wake()
- before setting current->state to TASK_INTERRUPTIBLE. But it doesn't
- matter - We don't care if we miss a wakeup, because the GC thread
- is only an optimisation anyway. */
schedule();
- }
+ } else
+ spin_unlock(&c->erase_completion_lock);
+
/* This thread is purely an optimisation. But if it runs when
other things could be running, it actually makes things a
diff --git a/fs/jffs2/compr_lzo.c b/fs/jffs2/compr_lzo.c
index 47b0457..90cb60d 100644
--- a/fs/jffs2/compr_lzo.c
+++ b/fs/jffs2/compr_lzo.c
@@ -19,7 +19,7 @@
static void *lzo_mem;
static void *lzo_compress_buf;
-static DEFINE_MUTEX(deflate_mutex);
+static DEFINE_MUTEX(deflate_mutex); /* for lzo_mem and lzo_compress_buf */
static void free_workspace(void)
{
@@ -49,18 +49,21 @@ static int jffs2_lzo_compress(unsigned char *data_in, unsigned char *cpage_out,
mutex_lock(&deflate_mutex);
ret = lzo1x_1_compress(data_in, *sourcelen, lzo_compress_buf, &compress_size, lzo_mem);
- mutex_unlock(&deflate_mutex);
-
if (ret != LZO_E_OK)
- return -1;
+ goto fail;
if (compress_size > *dstlen)
- return -1;
+ goto fail;
memcpy(cpage_out, lzo_compress_buf, compress_size);
- *dstlen = compress_size;
+ mutex_unlock(&deflate_mutex);
+ *dstlen = compress_size;
return 0;
+
+ fail:
+ mutex_unlock(&deflate_mutex);
+ return -1;
}
static int jffs2_lzo_decompress(unsigned char *data_in, unsigned char *cpage_out,
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index ae060c6..18546d8 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -34,7 +34,7 @@
#define __pfn_to_page(pfn) \
({ unsigned long __pfn = (pfn); \
- unsigned long __nid = arch_pfn_to_nid(pfn); \
+ unsigned long __nid = arch_pfn_to_nid(__pfn); \
NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
})
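
The one-character change above is a macro-hygiene fix: the statement expression already captures the argument in __pfn, and re-using the raw parameter would evaluate it a second time. A userspace illustration of the same pitfall (GNU statement expressions, as in the kernel macro; not kernel code):

#include <stdio.h>

#define BROKEN_TWICE(x)	({ unsigned long __v = (x); __v + (x); })	/* evaluates x twice */
#define FIXED_ONCE(x)	({ unsigned long __v = (x); __v + __v; })	/* evaluates x once */

int main(void)
{
	unsigned long a = 1, b = 1;
	unsigned long r1 = BROKEN_TWICE(a++);
	unsigned long r2 = FIXED_ONCE(b++);

	printf("broken: r=%lu, a=%lu\n", r1, a);	/* a advanced twice */
	printf("fixed:  r=%lu, b=%lu\n", r2, b);	/* b advanced once */
	return 0;
}
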
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index d6fb115..3a16bea 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -281,9 +281,25 @@ struct cfi_private {
/*
* Returns the command address according to the given geometry.
*/
-static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type)
+static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
+ struct map_info *map, struct cfi_private *cfi)
{
- return (cmd_ofs * type) * interleave;
+ unsigned bankwidth = map_bankwidth(map);
+ unsigned interleave = cfi_interleave(cfi);
+ unsigned type = cfi->device_type;
+ uint32_t addr;
+
+ addr = (cmd_ofs * type) * interleave;
+
+ /* Modify the unlock address if we are in compatibility mode.
+ * For 16bit devices on 8 bit busses
+ * and 32bit devices on 16 bit busses
+ * set the low bit of the alternating bit sequence of the address.
+ */
+ if (((type * interleave) > bankwidth) && ((uint8_t)cmd_ofs == 0xaa))
+ addr |= (type >> 1)*interleave;
+
+ return addr;
}
/*
@@ -429,7 +445,7 @@ static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t
int type, map_word *prev_val)
{
map_word val;
- uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type);
+ uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
val = cfi_build_cmd(cmd, map, cfi);
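
The compatibility fixup that used to be open-coded in cfi_cmdset_0002.c (removed earlier in this patch) now falls out of the address computation itself. A standalone sketch of the arithmetic for a x16 chip on an 8-bit bus (type 2, interleave 1, bankwidth 1); the numbers are illustrative:

#include <stdio.h>
#include <stdint.h>

static uint32_t demo_build_cmd_addr(uint32_t cmd_ofs, unsigned bankwidth,
				    unsigned interleave, unsigned type)
{
	uint32_t addr = (cmd_ofs * type) * interleave;

	if (((type * interleave) > bankwidth) && ((uint8_t)cmd_ofs == 0xaa))
		addr |= (type >> 1) * interleave;

	return addr;
}

int main(void)
{
	/* 0x555 -> 0xaaa and 0x2aa -> 0x555: the same unlock addresses the
	 * removed special case in cfi_cmdset_0002.c used to hard-code. */
	printf("unlock1: %#x\n", (unsigned)demo_build_cmd_addr(0x555, 1, 1, 2));
	printf("unlock2: %#x\n", (unsigned)demo_build_cmd_addr(0x2aa, 1, 1, 2));
	return 0;
}
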
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 7dd29b7..c29ff1d 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -54,6 +54,7 @@ struct unix_sock {
atomic_long_t inflight;
spinlock_t lock;
unsigned int gc_candidate : 1;
+ unsigned int gc_maybe_cycle : 1;
wait_queue_head_t peer_wait;
};
#define unix_sk(__sk) ((struct unix_sock *)__sk)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a0123d7..d68bf2b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2443,7 +2443,6 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
list_del(&cgrp->sibling);
spin_lock(&cgrp->dentry->d_lock);
d = dget(cgrp->dentry);
- cgrp->dentry = NULL;
spin_unlock(&d->d_lock);
cgroup_d_remove_dir(d);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 67a7119..77427c8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -353,11 +353,26 @@ static int vma_has_reserves(struct vm_area_struct *vma)
return 0;
}
+static void clear_gigantic_page(struct page *page,
+ unsigned long addr, unsigned long sz)
+{
+ int i;
+ struct page *p = page;
+
+ might_sleep();
+ for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
+ cond_resched();
+ clear_user_highpage(p, addr + i * PAGE_SIZE);
+ }
+}
static void clear_huge_page(struct page *page,
unsigned long addr, unsigned long sz)
{
int i;
+ if (unlikely(sz > MAX_ORDER_NR_PAGES))
+ return clear_gigantic_page(page, addr, sz);
+
might_sleep();
for (i = 0; i < sz/PAGE_SIZE; i++) {
cond_resched();
@@ -365,12 +380,32 @@ static void clear_huge_page(struct page *page,
}
}
+static void copy_gigantic_page(struct page *dst, struct page *src,
+ unsigned long addr, struct vm_area_struct *vma)
+{
+ int i;
+ struct hstate *h = hstate_vma(vma);
+ struct page *dst_base = dst;
+ struct page *src_base = src;
+ might_sleep();
+ for (i = 0; i < pages_per_huge_page(h); ) {
+ cond_resched();
+ copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
+
+ i++;
+ dst = mem_map_next(dst, dst_base, i);
+ src = mem_map_next(src, src_base, i);
+ }
+}
static void copy_huge_page(struct page *dst, struct page *src,
unsigned long addr, struct vm_area_struct *vma)
{
int i;
struct hstate *h = hstate_vma(vma);
+ if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES))
+ return copy_gigantic_page(dst, src, addr, vma);
+
might_sleep();
for (i = 0; i < pages_per_huge_page(h); i++) {
cond_resched();
@@ -455,6 +490,8 @@ static void update_and_free_page(struct hstate *h, struct page *page)
{
int i;
+ VM_BUG_ON(h->order >= MAX_ORDER);
+
h->nr_huge_pages--;
h->nr_huge_pages_node[page_to_nid(page)]--;
for (i = 0; i < pages_per_huge_page(h); i++) {
@@ -969,6 +1006,14 @@ found:
return 1;
}
+static void prep_compound_huge_page(struct page *page, int order)
+{
+ if (unlikely(order > (MAX_ORDER - 1)))
+ prep_compound_gigantic_page(page, order);
+ else
+ prep_compound_page(page, order);
+}
+
/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
@@ -979,7 +1024,7 @@ static void __init gather_bootmem_prealloc(void)
struct hstate *h = m->hstate;
__ClearPageReserved(page);
WARN_ON(page_count(page) != 1);
- prep_compound_page(page, h->order);
+ prep_compound_huge_page(page, h->order);
prep_new_huge_page(h, page, page_to_nid(page));
}
}
@@ -2103,7 +2148,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
same_page:
if (pages) {
get_page(page);
- pages[i] = page + pfn_offset;
+ pages[i] = mem_map_offset(page, pfn_offset);
}
if (vmas)
diff --git a/mm/internal.h b/mm/internal.h
index 1f43f74..92729ea 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -17,6 +17,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
unsigned long floor, unsigned long ceiling);
extern void prep_compound_page(struct page *page, unsigned long order);
+extern void prep_compound_gigantic_page(struct page *page, unsigned long order);
static inline void set_page_count(struct page *page, int v)
{
@@ -53,6 +54,34 @@ static inline unsigned long page_order(struct page *page)
}
/*
+ * Return the mem_map entry representing the 'offset' subpage within
+ * the maximally aligned gigantic page 'base'. Handle any discontiguity
+ * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
+ */
+static inline struct page *mem_map_offset(struct page *base, int offset)
+{
+ if (unlikely(offset >= MAX_ORDER_NR_PAGES))
+ return pfn_to_page(page_to_pfn(base) + offset);
+ return base + offset;
+}
+
+/*
+ * Iterator over all subpages within the maximally aligned gigantic
+ * page 'base'. Handle any discontiguity in the mem_map.
+ */
+static inline struct page *mem_map_next(struct page *iter,
+ struct page *base, int offset)
+{
+ if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
+ unsigned long pfn = page_to_pfn(base) + offset;
+ if (!pfn_valid(pfn))
+ return NULL;
+ return pfn_to_page(pfn);
+ }
+ return iter + 1;
+}
+
+/*
* FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
* so all functions starting at paging_init should be marked __init
* in those cases. SPARSEMEM, however, allows for memory hotplug,
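
The reason plain pointer arithmetic is not enough here is that with a sparse
mem_map the struct page array is only guaranteed to be contiguous within one
MAX_ORDER-sized block. The user-space sketch below models that layout with small
per-section arrays (the section size, type and function names are invented for
illustration) and mirrors the offset/next pattern used by the two helpers.

#include <stdio.h>

#define SECTION_PAGES 4		/* stand-in for MAX_ORDER_NR_PAGES */
#define NR_SECTIONS   3

struct toy_page { unsigned long pfn; };

static struct toy_page sections[NR_SECTIONS][SECTION_PAGES];

/* pfn_to_page() stand-in: index into the owning section's array;
 * NULL plays the role of a pfn_valid() failure. */
static struct toy_page *toy_pfn_to_page(unsigned long pfn)
{
	if (pfn >= (unsigned long)NR_SECTIONS * SECTION_PAGES)
		return NULL;
	return &sections[pfn / SECTION_PAGES][pfn % SECTION_PAGES];
}

/* mem_map_offset() equivalent: pointer arithmetic only inside one section */
static struct toy_page *toy_map_offset(struct toy_page *base, int offset)
{
	if (offset >= SECTION_PAGES)
		return toy_pfn_to_page(base->pfn + offset);
	return base + offset;
}

/* mem_map_next() equivalent: re-resolve at every section boundary */
static struct toy_page *toy_map_next(struct toy_page *iter,
				     struct toy_page *base, int offset)
{
	if ((offset % SECTION_PAGES) == 0)
		return toy_pfn_to_page(base->pfn + offset);
	return iter + 1;
}

int main(void)
{
	unsigned long pfn = 0;
	struct toy_page *base, *p;
	int s, i;

	for (s = 0; s < NR_SECTIONS; s++)
		for (i = 0; i < SECTION_PAGES; i++)
			sections[s][i].pfn = pfn++;

	/* Walk a "gigantic page" spanning all three sections the same way
	 * clear_gigantic_page() walks its subpages with mem_map_next(). */
	base = toy_pfn_to_page(0);
	for (i = 0, p = base; i < NR_SECTIONS * SECTION_PAGES;
	     i++, p = toy_map_next(p, base, i))
		printf("subpage %2d -> pfn %lu\n", i, p->pfn);

	printf("offset 7    -> pfn %lu\n", toy_map_offset(base, 7)->pfn);
	return 0;
}
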
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 27b8681..ed5cdae 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -268,24 +268,39 @@ void prep_compound_page(struct page *page, unsigned long order)
{
int i;
int nr_pages = 1 << order;
+
+ set_compound_page_dtor(page, free_compound_page);
+ set_compound_order(page, order);
+ __SetPageHead(page);
+ for (i = 1; i < nr_pages; i++) {
+ struct page *p = page + i;
+
+ __SetPageTail(p);
+ p->first_page = page;
+ }
+}
+
+#ifdef CONFIG_HUGETLBFS
+void prep_compound_gigantic_page(struct page *page, unsigned long order)
+{
+ int i;
+ int nr_pages = 1 << order;
struct page *p = page + 1;
set_compound_page_dtor(page, free_compound_page);
set_compound_order(page, order);
__SetPageHead(page);
- for (i = 1; i < nr_pages; i++, p++) {
- if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0))
- p = pfn_to_page(page_to_pfn(page) + i);
+ for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
__SetPageTail(p);
p->first_page = page;
}
}
+#endif
static void destroy_compound_page(struct page *page, unsigned long order)
{
int i;
int nr_pages = 1 << order;
- struct page *p = page + 1;
if (unlikely(compound_order(page) != order))
bad_page(page);
@@ -293,9 +308,8 @@ static void destroy_compound_page(struct page *page, unsigned long order)
if (unlikely(!PageHead(page)))
bad_page(page);
__ClearPageHead(page);
- for (i = 1; i < nr_pages; i++, p++) {
- if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0))
- p = pfn_to_page(page_to_pfn(page) + i);
+ for (i = 1; i < nr_pages; i++) {
+ struct page *p = page + i;
if (unlikely(!PageTail(p) |
(p->first_page != page)))
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 015606b..8bde9bf 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1300,14 +1300,23 @@ static void unix_destruct_fds(struct sk_buff *skb)
sock_wfree(skb);
}
-static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
int i;
+
+ /*
+ * Need to duplicate file references for the sake of garbage
+ * collection. Otherwise a socket in the fps might become a
+ * candidate for GC while the skb is not yet queued.
+ */
+ UNIXCB(skb).fp = scm_fp_dup(scm->fp);
+ if (!UNIXCB(skb).fp)
+ return -ENOMEM;
+
for (i=scm->fp->count-1; i>=0; i--)
unix_inflight(scm->fp->fp[i]);
- UNIXCB(skb).fp = scm->fp;
skb->destructor = unix_destruct_fds;
- scm->fp = NULL;
+ return 0;
}
/*
@@ -1366,8 +1375,11 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
goto out;
memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
- if (siocb->scm->fp)
- unix_attach_fds(siocb->scm, skb);
+ if (siocb->scm->fp) {
+ err = unix_attach_fds(siocb->scm, skb);
+ if (err)
+ goto out_free;
+ }
unix_get_secdata(siocb->scm, skb);
skb_reset_transport_header(skb);
@@ -1536,8 +1548,13 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
size = min_t(int, size, skb_tailroom(skb));
memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
- if (siocb->scm->fp)
- unix_attach_fds(siocb->scm, skb);
+ if (siocb->scm->fp) {
+ err = unix_attach_fds(siocb->scm, skb);
+ if (err) {
+ kfree_skb(skb);
+ goto out_err;
+ }
+ }
if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
kfree_skb(skb);
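
Both call sites above follow the same small pattern: duplicate the fd references
first, and if that allocation fails, free the half-built skb instead of sending it
with the caller's original fp attached. A condensed user-space sketch of that
ordering follows; every type and function name in it is an illustrative stand-in,
not the kernel API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_fp  { int nfds; };		/* stands in for scm->fp        */
struct toy_skb { struct toy_fp *fp; };	/* stands in for skb + UNIXCB() */

/* scm_fp_dup() stand-in: may fail; on failure the caller's original
 * reference list must remain untouched. */
static struct toy_fp *toy_fp_dup(const struct toy_fp *fp, int simulate_oom)
{
	struct toy_fp *copy;

	if (simulate_oom)
		return NULL;
	copy = malloc(sizeof(*copy));
	if (copy)
		*copy = *fp;
	return copy;
}

/* unix_attach_fds() stand-in: now fallible, returns -ENOMEM as in the hunk */
static int toy_attach_fds(struct toy_skb *skb, struct toy_fp *fp, int oom)
{
	skb->fp = toy_fp_dup(fp, oom);
	if (!skb->fp)
		return -ENOMEM;
	return 0;
}

static int toy_sendmsg(struct toy_fp *fp, int oom)
{
	struct toy_skb *skb = calloc(1, sizeof(*skb));
	int err;

	if (!skb)
		return -ENOMEM;
	err = toy_attach_fds(skb, fp, oom);
	if (err) {
		free(skb);		/* mirrors kfree_skb() / goto out_free */
		return err;
	}
	/* ... queue and transmit the skb here ... */
	free(skb->fp);
	free(skb);
	return 0;
}

int main(void)
{
	struct toy_fp fp = { .nfds = 2 };

	printf("normal send: %d\n", toy_sendmsg(&fp, 0));
	printf("oom send:    %d\n", toy_sendmsg(&fp, 1));
	return 0;
}
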
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 2a27b84..6d4a9a8 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -186,8 +186,17 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
*/
struct sock *sk = unix_get_socket(*fp++);
if (sk) {
- hit = true;
- func(unix_sk(sk));
+ struct unix_sock *u = unix_sk(sk);
+
+ /*
+ * Ignore non-candidates, they could
+ * have been added to the queues after
+ * starting the garbage collection
+ */
+ if (u->gc_candidate) {
+ hit = true;
+ func(u);
+ }
}
}
if (hit && hitlist != NULL) {
@@ -249,11 +258,11 @@ static void inc_inflight_move_tail(struct unix_sock *u)
{
atomic_long_inc(&u->inflight);
/*
- * If this is still a candidate, move it to the end of the
- * list, so that it's checked even if it was already passed
- * over
+ * If this still might be part of a cycle, move it to the end
+ * of the list, so that it's checked even if it was already
+ * passed over
*/
- if (u->gc_candidate)
+ if (u->gc_maybe_cycle)
list_move_tail(&u->link, &gc_candidates);
}
@@ -267,6 +276,7 @@ void unix_gc(void)
struct unix_sock *next;
struct sk_buff_head hitlist;
struct list_head cursor;
+ LIST_HEAD(not_cycle_list);
spin_lock(&unix_gc_lock);
@@ -282,10 +292,14 @@ void unix_gc(void)
*
* Holding unix_gc_lock will protect these candidates from
* being detached, and hence from gaining an external
- * reference. This also means, that since there are no
- * possible receivers, the receive queues of these sockets are
- * static during the GC, even though the dequeue is done
- * before the detach without atomicity guarantees.
+ * reference. Since there are no possible receivers, all
+ * buffers currently on the candidates' queues stay there
+ * during the garbage collection.
+ *
+ * We also know that no new candidate can be added onto the
+ * receive queues. Other, non-candidate sockets _can_ be
+ * added to the queue, so we must make sure only to touch
+ * candidates.
*/
list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
long total_refs;
@@ -299,6 +313,7 @@ void unix_gc(void)
if (total_refs == inflight_refs) {
list_move_tail(&u->link, &gc_candidates);
u->gc_candidate = 1;
+ u->gc_maybe_cycle = 1;
}
}
@@ -325,14 +340,24 @@ void unix_gc(void)
list_move(&cursor, &u->link);
if (atomic_long_read(&u->inflight) > 0) {
- list_move_tail(&u->link, &gc_inflight_list);
- u->gc_candidate = 0;
+ list_move_tail(&u->link, &not_cycle_list);
+ u->gc_maybe_cycle = 0;
scan_children(&u->sk, inc_inflight_move_tail, NULL);
}
}
list_del(&cursor);
/*
+ * not_cycle_list contains those sockets which do not make up a
+ * cycle. Restore these to the inflight list.
+ */
+ while (!list_empty(&not_cycle_list)) {
+ u = list_entry(not_cycle_list.next, struct unix_sock, link);
+ u->gc_candidate = 0;
+ list_move_tail(&u->link, &gc_inflight_list);
+ }
+
+ /*
* Now gc_candidates contains only garbage. Restore original
* inflight counters for these as well, and remove the skbuffs
* which are creating the cycle(s).
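
The bookkeeping introduced here can be summarised separately from the
reachability scan itself: a socket keeps gc_candidate set for the whole
collection (so scan_inflight() keeps ignoring traffic from non-candidates), but
loses gc_maybe_cycle and is parked on not_cycle_list as soon as it is found to
have external references; once the scan settles, everything on not_cycle_list is
restored to the inflight list and only genuine cycles remain on gc_candidates.
The sketch below illustrates only that flag/list bookkeeping; the reachability
propagation done by scan_children()/inc_inflight_move_tail() is reduced to a
precomputed "externally referenced" flag, and all names are illustrative.

#include <stdio.h>

enum toy_list { TOY_INFLIGHT, TOY_CANDIDATES, TOY_NOT_CYCLE };

struct toy_sock {
	const char *name;
	int ext_ref;		/* precomputed: reachable from user space? */
	unsigned gc_candidate:1;
	unsigned gc_maybe_cycle:1;
	enum toy_list list;
};

int main(void)
{
	struct toy_sock socks[] = {
		{ "a (in a cycle)",      0 },
		{ "b (externally held)", 1 },
		{ "c (in a cycle)",      0 },
	};
	int n = sizeof(socks) / sizeof(socks[0]);
	int i;

	/* Phase 1: every socket whose only references are in flight becomes
	 * a candidate and might be part of a cycle. */
	for (i = 0; i < n; i++) {
		socks[i].gc_candidate = 1;
		socks[i].gc_maybe_cycle = 1;
		socks[i].list = TOY_CANDIDATES;
	}

	/* Phase 2 (scan, reduced to ext_ref here): anything with external
	 * references cannot be cycle garbage, so park it on not_cycle_list
	 * and clear gc_maybe_cycle, but keep gc_candidate set so concurrent
	 * non-candidate traffic is still ignored by the scan. */
	for (i = 0; i < n; i++) {
		if (socks[i].ext_ref) {
			socks[i].gc_maybe_cycle = 0;
			socks[i].list = TOY_NOT_CYCLE;
		}
	}

	/* Phase 3: restore not_cycle_list entries to the inflight list;
	 * whatever is still on gc_candidates is pure garbage. */
	for (i = 0; i < n; i++) {
		if (socks[i].list == TOY_NOT_CYCLE) {
			socks[i].gc_candidate = 0;
			socks[i].list = TOY_INFLIGHT;
		}
	}

	for (i = 0; i < n; i++)
		printf("%-22s candidate=%u maybe_cycle=%u garbage=%s\n",
		       socks[i].name, socks[i].gc_candidate,
		       socks[i].gc_maybe_cycle,
		       socks[i].list == TOY_CANDIDATES ? "yes" : "no");
	return 0;
}
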
diff --git a/security/keys/internal.h b/security/keys/internal.h
index b39f5c2..239098f 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -107,6 +107,7 @@ extern key_ref_t search_process_keyrings(struct key_type *type,
extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
+extern int install_user_keyrings(struct task_struct *tsk);
extern int install_thread_keyring(struct task_struct *tsk);
extern int install_process_keyring(struct task_struct *tsk);
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 5be6d01..45b240a 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -40,7 +40,7 @@ struct key_user root_key_user = {
/*
* install user and user session keyrings for a particular UID
*/
-static int install_user_keyrings(struct task_struct *tsk)
+int install_user_keyrings(struct task_struct *tsk)
{
struct user_struct *user = tsk->user;
struct key *uid_keyring, *session_keyring;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index ba32ca6..abea08f 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -74,6 +74,10 @@ static int call_sbin_request_key(struct key_construction *cons,
kenter("{%d},{%d},%s", key->serial, authkey->serial, op);
+ ret = install_user_keyrings(tsk);
+ if (ret < 0)
+ goto error_alloc;
+
/* allocate a new session keyring */
sprintf(desc, "_req.%u", key->serial);
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index f3da621..732ce13 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -67,6 +67,7 @@ enum {
enum {
STAC_92HD73XX_REF,
STAC_DELL_M6,
+ STAC_DELL_EQ,
STAC_92HD73XX_MODELS
};
@@ -560,9 +561,7 @@ static struct hda_verb dell_eq_core_init[] = {
};
static struct hda_verb dell_m6_core_init[] = {
- /* set master volume to max value without distortion
- * and direct control */
- { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xec},
+ { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff},
/* setup audio connections */
{ 0x0d, AC_VERB_SET_CONNECT_SEL, 0x00},
{ 0x0a, AC_VERB_SET_CONNECT_SEL, 0x01},
@@ -1297,11 +1296,13 @@ static unsigned int dell_m6_pin_configs[13] = {
static unsigned int *stac92hd73xx_brd_tbl[STAC_92HD73XX_MODELS] = {
[STAC_92HD73XX_REF] = ref92hd73xx_pin_configs,
[STAC_DELL_M6] = dell_m6_pin_configs,
+ [STAC_DELL_EQ] = dell_m6_pin_configs,
};
static const char *stac92hd73xx_models[STAC_92HD73XX_MODELS] = {
[STAC_92HD73XX_REF] = "ref",
[STAC_DELL_M6] = "dell-m6",
+ [STAC_DELL_EQ] = "dell-eq",
};
static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
@@ -3560,8 +3561,12 @@ again:
spec->gpio_data = 0x01;
switch (spec->board_config) {
- case STAC_DELL_M6:
+ case STAC_DELL_EQ:
spec->init = dell_eq_core_init;
+ /* fallthru */
+ case STAC_DELL_M6:
+ if (!spec->init)
+ spec->init = dell_m6_core_init;
switch (codec->subsystem_id) {
case 0x1028025e: /* Analog Mics */
case 0x1028025f:
@@ -3570,8 +3575,6 @@ again:
break;
case 0x10280271: /* Digital Mics */
case 0x10280272:
- spec->init = dell_m6_core_init;
- /* fall-through */
case 0x10280254:
case 0x10280255:
stac92xx_set_config_reg(codec, 0x13, 0x90A60160);
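
The restructured switch is easiest to read as: dell-eq boards pick the
EQ-specific verbs and then share the rest of the Dell M6 handling, while plain
dell-m6 boards get dell_m6_core_init through the !spec->init guard. A minimal
stand-alone sketch of just that control flow (the verb tables are reduced to
strings; only the fall-through logic mirrors the hunk):

#include <stdio.h>

enum { STAC_92HD73XX_REF, STAC_DELL_M6, STAC_DELL_EQ };

static const char *dell_eq_core_init = "dell_eq_core_init";
static const char *dell_m6_core_init = "dell_m6_core_init";

static const char *pick_init(int board_config)
{
	const char *init = NULL;

	switch (board_config) {
	case STAC_DELL_EQ:
		init = dell_eq_core_init;
		/* fall through */
	case STAC_DELL_M6:
		if (!init)
			init = dell_m6_core_init;
		break;
	}
	return init;
}

int main(void)
{
	printf("dell-eq -> %s\n", pick_init(STAC_DELL_EQ));
	printf("dell-m6 -> %s\n", pick_init(STAC_DELL_M6));
	return 0;
}
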
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/