Message-Id: <1283165327-10144-3-git-send-email-maximlevitsky@gmail.com>
Date: Mon, 30 Aug 2010 13:48:46 +0300
From: Maxim Levitsky <maximlevitsky@...il.com>
To: Alex Dubov <oakad@...oo.com>
Cc: LKML <linux-kernel@...r.kernel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Maxim Levitsky <maximlevitsky@...il.com>
Subject: [PATCH 2/3] MEMSTICK: add support for legacy memorysticks
Huge thanks to Alex Dubov for the code this driver is based on
and for the great amount of help he gave me in understanding
MemorySticks and implementing this driver.
His help made this driver possible.
As with any new code that handles user data, this driver is not
recommended for use with valuable data.
It does, however, try its best to avoid data corruption and possible
damage to the card.
Signed-off-by: Maxim Levitsky <maximlevitsky@...il.com>
---
MAINTAINERS | 5 +
drivers/memstick/core/Kconfig | 13 +
drivers/memstick/core/Makefile | 2 +-
drivers/memstick/core/ms_block.c | 2249 ++++++++++++++++++++++++++++++++++++++
drivers/memstick/core/ms_block.h | 236 ++++
5 files changed, 2504 insertions(+), 1 deletions(-)
create mode 100644 drivers/memstick/core/ms_block.c
create mode 100644 drivers/memstick/core/ms_block.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 5fa8451..089bdb7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5380,6 +5380,11 @@ W: http://tifmxx.berlios.de/
S: Maintained
F: drivers/memstick/host/tifm_ms.c
+SONY MEMORYSTICK STANDARD SUPPORT
+M: Maxim Levitsky <maximlevitsky@...il.com>
+S: Maintained
+F: drivers/memstick/core/ms_block.*
+
SOUND
M: Jaroslav Kysela <perex@...ex.cz>
M: Takashi Iwai <tiwai@...e.de>
diff --git a/drivers/memstick/core/Kconfig b/drivers/memstick/core/Kconfig
index 95f1814..f0920c2 100644
--- a/drivers/memstick/core/Kconfig
+++ b/drivers/memstick/core/Kconfig
@@ -24,3 +24,16 @@ config MSPRO_BLOCK
support. This provides a block device driver, which you can use
to mount the filesystem. Almost everyone wishing MemoryStick
support should say Y or M here.
+
+config MS_BLOCK
+ tristate "MemoryStick Standard device driver"
+ depends on BLOCK && EXPERIMENTAL
+ help
+ Say Y here to enable the MemoryStick Standard device driver
+ support. This provides a block device driver, which you can use
+ to mount the filesystem.
+	  This driver works with old (bulky) MemorySticks and with
+	  MemoryStick Duo, but not with MemoryStick PRO. Say Y if you
+	  have such a card.
+	  The driver is new and not yet well tested, so it could damage
+	  your card (even permanently).
+
diff --git a/drivers/memstick/core/Makefile b/drivers/memstick/core/Makefile
index 8b2b529..19d960b 100644
--- a/drivers/memstick/core/Makefile
+++ b/drivers/memstick/core/Makefile
@@ -7,5 +7,5 @@ ifeq ($(CONFIG_MEMSTICK_DEBUG),y)
endif
obj-$(CONFIG_MEMSTICK) += memstick.o
-
+obj-$(CONFIG_MS_BLOCK) += ms_block.o
obj-$(CONFIG_MSPRO_BLOCK) += mspro_block.o
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
new file mode 100644
index 0000000..5801dcd
--- /dev/null
+++ b/drivers/memstick/core/ms_block.c
@@ -0,0 +1,2249 @@
+/*
+ * ms_block.c - Sony MemoryStick (legacy) storage support
+ *
+ * Copyright (C) 2007 Alex Dubov <oakad@...oo.com>
+ * Copyright (C) 2010 Maxim Levitsky <maximlevitsky@...il.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Special thanks to Carlos Corbacho for providing various MemoryStick cards
+ * that made this driver possible.
+ *
+ */
+
+#include <linux/blkdev.h>
+#include <linux/mm.h>
+#include <linux/idr.h>
+#include <linux/hdreg.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/memstick.h>
+#include <linux/bitmap.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include "ms_block.h"
+
+static int major;
+static int debug;
+static int cache_flush_timeout = 1000;
+static bool verify_writes;
+
+static void dbg_dump_params(u8 intreg, struct ms_param_extra param, char *txt)
+{
+ char *transfer_mode;
+
+ if (debug < 2 || !(intreg & (MEMSTICK_INT_ERR|MEMSTICK_INT_CMDNAK)))
+ return;
+
+ switch (param.param.cp) {
+ case MEMSTICK_CP_BLOCK:
+ transfer_mode = "block";
+ break;
+ case MEMSTICK_CP_PAGE:
+ transfer_mode = "page";
+ break;
+ case MEMSTICK_CP_EXTRA:
+ transfer_mode = "oob";
+ default:
+ dbg("<bad access mode>");
+ }
+
+ dbg("IO: error on %s %s to page %d of pba %d", transfer_mode, txt,
+ param.param.page_address, param.param.block_address);
+
+ dbg("IO: int status: CMDNACK: %d, ERR %d BREQ %d CED %d",
+ !!(intreg & MEMSTICK_INT_CMDNAK),
+ !!(intreg & MEMSTICK_INT_ERR),
+ !!(intreg & MEMSTICK_INT_BREQ),
+ !!(intreg & MEMSTICK_INT_CED));
+
+ dbg("IO: transfer mode is %s", param.param.system & MEMSTICK_SYS_PAM ?
+ "parallel" : "serial");
+
+ if (param.param.block_address_msb)
+ dbg("IO: error: msb of block address not zero, is %02x",
+ param.param.block_address_msb);
+
+ dbg("IO: managmemt flag (%02x): SYSFLG: %d ATFLG %d SCMS1 %d SCMS0 %d",
+ param.extra.management_flag,
+ !!(param.extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG),
+ !!(param.extra.management_flag & MEMSTICK_MANAGEMENT_ATFLG),
+ !!(param.extra.management_flag & MEMSTICK_MANAGEMENT_SCMS1),
+ !!(param.extra.management_flag & MEMSTICK_MANAGEMENT_SCMS0)
+ );
+
+ dbg("IO: overwrite flag (%02x) UDST %d, PGST1 %d PGST0 %d BKST %d",
+ param.extra.overwrite_flag,
+ !!(param.extra.overwrite_flag & MEMSTICK_OVERWRITE_UDST),
+ !!(param.extra.overwrite_flag & MEMSTICK_OVERWRITE_PGST1),
+ !!(param.extra.overwrite_flag & MEMSTICK_OVERWRITE_PGST0),
+ !!(param.extra.overwrite_flag & MEMSTICK_OVERWRITE_BKST)
+ );
+}
+
+/*
+ * Advance scatterlist by 'consumed' bytes
+ * Returns new scatterlist, or NULL if can't advance that much
+ */
+static struct scatterlist *sg_advance(struct scatterlist *sg, int consumed)
+{
+ while (consumed >= sg->length) {
+ consumed -= sg->length;
+
+ sg = sg_next(sg);
+ if (!sg)
+ break;
+ }
+
+ WARN_ON(!sg && consumed);
+
+ if (!sg)
+ return NULL;
+
+ sg->offset += consumed;
+ sg->length -= consumed;
+
+ if (sg->offset >= PAGE_SIZE) {
+ struct page *page =
+ nth_page(sg_page(sg), sg->offset / PAGE_SIZE);
+ sg_set_page(sg, page, sg->length, sg->offset % PAGE_SIZE);
+ }
+
+ return sg;
+}
+
+/* Calculate number of sg entries in sg list */
+static int sg_nents(struct scatterlist *sg)
+{
+ int nents = 0;
+ while (sg) {
+ nents++;
+ sg = sg_next(sg);
+ }
+
+ return nents;
+}
+
+/* Calculate total length of scatterlist */
+static int sg_total_len(struct scatterlist *sg)
+{
+ int len = 0;
+ while (sg) {
+ len += sg->length;
+ sg = sg_next(sg);
+ }
+ return len;
+}
+
+/* Compare contents of an sg to a buffer */
+static bool sg_compare_to_buffer(struct scatterlist *sg, u8 *buffer, size_t len)
+{
+ unsigned long flags;
+ int retval = 0;
+ struct sg_mapping_iter miter;
+
+ if (sg_total_len(sg) < len)
+ return 1;
+
+ local_irq_save(flags);
+ sg_miter_start(&miter, sg, sg_nents(sg),
+ SG_MITER_ATOMIC | SG_MITER_FROM_SG);
+
+ while (sg_miter_next(&miter) && len > 0) {
+
+ int cmplen = min(miter.length, len);
+
+ if (memcmp(miter.addr, buffer, cmplen)) {
+ retval = 1;
+ break;
+ }
+
+ buffer += cmplen;
+ len -= cmplen;
+ }
+
+ sg_miter_stop(&miter);
+ local_irq_restore(flags);
+ return retval;
+}
+
+/* Get the zone in which the block with logical address 'lba' lives */
+static int msb_get_zone_from_lba(int lba)
+{
+ if (lba < 494)
+ return 0;
+ return ((lba - 494) / 496) + 1;
+}
+
+/* Get zone of physical block. Trivial */
+static int msb_get_zone_from_pba(int pba)
+{
+ return pba / MS_BLOCKS_IN_ZONE;
+}
+
+/* Debug test to validate free block counts */
+#ifdef DEBUG
+static int msb_validate_used_block_bitmap(struct msb_data *msb)
+{
+ int total_free_blocks = 0;
+ int i;
+
+ for (i = 0 ; i < msb->zone_count ; i++)
+ total_free_blocks += msb->free_block_count[i];
+
+ if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
+ msb->block_count) == total_free_blocks)
+ return 0;
+
+ ms_printk("BUG: free block counts don't match the bitmap");
+ msb->read_only = true;
+ return -EINVAL;
+}
+#endif
+
+/* Mark physical block as used */
+static void msb_mark_block_used(struct msb_data *msb, int pba)
+{
+ int zone = msb_get_zone_from_pba(pba);
+
+ if (test_bit(pba, msb->used_blocks_bitmap)) {
+ ms_printk("BUG: attempt to mark "
+ "already used pba %d as used", pba);
+ msb->read_only = true;
+ return;
+ }
+
+#ifdef DEBUG
+ if (msb_validate_used_block_bitmap(msb))
+ return;
+#endif
+ set_bit(pba, msb->used_blocks_bitmap);
+ msb->free_block_count[zone]--;
+}
+
+/* Mark physical block as free */
+static void msb_mark_block_unused(struct msb_data *msb, int pba)
+{
+ int zone = msb_get_zone_from_pba(pba);
+
+ if (!test_bit(pba, msb->used_blocks_bitmap)) {
+ ms_printk("BUG: attempt to mark "
+ "already unused pba %d as unused" , pba);
+ msb->read_only = true;
+ return;
+ }
+
+#ifdef DEBUG
+ if (msb_validate_used_block_bitmap(msb))
+ return;
+#endif
+ clear_bit(pba, msb->used_blocks_bitmap);
+ msb->free_block_count[zone]++;
+}
+
+/*
+ * Create an sg that spans one page of the current scatterlist;
+ * used by the read/write functions
+ */
+static void msb_set_sg(struct msb_data *msb, struct scatterlist *sg)
+{
+ int offset = msb->current_sg->offset + msb->sg_offset;
+ struct page *page = nth_page(sg_page(msb->current_sg),
+ offset >> PAGE_SHIFT);
+ sg_set_page(sg, page, msb->page_size, offset_in_page(offset));
+}
+
+/* Advances the current sg by one page. Returns error if can't */
+static int msb_advance_sg(struct msb_data *msb)
+{
+ msb->sg_offset += msb->page_size;
+
+ if (msb->sg_offset & (msb->page_size - 1)) {
+ ms_printk("BUG: sg not aligned");
+ return -EINVAL;
+ }
+
+ if (msb->sg_offset > msb->current_sg->length) {
+ dbg("BUG: sg overrun");
+ return -EINVAL;
+ }
+
+ if (msb->sg_offset == msb->current_sg->length) {
+ msb->current_sg = sg_next(msb->current_sg);
+ msb->sg_offset = 0;
+ }
+
+ if (!msb->current_sg)
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * This function is a handler for reads of one page from the device.
+ * It writes its output to msb->current_sg and takes the sector address
+ * from msb->param. It can also read extra data only; set params accordingly.
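+ *
+ * The handler is a small state machine: it writes the parameter registers,
+ * issues the BLOCK_READ command, polls the INT register until CED is set,
+ * reads back the status/extra registers and finally transfers the page data.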
+ */
+static int h_msb_read_page(struct memstick_dev *card,
+ struct memstick_request **mrq)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct scatterlist sg;
+ u8 command = MS_CMD_BLOCK_READ, intreg;
+
+ memstick_fetch_request(card, mrq);
+ if ((*mrq)->error) {
+ dbg("read_page, unknown error");
+ return memstick_complete_request(card, *mrq, 0);
+ }
+again:
+ switch (card->state) {
+ case 0: /* write address of page we need*/
+ memset(&msb->param.extra, 0xFF, sizeof(msb->param.extra));
+ memstick_init_req(*mrq, MS_TPC_WRITE_REG,
+ (unsigned char *)&msb->param, sizeof(msb->param));
+ break;
+
+ case 1: /* execute the read command*/
+ command = MS_CMD_BLOCK_READ;
+ memstick_init_req(*mrq, MS_TPC_SET_CMD, &command, 1);
+ break;
+
+ case 2: /* send INT request */
+ if (memstick_send_int_request(card, *mrq))
+ break;
+ card->state++;
+
+ case 3: /* poll for int bits */
+ intreg = (*mrq)->data[0];
+ dbg_dump_params(intreg, msb->param, "read");
+
+ if (intreg & MEMSTICK_INT_CMDNAK)
+ return memstick_complete_request(card, *mrq, -EIO);
+
+ if (!(intreg & MEMSTICK_INT_CED)) {
+ card->state = 2;
+ goto again;
+ }
+ memstick_finish_int_request(card);
+
+ memstick_init_req(*mrq, MS_TPC_READ_REG, NULL,
+ sizeof(struct ms_register));
+ break;
+
+ case 4: /* check registers, then read memstick buffer*/
+ msb->out_regs = *(struct ms_register *)(*mrq)->data;
+
+
+ if (msb->param.param.cp == MEMSTICK_CP_EXTRA) {
+ card->state++;
+ goto again;
+ }
+
+ msb_set_sg(msb, &sg);
+ memstick_init_req_sg(*mrq, MS_TPC_READ_LONG_DATA, &sg);
+ break;
+
+ case 5: /* done */
+
+ if (msb->out_regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
+ dbg("read_page: uncorrectable error");
+ return memstick_complete_request(card, *mrq, -EBADMSG);
+ }
+
+ if (msb->out_regs.status.status1 & MEMSTICK_CORR_ERROR) {
+ dbg("read_page: correctable error");
+ return memstick_complete_request(card, *mrq, -EUCLEAN);
+ } else
+ return memstick_complete_request(card, *mrq, 0);
+ default:
+ BUG();
+ }
+ card->state++;
+ return 0;
+}
+
+/*
+ * Handler for writes of exactly one block.
+ * Takes the address from msb->param and writes the same extra data,
+ * also taken from msb->param, to every page.
+ * Returns -EBADMSG if the write fails due to an uncorrectable error, or
+ * -EIO if the device refuses the command or something else goes wrong.
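+ *
+ * The handler writes the parameter registers once, issues BLOCK_WRITE and
+ * then, for every page, waits for BREQ before sending the page data; after
+ * the last page it polls the INT register until CED signals completion.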
+ */
+static int h_msb_write_block(struct memstick_dev *card,
+ struct memstick_request **mrq)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct scatterlist sg;
+ u8 intreg, command = MS_CMD_BLOCK_WRITE;
+
+ memstick_fetch_request(card, mrq);
+ if ((*mrq)->error)
+ return memstick_complete_request(card, *mrq, 0);
+
+again:
+ switch (card->state) {
+ case 0: /* write address of page we need + its oob*/
+ if (msb->param.param.page_address) {
+ ms_printk(
+ "BUG: attempt to write block from non zero offset!");
+ return memstick_complete_request(card, *mrq, -EINVAL);
+ }
+
+ msb->current_page = 0;
+ msb->param.param.cp = MEMSTICK_CP_BLOCK;
+ memstick_init_req(*mrq, MS_TPC_WRITE_REG,
+ (unsigned char *)&msb->param, sizeof(msb->param));
+ break;
+
+ case 1: /* execute the write command*/
+ memstick_init_req(*mrq, MS_TPC_SET_CMD, &command, 1);
+ break;
+
+ case 2: /* send INT request */
+ if (memstick_send_int_request(card, *mrq))
+ break;
+ card->state++;
+
+ case 3: /* poll for int bits */
+ intreg = (*mrq)->data[0];
+ dbg_dump_params(intreg, msb->param, "write");
+
+ /* errors mean out of here, and fast... */
+ if (intreg & (MEMSTICK_INT_CMDNAK))
+ return memstick_complete_request(card, *mrq, -EIO);
+
+ if (intreg & MEMSTICK_INT_ERR)
+ return memstick_complete_request(card, *mrq, -EBADMSG);
+
+
+ /* for last page we need to poll CED */
+ if (msb->current_page == msb->pages_in_block) {
+ if (intreg & MEMSTICK_INT_CED)
+ return memstick_complete_request(card, *mrq, 0);
+ card->state = 2;
+ goto again;
+
+ }
+
+ /* for non-last page we need BREQ before writing next chunk */
+ if (!(intreg & MEMSTICK_INT_BREQ)) {
+ card->state = 2;
+ goto again;
+ }
+
+ memstick_finish_int_request(card);
+
+ msb_set_sg(msb, &sg);
+ memstick_init_req_sg(*mrq, MS_TPC_WRITE_LONG_DATA, &sg);
+ (*mrq)->need_card_int = 1;
+ break;
+
+ case 4: /* switch to next page, end request if reached end */
+ msb->current_page++;
+
+ if (msb->current_page < msb->pages_in_block) {
+ if (msb_advance_sg(msb)) {
+ ms_printk(
+ "BUG: out of data while writing block WTF?");
+ return memstick_complete_request(card,
+ *mrq, -EIO);
+ }
+ }
+ card->state = 2;
+ goto again;
+ default:
+ BUG();
+ }
+ card->state++;
+ return 0;
+}
+
+/*
+ * This function is used to send simple IO requests to the device,
+ * consisting of a register write followed by a command
+ */
+static int h_msb_send_command(struct memstick_dev *card,
+ struct memstick_request **mrq)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ u8 intreg;
+
+ memstick_fetch_request(card, mrq);
+ if ((*mrq)->error) {
+ dbg("send_command: unknown error");
+ return memstick_complete_request(card, *mrq, 0);
+ }
+
+again:
+	switch (card->state) {
+
+	case 0: /* write regs */
+ memstick_init_req(*mrq, MS_TPC_WRITE_REG,
+ (unsigned char *)&msb->param, sizeof(msb->param));
+ break;
+
+ case 1: /* execute the command*/
+ memstick_init_req(*mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
+ break;
+
+ case 2: /* send INT request */
+ if (memstick_send_int_request(card, *mrq))
+ break;
+ card->state++;
+
+ case 3: /* poll for int bits */
+ intreg = (*mrq)->data[0];
+ dbg_dump_params(intreg, msb->param, "erase/write");
+
+ if (intreg & MEMSTICK_INT_CMDNAK)
+ return memstick_complete_request(card, *mrq, -EIO);
+ if (intreg & MEMSTICK_INT_ERR)
+ return memstick_complete_request(card, *mrq, -EBADMSG);
+
+
+ if (!(intreg & MEMSTICK_INT_CED)) {
+ card->state = 2;
+ goto again;
+ }
+
+ memstick_finish_int_request(card);
+ return memstick_complete_request(card, *mrq, 0);
+ }
+ card->state++;
+ return 0;
+}
+
+/* Small handler for card reset */
+static int h_msb_reset(struct memstick_dev *card,
+ struct memstick_request **mrq)
+{
+ u8 command = MS_CMD_RESET;
+
+ memstick_fetch_request(card, mrq);
+ if ((*mrq)->error)
+ return memstick_complete_request(card, *mrq, 0);
+
+ switch (card->state) {
+ case 0:
+ memstick_init_req(*mrq, MS_TPC_SET_CMD, &command, 1);
+ (*mrq)->need_card_int = 0;
+ break;
+ case 1:
+ return memstick_complete_request(card, *mrq, 0);
+ }
+ card->state++;
+ return 0;
+}
+
+/* This handler performs the switch from serial to parallel interface */
+static int h_msb_parallel_switch(struct memstick_dev *card,
+ struct memstick_request **mrq)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_host *host = card->host;
+
+ memstick_fetch_request(card, mrq);
+ if ((*mrq)->error) {
+ dbg("parallel_switch: error");
+ return memstick_complete_request(card, *mrq, 0);
+ }
+
+ switch (card->state) {
+ case 0:
+ msb->param.param.system |= MEMSTICK_SYS_PAM;
+ memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG,
+ &msb->param, sizeof(msb->param));
+ break;
+ case 1:
+ host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
+ memstick_init_req(&card->current_mrq, MS_TPC_GET_INT, NULL, 1);
+ break;
+ case 2:
+ return memstick_complete_request(card, *mrq, 0);
+ }
+ card->state++;
+ return 0;
+}
+
+static int msb_switch_to_parallel(struct msb_data *msb);
+
+/* Reset the card, to guard against hw errors being treated as bad blocks */
+static int msb_reset(struct msb_data *msb)
+{
+ bool was_parallel = msb->param.param.system & MEMSTICK_SYS_PAM;
+ struct memstick_dev *card = msb->card;
+ struct memstick_host *host = card->host;
+ int error;
+
+ msb->param.param.system = MEMSTICK_SYS_BAMD;
+
+ host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF);
+ msleep(100);
+ host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON);
+ host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
+
+ /* Reset the card */
+ error = memstick_do_request_handler(card, h_msb_reset);
+ if (error) {
+ dbg("Failed to reset the card");
+ msb->read_only = true;
+ return -ENODEV;
+ }
+
+ /* Set the permanent register window */
+ card->reg_addr.r_offset = offsetof(struct ms_register, status);
+ card->reg_addr.r_length = sizeof(struct ms_register);
+ card->reg_addr.w_offset = offsetof(struct ms_register, param);
+ card->reg_addr.w_length = sizeof(struct ms_param_extra);
+
+ error = memstick_set_rw_addr(card);
+ if (error) {
+ dbg("Failed to set RW reg window");
+ msb->read_only = true;
+ return error;
+ }
+
+ if (was_parallel)
+ msb_switch_to_parallel(msb);
+ return 0;
+}
+
+/* Attempts to switch interface to parallel mode */
+static int msb_switch_to_parallel(struct msb_data *msb)
+{
+ int error;
+
+ error = memstick_do_request_handler(msb->card, h_msb_parallel_switch);
+ if (error) {
+
+ ms_printk("Switch to parallel failed");
+ msb->param.param.system &= ~MEMSTICK_SYS_PAM;
+ msb_reset(msb);
+ return -EFAULT;
+ }
+
+ msb->card->caps |= MEMSTICK_CAP_AUTO_GET_INT;
+ return 0;
+}
+
+/* Changes overwrite flag on a page */
+static int msb_set_overwrite_flag(struct msb_data *msb,
+ u16 pba, u8 page, u8 flag)
+{
+ if (msb->read_only)
+ return -EROFS;
+
+ msb->param.param.block_address = cpu_to_be16(pba);
+ msb->param.param.page_address = page;
+ msb->param.param.cp = MEMSTICK_CP_OVERWRITE;
+
+ msb->param.extra.overwrite_flag = flag;
+ msb->command_value = MS_CMD_BLOCK_WRITE;
+
+ dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
+ flag, pba, page);
+ return memstick_do_request_handler(msb->card, h_msb_send_command);
+}
+
+static int msb_mark_bad(struct msb_data *msb, int pba)
+{
+ ms_printk("marking pba %d as bad", pba);
+ msb_reset(msb);
+ return msb_set_overwrite_flag(
+ msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
+}
+
+static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
+{
+ dbg("marking page %d of pba %d as bad", page, pba);
+ msb_reset(msb);
+ return msb_set_overwrite_flag(msb,
+ pba, page, ~MEMSTICK_OVERWRITE_PGST0);
+}
+
+/* Erases one physical block */
+static int msb_erase_block(struct msb_data *msb, u16 pba)
+{
+ int error, try;
+ if (msb->read_only)
+ return -EROFS;
+
+ dbg_verbose("erasing pba %d", pba);
+
+ for (try = 1 ; try < 3 ; try++) {
+ msb->param.param.block_address = cpu_to_be16(pba);
+ msb->param.param.page_address = 0;
+ msb->param.param.cp = MEMSTICK_CP_BLOCK;
+ msb->command_value = MS_CMD_BLOCK_ERASE;
+
+ error = memstick_do_request_handler(msb->card,
+ h_msb_send_command);
+ if (!error || msb_reset(msb))
+ break;
+ }
+
+	if (error) {
+		ms_printk("erase failed, marking pba %d as bad", pba);
+		msb_mark_bad(msb, pba);
+		return -EIO;
+	}
+
+ dbg_verbose("erase success, marking pba %d as unused", pba);
+ msb_mark_block_unused(msb, pba);
+ set_bit(pba, msb->erased_blocks_bitmap);
+ return error;
+}
+
+/* Reads one page from device */
+static int msb_read_page(struct msb_data *msb,
+ u16 pba, u8 page, struct ms_extra_data_register *extra,
+ struct scatterlist *sg)
+{
+ int try, error;
+
+ if (sg && sg->length < msb->page_size) {
+ ms_printk(
+ "BUG: attempt to read pba %d page %d with too small sg",
+ pba, page);
+ return -EINVAL;
+ }
+
+ if (pba == MS_BLOCK_INVALID) {
+ u8 *ptr;
+ unsigned long flags;
+
+ dbg_verbose("read unmapped sector. returning 0xFF");
+
+ local_irq_save(flags);
+ ptr = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
+ memset(ptr, 0xFF, msb->page_size);
+ kunmap_atomic(ptr - sg->offset, KM_IRQ0);
+ local_irq_restore(flags);
+
+ if (extra)
+ memset(extra, 0xFF, sizeof(*extra));
+ return 0;
+ }
+
+ if (pba >= msb->block_count) {
+ ms_printk("BUG: attempt to read beyong"
+ " the end of card at pba %d", pba);
+ return -EINVAL;
+ }
+
+ for (try = 1 ; try < 3 ; try++) {
+ msb->param.param.block_address = cpu_to_be16(pba);
+ msb->param.param.page_address = page;
+ msb->param.param.cp = MEMSTICK_CP_PAGE;
+
+ msb->current_sg = sg;
+ msb->sg_offset = 0;
+ error = memstick_do_request_handler(msb->card, h_msb_read_page);
+
+ if (extra)
+ *extra = msb->out_regs.extra_data;
+
+ if (error == -EUCLEAN) {
+ ms_printk("correctable error on pba %d, page %d",
+ pba, page);
+ error = 0;
+ }
+
+ if (!error || msb_reset(msb))
+ break;
+ }
+
+ /* Mark bad pages */
+ if (error == -EBADMSG) {
+ ms_printk("uncorrectable error on read of pba %d, page %d",
+ pba, page);
+
+ if (msb->param.extra.overwrite_flag &
+ MEMSTICK_OVERWRITE_PGST0)
+ msb_mark_page_bad(msb, pba, page);
+ return -EBADMSG;
+ }
+
+ if (error)
+ ms_printk("read of pba %d, page %d failed with error %d",
+ pba, page, error);
+ return error;
+}
+
+/* Reads oob of page only */
+static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
+ struct ms_extra_data_register *extra)
+{
+ int error;
+ BUG_ON(!extra);
+
+ msb->param.param.block_address = cpu_to_be16(pba);
+ msb->param.param.page_address = page;
+ msb->param.param.cp = MEMSTICK_CP_EXTRA;
+
+	if (pba >= msb->block_count) {
+		ms_printk("BUG: attempt to read beyond"
+ " the end of card at pba %d", pba);
+ return -EINVAL;
+ }
+
+ error = memstick_do_request_handler(msb->card, h_msb_read_page);
+ *extra = msb->out_regs.extra_data;
+
+ if (error == -EUCLEAN) {
+ ms_printk("correctable error on pba %d, page %d",
+ pba, page);
+ return 0;
+ }
+
+ return error;
+}
+
+
+/* Reads a block and compares it with data contained in scatterlist orig_sg */
+static int msb_verify_block(struct msb_data *msb, u16 pba,
+ struct scatterlist *orig_sg)
+{
+ struct scatterlist sg;
+ int page = 0, error;
+
+ while (page < msb->pages_in_block) {
+ sg_init_one(&sg, msb->block_buffer +
+ page * msb->page_size, msb->page_size);
+
+ error = msb_read_page(msb, pba, page, NULL, &sg);
+ if (error)
+ return -EIO;
+ page++;
+ }
+
+ if (sg_compare_to_buffer(orig_sg, msb->block_buffer, msb->block_size))
+ return -EIO;
+ return 0;
+}
+
+/* Writes exactly one block + oob */
+static int msb_write_block(struct msb_data *msb,
+ u16 pba, u32 lba, struct scatterlist *sg)
+{
+ int error, current_try = 1;
+ BUG_ON(sg->length < msb->page_size);
+
+ if (msb->read_only)
+ return -EROFS;
+
+ if (sg_total_len(sg) < msb->block_size) {
+ ms_printk("BUG: write: sg underrrun");
+ return -EINVAL;
+ }
+
+ if (pba == MS_BLOCK_INVALID) {
+ ms_printk(
+ "BUG: write: attempt to write MS_BLOCK_INVALID block");
+ return -EINVAL;
+ }
+
+ if (pba >= msb->block_count || lba >= msb->logical_block_count) {
+ ms_printk("BUG: write: attempt to write beyong end of device");
+ return -EINVAL;
+ }
+
+ if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
+ ms_printk("BUG: write: lba zone mismatch");
+ return -EINVAL;
+ }
+
+ if (pba == msb->boot_block_locations[0] ||
+ pba == msb->boot_block_locations[1]) {
+ ms_printk("BUG: write: attempt to write to boot blocks!");
+ return -EINVAL;
+ }
+
+ while (1) {
+
+ if (msb->read_only)
+ return -EROFS;
+
+ msb->param.param.block_address = cpu_to_be16(pba);
+ msb->param.param.page_address = 0;
+ msb->param.extra.logical_address = cpu_to_be16(lba);
+ msb->param.extra.management_flag = 0xFF;
+ msb->param.extra.overwrite_flag = 0xF8;
+
+ msb->current_sg = sg;
+ msb->sg_offset = 0;
+ error = memstick_do_request_handler(msb->card,
+ h_msb_write_block);
+
+
+		/* The block we just wrote is assumed to be erased, since its
+		   pba was erased beforehand. If it was not actually erased,
+		   the write still succeeds but only clears bits that were set
+		   in the block, so verify that what we wrote matches what we
+		   expect. Blocks that we erased ourselves are trusted. */
+ if (!error && (verify_writes ||
+ !test_bit(pba, msb->erased_blocks_bitmap)))
+ error = msb_verify_block(msb, pba, sg);
+
+ if (!error)
+ break;
+
+ if (current_try > 1 || msb_reset(msb))
+ break;
+
+ ms_printk("write failed, trying to erase the pba %d", pba);
+ error = msb_erase_block(msb, pba);
+ if (error)
+ break;
+
+ current_try++;
+ }
+ return error;
+}
+
+/* Finds a free block for write replacement */
+static u16 msb_get_free_block(struct msb_data *msb, int zone)
+{
+ u16 pos;
+ int pba = zone * MS_BLOCKS_IN_ZONE;
+ int i;
+
+ get_random_bytes(&pos, sizeof(pos));
+
+ if (!msb->free_block_count[zone]) {
+ ms_printk("NO free blocks in zone %d to use for write"
+ "(media WORN out) switching to RO mode", zone);
+ msb->read_only = true;
+ return MS_BLOCK_INVALID;
+ }
+
+ pos %= msb->free_block_count[zone];
+
+ dbg_verbose("have %d choices for free block, selected randomally: %d",
+ msb->free_block_count[zone], pos);
+
+ pba = find_next_zero_bit(msb->used_blocks_bitmap,
+ msb->block_count, pba);
+
+ for (i = 0 ; i < pos ; ++i)
+ pba = find_next_zero_bit(msb->used_blocks_bitmap,
+ msb->block_count, pba + 1);
+
+ dbg_verbose("result of free blocks scan: pba %d", pba);
+
+ if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
+ ms_printk("BUG: cant get free block");
+ msb->read_only = true;
+ return MS_BLOCK_INVALID;
+ }
+
+ msb_mark_block_used(msb, pba);
+ return pba;
+}
+
+static int msb_update_block(struct msb_data *msb, u16 lba,
+ struct scatterlist *sg)
+{
+ u16 pba, new_pba;
+ int error, try;
+
+ pba = msb->lba_to_pba_table[lba];
+ dbg_verbose("start of block update at lba %d, pba %d", lba, pba);
+
+ if (pba != MS_BLOCK_INVALID) {
+ dbg_verbose("setting update flag on the block");
+ msb_set_overwrite_flag(msb, pba, 0,
+ 0xFF & ~MEMSTICK_OVERWRITE_UDST);
+ }
+
+ for (try = 0 ; try < 3 ; try++) {
+ new_pba = msb_get_free_block(msb,
+ msb_get_zone_from_lba(lba));
+
+ if (new_pba == MS_BLOCK_INVALID) {
+ error = -EIO;
+ goto out;
+ }
+
+ dbg_verbose("block update: writing updated block to pba %d",
+ new_pba);
+
+ error = msb_write_block(msb, new_pba, lba, sg);
+ if (error == -EBADMSG) {
+ msb_mark_bad(msb, new_pba);
+ continue;
+ }
+
+ if (error)
+ goto out;
+
+ dbg_verbose("block update: erasing the old block");
+ msb_erase_block(msb, pba);
+ msb->lba_to_pba_table[lba] = new_pba;
+ return 0;
+ }
+out:
+ if (error) {
+ ms_printk("block update error after %d tries, "
+ "switching to r/o mode", try);
+ msb->read_only = true;
+ }
+ return error;
+}
+
+/* Converts endianness of the boot block fields for easy use */
+static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
+{
+ p->header.block_id = be16_to_cpu(p->header.block_id);
+ p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
+ p->entry.disabled_block.start_addr
+ = be32_to_cpu(p->entry.disabled_block.start_addr);
+ p->entry.disabled_block.data_size
+ = be32_to_cpu(p->entry.disabled_block.data_size);
+ p->entry.cis_idi.start_addr
+ = be32_to_cpu(p->entry.cis_idi.start_addr);
+ p->entry.cis_idi.data_size
+ = be32_to_cpu(p->entry.cis_idi.data_size);
+ p->attr.block_size = be16_to_cpu(p->attr.block_size);
+ p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
+ p->attr.number_of_effective_blocks
+ = be16_to_cpu(p->attr.number_of_effective_blocks);
+ p->attr.page_size = be16_to_cpu(p->attr.page_size);
+ p->attr.memory_manufacturer_code
+ = be16_to_cpu(p->attr.memory_manufacturer_code);
+ p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
+ p->attr.implemented_capacity
+ = be16_to_cpu(p->attr.implemented_capacity);
+ p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
+ p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
+}
+
+static int msb_read_boot_blocks(struct msb_data *msb)
+{
+ int pba = 0;
+ struct scatterlist sg;
+ struct ms_extra_data_register extra;
+ struct ms_boot_page *page;
+
+ msb->boot_block_locations[0] = MS_BLOCK_INVALID;
+ msb->boot_block_locations[1] = MS_BLOCK_INVALID;
+ msb->boot_block_count = 0;
+
+ dbg_verbose("Start of scan for boot blocks");
+
+ if (!msb->boot_page) {
+ page = kmalloc(sizeof(struct ms_boot_page) * 2, GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ msb->boot_page = page;
+	} else
+		page = msb->boot_page;
+
+ msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;
+
+ for (pba = 0 ; pba < MS_BLOCK_MAX_BOOT_ADDR ; pba++) {
+
+ sg_init_one(&sg, page, sizeof(*page));
+ if (msb_read_page(msb, pba, 0, &extra, &sg)) {
+ dbg("boot scan: can't read pba %d", pba);
+ continue;
+ }
+
+ if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
+ dbg("managment flag doesn't indicate boot block %d",
+ pba);
+ continue;
+ }
+
+ if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
+ dbg("pba at %d doesn' contain boot block ID", pba);
+ continue;
+ }
+
+ msb_fix_boot_page_endianness(page);
+ msb->boot_block_locations[msb->boot_block_count] = pba;
+
+ page++;
+ msb->boot_block_count++;
+
+ if (msb->boot_block_count == 2)
+ break;
+ }
+
+ if (!msb->boot_block_count) {
+ ms_printk("media doesn't contain master page, aborting");
+ return -EIO;
+ }
+
+ dbg_verbose("End of scan for boot blocks");
+ return 0;
+}
+
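+/*
+ * The boot block embeds a table of factory-marked bad blocks; read it and
+ * mark those blocks as used so the FTL never allocates them.
+ */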
+static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
+{
+ struct ms_boot_page *boot_block;
+ struct scatterlist sg;
+ struct scatterlist *sg_ptr = &sg;
+ u16 *buffer = NULL;
+
+ int i, error = 0;
+ int data_size, data_offset, page, page_offset, size_to_read;
+ u16 pba;
+
+ BUG_ON(block_nr > 1);
+
+ boot_block = &msb->boot_page[block_nr];
+ pba = msb->boot_block_locations[block_nr];
+
+ if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
+ return -EINVAL;
+
+ data_size = boot_block->entry.disabled_block.data_size;
+ data_offset = sizeof(struct ms_boot_page) +
+ boot_block->entry.disabled_block.start_addr;
+ if (!data_size)
+ return 0;
+
+ page = data_offset / msb->page_size;
+ page_offset = data_offset % msb->page_size;
+ size_to_read =
+ DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
+ msb->page_size;
+
+ dbg("reading bad block of boot block at pba %d, offset %d len %d",
+ pba, data_offset, data_size);
+
+ buffer = kzalloc(size_to_read, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+
+ /* Read the buffer */
+ sg_init_one(&sg, buffer, size_to_read);
+
+ while (sg_ptr) {
+ error = msb_read_page(msb, pba, page, NULL, sg_ptr);
+ if (error)
+ goto out;
+
+ sg_ptr = sg_advance(sg_ptr, msb->page_size);
+ page++;
+ if (page == msb->pages_in_block) {
+ ms_printk("bad block table extends beyong boot block");
+ break;
+ }
+ }
+
+ /* Process the bad block table */
+ for (i = page_offset ; i < data_size / sizeof(u16) ; i++) {
+
+ u16 bad_block = be16_to_cpu(buffer[i]);
+
+ if (bad_block >= msb->block_count) {
+ dbg("bad block table contains invalid block %d",
+ bad_block);
+ continue;
+ }
+
+ if (test_bit(bad_block, msb->used_blocks_bitmap)) {
+ dbg("duplicate bad block %d in the table",
+ bad_block);
+ continue;
+ }
+
+ dbg("block %d is marked as factory bad", bad_block);
+ msb_mark_block_used(msb, bad_block);
+ }
+out:
+ kfree(buffer);
+ return error;
+}
+
+static int msb_ftl_initialize(struct msb_data *msb)
+{
+ int i;
+
+ if (msb->ftl_initialized)
+ return 0;
+
+ msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
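+	/* Zone 0 provides 494 logical blocks (two are reserved), every
+	   following zone provides 496; see msb_get_zone_from_lba() */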
+ msb->logical_block_count = msb->zone_count * 496 - 2;
+
+ msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
+
+ msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
+
+ msb->lba_to_pba_table =
+ kmalloc(msb->logical_block_count * sizeof(u16), GFP_KERNEL);
+
+ if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
+ !msb->erased_blocks_bitmap) {
+ kfree(msb->used_blocks_bitmap);
+ kfree(msb->lba_to_pba_table);
+ kfree(msb->erased_blocks_bitmap);
+ return -ENOMEM;
+ }
+
+ for (i = 0 ; i < msb->zone_count ; i++)
+ msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
+
+ memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
+ msb->logical_block_count * sizeof(u16));
+
+ dbg("initial FTL tables created. Zone count = %d, "
+ "Logical block count = %d",
+ msb->zone_count, msb->logical_block_count);
+
+ msb->ftl_initialized = true;
+ return 0;
+}
+
+static int msb_ftl_scan(struct msb_data *msb)
+{
+ u16 pba, lba, other_block;
+	u8 overwrite_flag, management_flag, other_overwrite_flag;
+ int error;
+ struct ms_extra_data_register extra;
+
+ u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
+ if (!overwrite_flags)
+ return -ENOMEM;
+
+ dbg("Start of media scanning");
+
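+	/* Walk every physical block and classify it from its OOB: boot block,
+	   factory bad, bad (BKST cleared), system/DRM, temporary table, free,
+	   or mapped to an LBA; collisions between two blocks claiming the
+	   same LBA are resolved with the update (UDST) flag */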
+ for (pba = 0 ; pba < msb->block_count ; pba++) {
+
+
+ if (pba == msb->boot_block_locations[0] ||
+ pba == msb->boot_block_locations[1]) {
+ dbg_verbose("pba %05d -> [boot block]", pba);
+ msb_mark_block_used(msb, pba);
+ continue;
+ }
+
+ if (test_bit(pba, msb->used_blocks_bitmap)) {
+ dbg_verbose("pba %05d -> [factory bad]", pba);
+ continue;
+ }
+
+ error = msb_read_oob(msb, pba, 0, &extra);
+
+ /* can't trust the page if we can't read the oob */
+ if (error == -EBADMSG) {
+ ms_printk(
+ "oob of pba %d damaged, will try to erase it", pba);
+ msb_mark_block_used(msb, pba);
+ msb_erase_block(msb, pba);
+ continue;
+ } else if (error)
+ return error;
+
+ lba = be16_to_cpu(extra.logical_address);
+		management_flag = extra.management_flag;
+ overwrite_flag = extra.overwrite_flag;
+ overwrite_flags[pba] = overwrite_flag;
+
+ /* Skip bad blocks */
+ if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
+ dbg("pba %05d -> [BAD]", pba);
+ msb_mark_block_used(msb, pba);
+ continue;
+ }
+
+
+ /* Skip system/drm blocks */
+		if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
+			MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
+			dbg("pba %05d -> [reserved management flag %02x]",
+				pba, management_flag);
+ msb_mark_block_used(msb, pba);
+ continue;
+ }
+
+
+ /* Erase temporary tables */
+		if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
+ dbg("pba %05d -> [temp table] - will erase", pba);
+
+ msb_mark_block_used(msb, pba);
+ msb_erase_block(msb, pba);
+ continue;
+ }
+
+ if (lba == MS_BLOCK_INVALID) {
+ dbg_verbose("pba %05d -> [free]", pba);
+ continue;
+ }
+
+
+ msb_mark_block_used(msb, pba);
+
+ /* Block has LBA not according to zoning*/
+ if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
+ ms_printk("pba %05d -> [bad lba %05d] - will erase",
+ pba, lba);
+ msb_erase_block(msb, pba);
+ continue;
+ }
+
+ /* No collisions - great */
+ if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
+ dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
+ msb->lba_to_pba_table[lba] = pba;
+ continue;
+ }
+
+ other_block = msb->lba_to_pba_table[lba];
+ other_overwrite_flag = overwrite_flags[other_block];
+
+ ms_printk("Collision between pba %d and pba %d",
+ pba, other_block);
+
+ if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
+ ms_printk("pba %d is marked as stable, use it", pba);
+ msb_erase_block(msb, other_block);
+ msb->lba_to_pba_table[lba] = pba;
+ continue;
+ }
+
+ if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
+ ms_printk("pba %d is marked as stable, use it",
+ other_block);
+ msb_erase_block(msb, pba);
+ continue;
+ }
+
+ ms_printk("collision between blocks %d and %d with"
+ " without stable flag set on both, erasing pba %d",
+ pba, other_block, other_block);
+
+ msb_erase_block(msb, other_block);
+ msb->lba_to_pba_table[lba] = pba;
+ }
+
+ dbg("End of media scanning");
+ kfree(overwrite_flags);
+ return 0;
+}
+
+static void msb_cache_flush_timer(unsigned long data)
+{
+ struct msb_data *msb = (struct msb_data *)data;
+ msb->need_flush_cache = true;
+ wake_up_process(msb->io_thread);
+}
+
+
+static void msb_cache_discard(struct msb_data *msb)
+{
+ if (msb->cache_block_lba == MS_BLOCK_INVALID)
+ return;
+
+ dbg_verbose("Discarding the write cache");
+ msb->cache_block_lba = MS_BLOCK_INVALID;
+ bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
+ del_timer(&msb->cache_flush_timer);
+}
+
+static int msb_cache_init(struct msb_data *msb)
+{
+ setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer,
+ (unsigned long)msb);
+
+ if (!msb->cache)
+ msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
+ if (!msb->cache)
+ return -ENOMEM;
+
+ msb_cache_discard(msb);
+ return 0;
+}
+
+static int msb_cache_flush(struct msb_data *msb)
+{
+ struct scatterlist sg;
+ struct ms_extra_data_register extra;
+ int page, offset, error;
+ u16 pba, lba;
+
+ if (msb->read_only)
+ return -EROFS;
+
+ if (msb->cache_block_lba == MS_BLOCK_INVALID)
+ return 0;
+
+ lba = msb->cache_block_lba;
+ pba = msb->lba_to_pba_table[lba];
+
+ dbg_verbose("Flusing the write cache of pba %d (LBA %d)",
+ pba, msb->cache_block_lba);
+
+ /* Read all missing pages in cache */
+ for (page = 0 ; page < msb->pages_in_block ; page++) {
+
+ if (test_bit(page, &msb->valid_cache_bitmap))
+ continue;
+
+ offset = page * msb->page_size;
+ sg_init_one(&sg, msb->cache + offset , msb->page_size);
+
+
+ dbg_verbose("reading non-present sector %d of cache block %d",
+ page, lba);
+ error = msb_read_page(msb, pba, page, &extra, &sg);
+
+ /* Bad pages are copied with 00 page status */
+ if (error == -EBADMSG) {
+ ms_printk("read error on sector %d, contents probably"
+ " damaged", page);
+ continue;
+ }
+
+ if (error)
+ return error;
+
+ if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
+ MEMSTICK_OV_PG_NORMAL) {
+ dbg("page %d is marked as bad", page);
+ continue;
+ }
+
+ set_bit(page, &msb->valid_cache_bitmap);
+ }
+
+ /* Write the cache now */
+ sg_init_one(&sg, msb->cache , msb->block_size);
+ error = msb_update_block(msb, msb->cache_block_lba, &sg);
+ pba = msb->lba_to_pba_table[msb->cache_block_lba];
+
+ /* Mark invalid pages */
+ if (!error) {
+ for (page = 0 ; page < msb->pages_in_block ; page++) {
+
+ if (test_bit(page, &msb->valid_cache_bitmap))
+ continue;
+
+ dbg("marking page %d as containing damaged data",
+ page);
+ msb_set_overwrite_flag(msb,
+ pba , page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
+ }
+ }
+
+ msb_cache_discard(msb);
+ return error;
+}
+
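+/*
+ * Add one page to the write cache. The cache holds at most one block:
+ * a write to a different LBA flushes it first, and an idle cache is
+ * flushed by cache_flush_timer after cache_flush_timeout msec (the timer
+ * wakes the IO thread, which performs the flush).
+ */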
+static int msb_cache_write(struct msb_data *msb, int lba,
+ int page, bool add_to_cache_only, struct scatterlist *sg)
+{
+ int error;
+ if (msb->read_only)
+ return -EROFS;
+
+ if (msb->cache_block_lba == MS_BLOCK_INVALID ||
+ lba != msb->cache_block_lba)
+ if (add_to_cache_only)
+ return 0;
+
+ /* If we need to write different block */
+ if (msb->cache_block_lba != MS_BLOCK_INVALID &&
+ lba != msb->cache_block_lba) {
+ dbg_verbose("first flush the cache");
+ error = msb_cache_flush(msb);
+ if (error)
+ return error;
+ }
+
+ if (msb->cache_block_lba == MS_BLOCK_INVALID) {
+ msb->cache_block_lba = lba;
+ mod_timer(&msb->cache_flush_timer,
+ jiffies + msecs_to_jiffies(cache_flush_timeout));
+ }
+
+ dbg_verbose("Write of LBA %d page %d to cache ", lba, page);
+
+ sg_copy_to_buffer(sg, 1, msb->cache + page * msb->page_size,
+ msb->page_size);
+ set_bit(page, &msb->valid_cache_bitmap);
+ return 0;
+}
+
+static int msb_cache_read(struct msb_data *msb, int lba,
+ int page, struct scatterlist *sg)
+{
+ int pba = msb->lba_to_pba_table[lba];
+ int error = 0;
+
+ if (lba == msb->cache_block_lba &&
+ test_bit(page, &msb->valid_cache_bitmap)) {
+
+ dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
+ lba, pba, page);
+ sg_copy_from_buffer(sg, 1,
+ msb->cache + msb->page_size * page,
+ msb->page_size);
+ } else {
+ dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
+ lba, pba, page);
+
+ error = msb_read_page(msb, pba, page, NULL, sg);
+ if (error)
+ return error;
+
+ msb_cache_write(msb, lba, page, true, sg);
+ }
+ return error;
+}
+
+
+static const struct chs_entry chs_table[] = {
+ { 4, 16, 247, 2 },
+ { 8, 16, 495, 2 },
+ { 16, 16, 495, 4 },
+ { 32, 16, 991, 4 },
+ { 64, 16, 991, 8 },
+ {128, 16, 991, 16 },
+ { 0 }
+};
+
+/* Load information about the card */
+static int msb_init_card(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_host *host = card->host;
+ struct ms_boot_page *boot_block;
+ int error = 0, i, raw_size_in_megs;
+
+ card->caps = 0;
+
+ if (card->id.class >= MEMSTICK_CLASS_ROM &&
+ card->id.class <= MEMSTICK_CLASS_ROM)
+ msb->read_only = true;
+
+ error = msb_reset(msb);
+ if (error)
+ return error;
+
+ msb->page_size = sizeof(struct ms_boot_page);
+
+ /* Read the boot page */
+ error = msb_read_boot_blocks(msb);
+ if (error)
+ return -EIO;
+
+ boot_block = &msb->boot_page[0];
+
+	/* Save interesting attributes from the boot page */
+ msb->block_count = boot_block->attr.number_of_blocks;
+ msb->page_size = boot_block->attr.page_size;
+
+ msb->pages_in_block = boot_block->attr.block_size * 2;
+ msb->block_size = msb->page_size * msb->pages_in_block;
+
+ if (msb->page_size > PAGE_SIZE) {
+		/* this is nonsensical, so don't bother */
+ dbg("device page %d size isn't supported", msb->page_size);
+ return -EINVAL;
+ }
+
+ msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
+ if (!msb->block_buffer)
+ return -ENOMEM;
+
+
+ raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
+
+ for (i = 0 ; chs_table[i].size ; i++) {
+
+ if (chs_table[i].size != raw_size_in_megs)
+ continue;
+
+ msb->geometry.cylinders = chs_table[i].cyl;
+ msb->geometry.heads = chs_table[i].head;
+ msb->geometry.sectors = chs_table[i].sec;
+ break;
+ }
+
+
+ if (boot_block->attr.transfer_supporting == 1)
+ card->caps |= MEMSTICK_CAP_PAR4;
+
+ if (boot_block->attr.device_type & 0x03)
+ msb->read_only = true;
+
+ dbg("Total block count = %d", msb->block_count);
+ dbg("Each block consists of %d pages", msb->pages_in_block);
+ dbg("Page size = %d bytes", msb->page_size);
+ dbg("Parallel mode supported: %d", !!(card->caps & MEMSTICK_CAP_PAR4));
+ dbg("Read only: %d", msb->read_only);
+
+ /* Now we can switch the interface */
+ if (host->caps & card->caps & MEMSTICK_CAP_PAR4)
+ msb_switch_to_parallel(msb);
+
+
+ error = msb_cache_init(msb);
+ if (error)
+ return error;
+
+ error = msb_ftl_initialize(msb);
+ if (error)
+ return error;
+
+
+ /* Read the bad block table */
+ error = msb_read_bad_block_table(msb, 0);
+
+ if (error && error != -ENOMEM) {
+ dbg("failed to read bad block table from primary boot block,"
+ " trying from backup");
+ error = msb_read_bad_block_table(msb, 1);
+ }
+
+ if (error)
+ return error;
+
+ /* *drum roll* Scan the media */
+ error = msb_ftl_scan(msb);
+ if (error) {
+ ms_printk("Scan of media failed");
+ return error;
+ }
+
+ return 0;
+
+}
+
+static int msb_do_write_request(struct msb_data *msb, int lba,
+	int page, struct scatterlist *sg, int *successfully_written)
+{
+ int error = 0;
+	*successfully_written = 0;
+
+ while (sg) {
+ if (page == 0 && sg_total_len(sg) >= msb->block_size) {
+
+ if (msb->cache_block_lba == lba)
+ msb_cache_discard(msb);
+
+ dbg_verbose("Writing whole lba %d", lba);
+ error = msb_update_block(msb, lba, sg);
+ if (error)
+ return error;
+
+ sg = sg_advance(sg, msb->block_size);
+			*successfully_written += msb->block_size;
+ lba++;
+ continue;
+ }
+
+ error = msb_cache_write(msb, lba, page, false, sg);
+ if (error)
+ return error;
+
+ sg = sg_advance(sg, msb->page_size);
+		*successfully_written += msb->page_size;
+
+ page++;
+ if (page == msb->pages_in_block) {
+ page = 0;
+ lba++;
+ }
+ }
+ return 0;
+}
+
+static int msb_do_read_request(struct msb_data *msb, int lba,
+	int page, struct scatterlist *sg, int *successfully_read)
+{
+ int error = 0;
+	*successfully_read = 0;
+
+ while (sg) {
+
+ error = msb_cache_read(msb, lba, page, sg);
+ if (error)
+ return error;
+
+ sg = sg_advance(sg, msb->page_size);
+		*successfully_read += msb->page_size;
+
+ page++;
+ if (page == msb->pages_in_block) {
+ page = 0;
+ lba++;
+ }
+ }
+ return 0;
+}
+
+static int msb_io_thread(void *data)
+{
+ struct msb_data *msb = data;
+ int page, error, len;
+ sector_t lba;
+ unsigned long flags;
+
+ dbg("IO: thread started");
+
+ while (1) {
+
+ if (kthread_should_stop()) {
+ if (msb->req)
+ blk_requeue_request(msb->queue, msb->req);
+ break;
+ }
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+
+ if (msb->need_flush_cache) {
+ msb->need_flush_cache = false;
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+ msb_cache_flush(msb);
+ continue;
+ }
+
+ if (!msb->req) {
+ msb->req = blk_fetch_request(msb->queue);
+
+ if (!msb->req) {
+ dbg_verbose("IO: no more requests, sleeping");
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+ schedule();
+ dbg_verbose("IO: thread woken up");
+ continue;
+ }
+ }
+
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
+ /* If card was removed meanwhile */
+ if (!msb->req)
+ continue;
+
+ /* process the request */
+ dbg_verbose("IO: thread processing new request");
+ blk_rq_map_sg(msb->queue, msb->req, msb->req_sg);
+
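+		/* Convert the starting 512-byte sector into a device page
+		   index, then split it into the logical block number (lba)
+		   and the page offset within that block */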
+ lba = blk_rq_pos(msb->req);
+
+ sector_div(lba, msb->page_size / 512);
+ page = do_div(lba, msb->pages_in_block);
+
+
+ mutex_lock(&msb->card->host->lock);
+ if (rq_data_dir(msb->req) == READ)
+ error = msb_do_read_request(
+ msb, lba, page, msb->req_sg, &len);
+ else
+ error = msb_do_write_request(
+ msb, lba, page, msb->req_sg, &len);
+
+ mutex_unlock(&msb->card->host->lock);
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+
+ if (len)
+ if (!__blk_end_request(msb->req, 0, len))
+ msb->req = NULL;
+
+ if (error && msb->req) {
+ dbg_verbose("IO: ending one sector "
+ "of the request with error");
+ if (!__blk_end_request(msb->req, error, msb->page_size))
+ msb->req = NULL;
+ }
+
+ if (msb->req)
+ dbg_verbose("IO: request still pending");
+
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+ }
+ return 0;
+}
+
+static DEFINE_IDR(msb_disk_idr);
+static DEFINE_MUTEX(msb_disk_lock);
+
+static int msb_bd_open(struct block_device *bdev, fmode_t mode)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ struct msb_data *msb = disk->private_data;
+
+ dbg_verbose("block device open");
+
+ mutex_lock(&msb_disk_lock);
+
+ if (msb && msb->card)
+ msb->usage_count++;
+
+ mutex_unlock(&msb_disk_lock);
+ return 0;
+}
+
+
+static void msb_data_clear(struct msb_data *msb)
+{
+ kfree(msb->boot_page);
+ kfree(msb->used_blocks_bitmap);
+ kfree(msb->lba_to_pba_table);
+ kfree(msb->cache);
+ msb->card = NULL;
+}
+
+
+static int msb_disk_release(struct gendisk *disk)
+{
+ struct msb_data *msb = disk->private_data;
+ int disk_id = MINOR(disk_devt(disk)) >> MS_BLOCK_PART_SHIFT;
+
+ dbg_verbose("block device release");
+
+ mutex_lock(&msb_disk_lock);
+
+ if (msb) {
+ if (msb->usage_count)
+ msb->usage_count--;
+
+ if (!msb->usage_count) {
+ kfree(msb);
+ disk->private_data = NULL;
+ idr_remove(&msb_disk_idr, disk_id);
+ put_disk(disk);
+ }
+ }
+ mutex_unlock(&msb_disk_lock);
+ return 0;
+}
+
+static int msb_bd_release(struct gendisk *disk, fmode_t mode)
+{
+ return msb_disk_release(disk);
+}
+
+static int msb_bd_getgeo(struct block_device *bdev,
+ struct hd_geometry *geo)
+{
+ struct msb_data *msb = bdev->bd_disk->private_data;
+ *geo = msb->geometry;
+ return 0;
+}
+
+static int msb_prepare_req(struct request_queue *q, struct request *req)
+{
+ if (req->cmd_type != REQ_TYPE_FS &&
+ req->cmd_type != REQ_TYPE_BLOCK_PC) {
+ blk_dump_rq_flags(req, "MS unsupported request");
+ return BLKPREP_KILL;
+ }
+ req->cmd_flags |= REQ_DONTPREP;
+ return BLKPREP_OK;
+}
+
+static void msb_submit_req(struct request_queue *q)
+{
+ struct memstick_dev *card = q->queuedata;
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct request *req = NULL;
+
+ dbg_verbose("Submit request");
+
+ if (msb->card_dead) {
+ dbg("Refusing requests on removed card");
+
+ WARN_ON(msb->io_thread);
+
+
+ while ((req = blk_fetch_request(q)) != NULL)
+ __blk_end_request_all(req, -ENODEV);
+ return;
+ }
+
+ if (msb->req)
+ return;
+
+ if (msb->io_thread)
+ wake_up_process(msb->io_thread);
+}
+
+static int msb_check_card(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ return (msb->card_dead == 0);
+}
+
+
+static void msb_stop(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ unsigned long flags;
+ struct task_struct *io_thread;
+
+ dbg("Stopping all msblock IO");
+
+ /* Just stop the IO thread.
+	   Be careful not to race against submit_request
+ If it is called, all pending requests will be processed by
+ the IO thread as soon as msb_start is called */
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+ blk_stop_queue(msb->queue);
+ io_thread = msb->io_thread;
+ msb->io_thread = NULL;
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
+ del_timer_sync(&msb->cache_flush_timer);
+
+ if (io_thread)
+ kthread_stop(io_thread);
+}
+
+static void msb_start(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ unsigned long flags;
+ int disk_id = MINOR(disk_devt(msb->disk)) >> MS_BLOCK_PART_SHIFT;
+
+ dbg("Resuming IO from msblock");
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+ if (msb->io_thread || msb->card_dead) {
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
+
+	/* Kick a cache flush anyway, it's harmless */
+ msb->need_flush_cache = true;
+
+ msb->io_thread = kthread_run(msb_io_thread, msb, "kmemstick_block%d",
+ disk_id);
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+ blk_start_queue(msb->queue);
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+}
+
+static const struct block_device_operations msb_bdops = {
+ .open = msb_bd_open,
+ .release = msb_bd_release,
+ .getgeo = msb_bd_getgeo,
+ .owner = THIS_MODULE
+};
+
+/* Registers the block device */
+static int msb_init_disk(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_host *host = card->host;
+ int rc, disk_id;
+ u64 limit = BLK_BOUNCE_HIGH;
+ unsigned long capacity;
+
+ if (host->dev.dma_mask && *(host->dev.dma_mask))
+ limit = *(host->dev.dma_mask);
+
+ if (!idr_pre_get(&msb_disk_idr, GFP_KERNEL))
+ return -ENOMEM;
+
+ mutex_lock(&msb_disk_lock);
+ rc = idr_get_new(&msb_disk_idr, card, &disk_id);
+ mutex_unlock(&msb_disk_lock);
+
+ if (rc)
+ return rc;
+
+ if ((disk_id << MS_BLOCK_PART_SHIFT) > 255) {
+ rc = -ENOSPC;
+ goto out_release_id;
+ }
+
+ msb->disk = alloc_disk(1 << MS_BLOCK_PART_SHIFT);
+ if (!msb->disk) {
+ rc = -ENOMEM;
+ goto out_release_id;
+ }
+
+ msb->queue = blk_init_queue(msb_submit_req, &msb->q_lock);
+ if (!msb->queue) {
+ rc = -ENOMEM;
+ goto out_put_disk;
+ }
+
+ msb->queue->queuedata = card;
+ blk_queue_prep_rq(msb->queue, msb_prepare_req);
+
+ blk_queue_bounce_limit(msb->queue, limit);
+ blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
+ blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
+ blk_queue_max_segment_size(msb->queue,
+ MS_BLOCK_MAX_PAGES * msb->page_size);
+ msb->disk->major = major;
+ msb->disk->first_minor = disk_id << MS_BLOCK_PART_SHIFT;
+ msb->disk->fops = &msb_bdops;
+ msb->usage_count = 1;
+ msb->disk->private_data = msb;
+ msb->disk->queue = msb->queue;
+ msb->disk->driverfs_dev = &card->dev;
+
+ sprintf(msb->disk->disk_name, "msblk%d", disk_id);
+
+ blk_queue_logical_block_size(msb->queue, msb->page_size);
+
+ capacity = msb->pages_in_block * msb->logical_block_count;
+ capacity *= (msb->page_size / 512);
+
+ set_capacity(msb->disk, capacity);
+ dbg("set total disk size to %lu sectors", capacity);
+
+
+ if (msb->read_only)
+ set_disk_ro(msb->disk, 1);
+
+
+ msb_start(card);
+
+ mutex_unlock(&host->lock);
+ add_disk(msb->disk);
+	mutex_lock(&host->lock);
+ dbg("Disk added");
+ return 0;
+
+out_put_disk:
+ put_disk(msb->disk);
+out_release_id:
+ mutex_lock(&msb_disk_lock);
+ idr_remove(&msb_disk_idr, disk_id);
+ mutex_unlock(&msb_disk_lock);
+ return rc;
+}
+
+static int msb_probe(struct memstick_dev *card)
+{
+ struct msb_data *msb;
+ int rc = 0;
+
+ msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
+ if (!msb)
+ return -ENOMEM;
+ memstick_set_drvdata(card, msb);
+ msb->card = card;
+ spin_lock_init(&msb->q_lock);
+
+ rc = msb_init_card(card);
+ if (rc)
+ goto out_free;
+
+ rc = msb_init_disk(card);
+ if (!rc) {
+ card->check = msb_check_card;
+ card->stop = msb_stop;
+ card->start = msb_start;
+ return 0;
+ }
+
+out_free:
+ memstick_set_drvdata(card, NULL);
+ msb_data_clear(msb);
+ kfree(msb);
+ return rc;
+}
+
+static void msb_remove(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ unsigned long flags;
+
+	/* We assume that msb_stop was already called by the core */
+ WARN_ON(msb->io_thread);
+
+ mutex_unlock(&card->host->lock);
+ if (msb->io_thread)
+ msb_stop(card);
+ mutex_lock(&card->host->lock);
+
+ dbg("Removing the disk device");
+
+ /* Take care of unhandled + new requests from now on */
+ spin_lock_irqsave(&msb->q_lock, flags);
+ msb->card_dead = true;
+ blk_start_queue(msb->queue);
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
+ /* Remove the disk */
+ del_gendisk(msb->disk);
+ blk_cleanup_queue(msb->queue);
+ msb->queue = NULL;
+
+ mutex_lock(&msb_disk_lock);
+ msb_data_clear(msb);
+ mutex_unlock(&msb_disk_lock);
+
+ msb_disk_release(msb->disk);
+ memstick_set_drvdata(card, NULL);
+}
+
+#ifdef CONFIG_PM
+
+static int msb_suspend(struct memstick_dev *card, pm_message_t state)
+{
+ msb_stop(card);
+ return 0;
+}
+
+static int msb_resume(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct msb_data *new_msb = NULL;
+ bool card_dead = true;
+
+
+#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
+ msb->card_dead = true;
+ return 0;
+#endif
+
+ mutex_lock(&card->host->lock);
+
+ new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
+ if (!new_msb)
+ goto out;
+
+ new_msb->card = card;
+ memstick_set_drvdata(card, new_msb);
+
+
+ if (msb_init_card(card))
+ goto out;
+
+ if (msb->block_size != new_msb->block_size)
+ goto out;
+
+ if (memcmp(msb->boot_page, new_msb->boot_page,
+ sizeof(struct ms_boot_page)))
+ goto out;
+
+ if (msb->logical_block_count != new_msb->logical_block_count ||
+ memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
+ msb->logical_block_count))
+ goto out;
+
+
+ if (msb->block_count != new_msb->block_count ||
+ memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
+ msb->block_count / 8))
+ goto out;
+
+ card_dead = false;
+
+out:
+
+ if (card_dead)
+ dbg("Card was removed/replaced during suspend");
+
+ msb->card_dead = card_dead;
+ memstick_set_drvdata(card, msb);
+
+ if (new_msb) {
+ msb_data_clear(new_msb);
+ kfree(new_msb);
+ }
+
+ msb_start(card);
+
+ mutex_unlock(&card->host->lock);
+ return 0;
+}
+
+#else
+
+#define msb_suspend NULL
+#define msb_resume NULL
+
+#endif /* CONFIG_PM */
+
+static struct memstick_device_id msb_id_tbl[] = {
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+ MEMSTICK_CLASS_FLASH},
+
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+ MEMSTICK_CLASS_ROM},
+
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+ MEMSTICK_CLASS_RO},
+
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+ MEMSTICK_CLASS_WP},
+
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
+ MEMSTICK_CLASS_DUO},
+ {}
+};
+MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
+
+
+static struct memstick_driver msb_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE
+ },
+ .id_table = msb_id_tbl,
+ .probe = msb_probe,
+ .remove = msb_remove,
+ .suspend = msb_suspend,
+ .resume = msb_resume
+};
+
+static int __init msb_init(void)
+{
+ int rc = -ENOMEM;
+
+ rc = register_blkdev(major, DRIVER_NAME);
+ if (rc < 0) {
+ printk(KERN_ERR DRIVER_NAME ": failed to register "
+ "major %d, error %d\n", major, rc);
+ return rc;
+ }
+ if (!major)
+ major = rc;
+
+ rc = memstick_register_driver(&msb_driver);
+ if (rc)
+ unregister_blkdev(major, DRIVER_NAME);
+ return rc;
+}
+
+static void __exit msb_exit(void)
+{
+ memstick_unregister_driver(&msb_driver);
+ unregister_blkdev(major, DRIVER_NAME);
+ idr_destroy(&msb_disk_idr);
+}
+
+module_init(msb_init);
+module_exit(msb_exit);
+
+module_param(major, int, S_IRUGO);
+MODULE_PARM_DESC(major, "Block device major number (default: dynamic allocation)");
+
+module_param(cache_flush_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(cache_flush_timeout,
+ "Cache flush timeout in msec (1000 default)");
+
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-3)");
+
+module_param(verify_writes, bool, S_IRUGO);
+MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alex Dubov");
+MODULE_AUTHOR("Maxim Levitsky");
+MODULE_DESCRIPTION("Sony MemoryStick block device driver");
diff --git a/drivers/memstick/core/ms_block.h b/drivers/memstick/core/ms_block.h
new file mode 100644
index 0000000..732d89c
--- /dev/null
+++ b/drivers/memstick/core/ms_block.h
@@ -0,0 +1,236 @@
+/*
+ * ms_block.h - Sony MemoryStick (legacy) storage support
+ *
+ * Copyright (C) 2007 Alex Dubov <oakad@...oo.com>
+ * Copyright (C) 2010 Maxim Levitsky <maximlevitsky@...il.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Special thanks to Carlos Corbacho for providing various MemoryStick cards
+ * that made this driver possible.
+ *
+ */
+
+#ifndef MS_BLOCK_NEW_H
+#define MS_BLOCK_NEW_H
+
+#define MS_BLOCK_MAX_SEGS 32
+#define MS_BLOCK_MAX_PAGES ((2 << 16) - 1)
+
+#define MS_BLOCK_MAX_BOOT_ADDR 0x000c
+#define MS_BLOCK_BOOT_ID 0x0001
+#define MS_BLOCK_INVALID 0xffff
+#define MS_MAX_ZONES 16
+#define MS_BLOCKS_IN_ZONE 512
+
+#define MS_BLOCK_MAP_LINE_SZ 16
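+/* Minor numbers (partitions) reserved per card: 1 << MS_BLOCK_PART_SHIFT */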
+#define MS_BLOCK_PART_SHIFT 3
+
+#define MEMSTICK_UNCORR_ERROR (MEMSTICK_STATUS1_UCFG | \
+ MEMSTICK_STATUS1_UCEX | MEMSTICK_STATUS1_UCDT)
+
+#define MEMSTICK_CORR_ERROR (MEMSTICK_STATUS1_FGER | MEMSTICK_STATUS1_EXER | \
+ MEMSTICK_STATUS1_DTER)
+
+#define MEMSTICK_INT_ERROR (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR)
+
+#define MEMSTICK_OVERWRITE_FLAG_NORMAL \
+ (MEMSTICK_OVERWRITE_PGST1 | \
+ MEMSTICK_OVERWRITE_PGST0 | \
+ MEMSTICK_OVERWRITE_BKST)
+
+#define MEMSTICK_OV_PG_NORMAL \
+ (MEMSTICK_OVERWRITE_PGST1 | MEMSTICK_OVERWRITE_PGST0)
+
+#define MEMSTICK_MANAGMENT_FLAG_NORMAL \
+ (MEMSTICK_MANAGEMENT_SYSFLG | \
+ MEMSTICK_MANAGEMENT_SCMS1 | \
+ MEMSTICK_MANAGEMENT_SCMS0)
+
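+/* On-media layout of the boot block stored on the card */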
+struct ms_boot_header {
+ unsigned short block_id;
+ unsigned short format_reserved;
+ unsigned char reserved0[184];
+ unsigned char data_entry;
+ unsigned char reserved1[179];
+} __attribute__((packed));
+
+struct ms_system_item {
+ unsigned int start_addr;
+ unsigned int data_size;
+ unsigned char data_type_id;
+ unsigned char reserved[3];
+} __attribute__((packed));
+
+struct ms_system_entry {
+ struct ms_system_item disabled_block;
+ struct ms_system_item cis_idi;
+ unsigned char reserved[24];
+} __attribute__((packed));
+
+struct ms_boot_attr_info {
+ unsigned char memorystick_class;
+ unsigned char format_unique_value1;
+ unsigned short block_size;
+ unsigned short number_of_blocks;
+ unsigned short number_of_effective_blocks;
+ unsigned short page_size;
+ unsigned char extra_data_size;
+ unsigned char format_unique_value2;
+ unsigned char assembly_time[8];
+ unsigned char format_unique_value3;
+ unsigned char serial_number[3];
+ unsigned char assembly_manufacturer_code;
+ unsigned char assembly_model_code[3];
+ unsigned short memory_manufacturer_code;
+ unsigned short memory_device_code;
+ unsigned short implemented_capacity;
+ unsigned char format_unique_value4[2];
+ unsigned char vcc;
+ unsigned char vpp;
+ unsigned short controller_number;
+ unsigned short controller_function;
+ unsigned char reserved0[9];
+ unsigned char transfer_supporting;
+ unsigned short format_unique_value5;
+ unsigned char format_type;
+ unsigned char memorystick_application;
+ unsigned char device_type;
+ unsigned char reserved1[22];
+ unsigned char format_uniqure_value6[2];
+ unsigned char reserved2[15];
+} __attribute__((packed));
+
+struct ms_cis_idi {
+ unsigned short general_config;
+ unsigned short logical_cylinders;
+ unsigned short reserved0;
+ unsigned short logical_heads;
+ unsigned short track_size;
+ unsigned short page_size;
+ unsigned short pages_per_track;
+ unsigned short msw;
+ unsigned short lsw;
+ unsigned short reserved1;
+ unsigned char serial_number[20];
+ unsigned short buffer_type;
+ unsigned short buffer_size_increments;
+ unsigned short long_command_ecc;
+ unsigned char firmware_version[28];
+ unsigned char model_name[18];
+ unsigned short reserved2[5];
+ unsigned short pio_mode_number;
+ unsigned short dma_mode_number;
+ unsigned short field_validity;
+ unsigned short current_logical_cylinders;
+ unsigned short current_logical_heads;
+ unsigned short current_pages_per_track;
+ unsigned int current_page_capacity;
+ unsigned short mutiple_page_setting;
+ unsigned int addressable_pages;
+ unsigned short single_word_dma;
+ unsigned short multi_word_dma;
+ unsigned char reserved3[128];
+} __attribute__((packed));
+
+struct ms_boot_page {
+ struct ms_boot_header header;
+ struct ms_system_entry entry;
+ struct ms_boot_attr_info attr;
+} __attribute__((packed));
+
+struct ms_param_extra {
+ struct ms_param_register param;
+ struct ms_extra_data_register extra;
+} __attribute__((packed));
+
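+/* Per-card driver state */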
+struct msb_data {
+ unsigned int usage_count;
+ struct memstick_dev *card;
+ struct gendisk *disk;
+ struct request_queue *queue;
+ spinlock_t q_lock;
+ struct hd_geometry geometry;
+ struct attribute_group attr_group;
+ struct request *req;
+
+ /* IO */
+ struct task_struct *io_thread;
+ bool card_dead;
+
+ /* Media properties */
+ struct ms_boot_page *boot_page;
+ u16 boot_block_locations[2];
+ int boot_block_count;
+
+ bool read_only;
+ unsigned short page_size;
+ int block_size;
+ int pages_in_block;
+ int zone_count;
+ int block_count;
+ int logical_block_count;
+
+ /* FTL tables */
+ unsigned long *used_blocks_bitmap;
+ unsigned long *erased_blocks_bitmap;
+ u16 *lba_to_pba_table;
+ int free_block_count[MS_MAX_ZONES];
+ bool ftl_initialized;
+
+ /* Cache */
+ unsigned char *cache;
+ unsigned long valid_cache_bitmap;
+ int cache_block_lba;
+ bool need_flush_cache;
+ struct timer_list cache_flush_timer;
+
+ struct scatterlist req_sg[MS_BLOCK_MAX_SEGS+1];
+ unsigned char *block_buffer;
+
+ /* handler's local data */
+ u8 command_value;
+ struct scatterlist *current_sg;
+ int sg_offset;
+ struct ms_param_extra param;
+ struct ms_register out_regs;
+ int current_page;
+};
+
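+/* Maps a card size to the CHS geometry reported for it */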
+struct chs_entry {
+ unsigned long size;
+ unsigned char sec;
+ unsigned short cyl;
+ unsigned char head;
+};
+
+static int msb_reset(struct msb_data *msb);
+
+#define DRIVER_NAME "ms_block"
+
+#define ms_printk(format, ...) \
+ printk(KERN_INFO DRIVER_NAME ": " format "\n", ## __VA_ARGS__)
+
+#define __dbg(level, format, ...) \
+ do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG DRIVER_NAME \
+ ": " format "\n", ## __VA_ARGS__); \
+ } while (0)
+
+#define dbg(format, ...) __dbg(1, format, ## __VA_ARGS__)
+#define dbg_verbose(format, ...) __dbg(2, format, ## __VA_ARGS__)
+
+#endif
--
1.7.0.4