Message-ID: <4BC46BF6.5040408@vlnb.net>
Date: Tue, 13 Apr 2010 17:04:54 +0400
From: Vladislav Bolkhovitin <vst@...b.net>
To: linux-scsi@...r.kernel.org
CC: linux-kernel@...r.kernel.org,
scst-devel <scst-devel@...ts.sourceforge.net>,
James Bottomley <James.Bottomley@...senPartnership.com>,
Andrew Morton <akpm@...ux-foundation.org>,
FUJITA Tomonori <fujita.tomonori@....ntt.co.jp>,
Mike Christie <michaelc@...wisc.edu>,
Jeff Garzik <jeff@...zik.org>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Vu Pham <vuhuong@...lanox.com>,
Bart Van Assche <bart.vanassche@...il.com>,
James Smart <James.Smart@...lex.Com>,
Joe Eykholt <jeykholt@...co.com>, Andy Yan <ayan@...vell.com>,
linux-driver@...gic.com
Subject: Re: [PATCH][RFC 3/12/1/5] SCST core's scst_main.c
This patch contains file scst_main.c.
Signed-off-by: Vladislav Bolkhovitin <vst@...b.net>
---
scst_main.c | 2047 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 2047 insertions(+)
diff -uprN orig/linux-2.6.33/drivers/scst/scst_main.c linux-2.6.33/drivers/scst/scst_main.c
--- orig/linux-2.6.33/drivers/scst/scst_main.c
+++ linux-2.6.33/drivers/scst/scst_main.c
@@ -0,0 +1,2047 @@
+/*
+ * scst_main.c
+ *
+ * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@...b.net>
+ * Copyright (C) 2004 - 2005 Leonid Stoljar
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2
+ * of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/kthread.h>
+
+#include "scst.h"
+#include "scst_priv.h"
+#include "scst_mem.h"
+
+#if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
+#warning "HIGHMEM kernel configurations are fully supported, but not\
+ recommended for performance reasons. Consider changing VMSPLIT\
+ option or use a 64-bit configuration instead. See README file for\
+ details."
+#endif
+
+/**
+ ** SCST global variables. They are all left uninitialized so that their
+ ** layout in memory is exactly as declared. Otherwise the compiler puts
+ ** zero-initialized variables separately from nonzero-initialized ones.
+ **/
+
+/*
+ * Main SCST mutex. All targets, devices and dev_types management is done
+ * under this mutex.
+ *
+ * It must NOT be taken from work items (schedule_work(), etc.), because
+ * otherwise a deadlock (double lock, actually) is possible, e.g., with
+ * scst_user detach_tgt(), which is called under scst_mutex and calls
+ * flush_scheduled_work().
+ */
+struct mutex scst_mutex;
+EXPORT_SYMBOL_GPL(scst_mutex);
+
+ /* All 3 protected by scst_mutex */
+struct list_head scst_template_list;
+struct list_head scst_dev_list;
+struct list_head scst_dev_type_list;
+
+spinlock_t scst_main_lock;
+
+static struct kmem_cache *scst_mgmt_cachep;
+mempool_t *scst_mgmt_mempool;
+static struct kmem_cache *scst_mgmt_stub_cachep;
+mempool_t *scst_mgmt_stub_mempool;
+static struct kmem_cache *scst_ua_cachep;
+mempool_t *scst_ua_mempool;
+static struct kmem_cache *scst_sense_cachep;
+mempool_t *scst_sense_mempool;
+static struct kmem_cache *scst_aen_cachep;
+mempool_t *scst_aen_mempool;
+struct kmem_cache *scst_tgtd_cachep;
+struct kmem_cache *scst_sess_cachep;
+struct kmem_cache *scst_acgd_cachep;
+
+unsigned int scst_setup_id;
+
+spinlock_t scst_init_lock;
+wait_queue_head_t scst_init_cmd_list_waitQ;
+struct list_head scst_init_cmd_list;
+unsigned int scst_init_poll_cnt;
+
+struct kmem_cache *scst_cmd_cachep;
+
+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
+unsigned long scst_trace_flag;
+#endif
+
+unsigned long scst_flags;
+atomic_t scst_cmd_count;
+
+struct scst_cmd_threads scst_main_cmd_threads;
+
+struct scst_tasklet scst_tasklets[NR_CPUS];
+
+spinlock_t scst_mcmd_lock;
+struct list_head scst_active_mgmt_cmd_list;
+struct list_head scst_delayed_mgmt_cmd_list;
+wait_queue_head_t scst_mgmt_cmd_list_waitQ;
+
+wait_queue_head_t scst_mgmt_waitQ;
+spinlock_t scst_mgmt_lock;
+struct list_head scst_sess_init_list;
+struct list_head scst_sess_shut_list;
+
+wait_queue_head_t scst_dev_cmd_waitQ;
+
+static struct mutex scst_suspend_mutex;
+/* protected by scst_suspend_mutex */
+static struct list_head scst_cmd_threads_list;
+
+int scst_threads;
+static struct task_struct *scst_init_cmd_thread;
+static struct task_struct *scst_mgmt_thread;
+static struct task_struct *scst_mgmt_cmd_thread;
+
+static int suspend_count;
+
+static int scst_virt_dev_last_id; /* protected by scst_mutex */
+
+static unsigned int scst_max_cmd_mem;
+unsigned int scst_max_dev_cmd_mem;
+
+module_param_named(scst_threads, scst_threads, int, 0);
+MODULE_PARM_DESC(scst_threads, "SCSI target threads count");
+
+module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, int, S_IRUGO);
+MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
+ "all SCSI commands of all devices at any given time in MB");
+
+module_param_named(scst_max_dev_cmd_mem, scst_max_dev_cmd_mem, int, S_IRUGO);
+MODULE_PARM_DESC(scst_max_dev_cmd_mem, "Maximum memory allowed to be consumed "
+ "by all SCSI commands of a device at any given time in MB");
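+
+/*
+ * Example (illustrative; assumes the core module is named "scst"):
+ *
+ *	modprobe scst scst_threads=8 scst_max_cmd_mem=1024 scst_max_dev_cmd_mem=512
+ */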
+
+struct scst_dev_type scst_null_devtype = {
+ .name = "none",
+};
+
+static void __scst_resume_activity(void);
+
+/**
+ * __scst_register_target_template() - register target template.
+ * @vtt: target template
+ * @version: SCST_INTERFACE_VERSION version string to ensure that
+ * SCST core and the target driver use the same version of
+ * the SCST interface
+ *
+ * Description:
+ * Registers a target template and returns 0 on success or appropriate
+ * error code otherwise.
+ *
+ * Note: *vtt must be static!
+ */
+int __scst_register_target_template(struct scst_tgt_template *vtt,
+ const char *version)
+{
+ int res = 0;
+ struct scst_tgt_template *t;
+ static DEFINE_MUTEX(m);
+
+ INIT_LIST_HEAD(&vtt->tgt_list);
+
+ if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
+ PRINT_ERROR("Incorrect version of target %s", vtt->name);
+ res = -EINVAL;
+ goto out_err;
+ }
+
+ if (!vtt->detect) {
+ PRINT_ERROR("Target driver %s must have "
+ "detect() method.", vtt->name);
+ res = -EINVAL;
+ goto out_err;
+ }
+
+ if (!vtt->release) {
+ PRINT_ERROR("Target driver %s must have "
+ "release() method.", vtt->name);
+ res = -EINVAL;
+ goto out_err;
+ }
+
+ if (!vtt->xmit_response) {
+ PRINT_ERROR("Target driver %s must have "
+ "xmit_response() method.", vtt->name);
+ res = -EINVAL;
+ goto out_err;
+ }
+
+ if (vtt->threads_num < 0) {
+ PRINT_ERROR("Wrong threads_num value %d for "
+ "target \"%s\"", vtt->threads_num,
+ vtt->name);
+ res = -EINVAL;
+ goto out_err;
+ }
+
+ if (!vtt->enable_target || !vtt->is_target_enabled) {
+		PRINT_WARNING("Target driver %s doesn't have enable_target() "
+			"and/or is_target_enabled() method(s). This is unsafe "
+			"and can lead to initiators that connect during "
+			"initialization seeing an unexpected set of devices "
+			"or no devices at all!", vtt->name);
+ }
+
+ if (((vtt->add_target != NULL) && (vtt->del_target == NULL)) ||
+ ((vtt->add_target == NULL) && (vtt->del_target != NULL))) {
+ PRINT_ERROR("Target driver %s must either define both "
+ "add_target() and del_target(), or none.", vtt->name);
+ res = -EINVAL;
+ goto out_err;
+ }
+
+ res = scst_create_tgtt_sysfs(vtt);
+ if (res)
+ goto out_sysfs_err;
+
+ if (vtt->rdy_to_xfer == NULL)
+ vtt->rdy_to_xfer_atomic = 1;
+
+ if (mutex_lock_interruptible(&m) != 0)
+ goto out_sysfs_err;
+
+ if (mutex_lock_interruptible(&scst_mutex) != 0)
+ goto out_m_err;
+ list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
+ if (strcmp(t->name, vtt->name) == 0) {
+ PRINT_ERROR("Target driver %s already registered",
+ vtt->name);
+ mutex_unlock(&scst_mutex);
+ goto out_m_err;
+ }
+ }
+ mutex_unlock(&scst_mutex);
+
+ TRACE_DBG("%s", "Calling target driver's detect()");
+ res = vtt->detect(vtt);
+ TRACE_DBG("Target driver's detect() returned %d", res);
+ if (res < 0) {
+ PRINT_ERROR("%s", "The detect() routine failed");
+ res = -EINVAL;
+ goto out_m_err;
+ }
+
+ mutex_lock(&scst_mutex);
+ list_add_tail(&vtt->scst_template_list_entry, &scst_template_list);
+ mutex_unlock(&scst_mutex);
+
+ res = 0;
+
+ PRINT_INFO("Target template %s registered successfully", vtt->name);
+
+ mutex_unlock(&m);
+
+out:
+ return res;
+
+out_m_err:
+ mutex_unlock(&m);
+
+out_sysfs_err:
+ scst_tgtt_sysfs_put(vtt);
+
+out_err:
+ PRINT_ERROR("Failed to register target template %s", vtt->name);
+ goto out;
+}
+EXPORT_SYMBOL_GPL(__scst_register_target_template);
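+
+/*
+ * Minimal registration sketch for a hypothetical target driver "demo_tgt".
+ * The demo_* callbacks and their exact prototypes are illustrative
+ * assumptions; scst.h defines the authoritative template fields. Only
+ * detect(), release() and xmit_response() are checked above as mandatory.
+ *
+ *	static struct scst_tgt_template demo_tgt_template = {
+ *		.name			= "demo_tgt",
+ *		.detect			= demo_detect,
+ *		.release		= demo_release,
+ *		.xmit_response		= demo_xmit_response,
+ *		.enable_target		= demo_enable_target,
+ *		.is_target_enabled	= demo_is_target_enabled,
+ *	};
+ *
+ *	static int __init demo_init(void)
+ *	{
+ *		return __scst_register_target_template(&demo_tgt_template,
+ *			SCST_INTERFACE_VERSION);
+ *	}
+ *
+ *	static void __exit demo_exit(void)
+ *	{
+ *		scst_unregister_target_template(&demo_tgt_template);
+ *	}
+ */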
+
+static int scst_check_non_gpl_target_template(struct scst_tgt_template *vtt)
+{
+ int res;
+
+ if (vtt->task_mgmt_affected_cmds_done || vtt->threads_num) {
+		PRINT_ERROR("Functionality not allowed in the non-GPL version "
+			"for target template %s", vtt->name);
+ res = -EPERM;
+ goto out;
+ }
+
+ res = 0;
+
+out:
+ return res;
+}
+
+/**
+ * __scst_register_target_template_non_gpl() - register target template,
+ * non-GPL version
+ * @vtt: target template
+ * @version: SCST_INTERFACE_VERSION version string to ensure that
+ * SCST core and the target driver use the same version of
+ * the SCST interface
+ *
+ * Description:
+ * Registers a target template and returns 0 on success or appropriate
+ * error code otherwise.
+ *
+ * Note: *vtt must be static!
+ */
+int __scst_register_target_template_non_gpl(struct scst_tgt_template *vtt,
+ const char *version)
+{
+ int res;
+
+ res = scst_check_non_gpl_target_template(vtt);
+ if (res != 0)
+ goto out;
+
+ res = __scst_register_target_template(vtt, version);
+
+out:
+ return res;
+}
+EXPORT_SYMBOL(__scst_register_target_template_non_gpl);
+
+/**
+ * scst_unregister_target_template() - unregister target template
+ */
+void scst_unregister_target_template(struct scst_tgt_template *vtt)
+{
+ struct scst_tgt *tgt;
+ struct scst_tgt_template *t;
+ int found = 0;
+
+ mutex_lock(&scst_mutex);
+
+ list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
+ if (strcmp(t->name, vtt->name) == 0) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ PRINT_ERROR("Target driver %s isn't registered", vtt->name);
+ goto out_err_up;
+ }
+
+restart:
+ list_for_each_entry(tgt, &vtt->tgt_list, tgt_list_entry) {
+ mutex_unlock(&scst_mutex);
+ scst_unregister_target(tgt);
+ mutex_lock(&scst_mutex);
+ goto restart;
+ }
+ list_del(&vtt->scst_template_list_entry);
+
+ mutex_unlock(&scst_mutex);
+
+ scst_tgtt_sysfs_put(vtt);
+
+ PRINT_INFO("Target template %s unregistered successfully", vtt->name);
+
+out:
+ return;
+
+out_err_up:
+ mutex_unlock(&scst_mutex);
+ goto out;
+}
+EXPORT_SYMBOL(scst_unregister_target_template);
+
+/**
+ * scst_register_target() - register target
+ *
+ * Registers a target for template vtt and returns new target structure on
+ * success or NULL otherwise.
+ */
+struct scst_tgt *scst_register_target(struct scst_tgt_template *vtt,
+ const char *target_name)
+{
+ struct scst_tgt *tgt;
+ int rc = 0;
+
+ rc = scst_alloc_tgt(vtt, &tgt);
+ if (rc != 0)
+ goto out_err;
+
+ rc = scst_suspend_activity(true);
+ if (rc != 0)
+ goto out_free_tgt_err;
+
+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
+ rc = -EINTR;
+ goto out_resume_free;
+ }
+
+ if (target_name != NULL) {
+
+ tgt->tgt_name = kmalloc(strlen(target_name) + 1, GFP_KERNEL);
+ if (tgt->tgt_name == NULL) {
+ TRACE(TRACE_OUT_OF_MEM, "Allocation of tgt name %s failed",
+ target_name);
+ rc = -ENOMEM;
+ goto out_unlock_resume;
+ }
+ strcpy(tgt->tgt_name, target_name);
+ } else {
+ static int tgt_num; /* protected by scst_mutex */
+ int len = strlen(vtt->name) +
+ strlen(SCST_DEFAULT_TGT_NAME_SUFFIX) + 11 + 1;
+
+ tgt->tgt_name = kmalloc(len, GFP_KERNEL);
+ if (tgt->tgt_name == NULL) {
+ TRACE(TRACE_OUT_OF_MEM, "Allocation of tgt name failed "
+ "(template name %s)", vtt->name);
+ rc = -ENOMEM;
+ goto out_unlock_resume;
+ }
+ sprintf(tgt->tgt_name, "%s%s%d", vtt->name,
+ SCST_DEFAULT_TGT_NAME_SUFFIX, tgt_num++);
+ }
+
+ tgt->default_acg = scst_alloc_add_acg(NULL, tgt->tgt_name);
+ if (tgt->default_acg == NULL)
+ goto out_free_tgt_name;
+
+ INIT_LIST_HEAD(&tgt->tgt_acg_list);
+
+ rc = scst_create_tgt_sysfs(tgt);
+ if (rc < 0)
+ goto out_clear_acg;
+
+ list_add_tail(&tgt->tgt_list_entry, &vtt->tgt_list);
+
+ mutex_unlock(&scst_mutex);
+ scst_resume_activity();
+
+ PRINT_INFO("Target %s for template %s registered successfully",
+ tgt->tgt_name, vtt->name);
+
+ TRACE_DBG("tgt %p", tgt);
+
+out:
+ return tgt;
+
+out_clear_acg:
+ scst_clear_acg(tgt->default_acg);
+
+out_free_tgt_name:
+ kfree(tgt->tgt_name);
+
+out_unlock_resume:
+ mutex_unlock(&scst_mutex);
+
+out_resume_free:
+ scst_resume_activity();
+
+out_free_tgt_err:
+ scst_tgt_sysfs_put(tgt); /* must not be called under scst_mutex */
+ tgt = NULL;
+
+out_err:
+	PRINT_ERROR("Failed to register target %s for template %s (error %d)",
+		(target_name != NULL) ? target_name : vtt->name,
+		vtt->name, rc);
+ goto out;
+}
+EXPORT_SYMBOL_GPL(scst_register_target);
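+
+/*
+ * Typical call sequence sketch (illustrative, continuing the hypothetical
+ * demo_tgt driver above). Passing target_name == NULL makes SCST generate a
+ * name from the template name, SCST_DEFAULT_TGT_NAME_SUFFIX and a sequence
+ * number.
+ *
+ *	struct scst_tgt *tgt;
+ *
+ *	tgt = scst_register_target(&demo_tgt_template, "demo_tgt_port0");
+ *	if (tgt == NULL)
+ *		return -ENOMEM;		(or another driver-chosen error)
+ *	...
+ *	scst_unregister_target(tgt);	(on shutdown of this port)
+ */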
+
+/**
+ * scst_register_target_non_gpl() - register target, non-GPL version
+ *
+ * Registers a target for template vtt and returns new target structure on
+ * success or NULL otherwise.
+ */
+struct scst_tgt *scst_register_target_non_gpl(struct scst_tgt_template *vtt,
+ const char *target_name)
+{
+ struct scst_tgt *res;
+
+ if (scst_check_non_gpl_target_template(vtt)) {
+ res = NULL;
+ goto out;
+ }
+
+ res = scst_register_target(vtt, target_name);
+
+out:
+ return res;
+}
+EXPORT_SYMBOL(scst_register_target_non_gpl);
+
+static inline int test_sess_list(struct scst_tgt *tgt)
+{
+ int res;
+ mutex_lock(&scst_mutex);
+ res = list_empty(&tgt->sess_list);
+ mutex_unlock(&scst_mutex);
+ return res;
+}
+
+/**
+ * scst_unregister_target() - unregister target
+ */
+void scst_unregister_target(struct scst_tgt *tgt)
+{
+ struct scst_session *sess;
+ struct scst_tgt_template *vtt = tgt->tgtt;
+ struct scst_acg *acg, *acg_tmp;
+
+ scst_tgt_sysfs_prepare_put(tgt);
+
+ TRACE_DBG("%s", "Calling target driver's release()");
+ tgt->tgtt->release(tgt);
+ TRACE_DBG("%s", "Target driver's release() returned");
+
+ mutex_lock(&scst_mutex);
+again:
+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
+ if (sess->shut_phase == SCST_SESS_SPH_READY) {
+ /*
+ * Sometimes it's hard for target driver to track all
+ * its sessions (see scst_local, for example), so let's
+ * help it.
+ */
+ mutex_unlock(&scst_mutex);
+ scst_unregister_session(sess, 0, NULL);
+ mutex_lock(&scst_mutex);
+ goto again;
+ }
+ }
+ mutex_unlock(&scst_mutex);
+
+ TRACE_DBG("%s", "Waiting for sessions shutdown");
+ wait_event(tgt->unreg_waitQ, test_sess_list(tgt));
+ TRACE_DBG("%s", "wait_event() returned");
+
+ scst_suspend_activity(false);
+ mutex_lock(&scst_mutex);
+
+ list_del(&tgt->tgt_list_entry);
+
+ mutex_unlock(&scst_mutex);
+ scst_resume_activity();
+
+ scst_clear_acg(tgt->default_acg);
+
+ list_for_each_entry_safe(acg, acg_tmp, &tgt->tgt_acg_list,
+ acg_list_entry) {
+ scst_acg_sysfs_put(acg);
+ }
+
+ del_timer_sync(&tgt->retry_timer);
+
+ PRINT_INFO("Target %s for template %s unregistered successfully",
+ tgt->tgt_name, vtt->name);
+
+ scst_tgt_sysfs_put(tgt); /* must not be called under scst_mutex */
+
+ TRACE_DBG("Unregistering tgt %p finished", tgt);
+ return;
+}
+EXPORT_SYMBOL(scst_unregister_target);
+
+static int scst_susp_wait(bool interruptible)
+{
+ int res = 0;
+
+ if (interruptible) {
+ res = wait_event_interruptible_timeout(scst_dev_cmd_waitQ,
+ (atomic_read(&scst_cmd_count) == 0),
+ SCST_SUSPENDING_TIMEOUT);
+ if (res <= 0) {
+ __scst_resume_activity();
+ if (res == 0)
+ res = -EBUSY;
+ } else
+ res = 0;
+ } else
+ wait_event(scst_dev_cmd_waitQ,
+ atomic_read(&scst_cmd_count) == 0);
+
+ TRACE_MGMT_DBG("wait_event() returned %d", res);
+ return res;
+}
+
+/**
+ * scst_suspend_activity() - globally suspend any activity
+ *
+ * Description:
+ *    Globally suspends any activity and doesn't return until there are no
+ *    more active commands (i.e. commands in a state after
+ *    SCST_CMD_STATE_INIT). If "interruptible" is true, it returns with a
+ *    corresponding error status < 0 after SCST_SUSPENDING_TIMEOUT expires or
+ *    if it is interrupted by a signal. If "interruptible" is false, it will
+ *    wait virtually forever. On success returns 0.
+ *
+ * New arriving commands stay in the suspended state until
+ * scst_resume_activity() is called.
+ */
+int scst_suspend_activity(bool interruptible)
+{
+ int res = 0;
+ bool rep = false;
+
+ if (interruptible) {
+ if (mutex_lock_interruptible(&scst_suspend_mutex) != 0) {
+ res = -EINTR;
+ goto out;
+ }
+ } else
+ mutex_lock(&scst_suspend_mutex);
+
+ TRACE_MGMT_DBG("suspend_count %d", suspend_count);
+ suspend_count++;
+ if (suspend_count > 1)
+ goto out_up;
+
+ set_bit(SCST_FLAG_SUSPENDING, &scst_flags);
+ set_bit(SCST_FLAG_SUSPENDED, &scst_flags);
+ /*
+ * Assignment of SCST_FLAG_SUSPENDING and SCST_FLAG_SUSPENDED must be
+ * ordered with scst_cmd_count. Otherwise lockless logic in
+ * scst_translate_lun() and scst_mgmt_translate_lun() won't work.
+ */
+ smp_mb__after_set_bit();
+
+ /*
+ * See comment in scst_user.c::dev_user_task_mgmt_fn() for more
+ * information about scst_user behavior.
+ *
+ * ToDo: make the global suspending unneeded (switch to per-device
+	 * reference counting? That would mean giving up the lockless
+	 * implementation of scst_translate_lun().)
+ */
+
+ if (atomic_read(&scst_cmd_count) != 0) {
+		PRINT_INFO("Waiting for %d active commands to complete... This "
+			"might take a few minutes for disks or a few hours for "
+			"tapes, if you use long-running commands, like "
+			"REWIND or FORMAT. If you have a hung user "
+			"space device (i.e. made using the scst_user module) "
+			"not responding to any commands, it might take "
+			"virtually forever until the corresponding user space "
+			"program recovers and starts responding or gets "
+			"killed.", atomic_read(&scst_cmd_count));
+ rep = true;
+ }
+
+ res = scst_susp_wait(interruptible);
+ if (res != 0)
+ goto out_clear;
+
+ clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
+ /* See comment about smp_mb() above */
+ smp_mb__after_clear_bit();
+
+ TRACE_MGMT_DBG("Waiting for %d active commands finally to complete",
+ atomic_read(&scst_cmd_count));
+
+ res = scst_susp_wait(interruptible);
+ if (res != 0)
+ goto out_clear;
+
+ if (rep)
+ PRINT_INFO("%s", "All active commands completed");
+
+out_up:
+ mutex_unlock(&scst_suspend_mutex);
+
+out:
+ return res;
+
+out_clear:
+ clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
+ /* See comment about smp_mb() above */
+ smp_mb__after_clear_bit();
+ goto out_up;
+}
+EXPORT_SYMBOL_GPL(scst_suspend_activity);
+
+static void __scst_resume_activity(void)
+{
+ struct scst_cmd_threads *l;
+
+ suspend_count--;
+ TRACE_MGMT_DBG("suspend_count %d left", suspend_count);
+ if (suspend_count > 0)
+ goto out;
+
+ clear_bit(SCST_FLAG_SUSPENDED, &scst_flags);
+ /*
+ * The barrier is needed to make sure all woken up threads see the
+ * cleared flag. Not sure if it's really needed, but let's be safe.
+ */
+ smp_mb__after_clear_bit();
+
+ list_for_each_entry(l, &scst_cmd_threads_list, lists_list_entry) {
+ wake_up_all(&l->cmd_list_waitQ);
+ }
+ wake_up_all(&scst_init_cmd_list_waitQ);
+
+ spin_lock_irq(&scst_mcmd_lock);
+ if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
+ struct scst_mgmt_cmd *m;
+ m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
+ mgmt_cmd_list_entry);
+ TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
+ "mgmt cmd list", m);
+ list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
+ }
+ spin_unlock_irq(&scst_mcmd_lock);
+ wake_up_all(&scst_mgmt_cmd_list_waitQ);
+
+out:
+ return;
+}
+
+/**
+ * scst_resume_activity() - globally resume all activities
+ *
+ * Resumes activities suspended by scst_suspend_activity().
+ */
+void scst_resume_activity(void)
+{
+
+ mutex_lock(&scst_suspend_mutex);
+ __scst_resume_activity();
+ mutex_unlock(&scst_suspend_mutex);
+ return;
+}
+EXPORT_SYMBOL_GPL(scst_resume_activity);
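+
+/*
+ * Usage sketch: global management-type changes that must not race with
+ * command processing are bracketed by a suspend/resume pair, as the
+ * registration functions in this file do:
+ *
+ *	res = scst_suspend_activity(true);	(interruptible wait)
+ *	if (res != 0)
+ *		return res;			(-EINTR or -EBUSY)
+ *	... perform the global change ...
+ *	scst_resume_activity();
+ *
+ * The calls nest via suspend_count, so nested suspends are fine as long as
+ * every successful suspend is paired with exactly one resume.
+ */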
+
+static int scst_register_device(struct scsi_device *scsidp)
+{
+ int res = 0;
+ struct scst_device *dev, *d;
+ struct scst_dev_type *dt;
+
+ res = scst_suspend_activity(true);
+ if (res != 0)
+ goto out_err;
+
+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
+ res = -EINTR;
+ goto out_resume;
+ }
+
+ res = scst_alloc_device(GFP_KERNEL, &dev);
+ if (res != 0)
+ goto out_up;
+
+ dev->type = scsidp->type;
+
+ dev->virt_name = kmalloc(50, GFP_KERNEL);
+ if (dev->virt_name == NULL) {
+ PRINT_ERROR("%s", "Unable to alloc device name");
+ res = -ENOMEM;
+ goto out_free_dev;
+ }
+ snprintf(dev->virt_name, 50, "%d:%d:%d:%d", scsidp->host->host_no,
+ scsidp->channel, scsidp->id, scsidp->lun);
+
+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
+ if (strcmp(d->virt_name, dev->virt_name) == 0) {
+ PRINT_ERROR("Device %s already exists", dev->virt_name);
+ res = -EEXIST;
+ goto out_free_dev;
+ }
+ }
+
+ dev->scsi_dev = scsidp;
+
+ list_add_tail(&dev->dev_list_entry, &scst_dev_list);
+
+ res = scst_create_device_sysfs(dev);
+ if (res != 0)
+ goto out_free;
+
+ list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
+ if (dt->type == scsidp->type) {
+ res = scst_assign_dev_handler(dev, dt);
+ if (res != 0)
+ goto out_free;
+ break;
+ }
+ }
+
+out_up:
+ mutex_unlock(&scst_mutex);
+
+out_resume:
+ scst_resume_activity();
+
+out_err:
+ if (res == 0) {
+ PRINT_INFO("Attached to scsi%d, channel %d, id %d, lun %d, "
+ "type %d", scsidp->host->host_no, scsidp->channel,
+ scsidp->id, scsidp->lun, scsidp->type);
+ } else {
+ PRINT_ERROR("Failed to attach to scsi%d, channel %d, id %d, "
+ "lun %d, type %d", scsidp->host->host_no,
+ scsidp->channel, scsidp->id, scsidp->lun, scsidp->type);
+ }
+ return res;
+
+out_free:
+ list_del(&dev->dev_list_entry);
+
+out_free_dev:
+ mutex_unlock(&scst_mutex);
+ scst_resume_activity();
+ scst_device_sysfs_put(dev); /* must not be called under scst_mutex */
+ goto out_err;
+}
+
+static void scst_unregister_device(struct scsi_device *scsidp)
+{
+ struct scst_device *d, *dev = NULL;
+ struct scst_acg_dev *acg_dev, *aa;
+
+ scst_suspend_activity(false);
+ mutex_lock(&scst_mutex);
+
+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
+ if (d->scsi_dev == scsidp) {
+ dev = d;
+ TRACE_DBG("Target device %p found", dev);
+ break;
+ }
+ }
+ if (dev == NULL) {
+ PRINT_ERROR("%s", "Target device not found");
+ goto out_resume;
+ }
+
+ list_del(&dev->dev_list_entry);
+
+ list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
+ dev_acg_dev_list_entry) {
+ scst_acg_remove_dev(acg_dev->acg, dev, true);
+ }
+
+ scst_assign_dev_handler(dev, &scst_null_devtype);
+
+ mutex_unlock(&scst_mutex);
+ scst_resume_activity();
+
+ scst_device_sysfs_put(dev); /* must not be called under scst_mutex */
+
+ PRINT_INFO("Detached from scsi%d, channel %d, id %d, lun %d, type %d",
+ scsidp->host->host_no, scsidp->channel, scsidp->id,
+ scsidp->lun, scsidp->type);
+
+out:
+ return;
+
+out_resume:
+ mutex_unlock(&scst_mutex);
+ scst_resume_activity();
+ goto out;
+}
+
+static int scst_dev_handler_check(struct scst_dev_type *dev_handler)
+{
+ int res = 0;
+
+ if (dev_handler->parse == NULL) {
+ PRINT_ERROR("scst dev handler %s must have "
+ "parse() method.", dev_handler->name);
+ res = -EINVAL;
+ goto out;
+ }
+
+ if (((dev_handler->add_device != NULL) &&
+ (dev_handler->del_device == NULL)) ||
+ ((dev_handler->add_device == NULL) &&
+ (dev_handler->del_device != NULL))) {
+ PRINT_ERROR("Dev handler %s must either define both "
+ "add_device() and del_device(), or none.",
+ dev_handler->name);
+ res = -EINVAL;
+ goto out;
+ }
+
+ if (dev_handler->exec == NULL) {
+#ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
+ dev_handler->exec_atomic = 1;
+#else
+ dev_handler->exec_atomic = 0;
+#endif
+ }
+
+ if (dev_handler->dev_done == NULL)
+ dev_handler->dev_done_atomic = 1;
+
+out:
+ return res;
+}
+
+/**
+ * scst_register_virtual_device() - register a virtual device.
+ * @dev_handler: the device's device handler
+ * @dev_name:	the new device name, NULL-terminated string. Must be unique
+ *		among all virtual devices in the system.
+ *
+ * Registers a virtual device and returns the ID assigned to the device on
+ * success, or a negative value otherwise
+ */
+int scst_register_virtual_device(struct scst_dev_type *dev_handler,
+ const char *dev_name)
+{
+ int res, rc;
+ struct scst_device *dev;
+
+ if (dev_handler == NULL) {
+ PRINT_ERROR("%s: valid device handler must be supplied",
+ __func__);
+ res = -EINVAL;
+ goto out;
+ }
+
+ if (dev_name == NULL) {
+ PRINT_ERROR("%s: device name must be non-NULL", __func__);
+ res = -EINVAL;
+ goto out;
+ }
+
+ res = scst_dev_handler_check(dev_handler);
+ if (res != 0)
+ goto out;
+
+ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
+ if (strcmp(dev->virt_name, dev_name) == 0) {
+ PRINT_ERROR("Device %s already exists", dev_name);
+ res = -EEXIST;
+ goto out;
+ }
+ }
+
+ res = scst_suspend_activity(true);
+ if (res != 0)
+ goto out;
+
+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
+ res = -EINTR;
+ goto out_resume;
+ }
+
+ res = scst_alloc_device(GFP_KERNEL, &dev);
+ if (res != 0)
+ goto out_up;
+
+ dev->type = dev_handler->type;
+ dev->scsi_dev = NULL;
+ dev->virt_name = kstrdup(dev_name, GFP_KERNEL);
+ if (dev->virt_name == NULL) {
+ PRINT_ERROR("Unable to allocate virt_name for dev %s",
+ dev_name);
+ res = -ENOMEM;
+ goto out_release;
+ }
+ dev->virt_id = scst_virt_dev_last_id++;
+
+ list_add_tail(&dev->dev_list_entry, &scst_dev_list);
+
+ res = dev->virt_id;
+
+ rc = scst_create_device_sysfs(dev);
+ if (rc != 0) {
+ res = rc;
+ goto out_free_del;
+ }
+
+ rc = scst_assign_dev_handler(dev, dev_handler);
+ if (rc != 0) {
+ res = rc;
+ goto out_free_del;
+ }
+
+out_up:
+ mutex_unlock(&scst_mutex);
+
+out_resume:
+ scst_resume_activity();
+
+out:
+ if (res > 0)
+ PRINT_INFO("Attached to virtual device %s (id %d)",
+ dev_name, dev->virt_id);
+ else
+ PRINT_INFO("Failed to attach to virtual device %s", dev_name);
+ return res;
+
+out_free_del:
+ list_del(&dev->dev_list_entry);
+
+out_release:
+ mutex_unlock(&scst_mutex);
+ scst_resume_activity();
+ scst_device_sysfs_put(dev); /* must not be called under scst_mutex */
+ goto out;
+}
+EXPORT_SYMBOL_GPL(scst_register_virtual_device);
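+
+/*
+ * Usage sketch for a virtual-device backend (illustrative names). The
+ * returned ID, not the device name, is what identifies the device at
+ * unregistration time:
+ *
+ *	int virt_id;
+ *
+ *	virt_id = scst_register_virtual_device(&demo_vdev_devtype, "demo_disk0");
+ *	if (virt_id < 0)
+ *		return virt_id;
+ *	...
+ *	scst_unregister_virtual_device(virt_id);
+ */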
+
+/**
+ * scst_unregister_virtual_device() - unregister a virtual device.
+ * @id: the device's ID, returned by the registration function
+ */
+void scst_unregister_virtual_device(int id)
+{
+ struct scst_device *d, *dev = NULL;
+ struct scst_acg_dev *acg_dev, *aa;
+
+ scst_suspend_activity(false);
+ mutex_lock(&scst_mutex);
+
+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
+ if (d->virt_id == id) {
+ dev = d;
+ TRACE_DBG("Target device %p (id %d) found", dev, id);
+ break;
+ }
+ }
+ if (dev == NULL) {
+ PRINT_ERROR("Target virtual device (id %d) not found", id);
+ goto out_unblock;
+ }
+
+ list_del(&dev->dev_list_entry);
+
+ list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
+ dev_acg_dev_list_entry) {
+ scst_acg_remove_dev(acg_dev->acg, dev, true);
+ }
+
+ scst_assign_dev_handler(dev, &scst_null_devtype);
+
+ PRINT_INFO("Detached from virtual device %s (id %d)",
+ dev->virt_name, dev->virt_id);
+
+out_unblock:
+ mutex_unlock(&scst_mutex);
+ scst_resume_activity();
+
+ scst_device_sysfs_put(dev); /* must not be called under scst_mutex */
+ return;
+}
+EXPORT_SYMBOL_GPL(scst_unregister_virtual_device);
+
+/**
+ * __scst_register_dev_driver() - register pass-through dev handler driver
+ * @dev_type: dev handler template
+ * @version: SCST_INTERFACE_VERSION version string to ensure that
+ * SCST core and the dev handler use the same version of
+ * the SCST interface
+ *
+ * Description:
+ * Registers a pass-through dev handler driver. Returns 0 on success
+ * or appropriate error code otherwise.
+ *
+ * Note: *dev_type must be static!
+ */
+int __scst_register_dev_driver(struct scst_dev_type *dev_type,
+ const char *version)
+{
+ struct scst_dev_type *dt;
+ struct scst_device *dev;
+ int res;
+ int exist;
+
+ if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
+ PRINT_ERROR("Incorrect version of dev handler %s",
+ dev_type->name);
+ res = -EINVAL;
+ goto out_error;
+ }
+
+ res = scst_dev_handler_check(dev_type);
+ if (res != 0)
+ goto out_error;
+
+ res = scst_suspend_activity(true);
+ if (res != 0)
+ goto out_error;
+
+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
+ res = -EINTR;
+ goto out_err_res;
+ }
+
+ exist = 0;
+ list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
+ if (strcmp(dt->name, dev_type->name) == 0) {
+			PRINT_ERROR("Device type handler \"%s\" already "
+				"exists", dt->name);
+ exist = 1;
+ break;
+ }
+ }
+ if (exist)
+ goto out_up;
+
+ res = scst_create_devt_sysfs(dev_type);
+ if (res < 0)
+ goto out_free;
+
+ list_add_tail(&dev_type->dev_type_list_entry, &scst_dev_type_list);
+
+ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
+ if (dev->scsi_dev == NULL || dev->handler != &scst_null_devtype)
+ continue;
+ if (dev->scsi_dev->type == dev_type->type)
+ scst_assign_dev_handler(dev, dev_type);
+ }
+
+ mutex_unlock(&scst_mutex);
+ scst_resume_activity();
+
+ if (res == 0) {
+ PRINT_INFO("Device handler \"%s\" for type %d registered "
+ "successfully", dev_type->name, dev_type->type);
+ }
+
+out:
+ return res;
+
+out_free:
+ scst_devt_sysfs_put(dev_type);
+
+out_up:
+ mutex_unlock(&scst_mutex);
+
+out_err_res:
+ scst_resume_activity();
+
+out_error:
+ PRINT_ERROR("Failed to register device handler \"%s\" for type %d",
+ dev_type->name, dev_type->type);
+ goto out;
+}
+EXPORT_SYMBOL_GPL(__scst_register_dev_driver);
+
+/**
+ * scst_unregister_dev_driver() - unregister pass-through dev handler driver
+ */
+void scst_unregister_dev_driver(struct scst_dev_type *dev_type)
+{
+ struct scst_device *dev;
+ struct scst_dev_type *dt;
+ int found = 0;
+
+ scst_suspend_activity(false);
+ mutex_lock(&scst_mutex);
+
+ list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
+ if (strcmp(dt->name, dev_type->name) == 0) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ PRINT_ERROR("Dev handler \"%s\" isn't registered",
+ dev_type->name);
+ goto out_up;
+ }
+
+ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
+ if (dev->handler == dev_type) {
+ scst_assign_dev_handler(dev, &scst_null_devtype);
+ TRACE_DBG("Dev handler removed from device %p", dev);
+ }
+ }
+
+ list_del(&dev_type->dev_type_list_entry);
+
+ mutex_unlock(&scst_mutex);
+ scst_resume_activity();
+
+ scst_devt_sysfs_put(dev_type);
+
+ PRINT_INFO("Device handler \"%s\" for type %d unloaded",
+ dev_type->name, dev_type->type);
+
+out:
+ return;
+
+out_up:
+ mutex_unlock(&scst_mutex);
+ scst_resume_activity();
+ goto out;
+}
+EXPORT_SYMBOL_GPL(scst_unregister_dev_driver);
+
+/**
+ * __scst_register_virtual_dev_driver() - register virtual dev handler driver
+ * @dev_type: dev handler template
+ * @version: SCST_INTERFACE_VERSION version string to ensure that
+ * SCST core and the dev handler use the same version of
+ * the SCST interface
+ *
+ * Description:
+ * Registers a virtual dev handler driver. Returns 0 on success or
+ * appropriate error code otherwise.
+ *
+ * Note: *dev_type must be static!
+ */
+int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
+ const char *version)
+{
+ int res;
+
+ if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
+ PRINT_ERROR("Incorrect version of virtual dev handler %s",
+ dev_type->name);
+ res = -EINVAL;
+ goto out_err;
+ }
+
+ res = scst_dev_handler_check(dev_type);
+ if (res != 0)
+ goto out_err;
+
+ res = scst_create_devt_sysfs(dev_type);
+ if (res < 0)
+ goto out_free;
+
+ if (dev_type->type != -1) {
+ PRINT_INFO("Virtual device handler %s for type %d "
+ "registered successfully", dev_type->name,
+ dev_type->type);
+ } else {
+ PRINT_INFO("Virtual device handler \"%s\" registered "
+ "successfully", dev_type->name);
+ }
+
+out:
+ return res;
+
+out_free:
+
+ scst_devt_sysfs_put(dev_type);
+
+out_err:
+ PRINT_ERROR("Failed to register virtual device handler \"%s\"",
+ dev_type->name);
+ goto out;
+}
+EXPORT_SYMBOL_GPL(__scst_register_virtual_dev_driver);
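+
+/*
+ * Registration sketch for a hypothetical virtual dev handler. The demo_*
+ * callbacks and their prototypes are illustrative assumptions; as checked by
+ * scst_dev_handler_check(), only parse() is mandatory.
+ *
+ *	static struct scst_dev_type demo_vdev_devtype = {
+ *		.name	= "demo_vdisk",
+ *		.type	= TYPE_DISK,
+ *		.parse	= demo_parse,
+ *		.exec	= demo_exec,
+ *	};
+ *
+ *	static int __init demo_vdev_init(void)
+ *	{
+ *		return __scst_register_virtual_dev_driver(&demo_vdev_devtype,
+ *			SCST_INTERFACE_VERSION);
+ *	}
+ *
+ *	static void __exit demo_vdev_exit(void)
+ *	{
+ *		scst_unregister_virtual_dev_driver(&demo_vdev_devtype);
+ *	}
+ */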
+
+/**
+ * scst_unregister_virtual_dev_driver() - unregister virtual dev driver
+ */
+void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type)
+{
+
+ scst_devt_sysfs_put(dev_type);
+
+ PRINT_INFO("Device handler \"%s\" unloaded", dev_type->name);
+ return;
+}
+EXPORT_SYMBOL_GPL(scst_unregister_virtual_dev_driver);
+
+/* scst_mutex supposed to be held */
+int scst_add_threads(struct scst_cmd_threads *cmd_threads,
+ struct scst_device *dev, struct scst_tgt_dev *tgt_dev, int num)
+{
+ int res, i;
+ struct scst_cmd_thread_t *thr;
+ int n = 0, tgt_dev_num = 0;
+
+ list_for_each_entry(thr, &cmd_threads->threads_list, thread_list_entry) {
+ n++;
+ }
+
+ if (tgt_dev != NULL) {
+ struct scst_tgt_dev *t;
+ list_for_each_entry(t, &tgt_dev->dev->dev_tgt_dev_list,
+ dev_tgt_dev_list_entry) {
+ if (t == tgt_dev)
+ break;
+ tgt_dev_num++;
+ }
+ }
+
+ for (i = 0; i < num; i++) {
+ struct scst_cmd_thread_t *thr;
+
+ thr = kmalloc(sizeof(*thr), GFP_KERNEL);
+ if (!thr) {
+ res = -ENOMEM;
+			PRINT_ERROR("Failed to allocate thr (%d)", res);
+ goto out_error;
+ }
+
+ if (dev != NULL) {
+ char nm[14]; /* to limit the name's len */
+ strlcpy(nm, dev->virt_name, ARRAY_SIZE(nm));
+ thr->cmd_thread = kthread_create(scst_cmd_thread,
+ cmd_threads, "%s%d", nm, n++);
+ } else if (tgt_dev != NULL) {
+ char nm[11]; /* to limit the name's len */
+ strlcpy(nm, tgt_dev->dev->virt_name, ARRAY_SIZE(nm));
+ thr->cmd_thread = kthread_create(scst_cmd_thread,
+ cmd_threads, "%s%d_%d", nm, tgt_dev_num, n++);
+ } else
+ thr->cmd_thread = kthread_create(scst_cmd_thread,
+ cmd_threads, "scsi_tgt%d", n++);
+
+ if (IS_ERR(thr->cmd_thread)) {
+ res = PTR_ERR(thr->cmd_thread);
+ PRINT_ERROR("kthread_create() failed: %d", res);
+ kfree(thr);
+ goto out_error;
+ }
+
+ list_add(&thr->thread_list_entry, &cmd_threads->threads_list);
+ cmd_threads->nr_threads++;
+
+ wake_up_process(thr->cmd_thread);
+ }
+
+ res = 0;
+
+out:
+ return res;
+
+out_error:
+ scst_del_threads(cmd_threads, i);
+ goto out;
+}
+
+/* scst_mutex supposed to be held */
+void scst_del_threads(struct scst_cmd_threads *cmd_threads, int num)
+{
+ struct scst_cmd_thread_t *ct, *tmp;
+
+ if (num == 0)
+ goto out;
+
+ list_for_each_entry_safe_reverse(ct, tmp, &cmd_threads->threads_list,
+ thread_list_entry) {
+ int rc;
+ struct scst_device *dev;
+
+ rc = kthread_stop(ct->cmd_thread);
+ if (rc < 0)
+ TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
+
+ list_del(&ct->thread_list_entry);
+
+ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
+ struct scst_tgt_dev *tgt_dev;
+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
+ dev_tgt_dev_list_entry) {
+ scst_del_thr_data(tgt_dev, ct->cmd_thread);
+ }
+ }
+
+ kfree(ct);
+
+ cmd_threads->nr_threads--;
+
+ --num;
+ if (num == 0)
+ break;
+ }
+
+ EXTRACHECKS_BUG_ON((cmd_threads->nr_threads == 0) &&
+ (cmd_threads->io_context != NULL));
+
+out:
+ return;
+}
+
+/* The activity supposed to be suspended and scst_mutex held */
+void scst_stop_dev_threads(struct scst_device *dev)
+{
+ struct scst_tgt_dev *tgt_dev;
+
+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
+ dev_tgt_dev_list_entry) {
+ scst_tgt_dev_stop_threads(tgt_dev);
+ }
+
+ if ((dev->threads_num > 0) &&
+ (dev->threads_pool_type == SCST_THREADS_POOL_SHARED))
+ scst_del_threads(&dev->dev_cmd_threads, -1);
+ return;
+}
+
+/* The activity supposed to be suspended and scst_mutex held */
+int scst_create_dev_threads(struct scst_device *dev)
+{
+ int res = 0;
+ struct scst_tgt_dev *tgt_dev;
+
+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
+ dev_tgt_dev_list_entry) {
+ res = scst_tgt_dev_setup_threads(tgt_dev);
+ if (res != 0)
+ goto out_err;
+ }
+
+ if ((dev->threads_num > 0) &&
+ (dev->threads_pool_type == SCST_THREADS_POOL_SHARED)) {
+ res = scst_add_threads(&dev->dev_cmd_threads, dev, NULL,
+ dev->threads_num);
+ if (res != 0)
+ goto out_err;
+ }
+
+out:
+ return res;
+
+out_err:
+ scst_stop_dev_threads(dev);
+ goto out;
+}
+
+/* The activity supposed to be suspended and scst_mutex held */
+int scst_assign_dev_handler(struct scst_device *dev,
+ struct scst_dev_type *handler)
+{
+ int res = 0;
+ struct scst_tgt_dev *tgt_dev;
+ LIST_HEAD(attached_tgt_devs);
+
+ BUG_ON(handler == NULL);
+
+ if (dev->handler == handler)
+ goto out;
+
+ if (dev->handler == NULL)
+ goto assign;
+
+ if (dev->handler->detach_tgt) {
+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
+ dev_tgt_dev_list_entry) {
+ TRACE_DBG("Calling dev handler's detach_tgt(%p)",
+ tgt_dev);
+ dev->handler->detach_tgt(tgt_dev);
+ TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
+ }
+ }
+
+ if (dev->handler->detach) {
+ TRACE_DBG("%s", "Calling dev handler's detach()");
+ dev->handler->detach(dev);
+ TRACE_DBG("%s", "Old handler's detach() returned");
+ }
+
+ scst_stop_dev_threads(dev);
+
+ scst_devt_dev_sysfs_put(dev);
+
+assign:
+ dev->handler = handler;
+
+ if (handler == NULL)
+ goto out;
+
+ dev->threads_num = handler->threads_num;
+ dev->threads_pool_type = handler->threads_pool_type;
+
+ res = scst_create_devt_dev_sysfs(dev);
+ if (res != 0)
+ goto out_null;
+
+ if (handler->attach) {
+ TRACE_DBG("Calling new dev handler's attach(%p)", dev);
+ res = handler->attach(dev);
+ TRACE_DBG("New dev handler's attach() returned %d", res);
+ if (res != 0) {
+ PRINT_ERROR("New device handler's %s attach() "
+ "failed: %d", handler->name, res);
+ goto out_remove_sysfs;
+ }
+ }
+
+ if (handler->attach_tgt) {
+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
+ dev_tgt_dev_list_entry) {
+ TRACE_DBG("Calling dev handler's attach_tgt(%p)",
+ tgt_dev);
+ res = handler->attach_tgt(tgt_dev);
+ TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
+ if (res != 0) {
+ PRINT_ERROR("Device handler's %s attach_tgt() "
+ "failed: %d", handler->name, res);
+ goto out_err_detach_tgt;
+ }
+ list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
+ &attached_tgt_devs);
+ }
+ }
+
+ res = scst_create_dev_threads(dev);
+ if (res != 0)
+ goto out_err_detach_tgt;
+
+out:
+ return res;
+
+out_err_detach_tgt:
+ if (handler && handler->detach_tgt) {
+ list_for_each_entry(tgt_dev, &attached_tgt_devs,
+ extra_tgt_dev_list_entry) {
+ TRACE_DBG("Calling handler's detach_tgt(%p)",
+ tgt_dev);
+ handler->detach_tgt(tgt_dev);
+ TRACE_DBG("%s", "Handler's detach_tgt() returned");
+ }
+ }
+ if (handler && handler->detach) {
+ TRACE_DBG("%s", "Calling handler's detach()");
+ handler->detach(dev);
+ TRACE_DBG("%s", "Handler's detach() returned");
+ }
+
+out_remove_sysfs:
+ scst_devt_dev_sysfs_put(dev);
+
+out_null:
+ dev->handler = &scst_null_devtype;
+ goto out;
+}
+
+/**
+ * scst_init_threads() - initialize SCST processing threads pool
+ *
+ * Initializes scst_cmd_threads structure
+ */
+void scst_init_threads(struct scst_cmd_threads *cmd_threads)
+{
+
+ spin_lock_init(&cmd_threads->cmd_list_lock);
+ INIT_LIST_HEAD(&cmd_threads->active_cmd_list);
+ init_waitqueue_head(&cmd_threads->cmd_list_waitQ);
+ INIT_LIST_HEAD(&cmd_threads->threads_list);
+
+ mutex_lock(&scst_suspend_mutex);
+ list_add_tail(&cmd_threads->lists_list_entry,
+ &scst_cmd_threads_list);
+ mutex_unlock(&scst_suspend_mutex);
+ return;
+}
+EXPORT_SYMBOL_GPL(scst_init_threads);
+
+/**
+ * scst_deinit_threads() - deinitialize SCST processing threads pool
+ *
+ * Deinitializes scst_cmd_threads structure
+ */
+void scst_deinit_threads(struct scst_cmd_threads *cmd_threads)
+{
+
+ mutex_lock(&scst_suspend_mutex);
+ list_del(&cmd_threads->lists_list_entry);
+ mutex_unlock(&scst_suspend_mutex);
+
+ BUG_ON(cmd_threads->io_context);
+ return;
+}
+EXPORT_SYMBOL_GPL(scst_deinit_threads);
+
+static void scst_stop_all_threads(void)
+{
+
+ mutex_lock(&scst_mutex);
+
+ scst_del_threads(&scst_main_cmd_threads, -1);
+
+ if (scst_mgmt_cmd_thread)
+ kthread_stop(scst_mgmt_cmd_thread);
+ if (scst_mgmt_thread)
+ kthread_stop(scst_mgmt_thread);
+ if (scst_init_cmd_thread)
+ kthread_stop(scst_init_cmd_thread);
+
+ mutex_unlock(&scst_mutex);
+ return;
+}
+
+static int scst_start_all_threads(int num)
+{
+ int res;
+
+ mutex_lock(&scst_mutex);
+
+ res = scst_add_threads(&scst_main_cmd_threads, NULL, NULL, num);
+ if (res < 0)
+ goto out_unlock;
+
+ scst_init_cmd_thread = kthread_run(scst_init_thread,
+ NULL, "scsi_tgt_init");
+ if (IS_ERR(scst_init_cmd_thread)) {
+ res = PTR_ERR(scst_init_cmd_thread);
+ PRINT_ERROR("kthread_create() for init cmd failed: %d", res);
+ scst_init_cmd_thread = NULL;
+ goto out_unlock;
+ }
+
+ scst_mgmt_cmd_thread = kthread_run(scst_tm_thread,
+ NULL, "scsi_tm");
+ if (IS_ERR(scst_mgmt_cmd_thread)) {
+ res = PTR_ERR(scst_mgmt_cmd_thread);
+ PRINT_ERROR("kthread_create() for TM failed: %d", res);
+ scst_mgmt_cmd_thread = NULL;
+ goto out_unlock;
+ }
+
+ scst_mgmt_thread = kthread_run(scst_global_mgmt_thread,
+ NULL, "scsi_tgt_mgmt");
+ if (IS_ERR(scst_mgmt_thread)) {
+ res = PTR_ERR(scst_mgmt_thread);
+ PRINT_ERROR("kthread_create() for mgmt failed: %d", res);
+ scst_mgmt_thread = NULL;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&scst_mutex);
+ return res;
+}
+
+/**
+ * scst_get() - increase global SCST ref counter
+ *
+ * Increases the global SCST ref counter, which prevents entering the
+ * suspended activities stage and thus protects against any global
+ * management operations.
+ */
+void scst_get(void)
+{
+ __scst_get(0);
+}
+EXPORT_SYMBOL_GPL(scst_get);
+
+/**
+ * scst_put() - decrease global SCST ref counter
+ *
+ * Decreases the global SCST ref counter, which prevents entering the
+ * suspended activities stage and thus protects against any global
+ * management operations. When it reaches zero, a pending suspend of
+ * activities, if any, will proceed.
+ */
+void scst_put(void)
+{
+ __scst_put();
+}
+EXPORT_SYMBOL_GPL(scst_put);
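+
+/*
+ * Pairing sketch (illustrative): fast-path code that must not run while
+ * activities are suspended brackets its work with the ref counter:
+ *
+ *	scst_get();
+ *	... lockless fast-path processing ...
+ *	scst_put();
+ */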
+
+/**
+ * scst_get_setup_id() - return SCST setup ID
+ *
+ * Returns SCST setup ID. This ID can be used for multiple
+ * setups with the same configuration.
+ */
+unsigned int scst_get_setup_id(void)
+{
+ return scst_setup_id;
+}
+EXPORT_SYMBOL_GPL(scst_get_setup_id);
+
+static int scst_add(struct device *cdev, struct class_interface *intf)
+{
+ struct scsi_device *scsidp;
+ int res = 0;
+
+ scsidp = to_scsi_device(cdev->parent);
+
+ if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
+ res = scst_register_device(scsidp);
+ return res;
+}
+
+static void scst_remove(struct device *cdev, struct class_interface *intf)
+{
+ struct scsi_device *scsidp;
+
+ scsidp = to_scsi_device(cdev->parent);
+
+ if (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0)
+ scst_unregister_device(scsidp);
+ return;
+}
+
+static struct class_interface scst_interface = {
+ .add_dev = scst_add,
+ .remove_dev = scst_remove,
+};
+
+static void __init scst_print_config(void)
+{
+ char buf[128];
+ int i, j;
+
+ i = snprintf(buf, sizeof(buf), "Enabled features: ");
+ j = i;
+
+#ifdef CONFIG_SCST_STRICT_SERIALIZING
+ i += snprintf(&buf[i], sizeof(buf) - i, "STRICT_SERIALIZING");
+#endif
+
+#ifdef CONFIG_SCST_EXTRACHECKS
+ i += snprintf(&buf[i], sizeof(buf) - i, "%sEXTRACHECKS",
+ (j == i) ? "" : ", ");
+#endif
+
+#ifdef CONFIG_SCST_TRACING
+ i += snprintf(&buf[i], sizeof(buf) - i, "%sTRACING",
+ (j == i) ? "" : ", ");
+#endif
+
+#ifdef CONFIG_SCST_DEBUG
+ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG",
+ (j == i) ? "" : ", ");
+#endif
+
+#ifdef CONFIG_SCST_DEBUG_TM
+ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_TM",
+ (j == i) ? "" : ", ");
+#endif
+
+#ifdef CONFIG_SCST_DEBUG_RETRY
+ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_RETRY",
+ (j == i) ? "" : ", ");
+#endif
+
+#ifdef CONFIG_SCST_DEBUG_OOM
+ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_OOM",
+ (j == i) ? "" : ", ");
+#endif
+
+#ifdef CONFIG_SCST_DEBUG_SN
+ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_SN",
+ (j == i) ? "" : ", ");
+#endif
+
+#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
+ i += snprintf(&buf[i], sizeof(buf) - i, "%sUSE_EXPECTED_VALUES",
+ (j == i) ? "" : ", ");
+#endif
+
+#ifdef CONFIG_SCST_ALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ
+ i += snprintf(&buf[i], sizeof(buf) - i,
+ "%sALLOW_PASSTHROUGH_IO_SUBMIT_IN_SIRQ",
+ (j == i) ? "" : ", ");
+#endif
+
+#ifdef CONFIG_SCST_STRICT_SECURITY
+ i += snprintf(&buf[i], sizeof(buf) - i, "%sSCST_STRICT_SECURITY",
+ (j == i) ? "" : ", ");
+#endif
+
+ if (j != i)
+ PRINT_INFO("%s", buf);
+}
+
+static int __init init_scst(void)
+{
+ int res, i;
+ int scst_num_cpus;
+
+ {
+ struct scsi_sense_hdr *shdr;
+ BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < sizeof(*shdr));
+ }
+ {
+ struct scst_tgt_dev *t;
+ struct scst_cmd *c;
+ BUILD_BUG_ON(sizeof(t->curr_sn) != sizeof(t->expected_sn));
+ BUILD_BUG_ON(sizeof(c->sn) != sizeof(t->expected_sn));
+ }
+
+ mutex_init(&scst_mutex);
+ INIT_LIST_HEAD(&scst_template_list);
+ INIT_LIST_HEAD(&scst_dev_list);
+ INIT_LIST_HEAD(&scst_dev_type_list);
+ spin_lock_init(&scst_main_lock);
+ spin_lock_init(&scst_init_lock);
+ init_waitqueue_head(&scst_init_cmd_list_waitQ);
+ INIT_LIST_HEAD(&scst_init_cmd_list);
+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
+ scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
+#endif
+ atomic_set(&scst_cmd_count, 0);
+ spin_lock_init(&scst_mcmd_lock);
+ INIT_LIST_HEAD(&scst_active_mgmt_cmd_list);
+ INIT_LIST_HEAD(&scst_delayed_mgmt_cmd_list);
+ init_waitqueue_head(&scst_mgmt_cmd_list_waitQ);
+ init_waitqueue_head(&scst_mgmt_waitQ);
+ spin_lock_init(&scst_mgmt_lock);
+ INIT_LIST_HEAD(&scst_sess_init_list);
+ INIT_LIST_HEAD(&scst_sess_shut_list);
+ init_waitqueue_head(&scst_dev_cmd_waitQ);
+ mutex_init(&scst_suspend_mutex);
+ INIT_LIST_HEAD(&scst_cmd_threads_list);
+ scst_virt_dev_last_id = 1;
+
+ scst_init_threads(&scst_main_cmd_threads);
+
+ res = scst_lib_init();
+ if (res != 0)
+ goto out;
+
+ scst_num_cpus = num_online_cpus();
+
+ /* ToDo: register_cpu_notifier() */
+
+ if (scst_threads == 0)
+ scst_threads = scst_num_cpus;
+
+ if (scst_threads < 1) {
+ PRINT_ERROR("%s", "scst_threads can not be less than 1");
+ scst_threads = scst_num_cpus;
+ }
+
+#define INIT_CACHEP(p, s, o) do { \
+ p = KMEM_CACHE(s, SCST_SLAB_FLAGS); \
+ TRACE_MEM("Slab create: %s at %p size %zd", #s, p, \
+ sizeof(struct s)); \
+ if (p == NULL) { \
+ res = -ENOMEM; \
+ goto o; \
+ } \
+ } while (0)
+
+ INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out_lib_exit);
+ INIT_CACHEP(scst_mgmt_stub_cachep, scst_mgmt_cmd_stub,
+ out_destroy_mgmt_cache);
+ INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA,
+ out_destroy_mgmt_stub_cache);
+ {
+ struct scst_sense { uint8_t s[SCST_SENSE_BUFFERSIZE]; };
+ INIT_CACHEP(scst_sense_cachep, scst_sense,
+ out_destroy_ua_cache);
+ }
+ INIT_CACHEP(scst_aen_cachep, scst_aen, out_destroy_sense_cache);
+ INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_aen_cache);
+ INIT_CACHEP(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
+ INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
+ INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);
+
+ scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
+ mempool_free_slab, scst_mgmt_cachep);
+ if (scst_mgmt_mempool == NULL) {
+ res = -ENOMEM;
+ goto out_destroy_acg_cache;
+ }
+
+ /*
+	 * All mgmt stubs, UAs and sense buffers are bursty and losing them
+ * may have fatal consequences, so let's have big pools for them.
+ */
+
+ scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
+ mempool_free_slab, scst_mgmt_stub_cachep);
+ if (scst_mgmt_stub_mempool == NULL) {
+ res = -ENOMEM;
+ goto out_destroy_mgmt_mempool;
+ }
+
+ scst_ua_mempool = mempool_create(512, mempool_alloc_slab,
+ mempool_free_slab, scst_ua_cachep);
+ if (scst_ua_mempool == NULL) {
+ res = -ENOMEM;
+ goto out_destroy_mgmt_stub_mempool;
+ }
+
+ scst_sense_mempool = mempool_create(1024, mempool_alloc_slab,
+ mempool_free_slab, scst_sense_cachep);
+ if (scst_sense_mempool == NULL) {
+ res = -ENOMEM;
+ goto out_destroy_ua_mempool;
+ }
+
+ scst_aen_mempool = mempool_create(100, mempool_alloc_slab,
+ mempool_free_slab, scst_aen_cachep);
+ if (scst_aen_mempool == NULL) {
+ res = -ENOMEM;
+ goto out_destroy_sense_mempool;
+ }
+
+ res = scst_sysfs_init();
+ if (res != 0)
+ goto out_destroy_aen_mempool;
+
+ if (scst_max_cmd_mem == 0) {
+ struct sysinfo si;
+ si_meminfo(&si);
+#if BITS_PER_LONG == 32
+ scst_max_cmd_mem = min(
+ (((uint64_t)(si.totalram - si.totalhigh) << PAGE_SHIFT)
+ >> 20) >> 2, (uint64_t)1 << 30);
+#else
+ scst_max_cmd_mem = (((si.totalram - si.totalhigh) << PAGE_SHIFT)
+ >> 20) >> 2;
+#endif
+ }
+
+ if (scst_max_dev_cmd_mem != 0) {
+ if (scst_max_dev_cmd_mem > scst_max_cmd_mem) {
+ PRINT_ERROR("scst_max_dev_cmd_mem (%d) > "
+ "scst_max_cmd_mem (%d)",
+ scst_max_dev_cmd_mem,
+ scst_max_cmd_mem);
+ scst_max_dev_cmd_mem = scst_max_cmd_mem;
+ }
+ } else
+ scst_max_dev_cmd_mem = scst_max_cmd_mem * 2 / 5;
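+
+	/*
+	 * Worked example of the defaults above (illustrative): on a 64-bit
+	 * machine with roughly 8 GB of usable RAM, scst_max_cmd_mem defaults
+	 * to about 8192 / 4 = 2048 MB and scst_max_dev_cmd_mem to
+	 * 2048 * 2 / 5 = 819 MB.
+	 */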
+
+ res = scst_sgv_pools_init(
+ ((uint64_t)scst_max_cmd_mem << 10) >> (PAGE_SHIFT - 10), 0);
+ if (res != 0)
+ goto out_sysfs_cleanup;
+
+ res = scsi_register_interface(&scst_interface);
+ if (res != 0)
+ goto out_destroy_sgv_pool;
+
+ for (i = 0; i < (int)ARRAY_SIZE(scst_tasklets); i++) {
+ spin_lock_init(&scst_tasklets[i].tasklet_lock);
+ INIT_LIST_HEAD(&scst_tasklets[i].tasklet_cmd_list);
+ tasklet_init(&scst_tasklets[i].tasklet,
+ (void *)scst_cmd_tasklet,
+ (unsigned long)&scst_tasklets[i]);
+ }
+
+ TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,
+ scst_threads);
+
+ res = scst_start_all_threads(scst_threads);
+ if (res < 0)
+ goto out_thread_free;
+
+ PRINT_INFO("SCST version %s loaded successfully (max mem for "
+ "commands %dMB, per device %dMB)", SCST_VERSION_STRING,
+ scst_max_cmd_mem, scst_max_dev_cmd_mem);
+
+ scst_print_config();
+
+out:
+ return res;
+
+out_thread_free:
+ scst_stop_all_threads();
+
+ scsi_unregister_interface(&scst_interface);
+
+out_destroy_sgv_pool:
+ scst_sgv_pools_deinit();
+
+out_sysfs_cleanup:
+ scst_sysfs_cleanup();
+
+out_destroy_aen_mempool:
+ mempool_destroy(scst_aen_mempool);
+
+out_destroy_sense_mempool:
+ mempool_destroy(scst_sense_mempool);
+
+out_destroy_ua_mempool:
+ mempool_destroy(scst_ua_mempool);
+
+out_destroy_mgmt_stub_mempool:
+ mempool_destroy(scst_mgmt_stub_mempool);
+
+out_destroy_mgmt_mempool:
+ mempool_destroy(scst_mgmt_mempool);
+
+out_destroy_acg_cache:
+ kmem_cache_destroy(scst_acgd_cachep);
+
+out_destroy_tgt_cache:
+ kmem_cache_destroy(scst_tgtd_cachep);
+
+out_destroy_sess_cache:
+ kmem_cache_destroy(scst_sess_cachep);
+
+out_destroy_cmd_cache:
+ kmem_cache_destroy(scst_cmd_cachep);
+
+out_destroy_aen_cache:
+ kmem_cache_destroy(scst_aen_cachep);
+
+out_destroy_sense_cache:
+ kmem_cache_destroy(scst_sense_cachep);
+
+out_destroy_ua_cache:
+ kmem_cache_destroy(scst_ua_cachep);
+
+out_destroy_mgmt_stub_cache:
+ kmem_cache_destroy(scst_mgmt_stub_cachep);
+
+out_destroy_mgmt_cache:
+ kmem_cache_destroy(scst_mgmt_cachep);
+
+out_lib_exit:
+ scst_lib_exit();
+ goto out;
+}
+
+static void __exit exit_scst(void)
+{
+
+ /* ToDo: unregister_cpu_notifier() */
+
+ scst_sysfs_cleanup();
+
+ scst_stop_all_threads();
+
+ scst_deinit_threads(&scst_main_cmd_threads);
+
+ scsi_unregister_interface(&scst_interface);
+
+ scst_sgv_pools_deinit();
+
+#define DEINIT_CACHEP(p) do { \
+ kmem_cache_destroy(p); \
+ p = NULL; \
+ } while (0)
+
+ mempool_destroy(scst_mgmt_mempool);
+ mempool_destroy(scst_mgmt_stub_mempool);
+ mempool_destroy(scst_ua_mempool);
+ mempool_destroy(scst_sense_mempool);
+ mempool_destroy(scst_aen_mempool);
+
+ DEINIT_CACHEP(scst_mgmt_cachep);
+ DEINIT_CACHEP(scst_mgmt_stub_cachep);
+ DEINIT_CACHEP(scst_ua_cachep);
+ DEINIT_CACHEP(scst_sense_cachep);
+ DEINIT_CACHEP(scst_aen_cachep);
+ DEINIT_CACHEP(scst_cmd_cachep);
+ DEINIT_CACHEP(scst_sess_cachep);
+ DEINIT_CACHEP(scst_tgtd_cachep);
+ DEINIT_CACHEP(scst_acgd_cachep);
+
+ scst_lib_exit();
+
+ PRINT_INFO("%s", "SCST unloaded");
+ return;
+}
+
+module_init(init_scst);
+module_exit(exit_scst);
+
+MODULE_AUTHOR("Vladislav Bolkhovitin");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SCSI target core");
+MODULE_VERSION(SCST_VERSION_STRING);
--