svn commit: r272616 - stable/10/sys/cam/ctl
Alexander Motin
mav at FreeBSD.org
Mon Oct 6 12:35:43 UTC 2014
Author: mav
Date: Mon Oct 6 12:35:41 2014
New Revision: 272616
URL: https://svnweb.freebsd.org/changeset/base/272616
Log:
MFC r271309:
Improve cache control support, including DPO/FUA flags and the mode page.
At this moment it works only for files and ZVOLs in device mode, since BIOs
have no respective cache control flags (DPO/FUA).
Modified:
stable/10/sys/cam/ctl/ctl.c
stable/10/sys/cam/ctl/ctl.h
stable/10/sys/cam/ctl/ctl_backend_block.c
stable/10/sys/cam/ctl/ctl_io.h
stable/10/sys/cam/ctl/ctl_private.h
Directory Properties:
stable/10/ (props changed)
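
The heart of the change: the DPO and FUA bits that ctl_read_write() used to
parse into write-only locals (fua, dpo) are now folded into the lba/len/flags
word handed to the backend, which translates them into VFS I/O flags.  A
minimal sketch of the CDB decode for the 10-byte opcodes; the SRW10_* values
are an assumption (they follow sys/cam/scsi/scsi_all.h), while the CTL_LLF_*
values are copied from the ctl_io.h hunk below:

    #include <stdint.h>

    /* Assumption: SRW10_* bit positions per scsi_all.h / SBC-3. */
    #define SRW10_FUA       0x08    /* Force Unit Access: bypass write cache */
    #define SRW10_DPO       0x10    /* Disable Page Out: don't retain in cache */
    /* Copied from the ctl_io.h hunk in this commit. */
    #define CTL_LLF_FUA     0x04000000
    #define CTL_LLF_DPO     0x08000000

    static uint32_t
    decode_rw10_cache_flags(uint8_t byte2)
    {
            uint32_t flags = 0;

            if (byte2 & SRW10_FUA)
                    flags |= CTL_LLF_FUA;
            if (byte2 & SRW10_DPO)
                    flags |= CTL_LLF_DPO;
            return (flags);
    }
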
Modified: stable/10/sys/cam/ctl/ctl.c
==============================================================================
--- stable/10/sys/cam/ctl/ctl.c Mon Oct 6 12:31:51 2014 (r272615)
+++ stable/10/sys/cam/ctl/ctl.c Mon Oct 6 12:35:41 2014 (r272616)
@@ -263,7 +263,7 @@ static struct scsi_caching_page caching_
static struct scsi_caching_page caching_page_changeable = {
/*page_code*/SMS_CACHING_PAGE,
/*page_length*/sizeof(struct scsi_caching_page) - 2,
- /*flags1*/ 0,
+ /*flags1*/ SCP_WCE | SCP_RCD,
/*ret_priority*/ 0,
/*disable_pf_transfer_len*/ {0, 0},
/*min_prefetch*/ {0, 0},
@@ -6265,6 +6265,53 @@ ctl_control_page_handler(struct ctl_scsi
}
int
+ctl_caching_sp_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index, uint8_t *page_ptr)
+{
+ struct scsi_caching_page *current_cp, *saved_cp, *user_cp;
+ struct ctl_lun *lun;
+ int set_ua;
+ uint32_t initidx;
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
+ set_ua = 0;
+
+ user_cp = (struct scsi_caching_page *)page_ptr;
+ current_cp = (struct scsi_caching_page *)
+ (page_index->page_data + (page_index->page_len *
+ CTL_PAGE_CURRENT));
+ saved_cp = (struct scsi_caching_page *)
+ (page_index->page_data + (page_index->page_len *
+ CTL_PAGE_SAVED));
+
+ mtx_lock(&lun->lun_lock);
+ if ((current_cp->flags1 & (SCP_WCE | SCP_RCD)) !=
+ (user_cp->flags1 & (SCP_WCE | SCP_RCD)))
+ set_ua = 1;
+ current_cp->flags1 &= ~(SCP_WCE | SCP_RCD);
+ current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD);
+ saved_cp->flags1 &= ~(SCP_WCE | SCP_RCD);
+ saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD);
+ if (set_ua != 0) {
+ int i;
+ /*
+ * Let other initiators know that the mode
+ * parameters for this LUN have changed.
+ */
+ for (i = 0; i < CTL_MAX_INITIATORS; i++) {
+ if (i == initidx)
+ continue;
+
+ lun->pending_ua[i] |= CTL_UA_MODE_CHANGE;
+ }
+ }
+ mtx_unlock(&lun->lun_lock);
+
+ return (0);
+}
+
+int
ctl_power_sp_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index, uint8_t *page_ptr)
{
@@ -8992,17 +9039,14 @@ ctl_read_write(struct ctl_scsiio *ctsio)
struct ctl_lba_len_flags *lbalen;
uint64_t lba;
uint32_t num_blocks;
- int fua, dpo;
- int retval;
+ int flags, retval;
int isread;
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));
- fua = 0;
- dpo = 0;
-
+ flags = 0;
retval = CTL_RETVAL_COMPLETE;
isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10
@@ -9048,12 +9092,10 @@ ctl_read_write(struct ctl_scsiio *ctsio)
struct scsi_rw_10 *cdb;
cdb = (struct scsi_rw_10 *)ctsio->cdb;
-
if (cdb->byte2 & SRW10_FUA)
- fua = 1;
+ flags |= CTL_LLF_FUA;
if (cdb->byte2 & SRW10_DPO)
- dpo = 1;
-
+ flags |= CTL_LLF_DPO;
lba = scsi_4btoul(cdb->addr);
num_blocks = scsi_2btoul(cdb->length);
break;
@@ -9062,17 +9104,9 @@ ctl_read_write(struct ctl_scsiio *ctsio)
struct scsi_write_verify_10 *cdb;
cdb = (struct scsi_write_verify_10 *)ctsio->cdb;
-
- /*
- * XXX KDM we should do actual write verify support at some
- * point. This is obviously fake, we're just translating
- * things to a write. So we don't even bother checking the
- * BYTCHK field, since we don't do any verification. If
- * the user asks for it, we'll just pretend we did it.
- */
+ flags |= CTL_LLF_FUA;
if (cdb->byte2 & SWV_DPO)
- dpo = 1;
-
+ flags |= CTL_LLF_DPO;
lba = scsi_4btoul(cdb->addr);
num_blocks = scsi_2btoul(cdb->length);
break;
@@ -9082,11 +9116,10 @@ ctl_read_write(struct ctl_scsiio *ctsio)
struct scsi_rw_12 *cdb;
cdb = (struct scsi_rw_12 *)ctsio->cdb;
-
if (cdb->byte2 & SRW12_FUA)
- fua = 1;
+ flags |= CTL_LLF_FUA;
if (cdb->byte2 & SRW12_DPO)
- dpo = 1;
+ flags |= CTL_LLF_DPO;
lba = scsi_4btoul(cdb->addr);
num_blocks = scsi_4btoul(cdb->length);
break;
@@ -9095,13 +9128,11 @@ ctl_read_write(struct ctl_scsiio *ctsio)
struct scsi_write_verify_12 *cdb;
cdb = (struct scsi_write_verify_12 *)ctsio->cdb;
-
+ flags |= CTL_LLF_FUA;
if (cdb->byte2 & SWV_DPO)
- dpo = 1;
-
+ flags |= CTL_LLF_DPO;
lba = scsi_4btoul(cdb->addr);
num_blocks = scsi_4btoul(cdb->length);
-
break;
}
case READ_16:
@@ -9109,12 +9140,10 @@ ctl_read_write(struct ctl_scsiio *ctsio)
struct scsi_rw_16 *cdb;
cdb = (struct scsi_rw_16 *)ctsio->cdb;
-
if (cdb->byte2 & SRW12_FUA)
- fua = 1;
+ flags |= CTL_LLF_FUA;
if (cdb->byte2 & SRW12_DPO)
- dpo = 1;
-
+ flags |= CTL_LLF_DPO;
lba = scsi_8btou64(cdb->addr);
num_blocks = scsi_4btoul(cdb->length);
break;
@@ -9123,10 +9152,9 @@ ctl_read_write(struct ctl_scsiio *ctsio)
struct scsi_write_verify_16 *cdb;
cdb = (struct scsi_write_verify_16 *)ctsio->cdb;
-
+ flags |= CTL_LLF_FUA;
if (cdb->byte2 & SWV_DPO)
- dpo = 1;
-
+ flags |= CTL_LLF_DPO;
lba = scsi_8btou64(cdb->addr);
num_blocks = scsi_4btoul(cdb->length);
break;
@@ -9144,13 +9172,6 @@ ctl_read_write(struct ctl_scsiio *ctsio)
}
/*
- * XXX KDM what do we do with the DPO and FUA bits? FUA might be
- * interesting for us, but if RAIDCore is in write-back mode,
- * getting it to do write-through for a particular transaction may
- * not be possible.
- */
-
- /*
* The first check is to make sure we're in bounds, the second
* check is to catch wrap-around problems. If the lba + num blocks
* is less than the lba, then we've wrapped around and the block
@@ -9174,11 +9195,22 @@ ctl_read_write(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
}
+ /* Set FUA and/or DPO if caches are disabled. */
+ if (isread) {
+ if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
+ SCP_RCD) != 0)
+ flags |= CTL_LLF_FUA | CTL_LLF_DPO;
+ } else {
+ if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
+ SCP_WCE) == 0)
+ flags |= CTL_LLF_FUA;
+ }
+
lbalen = (struct ctl_lba_len_flags *)
&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
lbalen->lba = lba;
lbalen->len = num_blocks;
- lbalen->flags = isread ? CTL_LLF_READ : CTL_LLF_WRITE;
+ lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags;
ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
ctsio->kern_rel_offset = 0;
@@ -9204,7 +9236,8 @@ ctl_cnw_cont(union ctl_io *io)
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
lbalen = (struct ctl_lba_len_flags *)
&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
- lbalen->flags = CTL_LLF_WRITE;
+ lbalen->flags &= ~CTL_LLF_COMPARE;
+ lbalen->flags |= CTL_LLF_WRITE;
CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n"));
retval = lun->backend->data_submit((union ctl_io *)ctsio);
@@ -9218,16 +9251,13 @@ ctl_cnw(struct ctl_scsiio *ctsio)
struct ctl_lba_len_flags *lbalen;
uint64_t lba;
uint32_t num_blocks;
- int fua, dpo;
- int retval;
+ int flags, retval;
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0]));
- fua = 0;
- dpo = 0;
-
+ flags = 0;
retval = CTL_RETVAL_COMPLETE;
switch (ctsio->cdb[0]) {
@@ -9235,11 +9265,10 @@ ctl_cnw(struct ctl_scsiio *ctsio)
struct scsi_compare_and_write *cdb;
cdb = (struct scsi_compare_and_write *)ctsio->cdb;
-
if (cdb->byte2 & SRW10_FUA)
- fua = 1;
+ flags |= CTL_LLF_FUA;
if (cdb->byte2 & SRW10_DPO)
- dpo = 1;
+ flags |= CTL_LLF_DPO;
lba = scsi_8btou64(cdb->addr);
num_blocks = cdb->length;
break;
@@ -9257,13 +9286,6 @@ ctl_cnw(struct ctl_scsiio *ctsio)
}
/*
- * XXX KDM what do we do with the DPO and FUA bits? FUA might be
- * interesting for us, but if RAIDCore is in write-back mode,
- * getting it to do write-through for a particular transaction may
- * not be possible.
- */
-
- /*
* The first check is to make sure we're in bounds, the second
* check is to catch wrap-around problems. If the lba + num blocks
* is less than the lba, then we've wrapped around and the block
@@ -9285,6 +9307,11 @@ ctl_cnw(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
}
+ /* Set FUA if write cache is disabled. */
+ if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
+ SCP_WCE) == 0)
+ flags |= CTL_LLF_FUA;
+
ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize;
ctsio->kern_rel_offset = 0;
@@ -9300,7 +9327,7 @@ ctl_cnw(struct ctl_scsiio *ctsio)
&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
lbalen->lba = lba;
lbalen->len = num_blocks;
- lbalen->flags = CTL_LLF_COMPARE;
+ lbalen->flags = CTL_LLF_COMPARE | flags;
CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n"));
retval = lun->backend->data_submit((union ctl_io *)ctsio);
@@ -9314,7 +9341,7 @@ ctl_verify(struct ctl_scsiio *ctsio)
struct ctl_lba_len_flags *lbalen;
uint64_t lba;
uint32_t num_blocks;
- int bytchk, dpo;
+ int bytchk, flags;
int retval;
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
@@ -9322,7 +9349,7 @@ ctl_verify(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));
bytchk = 0;
- dpo = 0;
+ flags = CTL_LLF_FUA;
retval = CTL_RETVAL_COMPLETE;
switch (ctsio->cdb[0]) {
@@ -9333,7 +9360,7 @@ ctl_verify(struct ctl_scsiio *ctsio)
if (cdb->byte2 & SVFY_BYTCHK)
bytchk = 1;
if (cdb->byte2 & SVFY_DPO)
- dpo = 1;
+ flags |= CTL_LLF_DPO;
lba = scsi_4btoul(cdb->addr);
num_blocks = scsi_2btoul(cdb->length);
break;
@@ -9345,7 +9372,7 @@ ctl_verify(struct ctl_scsiio *ctsio)
if (cdb->byte2 & SVFY_BYTCHK)
bytchk = 1;
if (cdb->byte2 & SVFY_DPO)
- dpo = 1;
+ flags |= CTL_LLF_DPO;
lba = scsi_4btoul(cdb->addr);
num_blocks = scsi_4btoul(cdb->length);
break;
@@ -9357,7 +9384,7 @@ ctl_verify(struct ctl_scsiio *ctsio)
if (cdb->byte2 & SVFY_BYTCHK)
bytchk = 1;
if (cdb->byte2 & SVFY_DPO)
- dpo = 1;
+ flags |= CTL_LLF_DPO;
lba = scsi_8btou64(cdb->addr);
num_blocks = scsi_4btoul(cdb->length);
break;
@@ -9399,10 +9426,10 @@ ctl_verify(struct ctl_scsiio *ctsio)
lbalen->lba = lba;
lbalen->len = num_blocks;
if (bytchk) {
- lbalen->flags = CTL_LLF_COMPARE;
+ lbalen->flags = CTL_LLF_COMPARE | flags;
ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
} else {
- lbalen->flags = CTL_LLF_VERIFY;
+ lbalen->flags = CTL_LLF_VERIFY | flags;
ctsio->kern_total_len = 0;
}
ctsio->kern_rel_offset = 0;
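
With ctl_caching_sp_handler() wired in, an initiator can actually flip the
cache bits with MODE SELECT.  A hedged, initiator-side sketch of the caching
page (0x08) bytes involved; build_caching_page() is a hypothetical helper,
field offsets per SBC-3.  Only WCE and RCD will stick on this target: the
handler above copies just those two bits, matching caching_page_changeable.

    #include <stdint.h>
    #include <string.h>

    #define CACHING_PAGE_CODE   0x08
    #define CACHING_PAGE_LEN    0x12    /* parameter bytes after the header */
    #define SCP_WCE             0x04    /* byte 2: Write Cache Enable */
    #define SCP_RCD             0x01    /* byte 2: Read Cache Disable */

    /* Fill the 20-byte caching page body that a MODE SELECT would carry. */
    static void
    build_caching_page(uint8_t page[20], int wce, int rcd)
    {
            memset(page, 0, 20);
            page[0] = CACHING_PAGE_CODE;
            page[1] = CACHING_PAGE_LEN;
            if (wce)
                    page[2] |= SCP_WCE;
            if (rcd)
                    page[2] |= SCP_RCD;
    }

When either bit actually changes, the handler queues CTL_UA_MODE_CHANGE for
every other initiator on the LUN, per the set_ua loop above, so they learn
that the mode parameters moved under them.
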
Modified: stable/10/sys/cam/ctl/ctl.h
==============================================================================
--- stable/10/sys/cam/ctl/ctl.h Mon Oct 6 12:31:51 2014 (r272615)
+++ stable/10/sys/cam/ctl/ctl.h Mon Oct 6 12:35:41 2014 (r272616)
@@ -162,6 +162,8 @@ int ctl_ffz(uint32_t *mask, uint32_t siz
int ctl_set_mask(uint32_t *mask, uint32_t bit);
int ctl_clear_mask(uint32_t *mask, uint32_t bit);
int ctl_is_set(uint32_t *mask, uint32_t bit);
+int ctl_caching_sp_handler(struct ctl_scsiio *ctsio,
+ struct ctl_page_index *page_index, uint8_t *page_ptr);
int ctl_control_page_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,
uint8_t *page_ptr);
Modified: stable/10/sys/cam/ctl/ctl_backend_block.c
==============================================================================
--- stable/10/sys/cam/ctl/ctl_backend_block.c Mon Oct 6 12:31:51 2014 (r272615)
+++ stable/10/sys/cam/ctl/ctl_backend_block.c Mon Oct 6 12:35:41 2014 (r272616)
@@ -205,7 +205,6 @@ struct ctl_be_block_io {
struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS];
struct iovec xiovecs[CTLBLK_MAX_SEGS];
int bio_cmd;
- int bio_flags;
int num_segs;
int num_bios_sent;
int num_bios_done;
@@ -602,7 +601,11 @@ ctl_be_block_dispatch_file(struct ctl_be
file_data = &be_lun->backend.file;
io = beio->io;
- flags = beio->bio_flags;
+ flags = 0;
+ if (ARGS(io)->flags & CTL_LLF_DPO)
+ flags |= IO_DIRECT;
+ if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA)
+ flags |= IO_SYNC;
bzero(&xuio, sizeof(xuio));
if (beio->bio_cmd == BIO_READ) {
@@ -652,8 +655,7 @@ ctl_be_block_dispatch_file(struct ctl_be
* So, to attempt to provide some barrier semantics in the
* BIO_ORDERED case, set both IO_DIRECT and IO_SYNC.
*/
- error = VOP_READ(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
- (IO_DIRECT|IO_SYNC) : 0, file_data->cred);
+ error = VOP_READ(be_lun->vn, &xuio, flags, file_data->cred);
VOP_UNLOCK(be_lun->vn, 0);
SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
@@ -690,8 +692,7 @@ ctl_be_block_dispatch_file(struct ctl_be
* So if we've got the BIO_ORDERED flag set, we want
* IO_SYNC in either the UFS or ZFS case.
*/
- error = VOP_WRITE(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
- IO_SYNC : 0, file_data->cred);
+ error = VOP_WRITE(be_lun->vn, &xuio, flags, file_data->cred);
VOP_UNLOCK(be_lun->vn, 0);
vn_finished_write(mountpoint);
@@ -755,7 +756,11 @@ ctl_be_block_dispatch_zvol(struct ctl_be
dev_data = &be_lun->backend.dev;
io = beio->io;
- flags = beio->bio_flags;
+ flags = 0;
+ if (ARGS(io)->flags & CTL_LLF_DPO)
+ flags |= IO_DIRECT;
+ if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA)
+ flags |= IO_SYNC;
bzero(&xuio, sizeof(xuio));
if (beio->bio_cmd == BIO_READ) {
@@ -783,10 +788,10 @@ ctl_be_block_dispatch_zvol(struct ctl_be
mtx_unlock(&be_lun->io_lock);
if (beio->bio_cmd == BIO_READ) {
- error = (*dev_data->csw->d_read)(dev_data->cdev, &xuio, 0);
+ error = (*dev_data->csw->d_read)(dev_data->cdev, &xuio, flags);
SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
} else {
- error = (*dev_data->csw->d_write)(dev_data->cdev, &xuio, 0);
+ error = (*dev_data->csw->d_write)(dev_data->cdev, &xuio, flags);
SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
}
@@ -877,7 +882,6 @@ ctl_be_block_unmap_dev_range(struct ctl_
while (len > 0) {
bio = g_alloc_bio();
bio->bio_cmd = BIO_DELETE;
- bio->bio_flags |= beio->bio_flags;
bio->bio_dev = dev_data->cdev;
bio->bio_offset = off;
bio->bio_length = MIN(len, maxlen);
@@ -976,7 +980,6 @@ ctl_be_block_dispatch_dev(struct ctl_be_
KASSERT(bio != NULL, ("g_alloc_bio() failed!\n"));
bio->bio_cmd = beio->bio_cmd;
- bio->bio_flags |= beio->bio_flags;
bio->bio_dev = dev_data->cdev;
bio->bio_caller1 = beio;
bio->bio_length = min(cur_size, max_iosize);
@@ -1055,15 +1058,6 @@ ctl_be_block_cw_dispatch_ws(struct ctl_b
return;
}
- /*
- * If the I/O came down with an ordered or head of queue tag, set
- * the BIO_ORDERED attribute. For head of queue tags, that's
- * pretty much the best we can do.
- */
- if ((io->scsiio.tag_type == CTL_TAG_ORDERED)
- || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE))
- beio->bio_flags = BIO_ORDERED;
-
switch (io->scsiio.tag_type) {
case CTL_TAG_ORDERED:
beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
@@ -1161,15 +1155,6 @@ ctl_be_block_cw_dispatch_unmap(struct ct
return;
}
- /*
- * If the I/O came down with an ordered or head of queue tag, set
- * the BIO_ORDERED attribute. For head of queue tags, that's
- * pretty much the best we can do.
- */
- if ((io->scsiio.tag_type == CTL_TAG_ORDERED)
- || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE))
- beio->bio_flags = BIO_ORDERED;
-
switch (io->scsiio.tag_type) {
case CTL_TAG_ORDERED:
beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
@@ -1308,20 +1293,6 @@ ctl_be_block_dispatch(struct ctl_be_bloc
bptrlen = PRIV(io);
bptrlen->ptr = (void *)beio;
- /*
- * If the I/O came down with an ordered or head of queue tag, set
- * the BIO_ORDERED attribute. For head of queue tags, that's
- * pretty much the best we can do.
- *
- * XXX KDM we don't have a great way to easily know about the FUA
- * bit right now (it is decoded in ctl_read_write(), but we don't
- * pass that knowledge to the backend), and in any case we would
- * need to determine how to handle it.
- */
- if ((io->scsiio.tag_type == CTL_TAG_ORDERED)
- || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE))
- beio->bio_flags = BIO_ORDERED;
-
switch (io->scsiio.tag_type) {
case CTL_TAG_ORDERED:
beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
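
This is where the new flags meet the VFS: both the file and ZVOL dispatch
paths now translate DPO into IO_DIRECT, and FUA (on writes only) into
IO_SYNC, replacing the tag-type/BIO_ORDERED heuristic deleted above.  A
condensed restatement of that mapping; the IO_* values here are stand-ins
for the real definitions in sys/vnode.h:

    #include <stdint.h>

    /* Stand-ins: real values come from sys/vnode.h. */
    #define IO_SYNC         0x0004  /* do I/O synchronously */
    #define IO_DIRECT       0x0100  /* attempt to bypass the buffer cache */
    /* Copied from the ctl_io.h hunk below. */
    #define CTL_LLF_FUA     0x04000000
    #define CTL_LLF_DPO     0x08000000

    static int
    llf_to_vnode_flags(uint32_t llf, int is_write)
    {
            int flags = 0;

            if (llf & CTL_LLF_DPO)
                    flags |= IO_DIRECT;
            if (is_write && (llf & CTL_LLF_FUA))
                    flags |= IO_SYNC;
            return (flags);
    }

On reads only DPO has an effect in this path, which is also why
ctl_read_write() sets both hint bits when the caching page reports the read
cache disabled (SCP_RCD).
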
Modified: stable/10/sys/cam/ctl/ctl_io.h
==============================================================================
--- stable/10/sys/cam/ctl/ctl_io.h Mon Oct 6 12:31:51 2014 (r272615)
+++ stable/10/sys/cam/ctl/ctl_io.h Mon Oct 6 12:35:41 2014 (r272616)
@@ -139,6 +139,8 @@ struct ctl_lba_len_flags {
uint64_t lba;
uint32_t len;
uint32_t flags;
+#define CTL_LLF_FUA 0x04000000
+#define CTL_LLF_DPO 0x08000000
#define CTL_LLF_READ 0x10000000
#define CTL_LLF_WRITE 0x20000000
#define CTL_LLF_VERIFY 0x40000000
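
The two new bits share the flags word with the operation selectors, which is
why ctl_read_write() and ctl_cnw() can simply OR them together.  A quick
compile-time check that the assignment is collision-free (values copied from
the hunk above; CTL_LLF_COMPARE, not shown here, also lives in this word):

    #define CTL_LLF_FUA     0x04000000
    #define CTL_LLF_DPO     0x08000000
    #define CTL_LLF_READ    0x10000000
    #define CTL_LLF_WRITE   0x20000000
    #define CTL_LLF_VERIFY  0x40000000

    /* The cache hints must stay disjoint from the operation bits. */
    _Static_assert(((CTL_LLF_FUA | CTL_LLF_DPO) &
        (CTL_LLF_READ | CTL_LLF_WRITE | CTL_LLF_VERIFY)) == 0,
        "CTL_LLF cache hints collide with operation bits");
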
Modified: stable/10/sys/cam/ctl/ctl_private.h
==============================================================================
--- stable/10/sys/cam/ctl/ctl_private.h Mon Oct 6 12:31:51 2014 (r272615)
+++ stable/10/sys/cam/ctl/ctl_private.h Mon Oct 6 12:35:41 2014 (r272616)
@@ -304,7 +304,7 @@ static const struct ctl_page_index page_
{SMS_RIGID_DISK_PAGE, 0, sizeof(struct scsi_rigid_disk_page), NULL,
CTL_PAGE_FLAG_DISK_ONLY, NULL, NULL},
{SMS_CACHING_PAGE, 0, sizeof(struct scsi_caching_page), NULL,
- CTL_PAGE_FLAG_DISK_ONLY, NULL, NULL},
+ CTL_PAGE_FLAG_DISK_ONLY, NULL, ctl_caching_sp_handler},
{SMS_CONTROL_MODE_PAGE, 0, sizeof(struct scsi_control_page), NULL,
CTL_PAGE_FLAG_NONE, NULL, ctl_control_page_handler},
{SMS_VENDOR_SPECIFIC_PAGE | SMPH_SPF, PWR_SUBPAGE_CODE,